Compare commits

..

276 commits

Author SHA1 Message Date
Victor Shyba
151337fa36 linter: ignore raise-missing-from 2020-09-04 00:11:18 -03:00
Lex Berezhny
7ebb9d06df discover and run all unittests 2020-08-20 14:27:28 -04:00
Lex Berezhny
b99de6b872 skip tests needing to be updated to use new APIs 2020-08-20 14:24:08 -04:00
Lex Berezhny
889464e51d drop components 2020-08-20 14:22:46 -04:00
Lex Berezhny
35c3ff1e30 test cleanup and fixing 2020-08-20 13:31:58 -04:00
Lex Berezhny
aa75b9bb25 moved wallet specific unit tests into wallet dir 2020-08-20 13:30:33 -04:00
Lex Berezhny
f995ceae8b pylint 2020-08-20 10:46:28 -04:00
Lex Berezhny
c1803434aa support clean exit 2020-08-20 10:44:29 -04:00
Lex Berezhny
4dfbdcc2d7 Update docker-compose.yml 2020-08-14 00:29:51 -04:00
Lex Berezhny
272940b6d6 removed docker/hooks 2020-08-14 03:47:53 +00:00
Lex Berezhny
baf384d6e0 cleanup 2020-08-14 03:42:04 +00:00
Lex Berezhny
a012c04974 docker-compose.yml 2020-08-14 03:38:33 +00:00
Lex Berezhny
1fa117a104 upgrade distro 2020-08-14 00:33:44 +00:00
Lex Berezhny
33a157959d index added 2020-08-13 12:18:39 -04:00
Lex Berezhny
747eace4ab lint 2020-08-13 12:08:35 -04:00
Lex Berezhny
8fa2d746e7 re-enable all github workflows 2020-08-13 11:57:10 -04:00
Lex Berezhny
a77b1f9997 move to docker dir 2020-08-13 11:50:20 -04:00
Lex Berezhny
c914f24089 chmod lbrynet 2020-08-13 11:35:24 -04:00
Lex Berezhny
e41a71a64e no dockerignore 2020-08-13 11:25:21 -04:00
Lex Berezhny
e59e238fd5 dockerfile bin error 2020-08-13 11:17:45 -04:00
Lex Berezhny
4c331d00e7 moved Dockerfile 2020-08-13 00:40:12 -04:00
Lex Berezhny
f56229bcce Dockerfile 2020-08-13 00:29:44 -04:00
Lex Berezhny
abf1247f61 runs-on github action 2020-08-13 00:20:45 -04:00
Lex Berezhny
4b230a97f9 docker 2020-08-13 00:17:43 -04:00
Lex Berezhny
bf7ac1562f docker 2020-08-12 03:34:37 -04:00
Lex Berezhny
2000d75c7a start --full-node 2020-08-12 00:08:06 -04:00
Lex Berezhny
1259002b51 systemd script installation 2020-08-11 22:50:24 -04:00
Lex Berezhny
70d9f4cf79 convert claim_id to channel_hash 2020-08-10 18:53:00 -04:00
Lex Berezhny
23723f8041 added support_search to api 2020-08-10 18:37:21 -04:00
Lex Berezhny
51a0f7ddc8 add index for release_time 2020-08-10 17:48:26 -04:00
Lex Berezhny
1228700487 typo 2020-08-10 14:10:15 -04:00
Lex Berezhny
67c5c192f3 fix order_by and include_totals 2020-08-10 14:03:12 -04:00
Lex Berezhny
6b9cf5b48c wip simulate_sync_console 2020-08-07 20:45:52 -04:00
Lex Berezhny
0a91bd35c5 db.resolve takes list 2020-08-06 11:45:23 -04:00
Lex Berezhny
84639cfb2e protobuf claim_search 2020-08-04 13:49:59 -04:00
Lex Berezhny
0b245aab31 use base64 for protobuf response 2020-08-04 11:47:03 -04:00
Lex Berezhny
ee3db31541 protobuf resolve response 2020-08-04 10:41:49 -04:00
Lex Berezhny
8a3b960a85 delete tag when rewinding 2020-08-03 22:36:14 -04:00
Lex Berezhny
2952609972 fixed bug when maintenance sync spanned multiple files 2020-08-03 16:53:40 -04:00
Lex Berezhny
9ab8a7dd81 magic numbers 2020-08-03 12:24:13 -04:00
Lex Berezhny
a7555932a9 Merge pull request #3012 from lbryio/unclosed_warnings_fix (fix unclosed transport warnings on tests) 2020-08-03 10:33:51 -04:00
Victor Shyba
9411b26fd3 fix unclosed transport warnings on tests 2020-08-03 03:30:29 -03:00
Lex Berezhny
85db7d3ce7 lbrycrd print text response 2020-07-31 14:38:45 -04:00
Lex Berezhny
87f1d5b0ae added lbrycrd-rpc-user/pass 2020-07-30 14:51:04 -04:00
Lex Berezhny
64b8caeb5c cleanup lbrycrd 2020-07-29 11:46:44 -04:00
Lex Berezhny
3315175d1c raise unauthorized error when cannot connect to lbrycrd 2020-07-29 11:32:43 -04:00
Lex Berezhny
a802d1f686 clear tasks 2020-07-29 00:41:40 -04:00
Lex Berezhny
684e389283 basicConfig 2020-07-29 00:35:39 -04:00
Lex Berezhny
895719a13d basic logging 2020-07-29 00:14:40 -04:00
Lex Berezhny
8f2cce7f61 removed extraneous @staticmethod annotation 2020-07-27 12:33:40 -04:00
Lex Berezhny
9114a9794d fix channel integration test 2020-07-27 12:19:12 -04:00
Lex Berezhny
a1f3254261 lint 2020-07-27 11:52:24 -04:00
Lex Berezhny
dbc0da2817 event generator and better ZMQ handling 2020-07-27 10:58:57 -04:00
Lex Berezhny
412ace1c6f remove assert causing failures in console.py 2020-07-22 18:07:18 -04:00
Lex Berezhny
8100efb48c expose api as POST not GET 2020-07-20 23:48:27 -04:00
Lex Berezhny
8d164dfed3 skipIf fix 2020-07-20 23:38:21 -04:00
Lex Berezhny
ffea76cdd5 skip event buffering tests on all OSes except linux 2020-07-20 23:36:47 -04:00
Lex Berezhny
72ddb0c195 use normalized instead of claim_name for claim search 2020-07-20 22:44:18 -04:00
Lex Berezhny
c57080711e added 99 as special number for no processes 2020-07-20 10:46:59 -04:00
Lex Berezhny
96aea579ac temporary debug print statements 2020-07-20 09:57:52 -04:00
Lex Berezhny
e2aae23575 commit db transaction after setting work_mem 2020-07-16 23:27:24 -04:00
Lex Berezhny
2ae700feb3 vacuum claim after updating channel stats 2020-07-16 10:53:27 -04:00
Lex Berezhny
d1ac066c6d include claim_hash in txo_claim_chahges postgres index 2020-07-16 10:53:03 -04:00
Lex Berezhny
b61424979d lint 2020-07-15 23:50:52 -04:00
Lex Berezhny
ca10874006 add vacuuming for tx table 2020-07-14 17:11:06 -04:00
Lex Berezhny
a4680878c4 fixed issues with database transaction isolation 2020-07-14 16:46:24 -04:00
Lex Berezhny
1c29ae7204 run vacuum after every sync 2020-07-14 13:26:46 -04:00
Andrey Beletsky
86069b10ca Bind API server to the configured host instead of localhost 2020-07-14 14:56:47 +07:00
Lex Berezhny
9c5e2a8c8d fix none exception 2020-07-14 00:11:30 -04:00
Lex Berezhny
622a3b77ef indent 2020-07-14 00:05:25 -04:00
Lex Berezhny
0dff82c31c save all claims, even bad ones, also fix cli 2020-07-13 23:58:49 -04:00
Lex Berezhny
8e683c9cd0 indexing fixes 2020-07-13 21:00:24 -04:00
Lex Berezhny
69c45d43d3 fixed multi block file test 2020-07-13 18:32:44 -04:00
Lex Berezhny
fab7b5579c skip test unreliable on mac 2020-07-13 18:21:41 -04:00
Lex Berezhny
9e87394fca lower event stress test params since it fails too easily on CI, attempt 3 2020-07-13 16:57:34 -04:00
Lex Berezhny
8c6633de17 lower event stress test params since it fails too easily on CI, attempt 2 2020-07-13 16:53:02 -04:00
Lex Berezhny
3af71a2674 lower event stress test params since it fails too easily on CI 2020-07-13 16:45:35 -04:00
Lex Berezhny
b792b134a2 require ubuntu 20.04 for integration tests 2020-07-13 16:41:08 -04:00
Lex Berezhny
f50196d395 fix channel integration test 2020-07-13 15:50:00 -04:00
Lex Berezhny
248e04089b pylint 2020-07-13 15:45:21 -04:00
Lex Berezhny
8fd92cb649 more indexes 2020-07-13 14:29:38 -04:00
Lex Berezhny
af4138ff51 minor progress refinement 2020-07-13 13:12:01 -04:00
Lex Berezhny
462daf4dc4 increase work_mem and add more indexes 2020-07-13 12:38:28 -04:00
Lex Berezhny
e63151a370 merged channel and content syncs and no longer storing canonical_url in column 2020-07-13 09:30:32 -04:00
Lex Berezhny
09a2b2fa46 wider labels 2020-07-13 00:59:44 -04:00
Lex Berezhny
a3d91329fe performance 2020-07-13 00:55:30 -04:00
Lex Berezhny
7bf96fd637 silence invalid name errors when creating short_url 2020-07-12 18:07:12 -04:00
Lex Berezhny
5157b2535b correctly update main bar 2020-07-12 17:57:41 -04:00
Lex Berezhny
0151ce8040 use correct last progress bar state value 2020-07-12 17:29:58 -04:00
Lex Berezhny
5328ed105e reduce TX_FLUSH_SIZE 2020-07-12 16:44:36 -04:00
Lex Berezhny
7f01b1cb84 flipped main and secondary progress bars 2020-07-12 16:43:44 -04:00
Lex Berezhny
862c51946a test robustness 2020-07-12 16:28:13 -04:00
Lex Berezhny
1790ee3018 bug fixes and performance improvements 2020-07-12 16:27:51 -04:00
Lex Berezhny
7a4e5dcb05 handle claims in abandoned channels 2020-07-12 12:02:58 -04:00
Lex Berezhny
24a88db595 console fix 2020-07-12 09:22:06 -04:00
Lex Berezhny
915233c96c fixes 2020-07-12 08:55:25 -04:00
Lex Berezhny
aa9365f218 refactored sync 2020-07-11 18:18:33 -04:00
Lex Berezhny
15b8891fce make resolve command work from cli 2020-07-07 10:52:41 -04:00
Lex Berezhny
f8a8a75ae9 regenerated interface metadata 2020-07-07 10:49:40 -04:00
Lex Berezhny
d18ed6c19b wrap short url creation in try/except 2020-07-07 00:20:11 -04:00
Lex Berezhny
4aa44d3b5a set work_mem for postgres 2020-07-06 22:42:15 -04:00
Lex Berezhny
34a9dff141 fixed conflict with two cursors when one is reading and another is committing 2020-07-06 09:18:22 -04:00
Lex Berezhny
7d9bf03574 refactored 2020-07-05 23:03:45 -04:00
Lex Berezhny
3fe1981657 faster inputs/outputs sync 2020-07-03 15:47:25 -04:00
Lex Berezhny
192c79c49c capture stderr before process pool starts 2020-07-01 18:32:20 -04:00
Lex Berezhny
5883c9bc6c hide error from protobuf 2020-06-30 23:13:38 -04:00
Lex Berezhny
4b50d1e329 vacuum before setting inputs 2020-06-30 19:37:00 -04:00
Lex Berezhny
39d8a20fd5 pgcopy COPY command 2020-06-30 17:32:51 -04:00
Lex Berezhny
9ccf00f56b add pgcopy to setup.py 2020-06-30 12:18:03 -04:00
Lex Berezhny
46662b55c7 clean up build workflow 2020-06-30 12:17:48 -04:00
Lex Berezhny
b45a222f98 event subscription and publishing bug fixes 2020-06-29 18:10:26 -04:00
Lex Berezhny
434c1bc6b3 test_nulls 2020-06-27 23:23:57 -04:00
Lex Berezhny
2495df8859 null in claim description 2020-06-27 23:14:28 -04:00
Lex Berezhny
635aebfeeb fixed null claim name support for postgres 2020-06-27 23:00:12 -04:00
Lex Berezhny
81926a42f9 fix postgres db connection 2020-06-27 22:31:49 -04:00
Lex Berezhny
f2ff4410dc re-enable postgres integration tests 2020-06-27 22:24:59 -04:00
Lex Berezhny
564018c937 better debug badrow 2020-06-27 11:46:49 -04:00
Lex Berezhny
4d1eafc0a4 debug badrow 2020-06-27 09:39:23 -04:00
Lex Berezhny
211f8b2e59 pause console elapsed timer when no blocks read/save 2020-06-27 09:22:52 -04:00
Lex Berezhny
9500be26fd no order by file 2020-06-26 23:37:10 -04:00
Lex Berezhny
017ef5b41a order by file 2020-06-26 23:35:11 -04:00
Lex Berezhny
4b19861a74 console improvements 2020-06-26 21:52:01 -04:00
Lex Berezhny
5a0a987f0c refactored and simplified the blockchain sync 2020-06-26 10:39:58 -04:00
Lex Berezhny
4cb4659489 removed Takeovers for now 2020-06-26 10:39:16 -04:00
Lex Berezhny
e64b108404 console 2020-06-23 13:11:16 -04:00
Lex Berezhny
7870abaef4 script for testing console progress bars 2020-06-23 12:28:01 -04:00
Lex Berezhny
71e14c8e63 least 2020-06-22 20:30:05 -04:00
Lex Berezhny
b3b6361429 fix test 2020-06-22 18:47:10 -04:00
Lex Berezhny
0d5441f3bf update 2020-06-22 11:44:27 -04:00
Lex Berezhny
9757c69189 pylint 2020-06-22 11:02:59 -04:00
Lex Berezhny
d8fb31aedd added greatest sql function that supports postgres and sqlite 2020-06-22 10:52:17 -04:00
Lex Berezhny
54a0bf9290 fixups to make lbrynet work on cli with postgres 2020-06-21 23:21:43 -04:00
Lex Berezhny
a3ef8d7411 pylint 2020-06-21 20:14:14 -04:00
Lex Berezhny
4810ff5f94 run more tests on github workflow 2020-06-21 19:55:13 -04:00
Lex Berezhny
2306edebf7 tests/integration/blockchain/test_blockchain.py 2020-06-21 19:54:59 -04:00
Lex Berezhny
db5a33dc3f blockchain/test_database.py 2020-06-21 19:54:34 -04:00
Lex Berezhny
597bebb5be tests/unit/db 2020-06-21 19:53:18 -04:00
Lex Berezhny
73ff1d3b3a run more tests 2020-06-21 19:52:59 -04:00
Lex Berezhny
9198877098 include_is_spent no longer needed 2020-06-21 19:52:39 -04:00
Lex Berezhny
46da2584ca db 2020-06-21 19:51:09 -04:00
Lex Berezhny
53b7d0a58b blockchain 2020-06-21 19:50:53 -04:00
Lex Berezhny
d1a243247d progress 2020-06-19 14:28:34 -04:00
Lex Berezhny
18dc5fbc9f run crypto and schema tests 2020-06-10 15:45:57 -04:00
Lex Berezhny
410212c17a update tests 2020-06-10 15:45:29 -04:00
Lex Berezhny
a39f87b3c5 more resilient test 2020-06-10 15:25:45 -04:00
Lex Berezhny
147b9d5ad1 run another integration test 2020-06-09 23:52:08 -04:00
Lex Berezhny
1f210c0b0b skip event queue test until it can be more reliable 2020-06-09 23:42:21 -04:00
Lex Berezhny
86df4bdd11 run more unittests 2020-06-09 23:28:07 -04:00
Lex Berezhny
096f74d79b silence expected exception 2020-06-09 23:27:45 -04:00
Lex Berezhny
ae8bc59c65 remove duplicate dewies test 2020-06-09 23:27:19 -04:00
Lex Berezhny
01dbbb4c3a skip blockchain sync unittest 2020-06-09 23:26:59 -04:00
Lex Berezhny
96d1926da4 upgrade pyyaml 2020-06-09 23:25:42 -04:00
Lex Berezhny
2c10e71774 coverage on windows 2020-06-09 20:59:32 -04:00
Lex Berezhny
ea6be53071 run all steps 2020-06-09 20:55:16 -04:00
Lex Berezhny
cb5250f630 integration coverage 2020-06-09 20:48:38 -04:00
Lex Berezhny
54e83daa59 back to pinned coverallsapp 2020-06-09 18:13:27 -04:00
Lex Berezhny
df44d6ef56 back to bboe 2020-06-09 17:13:07 -04:00
Lex Berezhny
aa4ef94e15 andremiras coveralls - spacing tests-unit 2020-06-09 17:00:26 -04:00
Lex Berezhny
fad144cb96 andremiras coveralls - spacing 2020-06-09 16:59:34 -04:00
Lex Berezhny
1fe444bca2 andremiras coveralls 2020-06-09 16:58:22 -04:00
Lex Berezhny
55196ccb6b master github action 2020-06-09 16:46:59 -04:00
Lex Berezhny
b3cb50aff0 back to bboe 2020-06-09 16:30:55 -04:00
Lex Berezhny
ebf36f513c re-check default coveralls package 2020-06-09 14:03:26 -04:00
Lex Berezhny
47d207ff77 bboe coveralls 2020-06-09 13:56:01 -04:00
Lex Berezhny
d99e4221f2 pin coveralls app github action 2020-06-09 13:43:26 -04:00
Lex Berezhny
56ff1342c4 workflow typo 2020-06-09 13:01:18 -04:00
Lex Berezhny
a8c8614948 workflow indentation 2020-06-09 13:00:24 -04:00
Lex Berezhny
0e9184048c try coveralls again 2020-06-09 12:58:05 -04:00
Lex Berezhny
c01dceebcd try codecov python lib 2020-06-06 21:00:38 -04:00
Lex Berezhny
e3cc6ea224 moved codecov into tox 2020-06-06 20:54:34 -04:00
Lex Berezhny
44bbd9578d sans build 2020-06-06 20:19:09 -04:00
Lex Berezhny
7855f9c93f sans unit tests 2020-06-06 20:02:31 -04:00
Lex Berezhny
76e21f65df fix codecov test flags 2020-06-06 13:39:03 -04:00
Lex Berezhny
53c8876b5e fix codecov test flags 2020-06-06 13:27:59 -04:00
Lex Berezhny
2927875830 placeholder classes to pass pylint 2020-06-06 13:20:26 -04:00
Lex Berezhny
b9d954a394 install coverage 2020-06-06 13:10:42 -04:00
Lex Berezhny
a0fb3424aa more codecov experiments 2020-06-06 13:01:53 -04:00
Lex Berezhny
269c0f714e trying without codecov github action 2020-06-06 12:52:54 -04:00
Lex Berezhny
99d2a3f42b debugging codecov 2020-06-06 12:44:54 -04:00
Lex Berezhny
a1aa578bc0 codecov2 2020-06-06 12:23:32 -04:00
Lex Berezhny
82062b5601 trying codecov 2020-06-06 12:20:19 -04:00
Lex Berezhny
db1f984558 moved fast_sync.py script 2020-06-05 19:21:39 -04:00
Lex Berezhny
e66445b46e deleted lbry.extras 2020-06-05 19:18:00 -04:00
Lex Berezhny
82b69109bd pylint 2020-06-05 00:51:55 -04:00
Lex Berezhny
ec4e36446c update lbrycrd download URL 2020-06-05 00:38:57 -04:00
Lex Berezhny
3ef83febc0 progress 2020-06-05 00:35:22 -04:00
Lex Berezhny
ffecd02fbc deleted db/search 2020-06-05 00:31:26 -04:00
Lex Berezhny
ebddb1f0f5 moved exchange rate manager 2020-06-04 18:48:44 -04:00
Lex Berezhny
9d0b9805b2 github actions install psycopg2 2020-06-04 18:17:37 -04:00
Lex Berezhny
8653839c16 no tests run on github actions, just build binaries for now 2020-06-04 17:52:10 -04:00
Lex Berezhny
29b1f93699 switch cli to use pre-generated interface 2020-06-04 17:51:01 -04:00
Lex Berezhny
97c285f22b removed redundant assert equal 2020-06-04 17:50:21 -04:00
Lex Berezhny
c12e07de11 stricter check for old windows directory structure 2020-06-04 17:49:59 -04:00
Lex Berezhny
726bae97b0 generated lbry.service.metadata 2020-06-04 17:29:34 -04:00
Lex Berezhny
5efb36ffd2 windows & mac path tests 2020-06-04 17:29:34 -04:00
Lex Berezhny
b4d6b14599 + tqdm 2020-06-04 17:29:34 -04:00
Lex Berezhny
9a40381f5a imports 2020-06-04 17:29:34 -04:00
Lex Berezhny
47f6d542c5 import fix 2020-06-04 17:29:34 -04:00
Lex Berezhny
8799caa0e4 github actions 2020-06-04 17:29:34 -04:00
Lex Berezhny
a042377a7b run unittest on multiple platforms 2020-06-03 19:43:08 -04:00
Lex Berezhny
596ed08395 EventQueuePublisher uses a buffer to reduce number of tasks created 2020-05-22 18:40:21 -04:00
Lex Berezhny
2af29b892b new cli 2020-05-20 18:05:49 -04:00
Lex Berezhny
7ffb169376 Service.run -> Daemon.run 2020-05-20 18:05:13 -04:00
Lex Berezhny
d11f4f9bed switched Client class from plain GET RPC to WebSockets 2020-05-20 18:04:18 -04:00
Lex Berezhny
f2e844c476 ignore warnings when parsing old claims 2020-05-20 18:02:47 -04:00
Lex Berezhny
83f7eab0e7 tests for directory configurability 2020-05-20 18:02:05 -04:00
Lex Berezhny
4a9f9906a0 event streams can now be "closed" and thus you can listen for .last event 2020-05-20 18:01:34 -04:00
Lex Berezhny
b7ff6569e4 added Database.temp_sqlite_regtest 2020-05-20 17:59:26 -04:00
Lex Berezhny
12915143b8 improved directory structure configurability 2020-05-20 17:57:42 -04:00
Lex Berezhny
06d93e667a event reporting from blockchain.sync 2020-05-20 17:54:38 -04:00
Lex Berezhny
b341187b14 moved necessary code to get download directory to upstream appdirs and deleted winpaths.py 2020-05-19 17:05:03 -04:00
Lex Berezhny
a996e65eff integration tests updated 2020-05-18 08:29:15 -04:00
Lex Berezhny
0886a7946e unit tests updated 2020-05-18 08:28:23 -04:00
Lex Berezhny
be6ebf0047 refactored wallet and mnemonic 2020-05-18 08:26:36 -04:00
Lex Berezhny
7c4f943bcb updated to testcase 2020-05-18 08:24:44 -04:00
Lex Berezhny
5b5c45ea76 API is now typed and includes sharable argument lists 2020-05-18 08:24:15 -04:00
Lex Berezhny
6986211c1e lbry.blockchain fixes and cleanup 2020-05-18 08:22:23 -04:00
Lex Berezhny
8ac78990d8 updated setup.py and lbry/__init__.py now includes imports to solve cyclic import issues 2020-05-18 08:20:13 -04:00
Lex Berezhny
4f879bbbae new cli 2020-05-18 08:15:53 -04:00
Lex Berezhny
e3f080a7ad extract singular tags, languages, locations from kwargs 2020-05-18 08:15:24 -04:00
Lex Berezhny
d232eeaf81 plain execute function 2020-05-18 08:14:30 -04:00
Lex Berezhny
5d6388b366 added db_url to config 2020-05-18 08:13:29 -04:00
Lex Berezhny
955e44631d new API parser 2020-05-12 11:02:34 -04:00
Lex Berezhny
8dc5150dbe moved test fixture file 2020-05-12 10:35:34 -04:00
Lex Berezhny
d488bfd9d4 refactored mnemonic.py 2020-05-09 18:33:23 -04:00
Lex Berezhny
391b95fd12 lbry.wallet.words minified removing line breaks and extra space and converted to tuples 2020-05-08 16:25:56 -04:00
Lex Berezhny
4b172e4180 updated testcase to use api instead of daemon 2020-05-08 10:34:57 -04:00
Lex Berezhny
29ef4425b0 fix import in managed_stream 2020-05-08 10:33:41 -04:00
Lex Berezhny
558b1aeadf made lbry.db.queries.add_channel_keys_to_txo_results async 2020-05-08 10:30:40 -04:00
Lex Berezhny
41ce3e4ad8 fix Output source offset calculation and made generat_channel_private_key async 2020-05-08 10:23:50 -04:00
Lex Berezhny
8c91777e5d refactored lbry.wallet 2020-05-06 10:53:31 -04:00
Lex Berezhny
db89607e4e added create_default_wallet and create_default_account conf options 2020-05-06 10:50:29 -04:00
Lex Berezhny
d476f08d13 convenience methods on ledger 2020-05-06 10:50:00 -04:00
Lex Berezhny
2a0c653c37 claim_search progress 2020-05-01 23:25:07 -04:00
Lex Berezhny
219c7cf37d removed lbry.wallet.server and lbry.wallet.orchstr8 2020-05-01 23:22:17 -04:00
Lex Berezhny
2f575a393f minor updates to stream_manager.py 2020-05-01 09:34:57 -04:00
Lex Berezhny
713c665588 unittests 2020-05-01 09:34:34 -04:00
Lex Berezhny
9554b66a37 wip lbry.service 2020-05-01 09:33:58 -04:00
Lex Berezhny
533f31cc89 wip lbry.wallet 2020-05-01 09:33:10 -04:00
Lex Berezhny
fef09c1773 wip misc 2020-05-01 09:31:33 -04:00
Lex Berezhny
6a33d86bfe wip lbry.db 2020-05-01 09:29:44 -04:00
Lex Berezhny
ccd32eae70 wip lbry.blockchain 2020-05-01 09:28:51 -04:00
Lex Berezhny
c61c9726b0 lbry/blockchain/constants.py -> lbry/constants.py 2020-04-28 13:28:30 -04:00
Lex Berezhny
fd5be69d55 lbry/wallet/server/db/{reader,fts} -> lbry/db 2020-04-28 12:26:17 -04:00
Lex Berezhny
1f72751a88 unit test __init__.py files 2020-04-25 08:58:18 -04:00
Lex Berezhny
362ab67186 more wallet unit test cleanup 2020-04-25 08:51:41 -04:00
Lex Berezhny
ffe7fcf124 refactored wallet unit tests, moved most things to blockchain 2020-04-25 08:44:36 -04:00
Lex Berezhny
1f5dbc3eb8 renamed: wallet/stream.py -> event.py 2020-04-25 08:29:25 -04:00
Lex Berezhny
4dd85a169b cleaning up wallet directory, mostly moving things to blockchain 2020-04-25 08:25:29 -04:00
Lex Berezhny
fe547f1b0e dropping custom wallet rpc implementation 2020-04-25 08:05:46 -04:00
Lex Berezhny
ba154c799e increase timeout for wallet server payment service 2020-04-12 16:14:18 -04:00
Lex Berezhny
e2ffd24d51 listen for wallet pay service payment earlier in test 2020-04-12 15:51:18 -04:00
Lex Berezhny
8545ab880b psycopg2 returns Decimal for SUM() 2020-04-12 15:30:25 -04:00
Lex Berezhny
4e85f34353 pylint 2020-04-12 12:05:03 -04:00
Lex Berezhny
777c6342f8 merged some old stashed code 2020-04-12 11:59:00 -04:00
Lex Berezhny
10c262a095 postgres 2020-04-12 11:06:05 -04:00
Lex Berezhny
74e3471bd9 setup.py 2020-04-12 09:26:59 -04:00
Lex Berezhny
4048cfb3e8 Update node.py 2020-04-11 21:20:02 -04:00
Lex Berezhny
8b7b284c0d Update setup.py 2020-04-11 21:07:07 -04:00
Lex Berezhny
2d4d51388b Update setup.py 2020-04-11 20:54:08 -04:00
Lex Berezhny
06b75d07dc sudo 2020-04-11 20:21:28 -04:00
Lex Berezhny
87a44fd41c syntax 2020-04-11 20:20:21 -04:00
Lex Berezhny
70693f4d1a apt-get update 2020-04-11 20:17:55 -04:00
Lex Berezhny
42224dadb6 pylint 2020-04-11 20:15:04 -04:00
Lex Berezhny
53fc94d688 pylint and sqlalchemy 1.4 2020-04-11 20:01:10 -04:00
Lex Berezhny
0b6d01fecc sqlalchemy 2020-04-11 17:31:20 -04:00
Lex Berezhny
ae9d4af8c0 postgresql support added 2020-04-11 17:27:41 -04:00
Lex Berezhny
2309d6354c schema chaining 2020-04-08 19:03:01 -04:00
Lex Berezhny
fd2f9846e9 parsing and inserting works 2020-04-08 19:03:01 -04:00
Lex Berezhny
66666e1167 script to generate blockchain test data 2020-04-08 19:03:01 -04:00
Lex Berezhny
4c0fbb84d6 reproduce issue with lbrycrd -reindex 2020-04-08 19:03:01 -04:00
Lex Berezhny
fd3448ffb8 remove plyvel and add pyzmq 2020-04-08 19:03:01 -04:00
Lex Berezhny
6d93f97b51 Transaction._deserialize now optionally accepts stream and returns self 2020-04-08 19:03:01 -04:00
Lex Berezhny
2ee13ce39f BCDataStream can now also take file pointer as argument 2020-04-08 19:03:01 -04:00
Lex Berezhny
bad4320ddf initial import of fast sync 2020-04-08 19:03:01 -04:00
Lex Berezhny
a220736dea updated docs 2020-04-08 12:57:24 -04:00
315 changed files with 35153 additions and 40488 deletions


@@ -1,24 +1,24 @@
name: ci
on: ["push", "pull_request", "workflow_dispatch"]
on: push
jobs:
lint:
name: lint
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- uses: actions/checkout@v1
- uses: actions/setup-python@v1
with:
python-version: '3.9'
python-version: '3.7'
- name: extract pip cache
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
restore-keys: ${{ runner.os }}-pip-
- run: pip install --user --upgrade pip wheel
- run: pip install -e .[lint]
- run: |
pip install --user --upgrade pip wheel
pip install -e .[lint]
- run: make lint
tests-unit:
@@ -26,49 +26,37 @@ jobs:
strategy:
matrix:
os:
- ubuntu-20.04
- ubuntu-latest
- macos-latest
- windows-latest
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- uses: actions/checkout@v1
- uses: actions/setup-python@v1
with:
python-version: '3.9'
python-version: '3.7'
- name: set pip cache dir
shell: bash
run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
id: pip-cache
run: echo "::set-output name=dir::$(pip cache dir)"
- name: extract pip cache
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: ${{ env.PIP_CACHE_DIR }}
path: ${{ steps.pip-cache.outputs.dir }}
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
restore-keys: ${{ runner.os }}-pip-
- id: os-name
uses: ASzc/change-string-case-action@v5
with:
string: ${{ runner.os }}
- run: python -m pip install --user --upgrade pip wheel
- if: startsWith(runner.os, 'linux')
run: pip install -e .[test]
- if: startsWith(runner.os, 'linux')
env:
- run: |
pip install --user --upgrade pip wheel
pip install -e .[test]
- env:
HOME: /tmp
run: make test-unit-coverage
- if: startsWith(runner.os, 'linux') != true
run: pip install -e .[test]
- if: startsWith(runner.os, 'linux') != true
env:
HOME: /tmp
run: coverage run --source=lbry -m unittest tests/unit/test_conf.py
- name: submit coverage report
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_FLAG_NAME: tests-unit-${{ steps.os-name.outputs.lowercase }}
run: coverage run -m unittest discover -v tests.unit
- env:
COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_PARALLEL: true
name: Submit to coveralls
run: |
pip install coveralls
coveralls --service=github
pip install https://github.com/bboe/coveralls-python/archive/github_actions.zip
coveralls
tests-integration:
name: "tests / integration"
@@ -76,131 +64,134 @@ jobs:
strategy:
matrix:
test:
- datanetwork
# - datanetwork
- blockchain
- claims
- takeovers
- transactions
- other
# - other
db:
- sqlite
- postgres
services:
postgres:
image: postgres:12
env:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: postgres
ports:
- 5432:5432
options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5
steps:
- name: Configure sysctl limits
run: |
sudo swapoff -a
sudo sysctl -w vm.swappiness=1
sudo sysctl -w fs.file-max=262144
sudo sysctl -w vm.max_map_count=262144
- name: Runs Elasticsearch
uses: elastic/elastic-github-actions/elasticsearch@master
- uses: actions/checkout@v1
- uses: actions/setup-python@v1
with:
stack-version: 7.12.1
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.9'
python-version: '3.7'
- if: matrix.test == 'other'
run: |
sudo apt-get update
sudo apt-get install -y --no-install-recommends ffmpeg
- name: extract pip cache
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: ./.tox
key: tox-integration-${{ matrix.test }}-${{ hashFiles('setup.py') }}
restore-keys: txo-integration-${{ matrix.test }}-
- run: pip install tox coverage coveralls
- if: matrix.test == 'claims'
run: rm -rf .tox
- run: tox -e ${{ matrix.test }}
- name: submit coverage report
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_FLAG_NAME: tests-integration-${{ matrix.test }}
key: tox-integration-${{ matrix.test }}-${{ matrix.db }}-${{ hashFiles('setup.py') }}
restore-keys: txo-integration-${{ matrix.test }}-${{ matrix.db }}-
- run: pip install tox
- env:
TEST_DB: ${{ matrix.db }}
run: tox -e ${{ matrix.test }}
- env:
COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_PARALLEL: true
name: Submit to coveralls
run: |
pip install https://github.com/bboe/coveralls-python/archive/github_actions.zip
coverage combine tests
coveralls --service=github
coveralls
coverage:
coveralls-finished:
needs: ["tests-unit", "tests-integration"]
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
- name: finalize coverage report submission
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
pip install coveralls
coveralls --service=github --finish
- name: Coveralls Finished
uses: coverallsapp/github-action@57daa114
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
parallel-finished: true
build:
needs: ["lint", "tests-unit", "tests-integration"]
name: "build / binary"
name: "build"
strategy:
matrix:
os:
- ubuntu-20.04
- ubuntu-16.04
- macos-latest
- windows-latest
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- uses: actions/checkout@v1
- uses: actions/setup-python@v1
with:
python-version: '3.9'
- id: os-name
uses: ASzc/change-string-case-action@v5
with:
string: ${{ runner.os }}
python-version: '3.7'
- name: set pip cache dir
shell: bash
run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
id: pip-cache
run: echo "::set-output name=dir::$(pip cache dir)"
- name: extract pip cache
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: ${{ env.PIP_CACHE_DIR }}
path: ${{ steps.pip-cache.outputs.dir }}
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
restore-keys: ${{ runner.os }}-pip-
- run: pip install pyinstaller==4.6
- run: pip install -e .
- if: startsWith(github.ref, 'refs/tags/v')
run: python docker/set_build.py
- name: Setup
run: |
pip install --user --upgrade pip wheel
pip install sqlalchemy@git+https://github.com/eukreign/pyinstaller.git@sqlalchemy
- if: startsWith(runner.os, 'linux')
run: |
sudo apt-get install libzmq3-dev
pip install -e .[postgres]
- if: startsWith(runner.os, 'mac')
run: |
brew install zeromq
pip install -e .
- if: startsWith(runner.os, 'linux') || startsWith(runner.os, 'mac')
name: Build & Run (Unix)
run: |
pyinstaller --onefile --name lbrynet lbry/extras/cli.py
pyinstaller --onefile --name lbrynet lbry/cli.py
chmod +x dist/lbrynet
dist/lbrynet --version
- if: startsWith(runner.os, 'windows')
name: Build & Run (Windows)
run: |
pip install pywin32==301
pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py
pip install pywin32
pip install -e .
pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/cli.py
dist/lbrynet.exe --version
- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v2
with:
name: lbrynet-${{ steps.os-name.outputs.lowercase }}
name: lbrynet-${{ matrix.os }}
path: dist/
release:
name: "release"
if: startsWith(github.ref, 'refs/tags/v')
docker:
needs: ["build"]
runs-on: ubuntu-20.04
name: "build (docker)"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- uses: actions/download-artifact@v2
- name: upload binaries
env:
GITHUB_TOKEN: ${{ secrets.RELEASE_API_TOKEN }}
run: |
pip install githubrelease
chmod +x lbrynet-macos/lbrynet
chmod +x lbrynet-linux/lbrynet
zip --junk-paths lbrynet-mac.zip lbrynet-macos/lbrynet
zip --junk-paths lbrynet-linux.zip lbrynet-linux/lbrynet
zip --junk-paths lbrynet-windows.zip lbrynet-windows/lbrynet.exe
ls -lh
githubrelease release lbryio/lbry-sdk info ${GITHUB_REF#refs/tags/}
githubrelease asset lbryio/lbry-sdk upload ${GITHUB_REF#refs/tags/} \
lbrynet-mac.zip lbrynet-linux.zip lbrynet-windows.zip
githubrelease release lbryio/lbry-sdk publish ${GITHUB_REF#refs/tags/}
- name: fetch lbrynet binary
uses: actions/download-artifact@v2
with:
name: lbrynet-ubuntu-16.04
- run: |
chmod +x lbrynet
mv lbrynet docker
- name: build and push docker image
uses: docker/build-push-action@v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
repository: lbry/lbrynet
path: docker
tag_with_ref: true
tag_with_sha: true
add_git_labels: true


@@ -1,22 +0,0 @@
name: slack
on:
release:
types: [published]
jobs:
release:
name: "slack notification"
runs-on: ubuntu-20.04
steps:
- uses: LoveToKnow/slackify-markdown-action@v1.0.0
id: markdown
with:
text: "There is a new SDK release: ${{github.event.release.html_url}}\n${{ github.event.release.body }}"
- uses: slackapi/slack-github-action@v1.14.0
env:
CHANGELOG: '<!channel> ${{ steps.markdown.outputs.text }}'
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_RELEASE_BOT_WEBHOOK }}
with:
payload: '{"type": "mrkdwn", "text": ${{ toJSON(env.CHANGELOG) }} }'

.gitignore (vendored): 8 changed lines

@@ -6,17 +6,13 @@
/.coverage*
/lbry-venv
/venv
/lbry/blockchain
lbry.egg-info
__pycache__
_trial_temp/
trending*.log
/tests/integration/claims/files
/tests/integration/blockchain/files
/tests/.coverage.*
/lbry/blockchain/bin
/lbry/wallet/bin
/.vscode
/.gitignore

.gitlab-ci.yml (new file): 210 lines

@@ -0,0 +1,210 @@
default:
image: python:3.7
#cache:
# directories:
# - $HOME/venv
# - $HOME/.cache/pip
# - $HOME/Library/Caches/pip
# - $HOME/Library/Caches/Homebrew
# - $TRAVIS_BUILD_DIR/.tox
stages:
- test
- build
- assets
- release
.tagged:
rules:
- if: '$CI_COMMIT_TAG =~ /^v[0-9\.]+$/'
when: on_success
test:lint:
stage: test
script:
- make install tools
- make lint
test:unit:
stage: test
script:
- make install tools
- HOME=/tmp coverage run -p --source=lbry -m unittest discover -vv tests.unit
test:datanetwork-integration:
stage: test
script:
- pip install tox-travis
- tox -e datanetwork
test:blockchain-integration:
stage: test
script:
- pip install tox-travis
- tox -e blockchain
test:other-integration:
stage: test
script:
- apt-get update
- apt-get install -y --no-install-recommends ffmpeg
- pip install tox-travis
- tox -e other
test:json-api:
stage: test
script:
- make install tools
- HOME=/tmp coverage run -p --source=lbry scripts/generate_json_api.py
.build:
stage: build
artifacts:
expire_in: 1 day
paths:
- lbrynet-${OS}.zip
script:
- pip install --upgrade 'setuptools<45.0.0'
- pip install pyinstaller
- pip install -e .
- python3.7 docker/set_build.py # must come after lbry is installed because it imports lbry
- pyinstaller --onefile --name lbrynet lbry/extras/cli.py
- chmod +x dist/lbrynet
- zip --junk-paths ${CI_PROJECT_DIR}/lbrynet-${OS}.zip dist/lbrynet # gitlab expects artifacts to be in $CI_PROJECT_DIR
- openssl dgst -sha256 ${CI_PROJECT_DIR}/lbrynet-${OS}.zip | egrep -o [0-9a-f]+$ # get sha256 of asset. works on mac and ubuntu
- dist/lbrynet --version
build:linux:
extends: .build
image: ubuntu:16.04
variables:
OS: linux
before_script:
- apt-get update
- apt-get install -y --no-install-recommends software-properties-common zip curl build-essential
- add-apt-repository -y ppa:deadsnakes/ppa
- apt-get update
- apt-get install -y --no-install-recommends python3.7-dev
- python3.7 <(curl -q https://bootstrap.pypa.io/get-pip.py) # make sure we get pip with python3.7
build:mac:
extends: .build
tags: [macos] # makes gitlab use the mac runner
variables:
OS: mac
GIT_DEPTH: 5
VENV: /tmp/gitlab-lbry-sdk-venv
before_script:
# - brew upgrade python || true
- python3 --version | grep -q '^Python 3\.7\.' # dont upgrade python on every run. just make sure we're on the right Python
# - pip3 install --user --upgrade pip virtualenv
- pip3 --version | grep -q '\(python 3\.7\)'
- virtualenv --python=python3.7 "${VENV}"
- source "${VENV}/bin/activate"
after_script:
- rm -rf "${VENV}"
build:windows:
extends: .build
tags: [windows] # makes gitlab use the windows runner
variables:
OS: windows
GIT_DEPTH: 5
before_script:
- ./docker/install_choco.ps1
- choco install -y --x86 python3 7zip checksum
- python --version # | findstr /B "Python 3\.7\." # dont upgrade python on every run. just make sure we're on the right Python
- pip --version # | findstr /E '\(python 3\.7\)'
- pip install virtualenv pywin32
- virtualenv venv
- venv/Scripts/activate.ps1
- pip install pip==19.3.1; $true # $true ignores errors. need this to get the correct coincurve wheel. see commit notes for details.
after_script:
- rmdir -Recurse venv
script:
- pip install --upgrade 'setuptools<45.0.0'
- pip install pyinstaller==3.5
- pip install -e .
- python docker/set_build.py # must come after lbry is installed because it imports lbry
- pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico -F -n lbrynet lbry/extras/cli.py
- 7z a -tzip $env:CI_PROJECT_DIR/lbrynet-${OS}.zip ./dist/lbrynet.exe
- checksum --type=sha256 --file=$env:CI_PROJECT_DIR/lbrynet-${OS}.zip
- dist/lbrynet.exe --version
# s3 = upload asset to s3 (build.lbry.io)
.s3:
stage: assets
variables:
GIT_STRATEGY: none
script:
- "[ -f lbrynet-${OS}.zip ]" # check that asset exists before trying to upload
- pip install awscli
- S3_PATH="daemon/gitlab-build-${CI_PIPELINE_ID}_commit-${CI_COMMIT_SHA:0:7}$( if [ ! -z ${CI_COMMIT_TAG} ]; then echo _tag-${CI_COMMIT_TAG}; else echo _branch-${CI_COMMIT_REF_NAME}; fi )"
- AWS_ACCESS_KEY_ID=${ARTIFACTS_KEY} AWS_SECRET_ACCESS_KEY=${ARTIFACTS_SECRET} AWS_REGION=${ARTIFACTS_REGION}
aws s3 cp lbrynet-${OS}.zip s3://${ARTIFACTS_BUCKET}/${S3_PATH}/lbrynet-${OS}.zip
s3:linux:
extends: .s3
variables: {OS: linux}
needs: ["build:linux"]
s3:mac:
extends: .s3
variables: {OS: mac}
needs: ["build:mac"]
s3:windows:
extends: .s3
variables: {OS: windows}
needs: ["build:windows"]
# github = upload assets to github when there's a tagged release
.github:
extends: .tagged
stage: assets
variables:
GIT_STRATEGY: none
script:
- "[ -f lbrynet-${OS}.zip ]" # check that asset exists before trying to upload. githubrelease won't error if its missing
- pip install githubrelease
- githubrelease --no-progress --github-token ${GITHUB_CI_USER_ACCESS_TOKEN} asset lbryio/lbry-sdk upload ${CI_COMMIT_TAG} lbrynet-${OS}.zip
github:linux:
extends: .github
variables: {OS: linux}
needs: ["build:linux"]
github:mac:
extends: .github
variables: {OS: mac}
needs: ["build:mac"]
github:windows:
extends: .github
variables: {OS: windows}
needs: ["build:windows"]
publish:
extends: .tagged
stage: release
variables:
GIT_STRATEGY: none
script:
- pip install githubrelease
- githubrelease --no-progress --github-token ${GITHUB_CI_USER_ACCESS_TOKEN} release lbryio/lbry-sdk publish ${CI_COMMIT_TAG}
- >
curl -X POST -H 'Content-type: application/json' --data '{"text":"<!channel> There is a new SDK release: https://github.com/lbryio/lbry-sdk/releases/tag/'"${CI_COMMIT_TAG}"'\n'"$(curl -s "https://api.github.com/repos/lbryio/lbry-sdk/releases/tags/${CI_COMMIT_TAG}" | egrep '\w*\"body\":' | cut -d':' -f 2- | tail -c +3 | head -c -2)"'", "channel":"tech"}' "$(echo ${SLACK_WEBHOOK_URL_BASE64} | base64 -d)"


@@ -9,29 +9,20 @@ Here's a video walkthrough of this setup, which is itself hosted by the LBRY net
## Prerequisites
Running `lbrynet` from source requires Python 3.7. Get the installer for your OS [here](https://www.python.org/downloads/release/python-370/).
Running `lbrynet` from source requires Python 3.7 or higher. Get the installer for your OS [here](https://www.python.org/downloads/release/python-370/).
After installing Python 3.7, you'll need to install some additional libraries depending on your operating system.
Because of [issue #2769](https://github.com/lbryio/lbry-sdk/issues/2769)
at the moment the `lbrynet` daemon will only work correctly with Python 3.7.
If Python 3.8+ is used, the daemon will start but the RPC server
may not accept messages, returning the following:
```
Could not connect to daemon. Are you sure it's running?
```
After installing python 3, you'll need to install some additional libraries depending on your operating system.
### macOS
macOS users will need to install [xcode command line tools](https://developer.xamarin.com/guides/testcloud/calabash/configuring/osx/install-xcode-command-line-tools/) and [homebrew](http://brew.sh/).
These environment variables also need to be set:
```
PYTHONUNBUFFERED=1
EVENT_NOKQUEUE=1
```
1. PYTHONUNBUFFERED=1
2. EVENT_NOKQUEUE=1
Remaining dependencies can then be installed by running:
```
brew install python protobuf
```
@@ -40,17 +31,14 @@ Assistance installing Python3: https://docs.python-guide.org/starting/install3/o
### Linux
On Ubuntu (we recommend 18.04 or 20.04), install the following:
On Ubuntu (16.04 minimum, we recommend 18.04), install the following:
```
sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt-get update
sudo apt-get install build-essential python3.7 python3.7-dev git python3.7-venv libssl-dev python-protobuf
```
The [deadsnakes PPA](https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa) provides Python 3.7
for those Ubuntu distributions that no longer have it in their
official repositories.
On Raspbian, you will also need to install `python-pyparsing`.
If you're running another Linux distro, install the equivalent of the above packages for your system.
@@ -59,119 +47,62 @@ If you're running another Linux distro, install the equivalent of the above pack
### Linux/Mac
Clone the repository:
```bash
git clone https://github.com/lbryio/lbry-sdk.git
cd lbry-sdk
```
To install on Linux/Mac:
Create a Python virtual environment for lbry-sdk:
```bash
python3.7 -m venv lbry-venv
```
```
Clone the repository:
$ git clone https://github.com/lbryio/lbry-sdk.git
$ cd lbry-sdk
Activate virtual environment:
```bash
source lbry-venv/bin/activate
```
Create a Python virtual environment for lbry-sdk:
$ python3.7 -m venv lbry-venv
Activating lbry-sdk virtual environment:
$ source lbry-venv/bin/activate
Make sure you're on Python 3.7+ (as the default Python in virtual environment):
$ python --version
Make sure you're on Python 3.7+ as default in the virtual environment:
```bash
python --version
```
Install packages:
$ make install
Install packages:
```bash
make install
```
If you are on Linux and using PyCharm, generate initial configs:
$ make idea
```
If you are on Linux and using PyCharm, generate initial configs:
```bash
make idea
```
To verify your installation, `which lbrynet` should return a path inside
of the `lbry-venv` folder.
```bash
(lbry-venv) $ which lbrynet
/opt/lbry-sdk/lbry-venv/bin/lbrynet
```
To exit the virtual environment simply use the command `deactivate`.
To verify your installation, `which lbrynet` should return a path inside of the `lbry-venv` folder created by the `python3.7 -m venv lbry-venv` command.
### Windows
Clone the repository:
```bash
git clone https://github.com/lbryio/lbry-sdk.git
cd lbry-sdk
```
To install on Windows:
Create a Python virtual environment for lbry-sdk:
```bash
python -m venv lbry-venv
```
```
Clone the repository:
> git clone https://github.com/lbryio/lbry-sdk.git
> cd lbry-sdk
Activate virtual environment:
```bash
lbry-venv\Scripts\activate
```
Create a Python virtual environment for lbry-sdk:
> python -m venv lbry-venv
Install packages:
```bash
pip install -e .
```
Activating lbry-sdk virtual environment:
> lbry-venv\Scripts\activate
Install packages:
> pip install -e .
```
## Run the tests
### Elasticsearch
To run the integration tests, Elasticsearch must be available at localhost:9200/
To run the unit tests from the repo directory:
The easiest way to start it is using docker with:
```bash
make elastic-docker
```
Alternative installation methods are available [at Elasticsearch website](https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html).
To run the unit and integration tests from the repo directory:
```
python -m unittest discover tests.unit
python -m unittest discover tests.integration
```
```
python -m unittest discover tests.unit
```
## Usage
To start the API server:
```
lbrynet start
```
`lbrynet start`
Whenever the code inside [lbry-sdk/lbry](./lbry)
is modified, we should run `make install` to recompile the `lbrynet`
executable with the newest code.
## Development
When developing, remember to enter the environment,
and, if you wish, start the server interactively.
```bash
$ source lbry-venv/bin/activate
(lbry-venv) $ python lbry/extras/cli.py start
```
Parameters can be passed in the same way.
```bash
(lbry-venv) $ python lbry/extras/cli.py wallet balance
```
If a Python debugger (`pdb` or `ipdb`) is installed, we can also start it
this way, set breakpoints, and step through the code.
```bash
(lbry-venv) $ pip install ipdb
(lbry-venv) $ ipdb lbry/extras/cli.py
```
Happy hacking!


@@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2015-2022 LBRY Inc
Copyright (c) 2015-2020 LBRY Inc
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,


@@ -1,26 +1,20 @@
.PHONY: install tools lint test test-unit test-unit-coverage test-integration idea
install:
pip install -e .
.PHONY: tools lint test idea
lint:
pylint --rcfile=setup.cfg lbry
#mypy --ignore-missing-imports lbry
test: test-unit test-integration
test-unit:
python -m unittest discover tests.unit
test-unit-coverage:
coverage run --source=lbry -m unittest discover -vv tests.unit
test-integration:
test:
tox
idea:
mkdir -p .idea
cp -r scripts/idea/* .idea
elastic-docker:
docker run -d -v lbryhub:/usr/share/elasticsearch/data -p 9200:9200 -p 9300:9300 -e"ES_JAVA_OPTS=-Xms512m -Xmx512m" -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.12.1
start:
dropdb lbry
createdb lbry
lbrynet start --full-node \
--db-url=postgresql:///lbry --workers=28 --console=advanced --no-spv-address-filters \
--lbrycrd-rpc-user=lbry --lbrycrd-rpc-pass=somethingelse \
--lbrycrd-dir=${HOME}/.lbrycrd --data-dir=/tmp/tmp-lbrynet


@@ -1,10 +1,10 @@
# <img src="https://raw.githubusercontent.com/lbryio/lbry-sdk/master/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![build](https://github.com/lbryio/lbry-sdk/actions/workflows/main.yml/badge.svg)](https://github.com/lbryio/lbry-sdk/actions/workflows/main.yml) [![coverage](https://coveralls.io/repos/github/lbryio/lbry-sdk/badge.svg)](https://coveralls.io/github/lbryio/lbry-sdk)
# <img src="https://raw.githubusercontent.com/lbryio/lbry-sdk/master/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![Gitlab CI Badge](https://ci.lbry.tech/lbry/lbry-sdk/badges/master/pipeline.svg)](https://ci.lbry.tech/lbry/lbry-sdk)
LBRY is a decentralized peer-to-peer protocol for publishing and accessing digital content. It utilizes the [LBRY blockchain](https://github.com/lbryio/lbrycrd) as a global namespace and database of digital content. Blockchain entries contain searchable content metadata, identities, rights and access rules. LBRY also provides a data network that consists of peers (seeders) uploading and downloading data from other peers, possibly in exchange for payments, as well as a distributed hash table used by peers to discover other peers.
LBRY SDK for Python is currently the most fully featured implementation of the LBRY Network protocols and includes many useful components and tools for building decentralized applications. Primary features and components include:
* Built on Python 3.7 and `asyncio`.
* Built on Python 3.7+ and `asyncio`.
* Kademlia DHT (Distributed Hash Table) implementation for finding peers to download from and announcing to peers what we have to host ([lbry.dht](https://github.com/lbryio/lbry-sdk/tree/master/lbry/dht)).
* Blob exchange protocol for transferring encrypted blobs of content and negotiating payments ([lbry.blob_exchange](https://github.com/lbryio/lbry-sdk/tree/master/lbry/blob_exchange)).
* Protobuf schema for encoding and decoding metadata stored on the blockchain ([lbry.schema](https://github.com/lbryio/lbry-sdk/tree/master/lbry/schema)).
@@ -41,7 +41,7 @@ This project is MIT licensed. For the full license, see [LICENSE](LICENSE).
## Security
We take security seriously. Please contact security@lbry.com regarding any security issues. [Our PGP key is here](https://lbry.com/faq/pgp-key) if you need it.
We take security seriously. Please contact security@lbry.com regarding any security issues. [Our GPG key is here](https://lbry.com/faq/gpg-key) if you need it.
## Contact


@@ -1,9 +0,0 @@
# Security Policy
## Supported Versions
While we are not at v1.0 yet, only the latest release will be supported.
## Reporting a Vulnerability
See https://lbry.com/faq/security

docker/Dockerfile (new file): 5 lines

@@ -0,0 +1,5 @@
FROM ubuntu:20.04
COPY lbrynet /bin
RUN lbrynet --version
ENTRYPOINT ["lbrynet"]
CMD ["start", "--full-node"]


@@ -1,43 +0,0 @@
FROM debian:10-slim
ARG user=lbry
ARG projects_dir=/home/$user
ARG db_dir=/database
ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
RUN apt-get update && \
apt-get -y --no-install-recommends install \
wget \
automake libtool \
tar unzip \
build-essential \
pkg-config \
libleveldb-dev \
python3.7 \
python3-dev \
python3-pip \
python3-wheel \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
COPY . $projects_dir
RUN chown -R $user:$user $projects_dir
RUN mkdir -p $db_dir
RUN chown -R $user:$user $db_dir
USER $user
WORKDIR $projects_dir
RUN python3 -m pip install -U setuptools pip
RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf
VOLUME $db_dir
ENTRYPOINT ["python3", "scripts/dht_node.py"]


@@ -0,0 +1,8 @@
FROM ubuntu:20.04
RUN apt-get update && \
apt-get install -y wget unzip && \
wget -nv https://build.lbry.io/lbrycrd/block_info_fix_try2/lbrycrd-linux.zip && \
unzip -d /bin lbrycrd-linux.zip && \
rm -rf lbrycrd-linux.zip /var/lib/apt/lists/*
RUN lbrycrdd --version
ENTRYPOINT ["lbrycrdd"]


@@ -1,56 +0,0 @@
FROM debian:10-slim
ARG user=lbry
ARG db_dir=/database
ARG projects_dir=/home/$user
ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
RUN apt-get update && \
apt-get -y --no-install-recommends install \
wget \
tar unzip \
build-essential \
automake libtool \
pkg-config \
libleveldb-dev \
python3.7 \
python3-dev \
python3-pip \
python3-wheel \
python3-cffi \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
RUN mkdir -p $db_dir
RUN chown -R $user:$user $db_dir
COPY . $projects_dir
RUN chown -R $user:$user $projects_dir
USER $user
WORKDIR $projects_dir
RUN pip install uvloop
RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf
# entry point
ARG host=0.0.0.0
ARG tcp_port=50001
ARG daemon_url=http://lbry:lbry@localhost:9245/
VOLUME $db_dir
ENV TCP_PORT=$tcp_port
ENV HOST=$host
ENV DAEMON_URL=$daemon_url
ENV DB_DIRECTORY=$db_dir
ENV MAX_SESSIONS=1000000000
ENV MAX_SEND=1000000000000000000
ENV EVENT_LOOP_POLICY=uvloop
COPY ./docker/wallet_server_entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]


@@ -1,45 +0,0 @@
FROM debian:10-slim
ARG user=lbry
ARG downloads_dir=/database
ARG projects_dir=/home/$user
ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
RUN apt-get update && \
apt-get -y --no-install-recommends install \
wget \
automake libtool \
tar unzip \
build-essential \
pkg-config \
libleveldb-dev \
python3.7 \
python3-dev \
python3-pip \
python3-wheel \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
RUN mkdir -p $downloads_dir
RUN chown -R $user:$user $downloads_dir
COPY . $projects_dir
RUN chown -R $user:$user $projects_dir
USER $user
WORKDIR $projects_dir
RUN pip install uvloop
RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf
# entry point
VOLUME $downloads_dir
COPY ./docker/webconf.yaml /webconf.yaml
ENTRYPOINT ["/home/lbry/.local/bin/lbrynet", "start", "--config=/webconf.yaml"]


@@ -1,9 +0,0 @@
### How to run with docker-compose
1. Edit the config file, then fix its permissions with
```
sudo chown -R 999:999 webconf.yaml
```
2. Start SDK with
```
docker-compose up -d
```


@@ -1,49 +0,0 @@
version: "3"
volumes:
wallet_server:
es01:
services:
wallet_server:
depends_on:
- es01
image: lbry/wallet-server:${WALLET_SERVER_TAG:-latest-release}
restart: always
network_mode: host
ports:
- "50001:50001" # rpc port
- "2112:2112" # uncomment to enable prometheus
volumes:
- "wallet_server:/database"
environment:
- DAEMON_URL=http://lbry:lbry@127.0.0.1:9245
- MAX_QUERY_WORKERS=4
- CACHE_MB=1024
- CACHE_ALL_TX_HASHES=
- CACHE_ALL_CLAIM_TXOS=
- MAX_SEND=1000000000000000000
- MAX_RECEIVE=1000000000000000000
- MAX_SESSIONS=100000
- HOST=0.0.0.0
- TCP_PORT=50001
- PROMETHEUS_PORT=2112
- FILTERING_CHANNEL_IDS=770bd7ecba84fd2f7607fb15aedd2b172c2e153f 95e5db68a3101df19763f3a5182e4b12ba393ee8
- BLOCKING_CHANNEL_IDS=dd687b357950f6f271999971f43c785e8067c3a9 06871aa438032244202840ec59a469b303257cad b4a2528f436eca1bf3bf3e10ff3f98c57bd6c4c6
es01:
image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0
container_name: es01
environment:
- node.name=es01
- discovery.type=single-node
- indices.query.bool.max_clause_count=8192
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms4g -Xmx4g" # no more than 32, remember to disable swap
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- es01:/usr/share/elasticsearch/data
ports:
- 127.0.0.1:9200:9200


@@ -1,9 +1,41 @@
version: '3'
version: "3.8"
volumes:
lbrycrd-data:
services:
websdk:
image: vshyba/websdk
ports:
- '5279:5279'
- '5280:5280'
volumes:
- ./webconf.yaml:/webconf.yaml
postgres:
image: postgres:12
environment:
POSTGRES_USER: lbry
POSTGRES_PASSWORD: lbry
lbrycrd:
image: lbry/lbrycrd
build:
context: .
dockerfile: Dockerfile.lbrycrd
volumes:
- lbrycrd-data:/root/.lbrycrd
command: >
-rpcbind=lbrycrd
-rpcallowip=0.0.0.0/0
-rpcuser=lbryuser
-rpcpassword=lbrypass
-zmqpubhashblock=tcp://lbrycrd:29000
lbrynet:
image: lbry/lbrynet:fast_wallet_server_sync
depends_on:
- postgres
- lbrycrd
volumes:
- lbrycrd-data:/lbrycrd
command: >
start
--full-node
--api=0.0.0.0:5279
--db-url=postgresql://lbry:lbry@postgres:5432/lbry
--workers=12
--console=basic
--no-spv-address-filters
--lbrycrd-rpc-host=lbrycrd
--lbrycrd-rpc-user=lbryuser
--lbrycrd-rpc-pass=lbrypass
--lbrycrd-dir=/lbrycrd


@@ -1,7 +0,0 @@
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "$DIR/../.." ## make sure we're in the right place. Docker Hub screws this up sometimes
echo "docker build dir: $(pwd)"
docker build --build-arg DOCKER_TAG=$DOCKER_TAG --build-arg DOCKER_COMMIT=$SOURCE_COMMIT -f $DOCKERFILE_PATH -t $IMAGE_NAME .


@@ -1,11 +0,0 @@
# requires powershell and .NET 4+. see https://chocolatey.org/install for more info.
$chocoVersion = powershell choco -v
if(-not($chocoVersion)){
Write-Output "Chocolatey is not installed, installing now"
Write-Output "IF YOU KEEP GETTING THIS MESSAGE ON EVERY BUILD, TRY RESTARTING THE GITLAB RUNNER SO IT GETS CHOCO INTO IT'S ENV"
Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
}
else{
Write-Output "Chocolatey version $chocoVersion is already installed"
}


@@ -20,7 +20,7 @@ def _check_and_set(d: dict, key: str, value: str):
def main():
build_info = {item: build_info_mod.__dict__[item] for item in dir(build_info_mod) if not item.startswith("__")}
commit_hash = os.getenv('DOCKER_COMMIT', os.getenv('GITHUB_SHA'))
commit_hash = os.getenv('DOCKER_COMMIT', os.getenv('CI_COMMIT_SHA', os.getenv('TRAVIS_COMMIT')))
if commit_hash is None:
raise ValueError("Commit hash not found in env vars")
_check_and_set(build_info, "COMMIT_HASH", commit_hash[:6])
@@ -30,10 +30,8 @@ def main():
_check_and_set(build_info, "DOCKER_TAG", docker_tag)
_check_and_set(build_info, "BUILD", "docker")
else:
if re.match(r'refs/tags/v\d+\.\d+\.\d+$', str(os.getenv('GITHUB_REF'))):
_check_and_set(build_info, "BUILD", "release")
else:
_check_and_set(build_info, "BUILD", "qa")
ci_tag = os.getenv('CI_COMMIT_TAG', os.getenv('TRAVIS_TAG'))
_check_and_set(build_info, "BUILD", "release" if re.match(r'v\d+\.\d+\.\d+$', str(ci_tag)) else "qa")
log.debug("build info: %s", ", ".join([f"{k}={v}" for k, v in build_info.items()]))
with open(build_info_mod.__file__, 'w') as f:
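A minimal standalone sketch of the `set_build.py` logic shown above, assuming the GitHub Actions side of the diff; `detect_build` is a hypothetical wrapper name, not lbry-sdk's actual API:

```python
import os
import re
from typing import Tuple

def detect_build() -> Tuple[str, str]:
    # The first commit hash the CI environment provides wins, as in the diff.
    commit_hash = os.getenv('DOCKER_COMMIT', os.getenv('GITHUB_SHA'))
    if commit_hash is None:
        raise ValueError("Commit hash not found in env vars")
    # A ref shaped like refs/tags/vX.Y.Z marks a release build; anything else
    # (branch pushes, pull requests) is tagged "qa".
    ref = str(os.getenv('GITHUB_REF'))
    build = "release" if re.match(r'refs/tags/v\d+\.\d+\.\d+$', ref) else "qa"
    return commit_hash[:6], build

if __name__ == "__main__":
    os.environ.setdefault('GITHUB_SHA', 'abcdef1234567890')  # example values
    os.environ.setdefault('GITHUB_REF', 'refs/tags/v1.0.0')
    print(detect_build())  # -> ('abcdef', 'release')
```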


@@ -1,25 +0,0 @@
#!/bin/bash
# entrypoint for wallet server Docker image
set -euo pipefail
SNAPSHOT_URL="${SNAPSHOT_URL:-}" #off by default. latest snapshot at https://lbry.com/snapshot/wallet
if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/lbry-leveldb ]]; then
files="$(ls)"
echo "Downloading wallet snapshot from $SNAPSHOT_URL"
wget --no-verbose --trust-server-names --content-disposition "$SNAPSHOT_URL"
echo "Extracting snapshot..."
filename="$(grep -vf <(echo "$files") <(ls))" # finds the file that was not there before
case "$filename" in
*.tgz|*.tar.gz|*.tar.bz2 ) tar xvf "$filename" --directory /database ;;
*.zip ) unzip "$filename" -d /database ;;
* ) echo "Don't know how to extract ${filename}. SNAPSHOT COULD NOT BE LOADED" && exit 1 ;;
esac
rm "$filename"
fi
/home/lbry/.local/bin/lbry-hub-elastic-sync
echo 'starting server'
/home/lbry/.local/bin/lbry-hub "$@"


@@ -1,9 +0,0 @@
allowed_origin: "*"
max_key_fee: "0.0 USD"
save_files: false
save_blobs: false
streaming_server: "0.0.0.0:5280"
api: "0.0.0.0:5279"
data_dir: /tmp
download_dir: /tmp
wallet_dir: /tmp

File diff suppressed because one or more lines are too long


@@ -1,7 +0,0 @@
.git
.tox
__pycache__
dist
lbry.egg-info
docs
tests


@@ -1,2 +1,8 @@
__version__ = "0.113.0"
version = tuple(map(int, __version__.split('.'))) # pylint: disable=invalid-name
__version__ = "1.0.0"
from lbry.wallet import Account, Wallet, WalletManager
from lbry.blockchain import Ledger, RegTestLedger, TestNetLedger
from lbry.blockchain import Transaction, Output, Input
from lbry.blockchain import dewies_to_lbc, lbc_to_dewies, dict_values_to_lbc
from lbry.service import API, Daemon, FullNode, LightClient
from lbry.db.database import Database
from lbry.conf import Config
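The re-exports above put the public surface at the package root, which is what commit 8ac78990d8 ("updated setup.py and lbry/__init__.py now includes imports to solve cyclic import issues") describes. A hypothetical usage sketch, assuming this branch of lbry-sdk is installed:

```python
# Root-level imports replace deep module paths such as lbry.conf and
# lbry.db.database; hypothetical usage, requires this branch installed.
from lbry import Config, Database, FullNode

print(Config, Database, FullNode)
```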


@@ -1,6 +1,5 @@
import os
import re
import time
import asyncio
import binascii
import logging
@@ -71,27 +70,21 @@ class AbstractBlob:
'writers',
'verified',
'writing',
'readers',
'added_on',
'is_mine',
'readers'
]
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False,
):
def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None):
self.loop = loop
self.blob_hash = blob_hash
self.length = length
self.blob_completed_callback = blob_completed_callback
self.blob_directory = blob_directory
self.writers: typing.Dict[typing.Tuple[typing.Optional[str], typing.Optional[int]], HashBlobWriter] = {}
self.verified: asyncio.Event = asyncio.Event()
self.writing: asyncio.Event = asyncio.Event()
self.verified: asyncio.Event = asyncio.Event(loop=self.loop)
self.writing: asyncio.Event = asyncio.Event(loop=self.loop)
self.readers: typing.List[typing.BinaryIO] = []
self.added_on = added_on or time.time()
self.is_mine = is_mine
if not is_valid_blobhash(blob_hash):
raise InvalidBlobHashError(blob_hash)
@@ -117,7 +110,7 @@ class AbstractBlob:
if reader in self.readers:
self.readers.remove(reader)
def _write_blob(self, blob_bytes: bytes) -> asyncio.Task:
def _write_blob(self, blob_bytes: bytes):
raise NotImplementedError()
def set_length(self, length) -> None:
@@ -187,42 +180,35 @@ class AbstractBlob:
@classmethod
async def create_from_unencrypted(
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int, added_on: int, is_mine: bool,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None,
) -> BlobInfo:
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None) -> BlobInfo:
"""
Create an encrypted BlobFile from plaintext bytes
"""
blob_bytes, blob_hash = encrypt_blob_bytes(key, iv, unencrypted)
length = len(blob_bytes)
blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir, added_on, is_mine)
blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir)
writer = blob.get_blob_writer()
writer.write(blob_bytes)
await blob.verified.wait()
return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), added_on, blob_hash, is_mine)
return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), blob_hash)
def save_verified_blob(self, verified_bytes: bytes):
if self.verified.is_set():
return
def update_events(_):
self.verified.set()
self.writing.clear()
if self.is_writeable():
self.writing.set()
task = self._write_blob(verified_bytes)
task.add_done_callback(update_events)
self._write_blob(verified_bytes)
self.verified.set()
if self.blob_completed_callback:
task.add_done_callback(lambda _: self.blob_completed_callback(self))
self.blob_completed_callback(self)
def get_blob_writer(self, peer_address: typing.Optional[str] = None,
peer_port: typing.Optional[int] = None) -> HashBlobWriter:
if (peer_address, peer_port) in self.writers and not self.writers[(peer_address, peer_port)].closed():
raise OSError(f"attempted to download blob twice from {peer_address}:{peer_port}")
fut = asyncio.Future()
fut = asyncio.Future(loop=self.loop)
writer = HashBlobWriter(self.blob_hash, self.get_length, fut)
self.writers[(peer_address, peer_port)] = writer
@@ -256,13 +242,11 @@ class BlobBuffer(AbstractBlob):
"""
An in-memory only blob
"""
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
):
def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None):
self._verified_bytes: typing.Optional[BytesIO] = None
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory)
@contextlib.contextmanager
def _reader_context(self) -> typing.ContextManager[typing.BinaryIO]:
@@ -277,11 +261,9 @@ class BlobBuffer(AbstractBlob):
self.verified.clear()
def _write_blob(self, blob_bytes: bytes):
async def write():
if self._verified_bytes:
raise OSError("already have bytes for blob")
self._verified_bytes = BytesIO(blob_bytes)
return self.loop.create_task(write())
if self._verified_bytes:
raise OSError("already have bytes for blob")
self._verified_bytes = BytesIO(blob_bytes)
def delete(self):
if self._verified_bytes:
@@ -299,12 +281,10 @@ class BlobFile(AbstractBlob):
"""
A blob existing on the local file system
"""
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
):
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)
def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None):
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory)
if not blob_directory or not os.path.isdir(blob_directory):
raise OSError(f"invalid blob directory '{blob_directory}'")
self.file_path = os.path.join(self.blob_directory, self.blob_hash)
@@ -339,28 +319,22 @@ class BlobFile(AbstractBlob):
handle.close()
def _write_blob(self, blob_bytes: bytes):
def _write_blob():
with open(self.file_path, 'wb') as f:
f.write(blob_bytes)
async def write_blob():
await self.loop.run_in_executor(None, _write_blob)
return self.loop.create_task(write_blob())
with open(self.file_path, 'wb') as f:
f.write(blob_bytes)
def delete(self):
super().delete()
if os.path.isfile(self.file_path):
os.remove(self.file_path)
return super().delete()
@classmethod
async def create_from_unencrypted(
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int, added_on: float, is_mine: bool,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None
) -> BlobInfo:
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'],
asyncio.Task]] = None) -> BlobInfo:
if not blob_dir or not os.path.isdir(blob_dir):
raise OSError(f"cannot create blob in directory: '{blob_dir}'")
return await super().create_from_unencrypted(
loop, blob_dir, key, iv, unencrypted, blob_num, added_on, is_mine, blob_completed_callback
loop, blob_dir, key, iv, unencrypted, blob_num, blob_completed_callback
)
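
Read together, the save_verified_blob hunks revert the write path to being synchronous, so the verified event and the completion callback fire inline instead of from a task callback. Reassembled, the post-diff method reads roughly:

def save_verified_blob(self, verified_bytes: bytes):
    if self.verified.is_set():
        return
    if self.is_writeable():
        self._write_blob(verified_bytes)   # plain call; no task to chain callbacks onto
        self.verified.set()
        if self.blob_completed_callback:
            self.blob_completed_callback(self)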


@@ -7,19 +7,13 @@ class BlobInfo:
'blob_num',
'length',
'iv',
'added_on',
'is_mine'
]
def __init__(
self, blob_num: int, length: int, iv: str, added_on,
blob_hash: typing.Optional[str] = None, is_mine=False):
def __init__(self, blob_num: int, length: int, iv: str, blob_hash: typing.Optional[str] = None):
self.blob_hash = blob_hash
self.blob_num = blob_num
self.length = length
self.iv = iv
self.added_on = added_on
self.is_mine = is_mine
def as_dict(self) -> typing.Dict:
d = {


@@ -2,7 +2,7 @@ import os
import typing
import asyncio
import logging
from lbry.utils import LRUCacheWithMetrics
from lbry.utils import LRUCache
from lbry.blob.blob_file import is_valid_blobhash, BlobFile, BlobBuffer, AbstractBlob
from lbry.stream.descriptor import StreamDescriptor
from lbry.connection_manager import ConnectionManager
@@ -10,7 +10,11 @@ from lbry.connection_manager import ConnectionManager
if typing.TYPE_CHECKING:
from lbry.conf import Config
from lbry.dht.protocol.data_store import DictDataStore
from lbry.extras.daemon.storage import SQLiteStorage
class SQLiteStorage:
pass
log = logging.getLogger(__name__)
@@ -32,34 +36,34 @@ class BlobManager:
else self._node_data_store.completed_blobs
self.blobs: typing.Dict[str, AbstractBlob] = {}
self.config = config
self.decrypted_blob_lru_cache = None if not self.config.blob_lru_cache_size else LRUCacheWithMetrics(
self.decrypted_blob_lru_cache = None if not self.config.blob_lru_cache_size else LRUCache(
self.config.blob_lru_cache_size)
self.connection_manager = ConnectionManager(loop)
def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None, is_mine: bool = False):
def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None):
if self.config.save_blobs or (
is_valid_blobhash(blob_hash) and os.path.isfile(os.path.join(self.blob_dir, blob_hash))):
return BlobFile(
self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
self.loop, blob_hash, length, self.blob_completed, self.blob_dir
)
return BlobBuffer(
self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
self.loop, blob_hash, length, self.blob_completed, self.blob_dir
)
def get_blob(self, blob_hash, length: typing.Optional[int] = None, is_mine: bool = False):
def get_blob(self, blob_hash, length: typing.Optional[int] = None):
if blob_hash in self.blobs:
if self.config.save_blobs and isinstance(self.blobs[blob_hash], BlobBuffer):
buffer = self.blobs.pop(blob_hash)
if blob_hash in self.completed_blob_hashes:
self.completed_blob_hashes.remove(blob_hash)
self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
self.blobs[blob_hash] = self._get_blob(blob_hash, length)
if buffer.is_readable():
with buffer.reader_context() as reader:
self.blobs[blob_hash].write_blob(reader.read())
if length and self.blobs[blob_hash].length is None:
self.blobs[blob_hash].set_length(length)
else:
self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
self.blobs[blob_hash] = self._get_blob(blob_hash, length)
return self.blobs[blob_hash]
def is_blob_verified(self, blob_hash: str, length: typing.Optional[int] = None) -> bool:
@@ -83,8 +87,6 @@ class BlobManager:
to_add = await self.storage.sync_missing_blobs(in_blobfiles_dir)
if to_add:
self.completed_blob_hashes.update(to_add)
# check blobs that aren't set as finished but were seen on disk
await self.ensure_completed_blobs_status(in_blobfiles_dir - to_add)
if self.config.track_bandwidth:
self.connection_manager.start()
return True
@@ -107,26 +109,13 @@ class BlobManager:
if isinstance(blob, BlobFile):
if blob.blob_hash not in self.completed_blob_hashes:
self.completed_blob_hashes.add(blob.blob_hash)
return self.loop.create_task(self.storage.add_blobs(
(blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=True)
)
return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=True))
else:
return self.loop.create_task(self.storage.add_blobs(
(blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=False)
)
return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=False))
async def ensure_completed_blobs_status(self, blob_hashes: typing.Iterable[str]):
"""Ensures that completed blobs from a given list of blob hashes are set as 'finished' in the database."""
to_add = []
for blob_hash in blob_hashes:
if not self.is_blob_verified(blob_hash):
continue
blob = self.get_blob(blob_hash)
to_add.append((blob.blob_hash, blob.length, blob.added_on, blob.is_mine))
if len(to_add) > 500:
await self.storage.add_blobs(*to_add, finished=True)
to_add.clear()
return await self.storage.add_blobs(*to_add, finished=True)
def check_completed_blobs(self, blob_hashes: typing.List[str]) -> typing.List[str]:
"""Returns of the blobhashes_to_check, which are valid"""
return [blob_hash for blob_hash in blob_hashes if self.is_blob_verified(blob_hash)]
def delete_blob(self, blob_hash: str):
if not is_valid_blobhash(blob_hash):
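
For orientation: the get_blob hunk above, when save_blobs is enabled and a hash is currently held as an in-memory BlobBuffer, swaps it for a file-backed blob and replays any readable bytes into it. The promotion step in isolation (hedged sketch; make_file_blob stands in for _get_blob):

def promote_to_file(blobs, completed_hashes, blob_hash, make_file_blob):
    buffer = blobs.pop(blob_hash)            # the old in-memory blob
    completed_hashes.discard(blob_hash)      # must be re-verified once on disk
    blobs[blob_hash] = make_file_blob()
    if buffer.is_readable():
        with buffer.reader_context() as reader:
            blobs[blob_hash].write_blob(reader.read())
    return blobs[blob_hash]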


@@ -1,77 +0,0 @@
import asyncio
import logging
log = logging.getLogger(__name__)
class DiskSpaceManager:
def __init__(self, config, db, blob_manager, cleaning_interval=30 * 60, analytics=None):
self.config = config
self.db = db
self.blob_manager = blob_manager
self.cleaning_interval = cleaning_interval
self.running = False
self.task = None
self.analytics = analytics
self._used_space_bytes = None
async def get_free_space_mb(self, is_network_blob=False):
limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
space_used_mb = await self.get_space_used_mb()
space_used_mb = space_used_mb['network_storage'] if is_network_blob else space_used_mb['content_storage']
return max(0, limit_mb - space_used_mb)
async def get_space_used_bytes(self):
self._used_space_bytes = await self.db.get_stored_blob_disk_usage()
return self._used_space_bytes
async def get_space_used_mb(self, cached=True):
cached = cached and self._used_space_bytes is not None
space_used_bytes = self._used_space_bytes if cached else await self.get_space_used_bytes()
return {key: int(value/1024.0/1024.0) for key, value in space_used_bytes.items()}
async def clean(self):
await self._clean(False)
await self._clean(True)
async def _clean(self, is_network_blob=False):
space_used_mb = await self.get_space_used_mb(cached=False)
if is_network_blob:
space_used_mb = space_used_mb['network_storage']
else:
space_used_mb = space_used_mb['content_storage'] + space_used_mb['private_storage']
storage_limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
if self.analytics:
asyncio.create_task(
self.analytics.send_disk_space_used(space_used_mb, storage_limit_mb, is_network_blob)
)
delete = []
available = storage_limit_mb - space_used_mb
if storage_limit_mb == 0 if not is_network_blob else available >= 0:
return 0
for blob_hash, file_size, _ in await self.db.get_stored_blobs(is_mine=False, is_network_blob=is_network_blob):
delete.append(blob_hash)
available += int(file_size/1024.0/1024.0)
if available >= 0:
break
if delete:
await self.db.stop_all_files()
await self.blob_manager.delete_blobs(delete, delete_from_db=True)
self._used_space_bytes = None
return len(delete)
async def cleaning_loop(self):
while self.running:
await asyncio.sleep(self.cleaning_interval)
await self.clean()
async def start(self):
self.running = True
self.task = asyncio.create_task(self.cleaning_loop())
self.task.add_done_callback(lambda _: log.info("Stopping blob cleanup service."))
async def stop(self):
if self.running:
self.running = False
self.task.cancel()
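
The conditional expression guarding _clean above packs two policies into one line. Unrolled (behavior-preserving sketch of that guard):

def should_skip_clean(is_network_blob: bool, storage_limit_mb: int, available_mb: int) -> bool:
    # mirrors: storage_limit_mb == 0 if not is_network_blob else available >= 0
    if is_network_blob:
        return available_mb >= 0     # still under the network storage limit
    return storage_limit_mb == 0     # zero means no content limit is configured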


@@ -32,7 +32,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
self.buf = b''
# this is here to handle the race when the downloader is closed right as response_fut gets a result
self.closed = asyncio.Event()
self.closed = asyncio.Event(loop=self.loop)
def data_received(self, data: bytes):
if self.connection_manager:
@@ -111,7 +111,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
self.transport.write(msg)
if self.connection_manager:
self.connection_manager.sent_data(f"{self.peer_address}:{self.peer_port}", len(msg))
response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout)
response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout, loop=self.loop)
availability_response = response.get_availability_response()
price_response = response.get_price_response()
blob_response = response.get_blob_response()
@@ -151,9 +151,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
f" timeout in {self.peer_timeout}"
log.debug(msg)
msg = f"downloaded {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}"
await asyncio.wait_for(self.writer.finished, self.peer_timeout)
# wait for the io to finish
await self.blob.verified.wait()
await asyncio.wait_for(self.writer.finished, self.peer_timeout, loop=self.loop)
log.info("%s at %fMB/s", msg,
round((float(self._blob_bytes_received) /
float(time.perf_counter() - start_time)) / 1000000.0, 2))
@@ -187,7 +185,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
try:
self._blob_bytes_received = 0
self.blob, self.writer = blob, blob.get_blob_writer(self.peer_address, self.peer_port)
self._response_fut = asyncio.Future()
self._response_fut = asyncio.Future(loop=self.loop)
return await self._download_blob()
except OSError:
# i'm not sure how to fix this race condition - jack
@@ -244,7 +242,7 @@ async def request_blob(loop: asyncio.AbstractEventLoop, blob: Optional['Abstract
try:
if not connected_protocol:
await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port),
peer_connect_timeout)
peer_connect_timeout, loop=loop)
connected_protocol = protocol
if blob is None or blob.get_is_verified() or not blob.is_writeable():
# blob is None happens when we are just opening a connection


@@ -3,7 +3,6 @@ import typing
import logging
from lbry.utils import cache_concurrent
from lbry.blob_exchange.client import request_blob
from lbry.dht.node import get_kademlia_peers_from_hosts
if typing.TYPE_CHECKING:
from lbry.conf import Config
from lbry.dht.node import Node
@@ -30,7 +29,7 @@ class BlobDownloader:
self.failures: typing.Dict['KademliaPeer', int] = {}
self.connection_failures: typing.Set['KademliaPeer'] = set()
self.connections: typing.Dict['KademliaPeer', 'BlobExchangeClientProtocol'] = {}
self.is_running = asyncio.Event()
self.is_running = asyncio.Event(loop=self.loop)
def should_race_continue(self, blob: 'AbstractBlob'):
max_probes = self.config.max_connections_per_download * (1 if self.connections else 10)
@@ -64,8 +63,8 @@ class BlobDownloader:
self.scores[peer] = bytes_received / elapsed if bytes_received and elapsed else 1
async def new_peer_or_finished(self):
active_tasks = list(self.active_connections.values()) + [asyncio.create_task(asyncio.sleep(1))]
await asyncio.wait(active_tasks, return_when='FIRST_COMPLETED')
active_tasks = list(self.active_connections.values()) + [asyncio.sleep(1)]
await asyncio.wait(active_tasks, loop=self.loop, return_when='FIRST_COMPLETED')
def cleanup_active(self):
if not self.active_connections and not self.connections:
@@ -88,6 +87,7 @@ class BlobDownloader:
if blob.get_is_verified():
return blob
self.is_running.set()
tried_for_this_blob: typing.Set['KademliaPeer'] = set()
try:
while not blob.get_is_verified() and self.is_running.is_set():
batch: typing.Set['KademliaPeer'] = set(self.connections.keys())
@@ -97,15 +97,24 @@ class BlobDownloader:
"%s running, %d peers, %d ignored, %d active, %s connections", blob_hash[:6],
len(batch), len(self.ignored), len(self.active_connections), len(self.connections)
)
re_add: typing.Set['KademliaPeer'] = set()
for peer in sorted(batch, key=lambda peer: self.scores.get(peer, 0), reverse=True):
if peer in self.ignored:
continue
if peer in self.active_connections or not self.should_race_continue(blob):
if peer in tried_for_this_blob:
continue
if peer in self.active_connections:
if peer not in re_add:
re_add.add(peer)
continue
if not self.should_race_continue(blob):
break
log.debug("request %s from %s:%i", blob_hash[:8], peer.address, peer.tcp_port)
t = self.loop.create_task(self.request_blob_from_peer(blob, peer, connection_id))
self.active_connections[peer] = t
self.peer_queue.put_nowait(list(batch))
tried_for_this_blob.add(peer)
if not re_add:
self.peer_queue.put_nowait(list(batch))
await self.new_peer_or_finished()
self.cleanup_active()
log.debug("downloaded %s", blob_hash[:8])
@@ -124,14 +133,11 @@ class BlobDownloader:
protocol.close()
async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node',
async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', node: 'Node',
blob_hash: str) -> 'AbstractBlob':
search_queue = asyncio.Queue(maxsize=config.max_connections_per_download)
search_queue = asyncio.Queue(loop=loop, maxsize=config.max_connections_per_download)
search_queue.put_nowait(blob_hash)
peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue)
fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers)
if fixed_peers:
loop.call_later(config.fixed_peer_delay, peer_queue.put_nowait, fixed_peers)
peer_queue, accumulate_task = node.accumulate_peers(search_queue)
downloader = BlobDownloader(loop, config, blob_manager, peer_queue)
try:
return await downloader.download_blob(blob_hash)
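
The reworked loop above keeps a tried_for_this_blob set so each peer is probed at most once per blob, parks peers whose probe is still in flight in re_add, and only re-queues the batch when nothing is pending. The peer-selection pass, reduced to its control flow (hedged sketch; the should_race_continue capacity check is omitted):

def choose_peers(batch, scores, ignored, active, tried):
    start, re_add = [], set()
    for peer in sorted(batch, key=lambda p: scores.get(p, 0), reverse=True):
        if peer in ignored or peer in tried:
            continue                # never probe the same peer twice for one blob
        if peer in active:
            re_add.add(peer)        # probe already running; revisit on the next pass
            continue
        tried.add(peer)
        start.append(peer)          # launch a new download task for this peer
    return start, re_add            # caller re-queues the batch only if re_add is empty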


@@ -1,7 +1,6 @@
import asyncio
import binascii
import logging
import socket
import typing
from json.decoder import JSONDecodeError
from lbry.blob_exchange.serialization import BlobResponse, BlobRequest, blob_response_types
@@ -25,19 +24,19 @@ class BlobServerProtocol(asyncio.Protocol):
self.idle_timeout = idle_timeout
self.transfer_timeout = transfer_timeout
self.server_task: typing.Optional[asyncio.Task] = None
self.started_listening = asyncio.Event()
self.started_listening = asyncio.Event(loop=self.loop)
self.buf = b''
self.transport: typing.Optional[asyncio.Transport] = None
self.lbrycrd_address = lbrycrd_address
self.peer_address_and_port: typing.Optional[str] = None
self.started_transfer = asyncio.Event()
self.transfer_finished = asyncio.Event()
self.started_transfer = asyncio.Event(loop=self.loop)
self.transfer_finished = asyncio.Event(loop=self.loop)
self.close_on_idle_task: typing.Optional[asyncio.Task] = None
async def close_on_idle(self):
while self.transport:
try:
await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout)
await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout, loop=self.loop)
except asyncio.TimeoutError:
log.debug("closing idle connection from %s", self.peer_address_and_port)
return self.close()
@@ -101,7 +100,7 @@ class BlobServerProtocol(asyncio.Protocol):
log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
self.started_transfer.set()
try:
sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout)
sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout, loop=self.loop)
if sent and sent > 0:
self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent)
log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port)
@@ -138,7 +137,7 @@ class BlobServerProtocol(asyncio.Protocol):
try:
request = BlobRequest.deserialize(self.buf + data)
self.buf = remainder
except (UnicodeDecodeError, JSONDecodeError):
except JSONDecodeError:
log.error("request from %s is not valid json (%i bytes): %s", self.peer_address_and_port,
len(self.buf + data), '' if not data else binascii.hexlify(self.buf + data).decode())
self.close()
@@ -157,7 +156,7 @@ class BlobServer:
self.loop = loop
self.blob_manager = blob_manager
self.server_task: typing.Optional[asyncio.Task] = None
self.started_listening = asyncio.Event()
self.started_listening = asyncio.Event(loop=self.loop)
self.lbrycrd_address = lbrycrd_address
self.idle_timeout = idle_timeout
self.transfer_timeout = transfer_timeout
@@ -168,13 +167,6 @@ class BlobServer:
raise Exception("already running")
async def _start_server():
# checking if the port is in use
# thx https://stackoverflow.com/a/52872579
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if s.connect_ex(('localhost', port)) == 0:
# the port is already in use!
log.error("Failed to bind TCP %s:%d", interface, port)
server = await self.loop.create_server(
lambda: self.server_protocol_class(self.loop, self.blob_manager, self.lbrycrd_address,
self.idle_timeout, self.transfer_timeout),
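
The deleted pre-flight above probed the port with connect_ex before handing it to create_server. That check in isolation:

import socket

def port_in_use(host: str, port: int) -> bool:
    # connect_ex returns 0 when something is already accepting connections there
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex((host, port)) == 0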


@@ -0,0 +1,4 @@
from .ledger import Ledger, RegTestLedger, TestNetLedger
from .transaction import Transaction, Output, Input
from .bcd_data_stream import BCDataStream
from .dewies import dewies_to_lbc, lbc_to_dewies, dict_values_to_lbc


@@ -4,8 +4,11 @@ from io import BytesIO
class BCDataStream:
def __init__(self, data=None):
self.data = BytesIO(data)
def __init__(self, data=None, fp=None):
self.data = fp or BytesIO(data)
def tell(self):
return self.data.tell()
def reset(self):
self.data.seek(0)
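
With the new fp parameter, a stream can wrap an already-open block file instead of copying its bytes into a fresh BytesIO; for example (file name illustrative):

with open('blk00000.dat', 'rb') as block_file:
    stream = BCDataStream(fp=block_file)  # reads straight from the file handle
    stream.reset()                        # seek back to the start
    print(stream.tell())                  # 0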

lbry/blockchain/block.py (new file, 61 lines)

@@ -0,0 +1,61 @@
import struct
from typing import Set
from typing import NamedTuple, List
from chiabip158 import PyBIP158 # pylint: disable=no-name-in-module
from lbry.crypto.hash import double_sha256
from lbry.blockchain.transaction import Transaction
from lbry.blockchain.bcd_data_stream import BCDataStream
ZERO_BLOCK = bytes((0,)*32)
def create_block_filter(address_hashes: Set[bytes]) -> bytes:
return bytes(PyBIP158([bytearray(a) for a in address_hashes]).GetEncoded())
def get_block_filter(block_filter: bytes) -> PyBIP158:
return PyBIP158(bytearray(block_filter))
class Block(NamedTuple):
height: int
version: int
file_number: int
block_hash: bytes
prev_block_hash: bytes
merkle_root: bytes
claim_trie_root: bytes
timestamp: int
bits: int
nonce: int
txs: List[Transaction]
@staticmethod
def from_data_stream(stream: BCDataStream, height: int, file_number: int):
header = stream.data.read(112)
version, = struct.unpack('<I', header[:4])
timestamp, bits, nonce = struct.unpack('<III', header[100:112])
tx_count = stream.read_compact_size()
return Block(
height=height,
version=version,
file_number=file_number,
block_hash=double_sha256(header),
prev_block_hash=header[4:36],
merkle_root=header[36:68],
claim_trie_root=header[68:100][::-1],
timestamp=timestamp,
bits=bits,
nonce=nonce,
txs=[
Transaction(height=height, position=i, timestamp=timestamp).deserialize(stream)
for i in range(tx_count)
]
)
@property
def is_first_block(self):
return self.prev_block_hash == ZERO_BLOCK
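
Putting the two new pieces together, deserializing one block looks roughly like the following, assuming the stream is already positioned at a 112-byte block header (file name, height and file number illustrative):

with open('blk00000.dat', 'rb') as fp:
    stream = BCDataStream(fp=fp)
    block = Block.from_data_stream(stream, height=0, file_number=0)
    print(block.block_hash.hex(), len(block.txs), block.is_first_block)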


@@ -740,506 +740,4 @@ HASHES = {
738000: 'aebdf15b23eb7a37600f67d45bf6586b1d5bff3d5f3459adc2f6211ab3dd0bcb',
739000: '3f5a894ac42f95f7d54ce25c42ea0baf1a05b2da0e9406978de0dc53484d8b04',
740000: '55debc22f995d844eafa0a90296c9f4f433e2b7f38456fff45dd3c66cef04e37',
741000: '927b47fc909b4b55c067bbd75d8638af1400fac076cb642e9500a747d849e458',
742000: '97fa3d83eb94114496e418c118f549ebfb8f6d123d0b40a12ecb093239557646',
743000: '482b66d8d5084703079c28e3ae69e5dee735f762d6fcf9743e75f04e139fd181',
744000: 'f406890d5c70808a58fb14429bad812a3185bdb9dace1aa57de76663f92b5013',
745000: '2bd0802cbb8aa4441a159104d39515a4ff6fc8dfe616bc83e88197847c78bcff',
746000: '24d090a7b6359db3d5d714a69ddc9a6f2e8ff8f044b723220a8ba32df785fd54',
747000: '07c4ce9ce5310ee472cf753ddb03c39c5fee6c910d491daffd38615205411633',
748000: 'ea913798c0f09d0a27eae7c852954c2c88b8c3b7f23f8fba26b68a3952d0ffde',
749000: '23f256adebfe35d49ba84ad49f3f71fc67f7745091c91f22e65f1cc2e23b8f2c',
750000: '96db12ee3a295f3d5c56d244e6e7493f58c08d3427e379940e5d4f891a41ec26',
751000: 'cedaf12415dac1314942e58ced80830b92fbfabc41f42a0b0f054f0672ef9822',
752000: '293606bcd9fbbee5584724301b2cf86bb69204820023e1fb46c238ddfbc660ab',
753000: 'f4d43cbb38b7d97919dedc0f5a6dc8007896c4f443b76f3e5693e25bc46760cf',
754000: 'fcaad22fd815311280fe451086516375d1d9d92b2990c7c351407df5aa19011e',
755000: 'b9276f10d1844cb5b0308766c8db960490ac34a73c4653d0a91202789a6ccb9b',
756000: '2fe5581f1110c1c8dcea46cad647551bd6bd640cb37738d863e189bd8f368347',
757000: 'b9d915f366f0b010429a52245b0fb02774157eb9fd8f66bce32dcd3acc71c2a1',
758000: '62d1854fc15db56b5d0e05ceeb54c1297966bf9dc7f7a0a14b42c059fc485d1b',
759000: 'f4ca9f69d16d092f4a0ea5102e6343b21204c4ea9cd9b22cddd77dbb5d68ade3',
760000: 'df3bb86641330d8cc7f55a2fd0da28251219e95babe960a308b18e08a7d88fc8',
761000: 'a93029475de4bc7569b6ae802d658cd91c84cc253772712a279f140a6c3b91b1',
762000: '307e289dc6ec8bcd62ca8831e4159d5edd780f2fae55ba55dd446225450f46f8',
763000: '293f73514abca24f374473bd0394179812952a04ea13dc60ef5ada5331fa274f',
764000: 'dd8b082db9281e3d9bacf15d6b352fda186d2d2923c7731844d0d4764dd71db8',
765000: '201239e562d2571bf47347b3522fff89632aecea3b2d8cef05151f88b2b0bcdb',
766000: '4a55a538b51b5650979e64521998cd5c5ad055ba9f3ac0e3e2a28febc6cc2798',
767000: '3916666f2adbb05ea98ec1961f9546b9afa0f6910ec95e42ce37267f2ae4f79c',
768000: 'dc0ad881eedcb5fd4954238f462080d6e7636b058d481698ed1c077e0ce2207e',
769000: 'eaf10a1e1ec6e129289b8479a05df03e0808f1f0946f1995de6524e9ebe7a461',
770000: '7200c64f22e32de7f999583361c933680fc9a2ffcb9a5ab73d3076fd49ec7537',
771000: 'd883111a2eeacff80ce31df35ab6c943805b9e48877b413fccf371e5dbfa7fb2',
772000: '3977d3c60edb9c80c97bb2b759b1659cbb650ad2d3a6f61d2caec83f1b2ae84c',
773000: '9c7175fb8646a1a82383b4c534fd01bcf92d65c43d87ae854d51a784b04dc77e',
774000: 'e0e92485f86e5fffa87b3497424e43b02a37710517d9d3f272392e8cdc56e5e9',
775000: '6395229113d3aa2105afbaeb8b59621a536fc61fe272314b2fc3bdda98dd66cc',
776000: 'b4b00207328b5f032bd4f0b634f91323ff520ada8c8bfec241b23c8e4bfd5a4e',
777000: '14cdc6f5f7b4bd5bad745dfe6fcd114e9194026412a2e1b3f345be2eef433d16',
778000: 'd3cd7b68be504c32117b670d38d59d44b02dcf3d65811efc2ca5531d902623cc',
779000: 'afcd220e4040cb5f92d4b38fc204e59822df2218f767f2c4b33597b238a35f77',
780000: '78252a9cfc289a70192ed8dd3dddeb1b9a4f9b8eff9a5d0ac259b3254472cf68',
781000: '02ebc3f17d947481a311b4771c254f1e002b6a9198d4a5258ce6c13165aadddc',
782000: '8dd9f1f372ee6d688a0bcdc3b342c77804ba5a646a218be4bc2aa02d846206c0',
783000: 'e46b0d02ec2ef488fae455665e107520e1bd2b4f35ca52af7ad8addd2f72fa73',
784000: '9ee8a8de94231e3ae3a610b82fdbca48dc14d9b80791d20af6c365a31822df6f',
785000: '21e1cc12def8173a50158b2833bd91a62140c61646f5e08aecaee3e6da20735e',
786000: 'b3e659f84d73de42888cc0f2b69bae71dd5fa6756a437a4b21958b182faa316e',
787000: 'a9be7ba00ea6a9ea6bd03d8412ec014ca7e8cda6bdc33382f165e702811b8836',
788000: 'a4c14729f8a68c03f5a0ccd890ac6a92b39c143f1f752fe81ad051eb52d8dce0',
789000: '5cf66d224e5645097efc9c3c0392b51c8ca8ea1295151921a7912a2f04ee1274',
790000: '676769ade71c33bc102bce416e66eb2c6794b03d7b8f5a590c87c380da463775',
791000: '0228e074451797bf6bfbc941bcafcbadc972d32e4e1e0c5da015513f65714217',
792000: '0fa3d00a1f19c5ac060e10a410cf7cea18eac5f89018d79ce51ac3fc66bbb365',
793000: '5f68d0868b424e32f5ce3d8e7d9f18979da7b831b8ef4e3974d62fb20ff53a97',
794000: '34508c56423739c00a837801b654b07decb274d02b383eff396d23c4d64bc0e9',
795000: '7f70910c855d1fd88cd7f9be8a3b94314ee408a31a2da6301404bf8deb07c12c',
796000: 'b74ab8813b1d2a0967fea0e66597572e5f0b5a285e21f5150fcc9d5f757de130',
797000: 'bba27b1491d907ab1baa456cb651dc5b071231b1b6ad27b62d351ca12c25dbfd',
798000: 'e75dcb15b2fc91f02e75e600dde9f6f46c09672533bc82a5d6916c4a2cd8613a',
799000: 'adf62c826a3e0b33af439a7881918ae4ce19c5fb2ca37d21243415f7d716aa65',
800000: 'd8f0ca13a8c8a19c254a3a6ba15150a34711dca96f2d877162cc44aa2acfb268',
801000: '2a8c7104c4040a2bc31913ae25e9361df5bac9477368c708f86c1ca640480887',
802000: '1f3b09d3561c4a8a056b263289bd492dc6c0d604c3fa195935e735d1c0ddc40e',
803000: '037769628c40a701fdb4b16d79084b8fbb319fde79770a7ac842f3cdc813099e',
804000: 'a0c6a089e5fa1e3589ca282085fe7201a5705776d81b257ffd252b2947fa6428',
805000: 'b2ac99bfc4a488e7b7624b31ee061991a6dd0881bb005cd13f3dd2e66a08fe19',
806000: 'ffe63cb999a278280b80a667d2dcb60c40e43a53f733914d8bec808b694ebf83',
807000: 'eddb09fc6c4869a59b520d0befb1fb6ac952333f3cc5de086539c85ea8558778',
808000: '0f4fb3f9172e52897ea992d9f3a2024126c4d2e63e9888739f11fb1f5e4c1f46',
809000: '9641dd720d23ced2f1cb6e5cf46ac4e547afb9f56263c4cf58e3b19d407cf401',
810000: 'de6dc953acd7e5ef213b3aaf1c4a9ee1d5b756bfce5525ee105214647e243a85',
811000: 'c52c83712ca12b24b2db1b4a575e7f352b1d560cbf702e121a03bdca9e8be23d',
812000: '83143734bb965318a53a38a7e403dcdb3e3fadedb01ab12c370417fc2a0655c0',
813000: 'e480deff10c5a84fc957e3aed936690e24b74dd08fa8858a8a953c2f7383b914',
814000: '810d33afcee07b9abe16c6cdc3a041038daa131c476b0daf48a080007f08b490',
815000: 'b4aeb9e16fddd27844b2d56bc2b221134039bb5642c9e9ba88372afbdeac3972',
816000: '86e73b67aae3d248011b8f66ed414cb8a9ba4b2a3cf7e32773cfbff055d719b7',
817000: '3ebb8b83752b48242016cb682f0f6bd14e15371bf1163a5933193eaa0edeb351',
818000: '4d925e17f642f220bbf317d3d5355d2f41fbce325f190f8c3b32dc0b337d24d6',
819000: 'b9cc126d620f6b99d90a00d35957b0e428aaaa7c986bc9e816a60e4334572961',
820000: '9c2f8c142bed1f94dca29276f7c83958be8cfe11773bb9b56c808fbcf7d3b1f8',
821000: 'e5509eb98895cfa12a8da5d54c1df3f52472ffcbdf707adbf84a4a9c5d356203',
822000: '764aada4802ebfe4ef935ab50af06a4f83aa556c49fdde3d9e12e1abd230c16b',
823000: '1dbd745c2e96a365d865f990d109137d32d42977f503af55d8c00b109d31d3c3',
824000: '954304a0b0c8f549c3bffd5ff46b5b8f05b0f0fde2a36f24fd5af9d774fb3079',
825000: '17808b14f2056c1a5d46cb7617e9de9be6a1a6084edbc1bdb778586467a72297',
826000: '3ca1167d4cac8b187829b23001b438617c43704b42462c4eb001b0d434cb9651',
827000: '246d1607245e4a202f420393ac2e30e9cbf5eb5570dc997073b897f6d8643023',
828000: '1764730a8dc3e89d02d168ff6bb54e8c903820b74711af6ff27bd0c8545577e7',
829000: 'd9f3ab0cd823c6305bd8b95a96188bb4f2ca90b4d66c5d12293e8b6192bac0f2',
830000: 'd4ff51f0092b04aedf8d39937680d8e8309b1be21d36e7833ed36f8e30aad6ea',
831000: '3e92e76721b962396dce52993fa7606552f0907b38f7b2bd7b21ada98c145f47',
832000: 'df12fcdb4cbe53ba627ace6de898298de175f8671d3d90170732d110fcdc34b8',
833000: '25167ff38ae4a5964b618cabe0a12d4de62ac7a4c47448cdb4499e09e108d5b9',
834000: 'd31f5309ea179a1e386e835fc372e47dcda6871a3a239abfba50c4f368994f13',
835000: 'aff7e8dd3e55ea807fcbe284014075f420b3a23f1b0eb47bacdc1c91d2899813',
836000: '3b5ac6d64c470739bb17d1544a285affb40f2d33e92687e5ba7c5ac602e0d72a',
837000: 'd5619cbfe4f27c55f2bf9351b4891636cf64fef88212a5eeeae7bd3de47fe0bd',
838000: '1f9102a49c6ac470cb5d0050e5300b1443840d6d65719b835e3bea484aafb2ec',
839000: '3f63e391f0fbc5787fbe4ace3bada3816261294ea1c6ee435001801023682f90',
840000: '777894fd12bd0d6dee7bcde2995c68e55e7094e3122da38571e4b6c4304b75e0',
841000: 'ceb0c598c788e25e43e25aa4beff5c7377035824844cf1675eaea537074df028',
842000: '8661cf2065dc713d2ba043f0b81f0effcc940eeb3e91906a21ff22c210561dcd',
843000: '0dc2766f90415009d0c86bedffee6ebcf58042eb08262c0c67c4e9ed86b2aec8',
844000: '26d072da864cab268a12794977b04ec44fb69ef3978e2342e82225974dac54dd',
845000: '95e93bb60be8d5f07a1f4d26290c914957a82fc9d26ae8a3f20082eda27406ff',
846000: 'f1bdc39af7705e58ab8b6c31dc70dce1e115db1cfd8cc9b037949dfbec82a59a',
847000: 'f5f10f06396ecf2765d8a081141d489737c1d8d57c281f28f57c4cb2f90db883',
848000: '331b8ef08605bae8d749893af9ed54f0df4f07a5a002108a2a0aea82d0360979',
849000: '75b5f6233ab9a1bbc3c8b2893e5b22a0aa98e7ea635261255dc3c281f67d2260',
850000: '5d7e6fe83e0ea1910a54a00090704737671d6f44df4228e21440ad1fc15e595f',
851000: '7822db25d3ff0f6695ee38bad91edf317b5c6611673d28f1d22053110bb558be',
852000: '2f0effad83a3561fc1a2806a562786a641d9ddb18d16bb9308006e7d324a21e9',
853000: 'f603b2eaff11d5296377d990651317d40a1b2599ad2c5250eab131090f4b9458',
854000: '34d59b26a50f18a9f250736d0f2e69d28b7e196fbef9b8a26c6b0b75c16aa194',
855000: '76dd1ffff3946c0878969886fcf177ce5ab5560df19ddf006f9bcb02ae3e4e4f',
856000: '74ff0b6f64e9dd5802fec2aac1d3ae194d28b9264114adaf0a882b46c8c918fe',
857000: '7b5badfa2e4f40aa597a504d7ebe83c3705a2c6169a8c168ce293db223bc2d32',
858000: '2bb0767a0f72b20d45ecfc3e34517dbda16d85758e040cf0e147f4cbd0cc57ac',
859000: '3d741b9c365a91ed76f85824b94d19ec19b608d232660840ba59c7aa4b2cb67f',
860000: 'd481a5a117878c0e3acd1f5844e150fb30e617577947d9846b1d214d703b71b0',
861000: '54033424e488a3f1ad6946d4a6d9acb48465d6b1dbe8e1c2504a54cc84d7cad4',
862000: '464bc3820a8cc8844dc9e26c388009e9982c656d46ef4b4fd0a2cb0e4eea0aaa',
863000: 'd1aa94be2174f66780c4f226b9da3f6712b0f37af8dec33360bea83ca261b342',
864000: '8c16008f11de5bc395d88cd802514ff647450f1bc136724b9aaf2ccce10a494f',
865000: '3dae86012e97a201e2e1a47c899001ac00f78dc108026ed7c4194858c6c6dd5a',
866000: 'afe5b0ccab995e1a1fa25fbc24c1d4b1a92c43042d03395f8743dcd806e72fd8',
867000: 'c83716ac171aa9ab0d414833db340fa30e82bfda6cc616d3038529caab9b5600',
868000: '8c409fe03cd35ef2d8e366818788b40eaeb4c8f6ae91450d75f4a66ca5f69cad',
869000: '1d47909ceba790b8e1ce2e9902ee2775ea99e58efdb95668f9803a8ccf95f286',
870000: '9adf5da1476388f053aa42de636da169d1cf1c9652cdf7cd9ad4fb18a0eb3388',
871000: '8ad57fb1e74bcba0b5614fbac003be2bb32275dd85b38f2d28a0585005a99cfc',
872000: '84a32e92012a356106e9657da8dab1a5491ea588fc29d411c69b20680c666420',
873000: 'adf5921bbbfaa43929f67e6a070975313b77b456e262c700a27be611fceb17ae',
874000: '09eaa7c4b18c79a46a2895190333f72336826223d5c986849a06f5153f49f2a5',
875000: '235d7e4f31966507312149ea4c5e294aa84c695cf840117f0ef5963be7a0bda1',
876000: '9aa9cb806ccbec0475ac330b496c5b2edeba38ba3f1e13ddd54a01457634a288',
877000: 'c1e7f9b2b20bb1c4c0deadbc786d31fdf36f262325342aa23d1a66e2846b22bc',
878000: 'ee0d2b20ac28ce23ab38698a57c6beff14f12b7af9d027c05cc92f652695f46b',
879000: '0eb0810f4b81d1845b0a88f05449408df2e45715c9210a656f45278c5fdf7956',
880000: 'e7d613027e3b4ca38d09bbef07998b57db237c6d67f1e8ea50024d2e0d9a1a72',
881000: '21af4d355d8756b8bf0369b2d79b5c824148ae069026ba5c14f9dd6b7555e1db',
882000: 'bc26f028e547ec44fc3864925bd1493211773b5cb9a9583ba4c1909b89fe0d33',
883000: '170a624f4be04cd2fd435cfb6ba1f31b9ef5d7b084a25dfa23cd118c2752029e',
884000: '46cccb7a12b4d01d07c211b7b8db41321cd73f30069df27bcdb3bb600c0272b0',
885000: '7c27f79d5a99baf0f81f2b09eb5c1bf905976a0f872e02bd4ca9e82f0ed50cb0',
886000: '256e3e00cecc72dbbfef5cea627ecf1d43b56edd5fd1642a2bc4e97c17056f34',
887000: '658ebac7dfa62bc7a22b1a9ba4e5b425a866f7550a6b40fd07de47119fd1f7e8',
888000: '497a9d02868605b9ff6e7f15948a83a7e07606829107e63c2e091c90c7a7b4d4',
889000: '561daaa7ebc87e586d37a96ecfbc72484d7eb602824f38f484ed333e78208e9e',
890000: 'ab5a8cb625b28343f8fac858eab6576c856dab88bde8cda02b80b3edfd307d71',
891000: '2e81d9fc885ddc09222b298ac9efbb73638a5721802b9256de6505ecf122dbaa',
892000: '73be08881b8832e986c0bb9a06c70fff346edb2afaf69630e47e4a4a90c5fece',
893000: 'd39079dcaa4d8af1c26f0edf7e16df43cd857a31e0aa4c4123226793f1ab497f',
894000: '0a3b677d72c590d4b1ff7a9b4098d6b52d0dc10d64c30c2766d18e6eb02872cd',
895000: 'a3bbba831f48c5b68e494ee63015b487782c64c5c24bb29436283360c28fd1e0',
896000: '20af178a192ca43975ab6c838fe97ca42ba6c682682eddbc6481efd153ecb0a2',
897000: '8d0ee14b9fdb853a09ab2951d26b8f7cb8bc8038b09513bd330ee4b0bdcc4780',
898000: 'c97fbb70f804408b131a98f9fb4c04cdf2df1655d3e8ff2e0d58ed8537349f4e',
899000: 'eba2be80478e8dec2d66ca40b853580c5dad040351c64c177e3d8c25aff6c1b6',
900000: 'c4dc344a993558418b93b3f60aaef0030e2a4116086577fbf1e2f544bdbddae1',
901000: '36d84229afa63045875fc8fea0c55de8eb90694b3a37cceb825c87abf1fea998',
902000: '8ca4890ecfc5e3f9d767e4fcdf318a1e3e3597675bbcfe534d64e76bc4e8fbf4',
903000: '8b9f6a7514033c57668ca94fb3758cc6d1ef37ac982c2ff5a9f0f206fcd8d0a8',
904000: 'e9ae813991f35ca89af2fe1f1b6adf9e93c6b1dd6a74f003ebbe699a30b252ea',
905000: 'd426489d01d4f4c829f2eb68a67721d2c0e1c71e8c33ef9253593447e8603462',
906000: '63000bbed97451e68d64485c02c1c3d90b4156237dac315f4e012ffb538e375b',
907000: '96759653a4e514541effa7ef86d9f22a272ddde7b069149d17e9d9203a1edafb',
908000: 'eec6477d2f3b71bde76dc2380d6e06aa8aa306ca56ba1dd15a31c22ae0db501b',
909000: 'd5c2984cf130335aa29296ba5b17672d00360fe0ec73977326180014908c0b55',
910000: '7b99cb1c94144f606937903e173bd9ef63bfffd3db8110693fa4c2caa0abc21f',
911000: '95eed0d9dd9869ac6f83fa67863e77f24df69bcb90fef70918f30b2400e24ea8',
912000: '34c3c8780c54ecced50f0a6b394309d09ee6ce37cd98794699c63771d1d91144',
913000: '536052ddcd445702160288ef3f669ce56868c085315556c9f5ca081ef0c0b9e1',
914000: '1bcd1fe9632f93a0a1fe7d8a1891a4fc6ef1be40ccf887524a9095ed7aa9fa44',
915000: '139bad9fa12ec72a37b62ad8511300ebfda89330fa5d5a83861f864b6adeae67',
916000: '81d15282214ff83e2a034212eb58abeafcb5664d3734bff13b22b4c093b20fea',
917000: 'f31081031cebe450e4450ef397d91790fc0068e98e6746cd0aab86d17e4448f5',
918000: '4af8eb28616ef0e859b5471650c7f8e910cd692a6b4ff3a7171a709db2f18e4e',
919000: '78a197b5f9733e9e4dc9820e1c79bd335beb19f6b87056e48e8e21fbe27d83d6',
920000: '33d20f86d1367f07d6731e1e2cc9305252b281b1b092403133924cc1052f501d',
921000: '6926f1e31e7fe9b8f7a81efa73d5635f8f28c1db1708e4d57f6e7ead951a4beb',
922000: '811e2335798eb54696a4b11ca3a44b9d79486262119383d542491afa9ae80204',
923000: '8f47ac365bc380885db809f2818ffc7dd2076aaa0f9bf6c180df1b4358dc842e',
924000: '535e79802c10630c17fb8fddec3ba2bf85eedbc0c076f3575f8189fe887ba993',
925000: 'ca43bd24d17d75d55e72e45549384b395c62e1daf0d3f58f296e18168b918fbf',
926000: '9a03be89e0725877d42296e6c995d9c48bb5f4bbd971f5a9add191af2d1c144b',
927000: 'a14e0ef6bd1bc221dbba99031c16ddbbd76394186677c29bdf07b89fa2a6efac',
928000: 'b16931bd7392e9db26be975b072024210fb5fe6ee22fc0809d51980aa8068a98',
929000: '4da56a2e66fcd98a70039d9061ea5eb0fb6d9460b437d2191e47441182419a04',
930000: '87e820e2237a54c4ea100bdd0145598f05add92185cd3d0929aa2d5099f4d5e0',
931000: '515b22c91172157c443a47cf213014aff144181a77e276e291535ab3762bb1ae',
932000: 'e130c6a9eb416f96256d1f90256a148957daa32f56af228d2d9ce6ff27ce2011',
933000: '30c992ec7a9a320fb4db260373121efc7b5e7fc744f4b31defbe6a7608e0749e',
934000: 'ec490fa0de6b1d78a4121a5044f501bbb3bd9e448c18121cea87eb8e3cadba41',
935000: '603e4ae6a6d936c79b3f1c9f9e88305930953b9b390dac442976a6e8395fc520',
936000: '2b756fe2de4328e598ed511b8828e5c2c6b5cdda1b5e7c1c26f8e0424c81afa9',
937000: '1ae0f15f14a0d4819e34a6c18de9428a9e43e17d75383bffa9ffb18358e93b63',
938000: 'cbd7001825ec87b8c6917d6e9e7dc5c8d7767788b6ffd61a61d0c612dbe5de66',
939000: 'd770d0395aa79076044783fb37a1bb173cb95c93ff1ba82c34a72c4d8e425a03',
940000: '3341d0a0349d091d88d233cd6ea6e0ad553d52039b4d47af51b8a8e7573a7916',
941000: '16123b8758e99344ebe6670cd95826881b274c31d4da2a051052955a32bade3a',
942000: 'ac7430961e77f902918fe79a52cbf6b523e3f2804ec83d0b17908e131ea9ea68',
943000: '2ad08a6877e4687dcb7a623adeddc88403e8082efd6de28328b351282dc141e2',
944000: '81382e8c1f47fa7c03fa1726f9b09ed1cd38140fe50683896eaa1b403d7e5fe3',
945000: '152bfbb166da04dab16030af28ae65b3275819eed1d0bbfc11eba65616ebefd6',
946000: '25b3da0962f87a0d3e4aec8b16483efbcab9514893a42fd31f4cb544ddc45a1f',
947000: '2cb738ba342436628ff292797e3d36c4752d71bdc1af87fe758d469d06e36e0e',
948000: 'b3683e18570fcc8b986720514539181ec43fb5dbc20fe314c56ab6bd31ab766a',
949000: '94ced5bfba55ccffc909bf098d537e047d8d4cbb79f5e2a74146073f39804865',
950000: 'b11543cd2aedae27f6ddc3d2b431c897fdcfe59ed3c926b0777bc1e99de4d12a',
951000: '21508881a7f80fcd0b9b27bbcfba634b39c6525f5313968c4605cd55b4fec446',
952000: 'f9b3ed919c9ca20cd2927d899ee7a86c93c2dd919dafb6fdb792f2d9f1895cb0',
953000: 'cf578d8e80eec4102dc1b5321f10b36020b3b32f4b5d4664c90c412ca2ef6b42',
954000: 'ed17c919ae5c4be835966b47f667d6082c75917b95584b2d2aff0e32f5c8aa98',
955000: '948ea467fa01a20122e2146669214fdd3bb025038554609f7299ece5bca63e39',
956000: 'b50ff4c02957ed8764215d25f206f6f1fe6d0eb712a378b937ff952dd479afd2',
957000: '169922a3e51517ba6104a883d29aac03a9d20b4d448bd2773137b0d790e3db6b',
958000: '92258ac2e8b53167dc30436d93f385d432bd549711ab9790ba4e8263c5c54382',
959000: '7ca824697459eb302bcd7fba9d255fb269555abe7cf9d2dd5e54e196d751e682',
960000: '89f9ec925d23698076d84f9e852ab04fc956ac4465827303de0c3bb0b685eb32',
961000: '41cf75cd71bc12b93674c416e8b01b7410eb9e09eb8727ad93ff0b833c9966c9',
962000: '7db1f1dbff3e389713067879bfedf9513ec74bb1e128b13fc2fe23ad55fd0306',
963000: 'a35e71c611b2227adeac824d151d2f09bdbecd5765a4e62c6e74a3e4290abc66',
964000: 'dc1811130e249d2208d6f85838512b4e5482efb0bd2f619164a68a0c60d7f248',
965000: '92f5e25dd1c03102720dd0c3136b1a0769901bf89fcc0262a5e24405f349ca07',
966000: '08243d780d8ba96a940f409b87d9c6b8a95c92804173b9156ada0dad35b628dc',
967000: 'cb769a8935bb6faeb981da74f4079babbbb89476f825cc897f43e79790295260',
968000: 'ff3fc27d2998f4dc4ac1ff378afe14c7d0f43cc328deb9c978ec0e067d1dfaf9',
969000: 'e41a3452f45d5f025627d08c9c41017679e9c4804371dd1cc02f3ed49f85dbb2',
970000: 'f5eaaf7ba6b47245a4a8096a7785c7b25dc6db342ac2ccbba0c321e97ab58284',
971000: '75414062f1d4ed675dadc8f04ba10147a484aaca1ae316dc0b896a92809b3db6',
972000: '5bcf2ee00133774c7d060a1a1863dfccc20d5127ecb542470f607dec2504fe6f',
973000: '07d15b9656ecde2cd86a9d22c3de8b6505d6bab2aa5a94560b0db9119f1f6f6c',
974000: '2059e7924d7a210a88f5a65abc61152506a82edccd27416e796c81b9b8003f13',
975000: '7fcf5d8b2c0e51cfbdaa2502a9da0bdb323646899dad37dacc39af9f9e16fc5c',
976000: '02acb8cf87a0900436eccfca50371948531041d7b8b410a902205f84dd7fb88e',
977000: '2636dfd5a47016c893265473e78ecbf2000769d886f0d01ee7a91e9397210d15',
978000: 'ce92f52a35096b94bea73a7d4e113bc4564a4a589b66f1ab86f61c822cf9ee76',
979000: '21b8102f5b76be0c8e20d537ebc78ebe46bfcea6b6d2dda950ce5b48e85f72d7',
980000: 'f4df0bd63b36105705de62266d654612d9804bad7069d41344de269657e6f084',
981000: 'f006cd2718d98d774a5cd18394db7744c812fa149c8a63e76bab934aee89f571',
982000: 'da5d6609265d9153022d823b0260aa07e7511ceff7a3fd2ca7ce83cb3900a661',
983000: '3a26f3f02aa145fa8c5268fbe10dd9c3546d7dda57489ca5d4b161beb0d5a6e2',
984000: '968e8cd37a1137797d40f39f106cae62d1e252b46c7473b9434ad5f870ee88fb',
985000: '3129c3bf20deace1a9c92646a9d769da7a07f18dcd5b7a7b1e8cf5fd5390f8e1',
986000: '6ce830ca5da322ddbb97fc572ea03218913d070e5910516b33c6113b02b23c21',
987000: '7fb1a8635623847132ab766a99b792953379f782d1115b9649f5f9c5a742ca04',
988000: '5e8e6c6da7f271129c20c4dd891dcb1df4f9d690ee7cf391c6b7fbd028a0da4c',
989000: '12919e34bb9a9ac1d2a01e221eb8c511117fc4e1b3ae15355d95caf4673bdb08',
990000: '016f8b18227a0c09da55594a98638ad5b0fbb4896e2ab6163ac40b6015b2811e',
991000: 'ddf8cd6e2f4ee07530ae7567cef4fa2c2fd4a655cb20e20422e66fd49bde6489',
992000: 'dca77707c0caa3a9605f3dadf593402339c29448869907fb31f6c624e942dcbd',
993000: 'de9acc4c7c482ecac741fd6acbbc3a333afab52f3fe5eea4130c0770299a56dd',
994000: '54420631f8a801a1b8f391088f599ee22cedc06f24bf67f18272feb8fe70c682',
995000: '4b44b26e3e2495716dfd86fc42594cd4b1e4b70bdab4f0905cce4cb9556e008a',
996000: 'd6e41fd301fc5f519c343ceb39c9ff845656a4482e4e182abdcd3963fd5fde1c',
997000: 'd68b6a509d742b182ffb5a98b0e585a2320a5d3fe6977ad3e6cd06835ef2ea55',
998000: '1efcdcbadbec54ce3a93a1857253614536c34f05a0b1924f24bff194dc3392e1',
999000: '10a7713e46f47527f3819b4a9257a03f3e207d18e4917d6bcb43fdea3ba82b9a',
1000000: '1b4ddb1436df05f07807d6337b93ee1aa8b600fd6a910a8fd5313a39e0440eec',
1001000: 'cde0df1abdae26d2c2bdc111be15fb33231c5e167bb8b8f8eec667d71379fee4',
1002000: 'd7ce7a96a3ca73a4dfd6a1780e23f834f339142519ea7f45d256c113e27e4857',
1003000: 'b1a9b1c562ec62b9dd746d336b4211afc37482d0274ff692a44fa17ac9fe9a28',
1004000: '7afd6d0fb0014fbe16a31c84d3f1731736eaeef35e40bb1a1f232fb00345deae',
1005000: '4af61ce4cda5de58277f7a67cadea5d3f6ce56e54785b188e32306e00b0414df',
1006000: '08e1fb7295efd4a48cb999d899a3d481b682ddbce738fecd88a6d32cbe8234f0',
1007000: '14a367a41603dd690541daee8aa4a2882260059e3f85bd8978b7431e8f7db844',
1008000: 'e673230e62aaefad0678611f94ff35ee8a6e18eb96438bdfb4b614f54f54dba7',
1009000: 'e191af8fb71d0d91419abd19443af3d3f23ee4fe359bb8c390429cc838132bde',
1010000: 'ffdba58f184cf60838b75b7899b6633e7cfd34cf36eded572c0133d07387bc49',
1011000: '40801af3a5546cb9d53e05e21b74be09de9a421b762ca1d52d2266f5c2055ce8',
1012000: '552519acebed0e38102f5270dc60b1da7a123600b6b94169ae74462ae454693f',
1013000: '1eee96f48418929927eaa9642777bc806d326cfffaf077bc8695a7ecd438d631',
1014000: 'a471093e1de2a8db586412d7351c8d88e44ea890f46e9b43251af427a0a4a879',
1015000: '57532f5a522295cc139f008bdcb7a1e6d02e6035d5221b2687c7c216f06297a2',
1016000: 'ec46dba07addcb6e62f58456a53c513d876f1c49ae7d76d230adb8debd26027d',
1017000: '33ea8d25f342a7465ed71e4bab2b91007991e0994c61d321e3625301a1390322',
1018000: '4871c03cc95d4ce0a39bd2cebbb001b2ea1cce1b3561bb841d88f43bb9d12ffd',
1019000: 'f5248257576eb2ff4139d6374cc7ce34121cc942598cf9e04d2bd572e09189bb',
1020000: 'e7785286897c85cfb0276957bff216039eeb11bc1ebca89d0bb586022caa5750',
1021000: 'a30220f17d060634c5f6a1ddc5ea34b01c18fb5eb7e0e8267b66bf5a49525627',
1022000: '6083ea49e64ac0d4507c674237cf87d30b90b285ec63d082e626df0223eb7c9c',
1023000: '1dc5596d716bc33ee0f56fc40c1f073155a58a7692935c9e5854ef3b65b76828',
1024000: '065adfee40dc33abff07fb55339571712b959bc1830dc60b6691e36eab1508ae',
1025000: 'bb6903752d31278570e774b80a80782179c78f099e58c3dc4cba7afea7a471c4',
1026000: 'f3050f3c2f3a76f5084856b0f089383517caa3f51530fbc29335308f5f170625',
1027000: '746ed3701510d07958d11a06f22dbb839d9858373dc5a33249dd69e91bab01fd',
1028000: '43f7a96ea6a45b78c29ad4a2f8680ef184438c2bd3686172b0564e0ae6dd7ba1',
1029000: 'cbb9916099c59e14fe61d284374f4feaa3d43afec59e4698ed92143576f24b34',
1030000: '2e805fc2331e32e586ea692bc3d4e6b11e1ec3f1cab6e331b459f9f1ac9a1f1e',
1031000: '04f324f8f6d4f9901cf65f78dc91d6010ea6cf125f5ac0253b57b5f1f79e81e0',
1032000: '60ca62f52fdfd858b0ee0fdb380648bde85ca14e2a73565205ed4ee0bc861c77',
1033000: 'eb60aac23d599d3099cf98ed8fc3213f1bc06bc1c677429b303e9c81f79f1340',
1034000: 'f0328df2daf119ce673ddfa7a39a84576985f701f7a7dec3f56f58c2019ebd4d',
1035000: 'f9d3cbce3854de168d8835c96917c01be6244c8f82641e8d9398dfffec4e7107',
1036000: '7dca97e6e1d6ed70aa7805f74b768009a270e7ebe1dd951e8727d1d2f2d271f2',
1037000: '5329504126b2845b3044f423b521e77ff58d7d242f24bf87c87f4d8d4e03a947',
1038000: '5bad3ad55e3daa415f3182a1f2a099fe1767e8fae34e9bb95d47e242b8971434',
1039000: 'c29729b8ba49ac0043fe4aa6fc971f8ac3eda68ff92970957ada39a2989b2491',
1040000: 'f303aebfc9267600c081d0c021065743f93790df6f5c924a86b773788e0c45be',
1041000: 'a1cbe5059fa2275707785b77970c36d79b12c1ba93121bc9064ab9b64abacf7b',
1042000: '004b0dd4e438abc54ae832d733df32a6ba35b75e6d3e0c9c1dee5a7950507295',
1043000: '31893a3fe7bb4f6dd546c7a8de4a65990e94046aab442d18c68b6bf6acd54518',
1044000: '2c4dd479948acc42946f94050810000b0539864ad24a67a7251bff1c4971b035',
1045000: '1cea782d60df35a88b30ae205ce37e30abc7cad2b22181722be150bd92c53814',
1046000: 'ee808f0efb0f2ef93e8599d8b7f0e2e7c3cdc42353e4ea5165028b961f43d548',
1047000: '75f057e2a8cb1d46e5c943d63cc56936a6bac8b1cb89300593845a20baf39765',
1048000: '2abcd227f5314baed85e3c5b49d3888a60085c1845c955a8bf96aa3dd6394798',
1049000: '5d0ec24b9acd5ab21b42f68e1f3142b7bf83433b98f2fa9794586c8eff45893e',
1050000: '1d364b13a4c17bd67a6d1e5f77c26d02faa014d7cd152b4da70380f168b8e0ff',
1051000: 'b9a20cec21de84433be9b85817dd4803e875d9275dbc02907b29888431859bae',
1052000: '424cb56b00407d73b309b2081dd0bf89213cf024e3aafb3090506aa0ba10f835',
1053000: '6df3041a32fafd6a4e08778546d077cf591e1a2a16e77fe7a610efc2b542a9ff',
1054000: '78f8dee794f3d4366019339d7ba74ad2b543ecd25dc575620f66e1d535411971',
1055000: '43b8e9dae5addd58a7cccf62ba57ab46ffdaa2dcd113cc8ca537e9101b54c096',
1056000: '86b7f3741343f85d93410b78cc3fbf03d49b60a664e908703016aa56a206ae7e',
1057000: 'b033cf6ec622be6a99dff536a2cf73b36d3c3f8c3835ee17e0dd357403e85c41',
1058000: 'a65a6db692a8358e399a5ac3c818902fdb60595262ae05531084848febead249',
1059000: 'f6d781d2e2fdb4b7b074d1d8123875d899cdbd6be375cb4288e86f1d14a929f6',
1060000: 'cd9019bb1de4926cca16a7bef1a46786f10a3260d467cda0775f73361795abc9',
1061000: 'ed4f5dc6f475f95b40595632fafd9e7e5eef388b6cc15772204c0b0e9ee4e542',
1062000: 'c44d02a890aa66979b10d1cfa597c877f498841b4e12dd9a7bdf8d4a5fccab80',
1063000: '1c093734f5f241b36c1b9971e2759983f88f4033405a2588b4ebfd6998ac7465',
1064000: '9e354a83b71bbb9704053bfeea038a9c3d5daad080c6406c698b047c634706a6',
1065000: '563188accc4a6e311bd5046516a92a233f11f891b2304d37f151c5a6002b6958',
1066000: '333f1b4e996fac87e32dec667533715b31f1736b4342806a81d568b5c5238456',
1067000: 'df59a0b7319d5269bdf55043d91ec62bbb30829bb7054da623717a394b6ed678',
1068000: '06d8b674a205393edaf20c1d837baadc9caf0b0a675645246263cc163302241d',
1069000: 'ac065c48fad1383039d39e23c8367bad7cf9a37e07a5294cd7b04af5827b9961',
1070000: '90cd8b50f94208bc459081356474a961f6b764a1217f8fd291f5e4828081b730',
1071000: '3c0aa207ba9eea45458ab4fa26d6a027862592adb9bcce30915816e777dc6cfc',
1072000: '3d556c08f2300b67b704d3cbf46e22866e3ac164472b5930e2ada23b08475a0f',
1073000: 'a39b5c54c24efe3066aa203358b96baea405cd59aac6b0b48930e77799b4dd7d',
1074000: 'e8c8273d5a50a60e8744716c9f31496fb29eca87b4d68643f4ecd7ec4e400e23',
1075000: 'b8043ae41a1d0d7d4310c85764fcba1424733df347ffc2e8cbda1fe6ccbb5153',
1076000: '58468db1f91805e767d334824d6bffe54e0f900d1fb2a89b105086a493053b3d',
1077000: '04a78749b58465efa3a56d1735cd082c1f0f796e26486c7136950dbaf6effaa4',
1078000: 'e1dd6b58c75b01a67d4a4594dc7b4b2ee9e7d7fa7b25fd6246ce0e86eff33c75',
1079000: 'd239af017a6bb664485b14ad15e0eb703775e43018a045a8612b3697794460da',
1080000: '29ae5503f8c1249fefeb63fd967a71a70588ee0db1c97497e16366163a684341',
1081000: '05103ab27469e0859cbcd3daf42faa2bae798f522534697c7f2b34f7a050ee0f',
1082000: '4553d2cb7e90b6db11d242e287fe96822e6cd60e6388b94bf9006411f202ba03',
1083000: '97995acd178b2a142d571d5ae1c2a3deaf93a909fd91fb9c541d57f73e32dc99',
1084000: '9e3f23376af14d76ab24cd54e321dec019af73ad61067d959ff90043acc5ffcc',
1085000: '81c056b14f13cee0d6d6c8079fdd5a1a84c3a5c76cc9448612e8ef6d3531300e',
1086000: '8a0004f6809bdd075915a804e43991dfe8f22e05679d2fdaf8e373f101bac5c2',
1087000: '27c45a4c9ad24e038f2ebe40835a1c49ac7221d7185082866ee354351ba87c7a',
1088000: 'fd27e21747117b00b4ada1cba161ac49edb57cca540f86ac5ba885050f08f824',
1089000: 'bff867335767103bc3ed15ede5b9fde88016f8ede15dc5bf3e81ea40dcfc61ae',
1090000: '608f75016d1db08888dd59640f63e838c19bdfa833c0cc177ad3d2b818b0db5b',
1091000: '90750b452bd4dedaab6b57fecbfe88f71ce3d5437fad7f9ec0fdd270445c7526',
1092000: '98287b39f9f1233017dc5d932e5c77f0521ca84587eb3f39f0e7b6c297c749af',
1093000: '68a5846ed05c9bb142197849106838765f90f15c10b2cc938eef49b95eaa9d33',
1094000: '5660a1aac2fc763a417fc656c8887fc8186bf613ae1ccbb1a664fb43ce1fa1d6',
1095000: '62bad3db418b3f4cad3596881b645b72479c71deb0d39c7a4c8bd1577dc225fd',
1096000: 'e0e4b2b183591f10dd5614c289412f2fb5e320b7d3278f7c028f42f591872666',
1097000: 'a233a233fc2aa5dab9e75106d91388343ef969458ea974f1409a2ab5fc441911',
1098000: '16dfa5fa6cbd1188e562697b5f00ac206960d0851ed84adf37ae975fd5ffdd6a',
1099000: 'b8a870b7dc6d3263730c00f59d52aa6cce35dc59aa8fba715034cc2d14927260',
1100000: 'a3cd7749743da22a3846dcc2edbf1df21b938e829419389e3bc09284797c5b43',
1101000: '75b14c2a95e2a095949729b7c0b624bd725a2de98404a8e3247b60c977d0198e',
1102000: '4d3af64d37064dd5f57e25d61f248a1e21c1b1cadd7bb1404e35c9fbe06f1fd4',
1103000: 'd73c92bfed358dfcd7659228974ab75ea2fc86f2301ee47133adad8075203872',
1104000: '30cd82354f37bc0b412123867c7e1835206022a7501853bf8c0d3df02f291645',
1105000: '1d2ef984f26693dce77460cd2694e5da46e675077e91a1cea26051733b01a7ef',
1106000: '51c076c304222fe3ca308ba6968c46fef448f85be13a095cecb75b90e7954698',
1107000: '99e2221339e16acc34c9816f2ef7b866c2dd753aa3cbe484ae831959a23ece68',
1108000: '0f1227c250296bfe88eb7eb41703f99f633cfe02870816111e0cadfe778ddb19',
1109000: 'b35447f1ad76f95bc4f5886e4028d33acb3ad7b5000dd15516d3f11ce4baa990',
1110000: 'ac7baff996062bfaaaddd7d496b17e3ec1c8d34b2143095645ff22fb3888ae00',
1111000: '430bbbdcca36b2d69b6a2dd8b07c583a060a467e5f9acbc6de62462e1f7c7036',
1112000: 'e5274dea029dc44baff55c05b0555f91b74d29ffd40e3a8c4e2c5b57f9d40bef',
1113000: 'cf43863249fa42cfe108220dd40169dac702b0dd9cf5cb699cf2fc96feda8371',
1114000: 'fa1c0e551784d21c451564124d2d730e616724f3e535de3c186bcdeb47e80a8f',
1115000: '49fe6ecee35a397b83b5a704e950ad028cfb4b7e7a524021e789f4acc0fd6ffe',
1116000: '74ecded36751aa8b7901b31f0d16d75d111fc3c40b567f649c04f74ed028aa5c',
1117000: 'd9ca760a22190bdf545766b47d963c738a4edcc27f4d15ca801b35751577cfa7',
1118000: 'c28d42f871682800ac4e867608227cfb6bc4c00b618e83a8556f201a1c28813c',
1119000: 'c5fafc4e1785b0b9e84bb052e392154a5ba1aefe612998017e90772bcd554e08',
1120000: 'aa054d428bc9ccee0761da92163817163413065fe1e67ef79a056c5233ea3476',
1121000: '0df295bb944218503bd1bf66d2ece0c50fd22dae3391b80673a7ad1e4e5c3934',
1122000: 'a13abb350a26673b3933b1de307a60a6845ca594d502599548c6253e21a6d8e8',
1123000: 'a4bc6a3abf9ed1f4b14338ff0f03f83456312bc91a93fa89ae6db493050115e1',
1124000: '65869938df99adf0dda76200291ce09a54c9bcc787e4bb62cd72c367db58f4f0',
1125000: 'ea5e918233b14c3c73d488a906e3741c61bdcafe0393bd0404168fe80c950a46',
1126000: 'ce88cd35104fcec51bcee77302e03162dc694802536f5b668786b2245e61bca5',
1127000: 'ea19c0c8d205be4be87d02c5301c9ed331e7d75e25b93d1c2137c248882af515',
1128000: '006f32d63c2a3adcf4fbad0b0629c97f1beab6446a9c27fbde9472f2d066219e',
1129000: '218e5392e1ecf471c3bbc3d79c24dee30ac8db315dbeb61317318efb3f221163',
1130000: '30b9da0bd8364e9cd5551b2529341a01a3b7257a238d15b2560e2c99fdb324e8',
1131000: '8a7f382cfa023d2eba6639443e67206f8883b57d23ce7e1339234b8bb3098a82',
1132000: 'bf9af68a6fe2112d8fe311dfd52334ae2e7b0bac6675c9ebfddb1f386c212668',
1133000: '1a30951e2be633502a47c255a93ddbb9ed231d6bb4c55a807c0e910b437766b3',
1134000: 'a9bcaf3300b7915e701a8e396eb13f0c7287576323420be7aab3c3ba48020f76',
1135000: '337eed9ed072b5ad862af2d3d651f1b49fa852abc590b7e1c2dc381b496f438a',
1136000: '208761dbc29ec58302d722a05e937a3cf9e78bfb6495be395dd7b54f02e169dc',
1137000: '4e5b67ff3324b64e268049fdc3d82982b847ee359d409ade6368864c38a111e5',
1138000: '55d1d0833021a664e85eec8cc90a0985e67cc80d28841aaa8c2231ec28087ebb',
1139000: 'e750ada1ec9fa0f2f2461ed68958c7d116a699a82ec12911da5563139f8df19e',
1140000: '9cf81407b6ccc8046f0233f97484166945758f7392bb54841c912fcb34cf205c',
1141000: 'fccf32b2fae03e3b6b562483776625f9843cd68734c55659e2069cde7e383170',
1142000: 'c3608c215dd6569da6c1871c4d72a09ab1caa9663647f2a9454b5693d5d72a65',
1143000: 'bd39cb8c4e529d15bbea6baeec66afe52ca18afe32bd812f28fbb0676647cdff',
1144000: '6e42d02538565ce7e2d9bf31a304f1fd0ac122d35d17a030160575815901b0b1',
1145000: 'b9722e1de2904ce1219140fffb1f4f9f5a041f885faa634404238d103c738b4c',
1146000: 'd4de4271459966cee774f538a243d7db0689b213b296463d42e45c93194d7861',
1147000: '51fadf109f22bb85574d0fbcbd0b20992983e89aee3d415a7b1c37c44775d9a9',
1148000: '137e1fe8da31680d21a42e7421eb608a883a497314e4404625ce44b0edadde6a',
1149000: 'cb87867eb04203ce15e0763a2f4389376cea75e0a2877f55e2911c575bef07a8',
1150000: '977528ca7953a2c9c19fefaa3aab7ebdec3ac324d74a07d83764ba25d9be0689',
1151000: 'a09c51c832600ded63a19201df008075273ea248fd406886e93a2cbaa3bba46b',
1152000: '0e5367cfa0f00dd932a5bcc00dcc807fa6825161806bed588e16a57947b4b32d',
1153000: '55a9de3dcde2efb56a3c5fea7d22b98c1e180db9a4d4f4f6be7aae1f1cbd7608',
1154000: 'abc58cf71c4691ebfaef920252730cf69abbe9de88b424c03051b9b03e85d45a',
1155000: '4f074ce73c8a096620b8a32498362eb66a072eae95d561f2d53557cd513ae785',
1156000: '540a838a0f0a8834466b17dd456d35b8acae2ec8419f8bd9a704d9ea439062ac',
1157000: 'd5310ac671abdb658ea028db86c23fc729af965f91d67a37218c1412cf32a1f5',
1158000: '162d906a07e6c35e7c3ebf7069a200521605a97920f5b589d31b19bfd7766ee2',
1159000: '600bd8f5e1e62219e220f4dcb650db5812e79956f95ae8a50e83126932685ee0',
1160000: '91319398d1a805fac8582c8485e6d84e7490d6cfa6e44e2c630665b6bce0e6b8',
1161000: 'f7ad3cff6ee76e1e3df4abe70c600e4af66e1df55bf7b03aee12251d4455a1d4',
1162000: '85b9fbba669c2a4d3f85cdb5123f9538c05bd66172b7236d756703f99258454d',
1163000: '966085d767d1e5e2e8baf8eda8c11472ec5351181c418b503585284009aaea79',
1164000: '1c94e1b531215c019b12caf407296d8868481f49524b7180c7161b0363c1f789',
1165000: '803b6bf93735aeae2cf607824e2adf0d754b58da2516c2da1e485c697e472143',
1166000: '872561a82f7991633d0927d25cb659d096bbe556fe6dac7a0b6a679820733069',
1167000: '6bd7cdd605a3179b54c8af88d1638bf8133fab12cbf0a78d37cf21eddf4395a1',
1168000: '79946f5758c1817239cc642d27298bd710983551a8236e49832c6d818b097337',
1169000: 'b0994c60728e74de4aa361f37fa85e5296ce3188ae4e0b66d7b34fe86a239c9c',
1170000: 'a54188a5a64e0cf8da2406d16a0ac3983b087fc7d6231b6f8abf92cf11dc78cd',
1171000: 'ec2924d98e470cc6359821e6468df2c15d60301861d443188730342581230ef2',
1172000: 'b4ac11116aa73ce19428009a80e583e19dc9bcd380f7f7ce272a92921d5868d2',
1173000: '501d3551f762999dd5a799f3c5658fff2a7f3aff0511488272cd7693fefb8f9d',
1174000: '4660074ea48a78ae453cb14b694b2844cc0fb63ed9352ed20d11158bbb5c1f28',
1175000: '0727f6b1d9f8fe5677a9ffa0d475f53f5a419ef90b80896c22c2c95de22175de',
1176000: '150633d6a35496c24a93c9e19817e90f649c56b7e2558f99e97325bfd5df8b17',
1177000: '0849e19f22571b62dba8ff02f6b5a064a7ac36e7ed491321b3663567e8e17294',
1178000: '770dd463e7bad80f689f12934e4ae06e24378d1545dcf211fd143beaef49464e',
1179000: '059d383dcc60a49b658b674d92fc35cab07b06329c58d73818b6387cb0c06534',
1180000: 'e547cb3c636243ca9ae4cfb92c30a0f583eda84e329a5c1e5f64a26fc6fc791e',
1181000: '4521a4396ab02f73d45d7a3393ea1c602d255778d52c12079c88bfbad32aab43',
1182000: '051cfe993e4b0b34233403a9e8c397dd50e8b78a30fb07e9c260604ee9e624a9',
1183000: '44a69c99bb8b85e84ae279f2d8e5400d51cb3d5f0bcd178db49d55548cd66191',
1184000: '2a1d23c9bb3c71a533e0c9d25b03bfa7e9db8e014645f3e7fbede6d99fff0191',
1185000: 'bb90d6c6d77819163a9e909ee621d874707cdb21c91b1d9e861b204cf37d0ffa',
1186000: '4a92051b738ea0e28c64c64f1eb6f0405bc7c3427bef91ff20f4c43cf084d750',
1187000: 'f782ac330ca20fb5d8a094ee0f0f8c086a76e3f03ecc6a2c42f8fd07e52e0f41',
1188000: '94cb7b653dd3d838c186420158cf0e73db73ec28deaf67d9a2ca902caba4141a',
1189000: 'c8128e59b9ec948de890184578a113478ea63f7d57cb75c2c8d5c001a5a724c0',
1190000: '4da643bd35e5b98932ae21515a6bffb9c72f2cd8d514cd2d7eac1922af785c3f',
1191000: '0f922d86658ac3f53c5f9db360c68ab3f3253a925f23e1323820e3384214719a',
1192000: '4c3ab631cf5ba0c236f7c64af6f790fc24448319de6f75dbd28df4e2648d0b7d',
1193000: 'eda118d1fac3470a1f8f01f5c78108c8ecdcd6420be30f6d20f1d1831e7b6975',
1194000: '5723fff88abd9bb5088476fa5f4221a61c6f8a718703a92f13248ad350abeea2',
1195000: '1715846f82d011919e3446c6ce675a65fb80338bd791d4e735702c4767d9adc4',
1196000: 'b497667996aee2db61e88f442e728be15ab0b2b64cfd43198691fcf6cdafacc8',
1197000: '309a6170d837b8cb334fb888a64ed4e47e6592747e93c8e9d1bf7d608cfef87d',
1198000: '3ea918ef64a67dec20051519e6aefaeb7aca2d8583baca9ad5c5bd07073e513a',
1199000: '4ec7b7361b0243e5b2996a16e3b27acd662126b95fe542a487c7030e47ea3667',
1200000: 'b829c742686fcd642d0f9443336d7e2c4eab81667c90ce553df1350ed10b4233',
1201000: '44c022887f1e126fd281b1cae26b2017fa6415a64b105762c87643204ce165a5',
1202000: 'b11cc739eb28a14f4e47be125aa7e62d6d6f90c8f8014ee70044ed506d53d938',
1203000: '997a7c5fd7a98b39c9ca0790519924d73c3567656b605c97a6fdb7b406c3c64d',
1204000: '7d25d872e17195ee277243f7a5a39aa64d8750cec62e4777146acf61a8e76b04',
1205000: 'ce8486ae745a4645bee081ef3291d9505174bed05b0668d963b2998b7643dbb0',
1206000: '46a0bcea3c411c600dffe3e06e3d1dfbf5879a7ec4dcf3848e794cefcbf2bc0b',
1207000: '37e6297bf6e4e2bdd40401d4d7f95e3e3bdafd4a7f76b9c52865cefc6b82b20b',
1208000: 'd09e3982a9827b8cf56a5a2f4031dc6b082926c1fd57b63beaaa6cfd534eb902',
1209000: '54ae9010a9f146c83464e7ee60b30d9dbee36418561abc4e8d61bce9baa2d21d',
1210000: '5dcfd33f8e5ac21c9ba8553758b8cd8afae7961cad428530b5109c2db2ebf39f',
1211000: '91c952348bb2c3dfac0d6531a3dac770ea6dab571af257530e9c55493c96bdd9',
1212000: 'e62cc3fe044a7f5de4c04a8aed5619548f9d5c6fad9f989d3382cb96de1d780d',
1213000: '66b46ffdca8acf1dd04528dadb28b6ac4ce38807c1b84abd685d4ddb3dc59a34',
1214000: '2ce4091756ad23746bab4906f46545953cadaf61deae0d78e8a10d4eb51866b1',
1215000: '83ce3ca087799cdc4b4c5e7cfeb4a127708724a7ca76aa5f7f4ec1ed48b5fca6',
1216000: '7d07b739b7991fbd74926281bf51bba9d5721afab39598720f9ff5f7410a6721',
1217000: '76adf49491670d0e8379058eacf0228f330f3c18955dfea1ebe43bc11ee065f3',
1218000: '77f422e7301a81692dec69e5c6d35fa988a00a4d820ad0ebb1d595add36558cc',
1219000: '8ba9d944f8c468c81799294aeea8dc05ed1bb90bb26552fcd190bd88fedcddf2',
1220000: '00330367c255e0fe51b374597995c53353bc5700ad7d603cbd4197141933fe9c',
1221000: '3ba8b316b7964f31fdf628ed869a6fd023680cca6611257a31efe22e4d17e578',
1222000: '016e58d3fb6a29a3f9281789359460e776e9feb2f0db500482b6e231e1272aef',
1223000: 'fdfe767c29a3de7acd913b627d1e5fa887a1af9974f6a8a6474db822468c785c',
1224000: '92239f6207bff3689c554e92b24fe2e7be4a2203104ad8ef08b2c6bedd9aeccf',
1225000: '9a2f2dd9527b533d3d743efc55236e73e15192171bc8d0cd910918d1ab00aef7',
1226000: 'eb8269c75b8c5f66e6ea88ad70883dddcf8a75a45198ca7a46eb0ec606a791bb',
1227000: '5c82e624390cd57942dc9d64344eaa3d8991e0437e01802473053245b706290c',
1228000: '51e9a7d727f07fc01be7c03e3dd854eb666697f05bf89259baac628520d4402c',
1229000: 'c4bfdb651c9abdeda717fb9c8a4c8a6c9c0f78c13d3e6cae3f24f504d734c643',
1230000: '9f1ce781d16f2334567cbfb22fff42c14d2b9290cc2883746f435a1fb127021d',
1231000: '5c996634b377412ae0a3d8f541f3cc4a354aab72c198aa23a5cfc2678cbabf09',
1232000: '86702316a2d1730fbae01a08f36fffe5bf6d3ebb7d76b35a1617713766698b46',
1233000: 'fb16b63916c0287cb9b01d0c5aad626ced1b73c49a374c9009703aa90fd27a82',
1234000: '7c6f7904602ccd86bfb05cb8d6b5547c989c57cb2e214e93f1220fa4fe29bcb0',
1235000: '898b0f20811f52aa5a6bd0c35eff86fca3fbe3b066e423644fa77b2e269d9513',
1236000: '39128910ef624b6a8bbd390a311b5587c0991cda834eed996d814fe410cac352',
1237000: 'a0709afeedb64af4168ce8cf3dbda667a248df8e91da96acb2333686a2b89325',
1238000: 'e00075e7ba8c18cc277bfc5115ae6ff6b9678e6e99efd6e45f549ef8a3981a3d',
1239000: '3fba891600738f2d37e279209d52bbe6dc7ce005eeed62048247c96f370e7cd5',
1240000: 'def9bf1bec9325db90bb070f532972cfdd74e814c2b5e74a4d5a7c09a963a5f1',
1241000: '6a5d187e32bc189ac786959e1fe846031b97ae1ce202c22e1bdb1d2a963005fd',
1242000: 'a74d7c0b104eaf76c53a3a31ce51b75bbd8e05b5e84c31f593f505a13d83634c',
}

242
lbry/blockchain/database.py Normal file
View file

@ -0,0 +1,242 @@
import os.path
import asyncio
import sqlite3
from typing import List, Optional
from concurrent.futures import ThreadPoolExecutor
from lbry.schema.url import normalize_name
from .bcd_data_stream import BCDataStream
FILES = [
'claims',
'block_index',
]
def make_short_url(r):
try:
return f'{normalize_name(r["name"].decode())}#{r["shortestID"] or r["claimID"][::-1].hex()[0]}'
except UnicodeDecodeError:
# print(f'failed making short url due to name parse error for claim_id: {r["claimID"][::-1].hex()}')
return "INVALID NAME"
class FindShortestID:
__slots__ = 'short_id', 'new_id'
def __init__(self):
self.short_id = ''
self.new_id = None
def step(self, other_id, new_id):
other_id = other_id[::-1].hex()
if self.new_id is None:
self.new_id = new_id[::-1].hex()
for i in range(len(self.new_id)):
if other_id[i] != self.new_id[i]:
if i > len(self.short_id)-1:
self.short_id = self.new_id[:i+1]
break
def finalize(self):
return self.short_id
class BlockchainDB:
def __init__(self, directory: str):
self.directory = directory
self.connection: Optional[sqlite3.Connection] = None
self.executor: Optional[ThreadPoolExecutor] = None
async def run_in_executor(self, *args):
return await asyncio.get_running_loop().run_in_executor(self.executor, *args)
def sync_open(self):
self.connection = sqlite3.connect(
os.path.join(self.directory, FILES[0]+'.sqlite'),
timeout=60.0 * 5
)
for file in FILES[1:]:
self.connection.execute(
f"ATTACH DATABASE '{os.path.join(self.directory, file+'.sqlite')}' AS {file}"
)
self.connection.create_aggregate("find_shortest_id", 2, FindShortestID)
self.connection.execute("CREATE INDEX IF NOT EXISTS claim_originalheight ON claim (originalheight);")
self.connection.execute("CREATE INDEX IF NOT EXISTS claim_updateheight ON claim (updateheight);")
self.connection.execute("create index IF NOT EXISTS support_blockheight on support (blockheight);")
self.connection.row_factory = sqlite3.Row
async def open(self):
assert self.executor is None, "Database is already open."
self.executor = ThreadPoolExecutor(max_workers=1)
return await self.run_in_executor(self.sync_open)
def sync_close(self):
self.connection.close()
self.connection = None
async def close(self):
if self.executor is not None:
if self.connection is not None:
await self.run_in_executor(self.sync_close)
self.executor.shutdown()
self.executor = None
async def commit(self):
await self.run_in_executor(self.connection.commit)
def sync_execute(self, sql: str, *args):
return self.connection.execute(sql, *args)
async def execute(self, sql: str, *args):
return await self.run_in_executor(self.sync_execute, sql, *args)
def sync_execute_fetchall(self, sql: str, *args) -> List[dict]:
return self.connection.execute(sql, *args).fetchall()
async def execute_fetchall(self, sql: str, *args) -> List[dict]:
return await self.run_in_executor(self.sync_execute_fetchall, sql, *args)
def sync_get_best_height(self) -> int:
sql = "SELECT MAX(height) FROM block_info"
return self.connection.execute(sql).fetchone()[0]
async def get_best_height(self) -> int:
return await self.run_in_executor(self.sync_get_best_height)
def sync_get_block_files(self, file_number: int = None, start_height: int = None) -> List[dict]:
sql = """
SELECT
file as file_number,
COUNT(hash) as blocks,
SUM(txcount) as txs,
MAX(height) as best_height,
MIN(height) as start_height
FROM block_info
WHERE status&1 AND status&4
"""
args = ()
if file_number is not None and start_height is not None:
sql += "AND file = ? AND height >= ?"
args = (file_number, start_height)
return [dict(r) for r in self.sync_execute_fetchall(sql + " GROUP BY file ORDER BY file ASC;", args)]
async def get_block_files(self, file_number: int = None, start_height: int = None) -> List[dict]:
return await self.run_in_executor(
self.sync_get_block_files, file_number, start_height
)
def sync_get_blocks_in_file(self, block_file: int, start_height=0) -> List[dict]:
return [dict(r) for r in self.sync_execute_fetchall(
"""
SELECT datapos as data_offset, height, hash as block_hash, txCount as txs
FROM block_info
WHERE file = ? AND height >= ? AND status&1 AND status&4
ORDER BY datapos ASC;
""", (block_file, start_height)
)]
async def get_blocks_in_file(self, block_file: int, start_height=0) -> List[dict]:
return await self.run_in_executor(self.sync_get_blocks_in_file, block_file, start_height)
def sync_get_claim_support_txo_hashes(self, at_height: int) -> set:
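# txo_hash keys are the 32-byte tx hash concatenated with the output index
# packed as a little-endian uint32, matching the txo_hash_pk format built in
# sync_get_support_metadata() below.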
return {
r['txID'] + BCDataStream.uint32.pack(r['txN'])
for r in self.connection.execute(
"""
SELECT txID, txN FROM claim WHERE updateHeight = ?
UNION
SELECT txID, txN FROM support WHERE blockHeight = ?
""", (at_height, at_height)
).fetchall()
}
def sync_get_takeover_count(self, start_height: int, end_height: int) -> int:
sql = """
SELECT COUNT(*) FROM claim WHERE name IN (
SELECT name FROM takeover
WHERE name IS NOT NULL AND height BETWEEN ? AND ?
)
""", (start_height, end_height)
return self.connection.execute(*sql).fetchone()[0]
async def get_takeover_count(self, start_height: int, end_height: int) -> int:
return await self.run_in_executor(self.sync_get_takeover_count, start_height, end_height)
def sync_get_takeovers(self, start_height: int, end_height: int) -> List[dict]:
sql = """
SELECT name, claimID, MAX(height) AS height FROM takeover
WHERE name IS NOT NULL AND height BETWEEN ? AND ?
GROUP BY name
""", (start_height, end_height)
return [{
'normalized': normalize_name(r['name'].decode()),
'claim_hash': r['claimID'],
'height': r['height']
} for r in self.sync_execute_fetchall(*sql)]
async def get_takeovers(self, start_height: int, end_height: int) -> List[dict]:
return await self.run_in_executor(self.sync_get_takeovers, start_height, end_height)
def sync_get_claim_metadata_count(self, start_height: int, end_height: int) -> int:
sql = "SELECT COUNT(*) FROM claim WHERE originalHeight BETWEEN ? AND ?"
return self.connection.execute(sql, (start_height, end_height)).fetchone()[0]
async def get_claim_metadata_count(self, start_height: int, end_height: int) -> int:
return await self.run_in_executor(self.sync_get_claim_metadata_count, start_height, end_height)
def sync_get_claim_metadata(self, claim_hashes) -> List[dict]:
sql = f"""
SELECT
name, claimID, activationHeight, expirationHeight, originalHeight,
(SELECT
CASE WHEN takeover.claimID = claim.claimID THEN takeover.height END
FROM takeover WHERE takeover.name = claim.name
ORDER BY height DESC LIMIT 1
) AS takeoverHeight,
(SELECT find_shortest_id(c.claimid, claim.claimid) FROM claim AS c
WHERE
c.nodename = claim.nodename AND
c.originalheight <= claim.originalheight AND
c.claimid != claim.claimid
) AS shortestID
FROM claim
WHERE claimID IN ({','.join(['?' for _ in claim_hashes])})
ORDER BY claimID
""", claim_hashes
return [{
"name": r["name"],
"claim_hash": r["claimID"],
"activation_height": r["activationHeight"],
"expiration_height": r["expirationHeight"],
"takeover_height": r["takeoverHeight"],
"creation_height": r["originalHeight"],
"short_url": make_short_url(r),
} for r in self.sync_execute_fetchall(*sql)]
async def get_claim_metadata(self, claim_hashes) -> List[dict]:
return await self.run_in_executor(self.sync_get_claim_metadata, claim_hashes)
def sync_get_support_metadata_count(self, start_height: int, end_height: int) -> int:
sql = "SELECT COUNT(*) FROM support WHERE blockHeight BETWEEN ? AND ?"
return self.connection.execute(sql, (start_height, end_height)).fetchone()[0]
async def get_support_metadata_count(self, start_height: int, end_height: int) -> int:
return await self.run_in_executor(self.sync_get_support_metadata_count, start_height, end_height)
def sync_get_support_metadata(self, start_height: int, end_height: int) -> List[dict]:
sql = """
SELECT name, txid, txn, activationHeight, expirationHeight
FROM support WHERE blockHeight BETWEEN ? AND ?
""", (start_height, end_height)
return [{
"name": r['name'],
"txo_hash_pk": r['txID'] + BCDataStream.uint32.pack(r['txN']),
"activation_height": r['activationHeight'],
"expiration_height": r['expirationHeight'],
} for r in self.sync_execute_fetchall(*sql)]
async def get_support_metadata(self, start_height: int, end_height: int) -> List[dict]:
return await self.run_in_executor(self.sync_get_support_metadata, start_height, end_height)
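A minimal usage sketch of BlockchainDB (the data directory path below is hypothetical; every public coroutine proxies its sync_ twin through the single-worker executor):

import asyncio

async def main():
    db = BlockchainDB('/path/to/lbrycrd/datadir')  # hypothetical path
    await db.open()
    try:
        print('best height:', await db.get_best_height())
        for block_file in await db.get_block_files():
            print(block_file['file_number'], 'has', block_file['blocks'], 'blocks')
    finally:
        await db.close()

asyncio.run(main())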

View file

@ -1,10 +1,19 @@
import re
import textwrap
from .util import coins_to_satoshis, satoshis_to_coins
from decimal import Decimal
from lbry.constants import COIN
def lbc_to_dewies(lbc: str) -> int:
try:
return coins_to_satoshis(lbc)
if not isinstance(lbc, str):
raise ValueError("{coins} must be a string")
result = re.search(r'^(\d{1,10})\.(\d{1,8})$', lbc)
if result is not None:
whole, fractional = result.groups()
return int(whole + fractional.ljust(8, "0"))
raise ValueError(f"'{lbc}' is not a valid coin decimal")
except ValueError:
raise ValueError(textwrap.dedent(
f"""
@ -30,13 +39,17 @@ def lbc_to_dewies(lbc: str) -> int:
def dewies_to_lbc(dewies) -> str:
return satoshis_to_coins(dewies)
coins = '{:.8f}'.format(dewies / COIN).rstrip('0')
if coins.endswith('.'):
return coins+'0'
else:
return coins
def dict_values_to_lbc(d):
lbc_dict = {}
for key, value in d.items():
if isinstance(value, int):
if isinstance(value, (int, Decimal)):
lbc_dict[key] = dewies_to_lbc(value)
elif isinstance(value, dict):
lbc_dict[key] = dict_values_to_lbc(value)
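A quick round-trip through the rewritten helpers, assuming COIN == 100_000_000 (eight decimal places):

assert lbc_to_dewies('1.5') == 150_000_000
assert lbc_to_dewies('0.00000001') == 1
assert dewies_to_lbc(150_000_000) == '1.5'
assert dewies_to_lbc(100_000_000) == '1.0'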

View file

@ -1,5 +1,5 @@
from binascii import hexlify, unhexlify
from .constants import NULL_HASH32
from lbry.constants import NULL_HASH32
class TXRef:
@ -29,28 +29,35 @@ class TXRef:
class TXRefImmutable(TXRef):
__slots__ = ('_height',)
__slots__ = ('_height', '_timestamp')
def __init__(self):
super().__init__()
self._height = -1
self._timestamp = -1
@classmethod
def from_hash(cls, tx_hash: bytes, height: int) -> 'TXRefImmutable':
def from_hash(cls, tx_hash: bytes, height: int, timestamp: int) -> 'TXRefImmutable':
ref = cls()
ref._hash = tx_hash
ref._id = hexlify(tx_hash[::-1]).decode()
ref._height = height
ref._timestamp = timestamp
return ref
@classmethod
def from_id(cls, tx_id: str, height: int) -> 'TXRefImmutable':
def from_id(cls, tx_id: str, height: int, timestamp: int) -> 'TXRefImmutable':
ref = cls()
ref._id = tx_id
ref._hash = unhexlify(tx_id)[::-1]
ref._height = height
ref._timestamp = timestamp
return ref
@property
def height(self):
return self._height
@property
def timestamp(self):
return self._timestamp
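A small illustration of the new timestamp parameter; the tx id and values below are made up:

ref = TXRefImmutable.from_id('de' * 32, height=5, timestamp=1_580_000_000)
assert ref.height == 5
assert ref.timestamp == 1_580_000_000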

View file

@ -5,13 +5,14 @@ import asyncio
import logging
import zlib
from datetime import date
from concurrent.futures.thread import ThreadPoolExecutor
from io import BytesIO
from typing import Optional, Iterator, Tuple, Callable
from binascii import hexlify, unhexlify
from lbry.crypto.hash import sha512, double_sha256, ripemd160
from lbry.wallet.util import ArithUint256, date_to_julian_day
from lbry.blockchain.util import ArithUint256
from .checkpoints import HASHES
@ -41,22 +42,23 @@ class Headers:
validate_difficulty: bool = True
def __init__(self, path) -> None:
self.io = None
if path == ':memory:':
self.io = BytesIO()
self.path = path
self._size: Optional[int] = None
self.chunk_getter: Optional[Callable] = None
self.executor = ThreadPoolExecutor(1)
self.known_missing_checkpointed_chunks = set()
self.check_chunk_lock = asyncio.Lock()
async def open(self):
self.io = BytesIO()
if not self.executor:
self.executor = ThreadPoolExecutor(1)
if self.path != ':memory:':
def _readit():
if os.path.exists(self.path):
with open(self.path, 'r+b') as header_file:
self.io.seek(0)
self.io.write(header_file.read())
await asyncio.get_event_loop().run_in_executor(None, _readit)
if not os.path.exists(self.path):
self.io = open(self.path, 'w+b')
else:
self.io = open(self.path, 'r+b')
bytes_size = self.io.seek(0, os.SEEK_END)
self._size = bytes_size // self.header_size
max_checkpointed_height = max(self.checkpoints.keys() or [-1]) + 1000
@ -70,14 +72,10 @@ class Headers:
await self.get_all_missing_headers()
async def close(self):
if self.io is not None:
def _close():
flags = 'r+b' if os.path.exists(self.path) else 'w+b'
with open(self.path, flags) as header_file:
header_file.write(self.io.getbuffer())
await asyncio.get_event_loop().run_in_executor(None, _close)
self.io.close()
self.io = None
if self.executor:
self.executor.shutdown()
self.executor = None
self.io.close()
@staticmethod
def serialize(header):
@ -137,30 +135,28 @@ class Headers:
except struct.error:
raise IndexError(f"failed to get {height}, at {len(self)}")
def estimated_timestamp(self, height, try_real_headers=True):
def estimated_timestamp(self, height):
if height <= 0:
return
if try_real_headers and self.has_header(height):
offset = height * self.header_size
return struct.unpack('<I', self.io.getbuffer()[offset + 100: offset + 104])[0]
return int(self.first_block_timestamp + (height * self.timestamp_average_offset))
def estimated_julian_day(self, height):
return date_to_julian_day(date.fromtimestamp(self.estimated_timestamp(height, False)))
def estimated_date(self, height):
return date.fromtimestamp(self.estimated_timestamp(height))
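# estimated_timestamp() is now a pure linear extrapolation: the genesis block's
# timestamp plus height times the chain's average block interval; it no longer
# falls back to reading timestamps out of stored headers.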
async def get_raw_header(self, height) -> bytes:
if self.chunk_getter:
await self.ensure_chunk_at(height)
if not 0 <= height <= self.height:
raise IndexError(f"{height} is out of bounds, current height: {self.height}")
return self._read(height)
return await asyncio.get_running_loop().run_in_executor(self.executor, self._read, height)
def _read(self, height, count=1):
offset = height * self.header_size
return bytes(self.io.getbuffer()[offset: offset + self.header_size * count])
self.io.seek(height * self.header_size, os.SEEK_SET)
return self.io.read(self.header_size * count)
def chunk_hash(self, start, count):
return self.hash_header(self._read(start, count)).decode()
self.io.seek(start * self.header_size, os.SEEK_SET)
return self.hash_header(self.io.read(count * self.header_size)).decode()
async def ensure_checkpointed_size(self):
max_checkpointed_height = max(self.checkpoints.keys() or [-1])
@ -169,7 +165,7 @@ class Headers:
async def ensure_chunk_at(self, height):
async with self.check_chunk_lock:
if self.has_header(height):
if await self.has_header(height):
log.debug("has header %s", height)
return
return await self.fetch_chunk(height)
@ -183,7 +179,7 @@ class Headers:
)
chunk_hash = self.hash_header(chunk).decode()
if self.checkpoints.get(start) == chunk_hash:
self._write(start, chunk)
await asyncio.get_running_loop().run_in_executor(self.executor, self._write, start, chunk)
if start in self.known_missing_checkpointed_chunks:
self.known_missing_checkpointed_chunks.remove(start)
return
@ -193,23 +189,27 @@ class Headers:
f"Checkpoint mismatch at height {start}. Expected {self.checkpoints[start]}, but got {chunk_hash} instead."
)
def has_header(self, height):
async def has_header(self, height):
normalized_height = (height // 1000) * 1000
if normalized_height in self.checkpoints:
return normalized_height not in self.known_missing_checkpointed_chunks
empty = '56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d'
all_zeroes = '789d737d4f448e554b318c94063bbfa63e9ccda6e208f5648ca76ee68896557b'
return self.chunk_hash(height, 1) not in (empty, all_zeroes)
def _has_header(height):
empty = '56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d'
all_zeroes = '789d737d4f448e554b318c94063bbfa63e9ccda6e208f5648ca76ee68896557b'
return self.chunk_hash(height, 1) not in (empty, all_zeroes)
return await asyncio.get_running_loop().run_in_executor(self.executor, _has_header, height)
async def get_all_missing_headers(self):
# Heavy operation done in one optimized shot
for chunk_height, expected_hash in reversed(list(self.checkpoints.items())):
if chunk_height in self.known_missing_checkpointed_chunks:
continue
if self.chunk_hash(chunk_height, 1000) != expected_hash:
self.known_missing_checkpointed_chunks.add(chunk_height)
return self.known_missing_checkpointed_chunks
def _io_checkall():
for chunk_height, expected_hash in reversed(list(self.checkpoints.items())):
if chunk_height in self.known_missing_checkpointed_chunks:
continue
if self.chunk_hash(chunk_height, 1000) != expected_hash:
self.known_missing_checkpointed_chunks.add(chunk_height)
return self.known_missing_checkpointed_chunks
return await asyncio.get_running_loop().run_in_executor(self.executor, _io_checkall)
@property
def height(self) -> int:
@ -241,7 +241,7 @@ class Headers:
bail = True
chunk = chunk[:(height-e.height)*self.header_size]
if chunk:
added += self._write(height, chunk)
added += await asyncio.get_running_loop().run_in_executor(self.executor, self._write, height, chunk)
if bail:
break
return added
@ -306,7 +306,9 @@ class Headers:
previous_header_hash = fail = None
batch_size = 36
for height in range(start_height, self.height, batch_size):
headers = self._read(height, batch_size)
headers = await asyncio.get_running_loop().run_in_executor(
self.executor, self._read, height, batch_size
)
if len(headers) % self.header_size != 0:
headers = headers[:(len(headers) // self.header_size) * self.header_size]
for header_hash, header in self._iterate_headers(height, headers):
@ -322,11 +324,12 @@ class Headers:
assert start_height > 0 and height == start_height
if fail:
log.warning("Header file corrupted at height %s, truncating it.", height - 1)
self.io.seek(max(0, (height - 1)) * self.header_size, os.SEEK_SET)
self.io.truncate()
self.io.flush()
self._size = self.io.seek(0, os.SEEK_END) // self.header_size
return
def __truncate(at_height):
self.io.seek(max(0, (at_height - 1)) * self.header_size, os.SEEK_SET)
self.io.truncate()
self.io.flush()
self._size = self.io.seek(0, os.SEEK_END) // self.header_size
return await asyncio.get_running_loop().run_in_executor(self.executor, __truncate, height)
previous_header_hash = header_hash
@classmethod

310
lbry/blockchain/lbrycrd.py Normal file
View file

@ -0,0 +1,310 @@
import os
import struct
import shutil
import asyncio
import logging
import zipfile
import tempfile
from typing import Optional
from binascii import hexlify
import aiohttp
import zmq
import zmq.asyncio
from lbry.conf import Config
from lbry.event import EventController
from lbry.error import LbrycrdEventSubscriptionError, LbrycrdUnauthorizedError
from .database import BlockchainDB
from .ledger import Ledger, RegTestLedger
log = logging.getLogger(__name__)
DOWNLOAD_URL = (
'https://github.com/lbryio/lbrycrd/releases/download/v0.17.4.5/lbrycrd-linux-1745.zip'
)
class Process(asyncio.SubprocessProtocol):
IGNORE_OUTPUT = [
b'keypool keep',
b'keypool reserve',
b'keypool return',
]
def __init__(self):
self.ready = asyncio.Event()
self.stopped = asyncio.Event()
def pipe_data_received(self, fd, data):
if not any(ignore in data for ignore in self.IGNORE_OUTPUT):
if b'Error:' in data:
log.error(data.decode())
else:
for line in data.decode().splitlines():
log.debug(line.rstrip())
if b'Error:' in data:
self.ready.set()
raise SystemError(data.decode())
if b'Done loading' in data:
self.ready.set()
def process_exited(self):
self.stopped.set()
self.ready.set()
ZMQ_BLOCK_EVENT = 'pubhashblock'
class Lbrycrd:
def __init__(self, ledger: Ledger):
self.ledger, self.conf = ledger, ledger.conf
self.data_dir = self.actual_data_dir = ledger.conf.lbrycrd_dir
if self.is_regtest:
self.actual_data_dir = os.path.join(self.data_dir, 'regtest')
self.blocks_dir = os.path.join(self.actual_data_dir, 'blocks')
self.bin_dir = os.path.join(os.path.dirname(__file__), 'bin')
self.daemon_bin = os.path.join(self.bin_dir, 'lbrycrdd')
self.cli_bin = os.path.join(self.bin_dir, 'lbrycrd-cli')
self.protocol = None
self.transport = None
self.subscribed = False
self.subscription: Optional[asyncio.Task] = None
self.default_generate_address = None
self._on_block_controller = EventController()
self.on_block = self._on_block_controller.stream
self.on_block.listen(lambda e: log.info('%s %s', hexlify(e['hash']), e['msg']))
self.db = BlockchainDB(self.actual_data_dir)
self.session: Optional[aiohttp.ClientSession] = None
@classmethod
def temp_regtest(cls):
return cls(RegTestLedger(
Config.with_same_dir(tempfile.mkdtemp()).set(
lbrycrd_rpc_port=9245 + 2, # avoid conflict with default rpc port
lbrycrd_peer_port=9246 + 2, # avoid conflict with default peer port
lbrycrd_zmq_blocks="tcp://127.0.0.1:29000"
)
))
@staticmethod
def get_block_file_name(block_file_number):
return f'blk{block_file_number:05}.dat'
def get_block_file_path(self, block_file_number):
return os.path.join(
self.actual_data_dir, 'blocks',
self.get_block_file_name(block_file_number)
)
@property
def is_regtest(self):
return isinstance(self.ledger, RegTestLedger)
@property
def rpc_url(self):
return (
f'http://{self.conf.lbrycrd_rpc_user}:{self.conf.lbrycrd_rpc_pass}'
f'@{self.conf.lbrycrd_rpc_host}:{self.conf.lbrycrd_rpc_port}/'
)
@property
def exists(self):
return (
os.path.exists(self.cli_bin) and
os.path.exists(self.daemon_bin)
)
async def download(self):
downloaded_file = os.path.join(
self.bin_dir, DOWNLOAD_URL[DOWNLOAD_URL.rfind('/')+1:]
)
if not os.path.exists(self.bin_dir):
os.mkdir(self.bin_dir)
if not os.path.exists(downloaded_file):
log.info('Downloading: %s', DOWNLOAD_URL)
async with aiohttp.ClientSession() as session:
async with session.get(DOWNLOAD_URL) as response:
with open(downloaded_file, 'wb') as out_file:
while True:
chunk = await response.content.read(4096)
if not chunk:
break
out_file.write(chunk)
log.info('Extracting: %s', downloaded_file)
with zipfile.ZipFile(downloaded_file) as dotzip:
dotzip.extractall(self.bin_dir)
# zipfile bug https://bugs.python.org/issue15795
os.chmod(self.cli_bin, 0o755)
os.chmod(self.daemon_bin, 0o755)
return self.exists
async def ensure(self):
return self.exists or await self.download()
def get_start_command(self, *args):
if self.is_regtest:
args += ('-regtest',)
if self.conf.lbrycrd_zmq_blocks:
args += (f'-zmqpubhashblock={self.conf.lbrycrd_zmq_blocks}',)
return (
self.daemon_bin,
f'-datadir={self.data_dir}',
f'-port={self.conf.lbrycrd_peer_port}',
f'-rpcport={self.conf.lbrycrd_rpc_port}',
f'-rpcuser={self.conf.lbrycrd_rpc_user}',
f'-rpcpassword={self.conf.lbrycrd_rpc_pass}',
'-server', '-printtoconsole',
*args
)
async def open(self):
self.session = aiohttp.ClientSession()
await self.db.open()
async def close(self):
await self.db.close()
if self.session is not None:
await self.session.close()
async def start(self, *args):
loop = asyncio.get_running_loop()
command = self.get_start_command(*args)
log.info(' '.join(command))
self.transport, self.protocol = await loop.subprocess_exec(Process, *command)
await self.protocol.ready.wait()
assert not self.protocol.stopped.is_set()
await self.open()
async def stop(self, cleanup=True):
try:
await self.close()
self.transport.terminate()
await self.protocol.stopped.wait()
assert self.transport.get_returncode() == 0, "lbrycrd daemon exited with an error"
self.transport.close()
finally:
if cleanup:
await self.cleanup()
async def cleanup(self):
await asyncio.get_running_loop().run_in_executor(
None, shutil.rmtree, self.data_dir, True
)
async def ensure_subscribable(self):
zmq_notifications = await self.get_zmq_notifications()
subs = {e['type']: e['address'] for e in zmq_notifications}
if ZMQ_BLOCK_EVENT not in subs:
raise LbrycrdEventSubscriptionError(ZMQ_BLOCK_EVENT)
if not self.conf.lbrycrd_zmq_blocks:
self.conf.lbrycrd_zmq_blocks = subs[ZMQ_BLOCK_EVENT]
async def subscribe(self):
if not self.subscribed:
self.subscribed = True
ctx = zmq.asyncio.Context.instance()
sock = ctx.socket(zmq.SUB) # pylint: disable=no-member
sock.connect(self.conf.lbrycrd_zmq_blocks)
sock.subscribe("hashblock")
self.subscription = asyncio.create_task(self.subscription_handler(sock))
async def subscription_handler(self, sock):
try:
while self.subscribed:
msg = await sock.recv_multipart()
await self._on_block_controller.add({
'hash': msg[1],
'msg': struct.unpack('<I', msg[2])[0]
})
except asyncio.CancelledError:
sock.close()
raise
def unsubscribe(self):
if self.subscribed:
self.subscribed = False
self.subscription.cancel()
self.subscription = None
async def rpc(self, method, params=None):
message = {
"jsonrpc": "1.0",
"id": "1",
"method": method,
"params": params or []
}
async with self.session.post(self.rpc_url, json=message) as resp:
if resp.status == 401:
raise LbrycrdUnauthorizedError()
try:
result = await resp.json()
except aiohttp.ContentTypeError as e:
raise Exception(await resp.text()) from e
if not result['error']:
return result['result']
else:
result['error'].update(method=method, params=params)
raise Exception(result['error'])
async def get_zmq_notifications(self):
return await self.rpc("getzmqnotifications")
async def generate(self, blocks):
if self.default_generate_address is None:
self.default_generate_address = await self.get_new_address()
return await self.generate_to_address(blocks, self.default_generate_address)
async def get_new_address(self):
return await self.rpc("getnewaddress")
async def generate_to_address(self, blocks, address):
return await self.rpc("generatetoaddress", [blocks, address])
async def send_to_address(self, address, amount):
return await self.rpc("sendtoaddress", [address, amount])
async def get_block(self, block_hash):
return await self.rpc("getblock", [block_hash])
async def get_raw_transaction(self, txid):
return await self.rpc("getrawtransaction", [txid])
async def fund_raw_transaction(self, tx):
return await self.rpc("fundrawtransaction", [tx])
async def sign_raw_transaction_with_wallet(self, tx):
return await self.rpc("signrawtransactionwithwallet", [tx])
async def send_raw_transaction(self, tx):
return await self.rpc("sendrawtransaction", [tx])
async def claim_name(self, name, data, amount):
return await self.rpc("claimname", [name, data, amount])
async def update_claim(self, txid, data, amount):
return await self.rpc("updateclaim", [txid, data, amount])
async def abandon_claim(self, txid, address):
return await self.rpc("abandonclaim", [txid, address])
async def support_claim(self, name, claim_id, amount, value="", istip=False):
return await self.rpc("supportclaim", [name, claim_id, amount, value, istip])
async def abandon_support(self, txid, address):
return await self.rpc("abandonsupport", [txid, address])

166
lbry/blockchain/ledger.py Normal file
View file

@ -0,0 +1,166 @@
import typing
from binascii import unhexlify
from string import hexdigits
from lbry.crypto.hash import hash160, double_sha256
from lbry.crypto.base58 import Base58
from lbry.schema.url import URL
from .header import Headers, UnvalidatedHeaders
from .checkpoints import HASHES
from .dewies import lbc_to_dewies
if typing.TYPE_CHECKING:
from lbry.conf import Config
class Ledger:
name = 'LBRY Credits'
symbol = 'LBC'
network_name = 'mainnet'
headers_class = Headers
secret_prefix = bytes((0x1c,))
pubkey_address_prefix = bytes((0x55,))
script_address_prefix = bytes((0x7a,))
extended_public_key_prefix = unhexlify('0488b21e')
extended_private_key_prefix = unhexlify('0488ade4')
max_target = 0x0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
genesis_hash = '9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463'
genesis_bits = 0x1f00ffff
target_timespan = 150
fee_per_byte = 50
fee_per_name_char = 200000
checkpoints = HASHES
def __init__(self, conf: 'Config'):
self.conf = conf
self.coin_selection_strategy = None
@classmethod
def get_id(cls):
return '{}_{}'.format(cls.symbol.lower(), cls.network_name.lower())
@classmethod
def hash160_to_address(cls, h160):
raw_address = cls.pubkey_address_prefix + h160
return Base58.encode(bytearray(raw_address + double_sha256(raw_address)[0:4]))
@staticmethod
def address_to_hash160(address):
return Base58.decode(address)[1:21]
@classmethod
def is_valid_address(cls, address):
decoded = Base58.decode_check(address)
return decoded[0] == cls.pubkey_address_prefix[0]
@classmethod
def valid_address_or_error(cls, address):
try:
assert cls.is_valid_address(address)
except Exception:
raise Exception(f"'{address}' is not a valid address")
@staticmethod
def valid_claim_id(claim_id: str):
if len(claim_id) != 40:
raise Exception(f"Incorrect claimid length: {len(claim_id)}")
if set(claim_id).difference(hexdigits):
raise Exception("Claim id is not hex encoded")
@staticmethod
def valid_channel_name_or_error(name: str):
try:
if not name:
raise Exception("Channel name cannot be blank.")
parsed = URL.parse(name)
if not parsed.has_channel:
raise Exception("Channel names must start with '@' symbol.")
if parsed.channel.name != name:
raise Exception("Channel name has invalid character")
except (TypeError, ValueError):
raise Exception("Invalid channel name.")
@staticmethod
def valid_stream_name_or_error(name: str):
try:
if not name:
raise Exception('Stream name cannot be blank.')
parsed = URL.parse(name)
if parsed.has_channel:
raise Exception(
"Stream names cannot start with '@' symbol. This is reserved for channels claims."
)
if not parsed.has_stream or parsed.stream.name != name:
raise Exception('Stream name has invalid characters.')
except (TypeError, ValueError):
raise Exception("Invalid stream name.")
@staticmethod
def valid_collection_name_or_error(name: str):
try:
if not name:
raise Exception('Collection name cannot be blank.')
parsed = URL.parse(name)
if parsed.has_channel:
raise Exception(
"Collection names cannot start with '@' symbol. This is reserved for channels claims."
)
if not parsed.has_stream or parsed.stream.name != name:
raise Exception('Collection name has invalid characters.')
except (TypeError, ValueError):
raise Exception("Invalid collection name.")
@staticmethod
def get_dewies_or_error(argument: str, lbc: str, positive_value=False):
try:
dewies = lbc_to_dewies(lbc)
if positive_value and dewies <= 0:
raise ValueError(f"'{argument}' value must be greater than 0.0")
return dewies
except ValueError as e:
raise ValueError(f"Invalid value for '{argument}': {e.args[0]}")
def get_fee_address(self, kwargs: dict, claim_address: str) -> str:
if 'fee_address' in kwargs:
self.valid_address_or_error(kwargs['fee_address'])
return kwargs['fee_address']
if 'fee_currency' in kwargs or 'fee_amount' in kwargs:
return claim_address
@classmethod
def public_key_to_address(cls, public_key):
return cls.hash160_to_address(hash160(public_key))
@staticmethod
def private_key_to_wif(private_key):
return b'\x1c' + private_key + b'\x01'
class TestNetLedger(Ledger):
network_name = 'testnet'
pubkey_address_prefix = bytes((111,))
script_address_prefix = bytes((196,))
extended_public_key_prefix = unhexlify('043587cf')
extended_private_key_prefix = unhexlify('04358394')
checkpoints = {}
class RegTestLedger(Ledger):
network_name = 'regtest'
headers_class = UnvalidatedHeaders
pubkey_address_prefix = bytes((111,))
script_address_prefix = bytes((196,))
extended_public_key_prefix = unhexlify('043587cf')
extended_private_key_prefix = unhexlify('04358394')
max_target = 0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
genesis_hash = '6e3fcf1299d4ec5d79c3a4c91d624a4acf9e2e173d95a1a0504f677669687556'
genesis_bits = 0x207fffff
target_timespan = 1
checkpoints = {}
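A sanity check of the address helpers, assuming the usual Base58Check round-trip; the 20-byte hash160 payload below is made up:

h160 = bytes(20)  # hypothetical hash160 payload
address = Ledger.hash160_to_address(h160)
assert Ledger.is_valid_address(address)
assert Ledger.address_to_hash160(address) == h160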

View file

@ -17,7 +17,6 @@ OP_HASH160 = 0xa9
OP_EQUALVERIFY = 0x88
OP_CHECKSIG = 0xac
OP_CHECKMULTISIG = 0xae
OP_CHECKLOCKTIMEVERIFY = 0xb1
OP_EQUAL = 0x87
OP_PUSHDATA1 = 0x4c
OP_PUSHDATA2 = 0x4d
@ -277,7 +276,7 @@ class Template:
elif isinstance(opcode, PUSH_INTEGER):
data = values[opcode.name]
source.write_many(push_data(
data.to_bytes((data.bit_length() + 8) // 8, byteorder='little', signed=True)
data.to_bytes((data.bit_length() + 7) // 8, byteorder='little')
))
elif isinstance(opcode, PUSH_SUBSCRIPT):
data = values[opcode.name]
@ -295,20 +294,25 @@ class Template:
class Script:
__slots__ = 'source', '_template', '_values', '_template_hint'
__slots__ = 'source', 'offset', '_template', '_values', '_template_hint'
templates: List[Template] = []
NO_SCRIPT = Template('no_script', None) # special case
def __init__(self, source=None, template=None, values=None, template_hint=None):
def __init__(self, source=None, template=None, values=None, template_hint=None, offset=None):
self.source = source
self.offset = offset
self._template = template
self._values = values
self._template_hint = template_hint
if source is None and template and values:
self.generate()
@property
def length(self):
return len(self.source)
@property
def template(self):
if self._template is None:
@ -358,27 +362,19 @@ class InputScript(Script):
REDEEM_PUBKEY_HASH = Template('pubkey_hash', (
PUSH_SINGLE('signature'), PUSH_SINGLE('pubkey')
))
MULTI_SIG_SCRIPT = Template('multi_sig', (
REDEEM_SCRIPT = Template('script', (
SMALL_INTEGER('signatures_count'), PUSH_MANY('pubkeys'), SMALL_INTEGER('pubkeys_count'),
OP_CHECKMULTISIG
))
REDEEM_SCRIPT_HASH_MULTI_SIG = Template('script_hash+multi_sig', (
OP_0, PUSH_MANY('signatures'), PUSH_SUBSCRIPT('script', MULTI_SIG_SCRIPT)
))
TIME_LOCK_SCRIPT = Template('timelock', (
PUSH_INTEGER('height'), OP_CHECKLOCKTIMEVERIFY, OP_DROP,
# rest is identical to OutputScript.PAY_PUBKEY_HASH:
OP_DUP, OP_HASH160, PUSH_SINGLE('pubkey_hash'), OP_EQUALVERIFY, OP_CHECKSIG
))
REDEEM_SCRIPT_HASH_TIME_LOCK = Template('script_hash+timelock', (
PUSH_SINGLE('signature'), PUSH_SINGLE('pubkey'), PUSH_SUBSCRIPT('script', TIME_LOCK_SCRIPT)
REDEEM_SCRIPT_HASH = Template('script_hash', (
OP_0, PUSH_MANY('signatures'), PUSH_SUBSCRIPT('script', REDEEM_SCRIPT)
))
templates = [
REDEEM_PUBKEY,
REDEEM_PUBKEY_HASH,
REDEEM_SCRIPT_HASH_TIME_LOCK,
REDEEM_SCRIPT_HASH_MULTI_SIG,
REDEEM_SCRIPT_HASH,
REDEEM_SCRIPT
]
@classmethod
@ -389,38 +385,20 @@ class InputScript(Script):
})
@classmethod
def redeem_multi_sig_script_hash(cls, signatures, pubkeys):
return cls(template=cls.REDEEM_SCRIPT_HASH_MULTI_SIG, values={
def redeem_script_hash(cls, signatures, pubkeys):
return cls(template=cls.REDEEM_SCRIPT_HASH, values={
'signatures': signatures,
'script': cls(template=cls.MULTI_SIG_SCRIPT, values={
'signatures_count': len(signatures),
'pubkeys': pubkeys,
'pubkeys_count': len(pubkeys)
})
'script': cls.redeem_script(signatures, pubkeys)
})
@classmethod
def redeem_time_lock_script_hash(cls, signature, pubkey, height=None, pubkey_hash=None, script_source=None):
if height and pubkey_hash:
script = cls(template=cls.TIME_LOCK_SCRIPT, values={
'height': height,
'pubkey_hash': pubkey_hash
})
elif script_source:
script = cls(source=script_source, template=cls.TIME_LOCK_SCRIPT)
script.parse(script.template)
else:
raise ValueError("script_source or both height and pubkey_hash are required.")
return cls(template=cls.REDEEM_SCRIPT_HASH_TIME_LOCK, values={
'signature': signature,
'pubkey': pubkey,
'script': script
def redeem_script(cls, signatures, pubkeys):
return cls(template=cls.REDEEM_SCRIPT, values={
'signatures_count': len(signatures),
'pubkeys': pubkeys,
'pubkeys_count': len(pubkeys)
})
@property
def is_script_hash(self):
return self.template.name.startswith('script_hash+')
class OutputScript(Script):
@ -487,6 +465,21 @@ class OutputScript(Script):
UPDATE_CLAIM_OPCODES + PAY_SCRIPT_HASH.opcodes
))
SELL_SCRIPT = Template('sell_script', (
OP_VERIFY, OP_DROP, OP_DROP, OP_DROP, PUSH_INTEGER('price'), OP_PRICECHECK
))
SELL_CLAIM = Template('sell_claim+pay_script_hash', (
OP_SELL_CLAIM, PUSH_SINGLE('claim_id'), PUSH_SUBSCRIPT('sell_script', SELL_SCRIPT),
PUSH_SUBSCRIPT('receive_script', InputScript.REDEEM_SCRIPT), OP_2DROP, OP_2DROP
) + PAY_SCRIPT_HASH.opcodes)
BUY_CLAIM = Template('buy_claim+pay_script_hash', (
OP_BUY_CLAIM, PUSH_SINGLE('sell_id'),
PUSH_SINGLE('claim_id'), PUSH_SINGLE('claim_version'),
PUSH_SINGLE('owner_pubkey_hash'), PUSH_SINGLE('negotiation_signature'),
OP_2DROP, OP_2DROP, OP_2DROP,
) + PAY_SCRIPT_HASH.opcodes)
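# SELL_CLAIM pairs the asking price (sell_script) with the script a buyer must
# satisfy to pay the seller (receive_script); BUY_CLAIM references that sell by
# sell_id and carries the owner's negotiation signature. Both templates append
# the standard pay-to-script-hash opcodes.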
templates = [
PAY_PUBKEY_FULL,
PAY_PUBKEY_HASH,
@ -501,6 +494,8 @@ class OutputScript(Script):
SUPPORT_CLAIM_DATA_SCRIPT,
UPDATE_CLAIM_PUBKEY,
UPDATE_CLAIM_SCRIPT,
SELL_CLAIM, SELL_SCRIPT,
BUY_CLAIM,
]
@classmethod
@ -560,6 +555,30 @@ class OutputScript(Script):
'pubkey_hash': pubkey_hash
})
@classmethod
def sell_script(cls, price):
return cls(template=cls.SELL_SCRIPT, values={
'price': price,
})
@classmethod
def sell_claim(cls, claim_id, price, signatures, pubkeys):
return cls(template=cls.SELL_CLAIM, values={
'claim_id': claim_id,
'sell_script': OutputScript.sell_script(price),
'receive_script': InputScript.redeem_script(signatures, pubkeys)
})
@classmethod
def buy_claim(cls, sell_id, claim_id, claim_version, owner_pubkey_hash, negotiation_signature):
return cls(template=cls.BUY_CLAIM, values={
'sell_id': sell_id,
'claim_id': claim_id,
'claim_version': claim_version,
'owner_pubkey_hash': owner_pubkey_hash,
'negotiation_signature': negotiation_signature,
})
@property
def is_pay_pubkey_hash(self):
return self.template.name.endswith('pay_pubkey_hash')
@ -588,6 +607,17 @@ class OutputScript(Script):
def is_support_claim_data(self):
return self.template.name.startswith('support_claim+data+')
@property
def is_sell_claim(self):
return self.template.name.startswith('sell_claim+')
@property
def is_buy_claim(self):
return self.template.name.startswith('buy_claim+')
@property
def is_claim_involved(self):
return any((self.is_claim_name, self.is_support_claim, self.is_update_claim))
return any((
self.is_claim_name, self.is_support_claim, self.is_update_claim,
self.is_sell_claim, self.is_buy_claim
))

View file

@ -0,0 +1 @@
from .synchronizer import BlockchainSync

View file

@ -0,0 +1,211 @@
import logging
from sqlalchemy import table, bindparam, text, func, union
from sqlalchemy.future import select
from sqlalchemy.schema import CreateTable
from lbry.db.tables import Block as BlockTable, TX, TXO, TXI, Claim, Tag, Support
from lbry.db.tables import (
pg_add_tx_constraints_and_indexes,
pg_add_txo_constraints_and_indexes,
pg_add_txi_constraints_and_indexes,
)
from lbry.db.query_context import ProgressContext, event_emitter, context
from lbry.db.sync import set_input_addresses, update_spent_outputs
from lbry.blockchain.block import Block, create_block_filter
from lbry.blockchain.bcd_data_stream import BCDataStream
from .context import get_or_initialize_lbrycrd
log = logging.getLogger(__name__)
def get_best_block_height_for_file(file_number):
return context().fetchone(
select(func.coalesce(func.max(BlockTable.c.height), -1).label('height'))
.where(BlockTable.c.file_number == file_number)
)['height']
@event_emitter("blockchain.sync.blocks.file", "blocks", "txs", throttle=100)
def sync_block_file(
file_number: int, start_height: int, txs: int, flush_size: int, p: ProgressContext
):
chain = get_or_initialize_lbrycrd(p.ctx)
new_blocks = chain.db.sync_get_blocks_in_file(file_number, start_height)
if not new_blocks:
return -1
file_name = chain.get_block_file_name(file_number)
p.start(len(new_blocks), txs, progress_id=file_number, label=file_name)
block_file_path = chain.get_block_file_path(file_number)
done_blocks = done_txs = 0
last_block_processed, loader = -1, p.ctx.get_bulk_loader()
with open(block_file_path, "rb") as fp:
stream = BCDataStream(fp=fp)
for done_blocks, block_info in enumerate(new_blocks, start=1):
block_height = block_info["height"]
fp.seek(block_info["data_offset"])
block = Block.from_data_stream(stream, block_height, file_number)
loader.add_block(block)
if len(loader.txs) >= flush_size:
done_txs += loader.flush(TX)
p.step(done_blocks, done_txs)
last_block_processed = block_height
if p.ctx.stop_event.is_set():
return last_block_processed
if loader.txs:
done_txs += loader.flush(TX)
p.step(done_blocks, done_txs)
return last_block_processed
@event_emitter("blockchain.sync.spends.main", "steps")
def sync_spends(initial_sync: bool, p: ProgressContext):
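# On initial sync the txi/txo tables are rebuilt wholesale (rename, re-insert
# with the derived address/spent_height columns, drop the old table), adding
# the Postgres constraints only afterwards to avoid per-row updates; the else
# branch below handles incremental sync by updating just the changed rows.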
if initial_sync:
p.start(
7 +
len(pg_add_tx_constraints_and_indexes) +
len(pg_add_txi_constraints_and_indexes) +
len(pg_add_txo_constraints_and_indexes)
)
# 1. tx table stuff
if p.ctx.is_postgres:
p.ctx.execute_notx(text("VACUUM ANALYZE tx;"))
p.step()
for constraint in pg_add_tx_constraints_and_indexes:
if p.ctx.is_postgres:
p.ctx.execute(text(constraint))
p.step()
# A. Update TXIs to have the address of TXO they are spending.
# 2. txi table reshuffling
p.ctx.execute(text("ALTER TABLE txi RENAME TO old_txi;"))
p.ctx.execute(CreateTable(TXI, include_foreign_key_constraints=[]))
if p.ctx.is_postgres:
p.ctx.execute(text("ALTER TABLE txi DROP CONSTRAINT txi_pkey;"))
p.step()
# 3. insert
old_txi = table("old_txi", *(c.copy() for c in TXI.columns)) # pylint: disable=not-an-iterable
columns = [c for c in old_txi.columns if c.name != "address"] + [TXO.c.address]
join_txi_on_txo = old_txi.join(TXO, old_txi.c.txo_hash == TXO.c.txo_hash)
select_txis = select(*columns).select_from(join_txi_on_txo)
insert_txis = TXI.insert().from_select(columns, select_txis)
p.ctx.execute(insert_txis)
p.step()
# 4. drop old txi and vacuum
p.ctx.execute(text("DROP TABLE old_txi;"))
if p.ctx.is_postgres:
p.ctx.execute_notx(text("VACUUM ANALYZE txi;"))
p.step()
for constraint in pg_add_txi_constraints_and_indexes:
if p.ctx.is_postgres:
p.ctx.execute(text(constraint))
p.step()
# B. Update TXOs to have the height at which they were spent (if they were).
# 5. txo table reshuffling
p.ctx.execute(text("ALTER TABLE txo RENAME TO old_txo;"))
p.ctx.execute(CreateTable(TXO, include_foreign_key_constraints=[]))
if p.ctx.is_postgres:
p.ctx.execute(text("ALTER TABLE txo DROP CONSTRAINT txo_pkey;"))
p.step()
# 6. insert
old_txo = table("old_txo", *(c.copy() for c in TXO.columns)) # pylint: disable=not-an-iterable
columns = [c for c in old_txo.columns if c.name != "spent_height"]
insert_columns = columns + [TXO.c.spent_height]
select_columns = columns + [func.coalesce(TXI.c.height, 0).label("spent_height")]
join_txo_on_txi = old_txo.join(TXI, old_txo.c.txo_hash == TXI.c.txo_hash, isouter=True)
select_txos = select(*select_columns).select_from(join_txo_on_txi)
insert_txos = TXO.insert().from_select(insert_columns, select_txos)
p.ctx.execute(insert_txos)
p.step()
# 7. drop old txo
p.ctx.execute(text("DROP TABLE old_txo;"))
if p.ctx.is_postgres:
p.ctx.execute_notx(text("VACUUM ANALYZE txo;"))
p.step()
for constraint in pg_add_txo_constraints_and_indexes:
if p.ctx.is_postgres:
p.ctx.execute(text(constraint))
p.step()
else:
p.start(5)
# 1. Update spent TXOs setting spent_height
update_spent_outputs(p.ctx)
p.step()
# 2. Update TXIs to have the address of TXO they are spending.
set_input_addresses(p.ctx)
p.step()
# 3. Update tx visibility map, which speeds up index-only scans.
if p.ctx.is_postgres:
p.ctx.execute_notx(text("VACUUM tx;"))
p.step()
# 4. Update txi visibility map, which speeds up index-only scans.
if p.ctx.is_postgres:
p.ctx.execute_notx(text("VACUUM txi;"))
p.step()
# 5. Update txo visibility map, which speeds up index-only scans.
if p.ctx.is_postgres:
p.ctx.execute_notx(text("VACUUM txo;"))
p.step()
@event_emitter("blockchain.sync.filter.generate", "blocks")
def sync_block_filters(p: ProgressContext):
blocks = []
all_filters = []
all_addresses = []
for block in get_blocks_without_filters():
addresses = {
p.ctx.ledger.address_to_hash160(r["address"])
for r in get_block_tx_addresses(block_hash=block["block_hash"])
}
all_addresses.extend(addresses)
block_filter = create_block_filter(addresses)
all_filters.append(block_filter)
blocks.append({"pk": block["block_hash"], "block_filter": block_filter})
p.ctx.execute(
BlockTable.update().where(BlockTable.c.block_hash == bindparam("pk")), blocks
)
def get_blocks_without_filters():
return context().fetchall(
select(BlockTable.c.block_hash)
.where(BlockTable.c.block_filter.is_(None))
)
def get_block_tx_addresses(block_hash=None, tx_hash=None):
if block_hash is not None:
constraint = (TX.c.block_hash == block_hash)
elif tx_hash is not None:
constraint = (TX.c.tx_hash == tx_hash)
else:
raise ValueError('block_hash or tx_hash must be provided.')
return context().fetchall(
union(
select(TXO.c.address).select_from(TXO.join(TX))
.where((TXO.c.address.isnot(None)) & constraint),
select(TXI.c.address).select_from(TXI.join(TX))
.where((TXI.c.address.isnot(None)) & constraint),
)
)
@event_emitter("blockchain.sync.rewind.main", "steps")
def rewind(height: int, p: ProgressContext):
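# Tag rows are removed via the sub-select before the Claim rows they belong to,
# presumably because the tag table has no height column of its own to filter by.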
deletes = [
BlockTable.delete().where(BlockTable.c.height >= height),
TXI.delete().where(TXI.c.height >= height),
TXO.delete().where(TXO.c.height >= height),
TX.delete().where(TX.c.height >= height),
Tag.delete().where(
Tag.c.claim_hash.in_(
select(Claim.c.claim_hash).where(Claim.c.height >= height)
)
),
Claim.delete().where(Claim.c.height >= height),
Support.delete().where(Support.c.height >= height),
]
for delete in p.iter(deletes):
p.ctx.execute(delete)

View file

@ -0,0 +1,257 @@
import logging
from typing import Tuple
from sqlalchemy import case, func, desc, text
from sqlalchemy.future import select
from lbry.db.queries.txio import (
minimum_txo_columns, row_to_txo,
where_unspent_txos, where_claims_with_changed_supports,
count_unspent_txos, where_channels_with_changed_content,
where_abandoned_claims, count_channels_with_changed_content
)
from lbry.db.query_context import ProgressContext, event_emitter
from lbry.db.tables import TX, TXO, Claim, Support, pg_add_claim_and_tag_constraints_and_indexes
from lbry.db.utils import least
from lbry.db.constants import TXO_TYPES, CLAIM_TYPE_CODES
from lbry.blockchain.transaction import Output
from .context import get_or_initialize_lbrycrd
log = logging.getLogger(__name__)
def channel_content_count_calc(signable):
return (
select(func.count(signable.c.claim_hash))
.where((signable.c.channel_hash == Claim.c.claim_hash) & signable.c.is_signature_valid)
.scalar_subquery()
)
support = TXO.alias('support')
def staked_support_aggregation(aggregate):
return (
select(aggregate).where(
(support.c.txo_type == TXO_TYPES['support']) &
(support.c.spent_height == 0)
).scalar_subquery()
)
def staked_support_amount_calc(other):
return (
staked_support_aggregation(func.coalesce(func.sum(support.c.amount), 0))
.where(support.c.claim_hash == other.c.claim_hash)
)
def staked_support_count_calc(other):
return (
staked_support_aggregation(func.coalesce(func.count('*'), 0))
.where(support.c.claim_hash == other.c.claim_hash)
)
def make_label(action, blocks):
if blocks[0] == blocks[-1]:
return f"{action} {blocks[0]:>6}"
else:
return f"{action} {blocks[0]:>6}-{blocks[-1]:>6}"
def select_claims_for_saving(
blocks: Tuple[int, int],
missing_in_claims_table=False,
missing_or_stale_in_claims_table=False,
):
channel_txo = TXO.alias('channel_txo')
return select(
*minimum_txo_columns, TXO.c.claim_hash,
staked_support_amount_calc(TXO).label('staked_support_amount'),
staked_support_count_calc(TXO).label('staked_support_count'),
TXO.c.signature, TXO.c.signature_digest,
case([(
TXO.c.channel_hash.isnot(None),
select(channel_txo.c.public_key).select_from(channel_txo).where(
(channel_txo.c.txo_type == TXO_TYPES['channel']) &
(channel_txo.c.claim_hash == TXO.c.channel_hash) &
(channel_txo.c.height <= TXO.c.height)
).order_by(desc(channel_txo.c.height)).limit(1).scalar_subquery()
)]).label('channel_public_key')
).where(
where_unspent_txos(
CLAIM_TYPE_CODES, blocks,
missing_in_claims_table=missing_in_claims_table,
missing_or_stale_in_claims_table=missing_or_stale_in_claims_table,
)
).select_from(TXO.join(TX))
def row_to_claim_for_saving(row) -> Tuple[Output, dict]:
return row_to_txo(row), {
'staked_support_amount': int(row.staked_support_amount),
'staked_support_count': int(row.staked_support_count),
'signature': row.signature,
'signature_digest': row.signature_digest,
'channel_public_key': row.channel_public_key
}
@event_emitter("blockchain.sync.claims.insert", "claims")
def claims_insert(
blocks: Tuple[int, int],
missing_in_claims_table: bool,
flush_size: int,
p: ProgressContext
):
chain = get_or_initialize_lbrycrd(p.ctx)
p.start(
count_unspent_txos(
CLAIM_TYPE_CODES, blocks,
missing_in_claims_table=missing_in_claims_table,
), progress_id=blocks[0], label=make_label("add claims", blocks)
)
with p.ctx.connect_streaming() as c:
loader = p.ctx.get_bulk_loader()
cursor = c.execute(select_claims_for_saving(
blocks, missing_in_claims_table=missing_in_claims_table
).order_by(TXO.c.claim_hash))
for rows in cursor.partitions(900):
claim_metadata = chain.db.sync_get_claim_metadata(
claim_hashes=[row['claim_hash'] for row in rows]
)
i = 0
for row in rows:
metadata = claim_metadata[i] if i < len(claim_metadata) else {}
if metadata and metadata['claim_hash'] == row.claim_hash:
i += 1
else:
metadata = {}  # no matching lbrycrd metadata for this claim; leave fields empty
txo, extra = row_to_claim_for_saving(row)
extra.update({
'short_url': metadata.get('short_url'),
'creation_height': metadata.get('creation_height'),
'activation_height': metadata.get('activation_height'),
'expiration_height': metadata.get('expiration_height'),
'takeover_height': metadata.get('takeover_height'),
})
loader.add_claim(txo, **extra)
if len(loader.claims) >= flush_size:
p.add(loader.flush(Claim))
p.add(loader.flush(Claim))
@event_emitter("blockchain.sync.claims.indexes", "steps")
def claims_constraints_and_indexes(p: ProgressContext):
p.start(2 + len(pg_add_claim_and_tag_constraints_and_indexes))
if p.ctx.is_postgres:
p.ctx.execute_notx(text("VACUUM ANALYZE claim;"))
p.step()
if p.ctx.is_postgres:
p.ctx.execute_notx(text("VACUUM ANALYZE tag;"))
p.step()
for constraint in pg_add_claim_and_tag_constraints_and_indexes:
if p.ctx.is_postgres:
p.ctx.execute(text(constraint))
p.step()
@event_emitter("blockchain.sync.claims.vacuum", "steps")
def claims_vacuum(p: ProgressContext):
p.start(2)
if p.ctx.is_postgres:
p.ctx.execute_notx(text("VACUUM claim;"))
p.step()
if p.ctx.is_postgres:
p.ctx.execute_notx(text("VACUUM tag;"))
p.step()
@event_emitter("blockchain.sync.claims.update", "claims")
def claims_update(blocks: Tuple[int, int], p: ProgressContext):
p.start(
count_unspent_txos(CLAIM_TYPE_CODES, blocks, missing_or_stale_in_claims_table=True),
progress_id=blocks[0], label=make_label("mod claims", blocks)
)
with p.ctx.connect_streaming() as c:
loader = p.ctx.get_bulk_loader()
cursor = c.execute(select_claims_for_saving(
blocks, missing_or_stale_in_claims_table=True
))
for row in cursor:
txo, extra = row_to_claim_for_saving(row)
loader.update_claim(txo, **extra)
if len(loader.update_claims) >= 25:
p.add(loader.flush(Claim))
p.add(loader.flush(Claim))
@event_emitter("blockchain.sync.claims.delete", "claims")
def claims_delete(claims, p: ProgressContext):
p.start(claims, label="del claims")
deleted = p.ctx.execute(Claim.delete().where(where_abandoned_claims()))
p.step(deleted.rowcount)
@event_emitter("blockchain.sync.claims.takeovers", "claims")
def update_takeovers(blocks: Tuple[int, int], takeovers, p: ProgressContext):
p.start(takeovers, label=make_label("mod winner", blocks))
chain = get_or_initialize_lbrycrd(p.ctx)
with p.ctx.engine.begin() as c:
for takeover in chain.db.sync_get_takeovers(start_height=blocks[0], end_height=blocks[-1]):
update_claims = (
Claim.update()
.where(Claim.c.normalized == takeover['normalized'])
.values(
is_controlling=case(
[(Claim.c.claim_hash == takeover['claim_hash'], True)],
else_=False
),
takeover_height=case(
[(Claim.c.claim_hash == takeover['claim_hash'], takeover['height'])],
else_=None
),
activation_height=least(Claim.c.activation_height, takeover['height']),
)
)
result = c.execute(update_claims)
p.add(result.rowcount)
@event_emitter("blockchain.sync.claims.stakes", "claims")
def update_stakes(blocks: Tuple[int, int], claims: int, p: ProgressContext):
p.start(claims)
sql = (
Claim.update()
.where(where_claims_with_changed_supports(blocks))
.values(
staked_support_amount=staked_support_amount_calc(Claim),
staked_support_count=staked_support_count_calc(Claim),
)
)
result = p.ctx.execute(sql)
p.step(result.rowcount)
@event_emitter("blockchain.sync.claims.channels", "channels")
def update_channel_stats(blocks: Tuple[int, int], initial_sync: int, p: ProgressContext):
update_sql = Claim.update().values(
signed_claim_count=channel_content_count_calc(Claim.alias('content')),
signed_support_count=channel_content_count_calc(Support),
)
if initial_sync:
p.start(p.ctx.fetchtotal(Claim.c.claim_type == TXO_TYPES['channel']), label="channel stats")
update_sql = update_sql.where(Claim.c.claim_type == TXO_TYPES['channel'])
elif blocks:
p.start(count_channels_with_changed_content(blocks), label="channel stats")
update_sql = update_sql.where(where_channels_with_changed_content(blocks))
else:
return
result = p.ctx.execute(update_sql)
if result.rowcount and p.ctx.is_postgres:
p.ctx.execute_notx(text("VACUUM claim;"))
p.step(result.rowcount)


@@ -0,0 +1,17 @@
from contextvars import ContextVar
from lbry.db import query_context
from lbry.blockchain.lbrycrd import Lbrycrd
_chain: ContextVar[Lbrycrd] = ContextVar('chain')
def get_or_initialize_lbrycrd(ctx=None) -> Lbrycrd:
chain = _chain.get(None)
if chain is not None:
return chain
chain = Lbrycrd((ctx or query_context.context()).ledger)
chain.db.sync_open()
_chain.set(chain)
return chain
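The ContextVar caches one open Lbrycrd handle per execution context, so each worker process opens the chain database once; assuming a query context is already configured, repeated calls return the same object:

chain = get_or_initialize_lbrycrd()
assert chain is get_or_initialize_lbrycrd()  # reused from the ContextVar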


@@ -0,0 +1,95 @@
import logging
from typing import Tuple
from sqlalchemy import case, desc, text
from sqlalchemy.future import select
from lbry.db.tables import TX, TXO, Support, pg_add_support_constraints_and_indexes
from lbry.db.query_context import ProgressContext, event_emitter
from lbry.db.queries import row_to_txo
from lbry.db.constants import TXO_TYPES
from lbry.db.queries.txio import (
minimum_txo_columns,
where_unspent_txos, where_abandoned_supports,
count_unspent_txos,
)
from .claims import make_label
log = logging.getLogger(__name__)
@event_emitter("blockchain.sync.supports.insert", "supports")
def supports_insert(
blocks: Tuple[int, int],
missing_in_supports_table: bool,
flush_size: int,
p: ProgressContext
):
p.start(
count_unspent_txos(
TXO_TYPES['support'], blocks,
missing_in_supports_table=missing_in_supports_table,
), progress_id=blocks[0], label=make_label("add supprt", blocks)
)
channel_txo = TXO.alias('channel_txo')
select_supports = select(
*minimum_txo_columns, TXO.c.claim_hash,
TXO.c.signature, TXO.c.signature_digest,
case([(
TXO.c.channel_hash.isnot(None),
select(channel_txo.c.public_key).select_from(channel_txo).where(
(channel_txo.c.txo_type == TXO_TYPES['channel']) &
(channel_txo.c.claim_hash == TXO.c.channel_hash) &
(channel_txo.c.height <= TXO.c.height)
).order_by(desc(channel_txo.c.height)).limit(1).scalar_subquery()
)]).label('channel_public_key'),
).select_from(
TXO.join(TX)
).where(
where_unspent_txos(
TXO_TYPES['support'], blocks,
missing_in_supports_table=missing_in_supports_table,
)
)
with p.ctx.connect_streaming() as c:
loader = p.ctx.get_bulk_loader()
for row in c.execute(select_supports):
txo = row_to_txo(row)
loader.add_support(
txo,
signature=row.signature,
signature_digest=row.signature_digest,
channel_public_key=row.channel_public_key
)
if len(loader.supports) >= flush_size:
p.add(loader.flush(Support))
p.add(loader.flush(Support))
@event_emitter("blockchain.sync.supports.delete", "supports")
def supports_delete(supports, p: ProgressContext):
p.start(supports, label="del supprt")
deleted = p.ctx.execute(Support.delete().where(where_abandoned_supports()))
p.step(deleted.rowcount)
@event_emitter("blockchain.sync.supports.indexes", "steps")
def supports_constraints_and_indexes(p: ProgressContext):
p.start(1 + len(pg_add_support_constraints_and_indexes))
if p.ctx.is_postgres:
p.ctx.execute_notx(text("VACUUM ANALYZE support;"))
p.step()
for constraint in pg_add_support_constraints_and_indexes:
if p.ctx.is_postgres:
p.ctx.execute(text(constraint))
p.step()
@event_emitter("blockchain.sync.supports.vacuum", "steps")
def supports_vacuum(p: ProgressContext):
p.start(1)
if p.ctx.is_postgres:
p.ctx.execute_notx(text("VACUUM support;"))
p.step()


@@ -0,0 +1,309 @@
import os
import asyncio
import logging
from typing import Optional, Tuple, Set, List, Coroutine
from lbry.db import Database
from lbry.db import queries as q
from lbry.db.constants import TXO_TYPES, CLAIM_TYPE_CODES
from lbry.db.query_context import Event, Progress
from lbry.event import BroadcastSubscription
from lbry.service.base import Sync, BlockEvent
from lbry.blockchain.lbrycrd import Lbrycrd
from . import blocks as block_phase, claims as claim_phase, supports as support_phase
log = logging.getLogger(__name__)
BLOCKS_INIT_EVENT = Event.add("blockchain.sync.blocks.init", "steps")
BLOCKS_MAIN_EVENT = Event.add("blockchain.sync.blocks.main", "blocks", "txs")
FILTER_INIT_EVENT = Event.add("blockchain.sync.filter.init", "steps")
FILTER_MAIN_EVENT = Event.add("blockchain.sync.filter.main", "blocks")
CLAIMS_INIT_EVENT = Event.add("blockchain.sync.claims.init", "steps")
CLAIMS_MAIN_EVENT = Event.add("blockchain.sync.claims.main", "claims")
TRENDS_INIT_EVENT = Event.add("blockchain.sync.trends.init", "steps")
TRENDS_MAIN_EVENT = Event.add("blockchain.sync.trends.main", "blocks")
SUPPORTS_INIT_EVENT = Event.add("blockchain.sync.supports.init", "steps")
SUPPORTS_MAIN_EVENT = Event.add("blockchain.sync.supports.main", "supports")
class BlockchainSync(Sync):
TX_FLUSH_SIZE = 25_000 # flush to db after processing this many TXs and update progress
CLAIM_FLUSH_SIZE = 25_000 # flush to db after processing this many claims and update progress
SUPPORT_FLUSH_SIZE = 25_000 # flush to db after processing this many supports and update progress
FILTER_FLUSH_SIZE = 10_000 # flush to db after processing this many filters and update progress
def __init__(self, chain: Lbrycrd, db: Database):
super().__init__(chain.ledger, db)
self.chain = chain
self.pid = os.getpid()
self.on_block_subscription: Optional[BroadcastSubscription] = None
self.advance_loop_task: Optional[asyncio.Task] = None
self.advance_loop_event = asyncio.Event()
async def start(self):
self.db.stop_event.clear()
await self.chain.ensure_subscribable()
self.advance_loop_task = asyncio.create_task(self.advance())
await self.advance_loop_task
await self.chain.subscribe()
self.advance_loop_task = asyncio.create_task(self.advance_loop())
self.on_block_subscription = self.chain.on_block.listen(
lambda e: self.advance_loop_event.set()
)
async def stop(self):
self.chain.unsubscribe()
if self.on_block_subscription is not None:
self.on_block_subscription.cancel()
self.db.stop_event.set()
if self.advance_loop_task is not None:
self.advance_loop_task.cancel()
async def run_tasks(self, tasks: List[Coroutine]) -> Optional[Set[asyncio.Future]]:
done, pending = await asyncio.wait(
tasks, return_when=asyncio.FIRST_EXCEPTION
)
if pending:
self.db.stop_event.set()
for future in pending:
future.cancel()
for future in done:
future.result()
return
return done
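run_tasks relies on asyncio.FIRST_EXCEPTION semantics: the failing task lands in the done set, and calling .result() on it re-raises. A self-contained sketch of the same pattern, with hypothetical coroutine names:

import asyncio

async def slow_worker():
    await asyncio.sleep(0.01)

async def failing_worker():
    raise RuntimeError("worker failed")

async def demo():
    done, pending = await asyncio.wait(
        [asyncio.ensure_future(slow_worker()), asyncio.ensure_future(failing_worker())],
        return_when=asyncio.FIRST_EXCEPTION
    )
    for future in pending:
        future.cancel()    # stop the still-running workers first
    for future in done:
        future.result()    # re-raises RuntimeError from failing_worker

try:
    asyncio.run(demo())
except RuntimeError as e:
    print(e)  # -> worker failed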
async def get_best_block_height_for_file(self, file_number) -> int:
return await self.db.run(
block_phase.get_best_block_height_for_file, file_number
)
async def sync_blocks(self) -> Optional[Tuple[int, int]]:
tasks = []
starting_height = None
tx_count = block_count = 0
with Progress(self.db.message_queue, BLOCKS_INIT_EVENT) as p:
ending_height = await self.chain.db.get_best_height()
for chain_file in p.iter(await self.chain.db.get_block_files()):
# block files may be read and saved out of order, so check each
# file individually to see if we have missing blocks
our_best_file_height = await self.get_best_block_height_for_file(
chain_file['file_number']
)
if our_best_file_height == chain_file['best_height']:
# we have all blocks in this file, skipping
continue
if -1 < our_best_file_height < chain_file['best_height']:
# we have some blocks, need to figure out what we're missing
# call get_block_files again limited to this file and current_height
chain_file = (await self.chain.db.get_block_files(
file_number=chain_file['file_number'], start_height=our_best_file_height+1,
))[0]
tx_count += chain_file['txs']
block_count += chain_file['blocks']
file_start_height = chain_file['start_height']
starting_height = min(
file_start_height if starting_height is None else starting_height,
file_start_height
)
tasks.append(self.db.run(
block_phase.sync_block_file, chain_file['file_number'], file_start_height,
chain_file['txs'], self.TX_FLUSH_SIZE
))
with Progress(self.db.message_queue, BLOCKS_MAIN_EVENT) as p:
p.start(block_count, tx_count, extra={
"starting_height": starting_height,
"ending_height": ending_height,
"files": len(tasks),
"claims": await self.chain.db.get_claim_metadata_count(starting_height, ending_height),
"supports": await self.chain.db.get_support_metadata_count(starting_height, ending_height),
})
completed = await self.run_tasks(tasks)
if completed:
best_height_processed = max(f.result() for f in completed)
return starting_height, best_height_processed
async def sync_filters(self):
if not self.conf.spv_address_filters:
return
with Progress(self.db.message_queue, FILTER_MAIN_EVENT) as p:
blocks = 0
tasks = []
# for chunk in range(select min(height), max(height) from block where filter is null):
# tasks.append(self.db.run(block_phase.sync_filters, chunk, self.FILTER_FLUSH_SIZE))
p.start(blocks)
await self.run_tasks(tasks)
async def sync_spends(self, blocks_added):
if blocks_added:
await self.db.run(block_phase.sync_spends, blocks_added[0] == 0)
async def count_unspent_txos(
self,
txo_types: Tuple[int, ...],
blocks: Tuple[int, int] = None,
missing_in_supports_table: bool = False,
missing_in_claims_table: bool = False,
missing_or_stale_in_claims_table: bool = False,
) -> int:
return await self.db.run(
q.count_unspent_txos, txo_types, blocks,
missing_in_supports_table,
missing_in_claims_table,
missing_or_stale_in_claims_table,
)
async def distribute_unspent_txos(
self,
txo_types: Tuple[int, ...],
blocks: Tuple[int, int] = None,
missing_in_supports_table: bool = False,
missing_in_claims_table: bool = False,
missing_or_stale_in_claims_table: bool = False,
) -> int:
return await self.db.run(
q.distribute_unspent_txos, txo_types, blocks,
missing_in_supports_table,
missing_in_claims_table,
missing_or_stale_in_claims_table,
self.db.workers
)
async def count_abandoned_supports(self) -> int:
return await self.db.run(q.count_abandoned_supports)
async def count_abandoned_claims(self) -> int:
return await self.db.run(q.count_abandoned_claims)
async def count_claims_with_changed_supports(self, blocks) -> int:
return await self.db.run(q.count_claims_with_changed_supports, blocks)
async def count_channels_with_changed_content(self, blocks) -> int:
return await self.db.run(q.count_channels_with_changed_content, blocks)
async def count_takeovers(self, blocks) -> int:
return await self.chain.db.get_takeover_count(
start_height=blocks[0], end_height=blocks[-1]
)
async def sync_claims(self, blocks) -> bool:
delete_claims = takeovers = claims_with_changed_supports = 0
initial_sync = not await self.db.has_claims()
with Progress(self.db.message_queue, CLAIMS_INIT_EVENT) as p:
if initial_sync:
total, batches = await self.distribute_unspent_txos(CLAIM_TYPE_CODES)
elif blocks:
p.start(4)
# 1. content claims to be inserted or updated
total = await self.count_unspent_txos(
CLAIM_TYPE_CODES, blocks, missing_or_stale_in_claims_table=True
)
batches = [blocks] if total else []
p.step()
# 2. claims to be deleted
delete_claims = await self.count_abandoned_claims()
total += delete_claims
p.step()
# 3. claims to be updated with new support totals
claims_with_changed_supports = await self.count_claims_with_changed_supports(blocks)
total += claims_with_changed_supports
p.step()
# 4. claims to be updated due to name takeovers
takeovers = await self.count_takeovers(blocks)
total += takeovers
p.step()
else:
return initial_sync
with Progress(self.db.message_queue, CLAIMS_MAIN_EVENT) as p:
p.start(total)
if batches:
await self.run_tasks([
self.db.run(claim_phase.claims_insert, batch, not initial_sync, self.CLAIM_FLUSH_SIZE)
for batch in batches
])
if not initial_sync:
await self.run_tasks([
self.db.run(claim_phase.claims_update, batch) for batch in batches
])
if delete_claims:
await self.db.run(claim_phase.claims_delete, delete_claims)
if takeovers:
await self.db.run(claim_phase.update_takeovers, blocks, takeovers)
if claims_with_changed_supports:
await self.db.run(claim_phase.update_stakes, blocks, claims_with_changed_supports)
if initial_sync:
await self.db.run(claim_phase.claims_constraints_and_indexes)
else:
await self.db.run(claim_phase.claims_vacuum)
return initial_sync
async def sync_supports(self, blocks):
delete_supports = 0
initial_sync = not await self.db.has_supports()
with Progress(self.db.message_queue, SUPPORTS_INIT_EVENT) as p:
if initial_sync:
total, support_batches = await self.distribute_unspent_txos(TXO_TYPES['support'])
elif blocks:
p.start(2)
# 1. supports to be inserted
total = await self.count_unspent_txos(
TXO_TYPES['support'], blocks, missing_in_supports_table=True
)
support_batches = [blocks] if total else []
p.step()
# 2. supports to be deleted
delete_supports = await self.count_abandoned_supports()
total += delete_supports
p.step()
else:
return
with Progress(self.db.message_queue, SUPPORTS_MAIN_EVENT) as p:
p.start(total)
if support_batches:
await self.run_tasks([
self.db.run(
support_phase.supports_insert, batch, not initial_sync, self.SUPPORT_FLUSH_SIZE
) for batch in support_batches
])
if delete_supports:
await self.db.run(support_phase.supports_delete, delete_supports)
if initial_sync:
await self.db.run(support_phase.supports_constraints_and_indexes)
else:
await self.db.run(support_phase.supports_vacuum)
async def sync_channel_stats(self, blocks, initial_sync):
await self.db.run(claim_phase.update_channel_stats, blocks, initial_sync)
async def sync_trends(self):
pass
async def advance(self):
blocks_added = await self.sync_blocks()
sync_filters_task = asyncio.create_task(self.sync_filters())
sync_trends_task = asyncio.create_task(self.sync_trends())
await self.sync_spends(blocks_added)
initial_claim_sync = await self.sync_claims(blocks_added)
await self.sync_supports(blocks_added)
await self.sync_channel_stats(blocks_added, initial_claim_sync)
await sync_trends_task
await sync_filters_task
if blocks_added:
await self._on_block_controller.add(BlockEvent(blocks_added[-1]))
async def advance_loop(self):
while True:
await self.advance_loop_event.wait()
self.advance_loop_event.clear()
try:
await self.advance()
except asyncio.CancelledError:
return
except Exception as e:
log.exception(e)
await self.stop()
async def rewind(self, height):
await self.db.run(block_phase.rewind, height)
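A rough, hypothetical wiring sketch for this class; the Database constructor is not shown in this diff, so its ledger argument is an assumption:

import asyncio
from lbry.db import Database
from lbry.blockchain.lbrycrd import Lbrycrd

async def run_sync(ledger):
    chain = Lbrycrd(ledger)
    sync = BlockchainSync(chain, Database(ledger))  # assumed Database(ledger) signature
    await sync.start()   # initial catch-up, then a loop follows new blocks
    try:
        await asyncio.Event().wait()   # park until cancelled
    finally:
        await sync.stop()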


@@ -1,10 +1,19 @@
import struct
import hashlib
import logging
import typing
import asyncio
from datetime import date
from binascii import hexlify, unhexlify
from typing import List, Iterable, Optional, Tuple
from typing import List, Iterable, Optional, Union
import ecdsa
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import load_der_public_key
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import Prehashed
from cryptography.exceptions import InvalidSignature
from lbry.error import InsufficientFundsError
from lbry.crypto.hash import hash160, sha256
from lbry.crypto.base58 import Base58
from lbry.schema.url import normalize_name
@@ -14,16 +23,9 @@ from lbry.schema.purchase import Purchase
from lbry.schema.support import Support
from .script import InputScript, OutputScript
from .constants import COIN, DUST, NULL_HASH32
from .bcd_data_stream import BCDataStream
from .hash import TXRef, TXRefImmutable
from .util import ReadOnlyList
from .bip32 import PrivateKey, PublicKey
if typing.TYPE_CHECKING:
from lbry.wallet.account import Account
from lbry.wallet.ledger import Ledger
from lbry.wallet.wallet import Wallet
log = logging.getLogger()
@@ -52,6 +54,10 @@ class TXRefMutable(TXRef):
def height(self):
return self.tx.height
@property
def timestamp(self):
return self.tx.timestamp
def reset(self):
self._id = None
self._hash = None
@@ -101,7 +107,7 @@ class InputOutput:
__slots__ = 'tx_ref', 'position'
def __init__(self, tx_ref: TXRef = None, position: int = None) -> None:
def __init__(self, tx_ref: Union[TXRef, TXRefImmutable] = None, position: int = None) -> None:
self.tx_ref = tx_ref
self.position = position
@@ -123,6 +129,7 @@ class Input(InputOutput):
NULL_SIGNATURE = b'\x00'*72
NULL_PUBLIC_KEY = b'\x00'*33
NULL_HASH32 = b'\x00'*32
__slots__ = 'txo_ref', 'sequence', 'coinbase', 'script'
@@ -146,12 +153,10 @@
return cls(txo.ref, script)
@classmethod
def spend_time_lock(cls, txo: 'Output', script_source: bytes) -> 'Input':
""" Create an input to spend time lock script."""
script = InputScript.redeem_time_lock_script_hash(
cls.NULL_SIGNATURE, cls.NULL_PUBLIC_KEY, script_source=script_source
)
return cls(txo.ref, script)
def create_coinbase(cls) -> 'Input':
tx_ref = TXRefImmutable.from_hash(cls.NULL_HASH32, 0, 0)
txo_ref = TXORef(tx_ref, 0)
return cls(txo_ref, b'beef')
@property
def amount(self) -> int:
@@ -169,7 +174,7 @@
@classmethod
def deserialize_from(cls, stream):
tx_ref = TXRefImmutable.from_hash(stream.read(32), -1)
tx_ref = TXRefImmutable.from_hash(stream.read(32), -1, -1)
position = stream.read_uint32()
script = stream.read_string()
sequence = stream.read_uint32()
@@ -192,24 +197,10 @@
stream.write_uint32(self.sequence)
class OutputEffectiveAmountEstimator:
__slots__ = 'txo', 'txi', 'fee', 'effective_amount'
def __init__(self, ledger: 'Ledger', txo: 'Output') -> None:
self.txo = txo
self.txi = Input.spend(txo)
self.fee: int = self.txi.get_fee(ledger)
self.effective_amount: int = txo.amount - self.fee
def __lt__(self, other):
return self.effective_amount < other.effective_amount
class Output(InputOutput):
__slots__ = (
'amount', 'script', 'is_internal_transfer', 'is_spent', 'is_my_output', 'is_my_input',
'amount', 'script', 'is_internal_transfer', 'spent_height', 'is_my_output', 'is_my_input',
'channel', 'private_key', 'meta', 'sent_supports', 'sent_tips', 'received_tips',
'purchase', 'purchased_claim', 'purchase_receipt',
'reposted_claim', 'claims', '_signable'
@@ -217,25 +208,24 @@ class Output(InputOutput):
def __init__(self, amount: int, script: OutputScript,
tx_ref: TXRef = None, position: int = None,
is_internal_transfer: Optional[bool] = None, is_spent: Optional[bool] = None,
is_internal_transfer: Optional[bool] = None, spent_height: Optional[bool] = None,
is_my_output: Optional[bool] = None, is_my_input: Optional[bool] = None,
sent_supports: Optional[int] = None, sent_tips: Optional[int] = None,
received_tips: Optional[int] = None,
channel: Optional['Output'] = None,
private_key: Optional[PrivateKey] = None
channel: Optional['Output'] = None, private_key: Optional[str] = None
) -> None:
super().__init__(tx_ref, position)
self.amount = amount
self.script = script
self.is_internal_transfer = is_internal_transfer
self.is_spent = is_spent
self.spent_height = spent_height
self.is_my_output = is_my_output
self.is_my_input = is_my_input
self.sent_supports = sent_supports
self.sent_tips = sent_tips
self.received_tips = received_tips
self.channel = channel
self.private_key: PrivateKey = private_key
self.private_key = private_key
self.purchase: 'Output' = None # txo containing purchase metadata
self.purchased_claim: 'Output' = None # resolved claim pointed to by purchase
self.purchase_receipt: 'Output' = None # txo representing purchase receipt for this claim
@@ -247,7 +237,7 @@ class Output(InputOutput):
def update_annotations(self, annotated: 'Output'):
if annotated is None:
self.is_internal_transfer = None
self.is_spent = None
self.spent_height = None
self.is_my_output = None
self.is_my_input = None
self.sent_supports = None
@@ -255,7 +245,7 @@
self.received_tips = None
else:
self.is_internal_transfer = annotated.is_internal_transfer
self.is_spent = annotated.is_spent
self.spent_height = annotated.spent_height
self.is_my_output = annotated.is_my_output
self.is_my_input = annotated.is_my_input
self.sent_supports = annotated.sent_supports
@@ -273,48 +263,36 @@
return self.ref.id
@property
def is_pubkey_hash(self):
return 'pubkey_hash' in self.script.values
def hash(self):
return self.ref.hash
@property
def is_spent(self):
if self.spent_height is not None:
return self.spent_height > 0
@property
def pubkey_hash(self):
return self.script.values['pubkey_hash']
@property
def is_script_hash(self):
return 'script_hash' in self.script.values
@property
def script_hash(self):
return self.script.values['script_hash']
@property
def has_address(self):
return self.is_pubkey_hash or self.is_script_hash
return 'pubkey_hash' in self.script.values
def get_address(self, ledger):
if self.is_pubkey_hash:
return ledger.hash160_to_address(self.pubkey_hash)
elif self.is_script_hash:
return ledger.hash160_to_script_address(self.script_hash)
def get_estimator(self, ledger):
return OutputEffectiveAmountEstimator(ledger, self)
return ledger.hash160_to_address(self.pubkey_hash)
@classmethod
def pay_pubkey_hash(cls, amount, pubkey_hash):
return cls(amount, OutputScript.pay_pubkey_hash(pubkey_hash))
@classmethod
def pay_script_hash(cls, amount, pubkey_hash):
return cls(amount, OutputScript.pay_script_hash(pubkey_hash))
@classmethod
def deserialize_from(cls, stream):
return cls(
amount=stream.read_uint64(),
script=OutputScript(stream.read_string())
)
def deserialize_from(cls, stream, transaction_offset: int = 0):
amount = stream.read_uint64()
length = stream.read_compact_size()
offset = stream.tell()-transaction_offset
script = OutputScript(stream.read(length), offset=offset)
return cls(amount=amount, script=script)
def serialize_to(self, stream, alternate_script=None):
stream.write_uint64(self.amount)
@@ -400,6 +378,13 @@ class Output(InputOutput):
self._signable = self.support
return self._signable
@property
def can_decode_signable(self) -> Signable:
try:
return self.signable
except Exception:
return False
@property
def permanent_url(self) -> str:
if self.script.is_claim_involved:
@@ -425,15 +410,28 @@
]
return sha256(b''.join(pieces))
def get_encoded_signature(self):
signature = hexlify(self.signable.signature)
r = int(signature[:int(len(signature)/2)], 16)
s = int(signature[int(len(signature)/2):], 16)
return ecdsa.util.sigencode_der(r, s, len(signature)*4)
@staticmethod
def is_signature_valid(signature, digest, public_key_bytes):
return PublicKey\
.from_compressed(public_key_bytes)\
.verify(signature, digest)
def is_signature_valid(encoded_signature, signature_digest, public_key_bytes):
try:
public_key = load_der_public_key(public_key_bytes, default_backend())
public_key.verify( # pylint: disable=no-value-for-parameter
encoded_signature, signature_digest,
ec.ECDSA(Prehashed(hashes.SHA256()))
)
return True
except (ValueError, InvalidSignature):
pass
return False
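An illustrative round-trip sketch of this verification path, paired with the ecdsa-based key generation used elsewhere in this file (not part of the module):

import hashlib
import ecdsa
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import Prehashed
from cryptography.hazmat.primitives.serialization import load_der_public_key

signing_key = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
digest = hashlib.sha256(b'claim bytes').digest()
raw = signing_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
# convert the raw r||s signature into DER, which cryptography expects
r, s = ecdsa.util.sigdecode_string(raw, signing_key.curve.order)
der_signature = ecdsa.util.sigencode_der(r, s, signing_key.curve.order)
public_key = load_der_public_key(signing_key.get_verifying_key().to_der(), default_backend())
# raises InvalidSignature if the digest or key does not match
public_key.verify(der_signature, digest, ec.ECDSA(Prehashed(hashes.SHA256())))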
def is_signed_by(self, channel: 'Output', ledger=None):
return self.is_signature_valid(
self.signable.signature,
self.get_encoded_signature(),
self.get_signature_digest(ledger),
channel.claim.channel.public_key_bytes
)
@@ -446,27 +444,30 @@
self.signable.signing_channel_hash,
self.signable.to_message_bytes()
]))
self.signable.signature = channel.private_key.sign_compact(digest)
self.signable.signature = channel.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
self.script.generate()
def sign_data(self, data: bytes, timestamp: str) -> str:
pieces = [timestamp.encode(), self.claim_hash, data]
digest = sha256(b''.join(pieces))
signature = self.private_key.sign_compact(digest)
return hexlify(signature).decode()
def clear_signature(self):
self.channel = None
self.signable.clear_signature()
self.claim.clear_signature()
def set_channel_private_key(self, private_key: PrivateKey):
@staticmethod
def _sync_generate_channel_private_key():
private_key = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
public_key_bytes = private_key.get_verifying_key().to_der()
return private_key, public_key_bytes
async def generate_channel_private_key(self):
private_key, public_key_bytes = await asyncio.get_running_loop().run_in_executor(
None, Output._sync_generate_channel_private_key
)
self.private_key = private_key
self.claim.channel.public_key_bytes = private_key.public_key.pubkey_bytes
self.claim.channel.public_key_bytes = public_key_bytes
self.script.generate()
return self.private_key
def is_channel_private_key(self, private_key: PrivateKey):
return self.claim.channel.public_key_bytes == private_key.public_key.pubkey_bytes
def is_channel_private_key(self, private_key):
return self.claim.channel.public_key_bytes == private_key.get_verifying_key().to_der()
@classmethod
def pay_claim_name_pubkey_hash(
@@ -532,6 +533,13 @@ class Output(InputOutput):
if self.purchased_claim is not None:
return self.purchased_claim.claim_id
@property
def purchased_claim_hash(self):
if self.purchase is not None:
return self.purchase.purchase_data.claim_hash
if self.purchased_claim is not None:
return self.purchased_claim.claim_hash
@property
def has_price(self):
if self.can_decode_claim:
@@ -549,10 +557,9 @@
class Transaction:
def __init__(self, raw=None, version: int = 1, locktime: int = 0, is_verified: bool = False,
height: int = -2, position: int = -1, julian_day: int = None) -> None:
height: int = -2, position: int = -1, timestamp: int = 0) -> None:
self._raw = raw
self._raw_sans_segwit = None
self._raw_outputs = None
self.is_segwit_flag = 0
self.witnesses: List[bytes] = []
self.ref = TXRefMutable(self)
@@ -568,9 +575,13 @@ class Transaction:
# +num: confirmed in a specific block (height)
self.height = height
self.position = position
self._day = julian_day
self.timestamp = timestamp
self._day: int = 0
if raw is not None:
self._deserialize()
self.deserialize()
def __repr__(self):
return f"TX({self.id[:10]}...{self.id[-10:]})"
@property
def is_broadcast(self):
@@ -592,9 +603,10 @@
def hash(self):
return self.ref.hash
def get_julian_day(self, ledger):
if self._day is None and self.height > 0:
self._day = ledger.headers.estimated_julian_day(self.height)
@property
def day(self):
if self._day is None and self.timestamp > 0:
self._day = date.fromtimestamp(self.timestamp).toordinal()
return self._day
@property
@@ -614,7 +626,6 @@
def _reset(self):
self._raw = None
self._raw_sans_segwit = None
self._raw_outputs = None
self.ref.reset()
@property
@@ -708,7 +719,9 @@
stream.write_compact_size(len(self._inputs))
for txin in self._inputs:
txin.serialize_to(stream)
self._serialize_outputs(stream)
stream.write_compact_size(len(self._outputs))
for txout in self._outputs:
txout.serialize_to(stream)
stream.write_uint32(self.locktime)
return stream.get_bytes()
@@ -718,29 +731,21 @@
stream.write_compact_size(len(self._inputs))
for i, txin in enumerate(self._inputs):
if signing_input == i:
if txin.script.is_script_hash:
txin.serialize_to(stream, txin.script.values['script'].source)
else:
assert txin.txo_ref.txo is not None
txin.serialize_to(stream, txin.txo_ref.txo.script.source)
assert txin.txo_ref.txo is not None
txin.serialize_to(stream, txin.txo_ref.txo.script.source)
else:
txin.serialize_to(stream, b'')
self._serialize_outputs(stream)
stream.write_compact_size(len(self._outputs))
for txout in self._outputs:
txout.serialize_to(stream)
stream.write_uint32(self.locktime)
stream.write_uint32(self.signature_hash_type(1)) # signature hash type: SIGHASH_ALL
return stream.get_bytes()
def _serialize_outputs(self, stream):
if self._raw_outputs is None:
self._raw_outputs = BCDataStream()
self._raw_outputs.write_compact_size(len(self._outputs))
for txout in self._outputs:
txout.serialize_to(self._raw_outputs)
stream.write(self._raw_outputs.get_bytes())
def _deserialize(self):
if self._raw is not None:
stream = BCDataStream(self._raw)
def deserialize(self, stream=None):
if self._raw is not None or stream is not None:
stream = stream or BCDataStream(self._raw)
start = stream.tell()
self.version = stream.read_uint32()
input_count = stream.read_compact_size()
if input_count == 0:
@@ -751,7 +756,7 @@ class Transaction:
])
output_count = stream.read_compact_size()
self._add(self._outputs, [
Output.deserialize_from(stream) for _ in range(output_count)
Output.deserialize_from(stream, start) for _ in range(output_count)
])
if self.is_segwit_flag:
# drain witness portion of transaction
@@ -761,205 +766,12 @@
for _ in range(stream.read_compact_size()):
self.witnesses.append(stream.read(stream.read_compact_size()))
self.locktime = stream.read_uint32()
@classmethod
def ensure_all_have_same_ledger_and_wallet(
cls, funding_accounts: Iterable['Account'],
change_account: 'Account' = None) -> Tuple['Ledger', 'Wallet']:
ledger = wallet = None
for account in funding_accounts:
if ledger is None:
ledger = account.ledger
wallet = account.wallet
if ledger != account.ledger:
raise ValueError(
'All funding accounts used to create a transaction must be on the same ledger.'
)
if wallet != account.wallet:
raise ValueError(
'All funding accounts used to create a transaction must be from the same wallet.'
)
if change_account is not None:
if change_account.ledger != ledger:
raise ValueError('Change account must use same ledger as funding accounts.')
if change_account.wallet != wallet:
raise ValueError('Change account must use same wallet as funding accounts.')
if ledger is None:
raise ValueError('No ledger found.')
if wallet is None:
raise ValueError('No wallet found.')
return ledger, wallet
@classmethod
async def create(cls, inputs: Iterable[Input], outputs: Iterable[Output],
funding_accounts: Iterable['Account'], change_account: 'Account',
sign: bool = True):
""" Find optimal set of inputs when only outputs are provided; add change
outputs if only inputs are provided or if inputs are greater than outputs. """
tx = cls() \
.add_inputs(inputs) \
.add_outputs(outputs)
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
# value of the outputs plus associated fees
cost = (
tx.get_base_fee(ledger) +
tx.get_total_output_sum(ledger)
)
# value of the inputs less the cost to spend those inputs
payment = tx.get_effective_input_sum(ledger)
try:
for _ in range(5):
if payment < cost:
deficit = cost - payment
spendables = await ledger.get_spendable_utxos(deficit, funding_accounts)
if not spendables:
raise InsufficientFundsError()
payment += sum(s.effective_amount for s in spendables)
tx.add_inputs(s.txi for s in spendables)
cost_of_change = (
tx.get_base_fee(ledger) +
Output.pay_pubkey_hash(COIN, NULL_HASH32).get_fee(ledger)
)
if payment > cost:
change = payment - cost
change_amount = change - cost_of_change
if change_amount > DUST:
change_address = await change_account.change.get_or_create_usable_address()
change_hash160 = change_account.ledger.address_to_hash160(change_address)
change_output = Output.pay_pubkey_hash(change_amount, change_hash160)
change_output.is_internal_transfer = True
tx.add_outputs([Output.pay_pubkey_hash(change_amount, change_hash160)])
if tx._outputs:
break
# this condition and the outer range(5) loop cover an edge case
# whereby a single input is just enough to cover the fee and
# has some change left over, but the change left over is less
# than the cost_of_change: thus the input is completely
# consumed and no output is added, which is an invalid tx.
# to be able to spend this input we must increase the cost
# of the TX and run through the balance algorithm a second time
# adding an extra input and change output, making the tx valid.
# we do this 5 times in case the other UTXOs added are also
# less than the fee, after 5 attempts we give up and go home
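# hypothetical numbers: with cost=950, a single 1000-unit input gives
# change=50; if cost_of_change=150 then change_amount is negative, no
# output is added, and the bumped cost pulls in another UTXO next pass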
cost += cost_of_change + 1
if sign:
await tx.sign(funding_accounts)
except Exception as e:
log.exception('Failed to create transaction:')
await ledger.release_tx(tx)
raise e
return tx
return self
@staticmethod
def signature_hash_type(hash_type):
return hash_type
async def sign(self, funding_accounts: Iterable['Account'], extra_keys: dict = None):
self._reset()
ledger, wallet = self.ensure_all_have_same_ledger_and_wallet(funding_accounts)
for i, txi in enumerate(self._inputs):
assert txi.script is not None
assert txi.txo_ref.txo is not None
txo_script = txi.txo_ref.txo.script
if txo_script.is_pay_pubkey_hash or txo_script.is_pay_script_hash:
if 'pubkey_hash' in txo_script.values:
address = ledger.hash160_to_address(txo_script.values.get('pubkey_hash', ''))
private_key = await ledger.get_private_key_for_address(wallet, address)
else:
private_key = next(iter(extra_keys.values()))
assert private_key is not None, 'Cannot find private key for signing output.'
tx = self._serialize_for_signature(i)
txi.script.values['signature'] = \
private_key.sign(tx) + bytes((self.signature_hash_type(1),))
txi.script.values['pubkey'] = private_key.public_key.pubkey_bytes
txi.script.generate()
else:
raise NotImplementedError("Don't know how to spend this output.")
self._reset()
@classmethod
def pay(cls, amount: int, address: bytes, funding_accounts: List['Account'], change_account: 'Account'):
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
output = Output.pay_pubkey_hash(amount, ledger.address_to_hash160(address))
return cls.create([], [output], funding_accounts, change_account)
@classmethod
def claim_create(
cls, name: str, claim: Claim, amount: int, holding_address: str,
funding_accounts: List['Account'], change_account: 'Account', signing_channel: Output = None):
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
claim_output = Output.pay_claim_name_pubkey_hash(
amount, name, claim, ledger.address_to_hash160(holding_address)
)
if signing_channel is not None:
claim_output.sign(signing_channel, b'placeholder txid:nout')
return cls.create([], [claim_output], funding_accounts, change_account, sign=False)
@classmethod
def claim_update(
cls, previous_claim: Output, claim: Claim, amount: int, holding_address: str,
funding_accounts: List['Account'], change_account: 'Account', signing_channel: Output = None):
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
updated_claim = Output.pay_update_claim_pubkey_hash(
amount, previous_claim.claim_name, previous_claim.claim_id,
claim, ledger.address_to_hash160(holding_address)
)
if signing_channel is not None:
updated_claim.sign(signing_channel, b'placeholder txid:nout')
else:
updated_claim.clear_signature()
return cls.create(
[Input.spend(previous_claim)], [updated_claim], funding_accounts, change_account, sign=False
)
@classmethod
def support(cls, claim_name: str, claim_id: str, amount: int, holding_address: str,
funding_accounts: List['Account'], change_account: 'Account', signing_channel: Output = None,
comment: str = None):
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
if signing_channel is not None or comment is not None:
support = Support()
if comment is not None:
support.comment = comment
support_output = Output.pay_support_data_pubkey_hash(
amount, claim_name, claim_id, support, ledger.address_to_hash160(holding_address)
)
if signing_channel is not None:
support_output.sign(signing_channel, b'placeholder txid:nout')
else:
support_output = Output.pay_support_pubkey_hash(
amount, claim_name, claim_id, ledger.address_to_hash160(holding_address)
)
return cls.create([], [support_output], funding_accounts, change_account, sign=False)
@classmethod
def purchase(cls, claim_id: str, amount: int, merchant_address: bytes,
funding_accounts: List['Account'], change_account: 'Account'):
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
payment = Output.pay_pubkey_hash(amount, ledger.address_to_hash160(merchant_address))
data = Output.add_purchase_data(Purchase(claim_id))
return cls.create([], [payment, data], funding_accounts, change_account)
@classmethod
async def spend_time_lock(cls, time_locked_txo: Output, script: bytes, account: 'Account'):
txi = Input.spend_time_lock(time_locked_txo, script)
txi.sequence = 0xFFFFFFFE
tx = await cls.create([txi], [], [account], account, sign=False)
tx.locktime = txi.script.values['script'].values['height']
tx._reset()
return tx
@property
def my_inputs(self):
for txi in self.inputs:


@@ -1,28 +1,4 @@
import re
from typing import TypeVar, Sequence, Optional
from .constants import COIN
def date_to_julian_day(d):
return d.toordinal() + 1721424.5
def coins_to_satoshis(coins):
if not isinstance(coins, str):
raise ValueError("{coins} must be a string")
result = re.search(r'^(\d{1,10})\.(\d{1,8})$', coins)
if result is not None:
whole, fractional = result.groups()
return int(whole+fractional.ljust(8, "0"))
raise ValueError("'{lbc}' is not a valid coin decimal")
def satoshis_to_coins(satoshis):
coins = '{:.8f}'.format(satoshis / COIN).rstrip('0')
if coins.endswith('.'):
return coins+'0'
else:
return coins
T = TypeVar('T')
@@ -44,18 +20,6 @@ def subclass_tuple(name, base):
return type(name, (base,), {'__slots__': ()})
class cachedproperty:
def __init__(self, f):
self.f = f
def __get__(self, obj, objtype):
obj = obj or objtype
value = self.f(obj)
setattr(obj, self.f.__name__, value)
return value
class ArithUint256:
# https://github.com/bitcoin/bitcoin/blob/master/src/arith_uint256.cpp


@@ -1,78 +1,20 @@
import os
import sys
import shutil
import signal
import pathlib
import json
import asyncio
import pathlib
import argparse
import logging
import logging.handlers
import textwrap
import subprocess
import aiohttp
from aiohttp.web import GracefulExit
from docopt import docopt
from lbry import __version__ as lbrynet_version
from lbry.extras.daemon.daemon import Daemon
from lbry import __version__
from lbry.conf import Config, CLIConfig
log = logging.getLogger('lbry')
def display(data):
print(json.dumps(data, indent=2))
async def execute_command(conf, method, params, callback=display):
async with aiohttp.ClientSession() as session:
try:
message = {'method': method, 'params': params}
async with session.get(conf.api_connection_url, json=message) as resp:
try:
data = await resp.json()
if 'result' in data:
return callback(data['result'])
elif 'error' in data:
return callback(data['error'])
except Exception as e:
log.exception('Could not process response from server:', exc_info=e)
except aiohttp.ClientConnectionError:
print("Could not connect to daemon. Are you sure it's running?")
def normalize_value(x, key=None):
if not isinstance(x, str):
return x
if key in ('uri', 'channel_name', 'name', 'file_name', 'claim_name', 'download_directory'):
return x
if x.lower() == 'true':
return True
if x.lower() == 'false':
return False
if x.isdigit():
return int(x)
return x
def remove_brackets(key):
if key.startswith("<") and key.endswith(">"):
return str(key[1:-1])
return key
def set_kwargs(parsed_args):
kwargs = {}
for key, arg in parsed_args.items():
if arg is None:
continue
k = None
if key.startswith("--") and remove_brackets(key[2:]) not in kwargs:
k = remove_brackets(key[2:])
elif remove_brackets(key) not in kwargs:
k = remove_brackets(key)
kwargs[k] = normalize_value(arg, k)
return kwargs
from lbry.service import Daemon, Client
from lbry.service.metadata import interface
from lbry.service.full_node import FullNode
from lbry.blockchain.ledger import Ledger
from lbry.console import Advanced as AdvancedConsole, Basic as BasicConsole
def split_subparser_argument(parent, original, name, condition):
@@ -153,17 +95,10 @@ class HelpFormatter(argparse.HelpFormatter):
)
def add_command_parser(parent, command):
subcommand = parent.add_parser(
command['name'],
help=command['doc'].strip().splitlines()[0]
)
subcommand.set_defaults(
api_method_name=command['api_method_name'],
command=command['name'],
doc=command['doc'],
replaced_by=command.get('replaced_by', None)
)
def add_command_parser(parent, method_name, command):
short = command['desc']['text'][0] if command['desc'] else ''
subcommand = parent.add_parser(command['name'], help=short)
subcommand.set_defaults(api_method_name=method_name, command=command['name'], doc=command['help'])
def get_argument_parser():
@@ -182,6 +117,10 @@ def get_argument_parser():
usage='lbrynet start [--config FILE] [--data-dir DIR] [--wallet-dir DIR] [--download-dir DIR] ...',
help='Start LBRY Network interface.'
)
start.add_argument(
'--full-node', dest='full_node', action="store_true",
help='Start a full node with local blockchain data, requires lbrycrd.'
)
start.add_argument(
'--quiet', dest='quiet', action="store_true",
help='Disable all console output.'
@@ -199,26 +138,32 @@
'--initial-headers', dest='initial_headers',
help='Specify path to initial blockchain headers, faster than downloading them on first run.'
)
install = sub.add_parser("install", help="Install lbrynet with various system services.")
install.add_argument("system", choices=["systemd"])
install.add_argument(
"--global", dest="install_global", action="store_true",
help="Install system wide (requires running as root), default is for current user only."
)
Config.contribute_to_argparse(start)
start.set_defaults(command='start', start_parser=start, doc=start.format_help())
install.set_defaults(command='install', install_parser=install, doc=install.format_help())
api = Daemon.get_api_definitions()
groups = {}
for group_name in sorted(api['groups']):
group_parser = sub.add_parser(group_name, group_name=group_name, help=api['groups'][group_name])
for group_name in sorted(interface['groups']):
group_parser = sub.add_parser(group_name, group_name=group_name, help=interface['groups'][group_name])
groups[group_name] = group_parser.add_subparsers(metavar='COMMAND')
nicer_order = ['stop', 'get', 'publish', 'resolve']
for command_name in sorted(api['commands']):
for command_name in sorted(interface['commands']):
if command_name not in nicer_order:
nicer_order.append(command_name)
for command_name in nicer_order:
command = api['commands'][command_name]
if command['group'] is None:
add_command_parser(sub, command)
command = interface['commands'][command_name]
if command.get('group') is None:
add_command_parser(sub, command_name, command)
else:
add_command_parser(groups[command['group']], command)
add_command_parser(groups[command['group']], command_name, command)
return root
@@ -226,66 +171,64 @@ def get_argument_parser():
def ensure_directory_exists(path: str):
if not os.path.isdir(path):
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
use_effective_ids = os.access in os.supports_effective_ids
if not os.access(path, os.W_OK, effective_ids=use_effective_ids):
raise PermissionError(f"The following directory is not writable: {path}")
LOG_MODULES = 'lbry', 'aioupnp'
async def execute_command(conf, method, params):
client = Client(f"http://{conf.api}/ws")
await client.connect()
resp = await client.send(method, **params)
print(await resp.first)
await client.disconnect()
def setup_logging(logger: logging.Logger, args: argparse.Namespace, conf: Config):
default_formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(name)s:%(lineno)d: %(message)s")
file_handler = logging.handlers.RotatingFileHandler(conf.log_file_path, maxBytes=2097152, backupCount=5)
file_handler.setFormatter(default_formatter)
for module_name in LOG_MODULES:
logger.getChild(module_name).addHandler(file_handler)
if not args.quiet:
handler = logging.StreamHandler()
handler.setFormatter(default_formatter)
for module_name in LOG_MODULES:
logger.getChild(module_name).addHandler(handler)
logger.getChild('lbry').setLevel(logging.INFO)
logger.getChild('aioupnp').setLevel(logging.WARNING)
logger.getChild('aiohttp').setLevel(logging.CRITICAL)
if args.verbose is not None:
if len(args.verbose) > 0:
for module in args.verbose:
logger.getChild(module).setLevel(logging.DEBUG)
else:
logger.getChild('lbry').setLevel(logging.DEBUG)
def normalize_value(x, key=None):
if not isinstance(x, str):
return x
if key in ('uri', 'channel_name', 'name', 'file_name', 'claim_name', 'download_directory'):
return x
if x.lower() == 'true':
return True
if x.lower() == 'false':
return False
if x.isdigit():
return int(x)
return x
def run_daemon(args: argparse.Namespace, conf: Config):
loop = asyncio.get_event_loop()
if args.verbose is not None:
loop.set_debug(True)
if not args.no_logging:
setup_logging(logging.getLogger(), args, conf)
daemon = Daemon(conf)
def remove_brackets(key):
if key.startswith("<") and key.endswith(">"):
return str(key[1:-1])
return key
def __exit():
raise GracefulExit()
try:
loop.add_signal_handler(signal.SIGINT, __exit)
loop.add_signal_handler(signal.SIGTERM, __exit)
except NotImplementedError:
pass # Not implemented on Windows
def set_kwargs(parsed_args):
kwargs = {}
for key, arg in parsed_args.items():
if arg is None:
continue
k = None
if key.startswith("--") and remove_brackets(key[2:]) not in kwargs:
k = remove_brackets(key[2:])
elif remove_brackets(key) not in kwargs:
k = remove_brackets(key)
kwargs[k] = normalize_value(arg, k)
return kwargs
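A quick illustration of these helpers on assumed docopt-style output:

assert normalize_value('true') is True
assert normalize_value('8') == 8
assert normalize_value('8', key='name') == '8'   # names are never coerced
parsed = {'--claim_name': 'video', '<uri>': 'lbry://what', '--tags': None}
assert set_kwargs(parsed) == {'claim_name': 'video', 'uri': 'lbry://what'}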
try:
loop.run_until_complete(daemon.start())
loop.run_forever()
except (GracefulExit, KeyboardInterrupt, asyncio.CancelledError):
pass
finally:
loop.run_until_complete(daemon.stop())
logging.shutdown()
if hasattr(loop, 'shutdown_asyncgens'):
loop.run_until_complete(loop.shutdown_asyncgens())
def install_systemd_service():
systemd_service = textwrap.dedent(f"""\
[Unit]
Description=LBRYnet
[Service]
Type=simple
ExecStart={sys.argv[0]} start --full-node
""")
subprocess.run(
["systemctl", "edit", "--user", "--full", "--force", "lbrynet.service"],
input=systemd_service, text=True, check=True,
env=dict(os.environ, SYSTEMD_EDITOR="cp /dev/stdin"),
)
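With the parser additions above, "lbrynet install systemd" writes this unit for the current user via systemctl edit; starting it afterwards would presumably be "systemctl --user start lbrynet.service", though the exact invocation depends on the local systemd setup.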
def main(argv=None):
@@ -293,34 +236,36 @@ def main(argv=None):
parser = get_argument_parser()
args, command_args = parser.parse_known_args(argv)
conf = Config.create_from_arguments(args)
conf = Config()
conf.set_arguments(args)
conf.set_environment()
conf.set_default_paths()
conf.set_persisted()
for directory in (conf.data_dir, conf.download_dir, conf.wallet_dir):
ensure_directory_exists(directory)
if args.cli_version:
print(f"lbrynet {lbrynet_version}")
print(f"lbrynet {__version__}")
elif args.command == 'start':
if args.help:
args.start_parser.print_help()
elif args.full_node:
service = FullNode(Ledger(conf))
if conf.console == "advanced":
console = AdvancedConsole(service)
else:
console = BasicConsole(service)
return Daemon(service, console).run()
else:
if args.initial_headers:
ledger_path = os.path.join(conf.wallet_dir, 'lbc_mainnet')
ensure_directory_exists(ledger_path)
current_size = 0
headers_path = os.path.join(ledger_path, 'headers')
if os.path.exists(headers_path):
current_size = os.stat(headers_path).st_size
if os.stat(args.initial_headers).st_size > current_size:
log.info('Copying header from %s to %s', args.initial_headers, headers_path)
shutil.copy(args.initial_headers, headers_path)
run_daemon(args, conf)
print('Only `start --full-node` is currently supported.')
elif args.command == 'install':
if args.help:
args.install_parser.print_help()
elif args.system == 'systemd':
install_systemd_service()
elif args.command is not None:
doc = args.doc
api_method_name = args.api_method_name
if args.replaced_by:
print(f"{args.api_method_name} is deprecated, using {args.replaced_by['api_method_name']}.")
doc = args.replaced_by['doc']
api_method_name = args.replaced_by['api_method_name']
if args.help:
print(doc)
else:


@@ -1,21 +1,22 @@
import os
import re
import sys
import typing
import logging
from typing import List, Dict, Tuple, Union, TypeVar, Generic, Optional
from argparse import ArgumentParser
from contextlib import contextmanager
from appdirs import user_data_dir, user_config_dir
from typing import Tuple
import yaml
from lbry.utils.dirs import user_data_dir, user_download_dir
from lbry.error import InvalidCurrencyError
from lbry.dht import constants
from lbry.wallet.coinselection import STRATEGIES
from lbry.wallet.coinselection import COIN_SELECTION_STRATEGIES
log = logging.getLogger(__name__)
NOT_SET = type('NOT_SET', (object,), {}) # pylint: disable=invalid-name
T = TypeVar('T')
T = typing.TypeVar('T')
CURRENCIES = {
'BTC': {'type': 'crypto'},
@@ -24,11 +25,11 @@ CURRENCIES = {
}
class Setting(Generic[T]):
class Setting(typing.Generic[T]):
def __init__(self, doc: str, default: Optional[T] = None,
previous_names: Optional[List[str]] = None,
metavar: Optional[str] = None):
def __init__(self, doc: str, default: typing.Optional[T] = None,
previous_names: typing.Optional[typing.List[str]] = None,
metavar: typing.Optional[str] = None):
self.doc = doc
self.default = default
self.previous_names = previous_names or []
@@ -45,7 +46,7 @@ class Setting(Generic[T]):
def no_cli_name(self):
return f"--no-{self.name.replace('_', '-')}"
def __get__(self, obj: Optional['BaseConfig'], owner) -> T:
def __get__(self, obj: typing.Optional['BaseConfig'], owner) -> T:
if obj is None:
return self
for location in obj.search_order:
@@ -53,7 +54,7 @@ class Setting(Generic[T]):
return location[self.name]
return self.default
def __set__(self, obj: 'BaseConfig', val: Union[T, NOT_SET]):
def __set__(self, obj: 'BaseConfig', val: typing.Union[T, NOT_SET]):
if val == NOT_SET:
for location in obj.modify_order:
if self.name in location:
@@ -63,18 +64,6 @@
for location in obj.modify_order:
location[self.name] = val
def is_set(self, obj: 'BaseConfig') -> bool:
for location in obj.search_order:
if self.name in location:
return True
return False
def is_set_to_default(self, obj: 'BaseConfig') -> bool:
for location in obj.search_order:
if self.name in location:
return location[self.name] == self.default
return False
def validate(self, value):
raise NotImplementedError()
@@ -99,7 +88,7 @@ class String(Setting[str]):
f"Setting '{self.name}' must be a string."
# TODO: removes this after pylint starts to understand generics
def __get__(self, obj: Optional['BaseConfig'], owner) -> str: # pylint: disable=useless-super-delegation
def __get__(self, obj: typing.Optional['BaseConfig'], owner) -> str: # pylint: disable=useless-super-delegation
return super().__get__(obj, owner)
@@ -212,7 +201,7 @@ class MaxKeyFee(Setting[dict]):
class StringChoice(String):
def __init__(self, doc: str, valid_values: List[str], default: str, *args, **kwargs):
def __init__(self, doc: str, valid_values: typing.List[str], default: str, *args, **kwargs):
super().__init__(doc, default, *args, **kwargs)
if not valid_values:
raise ValueError("No valid values provided")
@@ -285,95 +274,17 @@ class Strings(ListSetting):
f"'{self.name}' must be a string."
class KnownHubsList:
def __init__(self, config: 'Config' = None, file_name: str = 'known_hubs.yml'):
self.file_name = file_name
self.path = os.path.join(config.wallet_dir, self.file_name) if config else None
self.hubs: Dict[Tuple[str, int], Dict] = {}
if self.exists:
self.load()
@property
def exists(self):
return self.path and os.path.exists(self.path)
@property
def serialized(self) -> Dict[str, Dict]:
return {f"{host}:{port}": details for (host, port), details in self.hubs.items()}
def filter(self, match_none=False, **kwargs):
if not kwargs:
return self.hubs
result = {}
for hub, details in self.hubs.items():
for key, constraint in kwargs.items():
value = details.get(key)
if value == constraint or (match_none and value is None):
result[hub] = details
break
return result
def load(self):
if self.path:
with open(self.path, 'r') as known_hubs_file:
raw = known_hubs_file.read()
for hub, details in yaml.safe_load(raw).items():
self.set(hub, details)
def save(self):
if self.path:
with open(self.path, 'w') as known_hubs_file:
known_hubs_file.write(yaml.safe_dump(self.serialized, default_flow_style=False))
def set(self, hub: str, details: Dict):
if hub and hub.count(':') == 1:
host, port = hub.split(':')
hub_parts = (host, int(port))
if hub_parts not in self.hubs:
self.hubs[hub_parts] = details
return hub
def add_hubs(self, hubs: List[str]):
added = False
for hub in hubs:
if self.set(hub, {}) is not None:
added = True
return added
def items(self):
return self.hubs.items()
def __bool__(self):
return len(self) > 0
def __len__(self):
return self.hubs.__len__()
def __iter__(self):
return iter(self.hubs)
class EnvironmentAccess:
PREFIX = 'LBRY_'
def __init__(self, config: 'BaseConfig', environ: dict):
self.configuration = config
self.data = {}
if environ:
self.load(environ)
def load(self, environ):
for setting in self.configuration.get_settings():
value = environ.get(f'{self.PREFIX}{setting.name.upper()}', NOT_SET)
if value != NOT_SET and not (isinstance(setting, ListSetting) and value is None):
self.data[setting.name] = setting.deserialize(value)
def __init__(self, environ: dict):
self.environ = environ
def __contains__(self, item: str):
return item in self.data
return f'{self.PREFIX}{item.upper()}' in self.environ
def __getitem__(self, item: str):
return self.data[item]
return self.environ[f'{self.PREFIX}{item.upper()}']
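A small illustration with hypothetical values; lookups now return raw environment strings, so any deserialization has to happen in the consuming layer:

env = EnvironmentAccess({'LBRY_TCP_PORT': '3334'})
assert 'tcp_port' in env
assert env['tcp_port'] == '3334'   # raw string, no per-setting deserialization here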
class ArgumentAccess:
@@ -414,7 +325,7 @@ class ConfigFileAccess:
cls = type(self.configuration)
with open(self.path, 'r') as config_file:
raw = config_file.read()
serialized = yaml.safe_load(raw) or {}
serialized = yaml.full_load(raw) or {}
for key, value in serialized.items():
attr = getattr(cls, key, None)
if attr is None:
@@ -458,7 +369,7 @@ class ConfigFileAccess:
del self.data[key]
TBC = TypeVar('TBC', bound='BaseConfig')
TBC = typing.TypeVar('TBC', bound='BaseConfig')
class BaseConfig:
@@ -471,8 +382,12 @@ class BaseConfig:
self.environment = {} # from environment variables
self.persisted = {} # from config file
self._updating_config = False
self.set(**kwargs)
def set(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
return self
@contextmanager
def update_config(self):
@@ -532,7 +447,7 @@ class BaseConfig:
self.arguments = ArgumentAccess(self, args)
def set_environment(self, environ=None):
self.environment = EnvironmentAccess(self, environ or os.environ)
self.environment = EnvironmentAccess(environ or os.environ)
def set_persisted(self, config_file_path=None):
if config_file_path is None:
@@ -558,16 +473,16 @@ class TranscodeConfig(BaseConfig):
'', previous_names=['ffmpeg_folder'])
video_encoder = String('FFmpeg codec and parameters for the video encoding. '
'Example: libaom-av1 -crf 25 -b:v 0 -strict experimental',
'libx264 -crf 24 -preset faster -pix_fmt yuv420p')
video_bitrate_maximum = Integer('Maximum bits per second allowed for video streams (0 to disable).', 5_000_000)
'libx264 -crf 21 -preset faster -pix_fmt yuv420p')
video_bitrate_maximum = Integer('Maximum bits per second allowed for video streams (0 to disable).', 8400000)
video_scaler = String('FFmpeg scaling parameters for reducing bitrate. '
'Example: -vf "scale=-2:720,fps=24" -maxrate 5M -bufsize 3M',
r'-vf "scale=if(gte(iw\,ih)\,min(1920\,iw)\,-2):if(lt(iw\,ih)\,min(1920\,ih)\,-2)" '
r'-maxrate 5500K -bufsize 5000K')
r'-maxrate 8400K -bufsize 5000K')
audio_encoder = String('FFmpeg codec and parameters for the audio encoding. '
'Example: libopus -b:a 128k',
'aac -b:a 160k')
volume_filter = String('FFmpeg filter for audio normalization. Example: -af loudnorm', '')
volume_filter = String('FFmpeg filter for audio normalization.', '-af loudnorm')
volume_analysis_time = Integer('Maximum seconds into the file that we examine audio volume (0 to disable).', 240)
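
One plausible way (an assumption; the consuming transcode code is not in this hunk) that these encoder/filter strings compose into an ffmpeg invocation: each setting holds the codec name followed by its parameters, ready to be spliced into a command line.

video_encoder = 'libx264 -crf 21 -preset faster -pix_fmt yuv420p'
audio_encoder = 'aac -b:a 160k'
volume_filter = '-af loudnorm'
cmd = f'ffmpeg -i in.mkv -c:v {video_encoder} -c:a {audio_encoder} {volume_filter} out.mp4'
print(cmd)  # hypothetical composition for illustration only
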
@@ -589,22 +504,29 @@ class CLIConfig(TranscodeConfig):
class Config(CLIConfig):
jurisdiction = String("Limit interactions to wallet server in this jurisdiction.")
db_url = String("Database connection URL, uses a local file based SQLite by default.")
workers = Integer(
"Multiprocessing, specify number of worker processes lbrynet can start (including main process)."
" (-1: threads only, 0: equal to number of CPUs, >1: specific number of processes)", -1
)
console = StringChoice(
"Basic text console output or advanced colored output with progress bars.",
["basic", "advanced"], "advanced"
)
# directories
data_dir = Path("Directory path to store blobs.", metavar='DIR')
download_dir = Path(
"Directory path to place assembled files downloaded from LBRY.",
previous_names=['download_directory'], metavar='DIR'
)
wallet_dir = Path(
"Directory containing a 'wallets' subdirectory with 'default_wallet' file.",
previous_names=['lbryum_wallet_dir'], metavar='DIR'
)
download_dir = Path("Directory to store downloaded files.", metavar='DIR')
data_dir = Path("Main directory containing blobs, wallets and blockchain data.", metavar='DIR')
blob_dir = Path("Directory to store blobs (default: 'data_dir'/blobs).", metavar='DIR')
wallet_dir = Path("Directory to store wallets (default: 'data_dir'/wallets).", metavar='DIR')
wallets = Strings(
"Wallet files in 'wallet_dir' to load at startup.",
['default_wallet']
"Wallet files in 'wallet_dir' to load at startup.", ['default_wallet']
)
create_default_wallet = Toggle(
"Create an initial wallet if it does not exist on startup.", True
)
create_default_account = Toggle(
"Create an initial account if it does not exist in the default wallet.", True
)
# network
@@ -613,7 +535,7 @@ class Config(CLIConfig):
"ports or have firewall rules you likely want to disable this.", True
)
udp_port = Integer("UDP port for communicating on the LBRY DHT", 4444, previous_names=['dht_node_port'])
tcp_port = Integer("TCP port to listen for incoming blob requests", 4444, previous_names=['peer_port'])
tcp_port = Integer("TCP port to listen for incoming blob requests", 3333, previous_names=['peer_port'])
prometheus_port = Integer("Port to expose prometheus metrics (off by default)", 0)
network_interface = String("Interface to use for the DHT and blob exchange", '0.0.0.0')
@@ -622,24 +544,17 @@ class Config(CLIConfig):
"Routing table bucket index below which we always split the bucket if given a new key to add to it and "
"the bucket is full. As this value is raised the depth of the routing table (and number of peers in it) "
"will increase. This setting is used by seed nodes, you probably don't want to change it during normal "
"use.", 2
)
is_bootstrap_node = Toggle(
"When running as a bootstrap node, disable all logic related to balancing the routing table, so we can "
"add as many peers as possible and better help first-runs.", False
"use.", 1
)
# protocol timeouts
download_timeout = Float("Cumulative timeout for a stream to begin downloading before giving up", 30.0)
blob_download_timeout = Float("Timeout to download a blob from a peer", 30.0)
hub_timeout = Float("Timeout when making a hub request", 30.0)
peer_connect_timeout = Float("Timeout to establish a TCP connection to a peer", 3.0)
node_rpc_timeout = Float("Timeout when making a DHT request", constants.RPC_TIMEOUT)
# blob announcement and download
save_blobs = Toggle("Save encrypted blob files for hosting, otherwise download blobs to memory only.", True)
network_storage_limit = Integer("Disk space in MB to be allocated for helping the P2P network. 0 = disable", 0)
blob_storage_limit = Integer("Disk space in MB to be allocated for blob storage. 0 = no limit", 0)
blob_lru_cache_size = Integer(
"LRU cache size for decrypted downloaded blobs used to minimize re-downloading the same blobs when "
"replying to a range request. Set to 0 to disable.", 32
@@ -656,7 +571,6 @@ class Config(CLIConfig):
"Maximum number of peers to connect to while downloading a blob", 4,
previous_names=['max_connections_per_stream']
)
concurrent_hub_requests = Integer("Maximum number of concurrent hub requests", 32)
fixed_peer_delay = Float(
"Amount of seconds before adding the reflector servers as potential peers to download from in case dht"
"peers are not found or are slow", 2.0
@@ -677,22 +591,9 @@ class Config(CLIConfig):
)
# servers
reflector_servers = Servers("Reflector re-hosting servers for mirroring publishes", [
reflector_servers = Servers("Reflector re-hosting servers", [
('reflector.lbry.com', 5566)
])
fixed_peers = Servers("Fixed peers to fall back to if none are found on P2P for a blob", [
('cdn.reflector.lbry.com', 5567)
])
tracker_servers = Servers("BitTorrent-compatible (BEP15) UDP trackers for helping P2P discovery", [
('tracker.lbry.com', 9252),
('tracker.lbry.grin.io', 9252),
('tracker.lbry.pigg.es', 9252),
('tracker.lizard.technology', 9252),
('s1.lbry.network', 9252),
])
lbryum_servers = Servers("SPV wallet servers", [
('spv11.lbry.com', 50001),
('spv12.lbry.com', 50001),
@@ -703,36 +604,39 @@ class Config(CLIConfig):
('spv17.lbry.com', 50001),
('spv18.lbry.com', 50001),
('spv19.lbry.com', 50001),
('hub.lbry.grin.io', 50001),
('hub.lizard.technology', 50001),
('s1.lbry.network', 50001),
])
known_dht_nodes = Servers("Known nodes for bootstrapping connection to the DHT", [
('dht.lbry.grin.io', 4444), # Grin
('dht.lbry.madiator.com', 4444), # Madiator
('dht.lbry.pigg.es', 4444), # Pigges
('lbrynet1.lbry.com', 4444), # US EAST
('lbrynet2.lbry.com', 4444), # US WEST
('lbrynet3.lbry.com', 4444), # EU
('lbrynet4.lbry.com', 4444), # ASIA
('dht.lizard.technology', 4444), # Jack
('s2.lbry.network', 4444),
('lbrynet4.lbry.com', 4444) # ASIA
])
comment_server = String("Comment server API URL", "https://comments.lbry.com/api")
# blockchain
lbrycrd_rpc_user = String("Username for connecting to lbrycrd.", "rpcuser")
lbrycrd_rpc_pass = String("Password for connecting to lbrycrd.", "rpcpassword")
lbrycrd_rpc_host = String("Hostname for connecting to lbrycrd.", "localhost")
lbrycrd_rpc_port = Integer("Port for connecting to lbrycrd.", 9245)
lbrycrd_peer_port = Integer("Port for connecting to lbrycrd.", 9246)
lbrycrd_zmq_blocks = String("ZMQ block events address.")
lbrycrd_dir = Path("Directory containing lbrycrd data.", metavar='DIR')
blockchain_name = String("Blockchain name - lbrycrd_main, lbrycrd_regtest, or lbrycrd_testnet", 'lbrycrd_main')
spv_address_filters = Toggle(
"Generate Golomb-Rice coding filters for blocks and transactions. Enables "
"light client to synchronize with a full node.",
True
)
# daemon
save_files = Toggle("Save downloaded files when calling `get` by default", False)
save_files = Toggle("Save downloaded files when calling `get` by default", True)
components_to_skip = Strings("components which will be skipped during start-up of daemon", [])
share_usage_data = Toggle(
"Whether to share usage stats and diagnostic info with LBRY.", False,
previous_names=['upload_log', 'share_debug_info']
)
track_bandwidth = Toggle("Track bandwidth usage", True)
allowed_origin = String(
"Allowed `Origin` header value for API request (sent by browser), use * to allow "
"all hosts; default is to only allow API requests with no `Origin` value.", "")
# media server
streaming_server = String('Host name and port to serve streaming media over range requests',
@@ -742,10 +646,8 @@ class Config(CLIConfig):
coin_selection_strategy = StringChoice(
"Strategy to use when selecting UTXOs for a transaction",
STRATEGIES, "prefer_confirmed"
)
COIN_SELECTION_STRATEGIES, "standard")
transaction_cache_size = Integer("Transaction cache size", 2 ** 17)
save_resolved_claims = Toggle(
"Save content claims to the database when they are resolved to keep file_list up to date, "
"only disable this if file_x commands are not needed", True
@ -759,10 +661,18 @@ class Config(CLIConfig):
def streaming_port(self):
return int(self.streaming_server.split(':')[1])
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.set_default_paths()
self.known_hubs = KnownHubsList(self)
@classmethod
def with_null_dir(cls):
return cls.with_same_dir('/dev/null')
@classmethod
def with_same_dir(cls, same_dir):
return cls(
data_dir=same_dir,
download_dir=same_dir,
wallet_dir=same_dir,
lbrycrd_dir=same_dir,
)
def set_default_paths(self):
if 'darwin' in sys.platform.lower():
@@ -774,62 +684,76 @@ class Config(CLIConfig):
else:
return
cls = type(self)
cls.data_dir.default, cls.wallet_dir.default, cls.download_dir.default = get_directories()
cls.config.default = os.path.join(
self.data_dir, 'daemon_settings.yml'
)
cls.data_dir.default, cls.wallet_dir.default,\
cls.blob_dir.default, cls.download_dir.default = get_directories()
old_settings_file = os.path.join(self.data_dir, 'daemon_settings.yml')
if os.path.exists(old_settings_file):
cls.config.default = old_settings_file
else:
cls.config.default = os.path.join(self.data_dir, 'settings.yml')
if self.data_dir != cls.data_dir.default:
cls.blob_dir.default = os.path.join(self.data_dir, 'blobs')
cls.wallet_dir.default = os.path.join(self.data_dir, 'wallets')
@property
def log_file_path(self):
return os.path.join(self.data_dir, 'lbrynet.log')
return os.path.join(self.data_dir, 'daemon.log')
@property
def db_url_or_default(self):
if self.db_url:
return self.db_url
return 'sqlite:///'+os.path.join(self.data_dir, f'{self.blockchain_name}.db')
def get_windows_directories() -> Tuple[str, str, str]:
from lbry.winpaths import get_path, FOLDERID, UserHandle, \
PathNotFoundException # pylint: disable=import-outside-toplevel
try:
download_dir = get_path(FOLDERID.Downloads, UserHandle.current)
except PathNotFoundException:
download_dir = os.getcwd()
def get_windows_directories() -> Tuple[str, str, str, str]:
# very old
data_dir = user_data_dir('lbrynet', roaming=True)
blob_dir = os.path.join(data_dir, 'blobfiles')
wallet_dir = os.path.join(user_data_dir('lbryum', roaming=True), 'wallets')
if os.path.isdir(blob_dir) or os.path.isdir(wallet_dir):
return data_dir, wallet_dir, blob_dir, user_download_dir()
# old
appdata = get_path(FOLDERID.RoamingAppData, UserHandle.current)
data_dir = os.path.join(appdata, 'lbrynet')
lbryum_dir = os.path.join(appdata, 'lbryum')
if os.path.isdir(data_dir) or os.path.isdir(lbryum_dir):
return data_dir, lbryum_dir, download_dir
# new
data_dir = user_data_dir('lbrynet', 'lbry')
lbryum_dir = user_data_dir('lbryum', 'lbry')
return data_dir, lbryum_dir, download_dir
def get_darwin_directories() -> Tuple[str, str, str]:
data_dir = user_data_dir('LBRY')
lbryum_dir = os.path.expanduser('~/.lbryum')
download_dir = os.path.expanduser('~/Downloads')
return data_dir, lbryum_dir, download_dir
def get_linux_directories() -> Tuple[str, str, str]:
try:
with open(os.path.join(user_config_dir(), 'user-dirs.dirs'), 'r') as xdg:
down_dir = re.search(r'XDG_DOWNLOAD_DIR=(.+)', xdg.read())
if down_dir:
down_dir = re.sub(r'\$HOME', os.getenv('HOME') or os.path.expanduser("~/"), down_dir.group(1))
download_dir = re.sub('\"', '', down_dir)
except OSError:
download_dir = os.getenv('XDG_DOWNLOAD_DIR')
if not download_dir:
download_dir = os.path.expanduser('~/Downloads')
# old
data_dir = os.path.expanduser('~/.lbrynet')
lbryum_dir = os.path.expanduser('~/.lbryum')
if os.path.isdir(data_dir) or os.path.isdir(lbryum_dir):
return data_dir, lbryum_dir, download_dir
blob_dir = os.path.join(data_dir, 'blobfiles')
wallet_dir = os.path.join(user_data_dir('lbryum', 'lbry'), 'wallets')
if os.path.isdir(blob_dir) and os.path.isdir(wallet_dir):
return data_dir, wallet_dir, blob_dir, user_download_dir()
# new
return user_data_dir('lbry/lbrynet'), user_data_dir('lbry/lbryum'), download_dir
return get_universal_directories()
def get_darwin_directories() -> Tuple[str, str, str, str]:
data_dir = user_data_dir('LBRY')
blob_dir = os.path.join(data_dir, 'blobfiles')
wallet_dir = os.path.expanduser('~/.lbryum/wallets')
if os.path.isdir(blob_dir) or os.path.isdir(wallet_dir):
return data_dir, wallet_dir, blob_dir, user_download_dir()
return get_universal_directories()
def get_linux_directories() -> Tuple[str, str, str, str]:
# very old
data_dir = os.path.expanduser('~/.lbrynet')
blob_dir = os.path.join(data_dir, 'blobfiles')
wallet_dir = os.path.join(os.path.expanduser('~/.lbryum'), 'wallets')
if os.path.isdir(blob_dir) or os.path.isdir(wallet_dir):
return data_dir, wallet_dir, blob_dir, user_download_dir()
# old
data_dir = user_data_dir('lbry/lbrynet')
blob_dir = os.path.join(data_dir, 'blobfiles')
wallet_dir = user_data_dir('lbry/lbryum/wallets')
if os.path.isdir(blob_dir) or os.path.isdir(wallet_dir):
return data_dir, wallet_dir, blob_dir, user_download_dir()
# new
return get_universal_directories()
def get_universal_directories() -> Tuple[str, str, str, str]:
lbrynet_dir = user_data_dir('lbrynet', 'LBRY')
return (
lbrynet_dir,
os.path.join(lbrynet_dir, 'wallets'),
os.path.join(lbrynet_dir, 'blobs'),
user_download_dir()
)
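
A sketch (not part of this diff) of the platform dispatch that set_default_paths() above performs with these helpers; note 'darwin' must be tested before 'win', since the substring 'win' also occurs in 'darwin'. The unpack order matches the four-tuples returned above.

import sys

if 'darwin' in sys.platform.lower():
    get_directories = get_darwin_directories
elif 'win' in sys.platform.lower():
    get_directories = get_windows_directories
elif 'linux' in sys.platform.lower():
    get_directories = get_linux_directories
else:
    get_directories = get_universal_directories
data_dir, wallet_dir, blob_dir, download_dir = get_directories()
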


@@ -67,7 +67,7 @@ class ConnectionManager:
while True:
last = time.perf_counter()
await asyncio.sleep(0.1)
await asyncio.sleep(0.1, loop=self.loop)
self._status['incoming_bps'].clear()
self._status['outgoing_bps'].clear()
now = time.perf_counter()

lbry/console.py (new file)

@@ -0,0 +1,492 @@
import os
import sys
import time
import itertools
import logging
from typing import Dict, Any
from tempfile import TemporaryFile
from tqdm.std import tqdm, Bar
from tqdm.utils import FormatReplace, _unicode, disp_len, disp_trim, _is_ascii
from lbry import __version__
from lbry.service.base import Service
from lbry.service.full_node import FullNode
from lbry.service.light_client import LightClient
log = logging.getLogger(__name__)
class RedirectOutput:
silence_lines = [
b'libprotobuf ERROR google/protobuf/wire_format_lite.cc:626',
]
def __init__(self, stream_type: str):
assert stream_type in ('stderr', 'stdout')
self.stream_type = stream_type
self.stream_no = getattr(sys, stream_type).fileno()
self.last_flush = time.time()
self.last_read = 0
self.backup = None
self.file = None
def __enter__(self):
self.backup = os.dup(self.stream_no)
setattr(sys, self.stream_type, os.fdopen(self.backup, 'w'))
self.file = TemporaryFile()
os.dup2(self.file.fileno(), self.stream_no)  # redirect the real fd; keep self.backup pointing at the saved copy
def __exit__(self, exc_type, exc_val, exc_tb):
self.file.close()
os.dup2(self.backup, self.stream_no)
os.close(self.backup)
setattr(sys, self.stream_type, os.fdopen(self.stream_no, 'w'))
def capture(self):
self.__enter__()
def release(self):
self.__exit__(None, None, None)
def flush(self, writer, force=False):
if not force and (time.time() - self.last_flush) < 5:
return
self.file.seek(self.last_read)
for line in self.file.readlines():
silence = False
for bad_line in self.silence_lines:
if bad_line in line:
silence = True
break
if not silence:
writer(line.decode().rstrip())
self.last_read = self.file.tell()
self.last_flush = time.time()
class Console:
def __init__(self, service: Service):
self.service = service
def starting(self):
pass
def stopping(self):
pass
class Basic(Console):
def __init__(self, service: Service):
super().__init__(service)
self.service.sync.on_progress.listen(self.on_sync_progress)
self.tasks = {}
logging.basicConfig(level=logging.INFO)
def starting(self):
conf = self.service.conf
s = [f'LBRY v{__version__}']
if isinstance(self.service, FullNode):
s.append('Full Node')
elif isinstance(self.service, LightClient):
s.append('Light Client')
if conf.workers == -1:
s.append('Threads Only')
else:
workers = os.cpu_count() if conf.workers == 0 else conf.workers
s.append(f'{workers} Worker' if workers == 1 else f'{workers} Workers')
s.append(f'({os.cpu_count()} CPUs available)')
log.info(' '.join(s))
def stopping(self):
log.info('exiting')
@staticmethod
def maybe_log_progress(event, done, total, last):
if done == 0:
log.info("%s 0%%", event)
return 0
elif done == total:
log.info("%s 100%%", event)
return 1
else:
percent = done/total
if percent >= 0.25 > last:
log.info("%s 25%%", event)
return 0.25
elif percent >= 0.50 > last:
log.info("%s 50%%", event)
return 0.50
elif percent >= 0.75 > last:
log.info("%s 75%%", event)
return 0.75
return last
def on_sync_progress(self, event):
e, data = event["event"], event["data"]
name, current, total, last = e, data['done'][0], 0, 0
if not e.endswith("init") and not e.endswith("main") and not e.endswith("indexes"):
name = f"{e}#{data['id']}"
if "total" in data:
total, last = self.tasks[name] = (data["total"][0], last)
elif name in self.tasks:
total, last = self.tasks[name]
elif total == 0:
return
progress_status = (total, self.maybe_log_progress(name, current, total, last))
if progress_status[1] == 1:
del self.tasks[name]
else:
self.tasks[name] = progress_status
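
For reference, a sketch (not part of this diff) of the event shape on_sync_progress() consumes and of the 25/50/75% ratcheting in maybe_log_progress(); the event name and numbers are hypothetical.

event = {
    "event": "blockchain.sync.blocks.main",
    "data": {"id": 0, "done": (5_000,), "total": (10_000,), "units": ("blocks",)},
}
# 5,000/10,000 crosses the 25% threshold first, so this logs "... 25%" and returns 0.25
last = Basic.maybe_log_progress(event["event"], 5_000, 10_000, 0)
assert last == 0.25
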
class Bar2(Bar):
def __init__(self, frac, default_len=10, charset=None):
super().__init__(frac[0], default_len, charset)
self.frac2 = frac[1]
def __format__(self, format_spec):
width = self.default_len
row1 = (1,)*int(self.frac2 * width * 2)
row2 = (2,)*int(self.frac * width * 2)
fill = []
for one, two, _ in itertools.zip_longest(row1, row2, range(width*2)):
fill.append((one or 0)+(two or 0))
bar = []
for i in range(0, width*2, 2):
if fill[i] == 1:
if fill[i+1] == 1:
bar.append('▀')
else:
bar.append('▘')
elif fill[i] == 2:
if fill[i+1] == 2:
bar.append('▄')
else:
bar.append('▖')
elif fill[i] == 3:
if fill[i+1] == 1:
bar.append('▛')
elif fill[i+1] == 2:
bar.append('▙')
elif fill[i+1] == 3:
bar.append('█')
else:
bar.append('▌')
else:
bar.append(' ')
return ''.join(bar)
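
A sketch (not part of this diff) of Bar2 rendered directly: the top half-row tracks frac2 and the bottom half-row tracks frac, so one row of quadrant glyphs shows two fractions at once (assuming the glyph mapping restored above).

bar = Bar2((0.5, 0.25), default_len=10, charset=Bar2.UTF)
print(format(bar))  # '██▙▄▄     ' -- bottom row at 50%, top row at 25%
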
class tqdm2(tqdm): # pylint: disable=invalid-name
def __init__(self, initial=(0, 0), unit=('it', 'it'), total=(None, None), **kwargs):
self.n2 = self.last_print_n2 = initial[1] # pylint: disable=invalid-name
self.unit2 = unit[1]
self.total2 = total[1]
super().__init__(initial=initial[0], unit=unit[0], total=total[0], **kwargs)
@property
def format_dict(self):
d = super().format_dict
d.update({
'n2': self.n2,
'unit2': self.unit2,
'total2': self.total2,
})
return d
def update(self, n=(1, 1)):
if self.disable:
return
last_last_print_t = self.last_print_t
self.n2 += n[1]
super().update(n[0])
if last_last_print_t != self.last_print_t:
self.last_print_n2 = self.n2
@staticmethod
def format_meter(
n, total, elapsed, ncols=None, prefix='', ascii=False, # pylint: disable=redefined-builtin
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000, **extra_kwargs
):
# sanity check: total
if total and n >= (total + 0.5): # allow float imprecision (#849)
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
elapsed_str = tqdm.format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total is not None else '?'
else:
n_fmt = str(n)
total_fmt = str(total) if total is not None else '?'
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
remaining = (total - n) / rate if rate and total else 0
remaining_str = tqdm.format_interval(remaining) if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
# slight extension of self.format_dict
n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
elapsed=elapsed_str, elapsed_s=elapsed,
ncols=ncols, desc=prefix or '', unit=unit,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt, rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
postfix=postfix, unit_divisor=unit_divisor,
# plus more useful definitions
remaining=remaining_str, remaining_s=remaining,
l_bar=l_bar, r_bar=r_bar,
**extra_kwargs)
# total is known: we can predict some stats
if total:
n2, total2 = extra_kwargs['n2'], extra_kwargs['total2'] # pylint: disable=invalid-name
# fractional and percentage progress
frac = n / total
frac2 = n2 / total2
percentage = frac * 100
l_bar += '{0:3.0f}%|'.format(percentage)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
format_dict.update(l_bar=l_bar)
if bar_format:
format_dict.update(percentage=percentage)
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
else:
bar_format = "{l_bar}{bar}{r_bar}"
full_bar = FormatReplace()
try:
nobar = bar_format.format(bar=full_bar, **format_dict)
except UnicodeEncodeError:
bar_format = _unicode(bar_format)
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
# no {bar}, we can just format and return
return nobar
# Formatting progress bar space available for bar's display
full_bar = Bar2(
(frac, frac2),
max(1, ncols - disp_len(nobar))
if ncols else 10,
charset=Bar2.ASCII if ascii is True else ascii or Bar2.UTF)
if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
bar_format = _unicode(bar_format)
res = bar_format.format(bar=full_bar, **format_dict)
return disp_trim(res, ncols) if ncols else res
elif bar_format:
# user-specified bar_format but no total
l_bar += '|'
format_dict.update(l_bar=l_bar, percentage=0)
full_bar = FormatReplace()
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
return nobar
full_bar = Bar2(
(0, 0),
max(1, ncols - disp_len(nobar))
if ncols else 10,
charset=Bar2.BLANK)
res = bar_format.format(bar=full_bar, **format_dict)
return disp_trim(res, ncols) if ncols else res
else:
# no total: no progressbar, ETA, just progress stats
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
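
Usage sketch (not part of this diff) for the dual-counter bar: tuples feed the primary and secondary counters, e.g. blocks and transactions on one line; the numbers are hypothetical.

bar = tqdm2(desc='sync', unit=('blocks', 'txs'), total=(100, 5_000))
bar.update((10, 480))   # advances n by 10 blocks and n2 by 480 txs
bar.close()
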
class Advanced(Basic):
FORMAT = '{l_bar}{bar}| {n_fmt:>8}/{total_fmt:>8} [{elapsed:>7}<{remaining:>8}, {rate_fmt:>17}]'
def __init__(self, service: Service):
super().__init__(service)
self.bars: Dict[Any, tqdm] = {}
self.stderr = RedirectOutput('stderr')
def starting(self):
self.stderr.capture()
super().starting()
def stopping(self):
for bar in self.bars.values():
bar.close()
super().stopping()
#self.stderr.flush(self.bars['read'].write, True)
#self.stderr.release()
def get_or_create_bar(self, name, desc, units, totals, leave=False, bar_format=None, postfix=None, position=None):
bar = self.bars.get(name)
if bar is None:
if len(units) == 2:
bar = self.bars[name] = tqdm2(
desc=desc, unit=units, total=totals,
bar_format=bar_format or self.FORMAT, leave=leave,
postfix=postfix, position=position
)
else:
bar = self.bars[name] = tqdm(
desc=desc, unit=units[0], total=totals[0],
bar_format=bar_format or self.FORMAT, leave=leave,
postfix=postfix, position=position
)
return bar
def sync_init(self, name, d):
bar_name = f"{name}#{d['id']}"
bar = self.bars.get(bar_name)
if bar is None:
label = d.get('label', name[-11:])
self.get_or_create_bar(bar_name, label, d['units'], d['total'], True)
else:
if d['done'][0] != -1:
bar.update(d['done'][0] - bar.n)
if d['done'][0] == -1 or d['done'][0] == bar.total:
bar.close()
self.bars.pop(bar_name)
def sync_main(self, name, d):
bar = self.bars.get(name)
if bar is None:
label = d.get('label', name[-11:])
self.get_or_create_bar(name, label, d['units'], d['total'], True)
#self.last_stats = f"{d['txs']:,d} txs, {d['claims']:,d} claims and {d['supports']:,d} supports"
#self.get_or_create_bar("read", "├─ blocks read", "blocks", d['blocks'], True)
#self.get_or_create_bar("save", "└─┬ txs saved", "txs", d['txs'], True)
else:
if d['done'] == (-1,)*len(d['done']):
base_name = name[:name.rindex('.')]
for child_name, child_bar in self.bars.items():
if child_name.startswith(base_name):
child_bar.close()
bar.close()
self.bars.pop(name)
else:
if len(d['done']) == 2:
bar.update((d['done'][0]-bar.n, d['done'][1]-bar.n2))
else:
bar.update(d['done'][0]-bar.n)
def sync_task(self, name, d):
bar_name = f"{name}#{d['id']}"
bar = self.bars.get(bar_name)
if bar is None:
#assert d['done'][0] == 0
label = d.get('label', name[-11:])
self.get_or_create_bar(
f"{name}#{d['id']}", label, d['units'], d['total'],
name.split('.')[-1] not in ('insert', 'update', 'file')
)
else:
if d['done'][0] != -1:
main_bar_name = f"{name[:name.rindex('.')]}.main"
if len(d['done']) > 1:
diff = tuple(a-b for a, b in zip(d['done'], (bar.n, bar.n2)))
else:
diff = d['done'][0] - bar.n
if main_bar_name != name:
main_bar = self.bars.get(main_bar_name)
if main_bar and main_bar.unit == bar.unit:
main_bar.update(diff)
bar.update(diff)
if d['done'][0] == -1 or d['done'][0] == bar.total:
bar.close()
self.bars.pop(bar_name)
def update_other_bars(self, e, d):
if d['total'] == 0:
return
bar = self.bars.get(e)
if not bar:
name = (
' '.join(e.split('.')[-2:])
.replace('support', 'suprt')
.replace('channels', 'chanls')
.replace('signatures', 'sigs')
)
bar = self.get_or_create_bar(e, f"├─ {name:>12}", d['unit'], d['total'], True)
diff = d['step']-bar.n
bar.update(diff)
#if d['step'] == d['total']:
#bar.close()
def on_sync_progress(self, event):
e, d = event['event'], event.get('data', {})
if e.endswith(".init"):
self.sync_init(e, d)
elif e.endswith(".main"):
self.sync_main(e, d)
else:
self.sync_task(e, d)
# if e.endswith("sync.start"):
# self.sync_start(d)
# self.stderr.flush(self.bars['read'].write)
# elif e.endswith("sync.complete"):
# self.stderr.flush(self.bars['read'].write, True)
# self.sync_complete()
# else:
# self.stderr.flush(self.bars['read'].write)
# self.update_progress(e, d)


@@ -1,2 +1,4 @@
NULL_HASH32 = b'\x00'*32
CENT = 1000000
COIN = 100*CENT
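
So one coin is 10^8 of the smallest unit (dewies), e.g. this sketch (not part of this diff):

CENT = 1_000_000
COIN = 100 * CENT                        # == 10**8 dewies per LBC
assert int(1.5 * COIN) == 150_000_000    # 1.5 LBC; real code parses decimals, not floats
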


@@ -1,19 +1,8 @@
from asn1crypto.keys import PrivateKeyInfo, ECPrivateKey
from coincurve import PublicKey as cPublicKey, PrivateKey as cPrivateKey
from coincurve.utils import (
pem_to_der, lib as libsecp256k1, ffi as libsecp256k1_ffi
)
from coincurve.ecdsa import CDATA_SIG_LENGTH
from coincurve import PublicKey, PrivateKey as _PrivateKey
from lbry.crypto.hash import hmac_sha512, hash160, double_sha256
from lbry.crypto.base58 import Base58
from .util import cachedproperty
class KeyPath:
RECEIVE = 0
CHANGE = 1
CHANNEL = 2
from lbry.utils import cachedproperty
class DerivationError(Exception):
@@ -57,11 +46,9 @@ class _KeyBase:
if len(raw_serkey) != 33:
raise ValueError('raw_serkey must have length 33')
return (
ver_bytes + bytes((self.depth,))
+ self.parent_fingerprint() + self.n.to_bytes(4, 'big')
+ self.chain_code + raw_serkey
)
return (ver_bytes + bytes((self.depth,))
+ self.parent_fingerprint() + self.n.to_bytes(4, 'big')
+ self.chain_code + raw_serkey)
def identifier(self):
raise NotImplementedError
@@ -82,30 +69,26 @@ class _KeyBase:
return Base58.encode_check(self.extended_key())
class PublicKey(_KeyBase):
class PubKey(_KeyBase):
""" A BIP32 public key. """
def __init__(self, ledger, pubkey, chain_code, n, depth, parent=None):
super().__init__(ledger, chain_code, n, depth, parent)
if isinstance(pubkey, cPublicKey):
if isinstance(pubkey, PublicKey):
self.verifying_key = pubkey
else:
self.verifying_key = self._verifying_key_from_pubkey(pubkey)
@classmethod
def from_compressed(cls, public_key_bytes, ledger=None) -> 'PublicKey':
return cls(ledger, public_key_bytes, bytes((0,)*32), 0, 0)
@classmethod
def _verifying_key_from_pubkey(cls, pubkey):
""" Converts a 33-byte compressed pubkey into an coincurve.PublicKey object. """
""" Converts a 33-byte compressed pubkey into an PublicKey object. """
if not isinstance(pubkey, (bytes, bytearray)):
raise TypeError('pubkey must be raw bytes')
if len(pubkey) != 33:
raise ValueError('pubkey must be 33 bytes')
if pubkey[0] not in (2, 3):
raise ValueError('invalid pubkey prefix byte')
return cPublicKey(pubkey)
return PublicKey(pubkey)
@cachedproperty
def pubkey_bytes(self):
@@ -120,7 +103,7 @@ class PublicKey(_KeyBase):
def ec_point(self):
return self.verifying_key.point()
def child(self, n: int) -> 'PublicKey':
def child(self, n: int):
""" Return the derived child extended pubkey at index N. """
if not 0 <= n < (1 << 31):
raise ValueError('invalid BIP32 public key child number')
@@ -128,7 +111,7 @@ class PublicKey(_KeyBase):
msg = self.pubkey_bytes + n.to_bytes(4, 'big')
L_b, R_b = self._hmac_sha512(msg) # pylint: disable=invalid-name
derived_key = self.verifying_key.add(L_b)
return PublicKey(self.ledger, derived_key, R_b, n, self.depth + 1, self)
return PubKey(self.ledger, derived_key, R_b, n, self.depth + 1, self)
def identifier(self):
""" Return the key's identifier as 20 bytes. """
@@ -141,36 +124,6 @@ class PublicKey(_KeyBase):
self.pubkey_bytes
)
def verify(self, signature, digest) -> bool:
""" Verify that a signature is valid for a 32 byte digest. """
if len(signature) != 64:
raise ValueError('Signature must be 64 bytes long.')
if len(digest) != 32:
raise ValueError('Digest must be 32 bytes long.')
key = self.verifying_key
raw_signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')
parsed = libsecp256k1.secp256k1_ecdsa_signature_parse_compact(
key.context.ctx, raw_signature, signature
)
assert parsed == 1
normalized_signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')
libsecp256k1.secp256k1_ecdsa_signature_normalize(
key.context.ctx, normalized_signature, raw_signature
)
verified = libsecp256k1.secp256k1_ecdsa_verify(
key.context.ctx, normalized_signature, digest, key.public_key
)
return bool(verified)
class PrivateKey(_KeyBase):
"""A BIP32 private key."""
@@ -179,7 +132,7 @@ class PrivateKey(_KeyBase):
def __init__(self, ledger, privkey, chain_code, n, depth, parent=None):
super().__init__(ledger, chain_code, n, depth, parent)
if isinstance(privkey, cPrivateKey):
if isinstance(privkey, _PrivateKey):
self.signing_key = privkey
else:
self.signing_key = self._signing_key_from_privkey(privkey)
@@ -187,7 +140,7 @@ class PrivateKey(_KeyBase):
@classmethod
def _signing_key_from_privkey(cls, private_key):
""" Converts a 32-byte private key into an coincurve.PrivateKey object. """
return cPrivateKey.from_int(PrivateKey._private_key_secret_exponent(private_key))
return _PrivateKey.from_int(PrivateKey._private_key_secret_exponent(private_key))
@classmethod
def _private_key_secret_exponent(cls, private_key):
@@ -199,40 +152,24 @@ class PrivateKey(_KeyBase):
return int.from_bytes(private_key, 'big')
@classmethod
def from_seed(cls, ledger, seed) -> 'PrivateKey':
def from_seed(cls, ledger, seed):
# This hard-coded message string seems to be coin-independent...
hmac = hmac_sha512(b'Bitcoin seed', seed)
privkey, chain_code = hmac[:32], hmac[32:]
return cls(ledger, privkey, chain_code, 0, 0)
@classmethod
def from_pem(cls, ledger, pem) -> 'PrivateKey':
der = pem_to_der(pem.encode())
try:
key_int = ECPrivateKey.load(der).native['private_key']
except ValueError:
key_int = PrivateKeyInfo.load(der).native['private_key']['private_key']
private_key = cPrivateKey.from_int(key_int)
return cls(ledger, private_key, bytes((0,)*32), 0, 0)
@classmethod
def from_bytes(cls, ledger, key_bytes) -> 'PrivateKey':
return cls(ledger, cPrivateKey(key_bytes), bytes((0,)*32), 0, 0)
@cachedproperty
def private_key_bytes(self):
""" Return the serialized private key (no leading zero byte). """
return self.signing_key.secret
@cachedproperty
def public_key(self) -> PublicKey:
def public_key(self):
""" Return the corresponding extended public key. """
verifying_key = self.signing_key.public_key
parent_pubkey = self.parent.public_key if self.parent else None
return PublicKey(
self.ledger, verifying_key, self.chain_code,
self.n, self.depth, parent_pubkey
)
return PubKey(self.ledger, verifying_key, self.chain_code, self.n, self.depth,
parent_pubkey)
def ec_point(self):
return self.public_key.ec_point()
@@ -245,12 +182,11 @@ class PrivateKey(_KeyBase):
""" Return the private key encoded in Wallet Import Format. """
return self.ledger.private_key_to_wif(self.private_key_bytes)
@property
def address(self):
""" The public key as a P2PKH address. """
return self.public_key.address
def child(self, n) -> 'PrivateKey':
def child(self, n):
""" Return the derived child extended private key at index N."""
if not 0 <= n < (1 << 32):
raise ValueError('invalid BIP32 private key child number')
@@ -269,28 +205,6 @@ class PrivateKey(_KeyBase):
""" Produce a signature for piece of data by double hashing it and signing the hash. """
return self.signing_key.sign(data, hasher=double_sha256)
def sign_compact(self, digest):
""" Produce a compact signature. """
key = self.signing_key
signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')
signed = libsecp256k1.secp256k1_ecdsa_sign(
key.context.ctx, signature, digest, key.secret,
libsecp256k1_ffi.NULL, libsecp256k1_ffi.NULL
)
if not signed:
raise ValueError('The private key was invalid.')
serialized = libsecp256k1_ffi.new('unsigned char[%d]' % CDATA_SIG_LENGTH)
compacted = libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(
key.context.ctx, serialized, signature
)
if compacted != 1:
raise ValueError('The signature could not be compacted.')
return bytes(libsecp256k1_ffi.buffer(serialized, CDATA_SIG_LENGTH))
def identifier(self):
"""Return the key's identifier as 20 bytes."""
return self.public_key.identifier()
@@ -302,12 +216,9 @@ class PrivateKey(_KeyBase):
b'\0' + self.private_key_bytes
)
def to_pem(self):
return self.signing_key.to_pem()
def _from_extended_key(ledger, ekey):
"""Return a PublicKey or PrivateKey from an extended key raw bytes."""
"""Return a PubKey or PrivateKey from an extended key raw bytes."""
if not isinstance(ekey, (bytes, bytearray)):
raise TypeError('extended key must be raw bytes')
if len(ekey) != 78:
@@ -319,7 +230,7 @@ def _from_extended_key(ledger, ekey):
if ekey[:4] == ledger.extended_public_key_prefix:
pubkey = ekey[45:]
key = PublicKey(ledger, pubkey, chain_code, n, depth)
key = PubKey(ledger, pubkey, chain_code, n, depth)
elif ekey[:4] == ledger.extended_private_key_prefix:
if ekey[45] != 0:
raise ValueError('invalid extended private key prefix byte')
@@ -337,6 +248,6 @@ def from_extended_key_string(ledger, ekey_str):
xpub6BsnM1W2Y7qLMiuhi7f7dbAwQZ5Cz5gYJCRzTNainXzQXYjFwtuQXHd
3qfi3t3KJtHxshXezfjft93w4UE7BGMtKwhqEHae3ZA7d823DVrL
return a PublicKey or PrivateKey.
return a PubKey or PrivateKey.
"""
return _from_extended_key(ledger, Base58.decode_check(ekey_str))
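
A derivation sketch (not part of this diff); the Ledger/Config construction mirrors what lbry/db/database.py later in this diff does, and the seed is hypothetical.

from lbry import Config, Ledger

ledger = Ledger(Config.with_null_dir())
root = PrivateKey.from_seed(ledger, b'\x01' * 32)   # master key per BIP32
child = root.child(0).child(0)                      # chain 0, index 0
print(child.public_key.address)                     # P2PKH address via ledger prefixes
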

lbry/db/__init__.py (new file)

@@ -0,0 +1,5 @@
from .database import Database, Result
from .constants import (
TXO_TYPES, SPENDABLE_TYPE_CODES,
CLAIM_TYPE_CODES, CLAIM_TYPE_NAMES
)

lbry/db/constants.py (new file)

@@ -0,0 +1,74 @@
MAX_QUERY_VARIABLES = 900
TXO_TYPES = {
"other": 0,
"stream": 1,
"channel": 2,
"support": 3,
"purchase": 4,
"collection": 5,
"repost": 6,
}
CLAIM_TYPE_NAMES = [
'stream',
'channel',
'collection',
'repost',
]
CONTENT_TYPE_NAMES = [
name for name in CLAIM_TYPE_NAMES if name != "channel"
]
CLAIM_TYPE_CODES = [
TXO_TYPES[name] for name in CLAIM_TYPE_NAMES
]
CONTENT_TYPE_CODES = [
TXO_TYPES[name] for name in CONTENT_TYPE_NAMES
]
SPENDABLE_TYPE_CODES = [
TXO_TYPES['other'],
TXO_TYPES['purchase']
]
STREAM_TYPES = {
'video': 1,
'audio': 2,
'image': 3,
'document': 4,
'binary': 5,
'model': 6
}
MATURE_TAGS = (
'nsfw', 'porn', 'xxx', 'mature', 'adult', 'sex'
)
ATTRIBUTE_ARRAY_MAX_LENGTH = 100
SEARCH_INTEGER_PARAMS = {
'height', 'creation_height', 'activation_height', 'expiration_height',
'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount',
'tx_position', 'channel_join', 'reposted',
'amount', 'effective_amount', 'support_amount',
'trending_group', 'trending_mixed',
'trending_local', 'trending_global',
}
SEARCH_PARAMS = {
'name', 'text', 'claim_id', 'claim_ids', 'txid', 'nout', 'channel', 'channel_ids', 'not_channel_ids',
'public_key_id', 'claim_type', 'stream_types', 'media_types', 'fee_currency',
'has_channel_signature', 'signature_valid',
'any_tags', 'all_tags', 'not_tags', 'reposted_claim_id',
'any_locations', 'all_locations', 'not_locations',
'any_languages', 'all_languages', 'not_languages',
'is_controlling', 'limit', 'offset', 'order_by',
'no_totals',
} | SEARCH_INTEGER_PARAMS
SEARCH_ORDER_FIELDS = {
'name', 'claim_hash', 'claim_id'
} | SEARCH_INTEGER_PARAMS
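
These sets enable the kind of argument validation that Database.search_claims later in this diff keeps commented out; a sketch (not part of this diff):

def check_search_kwargs(**constraints):
    invalid = set(constraints) - SEARCH_PARAMS
    if invalid:
        raise ValueError(f"Search query contains invalid arguments: {invalid}")

check_search_kwargs(name='one', claim_type='stream', order_by='height')  # passes
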

lbry/db/database.py (new file)

@@ -0,0 +1,327 @@
import os
import asyncio
import tempfile
import multiprocessing as mp
from typing import List, Optional, Iterable, Iterator, TypeVar, Generic, TYPE_CHECKING, Dict
from concurrent.futures import Executor, ThreadPoolExecutor, ProcessPoolExecutor
from functools import partial
from sqlalchemy import create_engine, text
from lbry.event import EventController
from lbry.crypto.bip32 import PubKey
from lbry.blockchain.transaction import Transaction, Output
from .constants import TXO_TYPES, CLAIM_TYPE_CODES
from .query_context import initialize, uninitialize, ProgressPublisher
from . import queries as q
from . import sync
if TYPE_CHECKING:
from lbry.blockchain.ledger import Ledger
def clean_wallet_account_ids(constraints):
wallet = constraints.pop('wallet', None)
account = constraints.pop('account', None)
accounts = constraints.pop('accounts', [])
if account and not accounts:
accounts = [account]
if wallet:
constraints['wallet_account_ids'] = [account.id for account in wallet.accounts]
if not accounts:
accounts = wallet.accounts
if accounts:
constraints['account_ids'] = [account.id for account in accounts]
async def add_channel_keys_to_txo_results(accounts: List, txos: Iterable[Output]):
sub_channels = set()
for txo in txos:
if txo.claim.is_channel:
for account in accounts:
private_key = await account.get_channel_private_key(
txo.claim.channel.public_key_bytes
)
if private_key:
txo.private_key = private_key
break
if txo.channel is not None:
sub_channels.add(txo.channel)
if sub_channels:
await add_channel_keys_to_txo_results(accounts, sub_channels)
ResultType = TypeVar('ResultType')
class Result(Generic[ResultType]):
__slots__ = 'rows', 'total', 'censor'
def __init__(self, rows: List[ResultType], total, censor=None):
self.rows = rows
self.total = total
self.censor = censor
def __getitem__(self, item: int) -> ResultType:
return self.rows[item]
def __iter__(self) -> Iterator[ResultType]:
return iter(self.rows)
def __len__(self):
return len(self.rows)
def __repr__(self):
return repr(self.rows)
class Database:
def __init__(self, ledger: 'Ledger'):
self.url = ledger.conf.db_url_or_default
self.ledger = ledger
self.workers = self._normalize_worker_processes(ledger.conf.workers)
self.executor: Optional[Executor] = None
self.message_queue = mp.Queue()
self.stop_event = mp.Event()
self._on_progress_controller = EventController()
self.on_progress = self._on_progress_controller.stream
self.progress_publisher = ProgressPublisher(
self.message_queue, self._on_progress_controller
)
@staticmethod
def _normalize_worker_processes(workers):
if workers == 0:
return os.cpu_count()
elif workers > 0:
return workers
return 1
@classmethod
def temp_from_url_regtest(cls, db_url, lbrycrd_dir=None):
from lbry import Config, RegTestLedger # pylint: disable=import-outside-toplevel
directory = tempfile.mkdtemp()
conf = Config.with_same_dir(directory).set(db_url=db_url)
if lbrycrd_dir is not None:
conf.lbrycrd_dir = lbrycrd_dir
ledger = RegTestLedger(conf)
return cls(ledger)
@classmethod
def temp_sqlite_regtest(cls, lbrycrd_dir=None):
from lbry import Config, RegTestLedger # pylint: disable=import-outside-toplevel
directory = tempfile.mkdtemp()
conf = Config.with_same_dir(directory)
if lbrycrd_dir is not None:
conf.lbrycrd_dir = lbrycrd_dir
ledger = RegTestLedger(conf)
return cls(ledger)
@classmethod
def temp_sqlite(cls):
from lbry import Config, Ledger # pylint: disable=import-outside-toplevel
conf = Config.with_same_dir(tempfile.mkdtemp())
return cls(Ledger(conf))
@classmethod
def from_url(cls, db_url):
from lbry import Config, Ledger # pylint: disable=import-outside-toplevel
return cls(Ledger(Config.with_null_dir().set(db_url=db_url)))
@classmethod
def in_memory(cls):
return cls.from_url('sqlite:///:memory:')
def sync_create(self, name):
engine = create_engine(self.url)
db = engine.connect()
db.execute(text("COMMIT"))
db.execute(text(f"CREATE DATABASE {name}"))
async def create(self, name):
return await asyncio.get_running_loop().run_in_executor(None, self.sync_create, name)
def sync_drop(self, name):
engine = create_engine(self.url)
db = engine.connect()
db.execute(text("COMMIT"))
db.execute(text(f"DROP DATABASE IF EXISTS {name}"))
async def drop(self, name):
return await asyncio.get_running_loop().run_in_executor(None, self.sync_drop, name)
async def open(self):
assert self.executor is None, "Database already open."
self.progress_publisher.start()
kwargs = {
"initializer": initialize,
"initargs": (
self.ledger,
self.message_queue, self.stop_event
)
}
if self.workers > 1:
self.executor = ProcessPoolExecutor(max_workers=self.workers, **kwargs)
else:
self.executor = ThreadPoolExecutor(max_workers=1, **kwargs)
return await self.run(q.check_version_and_create_tables)
async def close(self):
self.progress_publisher.stop()
if self.executor is not None:
if isinstance(self.executor, ThreadPoolExecutor):
await self.run(uninitialize)
self.executor.shutdown()
self.executor = None
# fixes "OSError: handle is closed"
# seems to only happen when running in PyCharm
# https://github.com/python/cpython/pull/6084#issuecomment-564585446
# TODO: delete this in Python 3.8/3.9?
from concurrent.futures.process import _threads_wakeups # pylint: disable=import-outside-toplevel
_threads_wakeups.clear()
async def run(self, func, *args, **kwargs):
if kwargs:
clean_wallet_account_ids(kwargs)
return await asyncio.get_running_loop().run_in_executor(
self.executor, partial(func, *args, **kwargs)
)
async def fetch_result(self, func, *args, **kwargs) -> Result:
rows, total = await self.run(func, *args, **kwargs)
return Result(rows, total)
async def execute(self, sql):
return await self.run(q.execute, sql)
async def execute_fetchall(self, sql):
return await self.run(q.execute_fetchall, sql)
async def has_claims(self):
return await self.run(q.has_claims)
async def has_supports(self):
return await self.run(q.has_supports)
async def get_best_block_height(self) -> int:
return await self.run(q.get_best_block_height)
async def process_all_things_after_sync(self):
return await self.run(sync.process_all_things_after_sync)
async def insert_block(self, block):
return await self.run(q.insert_block, block)
async def insert_transaction(self, block_hash, tx):
return await self.run(q.insert_transaction, block_hash, tx)
async def update_address_used_times(self, addresses):
return await self.run(q.update_address_used_times, addresses)
async def reserve_outputs(self, txos, is_reserved=True):
txo_hashes = [txo.hash for txo in txos]
if txo_hashes:
return await self.run(
q.reserve_outputs, txo_hashes, is_reserved
)
async def release_outputs(self, txos):
return await self.reserve_outputs(txos, is_reserved=False)
async def release_tx(self, tx):
return await self.release_outputs([txi.txo_ref.txo for txi in tx.inputs])
async def release_all_outputs(self, account):
return await self.run(q.release_all_outputs, account.id)
async def get_balance(self, **constraints):
return await self.run(q.get_balance, **constraints)
async def get_report(self, accounts):
return await self.run(q.get_report, accounts=accounts)
async def get_addresses(self, **constraints) -> Result[dict]:
addresses = await self.fetch_result(q.get_addresses, **constraints)
if addresses and 'pubkey' in addresses[0]:
for address in addresses:
address['pubkey'] = PubKey(
self.ledger, bytes(address.pop('pubkey')), bytes(address.pop('chain_code')),
address.pop('n'), address.pop('depth')
)
return addresses
async def get_all_addresses(self):
return await self.run(q.get_all_addresses)
async def get_address(self, **constraints):
for address in await self.get_addresses(limit=1, **constraints):
return address
async def add_keys(self, account, chain, pubkeys):
return await self.run(q.add_keys, account, chain, pubkeys)
async def get_transactions(self, **constraints) -> Result[Transaction]:
return await self.fetch_result(q.get_transactions, **constraints)
async def get_transaction(self, **constraints) -> Optional[Transaction]:
txs = await self.get_transactions(limit=1, **constraints)
if txs:
return txs[0]
async def get_purchases(self, **constraints) -> Result[Output]:
return await self.fetch_result(q.get_purchases, **constraints)
async def search_claims(self, **constraints) -> Result[Output]:
#assert set(constraints).issubset(SEARCH_PARAMS), \
# f"Search query contains invalid arguments: {set(constraints).difference(SEARCH_PARAMS)}"
claims, total, censor = await self.run(q.search_claims, **constraints)
return Result(claims, total, censor)
async def protobuf_search_claims(self, **constraints) -> str:
return await self.run(q.protobuf_search_claims, **constraints)
async def search_supports(self, **constraints) -> Result[Output]:
return await self.fetch_result(q.search_supports, **constraints)
async def resolve(self, urls, **kwargs) -> Dict[str, Output]:
return await self.run(q.resolve, urls, **kwargs)
async def protobuf_resolve(self, urls, **kwargs) -> str:
return await self.run(q.protobuf_resolve, urls, **kwargs)
async def get_txo_sum(self, **constraints) -> int:
return await self.run(q.get_txo_sum, **constraints)
async def get_txo_plot(self, **constraints) -> List[dict]:
return await self.run(q.get_txo_plot, **constraints)
async def get_txos(self, **constraints) -> Result[Output]:
txos = await self.fetch_result(q.get_txos, **constraints)
if 'wallet' in constraints:
await add_channel_keys_to_txo_results(constraints['wallet'].accounts, txos)
return txos
async def get_utxos(self, **constraints) -> Result[Output]:
return await self.get_txos(spent_height=0, **constraints)
async def get_supports(self, **constraints) -> Result[Output]:
return await self.get_utxos(txo_type=TXO_TYPES['support'], **constraints)
async def get_claims(self, **constraints) -> Result[Output]:
if 'txo_type' not in constraints:
constraints['txo_type__in'] = CLAIM_TYPE_CODES
txos = await self.fetch_result(q.get_txos, **constraints)
if 'wallet' in constraints:
await add_channel_keys_to_txo_results(constraints['wallet'].accounts, txos)
return txos
async def get_streams(self, **constraints) -> Result[Output]:
return await self.get_claims(txo_type=TXO_TYPES['stream'], **constraints)
async def get_channels(self, **constraints) -> Result[Output]:
return await self.get_claims(txo_type=TXO_TYPES['channel'], **constraints)
async def get_collections(self, **constraints) -> Result[Output]:
return await self.get_claims(txo_type=TXO_TYPES['collection'], **constraints)
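
Usage sketch (not part of this diff): an in-memory Database over the default SQLite, relying on in_memory()/open()/close() above.

import asyncio

async def main():
    db = Database.in_memory()
    await db.open()                          # creates tables, starts the executor
    print(await db.get_best_block_height())  # -1 on an empty database
    await db.close()

asyncio.run(main())
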


@@ -0,0 +1,52 @@
from lbry.wallet.database import constraints_to_sql
CREATE_FULL_TEXT_SEARCH = """
create virtual table if not exists search using fts5(
claim_name, channel_name, title, description, author, tags,
content=claim, tokenize=porter
);
"""
FTS_ORDER_BY = "bm25(search, 4.0, 8.0, 1.0, 0.5, 1.0, 0.5)"
def fts_action_sql(claims=None, action='insert'):
select = {
'rowid': "claim.rowid",
'claim_name': "claim.normalized",
'channel_name': "channel.normalized",
'title': "claim.title",
'description': "claim.description",
'author': "claim.author",
'tags': "(select group_concat(tag, ' ') from tag where tag.claim_hash=claim.claim_hash)"
}
if action == 'delete':
select['search'] = '"delete"'
where, values = "", {}
if claims:
where, values = constraints_to_sql({'claim.claim_hash__in': claims})
where = 'WHERE '+where
return f"""
INSERT INTO search ({','.join(select.keys())})
SELECT {','.join(select.values())} FROM claim
LEFT JOIN claim as channel ON (claim.channel_hash=channel.claim_hash) {where}
""", values
def update_full_text_search(action, outputs, db, is_first_sync):
if is_first_sync:
return
if not outputs:
return
if action in ("before-delete", "before-update"):
db.execute(*fts_action_sql(outputs, 'delete'))
elif action in ("after-insert", "after-update"):
db.execute(*fts_action_sql(outputs, 'insert'))
else:
raise ValueError(f"Invalid action for updating full text search: '{action}'")
def first_sync_finished(db):
db.execute(*fts_action_sql())
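
A query sketch (not part of this diff) against the fts5 table created above, ranked by the bm25 weights in FTS_ORDER_BY; `db` is a hypothetical sqlite3-style connection whose claims were already indexed via fts_action_sql().

sql = (
    "SELECT rowid FROM search WHERE search MATCH ? "
    f"ORDER BY {FTS_ORDER_BY} LIMIT 20"
)
# rows = db.execute(sql, ('mountain biking',)).fetchall()   # hypothetical invocation
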


@@ -0,0 +1,5 @@
from .base import *
from .txio import *
from .search import *
from .resolve import *
from .address import *


@@ -0,0 +1,78 @@
import logging
from typing import Tuple, List, Optional
from sqlalchemy import func
from sqlalchemy.future import select
from ..utils import query
from ..query_context import context
from ..tables import TXO, PubkeyAddress, AccountAddress
log = logging.getLogger(__name__)
def update_address_used_times(addresses):
context().execute(
PubkeyAddress.update()
.values(used_times=(
select(func.count(TXO.c.address))
.where(TXO.c.address == PubkeyAddress.c.address)
.scalar_subquery()  # correlated count, not a 1-tuple (stray trailing comma removed)
))
.where(PubkeyAddress.c.address.in_(addresses))  # SQLAlchemy's in_(), not _in()
)
def select_addresses(cols, **constraints):
return context().fetchall(query(
[AccountAddress, PubkeyAddress],
select(*cols).select_from(PubkeyAddress.join(AccountAddress)),
**constraints
))
def get_addresses(cols=None, include_total=False, **constraints) -> Tuple[List[dict], Optional[int]]:
if cols is None:
cols = (
PubkeyAddress.c.address,
PubkeyAddress.c.used_times,
AccountAddress.c.account,
AccountAddress.c.chain,
AccountAddress.c.pubkey,
AccountAddress.c.chain_code,
AccountAddress.c.n,
AccountAddress.c.depth
)
return (
select_addresses(cols, **constraints),
get_address_count(**constraints) if include_total else None
)
def get_address_count(**constraints):
count = select_addresses([func.count().label('total')], **constraints)
return count[0]['total'] or 0
def get_all_addresses():
return context().execute(select(PubkeyAddress.c.address))
def add_keys(account, chain, pubkeys):
c = context()
c.execute(
c.insert_or_ignore(PubkeyAddress)
.values([{'address': k.address} for k in pubkeys])
)
c.execute(
c.insert_or_ignore(AccountAddress)
.values([{
'account': account.id,
'address': k.address,
'chain': chain,
'pubkey': k.pubkey_bytes,
'chain_code': k.chain_code,
'n': k.n,
'depth': k.depth
} for k in pubkeys])
)

lbry/db/queries/base.py (new file)

@@ -0,0 +1,58 @@
from sqlalchemy import text
from sqlalchemy.future import select
from ..query_context import context
from ..tables import SCHEMA_VERSION, metadata, Version, Claim, Support, Block, TX
def execute(sql):
return context().execute(text(sql))
def execute_fetchall(sql):
return context().fetchall(text(sql))
def has_claims():
return context().has_records(Claim)
def has_supports():
return context().has_records(Support)
def get_best_block_height():
return context().fetchmax(Block.c.height, -1)
def insert_block(block):
context().get_bulk_loader().add_block(block).flush()
def insert_transaction(block_hash, tx):
context().get_bulk_loader().add_transaction(block_hash, tx).flush(TX)
def check_version_and_create_tables():
with context("db.connecting") as ctx:
if ctx.has_table('version'):
version = ctx.fetchone(select(Version.c.version).limit(1))
if version and version['version'] == SCHEMA_VERSION:
return
metadata.drop_all(ctx.engine)
metadata.create_all(ctx.engine)
ctx.execute(Version.insert().values(version=SCHEMA_VERSION))
for table in metadata.sorted_tables:
disable_trigger_and_constraints(table.name)
def disable_trigger_and_constraints(table_name):
ctx = context()
if ctx.is_postgres:
ctx.execute(text(f"ALTER TABLE {table_name} DISABLE TRIGGER ALL;"))
if table_name in ('tag', 'stake'):
return
if ctx.is_postgres:
ctx.execute(text(
f"ALTER TABLE {table_name} DROP CONSTRAINT {table_name}_pkey CASCADE;"
))


@@ -0,0 +1,95 @@
import logging
import itertools
from operator import itemgetter
from typing import List, Dict
from lbry.schema.url import URL
from lbry.schema.result import Outputs as ResultOutput
from lbry.error import ResolveCensoredError
from lbry.blockchain.transaction import Output
from ..query_context import context
from .search import search_claims
log = logging.getLogger(__name__)
def _get_referenced_rows(txo_rows: List[dict], censor_channels: List[bytes]):
# censor = context().get_resolve_censor()
repost_hashes = set(filter(None, map(itemgetter('reposted_claim_hash'), txo_rows)))
channel_hashes = set(itertools.chain(
filter(None, map(itemgetter('channel_hash'), txo_rows)),
censor_channels
))
reposted_txos = []
if repost_hashes:
reposted_txos = search_claims(**{'claim.claim_hash__in': repost_hashes})
channel_hashes |= set(filter(None, map(itemgetter('channel_hash'), reposted_txos)))
channel_txos = []
if channel_hashes:
channel_txos = search_claims(**{'claim.claim_hash__in': channel_hashes})
# channels must come first for client side inflation to work properly
return channel_txos + reposted_txos
def protobuf_resolve(urls, **kwargs) -> str:
return ResultOutput.to_base64([resolve_url(raw_url) for raw_url in urls], [])
def resolve(urls, **kwargs) -> Dict[str, Output]:
return {url: resolve_url(url) for url in urls}
#txo_rows = [resolve_url(raw_url) for raw_url in urls]
#extra_txo_rows = _get_referenced_rows(
# [txo for txo in txo_rows if isinstance(txo, dict)],
# [txo.censor_hash for txo in txo_rows if isinstance(txo, ResolveCensoredError)]
#)
#return txo_rows, extra_txo_rows
def resolve_url(raw_url):
censor = context().get_resolve_censor()
try:
url = URL.parse(raw_url)
except ValueError as e:
return e
channel = None
if url.has_channel:
q = url.channel.to_dict()
if set(q) == {'name'}:
q['is_controlling'] = True
else:
q['order_by'] = ['^creation_height']
#matches = search_claims(censor, **q, limit=1)
matches = search_claims(**q, limit=1)[0]
if matches:
channel = matches[0]
elif censor.censored:
return ResolveCensoredError(raw_url, next(iter(censor.censored)))
else:
return LookupError(f'Could not find channel in "{raw_url}".')
if url.has_stream:
q = url.stream.to_dict()
if channel is not None:
q['order_by'] = ['^creation_height']
q['channel_hash'] = channel.claim_hash
q['is_signature_valid'] = True
elif set(q) == {'name'}:
q['is_controlling'] = True
# matches = search_claims(censor, **q, limit=1)
matches = search_claims(**q, limit=1)[0]
if matches:
return matches[0]
elif censor.censored:
return ResolveCensoredError(raw_url, next(iter(censor.censored)))
else:
return LookupError(f'Could not find claim at "{raw_url}".')
return channel
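
Usage sketch (not part of this diff): resolve() maps each URL to an Output, a LookupError, or a ResolveCensoredError, and is normally reached through Database.resolve() so a query context is initialized; the URLs are hypothetical.

results = resolve(['lbry://@channel/stream', 'lbry://no-such-claim'])
for url, result in results.items():
    if isinstance(result, Exception):
        print(url, 'failed:', result)
    else:
        print(url, '->', result.claim_name)
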

lbry/db/queries/search.py (new file)

@@ -0,0 +1,371 @@
import struct
import logging
from decimal import Decimal
from binascii import unhexlify
from typing import Tuple, List, Optional
from sqlalchemy import func, case
from sqlalchemy.future import select, Select
from lbry.schema.tags import clean_tags
from lbry.schema.result import Censor, Outputs as ResultOutput
from lbry.schema.url import normalize_name
from lbry.blockchain.transaction import Output
from ..utils import query
from ..query_context import context
from ..tables import TX, TXO, Claim, Support
from ..constants import (
TXO_TYPES, STREAM_TYPES, ATTRIBUTE_ARRAY_MAX_LENGTH,
SEARCH_INTEGER_PARAMS, SEARCH_ORDER_FIELDS
)
from .txio import BASE_SELECT_TXO_COLUMNS, rows_to_txos
log = logging.getLogger(__name__)
BASE_SELECT_SUPPORT_COLUMNS = BASE_SELECT_TXO_COLUMNS + [
Support.c.channel_hash,
Support.c.is_signature_valid,
]
def select_supports(cols: List = None, **constraints) -> Select:
if cols is None:
cols = BASE_SELECT_SUPPORT_COLUMNS
joins = Support.join(TXO).join(TX)
return query([Support], select(*cols).select_from(joins), **constraints)
def search_supports(**constraints) -> Tuple[List[Output], Optional[int]]:
total = None
if constraints.pop('include_total', False):
total = search_support_count(**constraints)
if 'claim_id' in constraints:
constraints['claim_hash'] = unhexlify(constraints.pop('claim_id'))[::-1]
rows = context().fetchall(select_supports(**constraints))
txos = rows_to_txos(rows, include_tx=False)
return txos, total
def search_support_count(**constraints) -> int:
constraints.pop('offset', None)
constraints.pop('limit', None)
constraints.pop('order_by', None)
count = context().fetchall(select_supports([func.count().label('total')], **constraints))
return count[0]['total'] or 0
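# Usage sketch (illustrative; the claim id is hypothetical): include_total
# runs the count query before pagination constraints are applied:
#
#   txos, total = search_supports(claim_id='abc123', limit=20, include_total=True)
#   # len(txos) <= 20, while total counts every matching support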
channel_claim = Claim.alias('channel')
BASE_SELECT_CLAIM_COLUMNS = BASE_SELECT_TXO_COLUMNS + [
Claim.c.activation_height,
Claim.c.takeover_height,
Claim.c.creation_height,
Claim.c.is_controlling,
Claim.c.channel_hash,
Claim.c.reposted_count,
Claim.c.reposted_claim_hash,
Claim.c.short_url,
Claim.c.signed_claim_count,
Claim.c.signed_support_count,
(Claim.c.amount + Claim.c.staked_support_amount).label('staked_amount'),
Claim.c.staked_support_amount,
Claim.c.staked_support_count,
Claim.c.is_signature_valid,
case([(
channel_claim.c.short_url.isnot(None),
channel_claim.c.short_url + '/' + Claim.c.short_url
)]).label('canonical_url'),
]
def select_claims(cols: List = None, for_count=False, **constraints) -> Select:
if cols is None:
cols = BASE_SELECT_CLAIM_COLUMNS
if 'order_by' in constraints:
order_by_parts = constraints['order_by']
if isinstance(order_by_parts, str):
order_by_parts = [order_by_parts]
sql_order_by = []
for order_by in order_by_parts:
is_asc = order_by.startswith('^')
column = order_by[1:] if is_asc else order_by
if column not in SEARCH_ORDER_FIELDS:
raise NameError(f'{column} is not a valid order_by field')
if column == 'name':
column = 'claim_name'
nulls_last = ''
if column == 'release_time':
nulls_last = ' NULLS LAST'
sql_order_by.append(
f"claim.{column} ASC{nulls_last}" if is_asc else f"claim.{column} DESC{nulls_last}"
)
constraints['order_by'] = sql_order_by
ops = {'<=': '__lte', '>=': '__gte', '<': '__lt', '>': '__gt'}
for constraint in SEARCH_INTEGER_PARAMS:
if constraint in constraints:
value = constraints.pop(constraint)
postfix = ''
if isinstance(value, str):
if len(value) >= 2 and value[:2] in ops:
postfix, value = ops[value[:2]], value[2:]
elif len(value) >= 1 and value[0] in ops:
postfix, value = ops[value[0]], value[1:]
if constraint == 'fee_amount':
value = Decimal(value)*1000
constraints[f'{constraint}{postfix}'] = int(value)
if 'sequence' in constraints:
constraints['order_by'] = 'activation_height ASC'
constraints['offset'] = int(constraints.pop('sequence')) - 1
constraints['limit'] = 1
if 'amount_order' in constraints:
constraints['order_by'] = 'effective_amount DESC'
constraints['offset'] = int(constraints.pop('amount_order')) - 1
constraints['limit'] = 1
if 'claim_id' in constraints:
claim_id = constraints.pop('claim_id')
if len(claim_id) == 40:
constraints['claim_id'] = claim_id
else:
constraints['claim_id__like'] = f'{claim_id[:40]}%'
elif 'claim_ids' in constraints:
constraints['claim_id__in'] = set(constraints.pop('claim_ids'))
if 'reposted_claim_id' in constraints:
constraints['reposted_claim_hash'] = unhexlify(constraints.pop('reposted_claim_id'))[::-1]
if 'name' in constraints:
constraints['normalized'] = normalize_name(constraints.pop('name'))
if 'public_key_id' in constraints:
constraints['public_key_hash'] = (
context().ledger.address_to_hash160(constraints.pop('public_key_id')))
if 'channel_hash' in constraints:
constraints['channel_hash'] = constraints.pop('channel_hash')
if 'channel_ids' in constraints:
channel_ids = constraints.pop('channel_ids')
if channel_ids:
constraints['channel_hash__in'] = {
unhexlify(cid)[::-1] for cid in channel_ids
}
if 'not_channel_ids' in constraints:
not_channel_ids = constraints.pop('not_channel_ids')
if not_channel_ids:
not_channel_ids_binary = {
unhexlify(ncid)[::-1] for ncid in not_channel_ids
}
constraints['claim_hash__not_in#not_channel_ids'] = not_channel_ids_binary
if constraints.get('has_channel_signature', False):
constraints['channel_hash__not_in'] = not_channel_ids_binary
else:
constraints['null_or_not_channel__or'] = {
'signature_valid__is_null': True,
'channel_hash__not_in': not_channel_ids_binary
}
if 'signature_valid' in constraints:
has_channel_signature = constraints.pop('has_channel_signature', False)
if has_channel_signature:
constraints['signature_valid'] = constraints.pop('signature_valid')
else:
constraints['null_or_signature__or'] = {
'signature_valid__is_null': True,
'signature_valid': constraints.pop('signature_valid')
}
elif constraints.pop('has_channel_signature', False):
constraints['signature_valid__is_not_null'] = True
if 'txid' in constraints:
tx_hash = unhexlify(constraints.pop('txid'))[::-1]
nout = constraints.pop('nout', 0)
constraints['txo_hash'] = tx_hash + struct.pack('<I', nout)
if 'claim_type' in constraints:
claim_types = constraints.pop('claim_type')
if isinstance(claim_types, str):
claim_types = [claim_types]
if claim_types:
constraints['claim_type__in'] = {
TXO_TYPES[claim_type] for claim_type in claim_types
}
if 'stream_types' in constraints:
stream_types = constraints.pop('stream_types')
if stream_types:
constraints['stream_type__in'] = {
STREAM_TYPES[stream_type] for stream_type in stream_types
}
if 'media_types' in constraints:
media_types = constraints.pop('media_types')
if media_types:
constraints['media_type__in'] = set(media_types)
if 'fee_currency' in constraints:
constraints['fee_currency'] = constraints.pop('fee_currency').lower()
_apply_constraints_for_array_attributes(constraints, 'tag', clean_tags, for_count)
_apply_constraints_for_array_attributes(constraints, 'language', lambda _: _, for_count)
_apply_constraints_for_array_attributes(constraints, 'location', lambda _: _, for_count)
if 'text' in constraints:
# TODO: fix
constraints["search"] = constraints.pop("text")
return query(
[Claim],
select(*cols)
.select_from(
Claim.join(TXO).join(TX)
.join(channel_claim, Claim.c.channel_hash == channel_claim.c.claim_hash, isouter=True)
), **constraints
)
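# Usage sketch (illustrative, assuming 'height' is a registered integer param
# and 'name' a registered order field): integer constraints accept comparison
# prefixes and order_by accepts '^' for ascending:
#
#   txos, total, censor = search_claims(
#       height='<=400000',   # parsed into height__lte=400000
#       order_by=['^name'],  # rendered as "claim.claim_name ASC"
#       limit=10,
#   )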
def protobuf_search_claims(**constraints) -> str:
txos, _, censor = search_claims(**constraints)
return ResultOutput.to_base64(txos, [], blocked=censor)
def search_claims(**constraints) -> Tuple[List[Output], Optional[int], Optional[Censor]]:
total = None
if constraints.pop('include_total', False):
total = search_claim_count(**constraints)
constraints['offset'] = abs(constraints.get('offset', 0))
constraints['limit'] = min(abs(constraints.get('limit', 10)), 50)
ctx = context()
search_censor = ctx.get_search_censor()
rows = context().fetchall(select_claims(**constraints))
txos = rows_to_txos(rows, include_tx=False)
return txos, total, search_censor
def search_claim_count(**constraints) -> int:
constraints.pop('offset', None)
constraints.pop('limit', None)
constraints.pop('order_by', None)
count = context().fetchall(select_claims([func.count().label('total')], **constraints))
return count[0]['total'] or 0
CLAIM_HASH_OR_REPOST_HASH_SQL = f"""
CASE WHEN claim.claim_type = {TXO_TYPES['repost']}
THEN claim.reposted_claim_hash
ELSE claim.claim_hash
END
"""
def _apply_constraints_for_array_attributes(constraints, attr, cleaner, for_count=False):
any_items = set(cleaner(constraints.pop(f'any_{attr}s', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH])
all_items = set(cleaner(constraints.pop(f'all_{attr}s', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH])
not_items = set(cleaner(constraints.pop(f'not_{attr}s', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH])
all_items = {item for item in all_items if item not in not_items}
any_items = {item for item in any_items if item not in not_items}
any_queries = {}
# if attr == 'tag':
# common_tags = any_items & COMMON_TAGS.keys()
# if common_tags:
# any_items -= common_tags
# if len(common_tags) < 5:
# for item in common_tags:
# index_name = COMMON_TAGS[item]
# any_queries[f'#_common_tag_{index_name}'] = f"""
# EXISTS(
# SELECT 1 FROM tag INDEXED BY tag_{index_name}_idx
# WHERE {CLAIM_HASH_OR_REPOST_HASH_SQL}=tag.claim_hash
# AND tag = '{item}'
# )
# """
# elif len(common_tags) >= 5:
# constraints.update({
# f'$any_common_tag{i}': item for i, item in enumerate(common_tags)
# })
# values = ', '.join(
# f':$any_common_tag{i}' for i in range(len(common_tags))
# )
# any_queries[f'#_any_common_tags'] = f"""
# EXISTS(
# SELECT 1 FROM tag WHERE {CLAIM_HASH_OR_REPOST_HASH_SQL}=tag.claim_hash
# AND tag IN ({values})
# )
# """
if any_items:
constraints.update({
f'$any_{attr}{i}': item for i, item in enumerate(any_items)
})
values = ', '.join(
f':$any_{attr}{i}' for i in range(len(any_items))
)
if for_count or attr == 'tag':
any_queries[f'#_any_{attr}'] = f"""
{CLAIM_HASH_OR_REPOST_HASH_SQL} IN (
SELECT claim_hash FROM {attr} WHERE {attr} IN ({values})
)
"""
else:
any_queries[f'#_any_{attr}'] = f"""
EXISTS(
SELECT 1 FROM {attr} WHERE
{CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash
AND {attr} IN ({values})
)
"""
if len(any_queries) == 1:
constraints.update(any_queries)
elif len(any_queries) > 1:
constraints[f'ORed_{attr}_queries__any'] = any_queries
if all_items:
constraints[f'$all_{attr}_count'] = len(all_items)
constraints.update({
f'$all_{attr}{i}': item for i, item in enumerate(all_items)
})
values = ', '.join(
f':$all_{attr}{i}' for i in range(len(all_items))
)
if for_count:
constraints[f'#_all_{attr}'] = f"""
{CLAIM_HASH_OR_REPOST_HASH_SQL} IN (
SELECT claim_hash FROM {attr} WHERE {attr} IN ({values})
GROUP BY claim_hash HAVING COUNT({attr}) = :$all_{attr}_count
)
"""
else:
constraints[f'#_all_{attr}'] = f"""
{len(all_items)}=(
SELECT count(*) FROM {attr} WHERE
{CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash
AND {attr} IN ({values})
)
"""
if not_items:
constraints.update({
f'$not_{attr}{i}': item for i, item in enumerate(not_items)
})
values = ', '.join(
f':$not_{attr}{i}' for i in range(len(not_items))
)
if for_count:
constraints[f'#_not_{attr}'] = f"""
{CLAIM_HASH_OR_REPOST_HASH_SQL} NOT IN (
SELECT claim_hash FROM {attr} WHERE {attr} IN ({values})
)
"""
else:
constraints[f'#_not_{attr}'] = f"""
NOT EXISTS(
SELECT 1 FROM {attr} WHERE
{CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash
AND {attr} IN ({values})
)
"""

lbry/db/queries/txio.py (new file)
@@ -0,0 +1,578 @@
import logging
from datetime import date
from typing import Tuple, List, Optional, Union
from sqlalchemy import union, func, text, between, distinct
from sqlalchemy.future import select, Select
from ...blockchain.transaction import (
Transaction, Output, OutputScript, TXRefImmutable
)
from ..tables import (
TX, TXO, TXI, txi_join_account, txo_join_account,
Claim, Support, AccountAddress
)
from ..utils import query, in_account_ids
from ..query_context import context
from ..constants import TXO_TYPES, CLAIM_TYPE_CODES, MAX_QUERY_VARIABLES
log = logging.getLogger(__name__)
minimum_txo_columns = (
TXO.c.amount, TXO.c.position.label('txo_position'),
TX.c.tx_hash, TX.c.height, TX.c.timestamp,
func.substr(TX.c.raw, TXO.c.script_offset + 1, TXO.c.script_length).label('src'),
)
def row_to_txo(row):
return Output(
amount=row.amount,
script=OutputScript(row.src),
tx_ref=TXRefImmutable.from_hash(row.tx_hash, row.height, row.timestamp),
position=row.txo_position,
)
def where_txo_type_in(txo_type: Optional[Union[tuple, int]] = None):
if txo_type is not None:
if isinstance(txo_type, int):
return TXO.c.txo_type == txo_type
assert len(txo_type) > 0
if len(txo_type) == 1:
return TXO.c.txo_type == txo_type[0]
else:
return TXO.c.txo_type.in_(txo_type)
return TXO.c.txo_type.in_(CLAIM_TYPE_CODES)
def where_unspent_txos(
txo_types: Tuple[int, ...],
blocks: Tuple[int, int] = None,
missing_in_supports_table: bool = False,
missing_in_claims_table: bool = False,
missing_or_stale_in_claims_table: bool = False,
):
condition = where_txo_type_in(txo_types) & (TXO.c.spent_height == 0)
if blocks is not None:
condition &= between(TXO.c.height, *blocks)
if missing_in_supports_table:
condition &= TXO.c.txo_hash.notin_(select(Support.c.txo_hash))
elif missing_or_stale_in_claims_table:
condition &= TXO.c.txo_hash.notin_(select(Claim.c.txo_hash))
elif missing_in_claims_table:
condition &= TXO.c.claim_hash.notin_(select(Claim.c.claim_hash))
return condition
def where_abandoned_claims():
return Claim.c.claim_hash.notin_(
select(TXO.c.claim_hash).where(where_unspent_txos(CLAIM_TYPE_CODES))
)
def count_abandoned_claims():
return context().fetchtotal(where_abandoned_claims())
def where_abandoned_supports():
return Support.c.txo_hash.notin_(
select(TXO.c.txo_hash).where(where_unspent_txos(TXO_TYPES['support']))
)
def count_abandoned_supports():
return context().fetchtotal(where_abandoned_supports())
def count_unspent_txos(
txo_types: Tuple[int, ...],
blocks: Tuple[int, int] = None,
missing_in_supports_table: bool = False,
missing_in_claims_table: bool = False,
missing_or_stale_in_claims_table: bool = False,
):
return context().fetchtotal(
where_unspent_txos(
txo_types, blocks,
missing_in_supports_table,
missing_in_claims_table,
missing_or_stale_in_claims_table,
)
)
def distribute_unspent_txos(
txo_types: Tuple[int, ...],
blocks: Tuple[int, int] = None,
missing_in_supports_table: bool = False,
missing_in_claims_table: bool = False,
missing_or_stale_in_claims_table: bool = False,
number_of_buckets: int = 10
) -> Tuple[int, List[Tuple[int, int]]]:
chunks = (
select(func.ntile(number_of_buckets).over(order_by=TXO.c.height).label('chunk'), TXO.c.height)
.where(
where_unspent_txos(
txo_types, blocks,
missing_in_supports_table,
missing_in_claims_table,
missing_or_stale_in_claims_table,
)
).cte('chunks')
)
sql = (
select(
func.count('*').label('items'),
func.min(chunks.c.height).label('start_height'),
func.max(chunks.c.height).label('end_height'),
).group_by(chunks.c.chunk).order_by(chunks.c.chunk)
)
total = 0
buckets = []
for bucket in context().fetchall(sql):
total += bucket['items']
if len(buckets) > 0:
if buckets[-1][-1] == bucket['start_height']:
if bucket['start_height'] == bucket['end_height']:
continue
bucket['start_height'] += 1
buckets.append((bucket['start_height'], bucket['end_height']))
return total, buckets
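# Usage sketch (illustrative): the buckets can drive parallel sync workers,
# each processing one contiguous block range:
#
#   total, buckets = distribute_unspent_txos(CLAIM_TYPE_CODES, number_of_buckets=4)
#   for start_height, end_height in buckets:
#       ...  # schedule a worker with blocks=(start_height, end_height)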
def where_changed_support_txos(blocks: Optional[Tuple[int, int]]):
return (
(TXO.c.txo_type == TXO_TYPES['support']) & (
between(TXO.c.height, blocks[0], blocks[-1]) |
between(TXO.c.spent_height, blocks[0], blocks[-1])
)
)
def where_claims_with_changed_supports(blocks: Optional[Tuple[int, int]]):
return Claim.c.claim_hash.in_(
select(TXO.c.claim_hash).where(
where_changed_support_txos(blocks)
)
)
def count_claims_with_changed_supports(blocks: Optional[Tuple[int, int]]) -> int:
sql = (
select(func.count(distinct(TXO.c.claim_hash)).label('total'))
.where(where_changed_support_txos(blocks))
)
return context().fetchone(sql)['total']
def where_changed_content_txos(blocks: Optional[Tuple[int, int]]):
return (
(TXO.c.channel_hash.isnot(None)) & (
between(TXO.c.height, blocks[0], blocks[-1]) |
between(TXO.c.spent_height, blocks[0], blocks[-1])
)
)
def where_channels_with_changed_content(blocks: Optional[Tuple[int, int]]):
return Claim.c.claim_hash.in_(
select(TXO.c.channel_hash).where(
where_changed_content_txos(blocks)
)
)
def count_channels_with_changed_content(blocks: Optional[Tuple[int, int]]):
sql = (
select(func.count(distinct(TXO.c.channel_hash)).label('total'))
.where(where_changed_content_txos(blocks))
)
return context().fetchone(sql)['total']
def select_transactions(cols, account_ids=None, **constraints):
s: Select = select(*cols).select_from(TX)
if not {'tx_hash', 'tx_hash__in'}.intersection(constraints):
assert account_ids, (
"'accounts' argument required when "
"no 'tx_hash' constraint is present"
)
where = in_account_ids(account_ids)
tx_hashes = union(
select(TXO.c.tx_hash).select_from(txo_join_account).where(where),
select(TXI.c.tx_hash).select_from(txi_join_account).where(where)
)
s = s.where(TX.c.tx_hash.in_(tx_hashes))
return context().fetchall(query([TX], s, **constraints))
TXO_NOT_MINE = Output(None, None, is_my_output=False)
def get_raw_transactions(tx_hashes):
return context().fetchall(
select(TX.c.tx_hash, TX.c.raw).where(TX.c.tx_hash.in_(tx_hashes))
)
def get_transactions(**constraints) -> Tuple[List[Transaction], Optional[int]]:
txs = []
sql = select(TX.c.raw, TX.c.height, TX.c.position).select_from(TX)
rows = context().fetchall(query([TX], sql, **constraints))
for row in rows:
txs.append(Transaction(row['raw'], height=row['height'], position=row['position']))
return txs, 0
def _get_transactions(
wallet=None, include_total=False, **constraints
) -> Tuple[List[Transaction], Optional[int]]:
include_is_my_input = constraints.pop('include_is_my_input', False)
include_is_my_output = constraints.pop('include_is_my_output', False)
tx_rows = select_transactions(
[TX.c.tx_hash, TX.c.raw, TX.c.height, TX.c.position, TX.c.is_verified],
order_by=constraints.pop('order_by', ["height=0 DESC", "height DESC", "position DESC"]),
**constraints
)
txids, txs, txi_txoids = [], [], []
for row in tx_rows:
txids.append(row['tx_hash'])
txs.append(Transaction(
raw=row['raw'], height=row['height'], position=row['position'],
is_verified=bool(row['is_verified'])
))
for txi in txs[-1].inputs:
txi_txoids.append(txi.txo_ref.hash)
annotated_txos = {}
for offset in range(0, len(txids), MAX_QUERY_VARIABLES):
annotated_txos.update({
txo.id: txo for txo in
get_txos(
wallet=wallet,
tx_hash__in=txids[offset:offset + MAX_QUERY_VARIABLES], order_by='txo.tx_hash',
include_is_my_input=include_is_my_input,
include_is_my_output=include_is_my_output,
)[0]
})
referenced_txos = {}
for offset in range(0, len(txi_txoids), MAX_QUERY_VARIABLES):
referenced_txos.update({
txo.id: txo for txo in
get_txos(
wallet=wallet,
txo_hash__in=txi_txoids[offset:offset + MAX_QUERY_VARIABLES], order_by='txo.txo_hash',
include_is_my_output=include_is_my_output,
)[0]
})
for tx in txs:
for txi in tx.inputs:
txo = referenced_txos.get(txi.txo_ref.id)
if txo:
txi.txo_ref = txo.ref
for txo in tx.outputs:
_txo = annotated_txos.get(txo.id)
if _txo:
txo.update_annotations(_txo)
else:
txo.update_annotations(TXO_NOT_MINE)
for tx in txs:
txos = tx.outputs
if len(txos) >= 2 and txos[1].can_decode_purchase_data:
txos[0].purchase = txos[1]
return txs, get_transaction_count(**constraints) if include_total else None
def get_transaction_count(**constraints):
constraints.pop('wallet', None)
constraints.pop('offset', None)
constraints.pop('limit', None)
constraints.pop('order_by', None)
count = select_transactions([func.count().label('total')], **constraints)
return count[0]['total'] or 0
BASE_SELECT_TXO_COLUMNS = [
TX.c.tx_hash, TX.c.raw, TX.c.height, TX.c.position.label('tx_position'),
TX.c.is_verified, TX.c.timestamp,
TXO.c.txo_type, TXO.c.position.label('txo_position'), TXO.c.amount, TXO.c.spent_height,
TXO.c.script_offset, TXO.c.script_length,
]
def select_txos(
cols=None, account_ids=None, is_my_input=None,
is_my_output=True, is_my_input_or_output=None, exclude_internal_transfers=False,
include_is_my_input=False, claim_id_not_in_claim_table=None,
txo_id_not_in_claim_table=None, txo_id_not_in_support_table=None,
**constraints
) -> Select:
if cols is None:
cols = BASE_SELECT_TXO_COLUMNS
s: Select = select(*cols)
if account_ids:
my_addresses = select(AccountAddress.c.address).where(in_account_ids(account_ids))
if is_my_input_or_output:
include_is_my_input = True
s = s.where(
TXO.c.address.in_(my_addresses) | (
(TXI.c.address.isnot(None)) &
(TXI.c.address.in_(my_addresses))
)
)
else:
if is_my_output:
s = s.where(TXO.c.address.in_(my_addresses))
elif is_my_output is False:
s = s.where(TXO.c.address.notin_(my_addresses))
if is_my_input:
include_is_my_input = True
s = s.where(
(TXI.c.address.isnot(None)) &
(TXI.c.address.in_(my_addresses))
)
elif is_my_input is False:
include_is_my_input = True
s = s.where(
(TXI.c.address.is_(None)) |
(TXI.c.address.notin_(my_addresses))
)
if exclude_internal_transfers:
include_is_my_input = True
s = s.where(
(TXO.c.txo_type != TXO_TYPES['other']) |
(TXO.c.address.notin_(my_addresses)) |
(TXI.c.address.is_(None)) |
(TXI.c.address.notin_(my_addresses))
)
joins = TXO.join(TX)
#if constraints.get('is_spent', None) is False:
# s = s.where((TXO.c.is_spent == False) & (TXO.c.is_reserved == False))
if include_is_my_input:
joins = joins.join(TXI, (TXI.c.position == 0) & (TXI.c.tx_hash == TXO.c.tx_hash), isouter=True)
if claim_id_not_in_claim_table:
s = s.where(TXO.c.claim_hash.notin_(select(Claim.c.claim_hash)))
elif txo_id_not_in_claim_table:
s = s.where(TXO.c.txo_hash.notin_(select(Claim.c.txo_hash)))
elif txo_id_not_in_support_table:
s = s.where(TXO.c.txo_hash.notin_(select(Support.c.txo_hash)))
return query([TXO, TX], s.select_from(joins), **constraints)
META_ATTRS = (
'activation_height', 'takeover_height', 'creation_height', 'staked_amount',
'short_url', 'canonical_url', 'staked_support_amount', 'staked_support_count',
'signed_claim_count', 'signed_support_count', 'is_signature_valid',
'reposted_count',
)
def rows_to_txos(rows: List[dict], include_tx=True) -> List[Output]:
txos = []
tx_cache = {}
for row in rows:
if include_tx:
if row['tx_hash'] not in tx_cache:
tx_cache[row['tx_hash']] = Transaction(
row['raw'], height=row['height'], position=row['tx_position'],
timestamp=row['timestamp'],
is_verified=bool(row['is_verified']),
)
txo = tx_cache[row['tx_hash']].outputs[row['txo_position']]
else:
source = row['raw'][row['script_offset']:row['script_offset']+row['script_length']]
txo = Output(
amount=row['amount'],
script=OutputScript(source),
tx_ref=TXRefImmutable.from_hash(row['tx_hash'], row['height'], row['timestamp']),
position=row['txo_position'],
)
txo.spent_height = row['spent_height']  # height at which the txo was spent, 0 if unspent
if 'is_my_input' in row:
txo.is_my_input = bool(row['is_my_input'])
if 'is_my_output' in row:
txo.is_my_output = bool(row['is_my_output'])
if 'is_my_input' in row and 'is_my_output' in row:
if txo.is_my_input and txo.is_my_output and row['txo_type'] == TXO_TYPES['other']:
txo.is_internal_transfer = True
else:
txo.is_internal_transfer = False
if 'received_tips' in row:
txo.received_tips = row['received_tips']
for attr in META_ATTRS:
if attr in row:
txo.meta[attr] = row[attr]
txos.append(txo)
return txos
def get_txos(no_tx=False, include_total=False, **constraints) -> Tuple[List[Output], Optional[int]]:
wallet_account_ids = constraints.pop('wallet_account_ids', [])
include_is_my_input = constraints.get('include_is_my_input', False)
include_is_my_output = constraints.pop('include_is_my_output', False)
include_received_tips = constraints.pop('include_received_tips', False)
select_columns = BASE_SELECT_TXO_COLUMNS + [
TXO.c.claim_name
]
my_accounts = None
if wallet_account_ids:
my_accounts = select(AccountAddress.c.address).where(in_account_ids(wallet_account_ids))
if include_is_my_output and my_accounts is not None:
if constraints.get('is_my_output', None) in (True, False):
select_columns.append(text(f"{1 if constraints['is_my_output'] else 0} AS is_my_output"))
else:
select_columns.append(TXO.c.address.in_(my_accounts).label('is_my_output'))
if include_is_my_input and my_accounts is not None:
if constraints.get('is_my_input', None) in (True, False):
select_columns.append(text(f"{1 if constraints['is_my_input'] else 0} AS is_my_input"))
else:
select_columns.append((
(TXI.c.address.isnot(None)) &
(TXI.c.address.in_(my_accounts))
).label('is_my_input'))
if include_received_tips:
support = TXO.alias('support')
select_columns.append(
select(func.coalesce(func.sum(support.c.amount), 0))
.select_from(support).where(
(support.c.claim_hash == TXO.c.claim_hash) &
(support.c.txo_type == TXO_TYPES['support']) &
(support.c.address.in_(my_accounts)) &
(support.c.txo_hash.notin_(select(TXI.c.txo_hash)))
).label('received_tips')
)
if 'order_by' not in constraints or constraints['order_by'] == 'height':
constraints['order_by'] = [
"tx.height=0 DESC", "tx.height DESC", "tx.position DESC", "txo.position"
]
elif constraints.get('order_by', None) == 'none':
del constraints['order_by']
rows = context().fetchall(select_txos(select_columns, **constraints))
txos = rows_to_txos(rows, not no_tx)
channel_hashes = set()
for txo in txos:
if txo.is_claim and txo.can_decode_claim:
if txo.claim.is_signed:
channel_hashes.add(txo.claim.signing_channel_hash)
if channel_hashes:
channels = {
txo.claim_hash: txo for txo in
get_txos(
txo_type=TXO_TYPES['channel'], spent_height=0,
wallet_account_ids=wallet_account_ids, claim_hash__in=channel_hashes
)[0]
}
for txo in txos:
if txo.is_claim and txo.can_decode_claim:
txo.channel = channels.get(txo.claim.signing_channel_hash, None)
return txos, get_txo_count(**constraints) if include_total else None
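# Usage sketch (illustrative; the account id is hypothetical): list unspent
# claim outputs owned by a wallet, annotated with ownership flags:
#
#   txos, total = get_txos(
#       account_ids=['abc123'], wallet_account_ids=['abc123'],
#       txo_type__in=CLAIM_TYPE_CODES, spent_height=0,
#       include_is_my_output=True, include_total=True,
#   )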
def _clean_txo_constraints_for_aggregation(constraints):
constraints.pop('include_is_my_input', None)
constraints.pop('include_is_my_output', None)
constraints.pop('include_received_tips', None)
constraints.pop('wallet_account_ids', None)
constraints.pop('offset', None)
constraints.pop('limit', None)
constraints.pop('order_by', None)
def get_txo_count(**constraints):
_clean_txo_constraints_for_aggregation(constraints)
count = context().fetchall(select_txos([func.count().label('total')], **constraints))
return count[0]['total'] or 0
def get_txo_sum(**constraints):
_clean_txo_constraints_for_aggregation(constraints)
result = context().fetchall(select_txos([func.sum(TXO.c.amount).label('total')], **constraints))
return result[0]['total'] or 0
def get_balance(**constraints):
return get_txo_sum(spent_height=0, **constraints)
def get_report(account_ids):
return
def get_txo_plot(start_day=None, days_back=0, end_day=None, days_after=None, **constraints):
_clean_txo_constraints_for_aggregation(constraints)
if start_day is None:
# TODO: Fix
current_ordinal = 0 # self.ledger.headers.estimated_date(self.ledger.headers.height).toordinal()
constraints['day__gte'] = current_ordinal - days_back
else:
constraints['day__gte'] = date.fromisoformat(start_day).toordinal()
if end_day is not None:
constraints['day__lte'] = date.fromisoformat(end_day).toordinal()
elif days_after is not None:
constraints['day__lte'] = constraints['day__gte'] + days_after
plot = context().fetchall(select_txos(
[TX.c.day, func.sum(TXO.c.amount).label('total')],
group_by='day', order_by='day', **constraints
))
for row in plot:
row['day'] = date.fromordinal(row['day'])
return plot
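# Usage sketch (illustrative; the account id is hypothetical): daily totals
# over an ISO date range, with ordinals converted back to dates:
#
#   plot = get_txo_plot(start_day='2020-07-01', end_day='2020-07-31',
#                       account_ids=['abc123'])
#   for row in plot:
#       print(row['day'], row['total'])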
def get_purchases(**constraints) -> Tuple[List[Output], Optional[int]]:
accounts = constraints.pop('accounts', None)
assert accounts, "'accounts' argument required to find purchases"
if not {'purchased_claim_hash', 'purchased_claim_hash__in'}.intersection(constraints):
constraints['purchased_claim_hash__is_not_null'] = True
constraints['tx_hash__in'] = (
select(TXI.c.tx_hash).select_from(txi_join_account).where(in_account_ids(accounts))
)
txs, count = get_transactions(**constraints)
return [tx.outputs[0] for tx in txs], count
def get_supports_summary(**constraints):
return get_txos(
txo_type=TXO_TYPES['support'],
spent_height=0, is_my_output=True,
include_is_my_input=True,
no_tx=True,
**constraints
)
def reserve_outputs(txo_hashes, is_reserved=True):
context().execute(
TXO.update()
.values(is_reserved=is_reserved)
.where(TXO.c.txo_hash.in_(txo_hashes))
)
def release_all_outputs(account_id):
context().execute(
TXO.update().values(is_reserved=False).where(
TXO.c.is_reserved & TXO.c.address.in_(
select(AccountAddress.c.address).where(in_account_ids(account_id))
)
)
)

lbry/db/query_context.py (new file)
@@ -0,0 +1,694 @@
import os
import time
import traceback
import functools
from io import BytesIO
import multiprocessing as mp
from decimal import Decimal
from typing import Dict, List, Optional, Tuple
from dataclasses import dataclass, field
from contextvars import ContextVar
from sqlalchemy import create_engine, inspect, bindparam, func, exists, event as sqlalchemy_event
from sqlalchemy.future import select
from sqlalchemy.engine import Engine
from sqlalchemy.sql import Insert
try:
from pgcopy import CopyManager
except ImportError:
CopyManager = None
from lbry.event import EventQueuePublisher
from lbry.blockchain.ledger import Ledger
from lbry.blockchain.transaction import Transaction, Output, Input
from lbry.schema.tags import clean_tags
from lbry.schema.result import Censor
from lbry.schema.mime_types import guess_stream_type
from .utils import pg_insert
from .tables import Block, TX, TXO, TXI, Claim, Tag, Support
from .constants import TXO_TYPES, STREAM_TYPES
_context: ContextVar['QueryContext'] = ContextVar('_context')
@dataclass
class QueryContext:
engine: Engine
ledger: Ledger
message_queue: mp.Queue
stop_event: mp.Event
stack: List[List]
metrics: Dict
is_tracking_metrics: bool
blocked_streams: Dict
blocked_channels: Dict
filtered_streams: Dict
filtered_channels: Dict
pid: int
# QueryContext __enter__/__exit__ state
current_timer_name: Optional[str] = None
current_timer_time: float = 0
current_progress: Optional['ProgressContext'] = None
copy_managers: Dict[str, CopyManager] = field(default_factory=dict)
@property
def is_postgres(self):
return self.engine.dialect.name == 'postgresql'
@property
def is_sqlite(self):
return self.engine.dialect.name == 'sqlite'
def raise_unsupported_dialect(self):
raise RuntimeError(f'Unsupported database dialect: {self.engine.dialect.name}.')
def get_resolve_censor(self) -> Censor:
return Censor(self.blocked_streams, self.blocked_channels)
def get_search_censor(self) -> Censor:
return Censor(self.filtered_streams, self.filtered_channels)
def pg_copy(self, table, rows):
with self.engine.begin() as c:
copy_manager = self.copy_managers.get(table.name)
if copy_manager is None:
self.copy_managers[table.name] = copy_manager = CopyManager(
c.connection, table.name, rows[0].keys()
)
copy_manager.conn = c.connection
copy_manager.copy(map(dict.values, rows), BytesIO)
copy_manager.conn = None
def connect_without_transaction(self):
return self.engine.connect().execution_options(isolation_level="AUTOCOMMIT")
def connect_streaming(self):
return self.engine.connect().execution_options(stream_results=True)
def execute_notx(self, sql, *args):
with self.connect_without_transaction() as c:
return c.execute(sql, *args)
def execute(self, sql, *args):
with self.engine.begin() as c:
return c.execute(sql, *args)
def fetchone(self, sql, *args):
with self.engine.begin() as c:
row = c.execute(sql, *args).fetchone()
return dict(row._mapping) if row else row
def fetchall(self, sql, *args):
with self.engine.begin() as c:
rows = c.execute(sql, *args).fetchall()
return [dict(row._mapping) for row in rows]
def fetchtotal(self, condition) -> int:
sql = select(func.count('*').label('total')).where(condition)
return self.fetchone(sql)['total']
def fetchmax(self, column, default: int) -> int:
sql = select(func.coalesce(func.max(column), default).label('max_result'))
return self.fetchone(sql)['max_result']
def has_records(self, table) -> bool:
sql = select(exists([1], from_obj=table).label('result'))
return bool(self.fetchone(sql)['result'])
def insert_or_ignore(self, table):
if self.is_sqlite:
return table.insert().prefix_with("OR IGNORE")
elif self.is_postgres:
return pg_insert(table).on_conflict_do_nothing()
else:
self.raise_unsupported_dialect()
def insert_or_replace(self, table, replace):
if self.is_sqlite:
return table.insert().prefix_with("OR REPLACE")
elif self.is_postgres:
insert = pg_insert(table)
return insert.on_conflict_do_update(
table.primary_key, set_={col: getattr(insert.excluded, col) for col in replace}
)
else:
self.raise_unsupported_dialect()
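# Usage sketch (illustrative; `rows` is a hypothetical list of row dicts):
# the dialect-specific upserts are used like ordinary inserts:
#
#   ctx = context()
#   ctx.execute(ctx.insert_or_ignore(Claim), rows)  # skip conflicting rows
#   ctx.execute(ctx.insert_or_replace(Claim, replace=('amount',)), rows)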
def has_table(self, table):
return inspect(self.engine).has_table(table)
def get_bulk_loader(self) -> 'BulkLoader':
return BulkLoader(self)
def reset_metrics(self):
self.stack = []
self.metrics = {}
def with_timer(self, timer_name: str) -> 'QueryContext':
self.current_timer_name = timer_name
return self
@property
def elapsed(self):
return time.perf_counter() - self.current_timer_time
def __enter__(self) -> 'QueryContext':
self.current_timer_time = time.perf_counter()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.current_timer_name = None
self.current_timer_time = 0
self.current_progress = None
def context(with_timer: str = None) -> 'QueryContext':
if isinstance(with_timer, str):
return _context.get().with_timer(with_timer)
return _context.get()
def set_postgres_settings(connection, _):
cursor = connection.cursor()
cursor.execute('SET work_mem="500MB";')
cursor.execute('COMMIT;')
cursor.close()
def set_sqlite_settings(connection, _):
connection.isolation_level = None
cursor = connection.cursor()
cursor.execute('PRAGMA journal_mode=WAL;')
cursor.close()
def do_sqlite_begin(connection):
# see: https://bit.ly/3j4vvXm
connection.exec_driver_sql("BEGIN")
def initialize(
ledger: Ledger, message_queue: mp.Queue, stop_event: mp.Event,
track_metrics=False, block_and_filter=None):
url = ledger.conf.db_url_or_default
engine = create_engine(url)
if engine.name == "postgresql":
sqlalchemy_event.listen(engine, "connect", set_postgres_settings)
elif engine.name == "sqlite":
sqlalchemy_event.listen(engine, "connect", set_sqlite_settings)
sqlalchemy_event.listen(engine, "begin", do_sqlite_begin)
if block_and_filter is not None:
blocked_streams, blocked_channels, filtered_streams, filtered_channels = block_and_filter
else:
blocked_streams = blocked_channels = filtered_streams = filtered_channels = {}
_context.set(
QueryContext(
pid=os.getpid(), engine=engine,
ledger=ledger, message_queue=message_queue, stop_event=stop_event,
stack=[], metrics={}, is_tracking_metrics=track_metrics,
blocked_streams=blocked_streams, blocked_channels=blocked_channels,
filtered_streams=filtered_streams, filtered_channels=filtered_channels,
)
)
def uninitialize():
ctx = _context.get(None)
if ctx is not None:
ctx.engine.dispose()
_context.set(None)
class Event:
_events: List['Event'] = []
__slots__ = 'id', 'name', 'units'
def __init__(self, name: str, units: Tuple[str]):
self.id = None
self.name = name
self.units = units
@classmethod
def get_by_id(cls, event_id) -> 'Event':
return cls._events[event_id]
@classmethod
def get_by_name(cls, name) -> 'Event':
for event in cls._events:
if event.name == name:
return event
@classmethod
def add(cls, name: str, *units: str) -> 'Event':
assert cls.get_by_name(name) is None, f"Event {name} already exists."
assert name.count('.') == 3, f"Event {name} does not follow pattern of: [module].sync.[phase].[task]"
event = cls(name, units)
cls._events.append(event)
event.id = cls._events.index(event)
return event
def event_emitter(name: str, *units: str, throttle=1):
event = Event.add(name, *units)
def wrapper(f):
@functools.wraps(f)
def with_progress(*args, **kwargs):
with progress(event, throttle=throttle) as p:
try:
return f(*args, **kwargs, p=p)
except BreakProgress:
raise
except Exception:
traceback.print_exc()
raise
return with_progress
return wrapper
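# Usage sketch (illustrative; the event name and task are hypothetical):
# event_emitter registers the event and injects a ProgressContext as `p`:
#
#   @event_emitter("blockchain.sync.claims.insert", "claims")
#   def claims_insert(claims, p=None):
#       p.start(len(claims))
#       for claim in claims:
#           ...  # do the per-claim work
#           p.step()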
class ProgressPublisher(EventQueuePublisher):
def message_to_event(self, message):
total, extra = None, None
if len(message) == 3:
event_id, progress_id, done = message
elif len(message) == 5:
event_id, progress_id, done, total, extra = message
else:
raise TypeError("progress message must be tuple of 3 or 5 values.")
event = Event.get_by_id(event_id)
d = {
"event": event.name,
"data": {"id": progress_id, "done": done}
}
if total is not None:
d['data']['total'] = total
d['data']['units'] = event.units
if isinstance(extra, dict):
d['data'].update(extra)
return d
class BreakProgress(Exception):
"""Break out of progress when total is 0."""
class Progress:
def __init__(self, message_queue: mp.Queue, event: Event, throttle=1):
self.message_queue = message_queue
self.event = event
self.progress_id = 0
self.throttle = throttle
self.last_done = (0,)*len(event.units)
self.last_done_queued = (0,)*len(event.units)
self.totals = (0,)*len(event.units)
def __enter__(self) -> 'Progress':
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.last_done != self.last_done_queued:
self.message_queue.put((self.event.id, self.progress_id, self.last_done))
self.last_done_queued = self.last_done
if exc_type == BreakProgress:
return True
if self.last_done != self.totals: # or exc_type is not None:
# TODO: add exception info into closing message if there is any
self.message_queue.put((
self.event.id, self.progress_id, (-1,)*len(self.event.units)
))
def start(self, *totals: int, progress_id=0, label=None, extra=None):
assert len(totals) == len(self.event.units), \
f"Totals {totals} do not match up with units {self.event.units}."
if not any(totals):
raise BreakProgress
self.totals = totals
self.progress_id = progress_id
extra = {} if extra is None else extra.copy()
if label is not None:
extra['label'] = label
self.step(*((0,)*len(totals)), force=True, extra=extra)
def step(self, *done: int, force=False, extra=None):
if done == ():
assert len(self.totals) == 1, "Incrementing step() only works with single-unit progress."
done = (self.last_done[0]+1,)
assert len(done) == len(self.totals), \
f"Done elements {done} don't match total elements {self.totals}."
self.last_done = done
send_condition = force or extra is not None or (
# throttle rate of events being generated (only throttles first unit value)
(self.throttle == 1 or done[0] % self.throttle == 0) and
# deduplicate finish event by not sending a step where done == total
any(i < j for i, j in zip(done, self.totals)) and
# deduplicate same event
done != self.last_done_queued
)
if send_condition:
if extra is not None:
self.message_queue.put_nowait(
(self.event.id, self.progress_id, done, self.totals, extra)
)
else:
self.message_queue.put_nowait(
(self.event.id, self.progress_id, done)
)
self.last_done_queued = done
def add(self, *done: int, force=False, extra=None):
assert len(done) == len(self.last_done), \
f"Done elements {done} don't match total elements {self.last_done}."
self.step(
*(i+j for i, j in zip(self.last_done, done)),
force=force, extra=extra
)
def iter(self, items: List):
self.start(len(items))
for item in items:
yield item
self.step()
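# Usage sketch (illustrative): iter() folds start()/step() into a loop over
# a list, advancing single-unit progress one item at a time:
#
#   for block in p.iter(blocks):
#       ...  # process block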
class ProgressContext(Progress):
def __init__(self, ctx: QueryContext, event: Event, throttle=1):
super().__init__(ctx.message_queue, event, throttle)
self.ctx = ctx
def __enter__(self) -> 'ProgressContext':
self.ctx.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return any((
self.ctx.__exit__(exc_type, exc_val, exc_tb),
super().__exit__(exc_type, exc_val, exc_tb)
))
def progress(e: Event, throttle=1) -> ProgressContext:
ctx = context(e.name)
ctx.current_progress = ProgressContext(ctx, e, throttle=throttle)
return ctx.current_progress
class BulkLoader:
def __init__(self, ctx: QueryContext):
self.ctx = ctx
self.ledger = ctx.ledger
self.blocks = []
self.txs = []
self.txos = []
self.txis = []
self.supports = []
self.claims = []
self.tags = []
self.update_claims = []
self.delete_tags = []
@staticmethod
def block_to_row(block: Block) -> dict:
return {
'block_hash': block.block_hash,
'previous_hash': block.prev_block_hash,
'file_number': block.file_number,
'height': 0 if block.is_first_block else block.height,
'timestamp': block.timestamp,
}
@staticmethod
def tx_to_row(block_hash: bytes, tx: Transaction) -> dict:
row = {
'tx_hash': tx.hash,
'block_hash': block_hash,
'raw': tx.raw,
'height': tx.height,
'position': tx.position,
'is_verified': tx.is_verified,
'timestamp': tx.timestamp,
'day': tx.day,
'purchased_claim_hash': None,
}
txos = tx.outputs
if len(txos) >= 2 and txos[1].can_decode_purchase_data:
txos[0].purchase = txos[1]
row['purchased_claim_hash'] = txos[1].purchase_data.claim_hash
return row
@staticmethod
def txi_to_row(tx: Transaction, txi: Input) -> dict:
return {
'tx_hash': tx.hash,
'txo_hash': txi.txo_ref.hash,
'position': txi.position,
'height': tx.height,
}
def txo_to_row(self, tx: Transaction, txo: Output) -> dict:
row = {
'tx_hash': tx.hash,
'txo_hash': txo.hash,
'address': txo.get_address(self.ledger) if txo.has_address else None,
'position': txo.position,
'amount': txo.amount,
'height': tx.height,
'script_offset': txo.script.offset,
'script_length': txo.script.length,
'txo_type': 0,
'claim_id': None,
'claim_hash': None,
'claim_name': None,
'channel_hash': None,
'signature': None,
'signature_digest': None,
'public_key': None,
'public_key_hash': None
}
if txo.is_claim:
if txo.can_decode_claim:
claim = txo.claim
row['txo_type'] = TXO_TYPES.get(claim.claim_type, TXO_TYPES['stream'])
if claim.is_channel:
row['public_key'] = claim.channel.public_key_bytes
row['public_key_hash'] = self.ledger.address_to_hash160(
self.ledger.public_key_to_address(claim.channel.public_key_bytes)
)
else:
row['txo_type'] = TXO_TYPES['stream']
elif txo.is_support:
row['txo_type'] = TXO_TYPES['support']
elif txo.purchase is not None:
row['txo_type'] = TXO_TYPES['purchase']
row['claim_id'] = txo.purchased_claim_id
row['claim_hash'] = txo.purchased_claim_hash
if txo.script.is_claim_involved:
signable = txo.can_decode_signable
if signable and signable.is_signed:
row['channel_hash'] = signable.signing_channel_hash
row['signature'] = txo.get_encoded_signature()
row['signature_digest'] = txo.get_signature_digest(self.ledger)
row['claim_id'] = txo.claim_id
row['claim_hash'] = txo.claim_hash
try:
row['claim_name'] = txo.claim_name.replace('\x00', '')
except UnicodeDecodeError:
pass
return row
def claim_to_rows(
self, txo: Output, staked_support_amount: int, staked_support_count: int,
signature: bytes = None, signature_digest: bytes = None, channel_public_key: bytes = None,
) -> Tuple[dict, List]:
tx = txo.tx_ref
d = {
'claim_type': None,
'address': txo.get_address(self.ledger),
'txo_hash': txo.hash,
'amount': txo.amount,
'height': tx.height,
'timestamp': tx.timestamp,
# support
'staked_amount': txo.amount + staked_support_amount,
'staked_support_amount': staked_support_amount,
'staked_support_count': staked_support_count,
# basic metadata
'title': None,
'description': None,
'author': None,
# streams
'stream_type': None,
'media_type': None,
'duration': None,
'release_time': None,
'fee_amount': 0,
'fee_currency': None,
# reposts
'reposted_claim_hash': None,
# signed claims
'channel_hash': None,
'is_signature_valid': None,
}
claim = txo.can_decode_claim
if not claim:
return d, []
if claim.is_stream:
d['claim_type'] = TXO_TYPES['stream']
d['media_type'] = claim.stream.source.media_type
d['stream_type'] = STREAM_TYPES[guess_stream_type(d['media_type'])]
d['title'] = claim.stream.title.replace('\x00', '')
d['description'] = claim.stream.description.replace('\x00', '')
d['author'] = claim.stream.author.replace('\x00', '')
if claim.stream.video and claim.stream.video.duration:
d['duration'] = claim.stream.video.duration
if claim.stream.audio and claim.stream.audio.duration:
d['duration'] = claim.stream.audio.duration
if claim.stream.release_time:
d['release_time'] = claim.stream.release_time
if claim.stream.has_fee:
fee = claim.stream.fee
if isinstance(fee.amount, Decimal):
d['fee_amount'] = int(fee.amount*1000)
if isinstance(fee.currency, str):
d['fee_currency'] = fee.currency.lower()
elif claim.is_repost:
d['claim_type'] = TXO_TYPES['repost']
d['reposted_claim_hash'] = claim.repost.reference.claim_hash
elif claim.is_channel:
d['claim_type'] = TXO_TYPES['channel']
if claim.is_signed:
d['channel_hash'] = claim.signing_channel_hash
d['is_signature_valid'] = (
all((signature, signature_digest, channel_public_key)) and
Output.is_signature_valid(
signature, signature_digest, channel_public_key
)
)
tags = []
if claim.message.tags:
claim_hash = txo.claim_hash
tags = [
{'claim_hash': claim_hash, 'tag': tag}
for tag in clean_tags(claim.message.tags)
]
return d, tags
def support_to_row(
self, txo: Output, channel_public_key: bytes = None,
signature: bytes = None, signature_digest: bytes = None
):
tx = txo.tx_ref
d = {
'txo_hash': txo.ref.hash,
'claim_hash': txo.claim_hash,
'address': txo.get_address(self.ledger),
'amount': txo.amount,
'height': tx.height,
'timestamp': tx.timestamp,
'emoji': None,
'channel_hash': None,
'is_signature_valid': None,
}
support = txo.can_decode_support
if support:
d['emoji'] = support.emoji
if support.is_signed:
d['channel_hash'] = support.signing_channel_hash
d['is_signature_valid'] = (
all((signature, signature_digest, channel_public_key)) and
Output.is_signature_valid(
signature, signature_digest, channel_public_key
)
)
return d
def add_block(self, block: Block):
self.blocks.append(self.block_to_row(block))
for tx in block.txs:
self.add_transaction(block.block_hash, tx)
return self
def add_transaction(self, block_hash: bytes, tx: Transaction):
self.txs.append(self.tx_to_row(block_hash, tx))
for txi in tx.inputs:
if txi.coinbase is None:
self.txis.append(self.txi_to_row(tx, txi))
for txo in tx.outputs:
self.txos.append(self.txo_to_row(tx, txo))
return self
def add_support(self, txo: Output, **extra):
self.supports.append(self.support_to_row(txo, **extra))
def add_claim(
self, txo: Output, short_url: str,
creation_height: int, activation_height: int, expiration_height: int,
takeover_height: int = None, **extra
):
try:
claim_name = txo.claim_name.replace('\x00', '')
normalized_name = txo.normalized_name
except UnicodeDecodeError:
claim_name = normalized_name = ''
d, tags = self.claim_to_rows(txo, **extra)
d['claim_hash'] = txo.claim_hash
d['claim_id'] = txo.claim_id
d['claim_name'] = claim_name
d['normalized'] = normalized_name
d['short_url'] = short_url
d['creation_height'] = creation_height
d['activation_height'] = activation_height
d['expiration_height'] = expiration_height
d['takeover_height'] = takeover_height
d['is_controlling'] = takeover_height is not None
self.claims.append(d)
self.tags.extend(tags)
return self
def update_claim(self, txo: Output, **extra):
d, tags = self.claim_to_rows(txo, **extra)
d['pk'] = txo.claim_hash
self.update_claims.append(d)
self.delete_tags.append({'pk': txo.claim_hash})
self.tags.extend(tags)
return self
def get_queries(self):
return (
(Block.insert(), self.blocks),
(TX.insert(), self.txs),
(TXO.insert(), self.txos),
(TXI.insert(), self.txis),
(Claim.insert(), self.claims),
(Tag.delete().where(Tag.c.claim_hash == bindparam('pk')), self.delete_tags),
(Claim.update().where(Claim.c.claim_hash == bindparam('pk')), self.update_claims),
(Tag.insert(), self.tags),
(Support.insert(), self.supports),
)
def flush(self, return_row_count_for_table) -> int:
done = 0
for sql, rows in self.get_queries():
if not rows:
continue
if self.ctx.is_postgres and isinstance(sql, Insert):
self.ctx.pg_copy(sql.table, rows)
else:
self.ctx.execute(sql, rows)
if sql.table == return_row_count_for_table:
done += len(rows)
rows.clear()
return done
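# Usage sketch (illustrative; `blocks` is a hypothetical iterable of parsed
# Blocks): a BulkLoader batches rows per table, then flushes them in one
# pass, using pg_copy on PostgreSQL and executemany elsewhere:
#
#   loader = context().get_bulk_loader()
#   for block in blocks:
#       loader.add_block(block)
#   done = loader.flush(TX)  # number of tx rows written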

lbry/db/sync.py (new file)
@@ -0,0 +1,103 @@
from sqlalchemy.future import select
from lbry.db.query_context import progress, Event
from lbry.db.tables import TX, TXI, TXO, Claim, Support
from .constants import TXO_TYPES, CLAIM_TYPE_CODES
from .queries import (
BASE_SELECT_TXO_COLUMNS,
rows_to_txos, where_unspent_txos,
where_abandoned_supports,
where_abandoned_claims
)
SPENDS_UPDATE_EVENT = Event.add("client.sync.spends.update", "steps")
CLAIMS_INSERT_EVENT = Event.add("client.sync.claims.insert", "claims")
CLAIMS_UPDATE_EVENT = Event.add("client.sync.claims.update", "claims")
CLAIMS_DELETE_EVENT = Event.add("client.sync.claims.delete", "claims")
SUPPORT_INSERT_EVENT = Event.add("client.sync.supports.insert", "supports")
SUPPORT_UPDATE_EVENT = Event.add("client.sync.supports.update", "supports")
SUPPORT_DELETE_EVENT = Event.add("client.sync.supports.delete", "supports")
def process_all_things_after_sync():
with progress(SPENDS_UPDATE_EVENT) as p:
p.start(2)
update_spent_outputs(p.ctx)
p.step(1)
set_input_addresses(p.ctx)
p.step(2)
with progress(SUPPORT_DELETE_EVENT) as p:
p.start(1)
sql = Support.delete().where(where_abandoned_supports())
p.ctx.execute(sql)
with progress(SUPPORT_INSERT_EVENT) as p:
loader = p.ctx.get_bulk_loader()
sql = (
select(*BASE_SELECT_TXO_COLUMNS)
.where(where_unspent_txos(TXO_TYPES['support'], missing_in_supports_table=True))
.select_from(TXO.join(TX))
)
for support in rows_to_txos(p.ctx.fetchall(sql)):
loader.add_support(support)
loader.flush(Support)
with progress(CLAIMS_DELETE_EVENT) as p:
p.start(1)
sql = Claim.delete().where(where_abandoned_claims())
p.ctx.execute(sql)
with progress(CLAIMS_INSERT_EVENT) as p:
loader = p.ctx.get_bulk_loader()
sql = (
select(*BASE_SELECT_TXO_COLUMNS)
.where(where_unspent_txos(CLAIM_TYPE_CODES, missing_in_claims_table=True))
.select_from(TXO.join(TX))
)
for claim in rows_to_txos(p.ctx.fetchall(sql)):
loader.add_claim(claim, '', 0, 0, 0, 0, staked_support_amount=0, staked_support_count=0)
loader.flush(Claim)
with progress(CLAIMS_UPDATE_EVENT) as p:
loader = p.ctx.get_bulk_loader()
sql = (
select(*BASE_SELECT_TXO_COLUMNS)
.where(where_unspent_txos(CLAIM_TYPE_CODES, missing_or_stale_in_claims_table=True))
.select_from(TXO.join(TX))
)
for claim in rows_to_txos(p.ctx.fetchall(sql)):
loader.update_claim(claim)
loader.flush(Claim)
def set_input_addresses(ctx):
# Update TXIs to have the address of the TXO they are spending.
if ctx.is_sqlite:
address_query = select(TXO.c.address).where(TXI.c.txo_hash == TXO.c.txo_hash)
set_addresses = (
TXI.update()
.values(address=address_query.scalar_subquery())
.where(TXI.c.address.is_(None))
)
else:
set_addresses = (
TXI.update()
.values({TXI.c.address: TXO.c.address})
.where((TXI.c.address.is_(None)) & (TXI.c.txo_hash == TXO.c.txo_hash))
)
ctx.execute(set_addresses)
def update_spent_outputs(ctx):
# Update spent TXOs, setting spent_height to the height of the spending TXI.
set_spent_height = (
TXO.update()
.values({
TXO.c.spent_height: (
select(TXI.c.height)
.where(TXI.c.txo_hash == TXO.c.txo_hash)
.scalar_subquery()
)
}).where(
(TXO.c.spent_height == 0) &
(TXO.c.txo_hash.in_(select(TXI.c.txo_hash).where(TXI.c.address.is_(None))))
)
)
ctx.execute(set_spent_height)

lbry/db/tables.py (new file)
@@ -0,0 +1,262 @@
# pylint: skip-file
from sqlalchemy import (
MetaData, Table, Column, ForeignKey,
LargeBinary, Text, SmallInteger, Integer, BigInteger, Boolean,
)
from .constants import TXO_TYPES, CLAIM_TYPE_CODES
SCHEMA_VERSION = '1.4'
metadata = MetaData()
Version = Table(
'version', metadata,
Column('version', Text, primary_key=True),
)
PubkeyAddress = Table(
'pubkey_address', metadata,
Column('address', Text, primary_key=True),
Column('used_times', Integer, server_default='0'),
)
AccountAddress = Table(
'account_address', metadata,
Column('account', Text, primary_key=True),
Column('address', Text, ForeignKey(PubkeyAddress.columns.address), primary_key=True),
Column('chain', SmallInteger),
Column('pubkey', LargeBinary),
Column('chain_code', LargeBinary),
Column('n', Integer),
Column('depth', SmallInteger),
)
Block = Table(
'block', metadata,
Column('block_hash', LargeBinary, primary_key=True),
Column('previous_hash', LargeBinary),
Column('file_number', SmallInteger),
Column('height', Integer),
Column('timestamp', Integer),
Column('block_filter', LargeBinary, nullable=True)
)
TX = Table(
'tx', metadata,
Column('block_hash', LargeBinary, nullable=True),
Column('tx_hash', LargeBinary, primary_key=True),
Column('raw', LargeBinary),
Column('height', Integer),
Column('position', SmallInteger),
Column('timestamp', Integer, nullable=True),
Column('day', Integer, nullable=True),
Column('is_verified', Boolean, server_default='FALSE'),
Column('purchased_claim_hash', LargeBinary, nullable=True),
Column('tx_filter', LargeBinary, nullable=True)
)
pg_add_tx_constraints_and_indexes = [
"ALTER TABLE tx ADD PRIMARY KEY (tx_hash);",
]
TXO = Table(
'txo', metadata,
Column('tx_hash', LargeBinary, ForeignKey(TX.columns.tx_hash)),
Column('txo_hash', LargeBinary, primary_key=True),
Column('address', Text),
Column('position', SmallInteger),
Column('amount', BigInteger),
Column('height', Integer),
Column('spent_height', Integer, server_default='0'),
Column('script_offset', Integer),
Column('script_length', Integer),
Column('is_reserved', Boolean, server_default='0'),
# claims
Column('txo_type', SmallInteger, server_default='0'),
Column('claim_id', Text, nullable=True),
Column('claim_hash', LargeBinary, nullable=True),
Column('claim_name', Text, nullable=True),
Column('channel_hash', LargeBinary, nullable=True), # claims in channel
Column('signature', LargeBinary, nullable=True),
Column('signature_digest', LargeBinary, nullable=True),
# channels
Column('public_key', LargeBinary, nullable=True),
Column('public_key_hash', LargeBinary, nullable=True),
)
txo_join_account = TXO.join(AccountAddress, TXO.columns.address == AccountAddress.columns.address)
pg_add_txo_constraints_and_indexes = [
"ALTER TABLE txo ADD PRIMARY KEY (txo_hash);",
# find appropriate channel public key for signing a content claim
f"CREATE INDEX txo_channel_hash_by_height_desc_w_pub_key "
f"ON txo (claim_hash, height desc) INCLUDE (public_key) "
f"WHERE txo_type={TXO_TYPES['channel']};",
# for calculating supports on a claim
f"CREATE INDEX txo_unspent_supports ON txo (claim_hash) INCLUDE (amount) "
f"WHERE spent_height = 0 AND txo_type={TXO_TYPES['support']};",
# for finding modified claims in a block range
f"CREATE INDEX txo_claim_changes "
f"ON txo (height DESC) INCLUDE (claim_hash, txo_hash) "
f"WHERE spent_height = 0 AND txo_type IN {tuple(CLAIM_TYPE_CODES)};",
# for finding claims which need support totals re-calculated in a block range
f"CREATE INDEX txo_added_supports_by_height ON txo (height DESC) "
f"INCLUDE (claim_hash) WHERE txo_type={TXO_TYPES['support']};",
f"CREATE INDEX txo_spent_supports_by_height ON txo (spent_height DESC) "
f"INCLUDE (claim_hash) WHERE txo_type={TXO_TYPES['support']};",
]
TXI = Table(
'txi', metadata,
Column('tx_hash', LargeBinary, ForeignKey(TX.columns.tx_hash)),
Column('txo_hash', LargeBinary, ForeignKey(TXO.columns.txo_hash), primary_key=True),
Column('address', Text, nullable=True),
Column('position', SmallInteger),
Column('height', Integer),
)
txi_join_account = TXI.join(AccountAddress, TXI.columns.address == AccountAddress.columns.address)
pg_add_txi_constraints_and_indexes = [
"ALTER TABLE txi ADD PRIMARY KEY (txo_hash);",
]
Claim = Table(
'claim', metadata,
Column('claim_hash', LargeBinary, primary_key=True),
Column('claim_id', Text),
Column('claim_name', Text),
Column('normalized', Text),
Column('address', Text),
Column('txo_hash', LargeBinary, ForeignKey(TXO.columns.txo_hash)),
Column('amount', BigInteger),
Column('staked_amount', BigInteger),
Column('timestamp', Integer), # last updated timestamp
Column('creation_timestamp', Integer),
Column('release_time', Integer, nullable=True),
Column('height', Integer), # last updated height
Column('creation_height', Integer),
Column('activation_height', Integer),
Column('expiration_height', Integer),
Column('takeover_height', Integer, nullable=True),
Column('is_controlling', Boolean),
# short_url: normalized#shortest-unique-claim_id
Column('short_url', Text),
# canonical_url: channel's-short_url/normalized#shortest-unique-claim_id-within-channel
# canonical_url is computed dynamically
Column('title', Text, nullable=True),
Column('author', Text, nullable=True),
Column('description', Text, nullable=True),
Column('claim_type', SmallInteger),
Column('claim_reposted_count', Integer, server_default='0'),
Column('staked_support_count', Integer, server_default='0'),
Column('staked_support_amount', BigInteger, server_default='0'),
# streams
Column('stream_type', SmallInteger, nullable=True),
Column('media_type', Text, nullable=True),
Column('fee_amount', BigInteger, server_default='0'),
Column('fee_currency', Text, nullable=True),
Column('duration', Integer, nullable=True),
# reposts
Column('reposted_claim_hash', LargeBinary, nullable=True),
Column('reposted_count', Integer, server_default='0'),
# claims which are channels
Column('signed_claim_count', Integer, server_default='0'),
Column('signed_support_count', Integer, server_default='0'),
# claims which are inside channels
Column('channel_hash', LargeBinary, nullable=True),
Column('is_signature_valid', Boolean, nullable=True),
Column('trending_group', BigInteger, server_default='0'),
Column('trending_mixed', BigInteger, server_default='0'),
Column('trending_local', BigInteger, server_default='0'),
Column('trending_global', BigInteger, server_default='0'),
)
Tag = Table(
'tag', metadata,
Column('claim_hash', LargeBinary),
Column('tag', Text),
)
pg_add_claim_and_tag_constraints_and_indexes = [
"ALTER TABLE claim ADD PRIMARY KEY (claim_hash);",
# for checking if claim is up-to-date
"CREATE UNIQUE INDEX claim_txo_hash ON claim (txo_hash);",
# used by takeover process to reset winning claims
"CREATE INDEX claim_normalized ON claim (normalized);",
# ordering and search by release_time
"CREATE INDEX claim_release_time ON claim (release_time DESC NULLs LAST);",
# used to count()/sum() claims signed by channel
"CREATE INDEX signed_content ON claim (channel_hash) "
"INCLUDE (amount) WHERE is_signature_valid;",
# basic tag indexes
"ALTER TABLE tag ADD PRIMARY KEY (claim_hash, tag);",
"CREATE INDEX tags ON tag (tag) INCLUDE (claim_hash);",
]
Support = Table(
'support', metadata,
Column('txo_hash', LargeBinary, ForeignKey(TXO.columns.txo_hash), primary_key=True),
Column('claim_hash', LargeBinary),
Column('address', Text),
Column('amount', BigInteger),
Column('height', Integer),
Column('timestamp', Integer),
# support metadata
Column('emoji', Text),
# signed supports
Column('channel_hash', LargeBinary, nullable=True),
Column('signature', LargeBinary, nullable=True),
Column('signature_digest', LargeBinary, nullable=True),
Column('is_signature_valid', Boolean, nullable=True),
)
pg_add_support_constraints_and_indexes = [
"ALTER TABLE support ADD PRIMARY KEY (txo_hash);",
# used to count()/sum() supports signed by channel
"CREATE INDEX signed_support ON support (channel_hash) "
"INCLUDE (amount) WHERE is_signature_valid;",
]
Stake = Table(
'stake', metadata,
Column('claim_hash', LargeBinary),
Column('height', Integer),
Column('stake_min', BigInteger),
Column('stake_max', BigInteger),
Column('stake_sum', BigInteger),
Column('stake_count', Integer),
Column('stake_unique', Integer),
)
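To make the pattern concrete, a minimal sketch of turning such Table definitions into a live schema; the in-memory SQLite URL is an assumption for illustration only, since the raw DDL lists above are PostgreSQL-specific:

import sqlalchemy
from sqlalchemy import MetaData, Table, Column, LargeBinary, Text, create_engine

metadata = MetaData()
Tag = Table(
    'tag', metadata,
    Column('claim_hash', LargeBinary),
    Column('tag', Text),
)

engine = create_engine('sqlite:///:memory:')  # illustrative; production targets Postgres
metadata.create_all(engine)  # emits CREATE TABLE for every Table bound to metadata
# on PostgreSQL one would then apply the raw statement lists, e.g.:
# with engine.connect() as connection:
#     for statement in pg_add_claim_and_tag_constraints_and_indexes:
#         connection.execute(sqlalchemy.text(statement))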

lbry/db/utils.py (new file, 173 lines)

@ -0,0 +1,173 @@
from itertools import islice
from typing import List, Union
from sqlalchemy import text, and_
from sqlalchemy.sql.expression import Select, FunctionElement
from sqlalchemy.types import Numeric
from sqlalchemy.ext.compiler import compiles
try:
from sqlalchemy.dialects.postgresql import insert as pg_insert # pylint: disable=unused-import
except ImportError:
pg_insert = None
from .tables import AccountAddress
class greatest(FunctionElement): # pylint: disable=invalid-name
type = Numeric()
name = 'greatest'
@compiles(greatest)
def default_greatest(element, compiler, **kw):
return "greatest(%s)" % compiler.process(element.clauses, **kw)
@compiles(greatest, 'sqlite')
def sqlite_greatest(element, compiler, **kw):
return "max(%s)" % compiler.process(element.clauses, **kw)
class least(FunctionElement): # pylint: disable=invalid-name
type = Numeric()
name = 'least'
@compiles(least)
def default_least(element, compiler, **kw):
return "least(%s)" % compiler.process(element.clauses, **kw)
@compiles(least, 'sqlite')
def sqlite_least(element, compiler, **kw):
return "min(%s)" % compiler.process(element.clauses, **kw)
def chunk(rows, step):
it, total = iter(rows), len(rows)
for _ in range(0, total, step):
yield list(islice(it, step))
total -= step
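# Note (editor's illustration, not part of the diff): chunk() yields successive
# fixed-size batches from a sized iterable, e.g.
#     >>> list(chunk([1, 2, 3, 4, 5], 2))
#     [[1, 2], [3, 4], [5]]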
def constrain_single_or_list(constraints, column, value, convert=lambda x: x):
if value is not None:
if isinstance(value, list):
value = [convert(v) for v in value]
if len(value) == 1:
constraints[column] = value[0]
elif len(value) > 1:
constraints[f"{column}__in"] = value
else:
constraints[column] = convert(value)
return constraints
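# Note (editor's illustration, not part of the diff): single values collapse to
# an equality constraint and multi-element lists become __in constraints, e.g.
#     >>> constrain_single_or_list({}, 'height', [1, 2])
#     {'height__in': [1, 2]}
#     >>> constrain_single_or_list({}, 'height', [7])
#     {'height': 7}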
def in_account_ids(account_ids: Union[List[str], str]):
if isinstance(account_ids, list):
if len(account_ids) > 1:
return AccountAddress.c.account.in_(account_ids)
account_ids = account_ids[0]
return AccountAddress.c.account == account_ids
def query(table, s: Select, **constraints) -> Select:
limit = constraints.pop('limit', None)
if limit is not None:
s = s.limit(limit)
offset = constraints.pop('offset', None)
if offset is not None:
s = s.offset(offset)
order_by = constraints.pop('order_by', None)
if order_by:
if isinstance(order_by, str):
s = s.order_by(text(order_by))
elif isinstance(order_by, list):
s = s.order_by(text(', '.join(order_by)))
else:
raise ValueError("order_by must be string or list")
group_by = constraints.pop('group_by', None)
if group_by is not None:
s = s.group_by(text(group_by))
account_ids = constraints.pop('account_ids', [])
if account_ids:
s = s.where(in_account_ids(account_ids))
if constraints:
s = s.where(
constraints_to_clause(table, constraints)
)
return s
def constraints_to_clause(tables, constraints):
clause = []
for key, constraint in constraints.items():
if key.endswith('__not'):
col, op = key[:-len('__not')], '__ne__'
elif key.endswith('__is_null'):
col = key[:-len('__is_null')]
op = '__eq__'
constraint = None
elif key.endswith('__is_not_null'):
col = key[:-len('__is_not_null')]
op = '__ne__'
constraint = None
elif key.endswith('__lt'):
col, op = key[:-len('__lt')], '__lt__'
elif key.endswith('__lte'):
col, op = key[:-len('__lte')], '__le__'
elif key.endswith('__gt'):
col, op = key[:-len('__gt')], '__gt__'
elif key.endswith('__gte'):
col, op = key[:-len('__gte')], '__ge__'
elif key.endswith('__like'):
col, op = key[:-len('__like')], 'like'
elif key.endswith('__not_like'):
col, op = key[:-len('__not_like')], 'notlike'
elif key.endswith('__in') or key.endswith('__not_in'):
if key.endswith('__in'):
col, op, one_val_op = key[:-len('__in')], 'in_', '__eq__'
else:
col, op, one_val_op = key[:-len('__not_in')], 'notin_', '__ne__'
if isinstance(constraint, Select):
pass
elif constraint:
if isinstance(constraint, (list, set, tuple)):
if len(constraint) == 1:
op = one_val_op
constraint = next(iter(constraint))
elif isinstance(constraint, str):
constraint = text(constraint)
else:
raise ValueError(f"{col} requires a list, set or string as constraint value.")
else:
continue
else:
col, op = key, '__eq__'
attr = None
if '.' in col:
table_name, col = col.split('.')
_table = None
for table in tables:
if table.name == table_name.lower():
_table = table
break
if _table is not None:
attr = getattr(_table.c, col)
else:
raise ValueError(f"Table '{table_name}' not available: {', '.join([t.name for t in tables])}.")
else:
for table in tables:
attr = getattr(table.c, col, None)
if attr is not None:
break
if attr is None:
raise ValueError(f"Attribute '{col}' not found on tables: {', '.join([t.name for t in tables])}.")
clause.append(getattr(attr, op)(constraint))
return and_(*clause)
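A hedged usage sketch of the helpers above (the `thing` table is invented for illustration; real callers would pass tables from lbry.db.tables). The double-underscore suffixes translate into SQL comparison operators:

from sqlalchemy import MetaData, Table, Column, Integer, Text, select
from lbry.db.utils import query  # the module added above

metadata = MetaData()
thing = Table('thing', metadata, Column('height', Integer), Column('name', Text))

# height >= 100 AND name LIKE 'foo%', with LIMIT and ORDER BY applied by query()
s = query([thing], select([thing]), height__gte=100, name__like='foo%',
          limit=10, order_by='height')
print(s)  # the compiled SELECT, with the WHERE clause built by constraints_to_clause()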


@ -1,70 +1,57 @@
import asyncio
import typing
import logging
from prometheus_client import Counter, Gauge
if typing.TYPE_CHECKING:
from lbry.dht.node import Node
from lbry.extras.daemon.storage import SQLiteStorage
log = logging.getLogger(__name__)
class BlobAnnouncer:
announcements_sent_metric = Counter(
"announcements_sent", "Number of announcements sent and their respective status.", namespace="dht_node",
labelnames=("peers", "error"),
)
announcement_queue_size_metric = Gauge(
"announcement_queue_size", "Number of hashes waiting to be announced.", namespace="dht_node",
labelnames=("scope",)
)
class SQLiteStorage:
pass
class BlobAnnouncer:
def __init__(self, loop: asyncio.AbstractEventLoop, node: 'Node', storage: 'SQLiteStorage'):
self.loop = loop
self.node = node
self.storage = storage
self.announce_task: asyncio.Task = None
self.announce_queue: typing.List[str] = []
self._done = asyncio.Event()
self.announced = set()
async def _run_consumer(self):
while self.announce_queue:
try:
blob_hash = self.announce_queue.pop()
peers = len(await self.node.announce_blob(blob_hash))
self.announcements_sent_metric.labels(peers=peers, error=False).inc()
if peers > 4:
self.announced.add(blob_hash)
else:
log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
except Exception as err:
self.announcements_sent_metric.labels(peers=0, error=True).inc()
log.warning("error announcing %s: %s", blob_hash[:8], str(err))
async def _submit_announcement(self, blob_hash):
try:
peers = len(await self.node.announce_blob(blob_hash))
if peers > 4:
return blob_hash
else:
log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise err
log.warning("error announcing %s: %s", blob_hash[:8], str(err))
async def _announce(self, batch_size: typing.Optional[int] = 10):
while batch_size:
if not self.node.joined.is_set():
await self.node.joined.wait()
await asyncio.sleep(60)
await asyncio.sleep(60, loop=self.loop)
if not self.node.protocol.routing_table.get_peers():
log.warning("No peers in DHT, announce round skipped")
continue
self.announce_queue.extend(await self.storage.get_blobs_to_announce())
self.announcement_queue_size_metric.labels(scope="global").set(len(self.announce_queue))
log.debug("announcer task wake up, %d blobs to announce", len(self.announce_queue))
while len(self.announce_queue) > 0:
log.info("%i blobs to announce", len(self.announce_queue))
await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)])
announced = list(filter(None, self.announced))
announced = await asyncio.gather(*[
self._submit_announcement(
self.announce_queue.pop()) for _ in range(batch_size) if self.announce_queue
], loop=self.loop)
announced = list(filter(None, announced))
if announced:
await self.storage.update_last_announced_blobs(announced)
log.info("announced %i blobs", len(announced))
self.announced.clear()
self._done.set()
self._done.clear()
def start(self, batch_size: typing.Optional[int] = 10):
assert not self.announce_task or self.announce_task.done(), "already running"
@ -73,6 +60,3 @@ class BlobAnnouncer:
def stop(self):
if self.announce_task and not self.announce_task.done():
self.announce_task.cancel()
def wait(self):
return self._done.wait()
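For readers who have not used prometheus_client, a minimal standalone sketch of the labeled-metric pattern the announcer adopts above (the values are illustrative):

from prometheus_client import Counter

announcements_sent = Counter(
    "announcements_sent", "Number of announcements sent and their respective status.",
    namespace="dht_node", labelnames=("peers", "error"),
)
# each distinct (peers, error) label pair becomes its own monotonic time series
announcements_sent.labels(peers=7, error=False).inc()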


@ -20,6 +20,7 @@ MAYBE_PING_DELAY = 300 # 5 minutes
CHECK_REFRESH_INTERVAL = REFRESH_INTERVAL / 5
RPC_ID_LENGTH = 20
PROTOCOL_VERSION = 1
BOTTOM_OUT_LIMIT = 3
MSG_SIZE_LIMIT = 1400


@ -1,11 +1,9 @@
import logging
import asyncio
import typing
import binascii
import socket
from prometheus_client import Gauge
from lbry.utils import aclosing, resolve_host
from lbry.utils import resolve_host
from lbry.dht import constants
from lbry.dht.peer import make_kademlia_peer
from lbry.dht.protocol.distance import Distance
@ -20,32 +18,20 @@ log = logging.getLogger(__name__)
class Node:
storing_peers_metric = Gauge(
"storing_peers", "Number of peers storing blobs announced to this node", namespace="dht_node",
labelnames=("scope",),
)
stored_blob_with_x_bytes_colliding = Gauge(
"stored_blobs_x_bytes_colliding", "Number of blobs with at least X bytes colliding with this node id prefix",
namespace="dht_node", labelnames=("amount",)
)
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, udp_port: int,
internal_udp_port: int, peer_port: int, external_ip: str, rpc_timeout: float = constants.RPC_TIMEOUT,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX,
storage: typing.Optional['SQLiteStorage'] = None):
self.loop = loop
self.internal_udp_port = internal_udp_port
self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout,
split_buckets_under_index, is_bootstrap_node)
split_buckets_under_index)
self.listening_port: asyncio.DatagramTransport = None
self.joined = asyncio.Event()
self.joined = asyncio.Event(loop=self.loop)
self._join_task: asyncio.Task = None
self._refresh_task: asyncio.Task = None
self._storage = storage
@property
def stored_blob_hashes(self):
return self.protocol.data_store.keys()
async def refresh_node(self, force_once=False):
while True:
# remove peers with expired blob announcements from the datastore
@ -55,21 +41,17 @@ class Node:
# add all peers in the routing table
total_peers.extend(self.protocol.routing_table.get_peers())
# add all the peers who have announced blobs to us
storing_peers = self.protocol.data_store.get_storing_contacts()
self.storing_peers_metric.labels("global").set(len(storing_peers))
total_peers.extend(storing_peers)
counts = {0: 0, 1: 0, 2: 0}
node_id = self.protocol.node_id
for blob_hash in self.protocol.data_store.keys():
bytes_colliding = 0 if blob_hash[0] != node_id[0] else 2 if blob_hash[1] == node_id[1] else 1
counts[bytes_colliding] += 1
self.stored_blob_with_x_bytes_colliding.labels(amount=0).set(counts[0])
self.stored_blob_with_x_bytes_colliding.labels(amount=1).set(counts[1])
self.stored_blob_with_x_bytes_colliding.labels(amount=2).set(counts[2])
total_peers.extend(self.protocol.data_store.get_storing_contacts())
# get ids falling in the midpoint of each bucket that hasn't been recently updated
node_ids = self.protocol.routing_table.get_refresh_list(0, True)
# if we have 3 or fewer populated buckets get two random ids in the range of each to try and
# populate/split the buckets further
buckets_with_contacts = self.protocol.routing_table.buckets_with_contacts()
if buckets_with_contacts <= 3:
for i in range(buckets_with_contacts):
node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
if self.protocol.routing_table.get_peers():
# if we have node ids to look up, perform the iterative search until we have k results
@ -79,7 +61,7 @@ class Node:
else:
if force_once:
break
fut = asyncio.Future()
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(constants.REFRESH_INTERVAL // 4, fut.set_result, None)
await fut
continue
@ -93,12 +75,12 @@ class Node:
if force_once:
break
fut = asyncio.Future()
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(constants.REFRESH_INTERVAL, fut.set_result, None)
await fut
async def announce_blob(self, blob_hash: str) -> typing.List[bytes]:
hash_value = bytes.fromhex(blob_hash)
hash_value = binascii.unhexlify(blob_hash.encode())
assert len(hash_value) == constants.HASH_LENGTH
peers = await self.peer_search(hash_value)
@ -108,12 +90,12 @@ class Node:
for peer in peers:
log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
stored_to_tup = await asyncio.gather(
*(self.protocol.store_to_peer(hash_value, peer) for peer in peers)
*(self.protocol.store_to_peer(hash_value, peer) for peer in peers), loop=self.loop
)
stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
if stored_to:
log.debug(
"Stored %s to %i of %i attempted peers", hash_value.hex()[:8],
"Stored %s to %i of %i attempted peers", binascii.hexlify(hash_value).decode()[:8],
len(stored_to), len(peers)
)
else:
@ -182,36 +164,39 @@ class Node:
for address, udp_port in known_node_urls or []
]))
except socket.gaierror:
await asyncio.sleep(30)
await asyncio.sleep(30, loop=self.loop)
continue
self.protocol.peer_manager.reset()
self.protocol.ping_queue.enqueue_maybe_ping(*seed_peers, delay=0.0)
await self.peer_search(self.protocol.node_id, shortlist=seed_peers, count=32)
await asyncio.sleep(1)
await asyncio.sleep(1, loop=self.loop)
def start(self, interface: str, known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls))
def get_iterative_node_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
bottom_out_limit: int = constants.BOTTOM_OUT_LIMIT,
max_results: int = constants.K) -> IterativeNodeFinder:
shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
return IterativeNodeFinder(self.loop, self.protocol, key, max_results, shortlist)
return IterativeNodeFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
key, bottom_out_limit, max_results, None, shortlist)
def get_iterative_value_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
bottom_out_limit: int = 40,
max_results: int = -1) -> IterativeValueFinder:
shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
return IterativeValueFinder(self.loop, self.protocol, key, max_results, shortlist)
return IterativeValueFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
key, bottom_out_limit, max_results, None, shortlist)
async def peer_search(self, node_id: bytes, count=constants.K, max_results=constants.K * 2,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None
bottom_out_limit=20, shortlist: typing.Optional[typing.List['KademliaPeer']] = None
) -> typing.List['KademliaPeer']:
peers = []
async with aclosing(self.get_iterative_node_finder(
node_id, shortlist=shortlist, max_results=max_results)) as node_finder:
async for iteration_peers in node_finder:
peers.extend(iteration_peers)
async for iteration_peers in self.get_iterative_node_finder(
node_id, shortlist=shortlist, bottom_out_limit=bottom_out_limit, max_results=max_results):
peers.extend(iteration_peers)
distance = Distance(node_id)
peers.sort(key=lambda peer: distance(peer.node_id))
return peers[:count]
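# Note (editor's illustration, not part of the diff): Distance(node_id) is the
# Kademlia XOR metric — calling it with a peer's id returns the integer XOR of
# the two 48-byte ids, so the sort above orders peers from nearest to farthest
# relative to the search target.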
@ -237,46 +222,39 @@ class Node:
# prioritize peers who reply to a dht ping first
# this minimizes attempting to make tcp connections that won't work later to dead or unreachable peers
async with aclosing(self.get_iterative_value_finder(bytes.fromhex(blob_hash))) as value_finder:
async for results in value_finder:
to_put = []
for peer in results:
if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
continue
is_good = self.protocol.peer_manager.peer_is_good(peer)
if is_good:
# the peer has replied recently over UDP, it can probably be reached on the TCP port
to_put.append(peer)
elif is_good is None:
if not peer.udp_port:
# TODO: use the same port for TCP and UDP
# the udp port must be guessed
# default to the ports being the same. if the TCP port appears to be <=0.48.0 default,
# including on a network with several nodes, then assume the udp port is proportionately
# based on a starting port of 4444
udp_port_to_try = peer.tcp_port
if 3400 > peer.tcp_port > 3332:
udp_port_to_try = (peer.tcp_port - 3333) + 4444
self.loop.create_task(put_into_result_queue_after_pong(
make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)
))
else:
self.loop.create_task(put_into_result_queue_after_pong(peer))
async for results in self.get_iterative_value_finder(binascii.unhexlify(blob_hash.encode())):
to_put = []
for peer in results:
if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
continue
is_good = self.protocol.peer_manager.peer_is_good(peer)
if is_good:
# the peer has replied recently over UDP, it can probably be reached on the TCP port
to_put.append(peer)
elif is_good is None:
if not peer.udp_port:
# TODO: use the same port for TCP and UDP
# the udp port must be guessed
# default to the ports being the same. if the TCP port appears to be <=0.48.0 default,
# including on a network with several nodes, then assume the udp port is proportionately
# based on a starting port of 4444
udp_port_to_try = peer.tcp_port
if 3400 > peer.tcp_port > 3332:
udp_port_to_try = (peer.tcp_port - 3333) + 4444
self.loop.create_task(put_into_result_queue_after_pong(
make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)
))
else:
# the peer is known to be bad/unreachable, skip trying to connect to it over TCP
log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
if to_put:
result_queue.put_nowait(to_put)
self.loop.create_task(put_into_result_queue_after_pong(peer))
else:
# the peer is known to be bad/unreachable, skip trying to connect to it over TCP
log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
if to_put:
result_queue.put_nowait(to_put)
def accumulate_peers(self, search_queue: asyncio.Queue,
peer_queue: typing.Optional[asyncio.Queue] = None
) -> typing.Tuple[asyncio.Queue, asyncio.Task]:
queue = peer_queue or asyncio.Queue()
queue = peer_queue or asyncio.Queue(loop=self.loop)
return queue, self.loop.create_task(self._accumulate_peers_for_value(search_queue, queue))
async def get_kademlia_peers_from_hosts(peer_list: typing.List[typing.Tuple[str, int]]) -> typing.List['KademliaPeer']:
peer_address_list = [(await resolve_host(url, port, proto='tcp'), port) for url, port in peer_list]
kademlia_peer_list = [make_kademlia_peer(None, address, None, tcp_port=port, allow_localhost=True)
for address, port in peer_address_list]
return kademlia_peer_list
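Much of the churn in this file swaps binascii calls for the bytes/str hex methods; the two spellings are interchangeable, as a quick standalone check shows:

import binascii

blob_hash = "ab" * 48  # a made-up 96-character hex digest (48 bytes)
raw = bytes.fromhex(blob_hash)
assert raw == binascii.unhexlify(blob_hash.encode())
assert raw.hex() == binascii.hexlify(raw).decode() == blob_hash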


@ -1,21 +1,18 @@
import typing
import asyncio
import logging
import ipaddress
from binascii import hexlify
from dataclasses import dataclass, field
from functools import lru_cache
from prometheus_client import Gauge
from lbry.utils import is_valid_public_ipv4 as _is_valid_public_ipv4, LRUCache
from lbry.dht import constants
from lbry.dht.serialization.datagram import make_compact_address, make_compact_ip, decode_compact_address
ALLOW_LOCALHOST = False
CACHE_SIZE = 16384
log = logging.getLogger(__name__)
@lru_cache(CACHE_SIZE)
@lru_cache(1024)
def make_kademlia_peer(node_id: typing.Optional[bytes], address: typing.Optional[str],
udp_port: typing.Optional[int] = None,
tcp_port: typing.Optional[int] = None,
@ -23,32 +20,40 @@ def make_kademlia_peer(node_id: typing.Optional[bytes], address: typing.Optional
return KademliaPeer(address, node_id, udp_port, tcp_port=tcp_port, allow_localhost=allow_localhost)
# the ipaddress module does not show these subnets as reserved
CARRIER_GRADE_NAT_SUBNET = ipaddress.ip_network('100.64.0.0/10')
IPV4_TO_6_RELAY_SUBNET = ipaddress.ip_network('192.88.99.0/24')
ALLOW_LOCALHOST = False
def is_valid_public_ipv4(address, allow_localhost: bool = False):
allow_localhost = bool(allow_localhost or ALLOW_LOCALHOST)
return _is_valid_public_ipv4(address, allow_localhost)
try:
parsed_ip = ipaddress.ip_address(address)
if parsed_ip.is_loopback and allow_localhost:
return True
return not any((parsed_ip.version != 4, parsed_ip.is_unspecified, parsed_ip.is_link_local,
parsed_ip.is_loopback, parsed_ip.is_multicast, parsed_ip.is_reserved, parsed_ip.is_private,
parsed_ip.is_reserved,
CARRIER_GRADE_NAT_SUBNET.supernet_of(ipaddress.ip_network(f"{address}/32")),
IPV4_TO_6_RELAY_SUBNET.supernet_of(ipaddress.ip_network(f"{address}/32"))))
except ipaddress.AddressValueError:
return False
class PeerManager:
peer_manager_keys_metric = Gauge(
"peer_manager_keys", "Number of keys tracked by PeerManager dicts (sum)", namespace="dht_node",
labelnames=("scope",)
)
def __init__(self, loop: asyncio.AbstractEventLoop):
self._loop = loop
self._rpc_failures: typing.Dict[
typing.Tuple[str, int], typing.Tuple[typing.Optional[float], typing.Optional[float]]
] = LRUCache(CACHE_SIZE)
self._last_replied: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
self._last_sent: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
self._last_requested: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = LRUCache(CACHE_SIZE)
self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = LRUCache(CACHE_SIZE)
self._node_tokens: typing.Dict[bytes, typing.Tuple[float, bytes]] = LRUCache(CACHE_SIZE)
def count_cache_keys(self):
return len(self._rpc_failures) + len(self._last_replied) + len(self._last_sent) + len(
self._last_requested) + len(self._node_id_mapping) + len(self._node_id_reverse_mapping) + len(
self._node_tokens)
] = {}
self._last_replied: typing.Dict[typing.Tuple[str, int], float] = {}
self._last_sent: typing.Dict[typing.Tuple[str, int], float] = {}
self._last_requested: typing.Dict[typing.Tuple[str, int], float] = {}
self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = {}
self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = {}
self._node_tokens: typing.Dict[bytes, typing.Tuple[float, bytes]] = {}
def reset(self):
for statistic in (self._rpc_failures, self._last_replied, self._last_sent, self._last_requested):
@ -98,10 +103,6 @@ class PeerManager:
self._node_id_mapping.pop(self._node_id_reverse_mapping.pop(node_id))
self._node_id_mapping[(address, udp_port)] = node_id
self._node_id_reverse_mapping[node_id] = (address, udp_port)
self.peer_manager_keys_metric.labels("global").set(self.count_cache_keys())
def get_node_id_for_endpoint(self, address, port):
return self._node_id_mapping.get((address, port))
def prune(self): # TODO: periodically call this
now = self._loop.time()
@ -153,10 +154,9 @@ class PeerManager:
def peer_is_good(self, peer: 'KademliaPeer'):
return self.contact_triple_is_good(peer.node_id, peer.address, peer.udp_port)
def decode_tcp_peer_from_compact_address(compact_address: bytes) -> 'KademliaPeer': # pylint: disable=no-self-use
node_id, address, tcp_port = decode_compact_address(compact_address)
return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)
def decode_tcp_peer_from_compact_address(self, compact_address: bytes) -> 'KademliaPeer': # pylint: disable=no-self-use
node_id, address, tcp_port = decode_compact_address(compact_address)
return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)
@dataclass(unsafe_hash=True)
@ -171,11 +171,11 @@ class KademliaPeer:
def __post_init__(self):
if self._node_id is not None:
if not len(self._node_id) == constants.HASH_LENGTH:
raise ValueError("invalid node_id: {}".format(self._node_id.hex()))
if self.udp_port is not None and not 1024 <= self.udp_port <= 65535:
raise ValueError(f"invalid udp port: {self.address}:{self.udp_port}")
if self.tcp_port is not None and not 1024 <= self.tcp_port <= 65535:
raise ValueError(f"invalid tcp port: {self.address}:{self.tcp_port}")
raise ValueError("invalid node_id: {}".format(hexlify(self._node_id).decode()))
if self.udp_port is not None and not 1 <= self.udp_port <= 65535:
raise ValueError("invalid udp port")
if self.tcp_port is not None and not 1 <= self.tcp_port <= 65535:
raise ValueError("invalid tcp port")
if not is_valid_public_ipv4(self.address, self.allow_localhost):
raise ValueError(f"invalid ip address: '{self.address}'")
@ -194,6 +194,3 @@ class KademliaPeer:
def compact_ip(self):
return make_compact_ip(self.address)
def __str__(self):
return f"{self.__class__.__name__}({self.node_id.hex()[:8]}@{self.address}:{self.udp_port}-{self.tcp_port})"


@ -16,12 +16,6 @@ class DictDataStore:
self._peer_manager = peer_manager
self.completed_blobs: typing.Set[str] = set()
def keys(self):
return self._data_store.keys()
def __len__(self):
return self._data_store.__len__()
def removed_expired_peers(self):
now = self.loop.time()
keys = list(self._data_store.keys())


@ -1,17 +1,18 @@
import asyncio
from binascii import hexlify
from itertools import chain
from collections import defaultdict, OrderedDict
from collections.abc import AsyncIterator
from collections import defaultdict
import typing
import logging
from typing import TYPE_CHECKING
from lbry.dht import constants
from lbry.dht.error import RemoteException, TransportNotConnected
from lbry.dht.protocol.distance import Distance
from lbry.dht.peer import make_kademlia_peer, decode_tcp_peer_from_compact_address
from lbry.dht.peer import make_kademlia_peer
from lbry.dht.serialization.datagram import PAGE_KEY
if TYPE_CHECKING:
from lbry.dht.protocol.routing_table import TreeRoutingTable
from lbry.dht.protocol.protocol import KademliaProtocol
from lbry.dht.peer import PeerManager, KademliaPeer
@ -26,15 +27,6 @@ class FindResponse:
def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
raise NotImplementedError()
def get_close_kademlia_peers(self, peer_info) -> typing.Generator['KademliaPeer', None, None]:
for contact_triple in self.get_close_triples():
node_id, address, udp_port = contact_triple
try:
yield make_kademlia_peer(node_id, address, udp_port)
except ValueError:
log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer_info.address,
peer_info.udp_port, address, udp_port)
class FindNodeResponse(FindResponse):
def __init__(self, key: bytes, close_triples: typing.List[typing.Tuple[bytes, str, int]]):
@ -65,33 +57,57 @@ class FindValueResponse(FindResponse):
return [(node_id, address.decode(), port) for node_id, address, port in self.close_triples]
class IterativeFinder(AsyncIterator):
def __init__(self, loop: asyncio.AbstractEventLoop,
protocol: 'KademliaProtocol', key: bytes,
max_results: typing.Optional[int] = constants.K,
def get_shortlist(routing_table: 'TreeRoutingTable', key: bytes,
shortlist: typing.Optional[typing.List['KademliaPeer']]) -> typing.List['KademliaPeer']:
"""
If not provided, initialize the shortlist of peers to probe to the (up to) k closest peers in the routing table
:param routing_table: a TreeRoutingTable
:param key: a 48 byte hash
:param shortlist: optional manually provided shortlist, this is done during bootstrapping when there are no
peers in the routing table. During bootstrap the shortlist is set to be the seed nodes.
"""
if len(key) != constants.HASH_LENGTH:
raise ValueError("invalid key length: %i" % len(key))
return shortlist or routing_table.find_close_peers(key)
class IterativeFinder:
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
if len(key) != constants.HASH_LENGTH:
raise ValueError("invalid key length: %i" % len(key))
self.loop = loop
self.peer_manager = protocol.peer_manager
self.peer_manager = peer_manager
self.routing_table = routing_table
self.protocol = protocol
self.key = key
self.max_results = max(constants.K, max_results)
self.bottom_out_limit = bottom_out_limit
self.max_results = max_results
self.exclude = exclude or []
self.active: typing.Dict['KademliaPeer', int] = OrderedDict() # peer: distance, sorted
self.active: typing.Set['KademliaPeer'] = set()
self.contacted: typing.Set['KademliaPeer'] = set()
self.distance = Distance(key)
self.iteration_queue = asyncio.Queue()
self.closest_peer: typing.Optional['KademliaPeer'] = None
self.prev_closest_peer: typing.Optional['KademliaPeer'] = None
self.running_probes: typing.Dict['KademliaPeer', asyncio.Task] = {}
self.iteration_queue = asyncio.Queue(loop=self.loop)
self.running_probes: typing.Set[asyncio.Task] = set()
self.iteration_count = 0
self.bottom_out_count = 0
self.running = False
self.tasks: typing.List[asyncio.Task] = []
for peer in shortlist:
self.delayed_calls: typing.List[asyncio.Handle] = []
for peer in get_shortlist(routing_table, key, shortlist):
if peer.node_id:
self._add_active(peer, force=True)
self._add_active(peer)
else:
# seed nodes
self._schedule_probe(peer)
@ -123,79 +139,66 @@ class IterativeFinder(AsyncIterator):
"""
return []
def _add_active(self, peer, force=False):
if not force and self.peer_manager.peer_is_good(peer) is False:
return
if peer in self.contacted:
return
def _is_closer(self, peer: 'KademliaPeer') -> bool:
return not self.closest_peer or self.distance.is_closer(peer.node_id, self.closest_peer.node_id)
def _add_active(self, peer):
if peer not in self.active and peer.node_id and peer.node_id != self.protocol.node_id:
self.active[peer] = self.distance(peer.node_id)
self.active = OrderedDict(sorted(self.active.items(), key=lambda item: item[1]))
self.active.add(peer)
if self._is_closer(peer):
self.prev_closest_peer = self.closest_peer
self.closest_peer = peer
async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
self._add_active(peer)
for new_peer in response.get_close_kademlia_peers(peer):
self._add_active(new_peer)
for contact_triple in response.get_close_triples():
node_id, address, udp_port = contact_triple
try:
self._add_active(make_kademlia_peer(node_id, address, udp_port))
except ValueError:
log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer.address,
peer.udp_port, address, udp_port)
self.check_result_ready(response)
self._log_state(reason="check result")
def _reset_closest(self, peer):
if peer in self.active:
del self.active[peer]
async def _send_probe(self, peer: 'KademliaPeer'):
try:
response = await self.send_probe(peer)
except asyncio.TimeoutError:
self._reset_closest(peer)
self.active.discard(peer)
return
except asyncio.CancelledError:
log.debug("%s[%x] cancelled probe",
type(self).__name__, id(self))
raise
except ValueError as err:
log.warning(str(err))
self._reset_closest(peer)
self.active.discard(peer)
return
except TransportNotConnected:
await self._aclose(reason="not connected")
return
return self.aclose()
except RemoteException:
self._reset_closest(peer)
return
return await self._handle_probe_result(peer, response)
def _search_round(self):
async def _search_round(self):
"""
Send up to constants.alpha (5) probes to closest active peers
"""
added = 0
for index, peer in enumerate(self.active.keys()):
if index == 0:
log.debug("%s[%x] closest to probe: %s",
type(self).__name__, id(self),
peer.node_id.hex()[:8])
if peer in self.contacted:
continue
if len(self.running_probes) >= constants.ALPHA:
break
if index > (constants.K + len(self.running_probes)):
to_probe = list(self.active - self.contacted)
to_probe.sort(key=lambda peer: self.distance(peer.node_id))
for peer in to_probe:
if added >= constants.ALPHA:
break
origin_address = (peer.address, peer.udp_port)
if origin_address in self.exclude:
continue
if peer.node_id == self.protocol.node_id:
continue
if origin_address == (self.protocol.external_ip, self.protocol.udp_port):
continue
self._schedule_probe(peer)
added += 1
log.debug("%s[%x] running %d probes for key %s",
type(self).__name__, id(self),
len(self.running_probes), self.key.hex()[:8])
log.debug("running %d probes", len(self.running_probes))
if not added and not self.running_probes:
log.debug("%s[%x] search for %s exhausted",
type(self).__name__, id(self),
self.key.hex()[:8])
log.debug("search for %s exhausted", hexlify(self.key)[:8])
self.search_exhausted()
def _schedule_probe(self, peer: 'KademliaPeer'):
@ -204,24 +207,33 @@ class IterativeFinder(AsyncIterator):
t = self.loop.create_task(self._send_probe(peer))
def callback(_):
self.running_probes.pop(peer, None)
if self.running:
self._search_round()
self.running_probes.difference_update({
probe for probe in self.running_probes if probe.done() or probe == t
})
if not self.running_probes:
self.tasks.append(self.loop.create_task(self._search_task(0.0)))
t.add_done_callback(callback)
self.running_probes[peer] = t
self.running_probes.add(t)
def _log_state(self, reason="?"):
log.debug("%s[%x] [%s] %s: %i active nodes %i contacted %i produced %i queued",
type(self).__name__, id(self), self.key.hex()[:8],
reason, len(self.active), len(self.contacted),
self.iteration_count, self.iteration_queue.qsize())
async def _search_task(self, delay: typing.Optional[float] = constants.ITERATIVE_LOOKUP_DELAY):
try:
if self.running:
await self._search_round()
if self.running:
self.delayed_calls.append(self.loop.call_later(delay, self._search))
except (asyncio.CancelledError, StopAsyncIteration, TransportNotConnected):
if self.running:
self.loop.call_soon(self.aclose)
def _search(self):
self.tasks.append(self.loop.create_task(self._search_task()))
def __aiter__(self):
if self.running:
raise Exception("already running")
self.running = True
self.loop.call_soon(self._search_round)
self._search()
return self
async def __anext__(self) -> typing.List['KademliaPeer']:
@ -234,57 +246,47 @@ class IterativeFinder(AsyncIterator):
raise StopAsyncIteration
self.iteration_count += 1
return result
except asyncio.CancelledError:
await self._aclose(reason="cancelled")
raise
except StopAsyncIteration:
await self._aclose(reason="no more results")
except (asyncio.CancelledError, StopAsyncIteration):
self.loop.call_soon(self.aclose)
raise
async def _aclose(self, reason="?"):
log.debug("%s[%x] [%s] shutdown because %s: %i active nodes %i contacted %i produced %i queued",
type(self).__name__, id(self), self.key.hex()[:8],
reason, len(self.active), len(self.contacted),
self.iteration_count, self.iteration_queue.qsize())
def aclose(self):
self.running = False
self.iteration_queue.put_nowait(None)
for task in chain(self.tasks, self.running_probes.values()):
for task in chain(self.tasks, self.running_probes, self.delayed_calls):
task.cancel()
self.tasks.clear()
self.running_probes.clear()
self.delayed_calls.clear()
async def aclose(self):
if self.running:
await self._aclose(reason="aclose")
log.debug("%s[%x] [%s] async close completed",
type(self).__name__, id(self), self.key.hex()[:8])
class IterativeNodeFinder(IterativeFinder):
def __init__(self, loop: asyncio.AbstractEventLoop,
protocol: 'KademliaProtocol', key: bytes,
max_results: typing.Optional[int] = constants.K,
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
super().__init__(loop, protocol, key, max_results, shortlist)
super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
shortlist)
self.yielded_peers: typing.Set['KademliaPeer'] = set()
async def send_probe(self, peer: 'KademliaPeer') -> FindNodeResponse:
log.debug("probe %s:%d (%s) for NODE %s",
peer.address, peer.udp_port, peer.node_id.hex()[:8] if peer.node_id else '', self.key.hex()[:8])
log.debug("probing %s:%d %s", peer.address, peer.udp_port, hexlify(peer.node_id)[:8] if peer.node_id else '')
response = await self.protocol.get_rpc_peer(peer).find_node(self.key)
return FindNodeResponse(self.key, response)
def search_exhausted(self):
self.put_result(self.active.keys(), finish=True)
self.put_result(self.active, finish=True)
def put_result(self, from_iter: typing.Iterable['KademliaPeer'], finish=False):
not_yet_yielded = [
peer for peer in from_iter
if peer not in self.yielded_peers
and peer.node_id != self.protocol.node_id
and self.peer_manager.peer_is_good(peer) is True # return only peers who answered
and self.peer_manager.peer_is_good(peer) is not False
]
not_yet_yielded.sort(key=lambda peer: self.distance(peer.node_id))
to_yield = not_yet_yielded[:max(constants.K, self.max_results)]
to_yield = not_yet_yielded[:min(constants.K, len(not_yet_yielded))]
if to_yield:
self.yielded_peers.update(to_yield)
self.iteration_queue.put_nowait(to_yield)
@ -296,15 +298,27 @@ class IterativeNodeFinder(IterativeFinder):
if found:
log.debug("found")
return self.put_result(self.active.keys(), finish=True)
return self.put_result(self.active, finish=True)
if self.prev_closest_peer and self.closest_peer and not self._is_closer(self.prev_closest_peer):
# log.info("improving, %i %i %i %i %i", len(self.shortlist), len(self.active), len(self.contacted),
# self.bottom_out_count, self.iteration_count)
self.bottom_out_count = 0
elif self.prev_closest_peer and self.closest_peer:
self.bottom_out_count += 1
log.info("bottom out %i %i %i", len(self.active), len(self.contacted), self.bottom_out_count)
if self.bottom_out_count >= self.bottom_out_limit or self.iteration_count >= self.bottom_out_limit:
log.info("limit hit")
self.put_result(self.active, True)
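# Note (editor's illustration, not part of the diff): "bottoming out" means the
# closest known peer has stopped improving for bottom_out_limit consecutive
# responses (or the iteration count hits the same limit); the finder then yields
# everything in self.active and stops instead of probing indefinitely.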
class IterativeValueFinder(IterativeFinder):
def __init__(self, loop: asyncio.AbstractEventLoop,
protocol: 'KademliaProtocol', key: bytes,
max_results: typing.Optional[int] = constants.K,
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
super().__init__(loop, protocol, key, max_results, shortlist)
super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
shortlist)
self.blob_peers: typing.Set['KademliaPeer'] = set()
# this tracks the index of the most recent page we requested from each peer
self.peer_pages: typing.DefaultDict['KademliaPeer', int] = defaultdict(int)
@ -312,8 +326,6 @@ class IterativeValueFinder(IterativeFinder):
self.discovered_peers: typing.Dict['KademliaPeer', typing.Set['KademliaPeer']] = defaultdict(set)
async def send_probe(self, peer: 'KademliaPeer') -> FindValueResponse:
log.debug("probe %s:%d (%s) for VALUE %s",
peer.address, peer.udp_port, peer.node_id.hex()[:8], self.key.hex()[:8])
page = self.peer_pages[peer]
response = await self.protocol.get_rpc_peer(peer).find_value(self.key, page=page)
parsed = FindValueResponse(self.key, response)
@ -323,7 +335,7 @@ class IterativeValueFinder(IterativeFinder):
decoded_peers = set()
for compact_addr in parsed.found_compact_addresses:
try:
decoded_peers.add(decode_tcp_peer_from_compact_address(compact_addr))
decoded_peers.add(self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr))
except ValueError:
log.warning("misbehaving peer %s:%i returned invalid peer for blob",
peer.address, peer.udp_port)
@ -335,6 +347,7 @@ class IterativeValueFinder(IterativeFinder):
already_known + len(parsed.found_compact_addresses))
if len(self.discovered_peers[peer]) != already_known + len(parsed.found_compact_addresses):
log.warning("misbehaving peer %s:%i returned duplicate peers for blob", peer.address, peer.udp_port)
parsed.found_compact_addresses.clear()
elif len(parsed.found_compact_addresses) >= constants.K and self.peer_pages[peer] < parsed.pages:
# the peer returned a full page and indicates it has more
self.peer_pages[peer] += 1
@ -345,15 +358,26 @@ class IterativeValueFinder(IterativeFinder):
def check_result_ready(self, response: FindValueResponse):
if response.found:
blob_peers = [decode_tcp_peer_from_compact_address(compact_addr)
blob_peers = [self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr)
for compact_addr in response.found_compact_addresses]
to_yield = []
self.bottom_out_count = 0
for blob_peer in blob_peers:
if blob_peer not in self.blob_peers:
self.blob_peers.add(blob_peer)
to_yield.append(blob_peer)
if to_yield:
# log.info("found %i new peers for blob", len(to_yield))
self.iteration_queue.put_nowait(to_yield)
# if self.max_results and len(self.blob_peers) >= self.max_results:
# log.info("enough blob peers found")
# if not self.finished.is_set():
# self.finished.set()
elif self.prev_closest_peer and self.closest_peer:
self.bottom_out_count += 1
if self.bottom_out_count >= self.bottom_out_limit:
log.info("blob peer search bottomed out")
self.iteration_queue.put_nowait(None)
def get_initial_result(self) -> typing.List['KademliaPeer']:
if self.protocol.data_store.has_peers_for_blob(self.key):


@ -3,16 +3,13 @@ import socket
import functools
import hashlib
import asyncio
import time
import typing
import binascii
import random
from asyncio.protocols import DatagramProtocol
from asyncio.transports import DatagramTransport
from prometheus_client import Gauge, Counter, Histogram
from lbry.dht import constants
from lbry.dht.serialization.bencoding import DecodeError
from lbry.dht.serialization.datagram import decode_datagram, ErrorDatagram, ResponseDatagram, RequestDatagram
from lbry.dht.serialization.datagram import RESPONSE_TYPE, ERROR_TYPE, PAGE_KEY
from lbry.dht.error import RemoteException, TransportNotConnected
@ -33,11 +30,6 @@ OLD_PROTOCOL_ERRORS = {
class KademliaRPC:
stored_blob_metric = Gauge(
"stored_blobs", "Number of blobs announced by other peers", namespace="dht_node",
labelnames=("scope",),
)
def __init__(self, protocol: 'KademliaProtocol', loop: asyncio.AbstractEventLoop, peer_port: int = 3333):
self.protocol = protocol
self.loop = loop
@ -69,7 +61,6 @@ class KademliaRPC:
self.protocol.data_store.add_peer_to_blob(
rpc_contact, blob_hash
)
self.stored_blob_metric.labels("global").set(len(self.protocol.data_store))
return b'OK'
def find_node(self, rpc_contact: 'KademliaPeer', key: bytes) -> typing.List[typing.Tuple[bytes, str, int]]:
@ -105,7 +96,7 @@ class KademliaRPC:
if not rpc_contact.tcp_port or peer.compact_address_tcp() != rpc_contact.compact_address_tcp()
]
# if we don't have k storing peers to return and we have this hash locally, include our contact information
if len(peers) < constants.K and key.hex() in self.protocol.data_store.completed_blobs:
if len(peers) < constants.K and binascii.hexlify(key).decode() in self.protocol.data_store.completed_blobs:
peers.append(self.compact_address())
if not peers:
response[PAGE_KEY] = 0
@ -218,10 +209,6 @@ class PingQueue:
def running(self):
return self._running
@property
def busy(self):
return self._running and (any(self._running_pings) or any(self._pending_contacts))
def enqueue_maybe_ping(self, *peers: 'KademliaPeer', delay: typing.Optional[float] = None):
delay = delay if delay is not None else self._default_delay
now = self._loop.time()
@ -233,7 +220,7 @@ class PingQueue:
async def ping_task():
try:
if self._protocol.peer_manager.peer_is_good(peer):
if not self._protocol.routing_table.get_peer(peer.node_id):
if peer not in self._protocol.routing_table.get_peers():
self._protocol.add_peer(peer)
return
await self._protocol.get_rpc_peer(peer).ping()
@ -253,7 +240,7 @@ class PingQueue:
del self._pending_contacts[peer]
self.maybe_ping(peer)
break
await asyncio.sleep(1)
await asyncio.sleep(1, loop=self._loop)
def start(self):
assert not self._running
@ -272,33 +259,9 @@ class PingQueue:
class KademliaProtocol(DatagramProtocol):
request_sent_metric = Counter(
"request_sent", "Number of requests send from DHT RPC protocol", namespace="dht_node",
labelnames=("method",),
)
request_success_metric = Counter(
"request_success", "Number of successful requests", namespace="dht_node",
labelnames=("method",),
)
request_error_metric = Counter(
"request_error", "Number of errors returned from request to other peers", namespace="dht_node",
labelnames=("method",),
)
HISTOGRAM_BUCKETS = (
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 3.0, 3.5, 4.0, 4.50, 5.0, 5.50, 6.0, float('inf')
)
response_time_metric = Histogram(
"response_time", "Response times of DHT RPC requests", namespace="dht_node", buckets=HISTOGRAM_BUCKETS,
labelnames=("method",)
)
received_request_metric = Counter(
"received_request", "Number of received DHT RPC requests", namespace="dht_node",
labelnames=("method",),
)
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, external_ip: str,
udp_port: int, peer_port: int, rpc_timeout: float = constants.RPC_TIMEOUT,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False):
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
self.peer_manager = peer_manager
self.loop = loop
self.node_id = node_id
@ -313,16 +276,15 @@ class KademliaProtocol(DatagramProtocol):
self.transport: DatagramTransport = None
self.old_token_secret = constants.generate_id()
self.token_secret = constants.generate_id()
self.routing_table = TreeRoutingTable(
self.loop, self.peer_manager, self.node_id, split_buckets_under_index, is_bootstrap_node=is_bootstrap_node)
self.routing_table = TreeRoutingTable(self.loop, self.peer_manager, self.node_id, split_buckets_under_index)
self.data_store = DictDataStore(self.loop, self.peer_manager)
self.ping_queue = PingQueue(self.loop, self)
self.node_rpc = KademliaRPC(self, self.loop, self.peer_port)
self.rpc_timeout = rpc_timeout
self._split_lock = asyncio.Lock()
self._split_lock = asyncio.Lock(loop=self.loop)
self._to_remove: typing.Set['KademliaPeer'] = set()
self._to_add: typing.Set['KademliaPeer'] = set()
self._wakeup_routing_task = asyncio.Event()
self._wakeup_routing_task = asyncio.Event(loop=self.loop)
self.maintaining_routing_task: typing.Optional[asyncio.Task] = None
@functools.lru_cache(128)
@ -361,10 +323,72 @@ class KademliaProtocol(DatagramProtocol):
return args, {}
async def _add_peer(self, peer: 'KademliaPeer'):
async def probe(some_peer: 'KademliaPeer'):
rpc_peer = self.get_rpc_peer(some_peer)
await rpc_peer.ping()
return await self.routing_table.add_peer(peer, probe)
if not peer.node_id:
log.warning("Tried adding a peer with no node id!")
return False
for my_peer in self.routing_table.get_peers():
if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
self.routing_table.remove_peer(my_peer)
self.routing_table.join_buckets()
bucket_index = self.routing_table.kbucket_index(peer.node_id)
if self.routing_table.buckets[bucket_index].add_peer(peer):
return True
# The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
if self.routing_table.should_split(bucket_index, peer.node_id):
self.routing_table.split_bucket(bucket_index)
# Retry the insertion attempt
result = await self._add_peer(peer)
self.routing_table.join_buckets()
return result
else:
# We can't split the k-bucket
#
# The 13 page kademlia paper specifies that the least recently contacted node in the bucket
# shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
# the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
#
# A reasonable extension to this is BEP 0005, which extends the above:
#
# Not all nodes that we learn about are equal. Some are "good" and some are not.
# Many nodes using the DHT are able to send queries and receive responses,
# but are not able to respond to queries from other nodes. It is important that
# each node's routing table must contain only known good nodes. A good node is
# a node has responded to one of our queries within the last 15 minutes. A node
# is also good if it has ever responded to one of our queries and has sent us a
# query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
# questionable. Nodes become bad when they fail to respond to multiple queries
# in a row. Nodes that we know are good are given priority over nodes with unknown status.
#
# When there are bad or questionable nodes in the bucket, the least recent is selected for
# potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
# contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
# is ignored if the pinged node replies.
not_good_contacts = self.routing_table.buckets[bucket_index].get_bad_or_unknown_peers()
not_recently_replied = []
for my_peer in not_good_contacts:
last_replied = self.peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
if not last_replied or last_replied + 60 < self.loop.time():
not_recently_replied.append(my_peer)
if not_recently_replied:
to_replace = not_recently_replied[0]
else:
to_replace = self.routing_table.buckets[bucket_index].peers[0]
last_replied = self.peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
if last_replied and last_replied + 60 > self.loop.time():
return False
log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
try:
to_replace_rpc = self.get_rpc_peer(to_replace)
await to_replace_rpc.ping()
return False
except asyncio.TimeoutError:
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
if to_replace in self.routing_table.buckets[bucket_index]:
self.routing_table.buckets[bucket_index].remove_peer(to_replace)
return await self._add_peer(peer)
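# Note (editor's illustration, not part of the diff): the flow above is — try
# the insert; if the bucket is full but splittable, split and retry; otherwise
# ping the least recently replied (or stalest) contact and evict it only on
# ping timeout, per section 2.2 of the Kademlia paper as extended by BEP 0005.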
def add_peer(self, peer: 'KademliaPeer'):
if peer.node_id == self.node_id:
@ -382,15 +406,16 @@ class KademliaProtocol(DatagramProtocol):
async with self._split_lock:
peer = self._to_remove.pop()
self.routing_table.remove_peer(peer)
self.routing_table.join_buckets()
while self._to_add:
async with self._split_lock:
await self._add_peer(self._to_add.pop())
await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1))
await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1, loop=self.loop), loop=self.loop)
self._wakeup_routing_task.clear()
def _handle_rpc(self, sender_contact: 'KademliaPeer', message: RequestDatagram):
assert sender_contact.node_id != self.node_id, (sender_contact.node_id.hex()[:8],
self.node_id.hex()[:8])
assert sender_contact.node_id != self.node_id, (binascii.hexlify(sender_contact.node_id)[:8].decode(),
binascii.hexlify(self.node_id)[:8].decode())
method = message.method
if method not in [b'ping', b'store', b'findNode', b'findValue']:
raise AttributeError('Invalid method: %s' % message.method.decode())
@ -422,15 +447,11 @@ class KademliaProtocol(DatagramProtocol):
def handle_request_datagram(self, address: typing.Tuple[str, int], request_datagram: RequestDatagram):
# This is an RPC method request
self.received_request_metric.labels(method=request_datagram.method).inc()
self.peer_manager.report_last_requested(address[0], address[1])
peer = self.routing_table.get_peer(request_datagram.node_id)
if not peer:
try:
peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
except ValueError as err:
log.warning("error replying to %s: %s", address[0], str(err))
return
try:
peer = self.routing_table.get_peer(request_datagram.node_id)
except IndexError:
peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
try:
self._handle_rpc(peer, request_datagram)
# if the contact is not known to be bad (yet) and we haven't yet queried it, send it a ping so that it
@ -530,12 +551,12 @@ class KademliaProtocol(DatagramProtocol):
address[0], address[1], OLD_PROTOCOL_ERRORS[error_datagram.response]
)
def datagram_received(self, datagram: bytes, address: typing.Tuple[str, int]) -> None: # pylint: disable=arguments-renamed
def datagram_received(self, datagram: bytes, address: typing.Tuple[str, int]) -> None: # pylint: disable=arguments-differ
try:
message = decode_datagram(datagram)
except (ValueError, TypeError, DecodeError):
except (ValueError, TypeError):
self.peer_manager.report_failure(address[0], address[1])
log.warning("Couldn't decode dht datagram from %s: %s", address, datagram.hex())
log.warning("Couldn't decode dht datagram from %s: %s", address, binascii.hexlify(datagram).decode())
return
if isinstance(message, RequestDatagram):
@ -550,19 +571,14 @@ class KademliaProtocol(DatagramProtocol):
self._send(peer, request)
response_fut = self.sent_messages[request.rpc_id][1]
try:
self.request_sent_metric.labels(method=request.method).inc()
start = time.perf_counter()
response = await asyncio.wait_for(response_fut, self.rpc_timeout)
self.response_time_metric.labels(method=request.method).observe(time.perf_counter() - start)
self.peer_manager.report_last_replied(peer.address, peer.udp_port)
self.request_success_metric.labels(method=request.method).inc()
return response
except asyncio.CancelledError:
if not response_fut.done():
response_fut.cancel()
raise
except (asyncio.TimeoutError, RemoteException):
self.request_error_metric.labels(method=request.method).inc()
self.peer_manager.report_failure(peer.address, peer.udp_port)
if self.peer_manager.peer_is_good(peer) is False:
self.remove_peer(peer)
@ -582,7 +598,7 @@ class KademliaProtocol(DatagramProtocol):
if len(data) > constants.MSG_SIZE_LIMIT:
log.warning("cannot send datagram larger than %i bytes (packet is %i bytes)",
constants.MSG_SIZE_LIMIT, len(data))
log.debug("Packet is too large to send: %s", data[:3500].hex())
log.debug("Packet is too large to send: %s", binascii.hexlify(data[:3500]).decode())
raise ValueError(
f"cannot send datagram larger than {constants.MSG_SIZE_LIMIT} bytes (packet is {len(data)} bytes)"
)
@ -642,13 +658,13 @@ class KademliaProtocol(DatagramProtocol):
res = await self.get_rpc_peer(peer).store(hash_value)
if res != b"OK":
raise ValueError(res)
log.debug("Stored %s to %s", hash_value.hex()[:8], peer)
log.debug("Stored %s to %s", binascii.hexlify(hash_value).decode()[:8], peer)
return peer.node_id, True
try:
return await __store()
except asyncio.TimeoutError:
log.debug("Timeout while storing blob_hash %s at %s", hash_value.hex()[:8], peer)
log.debug("Timeout while storing blob_hash %s at %s", binascii.hexlify(hash_value).decode()[:8], peer)
return peer.node_id, False
except ValueError as err:
log.error("Unexpected response: %s", err)


@ -4,11 +4,7 @@ import logging
import typing
import itertools
from prometheus_client import Gauge
from lbry import utils
from lbry.dht import constants
from lbry.dht.error import RemoteException
from lbry.dht.protocol.distance import Distance
if typing.TYPE_CHECKING:
from lbry.dht.peer import KademliaPeer, PeerManager
@ -17,20 +13,10 @@ log = logging.getLogger(__name__)
class KBucket:
""" Description - later
"""
Kademlia K-bucket implementation.
"""
peer_in_routing_table_metric = Gauge(
"peers_in_routing_table", "Number of peers on routing table", namespace="dht_node",
labelnames=("scope",)
)
peer_with_x_bit_colliding_metric = Gauge(
"peer_x_bit_colliding", "Number of peers with at least X bits colliding with this node id",
namespace="dht_node", labelnames=("amount",)
)
def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int,
node_id: bytes, capacity: int = constants.K):
def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int, node_id: bytes):
"""
@param range_min: The lower boundary for the range in the n-bit ID
space covered by this k-bucket
@ -38,12 +24,12 @@ class KBucket:
covered by this k-bucket
"""
self._peer_manager = peer_manager
self.last_accessed = 0
self.range_min = range_min
self.range_max = range_max
self.peers: typing.List['KademliaPeer'] = []
self._node_id = node_id
self._distance_to_self = Distance(node_id)
self.capacity = capacity
def add_peer(self, peer: 'KademliaPeer') -> bool:
""" Add contact to _contact list in the right order. This will move the
@ -64,25 +50,24 @@ class KBucket:
self.peers.append(peer)
return True
else:
for i, _ in enumerate(self.peers):
for i in range(len(self.peers)):
local_peer = self.peers[i]
if local_peer.node_id == peer.node_id:
self.peers.remove(local_peer)
self.peers.append(peer)
return True
if len(self.peers) < self.capacity:
if len(self.peers) < constants.K:
self.peers.append(peer)
self.peer_in_routing_table_metric.labels("global").inc()
bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).inc()
return True
else:
return False
# raise BucketFull("No space in bucket to insert contact")
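Note: the add_peer logic above is the classic Kademlia least-recently-seen ordering: a re-seen peer moves to the tail, new peers append while the bucket has room, and a full bucket refuses the insert so the caller can run the eviction logic. A minimal standalone sketch of just that ordering policy (plain tuples stand in for KademliaPeer; K=8 mirrors constants.K):

    K = 8  # bucket capacity, mirroring constants.K

    def bucket_add(peers: list, peer: tuple) -> bool:
        """peers is ordered least- to most-recently seen; peer is (node_id, address)."""
        for existing in peers:
            if existing[0] == peer[0]:   # already known: refresh its recency
                peers.remove(existing)
                peers.append(peer)
                return True
        if len(peers) < K:               # room left: append at the tail
            peers.append(peer)
            return True
        return False                     # full: caller decides about eviction

    bucket = []
    for n in range(10):
        bucket_add(bucket, (bytes([n]), ('127.0.0.1', 4444 + n)))
    assert len(bucket) == K                                        # peers 8 and 9 were refused
    assert bucket_add(bucket, (bytes([0]), ('127.0.0.1', 4444)))   # refresh still works
    assert bucket[-1][0] == bytes([0])                             # moved to the tail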
def get_peer(self, node_id: bytes) -> 'KademliaPeer':
for peer in self.peers:
if peer.node_id == node_id:
return peer
raise IndexError(node_id)
def get_peers(self, count=-1, exclude_contact=None, sort_distance_to=None) -> typing.List['KademliaPeer']:
""" Returns a list containing up to the first count number of contacts
@ -139,9 +124,6 @@ class KBucket:
def remove_peer(self, peer: 'KademliaPeer') -> None:
self.peers.remove(peer)
self.peer_in_routing_table_metric.labels("global").dec()
bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).dec()
def key_in_range(self, key: bytes) -> bool:
""" Tests whether the specified key (i.e. node ID) is in the range
@ -179,36 +161,24 @@ class TreeRoutingTable:
version of the Kademlia paper, in section 2.4. It does, however, use the
ping RPC-based k-bucket eviction algorithm described in section 2.2 of
that paper.
BOOTSTRAP MODE: if set to True, we always add all peers. This is so a
bootstrap node does not get biased towards its own node id, and its
replies are the best it can provide (a joining peer learns its neighbors
immediately). Over time this will need to be optimized to use the disk,
since holding everything in memory won't remain feasible.
See: https://github.com/bittorrent/bootstrap-dht
"""
bucket_in_routing_table_metric = Gauge(
"buckets_in_routing_table", "Number of buckets on routing table", namespace="dht_node",
labelnames=("scope",)
)
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', parent_node_id: bytes,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False):
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
self._loop = loop
self._peer_manager = peer_manager
self._parent_node_id = parent_node_id
self._split_buckets_under_index = split_buckets_under_index
self.buckets: typing.List[KBucket] = [
KBucket(
self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id,
capacity=1 << 32 if is_bootstrap_node else constants.K
self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id
)
]
def get_peers(self) -> typing.List['KademliaPeer']:
return list(itertools.chain.from_iterable(map(lambda bucket: bucket.peers, self.buckets)))
def _should_split(self, bucket_index: int, to_add: bytes) -> bool:
def should_split(self, bucket_index: int, to_add: bytes) -> bool:
# https://stackoverflow.com/questions/32129978/highly-unbalanced-kademlia-routing-table/32187456#32187456
if bucket_index < self._split_buckets_under_index:
return True
@ -233,32 +203,39 @@ class TreeRoutingTable:
return []
def get_peer(self, contact_id: bytes) -> 'KademliaPeer':
return self.buckets[self._kbucket_index(contact_id)].get_peer(contact_id)
"""
@raise IndexError: No contact with the specified contact ID is known
by this node
"""
return self.buckets[self.kbucket_index(contact_id)].get_peer(contact_id)
def get_refresh_list(self, start_index: int = 0, force: bool = False) -> typing.List[bytes]:
bucket_index = start_index
refresh_ids = []
for offset, _ in enumerate(self.buckets[start_index:]):
refresh_ids.append(self._midpoint_id_in_bucket_range(start_index + offset))
# if we have 3 or fewer populated buckets get two random ids in the range of each to try and
# populate/split the buckets further
buckets_with_contacts = self.buckets_with_contacts()
if buckets_with_contacts <= 3:
for i in range(buckets_with_contacts):
refresh_ids.append(self._random_id_in_bucket_range(i))
refresh_ids.append(self._random_id_in_bucket_range(i))
now = int(self._loop.time())
for bucket in self.buckets[start_index:]:
if force or now - bucket.last_accessed >= constants.REFRESH_INTERVAL:
to_search = self.midpoint_id_in_bucket_range(bucket_index)
refresh_ids.append(to_search)
bucket_index += 1
return refresh_ids
def remove_peer(self, peer: 'KademliaPeer') -> None:
if not peer.node_id:
return
bucket_index = self._kbucket_index(peer.node_id)
bucket_index = self.kbucket_index(peer.node_id)
try:
self.buckets[bucket_index].remove_peer(peer)
self._join_buckets()
except ValueError:
return
def _kbucket_index(self, key: bytes) -> int:
def touch_kbucket(self, key: bytes) -> None:
self.touch_kbucket_by_index(self.kbucket_index(key))
def touch_kbucket_by_index(self, bucket_index: int):
self.buckets[bucket_index].last_accessed = int(self._loop.time())
def kbucket_index(self, key: bytes) -> int:
i = 0
for bucket in self.buckets:
if bucket.key_in_range(key):
@ -267,19 +244,19 @@ class TreeRoutingTable:
i += 1
return i
def _random_id_in_bucket_range(self, bucket_index: int) -> bytes:
def random_id_in_bucket_range(self, bucket_index: int) -> bytes:
random_id = int(random.randrange(self.buckets[bucket_index].range_min, self.buckets[bucket_index].range_max))
return Distance(
self._parent_node_id
)(random_id.to_bytes(constants.HASH_LENGTH, 'big')).to_bytes(constants.HASH_LENGTH, 'big')
def _midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
def midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
half = int((self.buckets[bucket_index].range_max - self.buckets[bucket_index].range_min) // 2)
return Distance(self._parent_node_id)(
int(self.buckets[bucket_index].range_min + half).to_bytes(constants.HASH_LENGTH, 'big')
).to_bytes(constants.HASH_LENGTH, 'big')
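Note: both id helpers above lean on the Kademlia XOR metric: Distance(a)(b) is a XOR b, so applying the parent node's Distance to an in-range id and converting back yields another id, and applying it twice is the identity. A small sketch of that property, assuming 384-bit ids as implied by constants.HASH_BITS above:

    import os

    HASH_LENGTH = 48  # 384-bit ids, matching the constants used above

    def xor_distance(a: bytes, b: bytes) -> int:
        # the Kademlia metric: what Distance(a)(b) computes in the code above
        return int.from_bytes(a, 'big') ^ int.from_bytes(b, 'big')

    parent_id, target = os.urandom(HASH_LENGTH), os.urandom(HASH_LENGTH)
    d = xor_distance(parent_id, target)
    # XOR is an involution: XOR-ing with the parent id twice returns target
    assert xor_distance(parent_id, d.to_bytes(HASH_LENGTH, 'big')) == int.from_bytes(target, 'big')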
def _split_bucket(self, old_bucket_index: int) -> None:
def split_bucket(self, old_bucket_index: int) -> None:
""" Splits the specified k-bucket into two new buckets which together
cover the same range in the key/ID space
@ -302,9 +279,8 @@ class TreeRoutingTable:
# ...and remove them from the old bucket
for contact in new_bucket.peers:
old_bucket.remove_peer(contact)
self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
def _join_buckets(self):
def join_buckets(self):
if len(self.buckets) == 1:
return
to_pop = [i for i, bucket in enumerate(self.buckets) if len(bucket) == 0]
@ -326,8 +302,14 @@ class TreeRoutingTable:
elif can_go_higher:
self.buckets[bucket_index_to_pop + 1].range_min = bucket.range_min
self.buckets.remove(bucket)
self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
return self._join_buckets()
return self.join_buckets()
def contact_in_routing_table(self, address_tuple: typing.Tuple[str, int]) -> bool:
for bucket in self.buckets:
for contact in bucket.get_peers(sort_distance_to=False):
if address_tuple[0] == contact.address and address_tuple[1] == contact.udp_port:
return True
return False
def buckets_with_contacts(self) -> int:
count = 0
@ -335,70 +317,3 @@ class TreeRoutingTable:
if len(bucket) > 0:
count += 1
return count
async def add_peer(self, peer: 'KademliaPeer', probe: typing.Callable[['KademliaPeer'], typing.Awaitable]):
if not peer.node_id:
log.warning("Tried adding a peer with no node id!")
return False
for my_peer in self.get_peers():
if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
self.remove_peer(my_peer)
self._join_buckets()
bucket_index = self._kbucket_index(peer.node_id)
if self.buckets[bucket_index].add_peer(peer):
return True
# The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
if self._should_split(bucket_index, peer.node_id):
self._split_bucket(bucket_index)
# Retry the insertion attempt
result = await self.add_peer(peer, probe)
self._join_buckets()
return result
else:
# We can't split the k-bucket
#
# The 13 page kademlia paper specifies that the least recently contacted node in the bucket
# shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
# the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
#
# A reasonable extension to this is BEP 0005, which extends the above:
#
# Not all nodes that we learn about are equal. Some are "good" and some are not.
# Many nodes using the DHT are able to send queries and receive responses,
# but are not able to respond to queries from other nodes. It is important that
# each node's routing table must contain only known good nodes. A good node is
# a node that has responded to one of our queries within the last 15 minutes. A node
# is also good if it has ever responded to one of our queries and has sent us a
# query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
# questionable. Nodes become bad when they fail to respond to multiple queries
# in a row. Nodes that we know are good are given priority over nodes with unknown status.
#
# When there are bad or questionable nodes in the bucket, the least recent is selected for
# potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
# contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
# is ignored if the pinged node replies.
not_good_contacts = self.buckets[bucket_index].get_bad_or_unknown_peers()
not_recently_replied = []
for my_peer in not_good_contacts:
last_replied = self._peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
if not last_replied or last_replied + 60 < self._loop.time():
not_recently_replied.append(my_peer)
if not_recently_replied:
to_replace = not_recently_replied[0]
else:
to_replace = self.buckets[bucket_index].peers[0]
last_replied = self._peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
if last_replied and last_replied + 60 > self._loop.time():
return False
log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
try:
await probe(to_replace)
return False
except (asyncio.TimeoutError, RemoteException):
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
if to_replace in self.buckets[bucket_index]:
self.buckets[bucket_index].remove_peer(to_replace)
return await self.add_peer(peer, probe)
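Note: the long comment above reduces to a small selection procedure: prefer replacing a bad/unknown peer that has not replied within the last 60 seconds, otherwise consider the least-recently-seen head of the bucket, and keep the bucket unchanged if even that head replied recently. A standalone sketch of that decision (plain strings stand in for peers; the 60-second window is taken from the code above):

    import time

    def pick_replacement(bucket_peers, not_good_peers, last_replied, now=None):
        """bucket_peers is least- to most-recently seen; last_replied maps peer -> timestamp."""
        now = time.monotonic() if now is None else now
        for peer in not_good_peers:
            replied = last_replied.get(peer)
            if not replied or replied + 60 < now:
                return peer                      # stale bad/unknown peer: replace it
        head = bucket_peers[0]                   # every peer looks fresh: consider the LRU head
        replied = last_replied.get(head)
        if replied and replied + 60 > now:
            return None                          # head replied recently: keep the bucket as-is
        return head

    peers = ['a', 'b', 'c']
    assert pick_replacement(peers, ['b'], {'b': 0}, now=100) == 'b'
    assert pick_replacement(peers, [], {'a': 99}, now=100) is None
    assert pick_replacement(peers, [], {'a': 0}, now=100) == 'a'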

View file

@ -144,7 +144,7 @@ class ErrorDatagram(KademliaDatagramBase):
self.response = response.decode()
def _decode_datagram(datagram: bytes):
def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDatagram, ErrorDatagram]:
msg_types = {
REQUEST_TYPE: RequestDatagram,
RESPONSE_TYPE: ResponseDatagram,
@ -152,29 +152,19 @@ def _decode_datagram(datagram: bytes):
}
primitive: typing.Dict = bdecode(datagram)
converted = {
str(k).encode() if not isinstance(k, bytes) else k: v for k, v in primitive.items()
}
if converted[b'0'] in [REQUEST_TYPE, ERROR_TYPE, RESPONSE_TYPE]: # pylint: disable=unsubscriptable-object
datagram_type = converted[b'0'] # pylint: disable=unsubscriptable-object
if primitive[0] in [REQUEST_TYPE, ERROR_TYPE, RESPONSE_TYPE]: # pylint: disable=unsubscriptable-object
datagram_type = primitive[0] # pylint: disable=unsubscriptable-object
else:
raise ValueError("invalid datagram type")
datagram_class = msg_types[datagram_type]
decoded = {
k: converted[str(i).encode()] # pylint: disable=unsubscriptable-object
k: primitive[i] # pylint: disable=unsubscriptable-object
for i, k in enumerate(datagram_class.required_fields)
if str(i).encode() in converted # pylint: disable=unsupported-membership-test
if i in primitive # pylint: disable=unsupported-membership-test
}
for i, _ in enumerate(OPTIONAL_FIELDS):
if str(i + OPTIONAL_ARG_OFFSET).encode() in converted:
decoded[i + OPTIONAL_ARG_OFFSET] = converted[str(i + OPTIONAL_ARG_OFFSET).encode()]
return decoded, datagram_class
def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDatagram, ErrorDatagram]:
decoded, datagram_class = _decode_datagram(datagram)
if i + OPTIONAL_ARG_OFFSET in primitive:
decoded[i + OPTIONAL_ARG_OFFSET] = primitive[i + OPTIONAL_ARG_OFFSET]
return datagram_class(**decoded)
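Note: the decoder above relies on the wire format keying fields by small integers: key 0 holds the datagram type and key i holds the i-th entry of the datagram class's required_fields. A hedged illustration of that mapping with a made-up field list (the real RequestDatagram fields are not shown in this hunk):

    REQUEST_TYPE = 0  # stand-in for the constant imported above

    # hypothetical field names, for illustration only
    required_fields = ['packet_type', 'rpc_id', 'node_id', 'method']

    primitive = {0: REQUEST_TYPE, 1: b'rpc-id', 2: b'node-id', 3: b'ping'}
    decoded = {
        name: primitive[i]
        for i, name in enumerate(required_fields)
        if i in primitive
    }
    assert decoded == {
        'packet_type': REQUEST_TYPE, 'rpc_id': b'rpc-id',
        'node_id': b'node-id', 'method': b'ping',
    }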

View file

@ -34,11 +34,6 @@ Code | Name | Message
**11x** | InputValue(ValueError) | Invalid argument value provided to command.
111 | GenericInputValue | The value '{value}' for argument '{argument}' is not valid.
112 | InputValueIsNone | None or null is not valid value for argument '{argument}'.
113 | ConflictingInputValue | Only '{first_argument}' or '{second_argument}' is allowed, not both.
114 | InputStringIsBlank | {argument} cannot be blank.
115 | EmptyPublishedFile | Cannot publish empty file: {file_path}
116 | MissingPublishedFile | File does not exist: {file_path}
117 | InvalidStreamURL | Invalid LBRY stream URL: '{url}' -- When a URL cannot be downloaded, such as '@Channel/' or a collection
**2xx** | Configuration | Configuration errors.
201 | ConfigWrite | Cannot write configuration file '{path}'. -- When writing the default config fails on startup, such as due to permission issues.
202 | ConfigRead | Cannot find provided configuration file '{path}'. -- Can't open the config file user provided via command line args.
@ -56,22 +51,15 @@ Code | Name | Message
405 | ChannelKeyNotFound | Channel signing key not found.
406 | ChannelKeyInvalid | Channel signing key is out of date. -- For example, channel was updated but you don't have the updated key.
407 | DataDownload | Failed to download blob. *generic*
408 | PrivateKeyNotFound | Couldn't find private key for {key} '{value}'.
410 | Resolve | Failed to resolve '{url}'.
411 | ResolveTimeout | Failed to resolve '{url}' within the timeout.
411 | ResolveCensored | Resolve of '{url}' was censored by channel with claim id '{censor_id}'.
411 | ResolveCensored | Resolve of '{url}' was censored by channel with claim id '{claim_id(censor_hash)}'.
420 | KeyFeeAboveMaxAllowed | {message}
421 | InvalidPassword | Password is invalid.
422 | IncompatibleWalletServer | '{server}:{port}' has an incompatibly old version.
423 | TooManyClaimSearchParameters | {key} can't have more than {limit} items.
424 | AlreadyPurchased | You already have a purchase for claim_id '{claim_id_hex}'. Use --allow-duplicate-purchase flag to override.
431 | ServerPaymentInvalidAddress | Invalid address from wallet server: '{address}' - skipping payment round.
432 | ServerPaymentWalletLocked | Cannot spend funds with locked wallet, skipping payment round.
433 | ServerPaymentFeeAboveMaxAllowed | Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.
434 | WalletNotLoaded | Wallet {wallet_id} is not loaded.
435 | WalletAlreadyLoaded | Wallet {wallet_path} is already loaded.
436 | WalletNotFound | Wallet not found at {wallet_path}.
437 | WalletAlreadyExists | Wallet {wallet_path} already exists, use `wallet_add` to load it.
**5xx** | Blob | **Blobs**
500 | BlobNotFound | Blob not found.
501 | BlobPermissionDenied | Permission denied to read blob.
@ -93,3 +81,6 @@ Code | Name | Message
701 | InvalidExchangeRateResponse | Failed to get exchange rate from {source}: {reason}
702 | CurrencyConversion | {message}
703 | InvalidCurrency | Invalid currency: {currency} is not a supported currency.
**8xx** | Lbrycrd | **Lbrycrd**
801 | LbrycrdUnauthorized | Failed to authenticate with lbrycrd. Perhaps wrong username or password?
811 | LbrycrdEventSubscription | Lbrycrd is not publishing '{event}' events.
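Note: each row in this table corresponds to a generated exception class named after the row's Name plus an Error suffix, as the lbry/error class definitions later in this diff show. A quick sketch of how one of the new 8xx rows behaves at runtime (requires the lbry package at this revision):

    from lbry.error import LbrycrdEventSubscriptionError

    try:
        raise LbrycrdEventSubscriptionError('blockhash')
    except LbrycrdEventSubscriptionError as err:
        assert err.event == 'blockhash'
        assert "not publishing 'blockhash' events" in str(err)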

View file

@ -76,45 +76,6 @@ class InputValueIsNoneError(InputValueError):
super().__init__(f"None or null is not valid value for argument '{argument}'.")
class ConflictingInputValueError(InputValueError):
def __init__(self, first_argument, second_argument):
self.first_argument = first_argument
self.second_argument = second_argument
super().__init__(f"Only '{first_argument}' or '{second_argument}' is allowed, not both.")
class InputStringIsBlankError(InputValueError):
def __init__(self, argument):
self.argument = argument
super().__init__(f"{argument} cannot be blank.")
class EmptyPublishedFileError(InputValueError):
def __init__(self, file_path):
self.file_path = file_path
super().__init__(f"Cannot publish empty file: {file_path}")
class MissingPublishedFileError(InputValueError):
def __init__(self, file_path):
self.file_path = file_path
super().__init__(f"File does not exist: {file_path}")
class InvalidStreamURLError(InputValueError):
"""
When a URL cannot be downloaded, such as '@Channel/' or a collection
"""
def __init__(self, url):
self.url = url
super().__init__(f"Invalid LBRY stream URL: '{url}'")
class ConfigurationError(BaseError):
"""
Configuration errors.
@ -238,14 +199,6 @@ class DataDownloadError(WalletError):
super().__init__("Failed to download blob. *generic*")
class PrivateKeyNotFoundError(WalletError):
def __init__(self, key, value):
self.key = key
self.value = value
super().__init__(f"Couldn't find private key for {key} '{value}'.")
class ResolveError(WalletError):
def __init__(self, url):
@ -262,11 +215,10 @@ class ResolveTimeoutError(WalletError):
class ResolveCensoredError(WalletError):
def __init__(self, url, censor_id, censor_row):
def __init__(self, url, censor_hash):
self.url = url
self.censor_id = censor_id
self.censor_row = censor_row
super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{censor_id}'.")
self.censor_hash = censor_hash
super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{claim_id(censor_hash)}'.")
class KeyFeeAboveMaxAllowedError(WalletError):
@ -290,24 +242,6 @@ class IncompatibleWalletServerError(WalletError):
super().__init__(f"'{server}:{port}' has an incompatibly old version.")
class TooManyClaimSearchParametersError(WalletError):
def __init__(self, key, limit):
self.key = key
self.limit = limit
super().__init__(f"{key} cant have more than {limit} items.")
class AlreadyPurchasedError(WalletError):
"""
allow-duplicate-purchase flag to override.
"""
def __init__(self, claim_id_hex):
self.claim_id_hex = claim_id_hex
super().__init__(f"You already have a purchase for claim_id '{claim_id_hex}'. Use")
class ServerPaymentInvalidAddressError(WalletError):
def __init__(self, address):
@ -329,34 +263,6 @@ class ServerPaymentFeeAboveMaxAllowedError(WalletError):
super().__init__(f"Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.")
class WalletNotLoadedError(WalletError):
def __init__(self, wallet_id):
self.wallet_id = wallet_id
super().__init__(f"Wallet {wallet_id} is not loaded.")
class WalletAlreadyLoadedError(WalletError):
def __init__(self, wallet_path):
self.wallet_path = wallet_path
super().__init__(f"Wallet {wallet_path} is already loaded.")
class WalletNotFoundError(WalletError):
def __init__(self, wallet_path):
self.wallet_path = wallet_path
super().__init__(f"Wallet not found at {wallet_path}.")
class WalletAlreadyExistsError(WalletError):
def __init__(self, wallet_path):
self.wallet_path = wallet_path
super().__init__(f"Wallet {wallet_path} already exists, use `wallet_add` to load it.")
class BlobError(BaseError):
"""
**Blobs**
@ -492,3 +398,22 @@ class InvalidCurrencyError(CurrencyExchangeError):
def __init__(self, currency):
self.currency = currency
super().__init__(f"Invalid currency: {currency} is not a supported currency.")
class LbrycrdError(BaseError):
"""
**Lbrycrd**
"""
class LbrycrdUnauthorizedError(LbrycrdError):
def __init__(self):
super().__init__("Failed to authenticate with lbrycrd. Perhaps wrong username or password?")
class LbrycrdEventSubscriptionError(LbrycrdError):
def __init__(self, event):
self.event = event
super().__init__(f"Lbrycrd is not publishing '{event}' events.")

View file

@ -1,9 +1,17 @@
import time
import asyncio
import threading
import logging
from queue import Empty
from multiprocessing import Queue
log = logging.getLogger(__name__)
class BroadcastSubscription:
def __init__(self, controller, on_data, on_error, on_done):
def __init__(self, controller: 'EventController', on_data, on_error, on_done):
self._controller = controller
self._previous = self._next = None
self._on_data = on_data
@ -43,10 +51,10 @@ class BroadcastSubscription:
self.is_closed = True
class StreamController:
class EventController:
def __init__(self, merge_repeated_events=False):
self.stream = Stream(self)
self.stream = EventStream(self)
self._first_subscription = None
self._last_subscription = None
self._last_event = None
@ -61,37 +69,36 @@ class StreamController:
next_sub = self._first_subscription
while next_sub is not None:
subscription = next_sub
next_sub = next_sub._next
yield subscription
next_sub = next_sub._next
def _notify_and_ensure_future(self, notify):
tasks = []
for subscription in self._iterate_subscriptions:
maybe_coroutine = notify(subscription)
if asyncio.iscoroutine(maybe_coroutine):
tasks.append(maybe_coroutine)
if tasks:
return asyncio.ensure_future(asyncio.wait(tasks))
else:
f = asyncio.get_event_loop().create_future()
f.set_result(None)
return f
async def _notify(self, notify, *args):
try:
maybe_coroutine = notify(*args)
if maybe_coroutine is not None and asyncio.iscoroutine(maybe_coroutine):
await maybe_coroutine
except Exception as e:
log.exception(e)
raise
def add(self, event):
skip = self._merge_repeated and event == self._last_event
async def add(self, event):
if self._merge_repeated and event == self._last_event:
return
self._last_event = event
return self._notify_and_ensure_future(
lambda subscription: None if skip else subscription._add(event)
)
def add_error(self, exception):
return self._notify_and_ensure_future(
lambda subscription: subscription._add_error(exception)
)
def close(self):
for subscription in self._iterate_subscriptions:
subscription._close()
await self._notify(subscription._add, event)
async def add_all(self, events):
for event in events:
await self.add(event)
async def add_error(self, exception):
for subscription in self._iterate_subscriptions:
await self._notify(subscription._add_error, exception)
async def close(self):
for subscription in self._iterate_subscriptions:
await self._notify(subscription._close)
def _cancel(self, subscription):
previous = subscription._previous
@ -104,7 +111,6 @@ class StreamController:
self._last_subscription = previous
else:
next_sub._previous = previous
subscription._next = subscription._previous = subscription
def _listen(self, on_data, on_error, on_done):
subscription = BroadcastSubscription(self, on_data, on_error, on_done)
@ -119,16 +125,16 @@ class StreamController:
return subscription
class Stream:
class EventStream:
def __init__(self, controller):
def __init__(self, controller: EventController):
self._controller = controller
def listen(self, on_data, on_error=None, on_done=None):
def listen(self, on_data, on_error=None, on_done=None) -> BroadcastSubscription:
return self._controller._listen(on_data, on_error, on_done)
def where(self, condition) -> asyncio.Future:
future = asyncio.get_event_loop().create_future()
future = asyncio.get_running_loop().create_future()
def where_test(value):
if condition(value):
@ -142,14 +148,31 @@ class Stream:
return future
@property
def first(self):
future = asyncio.get_event_loop().create_future()
def first(self) -> asyncio.Future:
future = asyncio.get_running_loop().create_future()
subscription = self.listen(
lambda value: not future.done() and self._cancel_and_callback(subscription, future, value),
lambda exception: not future.done() and self._cancel_and_error(subscription, future, exception)
)
return future
@property
def last(self) -> asyncio.Future:
future = asyncio.get_running_loop().create_future()
value = None
def update_value(_value):
nonlocal value
value = _value
subscription = self.listen(
update_value,
lambda exception: not future.done() and self._cancel_and_error(subscription, future, exception),
lambda: not future.done() and self._cancel_and_callback(subscription, future, value),
)
return future
@staticmethod
def _cancel_and_callback(subscription: BroadcastSubscription, future: asyncio.Future, value):
subscription.cancel()
@ -159,3 +182,66 @@ class Stream:
def _cancel_and_error(subscription: BroadcastSubscription, future: asyncio.Future, exception):
subscription.cancel()
future.set_exception(exception)
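Note: pulling the renamed pieces together: an EventController owns an EventStream, listeners subscribe through listen(), first/last return futures, and add()/add_all()/close() are now coroutines. A minimal usage sketch based only on the methods shown in this diff (the import path is assumed; it is not visible in this view):

    import asyncio
    from lbry.event import EventController  # module path assumed, not shown in this diff

    async def main():
        controller = EventController()
        seen = []
        subscription = controller.stream.listen(seen.append)
        first = controller.stream.first        # future resolved by the next event
        await controller.add_all(['one', 'two'])
        await controller.close()
        subscription.cancel()
        assert await first == 'one'
        assert seen == ['one', 'two']

    asyncio.run(main())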
class EventQueuePublisher(threading.Thread):
STOP = 'STOP'
def __init__(self, queue: Queue, event_controller: EventController):
super().__init__()
self.queue = queue
self.event_controller = event_controller
self.loop = None
@staticmethod
def message_to_event(message):
return message
def start(self):
self.loop = asyncio.get_running_loop()
super().start()
def run(self):
queue_get_timeout = 0.2
buffer_drain_size = 100
buffer_drain_timeout = 0.1
buffer = []
last_drained_ms_ago = time.perf_counter()
while True:
try:
msg = self.queue.get(timeout=queue_get_timeout)
if msg != self.STOP:
buffer.append(msg)
except Empty:
msg = None
drain = any((
len(buffer) >= buffer_drain_size,
(time.perf_counter() - last_drained_ms_ago) >= buffer_drain_timeout,
msg == self.STOP
))
if drain and buffer:
asyncio.run_coroutine_threadsafe(
self.event_controller.add_all([
self.message_to_event(msg) for msg in buffer
]), self.loop
)
buffer.clear()
last_drained_ms_ago = time.perf_counter()
if msg == self.STOP:
return
def stop(self):
self.queue.put(self.STOP)
if self.is_alive():
self.join()
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
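Note: EventQueuePublisher bridges a multiprocessing Queue into asyncio: start() captures the running loop, the worker thread batches messages (by size or by the 0.1s drain timeout), and stop() pushes the STOP sentinel and joins. A sketch of driving it end to end (import path assumed as above; in real use the producer would live in another process):

    import asyncio
    from multiprocessing import Queue
    from lbry.event import EventController, EventQueuePublisher  # path assumed

    async def main():
        controller = EventController()
        received = []
        controller.stream.listen(received.append)
        queue = Queue()
        publisher = EventQueuePublisher(queue, controller)
        publisher.start()                  # must be called while the event loop is running
        queue.put({'event': 'progress'})   # producer side
        publisher.stop()                   # sends STOP, drains the buffer, joins the thread
        await asyncio.sleep(0.5)           # let the thread-safe coroutine land
        assert received == [{'event': 'progress'}]

    asyncio.run(main())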

View file

@ -1,243 +0,0 @@
import asyncio
import collections
import logging
import typing
import aiohttp
from lbry import utils
from lbry.conf import Config
from lbry.extras import system_info
ANALYTICS_ENDPOINT = 'https://api.segment.io/v1'
ANALYTICS_TOKEN = 'Ax5LZzR1o3q3Z3WjATASDwR5rKyHH0qOIRIbLmMXn2H='
# Things We Track
SERVER_STARTUP = 'Server Startup'
SERVER_STARTUP_SUCCESS = 'Server Startup Success'
SERVER_STARTUP_ERROR = 'Server Startup Error'
DOWNLOAD_STARTED = 'Download Started'
DOWNLOAD_ERRORED = 'Download Errored'
DOWNLOAD_FINISHED = 'Download Finished'
HEARTBEAT = 'Heartbeat'
DISK_SPACE = 'Disk Space'
CLAIM_ACTION = 'Claim Action' # publish/create/update/abandon
NEW_CHANNEL = 'New Channel'
CREDITS_SENT = 'Credits Sent'
UPNP_SETUP = "UPnP Setup"
BLOB_BYTES_UPLOADED = 'Blob Bytes Uploaded'
TIME_TO_FIRST_BYTES = "Time To First Bytes"
log = logging.getLogger(__name__)
def _event_properties(installation_id: str, session_id: str,
event_properties: typing.Optional[typing.Dict]) -> typing.Dict:
properties = {
'lbry_id': installation_id,
'session_id': session_id,
}
properties.update(event_properties or {})
return properties
def _download_properties(conf: Config, external_ip: str, resolve_duration: float,
total_duration: typing.Optional[float], download_id: str, name: str,
outpoint: str, active_peer_count: typing.Optional[int],
tried_peers_count: typing.Optional[int], connection_failures_count: typing.Optional[int],
added_fixed_peers: bool, fixed_peer_delay: float, sd_hash: str,
sd_download_duration: typing.Optional[float] = None,
head_blob_hash: typing.Optional[str] = None,
head_blob_length: typing.Optional[int] = None,
head_blob_download_duration: typing.Optional[float] = None,
error: typing.Optional[str] = None, error_msg: typing.Optional[str] = None,
wallet_server: typing.Optional[str] = None) -> typing.Dict:
return {
"external_ip": external_ip,
"download_id": download_id,
"total_duration": round(total_duration, 4),
"resolve_duration": None if not resolve_duration else round(resolve_duration, 4),
"error": error,
"error_message": error_msg,
'name': name,
"outpoint": outpoint,
"node_rpc_timeout": conf.node_rpc_timeout,
"peer_connect_timeout": conf.peer_connect_timeout,
"blob_download_timeout": conf.blob_download_timeout,
"use_fixed_peers": len(conf.fixed_peers) > 0,
"fixed_peer_delay": fixed_peer_delay,
"added_fixed_peers": added_fixed_peers,
"active_peer_count": active_peer_count,
"tried_peers_count": tried_peers_count,
"sd_blob_hash": sd_hash,
"sd_blob_duration": None if not sd_download_duration else round(sd_download_duration, 4),
"head_blob_hash": head_blob_hash,
"head_blob_length": head_blob_length,
"head_blob_duration": None if not head_blob_download_duration else round(head_blob_download_duration, 4),
"connection_failures_count": connection_failures_count,
"wallet_server": wallet_server
}
def _make_context(platform):
# see https://segment.com/docs/spec/common/#context
# they say they'll ignore fields outside the spec, but evidently they don't
context = {
'app': {
'version': platform['lbrynet_version'],
'build': platform['build'],
},
# TODO: expand os info to give linux/osx specific info
'os': {
'name': platform['os_system'],
'version': platform['os_release']
},
}
if 'desktop' in platform and 'distro' in platform:
context['os']['desktop'] = platform['desktop']
context['os']['distro'] = platform['distro']
return context
class AnalyticsManager:
def __init__(self, conf: Config, installation_id: str, session_id: str):
self.conf = conf
self.cookies = {}
self.url = ANALYTICS_ENDPOINT
self._write_key = utils.deobfuscate(ANALYTICS_TOKEN)
self._tracked_data = collections.defaultdict(list)
self.context = _make_context(system_info.get_platform())
self.installation_id = installation_id
self.session_id = session_id
self.task: typing.Optional[asyncio.Task] = None
self.external_ip: typing.Optional[str] = None
@property
def enabled(self):
return self.conf.share_usage_data
@property
def is_started(self):
return self.task is not None
async def start(self):
if self.task is None:
self.task = asyncio.create_task(self.run())
async def run(self):
while True:
if self.enabled:
self.external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
await self._send_heartbeat()
await asyncio.sleep(1800)
def stop(self):
if self.task is not None and not self.task.done():
self.task.cancel()
async def _post(self, data: typing.Dict):
request_kwargs = {
'method': 'POST',
'url': self.url + '/track',
'headers': {'Connection': 'Close'},
'auth': aiohttp.BasicAuth(self._write_key, ''),
'json': data,
'cookies': self.cookies
}
try:
async with utils.aiohttp_request(**request_kwargs) as response:
self.cookies.update(response.cookies)
except Exception as e:
log.debug('Encountered an exception while POSTing to %s: ', self.url + '/track', exc_info=e)
async def track(self, event: typing.Dict):
"""Send a single tracking event"""
if self.enabled:
log.debug('Sending track event: %s', event)
await self._post(event)
async def send_upnp_setup_success_fail(self, success, status):
await self.track(
self._event(UPNP_SETUP, {
'success': success,
'status': status,
})
)
async def send_disk_space_used(self, storage_used, storage_limit, is_from_network_quota):
await self.track(
self._event(DISK_SPACE, {
'used': storage_used,
'limit': storage_limit,
'from_network_quota': is_from_network_quota
})
)
async def send_server_startup(self):
await self.track(self._event(SERVER_STARTUP))
async def send_server_startup_success(self):
await self.track(self._event(SERVER_STARTUP_SUCCESS))
async def send_server_startup_error(self, message):
await self.track(self._event(SERVER_STARTUP_ERROR, {'message': message}))
async def send_time_to_first_bytes(self, resolve_duration: typing.Optional[float],
total_duration: typing.Optional[float], download_id: str,
name: str, outpoint: typing.Optional[str],
found_peers_count: typing.Optional[int],
tried_peers_count: typing.Optional[int],
connection_failures_count: typing.Optional[int],
added_fixed_peers: bool,
fixed_peers_delay: float, sd_hash: str,
sd_download_duration: typing.Optional[float] = None,
head_blob_hash: typing.Optional[str] = None,
head_blob_length: typing.Optional[int] = None,
head_blob_duration: typing.Optional[int] = None,
error: typing.Optional[str] = None,
error_msg: typing.Optional[str] = None,
wallet_server: typing.Optional[str] = None):
await self.track(self._event(TIME_TO_FIRST_BYTES, _download_properties(
self.conf, self.external_ip, resolve_duration, total_duration, download_id, name, outpoint,
found_peers_count, tried_peers_count, connection_failures_count, added_fixed_peers, fixed_peers_delay,
sd_hash, sd_download_duration, head_blob_hash, head_blob_length, head_blob_duration, error, error_msg,
wallet_server
)))
async def send_download_finished(self, download_id, name, sd_hash):
await self.track(
self._event(
DOWNLOAD_FINISHED, {
'download_id': download_id,
'name': name,
'stream_info': sd_hash
}
)
)
async def send_claim_action(self, action):
await self.track(self._event(CLAIM_ACTION, {'action': action}))
async def send_new_channel(self):
await self.track(self._event(NEW_CHANNEL))
async def send_credits_sent(self):
await self.track(self._event(CREDITS_SENT))
async def _send_heartbeat(self):
await self.track(self._event(HEARTBEAT))
def _event(self, event, properties: typing.Optional[typing.Dict] = None):
return {
'userId': 'lbry',
'event': event,
'properties': _event_properties(self.installation_id, self.session_id, properties),
'context': self.context,
'timestamp': utils.isonow()
}
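Note: for reference, _event() above assembles the Segment track payload. With illustrative values (the context and timestamp fields below are examples, not captured output), a heartbeat serializes roughly as:

    payload = {
        'userId': 'lbry',
        'event': 'Heartbeat',
        'properties': {'lbry_id': 'installation-id', 'session_id': 'session-id'},
        'context': {
            'app': {'version': '0.82.0', 'build': 'release'},  # example values
            'os': {'name': 'Linux', 'version': '5.4.0'},       # example values
        },
        'timestamp': '2020-08-14T00:00:00Z',  # utils.isonow(), example value
    }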

View file

@ -1,6 +0,0 @@
from lbry.extras.cli import execute_command
from lbry.conf import Config
def daemon_rpc(conf: Config, method: str, **kwargs):
return execute_command(conf, method, kwargs, callback=lambda data: data)

View file

@ -1,75 +0,0 @@
import asyncio
import logging
from lbry.conf import Config
from lbry.extras.daemon.componentmanager import ComponentManager
log = logging.getLogger(__name__)
class ComponentType(type):
def __new__(mcs, name, bases, newattrs):
klass = type.__new__(mcs, name, bases, newattrs)
if name != "Component" and newattrs['__module__'] != 'lbry.testcase':
ComponentManager.default_component_classes[klass.component_name] = klass
return klass
class Component(metaclass=ComponentType):
"""
lbry-daemon component helper
Inheriting classes will be automatically registered with the ComponentManager and must implement setup and stop
methods
"""
depends_on = []
component_name = None
def __init__(self, component_manager):
self.conf: Config = component_manager.conf
self.component_manager = component_manager
self._running = False
def __lt__(self, other):
return self.component_name < other.component_name
@property
def running(self):
return self._running
async def get_status(self): # pylint: disable=no-self-use
return
async def start(self):
raise NotImplementedError()
async def stop(self):
raise NotImplementedError()
@property
def component(self):
raise NotImplementedError()
async def _setup(self):
try:
result = await self.start()
self._running = True
return result
except asyncio.CancelledError:
log.info("Cancelled setup of %s component", self.__class__.__name__)
raise
except Exception as err:
log.exception("Error setting up %s", self.component_name or self.__class__.__name__)
raise err
async def _stop(self):
try:
result = await self.stop()
self._running = False
return result
except asyncio.CancelledError:
log.info("Cancelled stop of %s component", self.__class__.__name__)
raise
except Exception as err:
log.exception("Error stopping %s", self.__class__.__name__)
raise err
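Note: because of the ComponentType metaclass above, merely defining a subclass registers it in ComponentManager.default_component_classes. A hedged sketch of the minimum a component in this (now removed) framework had to provide; the class and its name are illustrative:

    from lbry.extras.daemon.component import Component
    from lbry.extras.daemon.componentmanager import ComponentManager

    class HeartbeatComponent(Component):
        component_name = "heartbeat"  # registry key, must be unique
        depends_on = []               # names of components that must start first

        @property
        def component(self):
            return self

        async def start(self):
            pass                      # real components acquire resources here

        async def stop(self):
            pass

    assert ComponentManager.default_component_classes["heartbeat"] is HeartbeatComponent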

View file

@ -1,171 +0,0 @@
import logging
import asyncio
from lbry.conf import Config
from lbry.error import ComponentStartConditionNotMetError
from lbry.dht.peer import PeerManager
log = logging.getLogger(__name__)
class RegisteredConditions:
conditions = {}
class RequiredConditionType(type):
def __new__(mcs, name, bases, newattrs):
klass = type.__new__(mcs, name, bases, newattrs)
if name != "RequiredCondition":
if klass.name in RegisteredConditions.conditions:
raise SyntaxError("already have a component registered for \"%s\"" % klass.name)
RegisteredConditions.conditions[klass.name] = klass
return klass
class RequiredCondition(metaclass=RequiredConditionType):
name = ""
component = ""
message = ""
@staticmethod
def evaluate(component):
raise NotImplementedError()
class ComponentManager:
default_component_classes = {}
def __init__(self, conf: Config, analytics_manager=None, skip_components=None,
peer_manager=None, **override_components):
self.conf = conf
self.skip_components = skip_components or []
self.loop = asyncio.get_event_loop()
self.analytics_manager = analytics_manager
self.component_classes = {}
self.components = set()
self.started = asyncio.Event()
self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop())
for component_name, component_class in self.default_component_classes.items():
if component_name in override_components:
component_class = override_components.pop(component_name)
if component_name not in self.skip_components:
self.component_classes[component_name] = component_class
if override_components:
raise SyntaxError("unexpected components: %s" % override_components)
for component_class in self.component_classes.values():
self.components.add(component_class(self))
def evaluate_condition(self, condition_name):
if condition_name not in RegisteredConditions.conditions:
raise NameError(condition_name)
condition = RegisteredConditions.conditions[condition_name]
try:
component = self.get_component(condition.component)
result = condition.evaluate(component)
except Exception:
log.exception('failed to evaluate condition:')
result = False
return result, "" if result else condition.message
def sort_components(self, reverse=False):
"""
Sort components by requirements
"""
steps = []
staged = set()
components = set(self.components)
# components with no requirements
step = []
for component in set(components):
if not component.depends_on:
step.append(component)
staged.add(component.component_name)
components.remove(component)
if step:
step.sort()
steps.append(step)
while components:
step = []
to_stage = set()
for component in set(components):
reqs_met = 0
for needed in component.depends_on:
if needed in staged:
reqs_met += 1
if reqs_met == len(component.depends_on):
step.append(component)
to_stage.add(component.component_name)
components.remove(component)
if step:
step.sort()
staged.update(to_stage)
steps.append(step)
elif components:
raise ComponentStartConditionNotMetError(components)
if reverse:
steps.reverse()
return steps
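Note: sort_components above is a breadth-first topological staging: stage 0 holds components with no dependencies, and each later stage holds components whose depends_on are all already staged (anything left unstageable raises ComponentStartConditionNotMetError). Illustrated with dependency lists that appear later in this diff:

    depends_on = {
        'database': [],
        'blob_manager': ['database'],
        'wallet': ['database'],
        'file_manager': ['blob_manager', 'database', 'wallet'],
    }

    stages, staged, remaining = [], set(), set(depends_on)
    while remaining:
        step = sorted(c for c in remaining if set(depends_on[c]) <= staged)
        if not step:
            raise RuntimeError('unmet start conditions')  # ComponentStartConditionNotMetError in the real code
        stages.append(step)
        staged.update(step)
        remaining.difference_update(step)

    assert stages == [['database'], ['blob_manager', 'wallet'], ['file_manager']]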
async def start(self):
""" Start Components in sequence sorted by requirements """
for stage in self.sort_components():
needing_start = [
component._setup() for component in stage if not component.running
]
if needing_start:
await asyncio.wait(map(asyncio.create_task, needing_start))
self.started.set()
async def stop(self):
"""
Stop Components in reversed startup order
"""
stages = self.sort_components(reverse=True)
for stage in stages:
needing_stop = [
component._stop() for component in stage if component.running
]
if needing_stop:
await asyncio.wait(map(asyncio.create_task, needing_stop))
def all_components_running(self, *component_names):
"""
Check if components are running
:return: (bool) True if all specified components are running
"""
components = {component.component_name: component for component in self.components}
for component in component_names:
if component not in components:
raise NameError("%s is not a known Component" % component)
if not components[component].running:
return False
return True
def get_components_status(self):
"""
List status of all the components, whether they are running or not
:return: (dict) {(str) component_name: (bool) True is running else False}
"""
return {
component.component_name: component.running
for component in self.components
}
def get_actual_component(self, component_name):
for component in self.components:
if component.component_name == component_name:
return component
raise NameError(component_name)
def get_component(self, component_name):
return self.get_actual_component(component_name).component
def has_component(self, component_name):
return any(component for component in self.components if component_name == component.component_name)

View file

@ -1,750 +0,0 @@
import math
import os
import asyncio
import logging
import binascii
import typing
import base58
from aioupnp import __version__ as aioupnp_version
from aioupnp.upnp import UPnP
from aioupnp.fault import UPnPError
from lbry import utils
from lbry.dht.node import Node
from lbry.dht.peer import is_valid_public_ipv4
from lbry.dht.blob_announcer import BlobAnnouncer
from lbry.blob.blob_manager import BlobManager
from lbry.blob.disk_space_manager import DiskSpaceManager
from lbry.blob_exchange.server import BlobServer
from lbry.stream.background_downloader import BackgroundDownloader
from lbry.stream.stream_manager import StreamManager
from lbry.file.file_manager import FileManager
from lbry.extras.daemon.component import Component
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
from lbry.extras.daemon.storage import SQLiteStorage
from lbry.torrent.torrent_manager import TorrentManager
from lbry.wallet import WalletManager
from lbry.wallet.usage_payment import WalletServerPayer
from lbry.torrent.tracker import TrackerClient
from lbry.torrent.session import TorrentSession
log = logging.getLogger(__name__)
# settings must be initialized before this file is imported
DATABASE_COMPONENT = "database"
BLOB_COMPONENT = "blob_manager"
WALLET_COMPONENT = "wallet"
WALLET_SERVER_PAYMENTS_COMPONENT = "wallet_server_payments"
DHT_COMPONENT = "dht"
HASH_ANNOUNCER_COMPONENT = "hash_announcer"
FILE_MANAGER_COMPONENT = "file_manager"
DISK_SPACE_COMPONENT = "disk_space"
BACKGROUND_DOWNLOADER_COMPONENT = "background_downloader"
PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
UPNP_COMPONENT = "upnp"
EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
TRACKER_ANNOUNCER_COMPONENT = "tracker_announcer_component"
LIBTORRENT_COMPONENT = "libtorrent_component"
class DatabaseComponent(Component):
component_name = DATABASE_COMPONENT
def __init__(self, component_manager):
super().__init__(component_manager)
self.storage = None
@property
def component(self):
return self.storage
@staticmethod
def get_current_db_revision():
return 15
@property
def revision_filename(self):
return os.path.join(self.conf.data_dir, 'db_revision')
def _write_db_revision_file(self, version_num):
with open(self.revision_filename, mode='w') as db_revision:
db_revision.write(str(version_num))
async def start(self):
# check directories exist, create them if they don't
log.info("Loading databases")
if not os.path.exists(self.revision_filename):
log.info("db_revision file not found. Creating it")
self._write_db_revision_file(self.get_current_db_revision())
# check the db migration and run any needed migrations
with open(self.revision_filename, "r") as revision_read_handle:
old_revision = int(revision_read_handle.read().strip())
if old_revision > self.get_current_db_revision():
raise Exception('This version of lbrynet is not compatible with the database\n'
'Your database is revision %i, expected %i' %
(old_revision, self.get_current_db_revision()))
if old_revision < self.get_current_db_revision():
from lbry.extras.daemon.migrator import dbmigrator # pylint: disable=import-outside-toplevel
log.info("Upgrading your databases (revision %i to %i)", old_revision, self.get_current_db_revision())
await asyncio.get_event_loop().run_in_executor(
None, dbmigrator.migrate_db, self.conf, old_revision, self.get_current_db_revision()
)
self._write_db_revision_file(self.get_current_db_revision())
log.info("Finished upgrading the databases.")
self.storage = SQLiteStorage(
self.conf, os.path.join(self.conf.data_dir, "lbrynet.sqlite")
)
await self.storage.open()
async def stop(self):
await self.storage.close()
self.storage = None
class WalletComponent(Component):
component_name = WALLET_COMPONENT
depends_on = [DATABASE_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.wallet_manager = None
@property
def component(self):
return self.wallet_manager
async def get_status(self):
if self.wallet_manager is None:
return
is_connected = self.wallet_manager.ledger.network.is_connected
sessions = []
connected = None
if is_connected:
addr, port = self.wallet_manager.ledger.network.client.server
connected = f"{addr}:{port}"
sessions.append(self.wallet_manager.ledger.network.client)
result = {
'connected': connected,
'connected_features': self.wallet_manager.ledger.network.server_features,
'servers': [
{
'host': session.server[0],
'port': session.server[1],
'latency': session.connection_latency,
'availability': session.available,
} for session in sessions
],
'known_servers': len(self.wallet_manager.ledger.network.known_hubs),
'available_servers': 1 if is_connected else 0
}
if self.wallet_manager.ledger.network.remote_height:
local_height = self.wallet_manager.ledger.local_height_including_downloaded_height
disk_height = len(self.wallet_manager.ledger.headers)
remote_height = self.wallet_manager.ledger.network.remote_height
download_height, target_height = local_height - disk_height, remote_height - disk_height
if target_height > 0:
progress = min(max(math.ceil(float(download_height) / float(target_height) * 100), 0), 100)
else:
progress = 100
best_hash = await self.wallet_manager.get_best_blockhash()
result.update({
'headers_synchronization_progress': progress,
'blocks': max(local_height, 0),
'blocks_behind': max(remote_height - local_height, 0),
'best_blockhash': best_hash,
})
return result
async def start(self):
log.info("Starting wallet")
self.wallet_manager = await WalletManager.from_lbrynet_config(self.conf)
await self.wallet_manager.start()
async def stop(self):
await self.wallet_manager.stop()
self.wallet_manager = None
class WalletServerPaymentsComponent(Component):
component_name = WALLET_SERVER_PAYMENTS_COMPONENT
depends_on = [WALLET_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.usage_payment_service = WalletServerPayer(
max_fee=self.conf.max_wallet_server_fee, analytics_manager=self.component_manager.analytics_manager,
)
@property
def component(self) -> typing.Optional[WalletServerPayer]:
return self.usage_payment_service
async def start(self):
wallet_manager = self.component_manager.get_component(WALLET_COMPONENT)
await self.usage_payment_service.start(wallet_manager.ledger, wallet_manager.default_wallet)
async def stop(self):
await self.usage_payment_service.stop()
async def get_status(self):
return {
'max_fee': self.usage_payment_service.max_fee,
'running': self.usage_payment_service.running
}
class BlobComponent(Component):
component_name = BLOB_COMPONENT
depends_on = [DATABASE_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.blob_manager: typing.Optional[BlobManager] = None
@property
def component(self) -> typing.Optional[BlobManager]:
return self.blob_manager
async def start(self):
storage = self.component_manager.get_component(DATABASE_COMPONENT)
data_store = None
if DHT_COMPONENT not in self.component_manager.skip_components:
dht_node: Node = self.component_manager.get_component(DHT_COMPONENT)
if dht_node:
data_store = dht_node.protocol.data_store
blob_dir = os.path.join(self.conf.data_dir, 'blobfiles')
if not os.path.isdir(blob_dir):
os.mkdir(blob_dir)
self.blob_manager = BlobManager(self.component_manager.loop, blob_dir, storage, self.conf, data_store)
return await self.blob_manager.setup()
async def stop(self):
self.blob_manager.stop()
async def get_status(self):
count = 0
if self.blob_manager:
count = len(self.blob_manager.completed_blob_hashes)
return {
'finished_blobs': count,
'connections': {} if not self.blob_manager else self.blob_manager.connection_manager.status
}
class DHTComponent(Component):
component_name = DHT_COMPONENT
depends_on = [UPNP_COMPONENT, DATABASE_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.dht_node: typing.Optional[Node] = None
self.external_udp_port = None
self.external_peer_port = None
@property
def component(self) -> typing.Optional[Node]:
return self.dht_node
async def get_status(self):
return {
'node_id': None if not self.dht_node else binascii.hexlify(self.dht_node.protocol.node_id),
'peers_in_routing_table': 0 if not self.dht_node else len(self.dht_node.protocol.routing_table.get_peers())
}
def get_node_id(self):
node_id_filename = os.path.join(self.conf.data_dir, "node_id")
if os.path.isfile(node_id_filename):
with open(node_id_filename, "r") as node_id_file:
return base58.b58decode(str(node_id_file.read()).strip())
node_id = utils.generate_id()
with open(node_id_filename, "w") as node_id_file:
node_id_file.write(base58.b58encode(node_id).decode())
return node_id
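Note: get_node_id above persists the node id as base58 text and decodes it on later runs. A quick round-trip of that encoding, using the base58 package this file imports:

    import base58

    node_id = bytes(range(48))                    # 384-bit id, like utils.generate_id() produces
    encoded = base58.b58encode(node_id).decode()  # what gets written to the node_id file
    assert base58.b58decode(encoded) == node_id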
async def start(self):
log.info("start the dht")
upnp_component = self.component_manager.get_component(UPNP_COMPONENT)
self.external_peer_port = upnp_component.upnp_redirects.get("TCP", self.conf.tcp_port)
self.external_udp_port = upnp_component.upnp_redirects.get("UDP", self.conf.udp_port)
external_ip = upnp_component.external_ip
storage = self.component_manager.get_component(DATABASE_COMPONENT)
if not external_ip:
external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
if not external_ip:
log.warning("failed to get external ip")
self.dht_node = Node(
self.component_manager.loop,
self.component_manager.peer_manager,
node_id=self.get_node_id(),
internal_udp_port=self.conf.udp_port,
udp_port=self.external_udp_port,
external_ip=external_ip,
peer_port=self.external_peer_port,
rpc_timeout=self.conf.node_rpc_timeout,
split_buckets_under_index=self.conf.split_buckets_under_index,
is_bootstrap_node=self.conf.is_bootstrap_node,
storage=storage
)
self.dht_node.start(self.conf.network_interface, self.conf.known_dht_nodes)
log.info("Started the dht")
async def stop(self):
self.dht_node.stop()
class HashAnnouncerComponent(Component):
component_name = HASH_ANNOUNCER_COMPONENT
depends_on = [DHT_COMPONENT, DATABASE_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.hash_announcer: typing.Optional[BlobAnnouncer] = None
@property
def component(self) -> typing.Optional[BlobAnnouncer]:
return self.hash_announcer
async def start(self):
storage = self.component_manager.get_component(DATABASE_COMPONENT)
dht_node = self.component_manager.get_component(DHT_COMPONENT)
self.hash_announcer = BlobAnnouncer(self.component_manager.loop, dht_node, storage)
self.hash_announcer.start(self.conf.concurrent_blob_announcers)
log.info("Started blob announcer")
async def stop(self):
self.hash_announcer.stop()
log.info("Stopped blob announcer")
async def get_status(self):
return {
'announce_queue_size': 0 if not self.hash_announcer else len(self.hash_announcer.announce_queue)
}
class FileManagerComponent(Component):
component_name = FILE_MANAGER_COMPONENT
depends_on = [BLOB_COMPONENT, DATABASE_COMPONENT, WALLET_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.file_manager: typing.Optional[FileManager] = None
@property
def component(self) -> typing.Optional[FileManager]:
return self.file_manager
async def get_status(self):
if not self.file_manager:
return
return {
'managed_files': len(self.file_manager.get_filtered()),
}
async def start(self):
blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
storage = self.component_manager.get_component(DATABASE_COMPONENT)
wallet = self.component_manager.get_component(WALLET_COMPONENT)
node = self.component_manager.get_component(DHT_COMPONENT) \
if self.component_manager.has_component(DHT_COMPONENT) else None
log.info('Starting the file manager')
loop = asyncio.get_event_loop()
self.file_manager = FileManager(
loop, self.conf, wallet, storage, self.component_manager.analytics_manager
)
self.file_manager.source_managers['stream'] = StreamManager(
loop, self.conf, blob_manager, wallet, storage, node,
)
if self.component_manager.has_component(LIBTORRENT_COMPONENT):
torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT)
self.file_manager.source_managers['torrent'] = TorrentManager(
loop, self.conf, torrent, storage, self.component_manager.analytics_manager
)
await self.file_manager.start()
log.info('Done setting up file manager')
async def stop(self):
await self.file_manager.stop()
class BackgroundDownloaderComponent(Component):
MIN_PREFIX_COLLIDING_BITS = 8
component_name = BACKGROUND_DOWNLOADER_COMPONENT
depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT, DISK_SPACE_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.background_task: typing.Optional[asyncio.Task] = None
self.download_loop_delay_seconds = 60
self.ongoing_download: typing.Optional[asyncio.Task] = None
self.space_manager: typing.Optional[DiskSpaceManager] = None
self.blob_manager: typing.Optional[BlobManager] = None
self.background_downloader: typing.Optional[BackgroundDownloader] = None
self.dht_node: typing.Optional[Node] = None
self.space_available: typing.Optional[int] = None
@property
def is_busy(self):
return bool(self.ongoing_download and not self.ongoing_download.done())
@property
def component(self) -> 'BackgroundDownloaderComponent':
return self
async def get_status(self):
return {'running': self.background_task is not None and not self.background_task.done(),
'available_free_space_mb': self.space_available,
'ongoing_download': self.is_busy}
async def download_blobs_in_background(self):
while True:
self.space_available = await self.space_manager.get_free_space_mb(True)
if not self.is_busy and self.space_available > 10:
self._download_next_close_blob_hash()
await asyncio.sleep(self.download_loop_delay_seconds)
def _download_next_close_blob_hash(self):
node_id = self.dht_node.protocol.node_id
for blob_hash in self.dht_node.stored_blob_hashes:
if blob_hash.hex() in self.blob_manager.completed_blob_hashes:
continue
if utils.get_colliding_prefix_bits(node_id, blob_hash) >= self.MIN_PREFIX_COLLIDING_BITS:
self.ongoing_download = asyncio.create_task(self.background_downloader.download_blobs(blob_hash.hex()))
return
async def start(self):
self.space_manager: DiskSpaceManager = self.component_manager.get_component(DISK_SPACE_COMPONENT)
if not self.component_manager.has_component(DHT_COMPONENT):
return
self.dht_node = self.component_manager.get_component(DHT_COMPONENT)
self.blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
storage = self.component_manager.get_component(DATABASE_COMPONENT)
self.background_downloader = BackgroundDownloader(self.conf, storage, self.blob_manager, self.dht_node)
self.background_task = asyncio.create_task(self.download_blobs_in_background())
async def stop(self):
if self.ongoing_download and not self.ongoing_download.done():
self.ongoing_download.cancel()
if self.background_task:
self.background_task.cancel()
class DiskSpaceComponent(Component):
component_name = DISK_SPACE_COMPONENT
depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.disk_space_manager: typing.Optional[DiskSpaceManager] = None
@property
def component(self) -> typing.Optional[DiskSpaceManager]:
return self.disk_space_manager
async def get_status(self):
if self.disk_space_manager:
space_used = await self.disk_space_manager.get_space_used_mb(cached=True)
return {
'total_used_mb': space_used['total'],
'published_blobs_storage_used_mb': space_used['private_storage'],
'content_blobs_storage_used_mb': space_used['content_storage'],
'seed_blobs_storage_used_mb': space_used['network_storage'],
'running': self.disk_space_manager.running,
}
return {'space_used': '0', 'network_seeding_space_used': '0', 'running': False}
async def start(self):
db = self.component_manager.get_component(DATABASE_COMPONENT)
blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
self.disk_space_manager = DiskSpaceManager(
self.conf, db, blob_manager,
analytics=self.component_manager.analytics_manager
)
await self.disk_space_manager.start()
async def stop(self):
await self.disk_space_manager.stop()
class TorrentComponent(Component):
component_name = LIBTORRENT_COMPONENT
def __init__(self, component_manager):
super().__init__(component_manager)
self.torrent_session = None
@property
def component(self) -> typing.Optional[TorrentSession]:
return self.torrent_session
async def get_status(self):
if not self.torrent_session:
return
return {
'running': True, # TODO: what to return here?
}
async def start(self):
self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
await self.torrent_session.bind() # TODO: specify host/port
async def stop(self):
if self.torrent_session:
await self.torrent_session.pause()
class PeerProtocolServerComponent(Component):
component_name = PEER_PROTOCOL_SERVER_COMPONENT
depends_on = [UPNP_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.blob_server: typing.Optional[BlobServer] = None
@property
def component(self) -> typing.Optional[BlobServer]:
return self.blob_server
async def start(self):
log.info("start blob server")
blob_manager: BlobManager = self.component_manager.get_component(BLOB_COMPONENT)
wallet: WalletManager = self.component_manager.get_component(WALLET_COMPONENT)
peer_port = self.conf.tcp_port
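        # an unused address from the wallet is handed to the blob server (its advertised lbrycrd address)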
address = await wallet.get_unused_address()
self.blob_server = BlobServer(asyncio.get_event_loop(), blob_manager, address)
self.blob_server.start_server(peer_port, interface=self.conf.network_interface)
await self.blob_server.started_listening.wait()
async def stop(self):
if self.blob_server:
self.blob_server.stop_server()
class UPnPComponent(Component):
component_name = UPNP_COMPONENT
def __init__(self, component_manager):
super().__init__(component_manager)
self._int_peer_port = self.conf.tcp_port
self._int_dht_node_port = self.conf.udp_port
self.use_upnp = self.conf.use_upnp
self.upnp: typing.Optional[UPnP] = None
self.upnp_redirects = {}
self.external_ip: typing.Optional[str] = None
self._maintain_redirects_task = None
@property
def component(self) -> 'UPnPComponent':
return self
async def _repeatedly_maintain_redirects(self, now=True):
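        # refresh port mappings every six minutes; the first pass is skipped when
        # now=False because start() has already run one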
        while True:
            if now:
                await self._maintain_redirects()
            now = True
            await asyncio.sleep(360)
async def _maintain_redirects(self):
# setup the gateway if necessary
if not self.upnp:
try:
self.upnp = await UPnP.discover(loop=self.component_manager.loop)
log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
except Exception as err:
log.warning("upnp discovery failed: %s", err)
self.upnp = None
# update the external ip
external_ip = None
if self.upnp:
try:
external_ip = await self.upnp.get_external_ip()
if external_ip != "0.0.0.0" and not self.external_ip:
log.info("got external ip from UPnP: %s", external_ip)
except (asyncio.TimeoutError, UPnPError, NotImplementedError):
pass
if external_ip and not is_valid_public_ipv4(external_ip):
log.warning("UPnP returned a private/reserved ip - %s, checking lbry.com fallback", external_ip)
external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
if self.external_ip and self.external_ip != external_ip:
log.info("external ip changed from %s to %s", self.external_ip, external_ip)
if external_ip:
self.external_ip = external_ip
dht_component = self.component_manager.get_component(DHT_COMPONENT)
if dht_component:
dht_node = dht_component.component
dht_node.protocol.external_ip = external_ip
# assert self.external_ip is not None # TODO: handle going/starting offline
if not self.upnp_redirects and self.upnp: # setup missing redirects
log.info("add UPnP port mappings")
upnp_redirects = {}
if PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components:
try:
upnp_redirects["TCP"] = await self.upnp.get_next_mapping(
self._int_peer_port, "TCP", "LBRY peer port", self._int_peer_port
)
except (UPnPError, asyncio.TimeoutError, NotImplementedError):
pass
if DHT_COMPONENT not in self.component_manager.skip_components:
try:
upnp_redirects["UDP"] = await self.upnp.get_next_mapping(
self._int_dht_node_port, "UDP", "LBRY DHT port", self._int_dht_node_port
)
except (UPnPError, asyncio.TimeoutError, NotImplementedError):
pass
if upnp_redirects:
log.info("set up redirects: %s", upnp_redirects)
self.upnp_redirects.update(upnp_redirects)
elif self.upnp: # check existing redirects are still active
found = set()
mappings = await self.upnp.get_redirects()
for mapping in mappings:
proto = mapping.protocol
if proto in self.upnp_redirects and mapping.external_port == self.upnp_redirects[proto]:
if mapping.lan_address == self.upnp.lan_address:
found.add(proto)
if 'UDP' not in found and DHT_COMPONENT not in self.component_manager.skip_components:
try:
udp_port = await self.upnp.get_next_mapping(self._int_dht_node_port, "UDP", "LBRY DHT port")
self.upnp_redirects['UDP'] = udp_port
log.info("refreshed upnp redirect for dht port: %i", udp_port)
except (asyncio.TimeoutError, UPnPError, NotImplementedError):
del self.upnp_redirects['UDP']
if 'TCP' not in found and PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components:
try:
tcp_port = await self.upnp.get_next_mapping(self._int_peer_port, "TCP", "LBRY peer port")
self.upnp_redirects['TCP'] = tcp_port
log.info("refreshed upnp redirect for peer port: %i", tcp_port)
except (asyncio.TimeoutError, UPnPError, NotImplementedError):
del self.upnp_redirects['TCP']
        if ('TCP' in self.upnp_redirects and
                PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components) and \
                ('UDP' in self.upnp_redirects and DHT_COMPONENT not in self.component_manager.skip_components):
            log.debug("upnp redirects are still active")
async def start(self):
log.info("detecting external ip")
if not self.use_upnp:
self.external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
return
success = False
await self._maintain_redirects()
if self.upnp:
if not self.upnp_redirects and not all(
x in self.component_manager.skip_components
for x in (DHT_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT)
):
log.error("failed to setup upnp")
else:
success = True
if self.upnp_redirects:
log.debug("set up upnp port redirects for gateway: %s", self.upnp.gateway.manufacturer_string)
else:
log.error("failed to setup upnp")
if not self.external_ip:
self.external_ip, probed_url = await utils.get_external_ip(self.conf.lbryum_servers)
if self.external_ip:
log.info("detected external ip using %s fallback", probed_url)
if self.component_manager.analytics_manager:
self.component_manager.loop.create_task(
self.component_manager.analytics_manager.send_upnp_setup_success_fail(
success, await self.get_status()
)
)
self._maintain_redirects_task = self.component_manager.loop.create_task(
self._repeatedly_maintain_redirects(now=False)
)
async def stop(self):
if self.upnp_redirects:
log.info("Removing upnp redirects: %s", self.upnp_redirects)
            # asyncio.wait() no longer accepts bare coroutines, so gather the
            # deletions instead, tolerating individual failures as before
            await asyncio.gather(*(
                self.upnp.delete_port_mapping(port, protocol)
                for protocol, port in self.upnp_redirects.items()
            ), return_exceptions=True)
if self._maintain_redirects_task and not self._maintain_redirects_task.done():
self._maintain_redirects_task.cancel()
async def get_status(self):
return {
'aioupnp_version': aioupnp_version,
'redirects': self.upnp_redirects,
'gateway': 'No gateway found' if not self.upnp else self.upnp.gateway.manufacturer_string,
'dht_redirect_set': 'UDP' in self.upnp_redirects,
'peer_redirect_set': 'TCP' in self.upnp_redirects,
'external_ip': self.external_ip
}
class ExchangeRateManagerComponent(Component):
component_name = EXCHANGE_RATE_MANAGER_COMPONENT
def __init__(self, component_manager):
super().__init__(component_manager)
self.exchange_rate_manager = ExchangeRateManager()
@property
def component(self) -> ExchangeRateManager:
return self.exchange_rate_manager
async def start(self):
self.exchange_rate_manager.start()
async def stop(self):
self.exchange_rate_manager.stop()
class TrackerAnnouncerComponent(Component):
component_name = TRACKER_ANNOUNCER_COMPONENT
depends_on = [FILE_MANAGER_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.file_manager = None
self.announce_task = None
self.tracker_client: typing.Optional[TrackerClient] = None
@property
def component(self):
return self.tracker_client
@property
def running(self):
return self._running and self.announce_task and not self.announce_task.done()
async def announce_forever(self):
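        # once a minute, announce the sd hash of every seedable file
        # (one with an active downloader) to the tracker servers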
while True:
sleep_seconds = 60.0
announce_sd_hashes = []
for file in self.file_manager.get_filtered():
if not file.downloader:
continue
announce_sd_hashes.append(bytes.fromhex(file.sd_hash))
await self.tracker_client.announce_many(*announce_sd_hashes)
await asyncio.sleep(sleep_seconds)
async def start(self):
node = self.component_manager.get_component(DHT_COMPONENT) \
if self.component_manager.has_component(DHT_COMPONENT) else None
node_id = node.protocol.node_id if node else None
self.tracker_client = TrackerClient(node_id, self.conf.tcp_port, lambda: self.conf.tracker_servers)
await self.tracker_client.start()
self.file_manager = self.component_manager.get_component(FILE_MANAGER_COMPONENT)
self.announce_task = asyncio.create_task(self.announce_forever())
    async def stop(self):
        self.file_manager = None
        if self.announce_task and not self.announce_task.done():
            self.announce_task.cancel()
        self.announce_task = None
        if self.tracker_client:
            self.tracker_client.stop()

File diff suppressed because it is too large


@@ -1,361 +0,0 @@
import logging
from decimal import Decimal
from binascii import hexlify, unhexlify
from datetime import datetime
from json import JSONEncoder
from google.protobuf.message import DecodeError
from lbry.schema.claim import Claim
from lbry.schema.support import Support
from lbry.torrent.torrent_manager import TorrentSource
from lbry.wallet import Wallet, Ledger, Account, Transaction, Output
from lbry.wallet.bip32 import PublicKey
from lbry.wallet.dewies import dewies_to_lbc
from lbry.stream.managed_stream import ManagedStream
log = logging.getLogger(__name__)
def encode_txo_doc():
return {
'txid': "hash of transaction in hex",
'nout': "position in the transaction",
'height': "block where transaction was recorded",
'amount': "value of the txo as a decimal",
'address': "address of who can spend the txo",
'confirmations': "number of confirmed blocks",
'is_change': "payment to change address, only available when it can be determined",
'is_received': "true if txo was sent from external account to this account",
'is_spent': "true if txo is spent",
'is_mine': "payment to one of your accounts, only available when it can be determined",
'type': "one of 'claim', 'support' or 'purchase'",
'name': "when type is 'claim' or 'support', this is the claim name",
'claim_id': "when type is 'claim', 'support' or 'purchase', this is the claim id",
'claim_op': "when type is 'claim', this determines if it is 'create' or 'update'",
'value': "when type is 'claim' or 'support' with payload, this is the decoded protobuf payload",
'value_type': "determines the type of the 'value' field: 'channel', 'stream', etc",
'protobuf': "hex encoded raw protobuf version of 'value' field",
'permanent_url': "when type is 'claim' or 'support', this is the long permanent claim URL",
'claim': "for purchase outputs only, metadata of purchased claim",
'reposted_claim': "for repost claims only, metadata of claim being reposted",
'signing_channel': "for signed claims only, metadata of signing channel",
'is_channel_signature_valid': "for signed claims only, whether signature is valid",
'purchase_receipt': "metadata for the purchase transaction associated with this claim"
}
def encode_tx_doc():
return {
'txid': "hash of transaction in hex",
'height': "block where transaction was recorded",
'inputs': [encode_txo_doc()],
'outputs': [encode_txo_doc()],
'total_input': "sum of inputs as a decimal",
'total_output': "sum of outputs, sans fee, as a decimal",
'total_fee': "fee amount",
'hex': "entire transaction encoded in hex",
}
def encode_account_doc():
return {
'id': 'account_id',
'is_default': 'this account is used by default',
'ledger': 'name of crypto currency and network',
'name': 'optional account name',
'seed': 'human friendly words from which account can be recreated',
'encrypted': 'if account is encrypted',
'private_key': 'extended private key',
'public_key': 'extended public key',
'address_generator': 'settings for generating addresses',
'modified_on': 'date of last modification to account settings'
}
def encode_wallet_doc():
return {
'id': 'wallet_id',
'name': 'optional wallet name',
}
def encode_file_doc():
return {
'streaming_url': '(str) url to stream the file using range requests',
'completed': '(bool) true if download is completed',
'file_name': '(str) name of file',
'download_directory': '(str) download directory',
'points_paid': '(float) credit paid to download file',
'stopped': '(bool) true if download is stopped',
'stream_hash': '(str) stream hash of file',
'stream_name': '(str) stream name',
'suggested_file_name': '(str) suggested file name',
'sd_hash': '(str) sd hash of file',
'download_path': '(str) download path of file',
'mime_type': '(str) mime type of file',
'key': '(str) key attached to file',
'total_bytes_lower_bound': '(int) lower bound file size in bytes',
        'total_bytes': '(int) upper bound file size in bytes',
'written_bytes': '(int) written size in bytes',
'blobs_completed': '(int) number of fully downloaded blobs',
'blobs_in_stream': '(int) total blobs on stream',
'blobs_remaining': '(int) total blobs remaining to download',
'status': '(str) downloader status',
'claim_id': '(str) None if claim is not found else the claim id',
'txid': '(str) None if claim is not found else the transaction id',
'nout': '(int) None if claim is not found else the transaction output index',
'outpoint': '(str) None if claim is not found else the tx and output',
'metadata': '(dict) None if claim is not found else the claim metadata',
'channel_claim_id': '(str) None if claim is not found or not signed',
'channel_name': '(str) None if claim is not found or not signed',
'claim_name': '(str) None if claim is not found else the claim name',
'reflector_progress': '(int) reflector upload progress, 0 to 100',
'uploading_to_reflector': '(bool) set to True when currently uploading to reflector'
}
class JSONResponseEncoder(JSONEncoder):
def __init__(self, *args, ledger: Ledger, include_protobuf=False, **kwargs):
super().__init__(*args, **kwargs)
self.ledger = ledger
self.include_protobuf = include_protobuf
def default(self, obj): # pylint: disable=method-hidden,arguments-renamed,too-many-return-statements
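        # dispatch on the value's type; anything unhandled falls through to
        # JSONEncoder.default, which raises TypeError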
if isinstance(obj, Account):
return self.encode_account(obj)
if isinstance(obj, Wallet):
return self.encode_wallet(obj)
if isinstance(obj, (ManagedStream, TorrentSource)):
return self.encode_file(obj)
if isinstance(obj, Transaction):
return self.encode_transaction(obj)
if isinstance(obj, Output):
return self.encode_output(obj)
if isinstance(obj, Claim):
return self.encode_claim(obj)
if isinstance(obj, Support):
return obj.to_dict()
if isinstance(obj, PublicKey):
return obj.extended_key_string()
if isinstance(obj, datetime):
return obj.strftime("%Y%m%dT%H:%M:%S")
if isinstance(obj, Decimal):
return float(obj)
if isinstance(obj, bytes):
return obj.decode()
return super().default(obj)
def encode_transaction(self, tx):
return {
'txid': tx.id,
'height': tx.height,
'inputs': [self.encode_input(txo) for txo in tx.inputs],
'outputs': [self.encode_output(txo) for txo in tx.outputs],
'total_input': dewies_to_lbc(tx.input_sum),
'total_output': dewies_to_lbc(tx.input_sum - tx.fee),
'total_fee': dewies_to_lbc(tx.fee),
'hex': hexlify(tx.raw).decode(),
}
def encode_output(self, txo, check_signature=True):
if not txo:
return
tx_height = txo.tx_ref.height
best_height = self.ledger.headers.height
output = {
'txid': txo.tx_ref.id,
'nout': txo.position,
'height': tx_height,
'amount': dewies_to_lbc(txo.amount),
'address': txo.get_address(self.ledger) if txo.has_address else None,
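            # txos from unconfirmed transactions carry a non-positive height, which is reported as-is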
'confirmations': (best_height+1) - tx_height if tx_height > 0 else tx_height,
'timestamp': self.ledger.headers.estimated_timestamp(tx_height)
}
if txo.is_spent is not None:
output['is_spent'] = txo.is_spent
if txo.is_my_output is not None:
output['is_my_output'] = txo.is_my_output
if txo.is_my_input is not None:
output['is_my_input'] = txo.is_my_input
if txo.sent_supports is not None:
output['sent_supports'] = dewies_to_lbc(txo.sent_supports)
if txo.sent_tips is not None:
output['sent_tips'] = dewies_to_lbc(txo.sent_tips)
if txo.received_tips is not None:
output['received_tips'] = dewies_to_lbc(txo.received_tips)
if txo.is_internal_transfer is not None:
output['is_internal_transfer'] = txo.is_internal_transfer
if txo.script.is_claim_name:
output['type'] = 'claim'
output['claim_op'] = 'create'
elif txo.script.is_update_claim:
output['type'] = 'claim'
output['claim_op'] = 'update'
elif txo.script.is_support_claim:
output['type'] = 'support'
elif txo.script.is_return_data:
output['type'] = 'data'
elif txo.purchase is not None:
output['type'] = 'purchase'
output['claim_id'] = txo.purchased_claim_id
if txo.purchased_claim is not None:
output['claim'] = self.encode_output(txo.purchased_claim)
else:
output['type'] = 'payment'
if txo.script.is_claim_involved:
output.update({
'name': txo.claim_name,
'normalized_name': txo.normalized_name,
'claim_id': txo.claim_id,
'permanent_url': txo.permanent_url,
'meta': self.encode_claim_meta(txo.meta.copy())
})
if 'short_url' in output['meta']:
output['short_url'] = output['meta'].pop('short_url')
if 'canonical_url' in output['meta']:
output['canonical_url'] = output['meta'].pop('canonical_url')
if txo.claims is not None:
output['claims'] = [self.encode_output(o) for o in txo.claims]
if txo.reposted_claim is not None:
output['reposted_claim'] = self.encode_output(txo.reposted_claim)
if txo.script.is_claim_name or txo.script.is_update_claim or txo.script.is_support_claim_data:
try:
output['value'] = txo.signable
if self.include_protobuf:
output['protobuf'] = hexlify(txo.signable.to_bytes())
if txo.purchase_receipt is not None:
output['purchase_receipt'] = self.encode_output(txo.purchase_receipt)
if txo.script.is_claim_name or txo.script.is_update_claim:
output['value_type'] = txo.claim.claim_type
if txo.claim.is_channel:
output['has_signing_key'] = txo.has_private_key
if check_signature and txo.signable.is_signed:
if txo.channel is not None:
output['signing_channel'] = self.encode_output(txo.channel)
output['is_channel_signature_valid'] = txo.is_signed_by(txo.channel, self.ledger)
else:
output['signing_channel'] = {'channel_id': txo.signable.signing_channel_id}
output['is_channel_signature_valid'] = False
except DecodeError:
pass
return output
def encode_claim_meta(self, meta):
for key, value in meta.items():
if key.endswith('_amount'):
if isinstance(value, int):
meta[key] = dewies_to_lbc(value)
if 0 < meta.get('creation_height', 0) <= self.ledger.headers.height:
meta['creation_timestamp'] = self.ledger.headers.estimated_timestamp(meta['creation_height'])
return meta
def encode_input(self, txi):
return self.encode_output(txi.txo_ref.txo, False) if txi.txo_ref.txo is not None else {
'txid': txi.txo_ref.tx_ref.id,
'nout': txi.txo_ref.position
}
def encode_account(self, account):
result = account.to_dict()
result['id'] = account.id
result.pop('certificates', None)
result['is_default'] = self.ledger.accounts[0] == account
return result
@staticmethod
def encode_wallet(wallet):
return {
'id': wallet.id,
'name': wallet.name
}
def encode_file(self, managed_stream):
output_exists = managed_stream.output_file_exists
tx_height = managed_stream.stream_claim_info.height
best_height = self.ledger.headers.height
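        # torrent sources lack a stream_hash; their size comes from
        # torrent_length rather than the stream descriptor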
is_stream = hasattr(managed_stream, 'stream_hash')
if is_stream:
total_bytes_lower_bound = managed_stream.descriptor.lower_bound_decrypted_length()
total_bytes = managed_stream.descriptor.upper_bound_decrypted_length()
else:
total_bytes_lower_bound = total_bytes = managed_stream.torrent_length
result = {
'streaming_url': None,
'completed': managed_stream.completed,
'file_name': None,
'download_directory': None,
'download_path': None,
'points_paid': 0.0,
'stopped': not managed_stream.running,
'stream_hash': None,
'stream_name': None,
'suggested_file_name': None,
'sd_hash': None,
'mime_type': None,
'key': None,
'total_bytes_lower_bound': total_bytes_lower_bound,
'total_bytes': total_bytes,
'written_bytes': managed_stream.written_bytes,
'blobs_completed': None,
'blobs_in_stream': None,
'blobs_remaining': None,
'status': managed_stream.status,
'claim_id': managed_stream.claim_id,
'txid': managed_stream.txid,
'nout': managed_stream.nout,
'outpoint': managed_stream.outpoint,
'metadata': managed_stream.metadata,
'protobuf': managed_stream.metadata_protobuf,
'channel_claim_id': managed_stream.channel_claim_id,
'channel_name': managed_stream.channel_name,
'claim_name': managed_stream.claim_name,
'content_fee': managed_stream.content_fee,
'purchase_receipt': self.encode_output(managed_stream.purchase_receipt),
'added_on': managed_stream.added_on,
'height': tx_height,
'confirmations': (best_height + 1) - tx_height if tx_height > 0 else tx_height,
'timestamp': self.ledger.headers.estimated_timestamp(tx_height),
'is_fully_reflected': False,
            'reflector_progress': 0,
'uploading_to_reflector': False
}
if is_stream:
result.update({
'streaming_url': managed_stream.stream_url,
'stream_hash': managed_stream.stream_hash,
'stream_name': managed_stream.stream_name,
'suggested_file_name': managed_stream.suggested_file_name,
'sd_hash': managed_stream.descriptor.sd_hash,
'mime_type': managed_stream.mime_type,
'key': managed_stream.descriptor.key,
'blobs_completed': managed_stream.blobs_completed,
'blobs_in_stream': managed_stream.blobs_in_stream,
'blobs_remaining': managed_stream.blobs_remaining,
'is_fully_reflected': managed_stream.is_fully_reflected,
'reflector_progress': managed_stream.reflector_progress,
'uploading_to_reflector': managed_stream.uploading_to_reflector
})
else:
result.update({
'streaming_url': f'file://{managed_stream.full_path}',
})
if output_exists:
result.update({
'file_name': managed_stream.file_name,
'download_directory': managed_stream.download_directory,
'download_path': managed_stream.full_path,
})
return result
def encode_claim(self, claim):
encoded = getattr(claim, claim.claim_type).to_dict()
if 'public_key' in encoded:
encoded['public_key_id'] = self.ledger.public_key_to_address(
unhexlify(encoded['public_key'])
)
return encoded


@@ -1,74 +0,0 @@
# pylint: skip-file
import os
import sys
from types import SimpleNamespace
import logging
log = logging.getLogger(__name__)
def migrate_db(conf, start, end):
current = start
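    # step through one schema revision at a time, importing the matching migration module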
while current < end:
if current == 1:
from .migrate1to2 import do_migration
elif current == 2:
from .migrate2to3 import do_migration
elif current == 3:
from .migrate3to4 import do_migration
elif current == 4:
from .migrate4to5 import do_migration
elif current == 5:
from .migrate5to6 import do_migration
elif current == 6:
from .migrate6to7 import do_migration
elif current == 7:
from .migrate7to8 import do_migration
elif current == 8:
from .migrate8to9 import do_migration
elif current == 9:
from .migrate9to10 import do_migration
elif current == 10:
from .migrate10to11 import do_migration
elif current == 11:
from .migrate11to12 import do_migration
elif current == 12:
from .migrate12to13 import do_migration
elif current == 13:
from .migrate13to14 import do_migration
elif current == 14:
from .migrate14to15 import do_migration
elif current == 15:
from .migrate15to16 import do_migration
else:
raise Exception(f"DB migration of version {current} to {current+1} is not available")
try:
do_migration(conf)
except Exception:
log.exception("failed to migrate database")
if os.path.exists(os.path.join(conf.data_dir, "lbrynet.sqlite")):
backup_name = f"rev_{current}_unmigrated_database"
count = 0
while os.path.exists(os.path.join(conf.data_dir, backup_name + ".sqlite")):
count += 1
backup_name = f"rev_{current}_unmigrated_database_{count}"
backup_path = os.path.join(conf.data_dir, backup_name + ".sqlite")
os.rename(os.path.join(conf.data_dir, "lbrynet.sqlite"), backup_path)
log.info("made a backup of the unmigrated database: %s", backup_path)
if os.path.isfile(os.path.join(conf.data_dir, "db_revision")):
os.remove(os.path.join(conf.data_dir, "db_revision"))
return None
current += 1
log.info("successfully migrated the database from revision %i to %i", current - 1, current)
return None
def run_migration_script():
log_format = "(%(asctime)s)[%(filename)s:%(lineno)s] %(funcName)s(): %(message)s"
logging.basicConfig(level=logging.DEBUG, format=log_format, filename="migrator.log")
sys.stdout = open("migrator.out.log", 'w')
sys.stderr = open("migrator.err.log", 'w')
    # sys.argv[1] is assumed to be the data directory; migrate_db reads conf.data_dir
    migrate_db(SimpleNamespace(data_dir=sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]))
if __name__ == "__main__":
run_migration_script()


@@ -1,54 +0,0 @@
import sqlite3
import os
import binascii
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
current_columns = []
for col_info in cursor.execute("pragma table_info('file');").fetchall():
current_columns.append(col_info[1])
if 'content_fee' in current_columns or 'saved_file' in current_columns:
connection.close()
print("already migrated")
return
cursor.execute(
"pragma foreign_keys=off;"
)
cursor.execute("""
create table if not exists new_file (
stream_hash text primary key not null references stream,
file_name text,
download_directory text,
blob_data_rate real not null,
status text not null,
saved_file integer not null,
content_fee text
);
""")
for (stream_hash, file_name, download_dir, data_rate, status) in cursor.execute("select * from file").fetchall():
saved_file = 0
if download_dir != '{stream}' and file_name != '{stream}':
try:
if os.path.isfile(os.path.join(binascii.unhexlify(download_dir).decode(),
binascii.unhexlify(file_name).decode())):
saved_file = 1
else:
download_dir, file_name = None, None
except Exception:
download_dir, file_name = None, None
else:
download_dir, file_name = None, None
cursor.execute(
"insert into new_file values (?, ?, ?, ?, ?, ?, NULL)",
(stream_hash, file_name, download_dir, data_rate, status, saved_file)
)
cursor.execute("drop table file")
cursor.execute("alter table new_file rename to file")
connection.commit()
connection.close()


@@ -1,69 +0,0 @@
import sqlite3
import os
import time
def do_migration(conf):
db_path = os.path.join(conf.data_dir, 'lbrynet.sqlite')
connection = sqlite3.connect(db_path)
connection.row_factory = sqlite3.Row
cursor = connection.cursor()
current_columns = []
for col_info in cursor.execute("pragma table_info('file');").fetchall():
current_columns.append(col_info[1])
if 'added_on' in current_columns:
connection.close()
print('already migrated')
return
# follow 12 step schema change procedure
cursor.execute("pragma foreign_keys=off")
# we don't have any indexes, views or triggers, so step 3 is skipped.
cursor.execute("drop table if exists new_file")
cursor.execute("""
create table if not exists new_file (
stream_hash text not null primary key references stream,
file_name text,
download_directory text,
blob_data_rate text not null,
status text not null,
saved_file integer not null,
content_fee text,
added_on integer not null
);
""")
# step 5: transfer content from old to new
select = "select * from file"
for (stream_hash, file_name, download_dir, blob_rate, status, saved_file, fee) \
in cursor.execute(select).fetchall():
added_on = int(time.time())
cursor.execute(
"insert into new_file values (?, ?, ?, ?, ?, ?, ?, ?)",
(stream_hash, file_name, download_dir, blob_rate, status, saved_file, fee, added_on)
)
# step 6: drop old table
cursor.execute("drop table file")
# step 7: rename new table to old table
cursor.execute("alter table new_file rename to file")
# step 8: we aren't using indexes, views or triggers so skip
# step 9: no views so skip
# step 10: foreign key check
cursor.execute("pragma foreign_key_check;")
# step 11: commit transaction
connection.commit()
# step 12: re-enable foreign keys
connection.execute("pragma foreign_keys=on;")
# done :)
connection.close()


@@ -1,80 +0,0 @@
import os
import sqlite3
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
current_columns = []
for col_info in cursor.execute("pragma table_info('file');").fetchall():
current_columns.append(col_info[1])
if 'bt_infohash' in current_columns:
connection.close()
print("already migrated")
return
cursor.executescript("""
pragma foreign_keys=off;
create table if not exists torrent (
bt_infohash char(20) not null primary key,
tracker text,
length integer not null,
name text not null
);
create table if not exists torrent_node ( -- BEP-0005
bt_infohash char(20) not null references torrent,
host text not null,
port integer not null
);
create table if not exists torrent_tracker ( -- BEP-0012
bt_infohash char(20) not null references torrent,
tracker text not null
);
create table if not exists torrent_http_seed ( -- BEP-0017
bt_infohash char(20) not null references torrent,
http_seed text not null
);
create table if not exists new_file (
stream_hash char(96) references stream,
bt_infohash char(20) references torrent,
file_name text,
download_directory text,
blob_data_rate real not null,
status text not null,
saved_file integer not null,
content_fee text,
added_on integer not null
);
create table if not exists new_content_claim (
stream_hash char(96) references stream,
bt_infohash char(20) references torrent,
claim_outpoint text unique not null references claim
);
insert into new_file (stream_hash, bt_infohash, file_name, download_directory, blob_data_rate, status,
saved_file, content_fee, added_on) select
stream_hash, NULL, file_name, download_directory, blob_data_rate, status, saved_file, content_fee,
added_on
from file;
insert or ignore into new_content_claim (stream_hash, bt_infohash, claim_outpoint)
select stream_hash, NULL, claim_outpoint from content_claim;
drop table file;
drop table content_claim;
alter table new_file rename to file;
alter table new_content_claim rename to content_claim;
pragma foreign_keys=on;
""")
connection.commit()
connection.close()


@@ -1,21 +0,0 @@
import os
import sqlite3
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
cursor.executescript("""
create table if not exists peer (
node_id char(96) not null primary key,
address text not null,
udp_port integer not null,
tcp_port integer,
unique (address, udp_port)
);
""")
connection.commit()
connection.close()


@@ -1,16 +0,0 @@
import os
import sqlite3
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
cursor.executescript("""
alter table blob add column added_on integer not null default 0;
alter table blob add column is_mine integer not null default 1;
""")
connection.commit()
connection.close()


@@ -1,17 +0,0 @@
import os
import sqlite3
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
cursor.executescript("""
update blob set should_announce=0
where should_announce=1 and
blob.blob_hash in (select stream_blob.blob_hash from stream_blob where position=0);
""")
connection.commit()
connection.close()


@@ -1,77 +0,0 @@
import sqlite3
import os
import logging
log = logging.getLogger(__name__)
UNSET_NOUT = -1
def do_migration(conf):
log.info("Doing the migration")
migrate_blockchainname_db(conf.data_dir)
log.info("Migration succeeded")
def migrate_blockchainname_db(db_dir):
blockchainname_db = os.path.join(db_dir, "blockchainname.db")
# skip migration on fresh installs
if not os.path.isfile(blockchainname_db):
return
temp_db = sqlite3.connect(":memory:")
db_file = sqlite3.connect(blockchainname_db)
file_cursor = db_file.cursor()
mem_cursor = temp_db.cursor()
mem_cursor.execute("create table if not exists name_metadata ("
" name text, "
" txid text, "
" n integer, "
" sd_hash text)")
mem_cursor.execute("create table if not exists claim_ids ("
" claimId text, "
" name text, "
" txid text, "
" n integer)")
temp_db.commit()
name_metadata = file_cursor.execute("select * from name_metadata").fetchall()
claim_metadata = file_cursor.execute("select * from claim_ids").fetchall()
    # fill n as UNSET_NOUT; Wallet.py will be responsible for filling in the correct n
for name, txid, sd_hash in name_metadata:
mem_cursor.execute(
"insert into name_metadata values (?, ?, ?, ?) ",
(name, txid, UNSET_NOUT, sd_hash))
for claim_id, name, txid in claim_metadata:
mem_cursor.execute(
"insert into claim_ids values (?, ?, ?, ?)",
(claim_id, name, txid, UNSET_NOUT))
temp_db.commit()
new_name_metadata = mem_cursor.execute("select * from name_metadata").fetchall()
new_claim_metadata = mem_cursor.execute("select * from claim_ids").fetchall()
file_cursor.execute("drop table name_metadata")
file_cursor.execute("create table name_metadata ("
" name text, "
" txid text, "
" n integer, "
" sd_hash text)")
for name, txid, n, sd_hash in new_name_metadata:
file_cursor.execute(
"insert into name_metadata values (?, ?, ?, ?) ", (name, txid, n, sd_hash))
file_cursor.execute("drop table claim_ids")
file_cursor.execute("create table claim_ids ("
" claimId text, "
" name text, "
" txid text, "
" n integer)")
for claim_id, name, txid, n in new_claim_metadata:
file_cursor.execute("insert into claim_ids values (?, ?, ?, ?)", (claim_id, name, txid, n))
db_file.commit()
db_file.close()
temp_db.close()


@@ -1,42 +0,0 @@
import sqlite3
import os
import logging
log = logging.getLogger(__name__)
def do_migration(conf):
log.info("Doing the migration")
migrate_blockchainname_db(conf.data_dir)
log.info("Migration succeeded")
def migrate_blockchainname_db(db_dir):
blockchainname_db = os.path.join(db_dir, "blockchainname.db")
# skip migration on fresh installs
if not os.path.isfile(blockchainname_db):
return
db_file = sqlite3.connect(blockchainname_db)
file_cursor = db_file.cursor()
tables = file_cursor.execute("SELECT tbl_name FROM sqlite_master "
"WHERE type='table'").fetchall()
if 'tmp_name_metadata_table' in tables and 'name_metadata' not in tables:
file_cursor.execute("ALTER TABLE tmp_name_metadata_table RENAME TO name_metadata")
else:
file_cursor.executescript(
"CREATE TABLE IF NOT EXISTS tmp_name_metadata_table "
" (name TEXT UNIQUE NOT NULL, "
" txid TEXT NOT NULL, "
" n INTEGER NOT NULL, "
" sd_hash TEXT NOT NULL); "
"INSERT OR IGNORE INTO tmp_name_metadata_table "
" (name, txid, n, sd_hash) "
" SELECT name, txid, n, sd_hash FROM name_metadata; "
"DROP TABLE name_metadata; "
"ALTER TABLE tmp_name_metadata_table RENAME TO name_metadata;"
)
db_file.commit()
db_file.close()

Some files were not shown because too many files have changed in this diff