Compare commits


432 commits

Author SHA1 Message Date
Victor Shyba
cf73e4599f Merge pull request #3585 from lbryio/fix_blob_db_queries
Fixes bugs on disk space management and stream recovery
2022-03-24 21:05:09 -03:00
Victor Shyba
2a698932da implement announcer as a consumer task on gather 2022-03-15 15:36:00 -03:00
Victor Shyba
ee36162b16 make active an explicit ordered dict 2022-03-15 15:36:00 -03:00
Victor Shyba
97f472d4e1 remove unused search rounds 2022-03-15 15:36:00 -03:00
Victor Shyba
160e227e90 timeout is now supported on dht tests 2022-03-15 15:36:00 -03:00
Victor Shyba
475ba010e3 fix and enable test_blob_announcer 2022-03-15 15:36:00 -03:00
Victor Shyba
57ebbbcb78 simplify dht mock and restore clock after accelerating 2022-03-15 15:36:00 -03:00
Victor Shyba
cbf973bf2d better representation of kademliapeer on debug logs 2022-03-15 15:36:00 -03:00
Victor Shyba
9d26ad96c6 add a way to wait announcements to finish so tests are reliable 2022-03-15 15:36:00 -03:00
Victor Shyba
c2a3ec3265 make timeout handler immune to asyncio time tricks 2022-03-15 15:36:00 -03:00
Victor Shyba
521d783260 allow running some extra probes for k replacements 2022-03-15 15:36:00 -03:00
Victor Shyba
50846cd37e remove all references to bottoming out 2022-03-15 15:36:00 -03:00
Victor Shyba
40ed5f311b no stop condition, let it exhaust 2022-03-15 15:36:00 -03:00
Victor Shyba
293410ac9f bottoming out is now warning and no results for peer search 2022-03-15 15:36:00 -03:00
Victor Shyba
068f46d137 don't probe peers too far from the top closest 2022-03-15 15:36:00 -03:00
Victor Shyba
5c60c09fef use a dict for the active queue 2022-03-15 15:36:00 -03:00
Victor Shyba
aca4a243d6 log bottom out of peer search in debug, show short key id for find value 2022-03-15 15:36:00 -03:00
Victor Shyba
422f292d82 bump bottom out limit of peer search so people can use 100 concurrent announcers 2022-03-15 15:36:00 -03:00
Victor Shyba
c81598aab8 wait until k peers are ready. do not double add peers 2022-03-15 15:36:00 -03:00
Victor Shyba
3f8c8e01c6 only return good (contacted) peers 2022-03-15 15:36:00 -03:00
Victor Shyba
8d90653395 reset closest peer on failure 2022-03-15 15:36:00 -03:00
Victor Shyba
01841694ad add peers from shortlist regardless, but check from other nodes 2022-03-15 15:36:00 -03:00
Victor Shyba
c3d0dd3073 bump split index to 2 2022-03-15 15:36:00 -03:00
Victor Shyba
4851f5c300 fix distance sorting and improve logging 2022-03-15 15:36:00 -03:00
Victor Shyba
aef5f11a18 closest peer is only ready when it was contacted and isn't known to be bad 2022-03-15 15:36:00 -03:00
Victor Shyba
700f1cb8e5 dont probe and ignore bad peers 2022-03-15 15:36:00 -03:00
Victor Shyba
7e5f7c1725 simplify, generalize to any size and fix tests 2022-03-15 15:36:00 -03:00
Victor Shyba
aa7a17b112 stop after finding what to download 2022-03-15 15:36:00 -03:00
Victor Shyba
838fd71538 replace duplicated code 2022-03-15 15:36:00 -03:00
Victor Shyba
0146044b90 add get_colliding_prefix_bits, docs and tests 2022-03-15 15:36:00 -03:00
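A minimal sketch of what a `get_colliding_prefix_bits` helper can compute: the number of leading bits shared by two byte strings, such as a stored blob hash and the node's DHT id. Only the helper's name comes from the commit above; its actual signature and location in the SDK are assumptions here.
```
def get_colliding_prefix_bits(first: bytes, second: bytes) -> int:
    """Count how many leading bits the two byte strings have in common (sketch)."""
    shared = 0
    for byte_a, byte_b in zip(first, second):
        diff = byte_a ^ byte_b
        if diff == 0:
            shared += 8
            continue
        # the first differing byte still contributes its matching high bits
        shared += 8 - diff.bit_length()
        break
    return shared

assert get_colliding_prefix_bits(bytes.fromhex("ab"), bytes.fromhex("aa")) == 7
```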
Victor Shyba
f8f73ed4a6 extract method and avoid using hash builtin name 2022-03-15 15:36:00 -03:00
Victor Shyba
74bf8c551b extract min_prefix_colliding_bits to a constant 2022-03-15 15:36:00 -03:00
Victor Shyba
4be346d0bb check that the stored blob is at least 1 prefix byte close to peer id 2022-03-15 15:36:00 -03:00
Victor Shyba
4fd1fe0046 add migrator to set head blobs should_announce=0 2022-03-15 15:36:00 -03:00
Victor Shyba
9524f03eac fix test_announces 2022-03-15 15:36:00 -03:00
Victor Shyba
90c8a8bea3 do not search for the head blob 2022-03-15 15:36:00 -03:00
Victor Shyba
154c41bd8d dont set head blob to announce on save 2022-03-15 15:36:00 -03:00
Victor Shyba
f271511484 break tie by length 2022-03-15 15:36:00 -03:00
Victor Shyba
dde4a5b495 fix and test case for blob_clean after disabling network storage 2022-03-15 15:36:00 -03:00
Victor Shyba
68a940f3a6 put back all the peers, get rid of re_add 2022-03-15 15:36:00 -03:00
Alex Grintsvayg
8c773151a3 let stream_update work on non-stream claims 2022-03-15 15:36:00 -03:00
Jeffrey Picard
f1f5276a04 Update __init__.py
Update go hub binary to fix es sync test.
2022-03-15 15:36:00 -03:00
Lex Berezhny
51490b36e6 fix 2022-03-15 15:36:00 -03:00
Lex Berezhny
e00dc78e1f prevent creation of change which is below the dust threshold of 1000 dewies 2022-03-15 15:36:00 -03:00
Victor Shyba
72beb02ec1 bump DHT peer manager cache to 16384 2022-03-15 15:36:00 -03:00
Victor Shyba
b13c346418 add gauge for queue size 2022-03-15 15:36:00 -03:00
Victor Shyba
2520465fc1 count announcements and how many peers we were able to announce to 2022-03-15 15:36:00 -03:00
Eugene Dubinin
dd868d5148 adds tests for guess_media_type
removes unnecessary comments
2022-03-15 15:36:00 -03:00
Eugene Dubinin
e90fb9a28f adjusts code style 2022-03-15 15:36:00 -03:00
Eugene Dubinin
96d1745232 fixes KeyError on missing synonyms 2022-03-15 15:36:00 -03:00
Eugene Dubinin
cab5f9188f detect media_type from the file contents 2022-03-15 15:36:00 -03:00
Victor Shyba
44e647d85d remove request_flight metric 2022-03-15 15:36:00 -03:00
Victor Shyba
5686c2ade9 disable CSV endpoints by default 2022-03-15 15:36:00 -03:00
Victor Shyba
1cc321e9cd remove estimation endpoints as that is done over prometheus metrics now 2022-03-15 15:36:00 -03:00
Victor Shyba
b4d144da21 add granular metric for stored blob prefix, for network announcements calculation 2022-03-15 15:36:00 -03:00
Victor Shyba
39c2c0b922 change colliding bits metric to gauge 2022-03-15 15:36:00 -03:00
Victor Shyba
dbf11fe750 count bit collisions between 8 and 16 2022-03-15 15:36:00 -03:00
Victor Shyba
cc391324e4 add counter for peers with colliding bytes 2022-03-15 15:36:00 -03:00
Victor Shyba
273b4f6d3b add requests in flight and error 2022-03-15 15:36:00 -03:00
Victor Shyba
d359d25935 add request received 2022-03-15 15:36:00 -03:00
Victor Shyba
645a81cec2 add request_sent and request_time metric on dht 2022-03-15 15:36:00 -03:00
Victor Shyba
06da53ef09 add storing_peers and peer_manager_keys 2022-03-15 15:36:00 -03:00
Victor Shyba
692af0cc55 prometheus: move blobs_stored and peers to SDK. add buckets_in_routing_table 2022-03-15 15:36:00 -03:00
Victor Shyba
32b95fa04a add passive estimation to prometheus 2022-03-15 15:36:00 -03:00
Victor Shyba
5ebcbc6370 fix missing async 2022-03-15 15:36:00 -03:00
Victor Shyba
3978d64056 keep same node id between runs 2022-03-15 15:35:59 -03:00
Victor Shyba
d35f7a5ed5 add semaphore on active estimation to avoid abuse 2022-03-15 15:35:59 -03:00
Victor Shyba
2f84c38f6a same api across different estimation methods 2022-03-15 15:35:59 -03:00
Victor Shyba
b656759e0e be explicit about ignoring params 2022-03-15 15:35:59 -03:00
Victor Shyba
15ac365463 better endpoint names, small docs 2022-03-15 15:35:59 -03:00
Victor Shyba
2ab78a92ca add prefix_neighbors_count to routing table debug api 2022-03-15 15:35:59 -03:00
Jack Robison
71b5c4c6ad improve script 2022-03-15 15:35:59 -03:00
Victor Shyba
eae1a0912b first attempt at crawling 2022-03-15 15:35:59 -03:00
Lex Berezhny
d5ec647f46 added integration test 2022-03-15 15:35:59 -03:00
vertbyqb
e066b2a9da jsonrpc_channel_sign - Convert hexdata to a string before signing
Fixes #3533
2022-03-15 15:35:59 -03:00
Victor Shyba
8adec9d0ad extract cache values, increase peer cache to 2048 2022-03-15 15:35:59 -03:00
Victor Shyba
030e407584 DHT bugfix: failures tracking should be bound to 2048 LRU cache size 2022-03-15 15:35:59 -03:00
Victor Shyba
7ed807e41b fix missing docopt argument 2022-03-15 15:35:59 -03:00
Victor Shyba
354cc9b22d bump hub version to latest supporting sd_hash search 2022-03-15 15:35:59 -03:00
Victor Shyba
ff51a5bdc6 update hub protobuf including sd_hash field 2022-03-15 15:35:59 -03:00
Victor Shyba
8618be1b5c increase indexed sd_hash prefix to 4 chars 2022-03-15 15:35:59 -03:00
Victor Shyba
ebe351c67e enable and test prefix search for sd hash 2022-03-15 15:35:59 -03:00
Victor Shyba
6ad2e31976 sync and search sd_hash 2022-03-15 15:35:59 -03:00
Victor Shyba
d44dc103d5 add test 2022-03-15 15:35:59 -03:00
Victor Shyba
331ce1f12c add sd hash to API 2022-03-15 15:35:59 -03:00
Lex Berezhny
37c408a6e7 changes default coin selection strategy from standard to prefer_confirmed 2022-03-15 15:34:09 -03:00
Victor Shyba
a866621ccb do not limit DHT results by K, respect max_results 2022-03-15 15:34:09 -03:00
Victor Shyba
b45b87e72d clarify DHT debug logging on key and operation 2022-03-15 15:34:09 -03:00
Jeffrey Picard
300ae6b134 Switch RangeField back to ints 2022-03-15 15:34:09 -03:00
Jeffrey Picard
0cb66e1f0d Try forcing tox reset 2022-03-15 15:34:09 -03:00
Jeffrey Picard
618f739049 Debugging 2022-03-15 15:34:09 -03:00
Jeffrey Picard
5c8329e951 Update es version in workflow 2022-03-15 15:34:09 -03:00
Jeffrey Picard
683f6b366d Update protobufs, go hub shim, and claim test. 2022-03-15 15:34:09 -03:00
Jack Robison
ce63c2281f remove unused ES fields 2022-03-15 15:34:09 -03:00
Jack Robison
251cb4925a support lists of constraints for all range fields 2022-03-15 15:34:09 -03:00
FemtosecondLaser
f8fe205066 returned conditional check in add_timeout() as it was making test_node.py tests unhappy 2022-03-15 15:34:09 -03:00
FemtosecondLaser
9908775576 removed conditional check in add_timeout() 2022-03-15 15:34:09 -03:00
FemtosecondLaser
bf7745ec8d changed addTimeout to add_timeout for lint compliance 2022-03-15 15:34:09 -03:00
FemtosecondLaser
1aa296e168 added timeout of async operations to integration test setup/teardown 2022-03-15 15:34:09 -03:00
Jack Robison
171c99b12b v0.106.0 2022-03-15 15:34:09 -03:00
Jack Robison
83bb12265e update json docs 2022-03-15 15:34:09 -03:00
Jack Robison
b4e99faba7 update claim_search doc
backward compatibility for `trending_mixed`, `trending_local`, `trending_global`, and `trending_group` args to `claim_search`
2022-03-15 15:34:09 -03:00
Jack Robison
78b559b274 update json docs 2022-03-15 15:34:09 -03:00
Jack Robison
974b933612 update header checkpoints 2022-03-15 15:34:09 -03:00
Victor Shyba
3aac113d45 test case for stream_type search on claims missing source + fix 2022-03-15 15:34:09 -03:00
Lex Berezhny
a04d50a3bc fee per name env var 2022-03-15 15:34:09 -03:00
Victor Shyba
6ec30798ca fix typo from arg name 2022-03-15 15:34:09 -03:00
Jack Robison
b4d47e423d keep touched_or_deleted records 2022-03-15 15:34:09 -03:00
Victor Shyba
bf86e7658e add blob endpoint for listing announced blobs 2022-03-15 15:34:09 -03:00
Victor Shyba
98c0200c53 add /peers.csv to monitoring endpoint 2022-03-15 15:34:09 -03:00
Brendon J. Brewer
a850fb64c8 Rename trending 2022-03-15 15:34:09 -03:00
Jack Robison
02b0f3e9f2 fix missing es notification for support amount changing 2022-03-15 15:34:09 -03:00
Jack Robison
948a58f628 improve tests 2022-03-15 15:34:09 -03:00
Jack Robison
21a2076f26 improve resolve tests 2022-03-15 15:34:09 -03:00
Jack Robison
efd8fd81ed fix duplicate trending notification to ES 2022-03-15 15:34:09 -03:00
Jack Robison
5e73f82c23 fix effective amount for resolve/ES being off while claims/supports are unactivated 2022-03-15 15:34:09 -03:00
Jack Robison
01691dd92a fix test_colliding_short_id 2022-03-15 15:34:09 -03:00
Jack Robison
5fe339c53b update default tcp/blob port to be the same as the default udp/dht port (4444) 2022-03-15 15:34:09 -03:00
Victor Shyba
c4b86454b5 log unexpected errors, rename task/loop 2022-03-15 15:34:09 -03:00
Victor Shyba
60969e4817 clear cache on test assertions 2022-03-15 15:34:09 -03:00
Victor Shyba
48bb84fc1e make sure the downloader always stops gracefully 2022-03-15 15:34:09 -03:00
Victor Shyba
b055c25156 cache space stats from running components so status is instant 2022-03-15 15:34:09 -03:00
Victor Shyba
35905b99ff add index for blob table so size summaries are faster 2022-03-15 15:34:09 -03:00
Victor Shyba
806857c066 download from stored announcements and dont reannounce 2022-03-15 15:34:09 -03:00
Victor Shyba
ba8a7fc351 improve disk space manager status, include more info and unify space queries 2022-03-15 15:34:09 -03:00
Victor Shyba
a36fd76eb1 normal_blobs->stream_blobs, proactive->background 2022-03-15 15:34:09 -03:00
Victor Shyba
02994164be fix free space calculation, test it and give a margin of 10mb before starting so it doesnt insist when full 2022-03-15 15:34:09 -03:00
Victor Shyba
c3b89a896b move more logic out of the downloader component 2022-03-15 15:34:09 -03:00
Victor Shyba
b0bf6eff16 extract background downloader to its own class 2022-03-15 15:34:09 -03:00
Victor Shyba
47c4cb800d add analytics event for network disk space 2022-03-15 15:34:09 -03:00
Victor Shyba
c5f6a4ca1c announce orphan blobs manually, as that was done when save stream 2022-03-15 15:34:09 -03:00
Victor Shyba
813108b9d8 cleanup background downloader blobs from conf 2022-03-15 15:34:09 -03:00
Victor Shyba
95703b3af5 separated network seeding space metrics 2022-03-15 15:34:09 -03:00
Victor Shyba
2ceec7c3d4 don't save streams for network blobs and bypass disk space manager 2022-03-15 15:34:09 -03:00
Victor Shyba
3985f1578e add conf for network seeding space limit 2022-03-15 15:34:09 -03:00
Victor Shyba
355ccb1e32 schedule the download task instead 2022-03-15 15:34:09 -03:00
Victor Shyba
04d2c6f013 fix unit tests from component dependency chain changes 2022-03-15 15:34:09 -03:00
Victor Shyba
8d6a7101f6 download from DHT 2022-03-15 15:34:09 -03:00
Victor Shyba
abbd8473bb drop channel support, prepare to hook into DHT 2022-03-15 15:34:09 -03:00
Victor Shyba
02b2103d94 handle case where something that isn't a sd blob gets hit 2022-03-15 15:34:09 -03:00
Victor Shyba
85474ae381 no api yet 2022-03-15 15:34:09 -03:00
Victor Shyba
9d3c401abb download only blobs 2022-03-15 15:34:09 -03:00
Victor Shyba
32a7d1a4a0 fix tests 2022-03-15 15:34:09 -03:00
Victor Shyba
602cd5bd2e fix exception arguments 2022-03-15 15:34:09 -03:00
Victor Shyba
4b88b191e7 test add/remove/list subscriptions 2022-03-15 15:34:09 -03:00
Victor Shyba
e449cd01ba fix and test main api 2022-03-15 15:34:08 -03:00
Victor Shyba
b1db42acf4 download all blobs and check that on tests 2022-03-15 15:34:08 -03:00
Victor Shyba
fcbe8cf00b create downloader component and initial tests 2022-03-15 15:34:08 -03:00
Victor Shyba
a1866c40f5 with the fix we no longer need to restart the stream 2022-03-15 15:34:08 -03:00
Victor Shyba
d78540f3cc fix tests by checking there are actual blobs being deleted 2022-03-15 15:34:08 -03:00
Victor Shyba
7c1c04674c dont lose results on duplicates, just warn 2022-03-15 15:34:08 -03:00
Victor Shyba
694aeab75c remove tried_for_this_blob so banned peers are retried for same blob 2022-03-15 15:34:08 -03:00
Cristian Vicas
90aaf64b77 Bug [#2070] where blob_get RPC timed out.
Both stream.downloader and blob_exchange.downloader paths are adding the fixed_peers list to the DHT node.
Tested jsonrpc_blob_get daemon call.
2022-03-15 15:34:08 -03:00
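A hedged sketch of the fix this commit describes: resolve the configured `fixed_peers` into DHT peer objects and feed them to the same queue the peer search fills, so `blob_get` reaches them as well. `get_kademlia_peers_from_hosts` is the helper imported in the diff further down; the queue wiring shown here is an assumption, not the SDK's exact code.
```
import asyncio
from lbry.dht.node import get_kademlia_peers_from_hosts

async def add_fixed_peers(peer_queue: asyncio.Queue, fixed_peers):
    # resolve host/port pairs from the config and hand them to the downloader's
    # peer queue alongside peers discovered through the DHT (sketch)
    if fixed_peers:
        peer_queue.put_nowait(await get_kademlia_peers_from_hosts(fixed_peers))
```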
Jack Robison
6eeabb1a1a use mempool cache in transaction_get_batch 2022-03-15 15:34:08 -03:00
FemtosecondLaser
77d58b82a0 Added an integration test covering the following scenario:
On start, if download dir is non-writable - daemon terminates with a helpful message.
2022-03-15 15:34:08 -03:00
FemtosecondLaser
a7af877e9e Improved the readability of the tests. 2022-03-15 15:34:08 -03:00
FemtosecondLaser
25092f56be Changed the tests to execute against a real file system instead of a fake one. 2022-03-15 15:34:08 -03:00
FemtosecondLaser
e55f9dd21e renamed the test class to be more specific about the sut 2022-03-15 15:34:08 -03:00
FemtosecondLaser
6c10509705 removed redundant tests
renamed a test to be more specific about the kind of the precondition
2022-03-15 15:34:08 -03:00
FemtosecondLaser
bed2e253b2 Modified ensure_directory_exists() to check if the directory is writable by the process. 2022-03-15 15:34:08 -03:00
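A minimal sketch of the writability check these commits describe, assuming a plain `PermissionError`; the SDK's actual exception type and message may differ.
```
import os

def ensure_directory_exists(path: str):
    # create the directory if needed, then fail early with a clear message
    # instead of surfacing an obscure error later in the download path (sketch)
    if not os.path.isdir(path):
        os.makedirs(path, exist_ok=True)
    if not os.access(path, os.W_OK):
        raise PermissionError(f"directory is not writable by this process: '{path}'")
```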
Jack Robison
c683ae9ed6 Update docker-compose-wallet-server.yml 2022-03-15 15:34:08 -03:00
Jack Robison
8ce1cebd07 add script to setup docker volumes from snapshots 2022-03-15 15:34:08 -03:00
Jack Robison
9029b4b5c3 fix attempting to update trending on abandoned claims 2022-03-15 15:34:08 -03:00
Jack Robison
db0831d55b update docs 2022-03-15 15:34:08 -03:00
Jack Robison
b3705073a3 skip loading tx/claim caches in the elastic sync script when not needed 2022-03-15 15:34:08 -03:00
Jack Robison
755e8ce101 update trending with help from @eggplantbren 2022-03-15 15:34:08 -03:00
Jack Robison
fd2ab47a16 update sync script to handle ES falling behind leveldb on shutdown 2022-03-15 15:34:08 -03:00
Jack Robison
9aafb7a743 logging 2022-03-15 15:34:08 -03:00
Jack Robison
aa1b20cf7b update docker 2022-03-15 15:34:08 -03:00
Jack Robison
3cd2227c29 update Env to accept parameters from cli args 2022-03-15 15:34:08 -03:00
Jack Robison
27cc83c03b set default CACHE_MB to 1024mb and the default QUERY_TIMEOUT_MS to 10s 2022-03-15 15:34:08 -03:00
Jack Robison
cb6db3f3d8 remove unused hub env settings 2022-03-15 15:34:08 -03:00
Jack Robison
9020e39a83 update lbry-hub-elastic-sync to support resyncing recent blocks 2022-03-15 15:34:08 -03:00
Jack Robison
64509ca95d add CACHE_ALL_CLAIM_TXOS hub setting 2022-03-15 15:34:08 -03:00
Jack Robison
dafd62104b remove dead code 2022-03-15 15:34:08 -03:00
Jack Robison
e09588e433 add CACHE_ALL_TX_HASHES setting to optionally use more memory to save i/o 2022-03-15 15:34:08 -03:00
Jack Robison
50e00192e8 sleeps 2022-03-15 15:34:08 -03:00
Jack Robison
e6d470f110 improve resolve caching 2022-03-15 15:34:08 -03:00
Jack Robison
eeaf9a72e2 threadpools for block processor and es sync reader 2022-03-15 15:34:08 -03:00
Jack Robison
a4ad1bb0a9 doc strings 2022-03-15 15:34:08 -03:00
Jack Robison
e0086682b9 improve claims_producer performance 2022-03-15 15:34:08 -03:00
Jack Robison
15ac2ade59 resolve lru cache 2022-03-15 15:34:08 -03:00
Jack Robison
09e0d5c55e add block_txs index 2022-03-15 15:34:08 -03:00
Jack Robison
dfe855c0c9 smaller caches 2022-03-15 15:34:08 -03:00
Victor Shyba
a2996768fe add optional prometheus to dht_node script 2022-03-15 15:34:08 -03:00
Jack Robison
a53b454d86 Update daemon.py
docstring
2022-03-15 15:34:08 -03:00
Cristian Vicas
01557b599a Updated documentation for RPC calls: status, blob_list. 2022-03-15 15:34:08 -03:00
belikor
65323b4169 file_manager: raise new InvalidStreamURLError if the URL is invalid
When using `lbrynet get URL`, if the URL is not a valid URL
the function `url.URL.parse` will raise a `ValueError` exception
which will produce a whole backtrace.

For example, this is the case if we provide a channel name
with a forward slash but without a stream name.
```
lbrynet get @Non-existing/
```

```
Traceback (most recent call last):
  File "/opt/git/lbry-sdk/lbry/file/file_manager.py", line 84, in download_from_uri
    if not URL.parse(uri).has_stream:
  File "/opt/git/lbry-sdk/lbry/schema/url.py", line 114, in parse
    raise ValueError('Invalid LBRY URL')
ValueError: Invalid LBRY URL
WARNING  lbry.extras.daemon.daemon:1110: Error downloading Non-existing/: Invalid LBRY URL
```

Now we raise a new `InvalidStreamURLError` which can be trapped in the upper functions
that use `url.URL.parse` such as `FileManager.download_from_uri`.
If we do this the traceback won't be shown.
```
WARNING  lbry.file.file_manager:252:
Failed to download Non-existing/: Invalid LBRY stream URL: '@Non-existing/'
WARNING  lbry.extras.daemon.daemon:1110:
Error downloading Non-existing/: Invalid LBRY stream URL: '@Non-existing/'
```

This handles the case when trying to download only "channel" parts
without the claim part.
```
lbrynet get @Non-existing
lbrynet get @Non-existing/
lbrynet get Non-existing/
```
2022-03-15 15:34:08 -03:00
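A hedged sketch of the wrapping pattern the commit above describes, so callers such as `FileManager.download_from_uri` can trap a specific error instead of a bare `ValueError`. Only `URL.parse` and the error name come from the commit text; the constructor signature and the helper function are assumptions.
```
import logging
from lbry.schema.url import URL  # URL.parse raises ValueError('Invalid LBRY URL')

log = logging.getLogger(__name__)

class InvalidStreamURLError(Exception):
    """A specific error that upper layers can catch without printing a traceback."""
    def __init__(self, url: str):
        super().__init__(f"Invalid LBRY stream URL: '{url}'")

def parse_stream_url(uri: str):
    try:
        return URL.parse(uri)
    except ValueError as err:
        raise InvalidStreamURLError(uri) from err

async def download_from_uri(uri: str):
    try:
        url = parse_stream_url(uri)
    except InvalidStreamURLError as err:
        # logged as a warning, no traceback reaches the user
        log.warning("Error downloading %s: %s", uri, err)
        return None
    return url  # a real implementation would continue the download from here
```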
Jack Robison
cdef8b4852 clear es attributes during initial sync 2022-03-15 15:34:08 -03:00
Jack Robison
8d72142390 fix setting references on txos in extra_txos 2022-03-15 15:34:08 -03:00
Jack Robison
172a0f0ac2 fix reposted channel being missing from resolve result
-improve names of the resolve related methods in `LevelDB`
2022-03-15 15:34:08 -03:00
Jack Robison
1022080be6 fix compactify script 2022-03-15 15:34:08 -03:00
Jack Robison
8031a55dbb fix test 2022-03-15 15:34:08 -03:00
Jack Robison
1ddd29656e move test_transaction_commands, test_internal_transaction_api , and test_transactions into their own runner
-move test_resolve_command to its own runner
2022-03-15 15:34:08 -03:00
Jack Robison
cb5c39a159 only save undo info for blocks within reorg limit 2022-03-15 15:34:08 -03:00
Jack Robison
9ec510c742 fix channel count 2022-03-15 15:34:08 -03:00
Jack Robison
aa80cf47b9 try to fix test_sqlite_coin_chooser 2022-03-15 15:34:08 -03:00
Jack Robison
e4e1d42e09 move test_claim_commands and test_resolve_command into new directory 2022-03-15 15:34:08 -03:00
Jack Robison
ccc9a900ef sort touched or deleted claim hashes 2022-03-15 15:34:08 -03:00
Jack Robison
c2a5ff0ae3 block processor db refactoring
-access db through HubDB class, don't use plyvel.DB directly
-add channel count and support amount prefixes
2022-03-15 15:34:08 -03:00
Jack Robison
afa710dcb5 fix and add test for abandoning a controlling in the same block a new claim is made 2022-03-15 15:34:08 -03:00
Jack Robison
d0ed7593de fix test_sqlite_coin_chooser 2022-03-15 15:34:08 -03:00
Jack Robison
830fc7f8cc fix ES index name so it stays the same within a test case 2022-03-15 15:34:08 -03:00
Jack Robison
88e8926a59 fix bug with early takeover by an update 2022-03-15 15:34:08 -03:00
Jack Robison
65e39b8e76 fix test 2022-03-15 15:34:08 -03:00
Jack Robison
acd0c2188f fix activating non existent claim 2022-03-15 15:34:08 -03:00
Jack Robison
6155cda66f add tests for takeovers from amount changes in updates before/on/after activation 2022-03-15 15:34:08 -03:00
Jack Robison
f77f2f6e80 batch address history notifications 2022-03-15 15:34:08 -03:00
Jack Robison
68d397a269 improve leveldb caching 2022-03-15 15:34:08 -03:00
Jack Robison
66ed57c834 refactor reload_blocking_filtering_streams 2022-03-15 15:34:08 -03:00
Jack Robison
8b42475d59 fix filtering error upon abandon 2022-03-15 15:34:08 -03:00
Jack Robison
de9159d83c faster read_claim_txos 2022-03-15 15:34:08 -03:00
Jack Robison
b66adfdf78 remove unused executor 2022-03-15 15:34:08 -03:00
Victor Shyba
a4be5eb1d8 fix blocking and filtering 2022-03-15 15:34:08 -03:00
Jack Robison
c8b6db4b84 fix test 2022-03-15 15:34:08 -03:00
Jack Robison
7d3eef1fd9 fix logging number of notified sessions 2022-03-15 15:34:08 -03:00
Jack Robison
6e93c5bc0d fix update that initiates takeover not being delayed 2022-03-15 15:34:08 -03:00
Jack Robison
16b21b8bda use hub binary from https://github.com/lbryio/hub/pull/13 2022-03-15 15:34:08 -03:00
Jack Robison
a0e0039988 handle invalid release time 2022-03-15 15:34:08 -03:00
Jack Robison
3970e91766 name and normalized -> claim_name and normalized_name
-update generated pb files
2022-03-15 15:34:08 -03:00
Jack Robison
c4126edb79 fix es sync.py 2022-03-15 15:34:08 -03:00
Jack Robison
589a6588da fix all_claims_producer 2022-03-15 15:34:08 -03:00
Jack Robison
1ec4111b9f merge conflicts 2022-03-15 15:34:08 -03:00
Jack Robison
c00912015a claim search fixes 2022-03-15 15:34:08 -03:00
Jack Robison
0d19439982 fix release_time and creation_timestamp 2022-03-15 15:34:08 -03:00
Jack Robison
12f790ab01 delete unused code 2022-03-15 15:34:08 -03:00
Jack Robison
4e687c4fd8 test_spec_example 2022-03-15 15:34:08 -03:00
Jack Robison
ff960fda0e non blocking claim producer 2022-03-15 15:34:08 -03:00
Jack Robison
72e45b5cb1 fix tests 2022-03-15 15:34:08 -03:00
Jack Robison
b59e0490a2 renormalization 2022-03-15 15:34:08 -03:00
Jack Robison
94754f2047 add trending integration test 2022-03-15 15:34:08 -03:00
Jack Robison
ca335b7a65 fix trending overflow 2022-03-15 15:34:08 -03:00
Jack Robison
8f9e6a519d refactor trending 2022-03-15 15:34:08 -03:00
Jack Robison
da75968078 trending fixes 2022-03-15 15:34:08 -03:00
Jack Robison
3800fb1ab0 make app backward compatible with trending_score
-update trending decay function to zero out low trending score values faster
2022-03-15 15:34:08 -03:00
Jack Robison
a51bf6a4fa log time to update and decay trending in elasticsearch 2022-03-15 15:34:08 -03:00
Jack Robison
0be141188c skip integrity errors for trending spikes 2022-03-15 15:34:08 -03:00
Jack Robison
6f2b985b73 update trending in elasticsearch
-add TrendingPrefixSpike to leveldb
-expose `TRENDING_HALF_LIFE`, `TRENDING_WHALE_HALF_LIFE` and `TRENDING_WHALE_THRESHOLD` hub settings
2022-03-15 15:34:08 -03:00
Brendon J. Brewer
8eba05308d constants 2022-03-15 15:34:08 -03:00
Brendon J. Brewer
ecab123a3a Put trending score into ES 2022-03-15 15:34:08 -03:00
Brendon J. Brewer
180f6c4519 Mark claims as touched 2022-03-15 15:34:08 -03:00
Jack Robison
dc4e362e10 fix non normalized canonical urls 2022-03-15 15:34:08 -03:00
Jack Robison
7e78fdec04 handle unicode error for unnormalized names 2022-03-15 15:34:08 -03:00
Jack Robison
e212ce23e3 prefix db 2022-03-15 15:34:08 -03:00
Brendon J. Brewer
0c7be8975f trending 2022-03-15 15:34:08 -03:00
Jack Robison
6cba95c148 fix claims not having non-normalized names 2022-03-15 15:34:08 -03:00
Jack Robison
f62d128621 update test 2022-03-15 15:34:08 -03:00
Jack Robison
91a86fd12f merge conflicts 2022-03-15 15:34:08 -03:00
Jack Robison
c819d494c3 fix missing fields in reposts 2022-03-15 15:34:08 -03:00
Jack Robison
b4853c5f67 fix merge conflicts and simplify extract_doc 2022-03-15 15:34:08 -03:00
Jack Robison
c68334b421 fix bulk es sync 2022-03-15 15:34:08 -03:00
Jack Robison
613acc7b00 fix tests 2022-03-15 15:34:08 -03:00
Jack Robison
371fc4d68c fix tests 2022-03-15 15:34:08 -03:00
Jack Robison
7ef0ae12a1 handle invalid claim update 2022-03-15 15:34:08 -03:00
Jack Robison
dc34e8884c fix keeping claim_hash_to_txo and txo_to_claim in sync 2022-03-15 15:34:08 -03:00
Victor Shyba
08d635322e implement blocking and filtering 2022-03-15 15:34:08 -03:00
Jack Robison
bc0c1b9a3e fix _get_pending_claim_name 2022-03-15 15:34:08 -03:00
Jack Robison
c42ee926da fix applying expiration fork 2022-03-15 15:34:08 -03:00
Jack Robison
c59d08080e update iterators to use pack_partial_key 2022-03-15 15:34:08 -03:00
Jack Robison
94e0624024 delete lbry/wallet/server/storage.py
-expose leveldb lru cache size as `CACHE_MB` hub param
2022-03-15 15:34:08 -03:00
Jack Robison
e94a2c7c94 remove dead code 2022-03-15 15:34:07 -03:00
Jack Robison
cd1ce32377 non blocking mempool loop 2022-03-15 15:34:07 -03:00
Jack Robison
d1560ef09b faster es sync 2022-03-15 15:34:07 -03:00
Jack Robison
6699d1e2f8 run advance_block in threadpool 2022-03-15 15:34:07 -03:00
Jack Robison
f1fbfa1b39 use claim_to_txo cache 2022-03-15 15:34:07 -03:00
Jack Robison
0d86717a9a faster _cached_get_active_amount for claims
-remove dead code
2022-03-15 15:34:07 -03:00
Jack Robison
528af27e4a fix claim search by fee for claims without fees 2022-03-15 15:34:07 -03:00
Jack Robison
fe69afaa56 clear claim_to_txo cache before reading 2022-03-15 15:34:07 -03:00
Jack Robison
31d7823498 handle claims that dont exist in ES sync 2022-03-15 15:34:07 -03:00
Jack Robison
ef6ec03161 in memory claim_to_txo and txo_to_claim dictionaries 2022-03-15 15:34:07 -03:00
Jack Robison
68596be1b9 fix spend_utxo 2022-03-15 15:34:07 -03:00
Jack Robison
a48fe84971 split flush from advance_block 2022-03-15 15:34:07 -03:00
Jack Robison
bfbe7c1bf5 rename extend_ops 2022-03-15 15:34:07 -03:00
Jack Robison
085ce1ff16 faster spend_utxo 2022-03-15 15:34:07 -03:00
Jack Robison
c81c0d9480 faster get_future_activated 2022-03-15 15:34:07 -03:00
Jack Robison
05b6bdb8f6 threadpool 2022-03-15 15:34:07 -03:00
Jack Robison
991d4f8859 cleanup 2022-03-15 15:34:07 -03:00
Jack Robison
22c75605ee fix getting block hash during reorg 2022-03-15 15:34:07 -03:00
Jack Robison
f52faa8d14 fix test 2022-03-15 15:34:07 -03:00
Jack Robison
8dd88a2780 fix spends in address histories 2022-03-15 15:34:07 -03:00
Jack Robison
7ad2234983 faster es sync 2022-03-15 15:34:07 -03:00
Jack Robison
fb5c008fc5 fix expiring channels 2022-03-15 15:34:07 -03:00
Jack Robison
f55b6bdc71 fix abandoning signed claims in the same tx as their channel
-fix canonical/short url in es
2022-03-15 15:34:07 -03:00
Jack Robison
de9edb0695 handle failure to generate a short id 2022-03-15 15:34:07 -03:00
Jack Robison
2180e24bc1 fix resolve by short id 2022-03-15 15:34:07 -03:00
Jack Robison
749e64b101 tests 2022-03-15 15:34:07 -03:00
Jack Robison
b4eaa5f918 move MemPool into BlockProcessor 2022-03-15 15:34:07 -03:00
Jack Robison
d4194954d3 combine MemPool and Notifications classes 2022-03-15 15:34:07 -03:00
Jack Robison
6e221fc7d9 fix touched hashXs notifications 2022-03-15 15:34:07 -03:00
Jack Robison
ea1285cd9f reorg claims in the search index 2022-03-15 15:34:07 -03:00
Jack Robison
4e77fa100b small fixes 2022-03-15 15:34:07 -03:00
Jack Robison
babb76d90d rename 2022-03-15 15:34:07 -03:00
Jack Robison
f73153ed8d delete stale code 2022-03-15 15:34:07 -03:00
Jack Robison
acfc1f56ee simplify advance and reorg 2022-03-15 15:34:07 -03:00
Jack Robison
81773a6497 update limited_history 2022-03-15 15:34:07 -03:00
Jack Robison
25cf751158 update lookup_utxos 2022-03-15 15:34:07 -03:00
Jack Robison
a9c8061c0c update RevertableOpStack 2022-03-15 15:34:07 -03:00
Jack Robison
f53b1ee290 add remaining db prefixes 2022-03-15 15:34:07 -03:00
Jack Robison
85b46ecff6 update plyvel to 1.3.0
https://github.com/lbryio/lbry-sdk/pull/3205#issuecomment-877564489
2022-03-15 15:34:07 -03:00
Jack Robison
fd2753b95a cleanup 2022-03-15 15:34:07 -03:00
Jack Robison
768934e1cc skip es sync during initial hub sync, halt the hub upon finishing initial sync 2022-03-15 15:34:07 -03:00
Jack Robison
ad7dee3e7f fix es sync 2022-03-15 15:34:07 -03:00
Jack Robison
354c69bf4f fix non localhost elasticsearch 2022-03-15 15:34:07 -03:00
Jack Robison
52aa045635 improve channel invalidation test 2022-03-15 15:34:07 -03:00
Jack Robison
1c8f92239d fix stream_update --clear_channel flag 2022-03-15 15:34:07 -03:00
Jack Robison
0c85de7839 fix signed claim invalidation corner cases 2022-03-15 15:34:07 -03:00
Jack Robison
d74d06d97b extra deletes
-the channel_to_claim/claim_to_channel entries already get deleted when the claim txo is spent
2022-03-15 15:34:07 -03:00
Jack Robison
55351c5842 fix has_no_source for reposts 2022-03-15 15:34:07 -03:00
Jack Robison
e67152ec14 update channel_to_claim and claim_to_channel at the same time 2022-03-15 15:34:07 -03:00
Jack Robison
287ff0a557 typing and fix error string 2022-03-15 15:34:07 -03:00
Jack Robison
bf539d67ea update staged txo_to_claim after invalidating channel sig
-fixes abandon of claim with invalidated signature and an update in same block
2022-03-15 15:34:07 -03:00
Jack Robison
69651453dd missing channel_to_claim delete 2022-03-15 15:34:07 -03:00
Jack Robison
7f743ff3f1 fix RepostKey 2022-03-15 15:34:07 -03:00
Jack Robison
a1b7c61b56 typing 2022-03-15 15:34:07 -03:00
Jack Robison
fbd1d53542 fix 2022-03-15 15:34:07 -03:00
Jack Robison
02adc74e2c cleanup 2022-03-15 15:34:07 -03:00
Jack Robison
3a452a3b2a rename effective_amount prefix 2022-03-15 15:34:07 -03:00
Jack Robison
91f8b3b505 fix undeleted claim_to_channel record 2022-03-15 15:34:07 -03:00
Jack Robison
ce09f2eb0f cleanup 2022-03-15 15:34:07 -03:00
Jack Robison
02d479b92d fix mismatch in claim_to_txo<->txo_to_claim 2022-03-15 15:34:07 -03:00
Jack Robison
bce14c3d79 rename things
-fix effective amount integrity error
2022-03-15 15:34:07 -03:00
Jack Robison
90cfcaac40 fix 2022-03-15 15:34:07 -03:00
Jack Robison
02563db2c7 pretty print 2022-03-15 15:34:07 -03:00
Jack Robison
cfa32a3986 fix duplicate activate 2022-03-15 15:34:07 -03:00
Jack Robison
ab4a6bc5b8 fix updating resolve by effective amount after abandoning support 2022-03-15 15:34:07 -03:00
Jack Robison
f01b8c849d use RevertableOpStack in _get_takeover_ops 2022-03-15 15:34:07 -03:00
Jack Robison
6a46f50a35 remove debug prints 2022-03-15 15:34:07 -03:00
Jack Robison
468ed91ee3 add RevertableOpStack to verify consistency of ops as they're staged 2022-03-15 15:34:07 -03:00
Jack Robison
b856e2120a fix fee amount overflow in es 2022-03-15 15:34:07 -03:00
Jack Robison
fd7bfbea78 faster claim producer
-make batches of claim txos from the iterator, and sort by tx hash before fetching to maximize cache and read ahead hits
2022-03-15 15:34:07 -03:00
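A small sketch of the batching idea this commit describes: collect claim TXOs from the iterator into batches and sort each batch by tx hash before fetching, so reads land in key order and benefit from the cache and read-ahead. The names and batch size are illustrative.
```
def batched_claim_producer(claim_txo_iterator, fetch_claims, batch_size=1000):
    # fetch_claims takes a list of claim txos and returns their documents (assumed)
    batch = []
    for claim_txo in claim_txo_iterator:
        batch.append(claim_txo)
        if len(batch) >= batch_size:
            yield from fetch_claims(sorted(batch, key=lambda txo: txo.tx_hash))
            batch.clear()
    if batch:
        yield from fetch_claims(sorted(batch, key=lambda txo: txo.tx_hash))
```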
Jack Robison
a74434e269 imports 2022-03-15 15:34:07 -03:00
Jack Robison
92be63ff5b close db in sync script 2022-03-15 15:34:07 -03:00
Jack Robison
4ce16b6509 try default block size 2022-03-15 15:34:07 -03:00
Jack Robison
2341667d04 update elastic sync 2022-03-15 15:34:07 -03:00
Jack Robison
89e7c8582e invalidate channel signatures upon channel abandon 2022-03-15 15:34:07 -03:00
Jack Robison
da4e4ecd23 _prepare_claim_for_sync generators 2022-03-15 15:34:07 -03:00
Jack Robison
c85648d43b logging 2022-03-15 15:34:07 -03:00
Jack Robison
c90331af98 use default sync=False during write_batch 2022-03-15 15:34:07 -03:00
Jack Robison
e5461f6d4f genesis_bytes attribute 2022-03-15 15:34:07 -03:00
Jack Robison
ac82d6b27b fix 2022-03-15 15:34:07 -03:00
Jack Robison
01db974040 disable es (revert) 2022-03-15 15:34:07 -03:00
Jack Robison
6ae6cf733d _cached_get_active_amount 2022-03-15 15:34:07 -03:00
Jack Robison
ad1d25d945 fix removing unactivated support 2022-03-15 15:34:07 -03:00
Jack Robison
69ed47fc22 debug 2022-03-15 15:34:07 -03:00
Jack Robison
7c62654534 leveldb tuning 2022-03-15 15:34:07 -03:00
Jack Robison
531e6c1a61 debug 2022-03-15 15:34:07 -03:00
Jack Robison
066f797ad4 refactor ClaimToTXO prefix 2022-03-15 15:34:07 -03:00
Jack Robison
9f0611f3d9 filter abandoned claims from those considered for early activation 2022-03-15 15:34:07 -03:00
Jack Robison
b0d2efd613 only do early takeover on a larger amount (fix case where they're equal) 2022-03-15 15:34:07 -03:00
Jack Robison
073283a433 filter supported claim hashes for claims that dont exist from early takeover/activations 2022-03-15 15:34:07 -03:00
Jack Robison
5be04448ea remove extra open functions 2022-03-15 15:34:07 -03:00
Jack Robison
5541b80179 fix flush id 2022-03-15 15:34:07 -03:00
Jack Robison
35232b3650 fix clearing pending_support caches upon abandon 2022-03-15 15:34:07 -03:00
Jack Robison
f4ca3ea66b fix putting spent unactivated supports in removed_active_support 2022-03-15 15:34:07 -03:00
Jack Robison
306efa17cc fix syncing claim to es where channel is in the same block 2022-03-15 15:34:07 -03:00
Jack Robison
83107ad877 ignore activation for headless supports 2022-03-15 15:34:07 -03:00
Jack Robison
b0cf25bf5c faster get_future_activated 2022-03-15 15:34:07 -03:00
Jack Robison
6330424bcd debugging 2022-03-15 15:34:07 -03:00
Jack Robison
5f3850bfa2 prints 2022-03-15 15:34:07 -03:00
Jack Robison
82e6658483 flush count 2022-03-15 15:34:07 -03:00
Jack Robison
49f4add8d1 tests 2022-03-15 15:34:07 -03:00
Jack Robison
6ea96e79bd reposts 2022-03-15 15:34:07 -03:00
Jack Robison
9ad31008a5 fix updating the ES search index
-update search index to use ResolveResult tuples
2022-03-15 15:34:07 -03:00
Jack Robison
966f47a5b1 bid ordered resolve, feed ES claim data from block processor 2022-03-15 15:34:07 -03:00
Jack Robison
8711ece274 fix duplicate update op for early activating claim 2022-03-15 15:34:07 -03:00
Jack Robison
c0e2d56f55 fix early takeovers by not-yet activated claims 2022-03-15 15:34:07 -03:00
Jack Robison
73da5a35b8 test_early_takeover_abandoned_controlling_support 2022-03-15 15:34:07 -03:00
Jack Robison
2ba044ee4e require previous_winning arg for get_takeover_name_ops 2022-03-15 15:34:07 -03:00
Jack Robison
d69180da71 fix takeover edge case
if a claim with a higher value than that of a claim taking over a name exists but isn't yet activated, activate it early and have it take over the name
2022-03-15 15:34:07 -03:00
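A minimal sketch of the rule stated above, using an illustrative data model rather than the hub's actual prefix-db structures: a not-yet-activated claim with a larger amount than the current takeover candidate is activated early so it can win the name (see commit b0d2efd613 earlier in this list, which keeps equal amounts from triggering an early takeover).
```
from typing import List, NamedTuple

class Claim(NamedTuple):
    claim_hash: bytes
    amount: int             # effective amount in dewies
    activation_height: int  # height at which it would normally activate

def takeover_winner(candidate: Claim, pending: List[Claim], height: int) -> Claim:
    # pick the claim that takes over the name at `height` (sketch)
    winner = candidate
    for claim in pending:
        if claim.activation_height > height and claim.amount > winner.amount:
            winner = claim._replace(activation_height=height)  # activate early
    return winner
```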
Jack Robison
b6e4cb9102 comments 2022-03-15 15:34:07 -03:00
Jack Robison
99d16fcb5a bid ordered resolve (WIP) 2022-03-15 15:34:07 -03:00
Jack Robison
c8d0d765d1 fix udp ping test 2022-03-15 15:34:07 -03:00
Jack Robison
96c318ee88 DBError 2022-03-15 15:34:07 -03:00
Jack Robison
c1ac4d8261 remove unused COIN file 2022-03-15 15:34:07 -03:00
Jack Robison
e678df86e0 claim takeovers 2022-03-15 15:34:07 -03:00
Jack Robison
6aa124592d move get_expiration_height and claimtrie constants to Coin class 2022-03-15 15:34:07 -03:00
Jack Robison
86b6b860dc tests 2022-03-15 15:34:07 -03:00
Jack Robison
53ee3a5f80 claim activations and takeovers (WIP) 2022-03-15 15:34:07 -03:00
Jack Robison
8af410b184 advance_blocks -> advance_block 2022-03-15 15:34:07 -03:00
Jack Robison
8cb3fe8831 dead code 2022-03-15 15:34:07 -03:00
Jack Robison
2e92f3acad LBRYBlockProcessor -> BlockProcessor
- temporarily disable claim_search
2022-03-15 15:34:07 -03:00
Jack Robison
4e58094e4b rebase 2022-03-15 15:34:07 -03:00
Jack Robison
1445340bba tests 2022-03-15 15:34:07 -03:00
Jack Robison
eb0eacd404 add wrapper for getnamesintrie
-used for verifying db state against lbrycrd
2022-03-15 15:34:07 -03:00
Jack Robison
1ff3ab4b83 db state struct
-remove dead code
2022-03-15 15:34:07 -03:00
Jack Robison
06841a4fde claim expiration 2022-03-15 15:34:07 -03:00
Jack Robison
d57cd5acd7 get_claim_by_claim_id 2022-03-15 15:34:07 -03:00
Jack Robison
b40cda78ee claims db
-move all leveldb prefixes to DB_PREFIXES enum
-add serializable RevertableOp interface for key/value puts and deletes
-resolve urls from leveldb
2022-03-15 15:34:07 -03:00
Jack Robison
0a833f5f83 transaction_num_mapping 2022-03-15 15:34:07 -03:00
Jack Robison
ef3bab16d3 disable sqlite in block processor 2022-03-15 15:34:07 -03:00
Jack Robison
3e826d0a5d named tuples 2022-03-15 15:34:07 -03:00
Jack Robison
bcd2c7d90b consolidate flush_backup 2022-03-15 15:34:07 -03:00
Jack Robison
eda1b0b3fc remove lbry.wallet.server.history 2022-03-15 15:34:07 -03:00
Jack Robison
bc7fe680c0 consolidate leveldb block advance/reorg
-move methods from History to LevelDB
2022-03-15 15:34:07 -03:00
Jack Robison
3ed748f2fd atomic flush_dbs 2022-03-15 15:34:07 -03:00
Jack Robison
9634753efd combine leveldb databases 2022-03-15 15:34:07 -03:00
Jack Robison
31df4f0bb5 Merkle staticmethods 2022-03-15 15:34:07 -03:00
Lex Berezhny
1673b8debc v0.105.0 2022-03-15 15:34:07 -03:00
Victor Shyba
3d7ece91eb fix file reflect and add test 2022-03-15 15:34:07 -03:00
Lex Berezhny
1004a83dae disk space metrics 2022-03-15 15:34:07 -03:00
Victor Shyba
e7b5b82909 dht: use bytes hex/fromhex instead of binascii 2022-03-15 15:34:07 -03:00
Victor Shyba
c17fddddcd add grin to dht known list 2022-03-15 15:34:07 -03:00
Victor Shyba
2664a34d52 add madiator to known dht nodes 2022-03-15 15:34:07 -03:00
Victor Shyba
c9bf9691e3 add option to set bootstrap_node 2022-03-15 15:34:07 -03:00
Victor Shyba
ad5e5fed82 add dockerfile for dht node 2022-03-15 15:34:07 -03:00
Victor Shyba
0e2fbe1c40 configure where to save peers 2022-03-15 15:34:07 -03:00
Victor Shyba
62e65d61f4 define arg types 2022-03-15 15:34:07 -03:00
Victor Shyba
a237cbd963 add dht seed node script 2022-03-15 15:34:07 -03:00
Victor Shyba
78b12dc411 fix conflict with imported function 2022-03-15 15:34:07 -03:00
Victor Shyba
27457db5c3 errors for empty and missing file on publish 2022-03-15 15:34:07 -03:00
Victor Shyba
e569fdd43c generalize stream empty to argument empty 2022-03-15 15:34:07 -03:00
Victor Shyba
00a850500d empty stream name error for user input 2022-03-15 15:34:07 -03:00
Victor Shyba
25aa2f95a5 error for missing channel private key 2022-03-15 15:34:07 -03:00
Victor Shyba
dc2ccc4fe8 error for already purchased claims 2022-03-15 15:34:07 -03:00
belikor
cd5f260061 api.json: correct the error in the generated documentation
From `"name": "blobs_in_stream<blobs_in_stream>"`
to `"name": "blobs_in_stream"`.
2022-03-15 15:34:06 -03:00
belikor
c47ba498a4 daemon: fix documentation in the file_list docstring
This is necessary to produce the `docs/api.json`
(through `scripts/generate_json_api.py`)
with correct information, and to be able to parse this file later on
by other tools.
2022-03-15 15:34:06 -03:00
Lex Berezhny
a6bf8e0eb7 v0.104.0 2022-03-15 15:34:06 -03:00
Lex Berezhny
930d4b3acf re-enable coveralls 2022-03-15 15:34:06 -03:00
Lex Berezhny
c481838179 ubuntu 16.04 is deprecated on github actions, upgrading to 18.04 2022-03-15 15:34:06 -03:00
Lex Berezhny
963649998a revert release 2022-03-15 15:34:06 -03:00
Lex Berezhny
dc4b950e8f v0.104.0 2022-03-15 15:34:06 -03:00
Lex Berezhny
5fea68a9b9 default is_mine to true during migration 2022-03-15 15:34:06 -03:00
Lex Berezhny
568a7ae16a coveralls still down, will have to merged with coveralls off 2022-03-15 15:34:06 -03:00
Lex Berezhny
f69d47587f tests 2022-03-15 15:34:06 -03:00
Lex Berezhny
7148767b6f lint 2022-03-15 15:34:06 -03:00
Lex Berezhny
c66b1646a6 during disk clean your own sd blob is now kept and file status of deleted files is set to stopped 2022-03-15 15:34:06 -03:00
Lex Berezhny
bbcdc881cd db migration and other fixes 2022-03-15 15:34:06 -03:00
Lex Berezhny
07a78cf73d use databse to track blob disk space use and preserve own blobs 2022-03-15 15:34:06 -03:00
Lex Berezhny
93ac2e3bc9 v0.103.0 2022-03-15 15:34:06 -03:00
Lex Berezhny
7e9614b8d1 omit just node.py 2022-03-15 15:34:06 -03:00
Lex Berezhny
7d704f966b coverage omit fix 2022-03-15 15:34:06 -03:00
Lex Berezhny
43a2c6515d omit coverage inside tox 2022-03-15 15:34:06 -03:00
125 changed files with 10269 additions and 7244 deletions

@@ -78,7 +78,11 @@ jobs:
test:
- datanetwork
- blockchain
- blockchain_legacy_search
- claims
- takeovers
- transactions
- claims_legacy_search
- takeovers_legacy_search
- other
steps:
- name: Configure sysctl limits
@@ -90,7 +94,7 @@ jobs:
- name: Runs Elasticsearch
uses: elastic/elastic-github-actions/elasticsearch@master
with:
stack-version: 7.6.0
stack-version: 7.12.1
- uses: actions/checkout@v2
- uses: actions/setup-python@v1
with:
@@ -106,6 +110,8 @@ jobs:
key: tox-integration-${{ matrix.test }}-${{ hashFiles('setup.py') }}
restore-keys: txo-integration-${{ matrix.test }}-
- run: pip install tox coverage coveralls
- if: matrix.test == 'claims'
run: rm -rf .tox
- run: tox -e ${{ matrix.test }}
- name: submit coverage report
env:
@@ -134,7 +140,7 @@ jobs:
strategy:
matrix:
os:
- ubuntu-16.04
- ubuntu-18.04
- macos-latest
- windows-latest
runs-on: ${{ matrix.os }}

.gitignore

@@ -13,7 +13,7 @@ __pycache__
_trial_temp/
trending*.log
/tests/integration/blockchain/files
/tests/integration/claims/files
/tests/.coverage.*
/lbry/wallet/bin


@@ -0,0 +1,38 @@
FROM debian:10-slim
ARG user=lbry
ARG projects_dir=/home/$user
ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
RUN apt-get update && \
apt-get -y --no-install-recommends install \
wget \
automake libtool \
tar unzip \
build-essential \
pkg-config \
libleveldb-dev \
python3.7 \
python3-dev \
python3-pip \
python3-wheel \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
COPY . $projects_dir
RUN chown -R $user:$user $projects_dir
USER $user
WORKDIR $projects_dir
RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf
ENTRYPOINT ["python3", "scripts/dht_node.py"]

@@ -20,6 +20,7 @@ RUN apt-get update && \
python3-dev \
python3-pip \
python3-wheel \
python3-cffi \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*

@@ -18,23 +18,27 @@ services:
- "wallet_server:/database"
environment:
- DAEMON_URL=http://lbry:lbry@127.0.0.1:9245
- MAX_QUERY_WORKERS=4
- CACHE_MB=1024
- CACHE_ALL_TX_HASHES=
- CACHE_ALL_CLAIM_TXOS=
- MAX_SEND=1000000000000000000
- MAX_RECEIVE=1000000000000000000
- MAX_SESSIONS=100000
- HOST=0.0.0.0
- TCP_PORT=50001
- PROMETHEUS_PORT=2112
- QUERY_TIMEOUT_MS=3000 # how long search queries allowed to run before cancelling, in milliseconds
- TRENDING_ALGORITHMS=variable_decay
- MAX_SEND=10000000000000 # deprecated. leave it high until its removed
- MAX_SUBS=1000000000000 # deprecated. leave it high until its removed
- FILTERING_CHANNEL_IDS=770bd7ecba84fd2f7607fb15aedd2b172c2e153f 95e5db68a3101df19763f3a5182e4b12ba393ee8
- BLOCKING_CHANNEL_IDS=dd687b357950f6f271999971f43c785e8067c3a9 06871aa438032244202840ec59a469b303257cad b4a2528f436eca1bf3bf3e10ff3f98c57bd6c4c6 e4e230b131082f6b10c8f7994bbb83f29e8e6fb9
- BLOCKING_CHANNEL_IDS=dd687b357950f6f271999971f43c785e8067c3a9 06871aa438032244202840ec59a469b303257cad b4a2528f436eca1bf3bf3e10ff3f98c57bd6c4c6
es01:
image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0
container_name: es01
environment:
- node.name=es01
- discovery.type=single-node
- indices.query.bool.max_clause_count=4096
- indices.query.bool.max_clause_count=8192
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms8g -Xmx8g" # no more than 32, remember to disable swap
- "ES_JAVA_OPTS=-Xms4g -Xmx4g" # no more than 32, remember to disable swap
ulimits:
memlock:
soft: -1

@@ -6,7 +6,7 @@ set -euo pipefail
SNAPSHOT_URL="${SNAPSHOT_URL:-}" #off by default. latest snapshot at https://lbry.com/snapshot/wallet
if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/claims.db ]]; then
if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/lbry-leveldb ]]; then
files="$(ls)"
echo "Downloading wallet snapshot from $SNAPSHOT_URL"
wget --no-verbose --trust-server-names --content-disposition "$SNAPSHOT_URL"
@@ -20,6 +20,6 @@ if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/claims.db ]]; then
rm "$filename"
fi
/home/lbry/.local/bin/lbry-hub-elastic-sync /database/claims.db
/home/lbry/.local/bin/lbry-hub-elastic-sync
echo 'starting server'
/home/lbry/.local/bin/lbry-hub "$@"

File diff suppressed because one or more lines are too long

@@ -1,2 +1,2 @@
__version__ = "0.102.0"
__version__ = "0.106.0"
version = tuple(map(int, __version__.split('.'))) # pylint: disable=invalid-name

@@ -1,5 +1,6 @@
import os
import re
import time
import asyncio
import binascii
import logging
@@ -70,12 +71,16 @@ class AbstractBlob:
'writers',
'verified',
'writing',
'readers'
'readers',
'added_on',
'is_mine',
]
def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None):
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False,
):
self.loop = loop
self.blob_hash = blob_hash
self.length = length
@@ -85,6 +90,8 @@ class AbstractBlob:
self.verified: asyncio.Event = asyncio.Event(loop=self.loop)
self.writing: asyncio.Event = asyncio.Event(loop=self.loop)
self.readers: typing.List[typing.BinaryIO] = []
self.added_on = added_on or time.time()
self.is_mine = is_mine
if not is_valid_blobhash(blob_hash):
raise InvalidBlobHashError(blob_hash)
@@ -180,20 +187,21 @@ class AbstractBlob:
@classmethod
async def create_from_unencrypted(
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None) -> BlobInfo:
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int, added_on: int, is_mine: bool,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None,
) -> BlobInfo:
"""
Create an encrypted BlobFile from plaintext bytes
"""
blob_bytes, blob_hash = encrypt_blob_bytes(key, iv, unencrypted)
length = len(blob_bytes)
blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir)
blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir, added_on, is_mine)
writer = blob.get_blob_writer()
writer.write(blob_bytes)
await blob.verified.wait()
return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), blob_hash)
return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), added_on, blob_hash, is_mine)
def save_verified_blob(self, verified_bytes: bytes):
if self.verified.is_set():
@@ -248,11 +256,13 @@ class BlobBuffer(AbstractBlob):
"""
An in-memory only blob
"""
def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None):
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
):
self._verified_bytes: typing.Optional[BytesIO] = None
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory)
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)
@contextlib.contextmanager
def _reader_context(self) -> typing.ContextManager[typing.BinaryIO]:
@@ -289,10 +299,12 @@ class BlobFile(AbstractBlob):
"""
A blob existing on the local file system
"""
def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None):
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory)
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
):
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)
if not blob_directory or not os.path.isdir(blob_directory):
raise OSError(f"invalid blob directory '{blob_directory}'")
self.file_path = os.path.join(self.blob_directory, self.blob_hash)
@@ -343,12 +355,12 @@ class BlobFile(AbstractBlob):
@classmethod
async def create_from_unencrypted(
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'],
asyncio.Task]] = None) -> BlobInfo:
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int, added_on: float, is_mine: bool,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None
) -> BlobInfo:
if not blob_dir or not os.path.isdir(blob_dir):
raise OSError(f"cannot create blob in directory: '{blob_dir}'")
return await super().create_from_unencrypted(
loop, blob_dir, key, iv, unencrypted, blob_num, blob_completed_callback
loop, blob_dir, key, iv, unencrypted, blob_num, added_on, is_mine, blob_completed_callback
)

@@ -7,13 +7,19 @@ class BlobInfo:
'blob_num',
'length',
'iv',
'added_on',
'is_mine'
]
def __init__(self, blob_num: int, length: int, iv: str, blob_hash: typing.Optional[str] = None):
def __init__(
self, blob_num: int, length: int, iv: str, added_on,
blob_hash: typing.Optional[str] = None, is_mine=False):
self.blob_hash = blob_hash
self.blob_num = blob_num
self.length = length
self.iv = iv
self.added_on = added_on
self.is_mine = is_mine
def as_dict(self) -> typing.Dict:
d = {

@@ -36,30 +36,30 @@ class BlobManager:
self.config.blob_lru_cache_size)
self.connection_manager = ConnectionManager(loop)
def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None):
def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None, is_mine: bool = False):
if self.config.save_blobs or (
is_valid_blobhash(blob_hash) and os.path.isfile(os.path.join(self.blob_dir, blob_hash))):
return BlobFile(
self.loop, blob_hash, length, self.blob_completed, self.blob_dir
self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
)
return BlobBuffer(
self.loop, blob_hash, length, self.blob_completed, self.blob_dir
self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
)
def get_blob(self, blob_hash, length: typing.Optional[int] = None):
def get_blob(self, blob_hash, length: typing.Optional[int] = None, is_mine: bool = False):
if blob_hash in self.blobs:
if self.config.save_blobs and isinstance(self.blobs[blob_hash], BlobBuffer):
buffer = self.blobs.pop(blob_hash)
if blob_hash in self.completed_blob_hashes:
self.completed_blob_hashes.remove(blob_hash)
self.blobs[blob_hash] = self._get_blob(blob_hash, length)
self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
if buffer.is_readable():
with buffer.reader_context() as reader:
self.blobs[blob_hash].write_blob(reader.read())
if length and self.blobs[blob_hash].length is None:
self.blobs[blob_hash].set_length(length)
else:
self.blobs[blob_hash] = self._get_blob(blob_hash, length)
self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
return self.blobs[blob_hash]
def is_blob_verified(self, blob_hash: str, length: typing.Optional[int] = None) -> bool:
@@ -83,6 +83,8 @@ class BlobManager:
to_add = await self.storage.sync_missing_blobs(in_blobfiles_dir)
if to_add:
self.completed_blob_hashes.update(to_add)
# check blobs that aren't set as finished but were seen on disk
await self.ensure_completed_blobs_status(in_blobfiles_dir - to_add)
if self.config.track_bandwidth:
self.connection_manager.start()
return True
@@ -105,13 +107,26 @@ class BlobManager:
if isinstance(blob, BlobFile):
if blob.blob_hash not in self.completed_blob_hashes:
self.completed_blob_hashes.add(blob.blob_hash)
return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=True))
return self.loop.create_task(self.storage.add_blobs(
(blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=True)
)
else:
return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=False))
return self.loop.create_task(self.storage.add_blobs(
(blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=False)
)
def check_completed_blobs(self, blob_hashes: typing.List[str]) -> typing.List[str]:
"""Returns of the blobhashes_to_check, which are valid"""
return [blob_hash for blob_hash in blob_hashes if self.is_blob_verified(blob_hash)]
async def ensure_completed_blobs_status(self, blob_hashes: typing.Iterable[str]):
"""Ensures that completed blobs from a given list of blob hashes are set as 'finished' in the database."""
to_add = []
for blob_hash in blob_hashes:
if not self.is_blob_verified(blob_hash):
continue
blob = self.get_blob(blob_hash)
to_add.append((blob.blob_hash, blob.length, blob.added_on, blob.is_mine))
if len(to_add) > 500:
await self.storage.add_blobs(*to_add, finished=True)
to_add.clear()
return await self.storage.add_blobs(*to_add, finished=True)
def delete_blob(self, blob_hash: str):
if not is_valid_blobhash(blob_hash):

@@ -1,4 +1,3 @@
import os
import asyncio
import logging
@@ -7,51 +6,65 @@ log = logging.getLogger(__name__)
class DiskSpaceManager:
def __init__(self, config, cleaning_interval=30 * 60):
def __init__(self, config, db, blob_manager, cleaning_interval=30 * 60, analytics=None):
self.config = config
self.db = db
self.blob_manager = blob_manager
self.cleaning_interval = cleaning_interval
self.running = False
self.task = None
self.analytics = analytics
self._used_space_bytes = None
@property
def space_used_bytes(self):
used = 0
data_dir = os.path.join(self.config.data_dir, 'blobfiles')
for item in os.scandir(data_dir):
if item.is_file:
used += item.stat().st_size
return used
async def get_free_space_mb(self, is_network_blob=False):
limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
space_used_mb = await self.get_space_used_mb()
space_used_mb = space_used_mb['network_storage'] if is_network_blob else space_used_mb['content_storage']
return max(0, limit_mb - space_used_mb)
@property
def space_used_mb(self):
return int(self.space_used_bytes/1024.0/1024.0)
async def get_space_used_bytes(self):
self._used_space_bytes = await self.db.get_stored_blob_disk_usage()
return self._used_space_bytes
def clean(self):
if not self.config.blob_storage_limit:
async def get_space_used_mb(self, cached=True):
cached = cached and self._used_space_bytes is not None
space_used_bytes = self._used_space_bytes if cached else await self.get_space_used_bytes()
return {key: int(value/1024.0/1024.0) for key, value in space_used_bytes.items()}
async def clean(self):
await self._clean(False)
await self._clean(True)
async def _clean(self, is_network_blob=False):
space_used_mb = await self.get_space_used_mb(cached=False)
if is_network_blob:
space_used_mb = space_used_mb['network_storage']
else:
space_used_mb = space_used_mb['content_storage'] + space_used_mb['private_storage']
storage_limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
if self.analytics:
asyncio.create_task(
self.analytics.send_disk_space_used(space_used_mb, storage_limit_mb, is_network_blob)
)
delete = []
available = storage_limit_mb - space_used_mb
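# nothing to clean: either content cleaning is disabled (limit of 0) or, for network blobs, usage is already within the limit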
if storage_limit_mb == 0 if not is_network_blob else available >= 0:
return 0
used = 0
files = []
data_dir = os.path.join(self.config.data_dir, 'blobfiles')
for file in os.scandir(data_dir):
if file.is_file:
file_stats = file.stat()
used += file_stats.st_size
files.append((file_stats.st_mtime, file_stats.st_size, file.path))
files.sort()
available = (self.config.blob_storage_limit*1024*1024) - used
cleaned = 0
for _, file_size, file in files:
available += file_size
if available > 0:
for blob_hash, file_size, _ in await self.db.get_stored_blobs(is_mine=False, is_network_blob=is_network_blob):
delete.append(blob_hash)
available += int(file_size/1024.0/1024.0)
if available >= 0:
break
os.remove(file)
cleaned += 1
return cleaned
if delete:
await self.db.stop_all_files()
await self.blob_manager.delete_blobs(delete, delete_from_db=True)
self._used_space_bytes = None
return len(delete)
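
_clean above frees space by walking stored, non-owned blobs in the order the database returns them and marking them for deletion until usage drops back under the configured limit. A simplified sketch of that selection step, assuming (blob_hash, size_bytes) pairs rather than the real get_stored_blobs rows:

def pick_blobs_to_delete(stored, used_mb: int, limit_mb: int):
    # delete blobs in the given order until the overage between usage and the limit is covered
    to_delete = []
    deficit_mb = used_mb - limit_mb
    for blob_hash, size_bytes in stored:
        if deficit_mb <= 0:
            break
        to_delete.append(blob_hash)
        deficit_mb -= size_bytes // (1024 * 1024)
    return to_delete

print(pick_blobs_to_delete([("blob1", 50 * 1024 * 1024), ("blob2", 80 * 1024 * 1024)], 1100, 1024))
# -> ['blob1', 'blob2']: 76 MB over the limit, so both are selected
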
async def cleaning_loop(self):
while self.running:
await asyncio.sleep(self.cleaning_interval)
await asyncio.get_event_loop().run_in_executor(None, self.clean)
await self.clean()
async def start(self):
self.running = True

@ -3,6 +3,7 @@ import typing
import logging
from lbry.utils import cache_concurrent
from lbry.blob_exchange.client import request_blob
from lbry.dht.node import get_kademlia_peers_from_hosts
if typing.TYPE_CHECKING:
from lbry.conf import Config
from lbry.dht.node import Node
@ -87,7 +88,6 @@ class BlobDownloader:
if blob.get_is_verified():
return blob
self.is_running.set()
tried_for_this_blob: typing.Set['KademliaPeer'] = set()
try:
while not blob.get_is_verified() and self.is_running.is_set():
batch: typing.Set['KademliaPeer'] = set(self.connections.keys())
@ -97,24 +97,15 @@ class BlobDownloader:
"%s running, %d peers, %d ignored, %d active, %s connections", blob_hash[:6],
len(batch), len(self.ignored), len(self.active_connections), len(self.connections)
)
re_add: typing.Set['KademliaPeer'] = set()
for peer in sorted(batch, key=lambda peer: self.scores.get(peer, 0), reverse=True):
if peer in self.ignored:
continue
if peer in tried_for_this_blob:
if peer in self.active_connections or not self.should_race_continue(blob):
continue
if peer in self.active_connections:
if peer not in re_add:
re_add.add(peer)
continue
if not self.should_race_continue(blob):
break
log.debug("request %s from %s:%i", blob_hash[:8], peer.address, peer.tcp_port)
t = self.loop.create_task(self.request_blob_from_peer(blob, peer, connection_id))
self.active_connections[peer] = t
tried_for_this_blob.add(peer)
if not re_add:
self.peer_queue.put_nowait(list(batch))
self.peer_queue.put_nowait(list(batch))
await self.new_peer_or_finished()
self.cleanup_active()
log.debug("downloaded %s", blob_hash[:8])
@ -133,11 +124,14 @@ class BlobDownloader:
protocol.close()
async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', node: 'Node',
async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node',
blob_hash: str) -> 'AbstractBlob':
search_queue = asyncio.Queue(loop=loop, maxsize=config.max_connections_per_download)
search_queue.put_nowait(blob_hash)
peer_queue, accumulate_task = node.accumulate_peers(search_queue)
peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue)
fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers)
if fixed_peers:
loop.call_later(config.fixed_peer_delay, peer_queue.put_nowait, fixed_peers)
downloader = BlobDownloader(loop, config, blob_manager, peer_queue)
try:
return await downloader.download_blob(blob_hash)

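
download_blob above now resolves config.fixed_peers into KademliaPeer objects and only injects them into the peer queue after config.fixed_peer_delay, so peers discovered through the DHT get a head start. A self-contained sketch of that delayed-fallback pattern (the host and delay below are made-up values, not lbry defaults):

import asyncio

async def main():
    loop = asyncio.get_running_loop()
    peer_queue: asyncio.Queue = asyncio.Queue()
    fixed_peers = [("cdn.example.com", 5567)]       # hypothetical fallback hosts
    fixed_peer_delay = 2.0                          # seconds before falling back
    loop.call_later(fixed_peer_delay, peer_queue.put_nowait, fixed_peers)
    peer_queue.put_nowait([("203.0.113.5", 3333)])  # stand-in for peers found via the DHT
    print(await peer_queue.get())                   # DHT peers are consumed first
    print(await peer_queue.get())                   # fixed peers only arrive after the delay

asyncio.run(main())
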
@ -613,7 +613,7 @@ class Config(CLIConfig):
"ports or have firewall rules you likely want to disable this.", True
)
udp_port = Integer("UDP port for communicating on the LBRY DHT", 4444, previous_names=['dht_node_port'])
tcp_port = Integer("TCP port to listen for incoming blob requests", 3333, previous_names=['peer_port'])
tcp_port = Integer("TCP port to listen for incoming blob requests", 4444, previous_names=['peer_port'])
prometheus_port = Integer("Port to expose prometheus metrics (off by default)", 0)
network_interface = String("Interface to use for the DHT and blob exchange", '0.0.0.0')
@ -622,7 +622,7 @@ class Config(CLIConfig):
"Routing table bucket index below which we always split the bucket if given a new key to add to it and "
"the bucket is full. As this value is raised the depth of the routing table (and number of peers in it) "
"will increase. This setting is used by seed nodes, you probably don't want to change it during normal "
"use.", 1
"use.", 2
)
# protocol timeouts
@ -634,6 +634,7 @@ class Config(CLIConfig):
# blob announcement and download
save_blobs = Toggle("Save encrypted blob files for hosting, otherwise download blobs to memory only.", True)
network_storage_limit = Integer("Disk space in MB to be allocated for helping the P2P network. 0 = disable", 0)
blob_storage_limit = Integer("Disk space in MB to be allocated for blob storage. 0 = no limit", 0)
blob_lru_cache_size = Integer(
"LRU cache size for decrypted downloaded blobs used to minimize re-downloading the same blobs when "
@ -692,6 +693,8 @@ class Config(CLIConfig):
('spv19.lbry.com', 50001),
])
known_dht_nodes = Servers("Known nodes for bootstrapping connection to the DHT", [
('dht.lbry.grin.io', 4444), # Grin
('dht.lbry.madiator.com', 4444), # Madiator
('lbrynet1.lbry.com', 4444), # US EAST
('lbrynet2.lbry.com', 4444), # US WEST
('lbrynet3.lbry.com', 4444), # EU
@ -721,7 +724,8 @@ class Config(CLIConfig):
coin_selection_strategy = StringChoice(
"Strategy to use when selecting UTXOs for a transaction",
STRATEGIES, "standard")
STRATEGIES, "prefer_confirmed"
)
transaction_cache_size = Integer("Transaction cache size", 2 ** 17)
save_resolved_claims = Toggle(

@ -1,6 +1,9 @@
import asyncio
import typing
import logging
from prometheus_client import Counter, Gauge
if typing.TYPE_CHECKING:
from lbry.dht.node import Node
from lbry.extras.daemon.storage import SQLiteStorage
@ -9,24 +12,39 @@ log = logging.getLogger(__name__)
class BlobAnnouncer:
announcements_sent_metric = Counter(
"announcements_sent", "Number of announcements sent and their respective status.", namespace="dht_node",
labelnames=("peers", "error"),
)
announcement_queue_size_metric = Gauge(
"announcement_queue_size", "Number of hashes waiting to be announced.", namespace="dht_node",
labelnames=("scope",)
)
def __init__(self, loop: asyncio.AbstractEventLoop, node: 'Node', storage: 'SQLiteStorage'):
self.loop = loop
self.node = node
self.storage = storage
self.announce_task: asyncio.Task = None
self.announce_queue: typing.List[str] = []
self._done = asyncio.Event()
self.announced = set()
async def _submit_announcement(self, blob_hash):
try:
peers = len(await self.node.announce_blob(blob_hash))
if peers > 4:
return blob_hash
else:
log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise err
log.warning("error announcing %s: %s", blob_hash[:8], str(err))
async def _run_consumer(self):
while self.announce_queue:
try:
blob_hash = self.announce_queue.pop()
peers = len(await self.node.announce_blob(blob_hash))
self.announcements_sent_metric.labels(peers=peers, error=False).inc()
if peers > 4:
self.announced.add(blob_hash)
else:
log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
except Exception as err:
self.announcements_sent_metric.labels(peers=0, error=True).inc()
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise err
log.warning("error announcing %s: %s", blob_hash[:8], str(err))
async def _announce(self, batch_size: typing.Optional[int] = 10):
while batch_size:
@ -37,17 +55,18 @@ class BlobAnnouncer:
log.warning("No peers in DHT, announce round skipped")
continue
self.announce_queue.extend(await self.storage.get_blobs_to_announce())
self.announcement_queue_size_metric.labels(scope="global").set(len(self.announce_queue))
log.debug("announcer task wake up, %d blobs to announce", len(self.announce_queue))
while len(self.announce_queue) > 0:
log.info("%i blobs to announce", len(self.announce_queue))
announced = await asyncio.gather(*[
self._submit_announcement(
self.announce_queue.pop()) for _ in range(batch_size) if self.announce_queue
], loop=self.loop)
announced = list(filter(None, announced))
await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)], loop=self.loop)
announced = list(filter(None, self.announced))
if announced:
await self.storage.update_last_announced_blobs(announced)
log.info("announced %i blobs", len(announced))
self.announced.clear()
self._done.set()
self._done.clear()
def start(self, batch_size: typing.Optional[int] = 10):
assert not self.announce_task or self.announce_task.done(), "already running"
@ -56,3 +75,6 @@ class BlobAnnouncer:
def stop(self):
if self.announce_task and not self.announce_task.done():
self.announce_task.cancel()
def wait(self):
return self._done.wait()
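
The announcer now runs a fixed number of consumer tasks over one shared queue instead of gathering one task per blob, which caps concurrent announcements at batch_size. A minimal sketch of that consumer pattern (announce below is a stand-in for node.announce_blob, not the real call):

import asyncio
from typing import List, Set

async def announce(blob_hash: str) -> bool:
    await asyncio.sleep(0)   # stand-in for node.announce_blob(blob_hash)
    return True

async def _consume(queue: List[str], announced: Set[str]) -> None:
    while queue:             # each worker drains the shared queue until it is empty
        blob_hash = queue.pop()
        if await announce(blob_hash):
            announced.add(blob_hash)

async def announce_all(blob_hashes: List[str], batch_size: int = 10) -> Set[str]:
    queue, announced = list(blob_hashes), set()
    await asyncio.gather(*(_consume(queue, announced) for _ in range(batch_size)))
    return announced

print(asyncio.run(announce_all([f"blob{i}" for i in range(25)])))
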

@ -20,7 +20,6 @@ MAYBE_PING_DELAY = 300 # 5 minutes
CHECK_REFRESH_INTERVAL = REFRESH_INTERVAL / 5
RPC_ID_LENGTH = 20
PROTOCOL_VERSION = 1
BOTTOM_OUT_LIMIT = 3
MSG_SIZE_LIMIT = 1400

@ -1,8 +1,10 @@
import logging
import asyncio
import typing
import binascii
import socket
from prometheus_client import Gauge
from lbry.utils import resolve_host
from lbry.dht import constants
from lbry.dht.peer import make_kademlia_peer
@ -18,6 +20,14 @@ log = logging.getLogger(__name__)
class Node:
storing_peers_metric = Gauge(
"storing_peers", "Number of peers storing blobs announced to this node", namespace="dht_node",
labelnames=("scope",),
)
stored_blob_with_x_bytes_colliding = Gauge(
"stored_blobs_x_bytes_colliding", "Number of blobs with at least X bytes colliding with this node id prefix",
namespace="dht_node", labelnames=("amount",)
)
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, udp_port: int,
internal_udp_port: int, peer_port: int, external_ip: str, rpc_timeout: float = constants.RPC_TIMEOUT,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX,
@ -32,6 +42,10 @@ class Node:
self._refresh_task: asyncio.Task = None
self._storage = storage
@property
def stored_blob_hashes(self):
return self.protocol.data_store.keys()
async def refresh_node(self, force_once=False):
while True:
# remove peers with expired blob announcements from the datastore
@ -41,7 +55,18 @@ class Node:
# add all peers in the routing table
total_peers.extend(self.protocol.routing_table.get_peers())
# add all the peers who have announced blobs to us
total_peers.extend(self.protocol.data_store.get_storing_contacts())
storing_peers = self.protocol.data_store.get_storing_contacts()
self.storing_peers_metric.labels("global").set(len(storing_peers))
total_peers.extend(storing_peers)
counts = {0: 0, 1: 0, 2: 0}
node_id = self.protocol.node_id
for blob_hash in self.protocol.data_store.keys():
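# count how many leading bytes of this stored hash match our node id (0, 1 or 2)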
bytes_colliding = 0 if blob_hash[0] != node_id[0] else 2 if blob_hash[1] == node_id[1] else 1
counts[bytes_colliding] += 1
self.stored_blob_with_x_bytes_colliding.labels(amount=0).set(counts[0])
self.stored_blob_with_x_bytes_colliding.labels(amount=1).set(counts[1])
self.stored_blob_with_x_bytes_colliding.labels(amount=2).set(counts[2])
# get ids falling in the midpoint of each bucket that hasn't been recently updated
node_ids = self.protocol.routing_table.get_refresh_list(0, True)
@ -80,7 +105,7 @@ class Node:
await fut
async def announce_blob(self, blob_hash: str) -> typing.List[bytes]:
hash_value = binascii.unhexlify(blob_hash.encode())
hash_value = bytes.fromhex(blob_hash)
assert len(hash_value) == constants.HASH_LENGTH
peers = await self.peer_search(hash_value)
@ -95,7 +120,7 @@ class Node:
stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
if stored_to:
log.debug(
"Stored %s to %i of %i attempted peers", binascii.hexlify(hash_value).decode()[:8],
"Stored %s to %i of %i attempted peers", hash_value.hex()[:8],
len(stored_to), len(peers)
)
else:
@ -177,25 +202,23 @@ class Node:
self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls))
def get_iterative_node_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
bottom_out_limit: int = constants.BOTTOM_OUT_LIMIT,
max_results: int = constants.K) -> IterativeNodeFinder:
return IterativeNodeFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
key, bottom_out_limit, max_results, None, shortlist)
key, max_results, None, shortlist)
def get_iterative_value_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
bottom_out_limit: int = 40,
max_results: int = -1) -> IterativeValueFinder:
return IterativeValueFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
key, bottom_out_limit, max_results, None, shortlist)
key, max_results, None, shortlist)
async def peer_search(self, node_id: bytes, count=constants.K, max_results=constants.K * 2,
bottom_out_limit=20, shortlist: typing.Optional[typing.List['KademliaPeer']] = None
shortlist: typing.Optional[typing.List['KademliaPeer']] = None
) -> typing.List['KademliaPeer']:
peers = []
async for iteration_peers in self.get_iterative_node_finder(
node_id, shortlist=shortlist, bottom_out_limit=bottom_out_limit, max_results=max_results):
node_id, shortlist=shortlist, max_results=max_results):
peers.extend(iteration_peers)
distance = Distance(node_id)
peers.sort(key=lambda peer: distance(peer.node_id))
@ -223,7 +246,7 @@ class Node:
# prioritize peers who reply to a dht ping first
# this minimizes attempting to make tcp connections that won't work later to dead or unreachable peers
async for results in self.get_iterative_value_finder(binascii.unhexlify(blob_hash.encode())):
async for results in self.get_iterative_value_finder(bytes.fromhex(blob_hash)):
to_put = []
for peer in results:
if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
@ -258,3 +281,10 @@ class Node:
) -> typing.Tuple[asyncio.Queue, asyncio.Task]:
queue = peer_queue or asyncio.Queue(loop=self.loop)
return queue, self.loop.create_task(self._accumulate_peers_for_value(search_queue, queue))
async def get_kademlia_peers_from_hosts(peer_list: typing.List[typing.Tuple[str, int]]) -> typing.List['KademliaPeer']:
peer_address_list = [(await resolve_host(url, port, proto='tcp'), port) for url, port in peer_list]
kademlia_peer_list = [make_kademlia_peer(None, address, None, tcp_port=port, allow_localhost=True)
for address, port in peer_address_list]
return kademlia_peer_list
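
Several hunks in this change count how many leading bytes or bits a stored blob hash shares with the local node id (the stored_blob_with_x_bytes_colliding gauge here, and utils.get_colliding_prefix_bits in the routing table and background downloader). A hedged sketch of such a helper for equal-length ids; it matches the intent described in the commit messages, not necessarily the exact lbry.utils implementation:

def colliding_prefix_bits(first: bytes, second: bytes) -> int:
    # number of identical leading bits, scanning from the most significant bit
    assert len(first) == len(second), "sketch assumes equal-length ids"
    total_bits = len(first) * 8
    xored = int.from_bytes(first, "big") ^ int.from_bytes(second, "big")
    return total_bits if xored == 0 else total_bits - xored.bit_length()

assert colliding_prefix_bits(b"\x00\xff", b"\x00\xfe") == 15
assert colliding_prefix_bits(b"\xf0\x00", b"\x0f\x00") == 0
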

@ -1,18 +1,21 @@
import typing
import asyncio
import logging
from binascii import hexlify
from dataclasses import dataclass, field
from functools import lru_cache
from prometheus_client import Gauge
from lbry.utils import is_valid_public_ipv4 as _is_valid_public_ipv4, LRUCache
from lbry.dht import constants
from lbry.dht.serialization.datagram import make_compact_address, make_compact_ip, decode_compact_address
ALLOW_LOCALHOST = False
CACHE_SIZE = 16384
log = logging.getLogger(__name__)
@lru_cache(1024)
@lru_cache(CACHE_SIZE)
def make_kademlia_peer(node_id: typing.Optional[bytes], address: typing.Optional[str],
udp_port: typing.Optional[int] = None,
tcp_port: typing.Optional[int] = None,
@ -26,17 +29,26 @@ def is_valid_public_ipv4(address, allow_localhost: bool = False):
class PeerManager:
peer_manager_keys_metric = Gauge(
"peer_manager_keys", "Number of keys tracked by PeerManager dicts (sum)", namespace="dht_node",
labelnames=("scope",)
)
def __init__(self, loop: asyncio.AbstractEventLoop):
self._loop = loop
self._rpc_failures: typing.Dict[
typing.Tuple[str, int], typing.Tuple[typing.Optional[float], typing.Optional[float]]
] = {}
self._last_replied: typing.Dict[typing.Tuple[str, int], float] = LRUCache(2048)
self._last_sent: typing.Dict[typing.Tuple[str, int], float] = LRUCache(2048)
self._last_requested: typing.Dict[typing.Tuple[str, int], float] = LRUCache(2048)
self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = LRUCache(2048)
self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = LRUCache(2048)
self._node_tokens: typing.Dict[bytes, (float, bytes)] = LRUCache(2048)
] = LRUCache(CACHE_SIZE)
self._last_replied: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
self._last_sent: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
self._last_requested: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = LRUCache(CACHE_SIZE)
self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = LRUCache(CACHE_SIZE)
self._node_tokens: typing.Dict[bytes, (float, bytes)] = LRUCache(CACHE_SIZE)
def count_cache_keys(self):
return len(self._rpc_failures) + len(self._last_replied) + len(self._last_sent) + len(
self._last_requested) + len(self._node_id_mapping) + len(self._node_id_reverse_mapping) + len(
self._node_tokens)
def reset(self):
for statistic in (self._rpc_failures, self._last_replied, self._last_sent, self._last_requested):
@ -86,6 +98,7 @@ class PeerManager:
self._node_id_mapping.pop(self._node_id_reverse_mapping.pop(node_id))
self._node_id_mapping[(address, udp_port)] = node_id
self._node_id_reverse_mapping[node_id] = (address, udp_port)
self.peer_manager_keys_metric.labels("global").set(self.count_cache_keys())
def prune(self): # TODO: periodically call this
now = self._loop.time()
@ -154,7 +167,7 @@ class KademliaPeer:
def __post_init__(self):
if self._node_id is not None:
if not len(self._node_id) == constants.HASH_LENGTH:
raise ValueError("invalid node_id: {}".format(hexlify(self._node_id).decode()))
raise ValueError("invalid node_id: {}".format(self._node_id.hex()))
if self.udp_port is not None and not 1024 <= self.udp_port <= 65535:
raise ValueError(f"invalid udp port: {self.address}:{self.udp_port}")
if self.tcp_port is not None and not 1024 <= self.tcp_port <= 65535:
@ -177,3 +190,6 @@ class KademliaPeer:
def compact_ip(self):
return make_compact_ip(self.address)
def __str__(self):
return f"{self.__class__.__name__}({self.node_id.hex()[:8]}@{self.address}:{self.udp_port}-{self.tcp_port})"
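
Many hunks in this change replace binascii.hexlify/unhexlify with the built-in bytes.hex()/bytes.fromhex(); a quick equivalence check (the hash value is made up):

import binascii

blob_hash = "9c8c24a3" * 12   # hypothetical 48-byte hash in hex
assert bytes.fromhex(blob_hash) == binascii.unhexlify(blob_hash.encode())
assert bytes.fromhex(blob_hash).hex() == binascii.hexlify(bytes.fromhex(blob_hash)).decode()
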

@ -16,6 +16,12 @@ class DictDataStore:
self._peer_manager = peer_manager
self.completed_blobs: typing.Set[str] = set()
def keys(self):
return self._data_store.keys()
def __len__(self):
return self._data_store.__len__()
def removed_expired_peers(self):
now = self.loop.time()
keys = list(self._data_store.keys())

@ -1,7 +1,6 @@
import asyncio
from binascii import hexlify
from itertools import chain
from collections import defaultdict
from collections import defaultdict, OrderedDict
import typing
import logging
from typing import TYPE_CHECKING
@ -75,7 +74,7 @@ def get_shortlist(routing_table: 'TreeRoutingTable', key: bytes,
class IterativeFinder:
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
max_results: typing.Optional[int] = constants.K,
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
if len(key) != constants.HASH_LENGTH:
@ -86,28 +85,22 @@ class IterativeFinder:
self.protocol = protocol
self.key = key
self.bottom_out_limit = bottom_out_limit
self.max_results = max_results
self.max_results = max(constants.K, max_results)
self.exclude = exclude or []
self.active: typing.Set['KademliaPeer'] = set()
self.active: typing.Dict['KademliaPeer', int] = OrderedDict() # peer: distance, sorted
self.contacted: typing.Set['KademliaPeer'] = set()
self.distance = Distance(key)
self.closest_peer: typing.Optional['KademliaPeer'] = None
self.prev_closest_peer: typing.Optional['KademliaPeer'] = None
self.iteration_queue = asyncio.Queue(loop=self.loop)
self.running_probes: typing.Set[asyncio.Task] = set()
self.running_probes: typing.Dict['KademliaPeer', asyncio.Task] = {}
self.iteration_count = 0
self.bottom_out_count = 0
self.running = False
self.tasks: typing.List[asyncio.Task] = []
self.delayed_calls: typing.List[asyncio.Handle] = []
for peer in get_shortlist(routing_table, key, shortlist):
if peer.node_id:
self._add_active(peer)
self._add_active(peer, force=True)
else:
# seed nodes
self._schedule_probe(peer)
@ -139,15 +132,14 @@ class IterativeFinder:
"""
return []
def _is_closer(self, peer: 'KademliaPeer') -> bool:
return not self.closest_peer or self.distance.is_closer(peer.node_id, self.closest_peer.node_id)
def _add_active(self, peer):
def _add_active(self, peer, force=False):
if not force and self.peer_manager.peer_is_good(peer) is False:
return
if peer in self.contacted:
return
if peer not in self.active and peer.node_id and peer.node_id != self.protocol.node_id:
self.active.add(peer)
if self._is_closer(peer):
self.prev_closest_peer = self.closest_peer
self.closest_peer = peer
self.active[peer] = self.distance(peer.node_id)
self.active = OrderedDict(sorted(self.active.items(), key=lambda item: item[1]))
async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
self._add_active(peer)
@ -159,33 +151,43 @@ class IterativeFinder:
log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer.address,
peer.udp_port, address, udp_port)
self.check_result_ready(response)
self._log_state()
def _reset_closest(self, peer):
if peer in self.active:
del self.active[peer]
async def _send_probe(self, peer: 'KademliaPeer'):
try:
response = await self.send_probe(peer)
except asyncio.TimeoutError:
self.active.discard(peer)
self._reset_closest(peer)
return
except ValueError as err:
log.warning(str(err))
self.active.discard(peer)
self._reset_closest(peer)
return
except TransportNotConnected:
return self.aclose()
except RemoteException:
self._reset_closest(peer)
return
return await self._handle_probe_result(peer, response)
async def _search_round(self):
def _search_round(self):
"""
Send up to constants.alpha (5) probes to closest active peers
"""
added = 0
to_probe = list(self.active - self.contacted)
to_probe.sort(key=lambda peer: self.distance(self.key))
for peer in to_probe:
if added >= constants.ALPHA:
for index, peer in enumerate(self.active.keys()):
if index == 0:
log.debug("closest to probe: %s", peer.node_id.hex()[:8])
if peer in self.contacted:
continue
if len(self.running_probes) >= constants.ALPHA:
break
if index > (constants.K + len(self.running_probes)):
break
origin_address = (peer.address, peer.udp_port)
if origin_address in self.exclude:
@ -196,9 +198,9 @@ class IterativeFinder:
continue
self._schedule_probe(peer)
added += 1
log.debug("running %d probes", len(self.running_probes))
log.debug("running %d probes for key %s", len(self.running_probes), self.key.hex()[:8])
if not added and not self.running_probes:
log.debug("search for %s exhausted", hexlify(self.key)[:8])
log.debug("search for %s exhausted", self.key.hex()[:8])
self.search_exhausted()
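
With active kept as a distance-ordered dict, _search_round above walks it in order, skips peers that were already contacted, never keeps more than ALPHA probes in flight, and stops once it has moved past the closest entries. A simplified sketch of that selection (ALPHA follows the docstring above; K is assumed to be the usual bucket size of 8, and the cutoff arithmetic is condensed):

from typing import Dict, List, Set

ALPHA, K = 5, 8   # assumed lbry.dht.constants values

def peers_to_probe(active: Dict[str, int], contacted: Set[str], running_probes: int) -> List[str]:
    # `active` maps peer -> distance and is assumed to already be sorted by distance
    picked: List[str] = []
    for index, peer in enumerate(active):
        if peer in contacted:
            continue
        if running_probes + len(picked) >= ALPHA:       # cap concurrent probes
            break
        if index > K + running_probes + len(picked):    # ignore peers far from the closest K
            break
        picked.append(peer)
    return picked
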
def _schedule_probe(self, peer: 'KademliaPeer'):
@ -207,33 +209,22 @@ class IterativeFinder:
t = self.loop.create_task(self._send_probe(peer))
def callback(_):
self.running_probes.difference_update({
probe for probe in self.running_probes if probe.done() or probe == t
})
if not self.running_probes:
self.tasks.append(self.loop.create_task(self._search_task(0.0)))
self.running_probes.pop(peer, None)
if self.running:
self._search_round()
t.add_done_callback(callback)
self.running_probes.add(t)
self.running_probes[peer] = t
async def _search_task(self, delay: typing.Optional[float] = constants.ITERATIVE_LOOKUP_DELAY):
try:
if self.running:
await self._search_round()
if self.running:
self.delayed_calls.append(self.loop.call_later(delay, self._search))
except (asyncio.CancelledError, StopAsyncIteration, TransportNotConnected):
if self.running:
self.loop.call_soon(self.aclose)
def _search(self):
self.tasks.append(self.loop.create_task(self._search_task()))
def _log_state(self):
log.debug("[%s] check result: %i active nodes %i contacted",
self.key.hex()[:8], len(self.active), len(self.contacted))
def __aiter__(self):
if self.running:
raise Exception("already running")
self.running = True
self._search()
self.loop.call_soon(self._search_round)
return self
async def __anext__(self) -> typing.List['KademliaPeer']:
@ -253,40 +244,40 @@ class IterativeFinder:
def aclose(self):
self.running = False
self.iteration_queue.put_nowait(None)
for task in chain(self.tasks, self.running_probes, self.delayed_calls):
for task in chain(self.tasks, self.running_probes.values()):
task.cancel()
self.tasks.clear()
self.running_probes.clear()
self.delayed_calls.clear()
class IterativeNodeFinder(IterativeFinder):
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
max_results: typing.Optional[int] = constants.K,
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
super().__init__(loop, peer_manager, routing_table, protocol, key, max_results, exclude,
shortlist)
self.yielded_peers: typing.Set['KademliaPeer'] = set()
async def send_probe(self, peer: 'KademliaPeer') -> FindNodeResponse:
log.debug("probing %s:%d %s", peer.address, peer.udp_port, hexlify(peer.node_id)[:8] if peer.node_id else '')
log.debug("probe %s:%d (%s) for NODE %s",
peer.address, peer.udp_port, peer.node_id.hex()[:8] if peer.node_id else '', self.key.hex()[:8])
response = await self.protocol.get_rpc_peer(peer).find_node(self.key)
return FindNodeResponse(self.key, response)
def search_exhausted(self):
self.put_result(self.active, finish=True)
self.put_result(self.active.keys(), finish=True)
def put_result(self, from_iter: typing.Iterable['KademliaPeer'], finish=False):
not_yet_yielded = [
peer for peer in from_iter
if peer not in self.yielded_peers
and peer.node_id != self.protocol.node_id
and self.peer_manager.peer_is_good(peer) is not False
and self.peer_manager.peer_is_good(peer) is True # return only peers who answered
]
not_yet_yielded.sort(key=lambda peer: self.distance(peer.node_id))
to_yield = not_yet_yielded[:min(constants.K, len(not_yet_yielded))]
to_yield = not_yet_yielded[:max(constants.K, self.max_results)]
if to_yield:
self.yielded_peers.update(to_yield)
self.iteration_queue.put_nowait(to_yield)
@ -298,26 +289,16 @@ class IterativeNodeFinder(IterativeFinder):
if found:
log.debug("found")
return self.put_result(self.active, finish=True)
if self.prev_closest_peer and self.closest_peer and not self._is_closer(self.prev_closest_peer):
# log.info("improving, %i %i %i %i %i", len(self.shortlist), len(self.active), len(self.contacted),
# self.bottom_out_count, self.iteration_count)
self.bottom_out_count = 0
elif self.prev_closest_peer and self.closest_peer:
self.bottom_out_count += 1
log.info("bottom out %i %i %i", len(self.active), len(self.contacted), self.bottom_out_count)
if self.bottom_out_count >= self.bottom_out_limit or self.iteration_count >= self.bottom_out_limit:
log.info("limit hit")
self.put_result(self.active, True)
return self.put_result(self.active.keys(), finish=True)
class IterativeValueFinder(IterativeFinder):
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
max_results: typing.Optional[int] = constants.K,
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
super().__init__(loop, peer_manager, routing_table, protocol, key, max_results, exclude,
shortlist)
self.blob_peers: typing.Set['KademliaPeer'] = set()
# this tracks the index of the most recent page we requested from each peer
@ -326,6 +307,8 @@ class IterativeValueFinder(IterativeFinder):
self.discovered_peers: typing.Dict['KademliaPeer', typing.Set['KademliaPeer']] = defaultdict(set)
async def send_probe(self, peer: 'KademliaPeer') -> FindValueResponse:
log.debug("probe %s:%d (%s) for VALUE %s",
peer.address, peer.udp_port, peer.node_id.hex()[:8], self.key.hex()[:8])
page = self.peer_pages[peer]
response = await self.protocol.get_rpc_peer(peer).find_value(self.key, page=page)
parsed = FindValueResponse(self.key, response)
@ -347,7 +330,6 @@ class IterativeValueFinder(IterativeFinder):
already_known + len(parsed.found_compact_addresses))
if len(self.discovered_peers[peer]) != already_known + len(parsed.found_compact_addresses):
log.warning("misbehaving peer %s:%i returned duplicate peers for blob", peer.address, peer.udp_port)
parsed.found_compact_addresses.clear()
elif len(parsed.found_compact_addresses) >= constants.K and self.peer_pages[peer] < parsed.pages:
# the peer returned a full page and indicates it has more
self.peer_pages[peer] += 1
@ -361,23 +343,12 @@ class IterativeValueFinder(IterativeFinder):
blob_peers = [self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr)
for compact_addr in response.found_compact_addresses]
to_yield = []
self.bottom_out_count = 0
for blob_peer in blob_peers:
if blob_peer not in self.blob_peers:
self.blob_peers.add(blob_peer)
to_yield.append(blob_peer)
if to_yield:
# log.info("found %i new peers for blob", len(to_yield))
self.iteration_queue.put_nowait(to_yield)
# if self.max_results and len(self.blob_peers) >= self.max_results:
# log.info("enough blob peers found")
# if not self.finished.is_set():
# self.finished.set()
elif self.prev_closest_peer and self.closest_peer:
self.bottom_out_count += 1
if self.bottom_out_count >= self.bottom_out_limit:
log.info("blob peer search bottomed out")
self.iteration_queue.put_nowait(None)
def get_initial_result(self) -> typing.List['KademliaPeer']:
if self.protocol.data_store.has_peers_for_blob(self.key):

@ -3,12 +3,14 @@ import socket
import functools
import hashlib
import asyncio
import time
import typing
import binascii
import random
from asyncio.protocols import DatagramProtocol
from asyncio.transports import DatagramTransport
from prometheus_client import Gauge, Counter, Histogram
from lbry.dht import constants
from lbry.dht.serialization.bencoding import DecodeError
from lbry.dht.serialization.datagram import decode_datagram, ErrorDatagram, ResponseDatagram, RequestDatagram
@ -31,6 +33,11 @@ OLD_PROTOCOL_ERRORS = {
class KademliaRPC:
stored_blob_metric = Gauge(
"stored_blobs", "Number of blobs announced by other peers", namespace="dht_node",
labelnames=("scope",),
)
def __init__(self, protocol: 'KademliaProtocol', loop: asyncio.AbstractEventLoop, peer_port: int = 3333):
self.protocol = protocol
self.loop = loop
@ -62,6 +69,7 @@ class KademliaRPC:
self.protocol.data_store.add_peer_to_blob(
rpc_contact, blob_hash
)
self.stored_blob_metric.labels("global").set(len(self.protocol.data_store))
return b'OK'
def find_node(self, rpc_contact: 'KademliaPeer', key: bytes) -> typing.List[typing.Tuple[bytes, str, int]]:
@ -97,7 +105,7 @@ class KademliaRPC:
if not rpc_contact.tcp_port or peer.compact_address_tcp() != rpc_contact.compact_address_tcp()
]
# if we don't have k storing peers to return and we have this hash locally, include our contact information
if len(peers) < constants.K and binascii.hexlify(key).decode() in self.protocol.data_store.completed_blobs:
if len(peers) < constants.K and key.hex() in self.protocol.data_store.completed_blobs:
peers.append(self.compact_address())
if not peers:
response[PAGE_KEY] = 0
@ -260,6 +268,30 @@ class PingQueue:
class KademliaProtocol(DatagramProtocol):
request_sent_metric = Counter(
"request_sent", "Number of requests send from DHT RPC protocol", namespace="dht_node",
labelnames=("method",),
)
request_success_metric = Counter(
"request_success", "Number of successful requests", namespace="dht_node",
labelnames=("method",),
)
request_error_metric = Counter(
"request_error", "Number of errors returned from request to other peers", namespace="dht_node",
labelnames=("method",),
)
HISTOGRAM_BUCKETS = (
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 3.0, 3.5, 4.0, 4.50, 5.0, 5.50, 6.0, float('inf')
)
response_time_metric = Histogram(
"response_time", "Response times of DHT RPC requests", namespace="dht_node", buckets=HISTOGRAM_BUCKETS,
labelnames=("method",)
)
received_request_metric = Counter(
"received_request", "Number of received DHT RPC requests", namespace="dht_node",
labelnames=("method",),
)
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, external_ip: str,
udp_port: int, peer_port: int, rpc_timeout: float = constants.RPC_TIMEOUT,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
@ -415,8 +447,8 @@ class KademliaProtocol(DatagramProtocol):
self._wakeup_routing_task.clear()
def _handle_rpc(self, sender_contact: 'KademliaPeer', message: RequestDatagram):
assert sender_contact.node_id != self.node_id, (binascii.hexlify(sender_contact.node_id)[:8].decode(),
binascii.hexlify(self.node_id)[:8].decode())
assert sender_contact.node_id != self.node_id, (sender_contact.node_id.hex()[:8],
self.node_id.hex()[:8])
method = message.method
if method not in [b'ping', b'store', b'findNode', b'findValue']:
raise AttributeError('Invalid method: %s' % message.method.decode())
@ -448,6 +480,7 @@ class KademliaProtocol(DatagramProtocol):
def handle_request_datagram(self, address: typing.Tuple[str, int], request_datagram: RequestDatagram):
# This is an RPC method request
self.received_request_metric.labels(method=request_datagram.method).inc()
self.peer_manager.report_last_requested(address[0], address[1])
try:
peer = self.routing_table.get_peer(request_datagram.node_id)
@ -561,7 +594,7 @@ class KademliaProtocol(DatagramProtocol):
message = decode_datagram(datagram)
except (ValueError, TypeError, DecodeError):
self.peer_manager.report_failure(address[0], address[1])
log.warning("Couldn't decode dht datagram from %s: %s", address, binascii.hexlify(datagram).decode())
log.warning("Couldn't decode dht datagram from %s: %s", address, datagram.hex())
return
if isinstance(message, RequestDatagram):
@ -576,14 +609,19 @@ class KademliaProtocol(DatagramProtocol):
self._send(peer, request)
response_fut = self.sent_messages[request.rpc_id][1]
try:
self.request_sent_metric.labels(method=request.method).inc()
start = time.perf_counter()
response = await asyncio.wait_for(response_fut, self.rpc_timeout)
self.response_time_metric.labels(method=request.method).observe(time.perf_counter() - start)
self.peer_manager.report_last_replied(peer.address, peer.udp_port)
self.request_success_metric.labels(method=request.method).inc()
return response
except asyncio.CancelledError:
if not response_fut.done():
response_fut.cancel()
raise
except (asyncio.TimeoutError, RemoteException):
self.request_error_metric.labels(method=request.method).inc()
self.peer_manager.report_failure(peer.address, peer.udp_port)
if self.peer_manager.peer_is_good(peer) is False:
self.remove_peer(peer)
@ -603,7 +641,7 @@ class KademliaProtocol(DatagramProtocol):
if len(data) > constants.MSG_SIZE_LIMIT:
log.warning("cannot send datagram larger than %i bytes (packet is %i bytes)",
constants.MSG_SIZE_LIMIT, len(data))
log.debug("Packet is too large to send: %s", binascii.hexlify(data[:3500]).decode())
log.debug("Packet is too large to send: %s", data[:3500].hex())
raise ValueError(
f"cannot send datagram larger than {constants.MSG_SIZE_LIMIT} bytes (packet is {len(data)} bytes)"
)
@ -663,13 +701,13 @@ class KademliaProtocol(DatagramProtocol):
res = await self.get_rpc_peer(peer).store(hash_value)
if res != b"OK":
raise ValueError(res)
log.debug("Stored %s to %s", binascii.hexlify(hash_value).decode()[:8], peer)
log.debug("Stored %s to %s", hash_value.hex()[:8], peer)
return peer.node_id, True
try:
return await __store()
except asyncio.TimeoutError:
log.debug("Timeout while storing blob_hash %s at %s", binascii.hexlify(hash_value).decode()[:8], peer)
log.debug("Timeout while storing blob_hash %s at %s", hash_value.hex()[:8], peer)
return peer.node_id, False
except ValueError as err:
log.error("Unexpected response: %s", err)
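
The protocol hunks above wrap each outgoing RPC with Prometheus counters and a latency histogram. A self-contained sketch of that instrumentation pattern (metric names here are placeholders so they do not clash with the real dht_node namespace):

import time
from prometheus_client import Counter, Histogram

request_sent = Counter("demo_request_sent", "requests sent", ["method"])
request_success = Counter("demo_request_success", "successful requests", ["method"])
request_error = Counter("demo_request_error", "failed requests", ["method"])
response_time = Histogram("demo_response_time", "request latency in seconds", ["method"])

async def timed_request(method: str, coro):
    request_sent.labels(method=method).inc()
    start = time.perf_counter()
    try:
        result = await coro
    except Exception:
        request_error.labels(method=method).inc()
        raise
    response_time.labels(method=method).observe(time.perf_counter() - start)
    request_success.labels(method=method).inc()
    return result
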

@ -4,6 +4,9 @@ import logging
import typing
import itertools
from prometheus_client import Gauge
from lbry import utils
from lbry.dht import constants
from lbry.dht.protocol.distance import Distance
if typing.TYPE_CHECKING:
@ -13,8 +16,17 @@ log = logging.getLogger(__name__)
class KBucket:
""" Description - later
"""
Kademlia K-bucket implementation.
"""
peer_in_routing_table_metric = Gauge(
"peers_in_routing_table", "Number of peers on routing table", namespace="dht_node",
labelnames=("scope",)
)
peer_with_x_bit_colliding_metric = Gauge(
"peer_x_bit_colliding", "Number of peers with at least X bits colliding with this node id",
namespace="dht_node", labelnames=("amount",)
)
def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int, node_id: bytes):
"""
@ -58,6 +70,9 @@ class KBucket:
return True
if len(self.peers) < constants.K:
self.peers.append(peer)
self.peer_in_routing_table_metric.labels("global").inc()
bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).inc()
return True
else:
return False
@ -124,6 +139,9 @@ class KBucket:
def remove_peer(self, peer: 'KademliaPeer') -> None:
self.peers.remove(peer)
self.peer_in_routing_table_metric.labels("global").dec()
bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).dec()
def key_in_range(self, key: bytes) -> bool:
""" Tests whether the specified key (i.e. node ID) is in the range
@ -162,6 +180,10 @@ class TreeRoutingTable:
ping RPC-based k-bucket eviction algorithm described in section 2.2 of
that paper.
"""
bucket_in_routing_table_metric = Gauge(
"buckets_in_routing_table", "Number of buckets on routing table", namespace="dht_node",
labelnames=("scope",)
)
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', parent_node_id: bytes,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
@ -279,6 +301,7 @@ class TreeRoutingTable:
# ...and remove them from the old bucket
for contact in new_bucket.peers:
old_bucket.remove_peer(contact)
self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
def join_buckets(self):
if len(self.buckets) == 1:
@ -302,6 +325,7 @@ class TreeRoutingTable:
elif can_go_higher:
self.buckets[bucket_index_to_pop + 1].range_min = bucket.range_min
self.buckets.remove(bucket)
self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
return self.join_buckets()
def contact_in_routing_table(self, address_tuple: typing.Tuple[str, int]) -> bool:

@ -35,6 +35,10 @@ Code | Name | Message
111 | GenericInputValue | The value '{value}' for argument '{argument}' is not valid.
112 | InputValueIsNone | None or null is not valid value for argument '{argument}'.
113 | ConflictingInputValue | Only '{first_argument}' or '{second_argument}' is allowed, not both.
114 | InputStringIsBlank | {argument} cannot be blank.
115 | EmptyPublishedFile | Cannot publish empty file: {file_path}
116 | MissingPublishedFile | File does not exist: {file_path}
117 | InvalidStreamURL | Invalid LBRY stream URL: '{url}' -- When an URL cannot be downloaded, such as '@Channel/' or a collection
**2xx** | Configuration | Configuration errors.
201 | ConfigWrite | Cannot write configuration file '{path}'. -- When writing the default config fails on startup, such as due to permission issues.
202 | ConfigRead | Cannot find provided configuration file '{path}'. -- Can't open the config file user provided via command line args.
@ -52,6 +56,7 @@ Code | Name | Message
405 | ChannelKeyNotFound | Channel signing key not found.
406 | ChannelKeyInvalid | Channel signing key is out of date. -- For example, channel was updated but you don't have the updated key.
407 | DataDownload | Failed to download blob. *generic*
408 | PrivateKeyNotFound | Couldn't find private key for {key} '{value}'.
410 | Resolve | Failed to resolve '{url}'.
411 | ResolveTimeout | Failed to resolve '{url}' within the timeout.
411 | ResolveCensored | Resolve of '{url}' was censored by channel with claim id '{censor_id}'.
@ -59,6 +64,7 @@ Code | Name | Message
421 | InvalidPassword | Password is invalid.
422 | IncompatibleWalletServer | '{server}:{port}' has an incompatibly old version.
423 | TooManyClaimSearchParameters | {key} cant have more than {limit} items.
424 | AlreadyPurchased | You already have a purchase for claim_id '{claim_id_hex}'. Use --allow-duplicate-purchase flag to override.
431 | ServerPaymentInvalidAddress | Invalid address from wallet server: '{address}' - skipping payment round.
432 | ServerPaymentWalletLocked | Cannot spend funds with locked wallet, skipping payment round.
433 | ServerPaymentFeeAboveMaxAllowed | Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.

@ -84,6 +84,37 @@ class ConflictingInputValueError(InputValueError):
super().__init__(f"Only '{first_argument}' or '{second_argument}' is allowed, not both.")
class InputStringIsBlankError(InputValueError):
def __init__(self, argument):
self.argument = argument
super().__init__(f"{argument} cannot be blank.")
class EmptyPublishedFileError(InputValueError):
def __init__(self, file_path):
self.file_path = file_path
super().__init__(f"Cannot publish empty file: {file_path}")
class MissingPublishedFileError(InputValueError):
def __init__(self, file_path):
self.file_path = file_path
super().__init__(f"File does not exist: {file_path}")
class InvalidStreamURLError(InputValueError):
"""
When an URL cannot be downloaded, such as '@Channel/' or a collection
"""
def __init__(self, url):
self.url = url
super().__init__(f"Invalid LBRY stream URL: '{url}'")
class ConfigurationError(BaseError):
"""
Configuration errors.
@ -207,6 +238,14 @@ class DataDownloadError(WalletError):
super().__init__("Failed to download blob. *generic*")
class PrivateKeyNotFoundError(WalletError):
def __init__(self, key, value):
self.key = key
self.value = value
super().__init__(f"Couldn't find private key for {key} '{value}'.")
class ResolveError(WalletError):
def __init__(self, url):
@ -223,9 +262,10 @@ class ResolveTimeoutError(WalletError):
class ResolveCensoredError(WalletError):
def __init__(self, url, censor_id):
def __init__(self, url, censor_id, censor_row):
self.url = url
self.censor_id = censor_id
self.censor_row = censor_row
super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{censor_id}'.")
@ -258,6 +298,16 @@ class TooManyClaimSearchParametersError(WalletError):
super().__init__(f"{key} cant have more than {limit} items.")
class AlreadyPurchasedError(WalletError):
"""
allow-duplicate-purchase flag to override.
"""
def __init__(self, claim_id_hex):
self.claim_id_hex = claim_id_hex
super().__init__(f"You already have a purchase for claim_id '{claim_id_hex}'. Use")
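
The classes above correspond to the error table earlier in this diff and keep their arguments as attributes; a hedged usage sketch (assuming the new classes are importable from lbry.error as shown):

from lbry.error import InvalidStreamURLError, AlreadyPurchasedError

def check_url(url: str) -> None:
    # '@Channel/' style URLs cannot be downloaded, per the error's description
    if url.endswith('/'):
        raise InvalidStreamURLError(url)

try:
    check_url('@Channel/')
except InvalidStreamURLError as err:
    print(err.url, '->', str(err))

err = AlreadyPurchasedError('beef' * 10)   # made-up claim id
print(err.claim_id_hex)
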
class ServerPaymentInvalidAddressError(WalletError):
def __init__(self, address):

@ -226,6 +226,9 @@ def get_argument_parser():
def ensure_directory_exists(path: str):
if not os.path.isdir(path):
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
use_effective_ids = os.access in os.supports_effective_ids
if not os.access(path, os.W_OK, effective_ids=use_effective_ids):
raise PermissionError(f"The following directory is not writable: {path}")
LOG_MODULES = 'lbry', 'aioupnp'

@ -18,6 +18,7 @@ DOWNLOAD_STARTED = 'Download Started'
DOWNLOAD_ERRORED = 'Download Errored'
DOWNLOAD_FINISHED = 'Download Finished'
HEARTBEAT = 'Heartbeat'
DISK_SPACE = 'Disk Space'
CLAIM_ACTION = 'Claim Action' # publish/create/update/abandon
NEW_CHANNEL = 'New Channel'
CREDITS_SENT = 'Credits Sent'
@ -169,6 +170,15 @@ class AnalyticsManager:
})
)
async def send_disk_space_used(self, storage_used, storage_limit, is_from_network_quota):
await self.track(
self._event(DISK_SPACE, {
'used': storage_used,
'limit': storage_limit,
'from_network_quota': is_from_network_quota
})
)
async def send_server_startup(self):
await self.track(self._event(SERVER_STARTUP))

@ -4,6 +4,7 @@ import asyncio
import logging
import binascii
import typing
import base58
from aioupnp import __version__ as aioupnp_version
@ -17,6 +18,7 @@ from lbry.dht.blob_announcer import BlobAnnouncer
from lbry.blob.blob_manager import BlobManager
from lbry.blob.disk_space_manager import DiskSpaceManager
from lbry.blob_exchange.server import BlobServer
from lbry.stream.background_downloader import BackgroundDownloader
from lbry.stream.stream_manager import StreamManager
from lbry.file.file_manager import FileManager
from lbry.extras.daemon.component import Component
@ -42,6 +44,7 @@ DHT_COMPONENT = "dht"
HASH_ANNOUNCER_COMPONENT = "hash_announcer"
FILE_MANAGER_COMPONENT = "file_manager"
DISK_SPACE_COMPONENT = "disk_space"
BACKGROUND_DOWNLOADER_COMPONENT = "background_downloader"
PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
UPNP_COMPONENT = "upnp"
EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
@ -61,7 +64,7 @@ class DatabaseComponent(Component):
@staticmethod
def get_current_db_revision():
return 14
return 15
@property
def revision_filename(self):
@ -377,24 +380,99 @@ class FileManagerComponent(Component):
self.file_manager.stop()
class DiskSpaceComponent(Component):
component_name = DISK_SPACE_COMPONENT
class BackgroundDownloaderComponent(Component):
MIN_PREFIX_COLLIDING_BITS = 8
component_name = BACKGROUND_DOWNLOADER_COMPONENT
depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT, DISK_SPACE_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.disk_space_manager = DiskSpaceManager(self.conf)
self.background_task: typing.Optional[asyncio.Task] = None
self.download_loop_delay_seconds = 60
self.ongoing_download: typing.Optional[asyncio.Task] = None
self.space_manager: typing.Optional[DiskSpaceManager] = None
self.blob_manager: typing.Optional[BlobManager] = None
self.background_downloader: typing.Optional[BackgroundDownloader] = None
self.dht_node: typing.Optional[Node] = None
self.space_available: typing.Optional[int] = None
@property
def is_busy(self):
return bool(self.ongoing_download and not self.ongoing_download.done())
@property
def component(self) -> 'BackgroundDownloaderComponent':
return self
async def get_status(self):
return {'running': self.background_task is not None and not self.background_task.done(),
'available_free_space_mb': self.space_available,
'ongoing_download': self.is_busy}
async def download_blobs_in_background(self):
while True:
self.space_available = await self.space_manager.get_free_space_mb(True)
if not self.is_busy and self.space_available > 10:
self._download_next_close_blob_hash()
await asyncio.sleep(self.download_loop_delay_seconds)
def _download_next_close_blob_hash(self):
node_id = self.dht_node.protocol.node_id
for blob_hash in self.dht_node.stored_blob_hashes:
if blob_hash.hex() in self.blob_manager.completed_blob_hashes:
continue
if utils.get_colliding_prefix_bits(node_id, blob_hash) >= self.MIN_PREFIX_COLLIDING_BITS:
self.ongoing_download = asyncio.create_task(self.background_downloader.download_blobs(blob_hash.hex()))
return
async def start(self):
self.space_manager: DiskSpaceManager = self.component_manager.get_component(DISK_SPACE_COMPONENT)
if not self.component_manager.has_component(DHT_COMPONENT):
return
self.dht_node = self.component_manager.get_component(DHT_COMPONENT)
self.blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
storage = self.component_manager.get_component(DATABASE_COMPONENT)
self.background_downloader = BackgroundDownloader(self.conf, storage, self.blob_manager, self.dht_node)
self.background_task = asyncio.create_task(self.download_blobs_in_background())
async def stop(self):
if self.ongoing_download and not self.ongoing_download.done():
self.ongoing_download.cancel()
if self.background_task:
self.background_task.cancel()
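
The component above only starts a background download when there is free network-quota space and the candidate blob hash shares at least MIN_PREFIX_COLLIDING_BITS leading bits with the node id. A condensed sketch of that gate (prefix counting inlined for equal-length ids; thresholds taken from the hunk above):

MIN_PREFIX_COLLIDING_BITS = 8
MIN_FREE_SPACE_MB = 10   # the component above skips downloading when 10 MB or less is free

def should_background_download(node_id: bytes, blob_hash: bytes,
                               completed_hex: set, free_space_mb: int) -> bool:
    if free_space_mb <= MIN_FREE_SPACE_MB or blob_hash.hex() in completed_hex:
        return False
    xored = int.from_bytes(node_id, "big") ^ int.from_bytes(blob_hash, "big")
    shared_bits = len(node_id) * 8 - xored.bit_length()
    return shared_bits >= MIN_PREFIX_COLLIDING_BITS
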
class DiskSpaceComponent(Component):
component_name = DISK_SPACE_COMPONENT
depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.disk_space_manager: typing.Optional[DiskSpaceManager] = None
@property
def component(self) -> typing.Optional[DiskSpaceManager]:
return self.disk_space_manager
async def get_status(self):
return {
'space_used': str(self.disk_space_manager.space_used_mb),
'running': self.disk_space_manager.running,
}
if self.disk_space_manager:
space_used = await self.disk_space_manager.get_space_used_mb(cached=True)
return {
'total_used_mb': space_used['total'],
'published_blobs_storage_used_mb': space_used['private_storage'],
'content_blobs_storage_used_mb': space_used['content_storage'],
'seed_blobs_storage_used_mb': space_used['network_storage'],
'running': self.disk_space_manager.running,
}
return {'space_used': '0', 'network_seeding_space_used': '0', 'running': False}
async def start(self):
db = self.component_manager.get_component(DATABASE_COMPONENT)
blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
self.disk_space_manager = DiskSpaceManager(
self.conf, db, blob_manager,
analytics=self.component_manager.analytics_manager
)
await self.disk_space_manager.start()
async def stop(self):

@ -38,7 +38,8 @@ from lbry.dht.peer import make_kademlia_peer
from lbry.error import (
DownloadSDTimeoutError, ComponentsNotStartedError, ComponentStartConditionNotMetError,
CommandDoesNotExistError, BaseError, WalletNotFoundError, WalletAlreadyLoadedError, WalletAlreadyExistsError,
ConflictingInputValueError
ConflictingInputValueError, AlreadyPurchasedError, PrivateKeyNotFoundError, InputStringIsBlankError,
InputValueError
)
from lbry.extras import system_info
from lbry.extras.daemon import analytics
@ -205,12 +206,25 @@ def fix_kwargs_for_hub(**kwargs):
elif key == "not_channel_ids":
kwargs["channel_id"] = {"invert": True, "value": kwargs.pop("not_channel_ids")}
elif key in MY_RANGE_FIELDS:
operator = '='
if isinstance(value, str) and value[0] in opcodes:
operator_length = 2 if value[:2] in opcodes else 1
operator, value = value[:operator_length], value[operator_length:]
value = [str(value if key != 'fee_amount' else Decimal(value)*1000)]
kwargs[key] = {"op": opcodes[operator], "value": value}
constraints = []
for val in value if isinstance(value, list) else [value]:
operator = '='
if isinstance(val, str) and val[0] in opcodes:
operator_length = 2 if val[:2] in opcodes else 1
operator, val = val[:operator_length], val[operator_length:]
val = [int(val if key != 'fee_amount' else Decimal(val)*1000)]
constraints.append({"op": opcodes[operator], "value": val})
kwargs[key] = constraints
elif key == 'order_by': # TODO: remove this after removing support for old trending args from the api
value = value if isinstance(value, list) else [value]
new_value = []
for new_v in value:
migrated = new_v if new_v not in (
'trending_mixed', 'trending_local', 'trending_global', 'trending_group'
) else 'trending_score'
if migrated not in new_value:
new_value.append(migrated)
kwargs[key] = new_value
return kwargs
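
fix_kwargs_for_hub above now turns each range-field value, or each element of a list of values, into its own operator/value constraint. A standalone sketch of that parsing (the opcodes mapping below is an assumption for illustration, not copied from the daemon):

from typing import Any, Dict, List, Union

opcodes = {'=': 0, '<=': 1, '>=': 2, '<': 3, '>': 4}   # hypothetical operator codes

def parse_range_constraints(value: Union[str, int, List[Any]]) -> List[Dict[str, Any]]:
    constraints = []
    for val in value if isinstance(value, list) else [value]:
        operator = '='
        if isinstance(val, str) and val[0] in opcodes:
            operator_length = 2 if val[:2] in opcodes else 1
            operator, val = val[:operator_length], val[operator_length:]
        constraints.append({"op": opcodes[operator], "value": [int(val)]})
    return constraints

print(parse_range_constraints([">1000000", "<2000000"]))
# -> [{'op': 4, 'value': [1000000]}, {'op': 3, 'value': [2000000]}]
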
@ -951,7 +965,12 @@ class Daemon(metaclass=JSONRPCServerType):
},
'total_outgoing_mps': (float) megabytes per second sent,
'total_incoming_mps': (float) megabytes per second received,
'time': (float) timestamp
'max_outgoing_mbs': (float) maximum bandwidth (megabytes per second) sent, since the
daemon was started
'max_incoming_mbs': (float) maximum bandwidth (megabytes per second) received, since the
daemon was started
'total_sent' : (int) total number of bytes sent since the daemon was started
'total_received' : (int) total number of bytes received since the daemon was started
}
},
'hash_announcer': {
@ -2040,7 +2059,7 @@ class Daemon(metaclass=JSONRPCServerType):
--channel_claim_id=<channel_claim_id> : (str) get file with matching channel claim id(s)
--channel_name=<channel_name> : (str) get file with matching channel name
--claim_name=<claim_name> : (str) get file with matching claim name
--blobs_in_stream<blobs_in_stream> : (int) get file with matching blobs in stream
--blobs_in_stream=<blobs_in_stream> : (int) get file with matching blobs in stream
--download_path=<download_path> : (str) get file with matching download path
--uploading_to_reflector=<uploading_to_reflector> : (bool) get files currently uploading to reflector
--is_fully_reflected=<is_fully_reflected> : (bool) get files that have been uploaded to reflector
@ -2282,7 +2301,7 @@ class Daemon(metaclass=JSONRPCServerType):
accounts = wallet.get_accounts_or_all(funding_account_ids)
txo = None
if claim_id:
txo = await self.ledger.get_claim_by_claim_id(accounts, claim_id, include_purchase_receipt=True)
txo = await self.ledger.get_claim_by_claim_id(claim_id, accounts, include_purchase_receipt=True)
if not isinstance(txo, Output) or not txo.is_claim:
# TODO: use error from lbry.error
raise Exception(f"Could not find claim with claim_id '{claim_id}'.")
@ -2295,11 +2314,7 @@ class Daemon(metaclass=JSONRPCServerType):
# TODO: use error from lbry.error
raise Exception("Missing argument claim_id or url.")
if not allow_duplicate_purchase and txo.purchase_receipt:
# TODO: use error from lbry.error
raise Exception(
f"You already have a purchase for claim_id '{claim_id}'. "
f"Use --allow-duplicate-purchase flag to override."
)
raise AlreadyPurchasedError(claim_id)
claim = txo.claim
if not claim.is_stream or not claim.stream.has_fee:
# TODO: use error from lbry.error
@ -2397,6 +2412,9 @@ class Daemon(metaclass=JSONRPCServerType):
value with an equality constraint such as '>', '>=', '<' and '<='
eg. --height=">400000" would limit results to only claims above 400k block height.
They also support multiple constraints passed as a list of the args described above.
eg. --release_time=[">1000000", "<2000000"]
Usage:
claim_search [<name> | --name=<name>] [--text=<text>] [--txid=<txid>] [--nout=<nout>]
[--claim_id=<claim_id> | --claim_ids=<claim_ids>...]
@ -2411,7 +2429,7 @@ class Daemon(metaclass=JSONRPCServerType):
[--amount=<amount>] [--effective_amount=<effective_amount>]
[--support_amount=<support_amount>] [--trending_group=<trending_group>]
[--trending_mixed=<trending_mixed>] [--trending_local=<trending_local>]
[--trending_global=<trending_global>]
[--trending_global=<trending_global>] [--trending_score=<trending_score>]
[--reposted_claim_id=<reposted_claim_id>] [--reposted=<reposted>]
[--claim_type=<claim_type>] [--stream_types=<stream_types>...] [--media_types=<media_types>...]
[--fee_currency=<fee_currency>] [--fee_amount=<fee_amount>]
@ -2423,7 +2441,7 @@ class Daemon(metaclass=JSONRPCServerType):
[--not_locations=<not_locations>...]
[--order_by=<order_by>...] [--no_totals] [--page=<page>] [--page_size=<page_size>]
[--wallet_id=<wallet_id>] [--include_purchase_receipt] [--include_is_my_output]
[--remove_duplicates] [--has_source | --has_no_source]
[--remove_duplicates] [--has_source | --has_no_source] [--sd_hash=<sd_hash>]
[--new_sdk_server=<new_sdk_server>]
Options:
@ -2479,25 +2497,11 @@ class Daemon(metaclass=JSONRPCServerType):
all tips and supports received), this amount is
blank until claim has reached activation height
(supports equality constraints)
--trending_group=<trending_group>: (int) group numbers 1 through 4 representing the
trending groups of the content: 4 means
content is trending globally and independently,
3 means content is not trending globally but is
trending independently (locally), 2 means it is
trending globally but not independently and 1
means it's not trending globally or locally
(supports equality constraints)
--trending_mixed=<trending_mixed>: (int) trending amount taken from the global or local
value depending on the trending group:
4 - global value, 3 - local value, 2 - global
value, 1 - local value (supports equality
constraints)
--trending_local=<trending_local>: (int) trending value calculated relative only to
the individual contents past history (supports
equality constraints)
--trending_global=<trending_global>: (int) trending value calculated relative to all
trending content globally (supports
equality constraints)
--trending_score=<trending_score>: (int) limit by trending score (supports equality constraints)
--trending_group=<trending_group>: (int) DEPRECATED - instead please use trending_score
--trending_mixed=<trending_mixed>: (int) DEPRECATED - instead please use trending_score
--trending_local=<trending_local>: (int) DEPRECATED - instead please use trending_score
--trending_global=<trending_global>: (int) DEPRECATED - instead please use trending_score
--reposted_claim_id=<reposted_claim_id>: (str) all reposts of the specified original claim id
--reposted=<reposted> : (int) claims reposted this many times (supports
equality constraints)
@ -2535,6 +2539,8 @@ class Daemon(metaclass=JSONRPCServerType):
--remove_duplicates : (bool) removes duplicated content from search by picking either the
original claim or the oldest matching repost
--has_source : (bool) find claims containing a source field
--sd_hash=<sd_hash> : (str) find claims where the source stream descriptor hash matches
(partially or completely) the given hexadecimal string
--has_no_source : (bool) find claims not containing a source field
--new_sdk_server=<new_sdk_server> : (str) URL of the new SDK server (EXPERIMENTAL)
@ -2565,6 +2571,17 @@ class Daemon(metaclass=JSONRPCServerType):
kwargs['signature_valid'] = 0
if 'has_no_source' in kwargs:
kwargs['has_source'] = not kwargs.pop('has_no_source')
if 'order_by' in kwargs: # TODO: remove this after removing support for old trending args from the api
value = kwargs.pop('order_by')
value = value if isinstance(value, list) else [value]
new_value = []
for new_v in value:
migrated = new_v if new_v not in (
'trending_mixed', 'trending_local', 'trending_global', 'trending_group'
) else 'trending_score'
if migrated not in new_value:
new_value.append(migrated)
kwargs['order_by'] = new_value
page_num, page_size = abs(kwargs.pop('page', 1)), min(abs(kwargs.pop('page_size', DEFAULT_PAGE_SIZE)), 50)
wallet = self.wallet_manager.get_wallet_or_default(kwargs.pop('wallet_id', None))
kwargs.update({'offset': page_size * (page_num - 1), 'limit': page_size})
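
The order_by migration above can be exercised on its own. Below is a standalone sketch (the function name is illustrative, not part of the daemon) that mirrors how the deprecated trending sort keys collapse into trending_score:

# Mirrors the order_by migration above: any deprecated trending sort key
# maps to a single 'trending_score' entry, duplicates are dropped, and the
# original ordering is otherwise preserved.
DEPRECATED_TRENDING = ('trending_mixed', 'trending_local', 'trending_global', 'trending_group')

def migrate_order_by(value):
    value = value if isinstance(value, list) else [value]
    new_value = []
    for item in value:
        migrated = 'trending_score' if item in DEPRECATED_TRENDING else item
        if migrated not in new_value:
            new_value.append(migrated)
    return new_value

assert migrate_order_by('trending_mixed') == ['trending_score']
assert migrate_order_by(['trending_global', 'release_time']) == ['trending_score', 'release_time']
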
@ -2897,7 +2914,7 @@ class Daemon(metaclass=JSONRPCServerType):
wallet, channel_account_id, channel_id, channel_name, for_signing=True
)
timestamp = str(int(time.time()))
signature = signing_channel.sign_data(unhexlify(hexdata), timestamp)
signature = signing_channel.sign_data(unhexlify(str(hexdata)), timestamp)
return {
'signature': signature,
'signing_ts': timestamp
@ -3598,15 +3615,17 @@ class Daemon(metaclass=JSONRPCServerType):
)
if len(existing_claims) != 1:
account_ids = ', '.join(f"'{account.id}'" for account in accounts)
# TODO: use error from lbry.error
raise Exception(
raise InputValueError(
f"Can't find the stream '{claim_id}' in account(s) {account_ids}."
)
old_txo = existing_claims[0]
if not old_txo.claim.is_stream:
# TODO: use error from lbry.error
raise Exception(
f"A claim with id '{claim_id}' was found but it is not a stream claim."
if not old_txo.claim.is_stream and not old_txo.claim.is_repost:
# in principle it should work with any type of claim, but it's safer to
# limit it to ones we know won't be broken. in the future we can expand
# this if we have a test case for e.g. channel or support claims
raise InputValueError(
f"A claim with id '{claim_id}' was found but it is not a stream or repost claim."
)
if bid is not None:
@ -3620,7 +3639,7 @@ class Daemon(metaclass=JSONRPCServerType):
claim_address = old_txo.get_address(account.ledger)
channel = None
if channel_id or channel_name:
if not clear_channel and (channel_id or channel_name):
channel = await self.get_channel_or_error(
wallet, channel_account_id, channel_id, channel_name, for_signing=True)
elif old_txo.claim.is_signed and not clear_channel and not replace:
@ -3637,26 +3656,32 @@ class Daemon(metaclass=JSONRPCServerType):
if replace:
claim = Claim()
if old_txo.claim.stream.has_source:
claim.stream.message.source.CopyFrom(
old_txo.claim.stream.message.source
)
stream_type = old_txo.claim.stream.stream_type
if stream_type:
old_stream_type = getattr(old_txo.claim.stream.message, stream_type)
new_stream_type = getattr(claim.stream.message, stream_type)
new_stream_type.CopyFrom(old_stream_type)
claim.stream.update(file_path=file_path, **kwargs)
if old_txo.claim.is_stream:
if old_txo.claim.stream.has_source:
claim.stream.message.source.CopyFrom(
old_txo.claim.stream.message.source
)
stream_type = old_txo.claim.stream.stream_type
if stream_type:
old_stream_type = getattr(old_txo.claim.stream.message, stream_type)
new_stream_type = getattr(claim.stream.message, stream_type)
new_stream_type.CopyFrom(old_stream_type)
else:
claim = Claim.from_bytes(old_txo.claim.to_bytes())
claim.stream.update(file_path=file_path, **kwargs)
tx = await Transaction.claim_update(
old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0], channel
)
new_txo = tx.outputs[0]
if old_txo.claim.is_stream:
claim.stream.update(file_path=file_path, **kwargs)
if clear_channel:
claim.clear_signature()
tx = await Transaction.claim_update(
old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0],
channel if not clear_channel else None
)
new_txo = tx.outputs[0]
stream_hash = None
if not preview:
if not preview and old_txo.claim.is_stream:
old_stream = self.file_manager.get_filtered(sd_hash=old_txo.claim.stream.source.sd_hash)
old_stream = old_stream[0] if old_stream else None
if file_path is not None:
@ -4152,7 +4177,7 @@ class Daemon(metaclass=JSONRPCServerType):
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if claim_id:
txo = await self.ledger.get_claim_by_claim_id(wallet.accounts, claim_id)
txo = await self.ledger.get_claim_by_claim_id(claim_id, wallet.accounts)
if not isinstance(txo, Output) or not txo.is_claim:
# TODO: use error from lbry.error
raise Exception(f"Could not find collection with claim_id '{claim_id}'.")
@ -4219,7 +4244,7 @@ class Daemon(metaclass=JSONRPCServerType):
funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
channel = await self.get_channel_or_none(wallet, channel_account_id, channel_id, channel_name, for_signing=True)
amount = self.get_dewies_or_error("amount", amount)
claim = await self.ledger.get_claim_by_claim_id(wallet.accounts, claim_id)
claim = await self.ledger.get_claim_by_claim_id(claim_id)
claim_address = claim.get_address(self.ledger)
if not tip:
account = wallet.get_account_or_default(account_id)
@ -4860,20 +4885,16 @@ class Daemon(metaclass=JSONRPCServerType):
"""
@requires(DHT_COMPONENT)
async def jsonrpc_peer_list(self, blob_hash, search_bottom_out_limit=None, page=None, page_size=None):
async def jsonrpc_peer_list(self, blob_hash, page=None, page_size=None):
"""
Get peers for blob hash
Usage:
peer_list (<blob_hash> | --blob_hash=<blob_hash>)
[<search_bottom_out_limit> | --search_bottom_out_limit=<search_bottom_out_limit>]
[--page=<page>] [--page_size=<page_size>]
Options:
--blob_hash=<blob_hash> : (str) find available peers for this blob hash
--search_bottom_out_limit=<search_bottom_out_limit> : (int) the number of search probes in a row
that don't find any new peers
before giving up and returning
--page=<page> : (int) page to return during paginating
--page_size=<page_size> : (int) number of items on page during pagination
@ -4885,13 +4906,6 @@ class Daemon(metaclass=JSONRPCServerType):
if not is_valid_blobhash(blob_hash):
# TODO: use error from lbry.error
raise Exception("invalid blob hash")
if search_bottom_out_limit is not None:
search_bottom_out_limit = int(search_bottom_out_limit)
if search_bottom_out_limit <= 0:
# TODO: use error from lbry.error
raise Exception("invalid bottom out limit")
else:
search_bottom_out_limit = 4
peers = []
peer_q = asyncio.Queue(loop=self.component_manager.loop)
await self.dht_node._peers_for_value_producer(blob_hash, peer_q)
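
With search_bottom_out_limit removed, callers pass only the blob hash plus optional pagination. A minimal sketch of invoking the method over the daemon's JSON-RPC interface; the API URL and the use of aiohttp are assumptions for illustration, not part of this change:

import asyncio
import aiohttp

async def peer_list(blob_hash, page=1, page_size=50, api_url="http://localhost:5279"):
    # search_bottom_out_limit is no longer accepted; only blob_hash and paging remain.
    payload = {"method": "peer_list",
               "params": {"blob_hash": blob_hash, "page": page, "page_size": page_size}}
    async with aiohttp.ClientSession() as session:
        async with session.post(api_url, json=payload) as response:
            return await response.json()

# asyncio.run(peer_list("<96-character blob hash>"))
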
@ -4961,7 +4975,8 @@ class Daemon(metaclass=JSONRPCServerType):
--finished : (bool) only return finished blobs
--uri=<uri> : (str) filter blobs by stream in a uri
--stream_hash=<stream_hash> : (str) filter blobs by stream hash
--sd_hash=<sd_hash> : (str) filter blobs by sd hash
--sd_hash=<sd_hash>                     : (str) filter blobs in a stream by sd hash, i.e. the hash of the stream
descriptor blob for a stream that has been downloaded
--page=<page> : (int) page to return during paginating
--page_size=<page_size> : (int) number of items on page during pagination
@ -5041,7 +5056,7 @@ class Daemon(metaclass=JSONRPCServerType):
Returns:
(bool) true if successful
"""
return self.disk_space_manager.clean()
return await self.disk_space_manager.clean()
@requires(FILE_MANAGER_COMPONENT)
async def jsonrpc_file_reflect(self, **kwargs):
@ -5072,8 +5087,8 @@ class Daemon(metaclass=JSONRPCServerType):
else:
server, port = random.choice(self.conf.reflector_servers)
reflected = await asyncio.gather(*[
self.file_manager['stream'].reflect_stream(stream, server, port)
for stream in self.file_manager.get_filtered_streams(**kwargs)
self.file_manager.source_managers['stream'].reflect_stream(stream, server, port)
for stream in self.file_manager.get_filtered(**kwargs)
])
total = []
for reflected_for_stream in reflected:
@ -5130,10 +5145,12 @@ class Daemon(metaclass=JSONRPCServerType):
]
},
"node_id": (str) the local dht node id
"prefix_neighbors_count": (int) the amount of peers sharing the same byte prefix of the local node id
}
"""
result = {
'buckets': {}
'buckets': {},
'prefix_neighbors_count': 0
}
for i, _ in enumerate(self.dht_node.protocol.routing_table.buckets):
@ -5146,6 +5163,7 @@ class Daemon(metaclass=JSONRPCServerType):
"node_id": hexlify(peer.node_id).decode(),
}
result['buckets'][i].append(host)
result['prefix_neighbors_count'] += 1 if peer.node_id[0] == self.dht_node.protocol.node_id[0] else 0
result['node_id'] = hexlify(self.dht_node.protocol.node_id).decode()
return result
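
prefix_neighbors_count is simply the number of routing-table peers whose node id begins with the same byte as the local node id. A self-contained sketch of that comparison (the node ids below are random placeholders):

import os

local_node_id = os.urandom(48)
peer_node_ids = [os.urandom(48) for _ in range(20)]

# A peer counts as a "prefix neighbor" when the first byte of its id matches
# ours, mirroring the peer.node_id[0] == protocol.node_id[0] check above.
prefix_neighbors_count = sum(
    1 for peer_id in peer_node_ids if peer_id[0] == local_node_id[0]
)
print(prefix_neighbors_count)
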
@ -5246,8 +5264,7 @@ class Daemon(metaclass=JSONRPCServerType):
def valid_stream_name_or_error(name: str):
try:
if not name:
# TODO: use error from lbry.error
raise Exception('Stream name cannot be blank.')
raise InputStringIsBlankError('Stream name')
parsed = URL.parse(name)
if parsed.has_channel:
# TODO: use error from lbry.error
@ -5337,7 +5354,7 @@ class Daemon(metaclass=JSONRPCServerType):
if len(channels) == 1:
if for_signing and not channels[0].has_private_key:
# TODO: use error from lbry.error
raise Exception(f"Couldn't find private key for {key} '{value}'. ")
raise PrivateKeyNotFoundError(key, value)
return channels[0]
elif len(channels) > 1:
# TODO: use error from lbry.error


@ -35,6 +35,10 @@ def migrate_db(conf, start, end):
from .migrate12to13 import do_migration
elif current == 13:
from .migrate13to14 import do_migration
elif current == 14:
from .migrate14to15 import do_migration
elif current == 15:
from .migrate15to16 import do_migration
else:
raise Exception(f"DB migration of version {current} to {current+1} is not available")
try:


@ -0,0 +1,16 @@
import os
import sqlite3
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
cursor.executescript("""
alter table blob add column added_on integer not null default 0;
alter table blob add column is_mine integer not null default 1;
""")
connection.commit()
connection.close()
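
After this migration runs, every existing blob row carries added_on (defaulting to 0) and is_mine (defaulting to 1). A quick sanity check against a copy of the database; the path below is an example, not a required location:

import sqlite3

# Inspect the new columns on a copy of lbrynet.sqlite (example path).
connection = sqlite3.connect("/tmp/lbrynet.sqlite")
cursor = connection.cursor()
columns = [row[1] for row in cursor.execute("pragma table_info(blob)")]
assert "added_on" in columns and "is_mine" in columns
print(cursor.execute("select count(*) from blob where is_mine=1").fetchone()[0])
connection.close()
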


@ -0,0 +1,17 @@
import os
import sqlite3
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
cursor.executescript("""
update blob set should_announce=0
where should_announce=1 and
blob.blob_hash in (select stream_blob.blob_hash from stream_blob where position=0);
""")
connection.commit()
connection.close()


@ -20,7 +20,7 @@ def do_migration(conf):
"left outer join blob b ON b.blob_hash=s.blob_hash order by s.position").fetchall()
blobs_by_stream = {}
for stream_hash, position, iv, blob_hash, blob_length in blobs:
blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, blob_hash))
blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, 0, blob_hash))
for stream_name, stream_key, suggested_filename, sd_hash, stream_hash in streams:
sd = StreamDescriptor(None, blob_dir, stream_name, stream_key, suggested_filename,


@ -170,8 +170,8 @@ def get_all_lbry_files(transaction: sqlite3.Connection) -> typing.List[typing.Di
def store_stream(transaction: sqlite3.Connection, sd_blob: 'BlobFile', descriptor: 'StreamDescriptor'):
# add all blobs, except the last one, which is empty
transaction.executemany(
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)",
((blob.blob_hash, blob.length, 0, 0, "pending", 0, 0)
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
((blob.blob_hash, blob.length, 0, 0, "pending", 0, 0, blob.added_on, blob.is_mine)
for blob in (descriptor.blobs[:-1] if len(descriptor.blobs) > 1 else descriptor.blobs) + [sd_blob])
).fetchall()
# associate the blobs to the stream
@ -187,8 +187,8 @@ def store_stream(transaction: sqlite3.Connection, sd_blob: 'BlobFile', descripto
).fetchall()
# ensure should_announce is set regardless if insert was ignored
transaction.execute(
"update blob set should_announce=1 where blob_hash in (?, ?)",
(sd_blob.blob_hash, descriptor.blobs[0].blob_hash,)
"update blob set should_announce=1 where blob_hash in (?)",
(sd_blob.blob_hash,)
).fetchall()
@ -242,7 +242,9 @@ class SQLiteStorage(SQLiteMixin):
should_announce integer not null default 0,
status text not null,
last_announced_time integer,
single_announce integer
single_announce integer,
added_on integer not null,
is_mine integer not null default 0
);
create table if not exists stream (
@ -335,6 +337,7 @@ class SQLiteStorage(SQLiteMixin):
tcp_port integer,
unique (address, udp_port)
);
create index if not exists blob_data on blob(blob_hash, blob_length, is_mine);
"""
def __init__(self, conf: Config, path, loop=None, time_getter: typing.Optional[typing.Callable[[], float]] = None):
@ -356,19 +359,19 @@ class SQLiteStorage(SQLiteMixin):
# # # # # # # # # blob functions # # # # # # # # #
async def add_blobs(self, *blob_hashes_and_lengths: typing.Tuple[str, int], finished=False):
async def add_blobs(self, *blob_hashes_and_lengths: typing.Tuple[str, int, int, int], finished=False):
def _add_blobs(transaction: sqlite3.Connection):
transaction.executemany(
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)",
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
(
(blob_hash, length, 0, 0, "pending" if not finished else "finished", 0, 0)
for blob_hash, length in blob_hashes_and_lengths
(blob_hash, length, 0, 0, "pending" if not finished else "finished", 0, 0, added_on, is_mine)
for blob_hash, length, added_on, is_mine in blob_hashes_and_lengths
)
).fetchall()
if finished:
transaction.executemany(
"update blob set status='finished' where blob.blob_hash=?", (
(blob_hash, ) for blob_hash, _ in blob_hashes_and_lengths
(blob_hash, ) for blob_hash, _, _, _ in blob_hashes_and_lengths
)
).fetchall()
return await self.db.run(_add_blobs)
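
add_blobs now takes four-item tuples: blob hash, length, added_on timestamp and is_mine flag. A hedged usage sketch, assuming an already-initialised SQLiteStorage instance named storage (the hashes and sizes are made up):

import time

async def record_new_blobs(storage):
    # Each tuple is (blob_hash, length, added_on, is_mine), matching the new signature above.
    now = int(time.time())
    await storage.add_blobs(
        ("a" * 96, 2097152, now, 1),   # a blob we created ourselves
        ("b" * 96, 1048576, now, 0),   # a blob downloaded from the network
        finished=True,
    )
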
@ -378,6 +381,11 @@ class SQLiteStorage(SQLiteMixin):
"select status from blob where blob_hash=?", blob_hash
)
def set_announce(self, *blob_hashes):
return self.db.execute_fetchall(
"update blob set should_announce=1 where blob_hash in (?, ?)", blob_hashes
)
def update_last_announced_blobs(self, blob_hashes: typing.List[str]):
def _update_last_announced_blobs(transaction: sqlite3.Connection):
last_announced = self.time_getter()
@ -435,6 +443,60 @@ class SQLiteStorage(SQLiteMixin):
def get_all_blob_hashes(self):
return self.run_and_return_list("select blob_hash from blob")
async def get_stored_blobs(self, is_mine: bool, is_network_blob=False):
is_mine = 1 if is_mine else 0
if is_network_blob:
return await self.db.execute_fetchall(
"select blob.blob_hash, blob.blob_length, blob.added_on "
"from blob left join stream_blob using (blob_hash) "
"where stream_blob.stream_hash is null and blob.is_mine=? "
"order by blob.blob_length desc, blob.added_on asc",
(is_mine,)
)
sd_blobs = await self.db.execute_fetchall(
"select blob.blob_hash, blob.blob_length, blob.added_on "
"from blob join stream on blob.blob_hash=stream.sd_hash join file using (stream_hash) "
"where blob.is_mine=? order by blob.added_on asc",
(is_mine,)
)
content_blobs = await self.db.execute_fetchall(
"select blob.blob_hash, blob.blob_length, blob.added_on "
"from blob join stream_blob using (blob_hash) cross join stream using (stream_hash)"
"cross join file using (stream_hash) where blob.is_mine=? order by blob.added_on asc, blob.blob_length asc",
(is_mine,)
)
return content_blobs + sd_blobs
async def get_stored_blob_disk_usage(self):
total, network_size, content_size, private_size = await self.db.execute_fetchone("""
select coalesce(sum(blob_length), 0) as total,
coalesce(sum(case when
stream_blob.stream_hash is null
then blob_length else 0 end), 0) as network_storage,
coalesce(sum(case when
stream_blob.blob_hash is not null and is_mine=0
then blob_length else 0 end), 0) as content_storage,
coalesce(sum(case when
is_mine=1
then blob_length else 0 end), 0) as private_storage
from blob left join stream_blob using (blob_hash) where blob_hash not in (select sd_hash from stream)
""")
return {
'network_storage': network_size,
'content_storage': content_size,
'private_storage': private_size,
'total': total
}
async def update_blob_ownership(self, sd_hash, is_mine: bool):
is_mine = 1 if is_mine else 0
await self.db.execute_fetchall(
"update blob set is_mine = ? where blob_hash in ("
" select blob_hash from blob natural join stream_blob natural join stream where sd_hash = ?"
") OR blob_hash = ?", (is_mine, sd_hash, sd_hash)
)
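
Together, get_stored_blob_disk_usage and update_blob_ownership let disk space management distinguish locally published data from downloaded content. A sketch of reading the breakdown, again assuming an initialised SQLiteStorage instance named storage:

async def print_disk_usage(storage):
    usage = await storage.get_stored_blob_disk_usage()
    # The keys mirror the query above: private_storage covers is_mine=1 blobs,
    # content_storage covers downloaded stream blobs, and network_storage covers
    # blobs not attached to any stream.
    for key in ('total', 'private_storage', 'content_storage', 'network_storage'):
        print(f"{key}: {usage[key]} bytes")
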
def sync_missing_blobs(self, blob_files: typing.Set[str]) -> typing.Awaitable[typing.Set[str]]:
def _sync_blobs(transaction: sqlite3.Connection) -> typing.Set[str]:
finished_blob_hashes = tuple(
@ -470,7 +532,8 @@ class SQLiteStorage(SQLiteMixin):
def _get_blobs_for_stream(transaction):
crypt_blob_infos = []
stream_blobs = transaction.execute(
"select blob_hash, position, iv from stream_blob where stream_hash=? "
"select s.blob_hash, s.position, s.iv, b.added_on "
"from stream_blob s left outer join blob b on b.blob_hash=s.blob_hash where stream_hash=? "
"order by position asc", (stream_hash, )
).fetchall()
if only_completed:
@ -490,9 +553,10 @@ class SQLiteStorage(SQLiteMixin):
for blob_hash, length in lengths:
blob_length_dict[blob_hash] = length
for blob_hash, position, iv in stream_blobs:
current_time = time.time()
for blob_hash, position, iv, added_on in stream_blobs:
blob_length = blob_length_dict.get(blob_hash, 0)
crypt_blob_infos.append(BlobInfo(position, blob_length, iv, blob_hash))
crypt_blob_infos.append(BlobInfo(position, blob_length, iv, added_on or current_time, blob_hash))
if not blob_hash:
break
return crypt_blob_infos
@ -570,6 +634,10 @@ class SQLiteStorage(SQLiteMixin):
log.debug("update file status %s -> %s", stream_hash, new_status)
return self.db.execute_fetchall("update file set status=? where stream_hash=?", (new_status, stream_hash))
def stop_all_files(self):
log.debug("stopping all files")
return self.db.execute_fetchall("update file set status=?", ("stopped",))
async def change_file_download_dir_and_file_name(self, stream_hash: str, download_dir: typing.Optional[str],
file_name: typing.Optional[str]):
if not file_name or not download_dir:


@ -5,6 +5,7 @@ from typing import Optional
from aiohttp.web import Request
from lbry.error import ResolveError, DownloadSDTimeoutError, InsufficientFundsError
from lbry.error import ResolveTimeoutError, DownloadDataTimeoutError, KeyFeeAboveMaxAllowedError
from lbry.error import InvalidStreamURLError
from lbry.stream.managed_stream import ManagedStream
from lbry.torrent.torrent_manager import TorrentSource
from lbry.utils import cache_concurrent
@ -81,8 +82,11 @@ class FileManager:
payment = None
try:
# resolve the claim
if not URL.parse(uri).has_stream:
raise ResolveError("cannot download a channel claim, specify a /path")
try:
if not URL.parse(uri).has_stream:
raise InvalidStreamURLError(uri)
except ValueError:
raise InvalidStreamURLError(uri)
try:
resolved_result = await asyncio.wait_for(
self.wallet_manager.ledger.resolve(
@ -244,7 +248,7 @@ class FileManager:
raise error
except Exception as err: # forgive data timeout, don't delete stream
expected = (DownloadSDTimeoutError, DownloadDataTimeoutError, InsufficientFundsError,
KeyFeeAboveMaxAllowedError)
KeyFeeAboveMaxAllowedError, ResolveError, InvalidStreamURLError)
if isinstance(err, expected):
log.warning("Failed to download %s: %s", uri, str(err))
elif isinstance(err, asyncio.CancelledError):


@ -10,6 +10,7 @@ from google.protobuf.json_format import MessageToDict
from lbry.crypto.base58 import Base58
from lbry.constants import COIN
from lbry.error import MissingPublishedFileError, EmptyPublishedFileError
from lbry.schema.mime_types import guess_media_type
from lbry.schema.base import Metadata, BaseMessageList
@ -139,10 +140,10 @@ class Source(Metadata):
self.name = os.path.basename(file_path)
self.media_type, stream_type = guess_media_type(file_path)
if not os.path.isfile(file_path):
raise Exception(f"File does not exist: {file_path}")
raise MissingPublishedFileError(file_path)
self.size = os.path.getsize(file_path)
if self.size == 0:
raise Exception(f"Cannot publish empty file: {file_path}")
raise EmptyPublishedFileError(file_path)
self.file_hash_bytes = calculate_sha384_file_hash(file_path)
return stream_type


@ -1,4 +1,6 @@
import os
import filetype
import logging
types_map = {
# http://www.iana.org/assignments/media-types
@ -166,10 +168,38 @@ types_map = {
'.wmv': ('video/x-ms-wmv', 'video')
}
# maps detected extensions to the possible analogs
# i.e. a .cbz file is actually a .zip
synonyms_map = {
'.zip': ['.cbz'],
'.rar': ['.cbr'],
'.ar': ['.a']
}
log = logging.getLogger(__name__)
def guess_media_type(path):
_, ext = os.path.splitext(path)
extension = ext.strip().lower()
try:
kind = filetype.guess(path)
if kind:
real_extension = f".{kind.extension}"
if extension != real_extension:
if extension:
log.warning(f"file extension does not match it's contents: {path}, identified as {real_extension}")
else:
log.debug(f"file {path} does not have extension, identified by it's contents as {real_extension}")
if extension not in synonyms_map.get(real_extension, []):
extension = real_extension
except OSError as error:
pass
if extension[1:]:
if extension in types_map:
return types_map[extension]
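
guess_media_type now cross-checks the claimed extension against the file contents via the filetype package and falls back to the extension-based types_map lookup when nothing conclusive is detected. A small usage sketch; running it requires the lbry package from this tree, and the file written below is a throwaway example:

import os
import tempfile

from lbry.schema.mime_types import guess_media_type

with tempfile.TemporaryDirectory() as tmp:
    # A text file with a video extension: if filetype recognises the contents it
    # overrides the extension (logging a warning); otherwise the extension-based
    # lookup in types_map applies as before.
    path = os.path.join(tmp, "notes.mp4")
    with open(path, "w") as f:
        f.write("just text, not video")
    media_type, stream_type = guess_media_type(path)
    print(media_type, stream_type)
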


@ -1,23 +1,27 @@
import base64
import struct
from typing import List
from typing import List, TYPE_CHECKING, Union, Optional
from binascii import hexlify
from itertools import chain
from lbry.error import ResolveCensoredError
from lbry.schema.types.v2.result_pb2 import Outputs as OutputsMessage
from lbry.schema.types.v2.result_pb2 import Error as ErrorMessage
if TYPE_CHECKING:
from lbry.wallet.server.leveldb import ResolveResult
INVALID = ErrorMessage.Code.Name(ErrorMessage.INVALID)
NOT_FOUND = ErrorMessage.Code.Name(ErrorMessage.NOT_FOUND)
BLOCKED = ErrorMessage.Code.Name(ErrorMessage.BLOCKED)
def set_reference(reference, txo_row):
if txo_row:
reference.tx_hash = txo_row['txo_hash'][:32]
reference.nout = struct.unpack('<I', txo_row['txo_hash'][32:])[0]
reference.height = txo_row['height']
def set_reference(reference, claim_hash, rows):
if claim_hash:
for txo in rows:
if claim_hash == txo.claim_hash:
reference.tx_hash = txo.tx_hash
reference.nout = txo.position
reference.height = txo.height
return
class Censor:
@ -38,19 +42,19 @@ class Censor:
def apply(self, rows):
return [row for row in rows if not self.censor(row)]
def censor(self, row) -> bool:
def censor(self, row) -> Optional[bytes]:
if self.is_censored(row):
censoring_channel_hash = bytes.fromhex(row['censoring_channel_id'])[::-1]
self.censored.setdefault(censoring_channel_hash, set())
self.censored[censoring_channel_hash].add(row['tx_hash'])
return True
return False
return censoring_channel_hash
return None
def to_message(self, outputs: OutputsMessage, extra_txo_rows: dict):
for censoring_channel_hash, count in self.censored.items():
blocked = outputs.blocked.add()
blocked.count = len(count)
set_reference(blocked.channel, extra_txo_rows.get(censoring_channel_hash))
set_reference(blocked.channel, censoring_channel_hash, extra_txo_rows)
outputs.blocked_total += len(count)
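
censor() now reports which channel blocked a row (as raw bytes) instead of a bare boolean, so callers can both filter rows and attribute the block. The truthiness-based filtering in apply() keeps working because a non-empty bytes value is truthy and None is falsy. A minimal illustration of that contract with made-up stand-ins:

# Illustration only: a censored row yields the censoring channel hash (truthy
# bytes), an uncensored row yields None, so "if not censor(row)" still filters.
censoring_channel_hash = bytes.fromhex("deadbeef")[::-1]

def fake_censor(row):
    return censoring_channel_hash if row.get("censor_type") else None

rows = [{"censor_type": 1}, {"censor_type": 0}]
kept = [row for row in rows if not fake_censor(row)]   # mirrors Censor.apply()
assert kept == [{"censor_type": 0}]
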
@ -115,10 +119,10 @@ class Outputs:
'expiration_height': claim.expiration_height,
'effective_amount': claim.effective_amount,
'support_amount': claim.support_amount,
'trending_group': claim.trending_group,
'trending_mixed': claim.trending_mixed,
'trending_local': claim.trending_local,
'trending_global': claim.trending_global,
# 'trending_group': claim.trending_group,
# 'trending_mixed': claim.trending_mixed,
# 'trending_local': claim.trending_local,
# 'trending_global': claim.trending_global,
}
if claim.HasField('channel'):
txo.channel = tx_map[claim.channel.tx_hash].outputs[claim.channel.nout]
@ -169,51 +173,60 @@ class Outputs:
@classmethod
def to_bytes(cls, txo_rows, extra_txo_rows, offset=0, total=None, blocked: Censor = None) -> bytes:
extra_txo_rows = {row['claim_hash']: row for row in extra_txo_rows}
page = OutputsMessage()
page.offset = offset
if total is not None:
page.total = total
if blocked is not None:
blocked.to_message(page, extra_txo_rows)
for row in extra_txo_rows:
txo_message: 'OutputsMessage' = page.extra_txos.add()
if not isinstance(row, Exception):
if row.channel_hash:
set_reference(txo_message.claim.channel, row.channel_hash, extra_txo_rows)
if row.reposted_claim_hash:
set_reference(txo_message.claim.repost, row.reposted_claim_hash, extra_txo_rows)
cls.encode_txo(txo_message, row)
for row in txo_rows:
cls.row_to_message(row, page.txos.add(), extra_txo_rows)
for row in extra_txo_rows.values():
cls.row_to_message(row, page.extra_txos.add(), extra_txo_rows)
# cls.row_to_message(row, page.txos.add(), extra_txo_rows)
txo_message: 'OutputsMessage' = page.txos.add()
cls.encode_txo(txo_message, row)
if not isinstance(row, Exception):
if row.channel_hash:
set_reference(txo_message.claim.channel, row.channel_hash, extra_txo_rows)
if row.reposted_claim_hash:
set_reference(txo_message.claim.repost, row.reposted_claim_hash, extra_txo_rows)
elif isinstance(row, ResolveCensoredError):
set_reference(txo_message.error.blocked.channel, row.censor_id, extra_txo_rows)
return page.SerializeToString()
@classmethod
def row_to_message(cls, txo, txo_message, extra_row_dict: dict):
if isinstance(txo, Exception):
txo_message.error.text = txo.args[0]
if isinstance(txo, ValueError):
def encode_txo(cls, txo_message, resolve_result: Union['ResolveResult', Exception]):
if isinstance(resolve_result, Exception):
txo_message.error.text = resolve_result.args[0]
if isinstance(resolve_result, ValueError):
txo_message.error.code = ErrorMessage.INVALID
elif isinstance(txo, LookupError):
elif isinstance(resolve_result, LookupError):
txo_message.error.code = ErrorMessage.NOT_FOUND
elif isinstance(txo, ResolveCensoredError):
elif isinstance(resolve_result, ResolveCensoredError):
txo_message.error.code = ErrorMessage.BLOCKED
set_reference(txo_message.error.blocked.channel, extra_row_dict.get(bytes.fromhex(txo.censor_id)[::-1]))
return
txo_message.tx_hash = txo['txo_hash'][:32]
txo_message.nout, = struct.unpack('<I', txo['txo_hash'][32:])
txo_message.height = txo['height']
txo_message.claim.short_url = txo['short_url']
txo_message.claim.reposted = txo['reposted']
if txo['canonical_url'] is not None:
txo_message.claim.canonical_url = txo['canonical_url']
txo_message.claim.is_controlling = bool(txo['is_controlling'])
if txo['last_take_over_height'] is not None:
txo_message.claim.take_over_height = txo['last_take_over_height']
txo_message.claim.creation_height = txo['creation_height']
txo_message.claim.activation_height = txo['activation_height']
txo_message.claim.expiration_height = txo['expiration_height']
if txo['claims_in_channel'] is not None:
txo_message.claim.claims_in_channel = txo['claims_in_channel']
txo_message.claim.effective_amount = txo['effective_amount']
txo_message.claim.support_amount = txo['support_amount']
txo_message.claim.trending_group = txo['trending_group']
txo_message.claim.trending_mixed = txo['trending_mixed']
txo_message.claim.trending_local = txo['trending_local']
txo_message.claim.trending_global = txo['trending_global']
set_reference(txo_message.claim.channel, extra_row_dict.get(txo['channel_hash']))
set_reference(txo_message.claim.repost, extra_row_dict.get(txo['reposted_claim_hash']))
txo_message.tx_hash = resolve_result.tx_hash
txo_message.nout = resolve_result.position
txo_message.height = resolve_result.height
txo_message.claim.short_url = resolve_result.short_url
txo_message.claim.reposted = resolve_result.reposted
txo_message.claim.is_controlling = resolve_result.is_controlling
txo_message.claim.creation_height = resolve_result.creation_height
txo_message.claim.activation_height = resolve_result.activation_height
txo_message.claim.expiration_height = resolve_result.expiration_height
txo_message.claim.effective_amount = resolve_result.effective_amount
txo_message.claim.support_amount = resolve_result.support_amount
if resolve_result.canonical_url is not None:
txo_message.claim.canonical_url = resolve_result.canonical_url
if resolve_result.last_takeover_height is not None:
txo_message.claim.take_over_height = resolve_result.last_takeover_height
if resolve_result.claims_in_channel is not None:
txo_message.claim.claims_in_channel = resolve_result.claims_in_channel


@ -11,7 +11,7 @@ from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
import lbry.schema.types.v2.result_pb2 as result__pb2
from . import result_pb2 as result__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
@ -20,7 +20,7 @@ DESCRIPTOR = _descriptor.FileDescriptor(
syntax='proto3',
serialized_options=b'Z$github.com/lbryio/hub/protobuf/go/pb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\thub.proto\x12\x02pb\x1a\x0cresult.proto\"0\n\x0fInvertibleField\x12\x0e\n\x06invert\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x03(\t\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"j\n\nRangeField\x12\x1d\n\x02op\x18\x01 \x01(\x0e\x32\x11.pb.RangeField.Op\x12\r\n\x05value\x18\x02 \x03(\t\".\n\x02Op\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03LTE\x10\x01\x12\x07\n\x03GTE\x10\x02\x12\x06\n\x02LT\x10\x03\x12\x06\n\x02GT\x10\x04\"\x9c\r\n\rSearchRequest\x12%\n\x08\x63laim_id\x18\x01 \x01(\x0b\x32\x13.pb.InvertibleField\x12\'\n\nchannel_id\x18\x02 \x01(\x0b\x32\x13.pb.InvertibleField\x12\x0c\n\x04text\x18\x03 \x01(\t\x12\r\n\x05limit\x18\x04 \x01(\r\x12\x10\n\x08order_by\x18\x05 \x03(\t\x12\x0e\n\x06offset\x18\x06 \x01(\r\x12\x16\n\x0eis_controlling\x18\x07 \x01(\x08\x12\x1d\n\x15last_take_over_height\x18\x08 \x01(\t\x12\x12\n\nclaim_name\x18\t \x01(\t\x12\x17\n\x0fnormalized_name\x18\n \x01(\t\x12#\n\x0btx_position\x18\x0b \x01(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06\x61mount\x18\x0c \x01(\x0b\x32\x0e.pb.RangeField\x12!\n\ttimestamp\x18\r \x01(\x0b\x32\x0e.pb.RangeField\x12*\n\x12\x63reation_timestamp\x18\x0e \x01(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06height\x18\x0f \x01(\x0b\x32\x0e.pb.RangeField\x12\'\n\x0f\x63reation_height\x18\x10 \x01(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x61\x63tivation_height\x18\x11 \x01(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x65xpiration_height\x18\x12 \x01(\x0b\x32\x0e.pb.RangeField\x12$\n\x0crelease_time\x18\x13 \x01(\x0b\x32\x0e.pb.RangeField\x12\x11\n\tshort_url\x18\x14 \x01(\t\x12\x15\n\rcanonical_url\x18\x15 \x01(\t\x12\r\n\x05title\x18\x16 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x17 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x18 \x01(\t\x12\x12\n\nclaim_type\x18\x19 \x03(\t\x12$\n\x0crepost_count\x18\x1a \x01(\x0b\x32\x0e.pb.RangeField\x12\x13\n\x0bstream_type\x18\x1b \x03(\t\x12\x12\n\nmedia_type\x18\x1c \x03(\t\x12\"\n\nfee_amount\x18\x1d \x01(\x0b\x32\x0e.pb.RangeField\x12\x14\n\x0c\x66\x65\x65_currency\x18\x1e \x01(\t\x12 \n\x08\x64uration\x18\x1f \x01(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11reposted_claim_id\x18 \x01(\t\x12#\n\x0b\x63\x65nsor_type\x18! \x01(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11\x63laims_in_channel\x18\" \x01(\t\x12$\n\x0c\x63hannel_join\x18# \x01(\x0b\x32\x0e.pb.RangeField\x12)\n\x12is_signature_valid\x18$ \x01(\x0b\x32\r.pb.BoolValue\x12(\n\x10\x65\x66\x66\x65\x63tive_amount\x18% \x01(\x0b\x32\x0e.pb.RangeField\x12&\n\x0esupport_amount\x18& \x01(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_group\x18\' \x01(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_mixed\x18( \x01(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_local\x18) \x01(\x0b\x32\x0e.pb.RangeField\x12\'\n\x0ftrending_global\x18* \x01(\x0b\x32\x0e.pb.RangeField\x12\r\n\x05tx_id\x18+ \x01(\t\x12 \n\x07tx_nout\x18, \x01(\x0b\x32\x0f.pb.UInt32Value\x12\x11\n\tsignature\x18- \x01(\t\x12\x18\n\x10signature_digest\x18. 
\x01(\t\x12\x18\n\x10public_key_bytes\x18/ \x01(\t\x12\x15\n\rpublic_key_id\x18\x30 \x01(\t\x12\x10\n\x08\x61ny_tags\x18\x31 \x03(\t\x12\x10\n\x08\x61ll_tags\x18\x32 \x03(\t\x12\x10\n\x08not_tags\x18\x33 \x03(\t\x12\x1d\n\x15has_channel_signature\x18\x34 \x01(\x08\x12!\n\nhas_source\x18\x35 \x01(\x0b\x32\r.pb.BoolValue\x12 \n\x18limit_claims_per_channel\x18\x36 \x01(\r\x12\x15\n\rany_languages\x18\x37 \x03(\t\x12\x15\n\rall_languages\x18\x38 \x03(\t\x12\x19\n\x11remove_duplicates\x18\x39 \x01(\x08\x12\x11\n\tno_totals\x18: \x01(\x08\x32\x31\n\x03Hub\x12*\n\x06Search\x12\x11.pb.SearchRequest\x1a\x0b.pb.Outputs\"\x00\x42&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3'
serialized_pb=b'\n\thub.proto\x12\x02pb\x1a\x0cresult.proto\"\x0e\n\x0c\x45mptyMessage\".\n\rServerMessage\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\t\"N\n\x0cHelloMessage\x12\x0c\n\x04port\x18\x01 \x01(\t\x12\x0c\n\x04host\x18\x02 \x01(\t\x12\"\n\x07servers\x18\x03 \x03(\x0b\x32\x11.pb.ServerMessage\"0\n\x0fInvertibleField\x12\x0e\n\x06invert\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x03(\t\"\x1c\n\x0bStringValue\x12\r\n\x05value\x18\x01 \x01(\t\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"j\n\nRangeField\x12\x1d\n\x02op\x18\x01 \x01(\x0e\x32\x11.pb.RangeField.Op\x12\r\n\x05value\x18\x02 \x03(\x05\".\n\x02Op\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03LTE\x10\x01\x12\x07\n\x03GTE\x10\x02\x12\x06\n\x02LT\x10\x03\x12\x06\n\x02GT\x10\x04\"\x8e\x0c\n\rSearchRequest\x12%\n\x08\x63laim_id\x18\x01 \x01(\x0b\x32\x13.pb.InvertibleField\x12\'\n\nchannel_id\x18\x02 \x01(\x0b\x32\x13.pb.InvertibleField\x12\x0c\n\x04text\x18\x03 \x01(\t\x12\r\n\x05limit\x18\x04 \x01(\x05\x12\x10\n\x08order_by\x18\x05 \x03(\t\x12\x0e\n\x06offset\x18\x06 \x01(\r\x12\x16\n\x0eis_controlling\x18\x07 \x01(\x08\x12\x1d\n\x15last_take_over_height\x18\x08 \x01(\t\x12\x12\n\nclaim_name\x18\t \x01(\t\x12\x17\n\x0fnormalized_name\x18\n \x01(\t\x12#\n\x0btx_position\x18\x0b \x03(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06\x61mount\x18\x0c \x03(\x0b\x32\x0e.pb.RangeField\x12!\n\ttimestamp\x18\r \x03(\x0b\x32\x0e.pb.RangeField\x12*\n\x12\x63reation_timestamp\x18\x0e \x03(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06height\x18\x0f \x03(\x0b\x32\x0e.pb.RangeField\x12\'\n\x0f\x63reation_height\x18\x10 \x03(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x61\x63tivation_height\x18\x11 \x03(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x65xpiration_height\x18\x12 \x03(\x0b\x32\x0e.pb.RangeField\x12$\n\x0crelease_time\x18\x13 \x03(\x0b\x32\x0e.pb.RangeField\x12\x11\n\tshort_url\x18\x14 \x01(\t\x12\x15\n\rcanonical_url\x18\x15 \x01(\t\x12\r\n\x05title\x18\x16 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x17 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x18 \x01(\t\x12\x12\n\nclaim_type\x18\x19 \x03(\t\x12$\n\x0crepost_count\x18\x1a \x03(\x0b\x32\x0e.pb.RangeField\x12\x13\n\x0bstream_type\x18\x1b \x03(\t\x12\x12\n\nmedia_type\x18\x1c \x03(\t\x12\"\n\nfee_amount\x18\x1d \x03(\x0b\x32\x0e.pb.RangeField\x12\x14\n\x0c\x66\x65\x65_currency\x18\x1e \x01(\t\x12 \n\x08\x64uration\x18\x1f \x03(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11reposted_claim_id\x18 \x01(\t\x12#\n\x0b\x63\x65nsor_type\x18! \x03(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11\x63laims_in_channel\x18\" \x01(\t\x12)\n\x12is_signature_valid\x18$ \x01(\x0b\x32\r.pb.BoolValue\x12(\n\x10\x65\x66\x66\x65\x63tive_amount\x18% \x03(\x0b\x32\x0e.pb.RangeField\x12&\n\x0esupport_amount\x18& \x03(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_score\x18\' \x03(\x0b\x32\x0e.pb.RangeField\x12\r\n\x05tx_id\x18+ \x01(\t\x12 \n\x07tx_nout\x18, \x01(\x0b\x32\x0f.pb.UInt32Value\x12\x11\n\tsignature\x18- \x01(\t\x12\x18\n\x10signature_digest\x18. 
\x01(\t\x12\x18\n\x10public_key_bytes\x18/ \x01(\t\x12\x15\n\rpublic_key_id\x18\x30 \x01(\t\x12\x10\n\x08\x61ny_tags\x18\x31 \x03(\t\x12\x10\n\x08\x61ll_tags\x18\x32 \x03(\t\x12\x10\n\x08not_tags\x18\x33 \x03(\t\x12\x1d\n\x15has_channel_signature\x18\x34 \x01(\x08\x12!\n\nhas_source\x18\x35 \x01(\x0b\x32\r.pb.BoolValue\x12 \n\x18limit_claims_per_channel\x18\x36 \x01(\x05\x12\x15\n\rany_languages\x18\x37 \x03(\t\x12\x15\n\rall_languages\x18\x38 \x03(\t\x12\x19\n\x11remove_duplicates\x18\x39 \x01(\x08\x12\x11\n\tno_totals\x18: \x01(\x08\x12\x0f\n\x07sd_hash\x18; \x01(\t2\x88\x03\n\x03Hub\x12*\n\x06Search\x12\x11.pb.SearchRequest\x1a\x0b.pb.Outputs\"\x00\x12+\n\x04Ping\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12-\n\x05Hello\x12\x10.pb.HelloMessage\x1a\x10.pb.HelloMessage\"\x00\x12/\n\x07\x41\x64\x64Peer\x12\x11.pb.ServerMessage\x1a\x0f.pb.StringValue\"\x00\x12\x35\n\rPeerSubscribe\x12\x11.pb.ServerMessage\x1a\x0f.pb.StringValue\"\x00\x12.\n\x07Version\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12/\n\x08\x46\x65\x61tures\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12\x30\n\tBroadcast\x12\x10.pb.EmptyMessage\x1a\x0f.pb.UInt32Value\"\x00\x42&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3'
,
dependencies=[result__pb2.DESCRIPTOR,])
@ -61,12 +61,122 @@ _RANGEFIELD_OP = _descriptor.EnumDescriptor(
],
containing_type=None,
serialized_options=None,
serialized_start=199,
serialized_end=245,
serialized_start=373,
serialized_end=419,
)
_sym_db.RegisterEnumDescriptor(_RANGEFIELD_OP)
_EMPTYMESSAGE = _descriptor.Descriptor(
name='EmptyMessage',
full_name='pb.EmptyMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=31,
serialized_end=45,
)
_SERVERMESSAGE = _descriptor.Descriptor(
name='ServerMessage',
full_name='pb.ServerMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='pb.ServerMessage.address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='port', full_name='pb.ServerMessage.port', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=47,
serialized_end=93,
)
_HELLOMESSAGE = _descriptor.Descriptor(
name='HelloMessage',
full_name='pb.HelloMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='port', full_name='pb.HelloMessage.port', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='host', full_name='pb.HelloMessage.host', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='servers', full_name='pb.HelloMessage.servers', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=95,
serialized_end=173,
)
_INVERTIBLEFIELD = _descriptor.Descriptor(
name='InvertibleField',
full_name='pb.InvertibleField',
@ -101,8 +211,40 @@ _INVERTIBLEFIELD = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
serialized_start=31,
serialized_end=79,
serialized_start=175,
serialized_end=223,
)
_STRINGVALUE = _descriptor.Descriptor(
name='StringValue',
full_name='pb.StringValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='pb.StringValue.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=225,
serialized_end=253,
)
@ -133,8 +275,8 @@ _BOOLVALUE = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
serialized_start=81,
serialized_end=107,
serialized_start=255,
serialized_end=281,
)
@ -165,8 +307,8 @@ _UINT32VALUE = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
serialized_start=109,
serialized_end=137,
serialized_start=283,
serialized_end=311,
)
@ -187,7 +329,7 @@ _RANGEFIELD = _descriptor.Descriptor(
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='pb.RangeField.value', index=1,
number=2, type=9, cpp_type=9, label=3,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
@ -205,8 +347,8 @@ _RANGEFIELD = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
serialized_start=139,
serialized_end=245,
serialized_start=313,
serialized_end=419,
)
@ -241,7 +383,7 @@ _SEARCHREQUEST = _descriptor.Descriptor(
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit', full_name='pb.SearchRequest.limit', index=3,
number=4, type=13, cpp_type=3, label=1,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
@ -290,64 +432,64 @@ _SEARCHREQUEST = _descriptor.Descriptor(
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tx_position', full_name='pb.SearchRequest.tx_position', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='amount', full_name='pb.SearchRequest.amount', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='pb.SearchRequest.timestamp', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='creation_timestamp', full_name='pb.SearchRequest.creation_timestamp', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='height', full_name='pb.SearchRequest.height', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='creation_height', full_name='pb.SearchRequest.creation_height', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
number=16, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='activation_height', full_name='pb.SearchRequest.activation_height', index=16,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='expiration_height', full_name='pb.SearchRequest.expiration_height', index=17,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
number=18, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='release_time', full_name='pb.SearchRequest.release_time', index=18,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
number=19, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
@ -395,8 +537,8 @@ _SEARCHREQUEST = _descriptor.Descriptor(
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='repost_count', full_name='pb.SearchRequest.repost_count', index=25,
number=26, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
number=26, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
@ -416,8 +558,8 @@ _SEARCHREQUEST = _descriptor.Descriptor(
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fee_amount', full_name='pb.SearchRequest.fee_amount', index=28,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
number=29, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
@ -430,8 +572,8 @@ _SEARCHREQUEST = _descriptor.Descriptor(
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='duration', full_name='pb.SearchRequest.duration', index=30,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
number=31, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
@ -444,8 +586,8 @@ _SEARCHREQUEST = _descriptor.Descriptor(
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='censor_type', full_name='pb.SearchRequest.censor_type', index=32,
number=33, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
number=33, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
@ -457,173 +599,152 @@ _SEARCHREQUEST = _descriptor.Descriptor(
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='channel_join', full_name='pb.SearchRequest.channel_join', index=34,
number=35, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_signature_valid', full_name='pb.SearchRequest.is_signature_valid', index=35,
name='is_signature_valid', full_name='pb.SearchRequest.is_signature_valid', index=34,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_amount', full_name='pb.SearchRequest.effective_amount', index=36,
number=37, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
name='effective_amount', full_name='pb.SearchRequest.effective_amount', index=35,
number=37, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='support_amount', full_name='pb.SearchRequest.support_amount', index=37,
number=38, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
name='support_amount', full_name='pb.SearchRequest.support_amount', index=36,
number=38, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trending_group', full_name='pb.SearchRequest.trending_group', index=38,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
name='trending_score', full_name='pb.SearchRequest.trending_score', index=37,
number=39, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trending_mixed', full_name='pb.SearchRequest.trending_mixed', index=39,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trending_local', full_name='pb.SearchRequest.trending_local', index=40,
number=41, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trending_global', full_name='pb.SearchRequest.trending_global', index=41,
number=42, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tx_id', full_name='pb.SearchRequest.tx_id', index=42,
name='tx_id', full_name='pb.SearchRequest.tx_id', index=38,
number=43, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tx_nout', full_name='pb.SearchRequest.tx_nout', index=43,
name='tx_nout', full_name='pb.SearchRequest.tx_nout', index=39,
number=44, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature', full_name='pb.SearchRequest.signature', index=44,
name='signature', full_name='pb.SearchRequest.signature', index=40,
number=45, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature_digest', full_name='pb.SearchRequest.signature_digest', index=45,
name='signature_digest', full_name='pb.SearchRequest.signature_digest', index=41,
number=46, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='public_key_bytes', full_name='pb.SearchRequest.public_key_bytes', index=46,
name='public_key_bytes', full_name='pb.SearchRequest.public_key_bytes', index=42,
number=47, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='public_key_id', full_name='pb.SearchRequest.public_key_id', index=47,
name='public_key_id', full_name='pb.SearchRequest.public_key_id', index=43,
number=48, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='any_tags', full_name='pb.SearchRequest.any_tags', index=48,
name='any_tags', full_name='pb.SearchRequest.any_tags', index=44,
number=49, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='all_tags', full_name='pb.SearchRequest.all_tags', index=49,
name='all_tags', full_name='pb.SearchRequest.all_tags', index=45,
number=50, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='not_tags', full_name='pb.SearchRequest.not_tags', index=50,
name='not_tags', full_name='pb.SearchRequest.not_tags', index=46,
number=51, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='has_channel_signature', full_name='pb.SearchRequest.has_channel_signature', index=51,
name='has_channel_signature', full_name='pb.SearchRequest.has_channel_signature', index=47,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='has_source', full_name='pb.SearchRequest.has_source', index=52,
name='has_source', full_name='pb.SearchRequest.has_source', index=48,
number=53, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit_claims_per_channel', full_name='pb.SearchRequest.limit_claims_per_channel', index=53,
number=54, type=13, cpp_type=3, label=1,
name='limit_claims_per_channel', full_name='pb.SearchRequest.limit_claims_per_channel', index=49,
number=54, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='any_languages', full_name='pb.SearchRequest.any_languages', index=54,
name='any_languages', full_name='pb.SearchRequest.any_languages', index=50,
number=55, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='all_languages', full_name='pb.SearchRequest.all_languages', index=55,
name='all_languages', full_name='pb.SearchRequest.all_languages', index=51,
number=56, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='remove_duplicates', full_name='pb.SearchRequest.remove_duplicates', index=56,
name='remove_duplicates', full_name='pb.SearchRequest.remove_duplicates', index=52,
number=57, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='no_totals', full_name='pb.SearchRequest.no_totals', index=57,
name='no_totals', full_name='pb.SearchRequest.no_totals', index=53,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sd_hash', full_name='pb.SearchRequest.sd_hash', index=54,
number=59, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
@ -636,10 +757,11 @@ _SEARCHREQUEST = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[
],
serialized_start=248,
serialized_end=1940,
serialized_start=422,
serialized_end=1972,
)
_HELLOMESSAGE.fields_by_name['servers'].message_type = _SERVERMESSAGE
_RANGEFIELD.fields_by_name['op'].enum_type = _RANGEFIELD_OP
_RANGEFIELD_OP.containing_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['claim_id'].message_type = _INVERTIBLEFIELD
@ -657,23 +779,44 @@ _SEARCHREQUEST.fields_by_name['repost_count'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['fee_amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['duration'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['censor_type'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['channel_join'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['is_signature_valid'].message_type = _BOOLVALUE
_SEARCHREQUEST.fields_by_name['effective_amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['support_amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_group'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_mixed'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_local'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_global'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_score'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['tx_nout'].message_type = _UINT32VALUE
_SEARCHREQUEST.fields_by_name['has_source'].message_type = _BOOLVALUE
DESCRIPTOR.message_types_by_name['EmptyMessage'] = _EMPTYMESSAGE
DESCRIPTOR.message_types_by_name['ServerMessage'] = _SERVERMESSAGE
DESCRIPTOR.message_types_by_name['HelloMessage'] = _HELLOMESSAGE
DESCRIPTOR.message_types_by_name['InvertibleField'] = _INVERTIBLEFIELD
DESCRIPTOR.message_types_by_name['StringValue'] = _STRINGVALUE
DESCRIPTOR.message_types_by_name['BoolValue'] = _BOOLVALUE
DESCRIPTOR.message_types_by_name['UInt32Value'] = _UINT32VALUE
DESCRIPTOR.message_types_by_name['RangeField'] = _RANGEFIELD
DESCRIPTOR.message_types_by_name['SearchRequest'] = _SEARCHREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EmptyMessage = _reflection.GeneratedProtocolMessageType('EmptyMessage', (_message.Message,), {
'DESCRIPTOR' : _EMPTYMESSAGE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.EmptyMessage)
})
_sym_db.RegisterMessage(EmptyMessage)
ServerMessage = _reflection.GeneratedProtocolMessageType('ServerMessage', (_message.Message,), {
'DESCRIPTOR' : _SERVERMESSAGE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.ServerMessage)
})
_sym_db.RegisterMessage(ServerMessage)
HelloMessage = _reflection.GeneratedProtocolMessageType('HelloMessage', (_message.Message,), {
'DESCRIPTOR' : _HELLOMESSAGE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.HelloMessage)
})
_sym_db.RegisterMessage(HelloMessage)
InvertibleField = _reflection.GeneratedProtocolMessageType('InvertibleField', (_message.Message,), {
'DESCRIPTOR' : _INVERTIBLEFIELD,
'__module__' : 'hub_pb2'
@ -681,6 +824,13 @@ InvertibleField = _reflection.GeneratedProtocolMessageType('InvertibleField', (_
})
_sym_db.RegisterMessage(InvertibleField)
StringValue = _reflection.GeneratedProtocolMessageType('StringValue', (_message.Message,), {
'DESCRIPTOR' : _STRINGVALUE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.StringValue)
})
_sym_db.RegisterMessage(StringValue)
BoolValue = _reflection.GeneratedProtocolMessageType('BoolValue', (_message.Message,), {
'DESCRIPTOR' : _BOOLVALUE,
'__module__' : 'hub_pb2'
@ -719,8 +869,8 @@ _HUB = _descriptor.ServiceDescriptor(
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1942,
serialized_end=1991,
serialized_start=1975,
serialized_end=2367,
methods=[
_descriptor.MethodDescriptor(
name='Search',
@ -732,6 +882,76 @@ _HUB = _descriptor.ServiceDescriptor(
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Ping',
full_name='pb.Hub.Ping',
index=1,
containing_service=None,
input_type=_EMPTYMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Hello',
full_name='pb.Hub.Hello',
index=2,
containing_service=None,
input_type=_HELLOMESSAGE,
output_type=_HELLOMESSAGE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='AddPeer',
full_name='pb.Hub.AddPeer',
index=3,
containing_service=None,
input_type=_SERVERMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='PeerSubscribe',
full_name='pb.Hub.PeerSubscribe',
index=4,
containing_service=None,
input_type=_SERVERMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Version',
full_name='pb.Hub.Version',
index=5,
containing_service=None,
input_type=_EMPTYMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Features',
full_name='pb.Hub.Features',
index=6,
containing_service=None,
input_type=_EMPTYMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Broadcast',
full_name='pb.Hub.Broadcast',
index=7,
containing_service=None,
input_type=_EMPTYMESSAGE,
output_type=_UINT32VALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_HUB)

View file

@ -2,8 +2,8 @@
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import lbry.schema.types.v2.hub_pb2 as hub__pb2
import lbry.schema.types.v2.result_pb2 as result__pb2
from . import hub_pb2 as hub__pb2
from . import result_pb2 as result__pb2
class HubStub(object):
@ -20,6 +20,41 @@ class HubStub(object):
request_serializer=hub__pb2.SearchRequest.SerializeToString,
response_deserializer=result__pb2.Outputs.FromString,
)
self.Ping = channel.unary_unary(
'/pb.Hub/Ping',
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.Hello = channel.unary_unary(
'/pb.Hub/Hello',
request_serializer=hub__pb2.HelloMessage.SerializeToString,
response_deserializer=hub__pb2.HelloMessage.FromString,
)
self.AddPeer = channel.unary_unary(
'/pb.Hub/AddPeer',
request_serializer=hub__pb2.ServerMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.PeerSubscribe = channel.unary_unary(
'/pb.Hub/PeerSubscribe',
request_serializer=hub__pb2.ServerMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.Version = channel.unary_unary(
'/pb.Hub/Version',
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.Features = channel.unary_unary(
'/pb.Hub/Features',
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.Broadcast = channel.unary_unary(
'/pb.Hub/Broadcast',
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
response_deserializer=hub__pb2.UInt32Value.FromString,
)
class HubServicer(object):
@ -31,6 +66,48 @@ class HubServicer(object):
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Ping(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Hello(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddPeer(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PeerSubscribe(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Version(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Features(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Broadcast(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_HubServicer_to_server(servicer, server):
rpc_method_handlers = {
@ -39,6 +116,41 @@ def add_HubServicer_to_server(servicer, server):
request_deserializer=hub__pb2.SearchRequest.FromString,
response_serializer=result__pb2.Outputs.SerializeToString,
),
'Ping': grpc.unary_unary_rpc_method_handler(
servicer.Ping,
request_deserializer=hub__pb2.EmptyMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'Hello': grpc.unary_unary_rpc_method_handler(
servicer.Hello,
request_deserializer=hub__pb2.HelloMessage.FromString,
response_serializer=hub__pb2.HelloMessage.SerializeToString,
),
'AddPeer': grpc.unary_unary_rpc_method_handler(
servicer.AddPeer,
request_deserializer=hub__pb2.ServerMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'PeerSubscribe': grpc.unary_unary_rpc_method_handler(
servicer.PeerSubscribe,
request_deserializer=hub__pb2.ServerMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'Version': grpc.unary_unary_rpc_method_handler(
servicer.Version,
request_deserializer=hub__pb2.EmptyMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'Features': grpc.unary_unary_rpc_method_handler(
servicer.Features,
request_deserializer=hub__pb2.EmptyMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'Broadcast': grpc.unary_unary_rpc_method_handler(
servicer.Broadcast,
request_deserializer=hub__pb2.EmptyMessage.FromString,
response_serializer=hub__pb2.UInt32Value.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'pb.Hub', rpc_method_handlers)
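For context, a hedged server-side sketch (not part of this diff): the new RPCs only respond once a HubServicer subclass overrides them and is registered through add_HubServicer_to_server. The module path, port, and the empty StringValue reply below are assumptions for illustration only.
# Sketch only: serving the new Hub RPCs with the regenerated bindings.
from concurrent import futures
import grpc
from lbry.schema.types.v2 import hub_pb2, hub_pb2_grpc

class MyHub(hub_pb2_grpc.HubServicer):
    def Ping(self, request, context):
        # pb.StringValue is defined in hub.proto; populate its payload field as that message defines it.
        return hub_pb2.StringValue()

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
hub_pb2_grpc.add_HubServicer_to_server(MyHub(), server)
server.add_insecure_port('[::]:50051')  # placeholder port
server.start()
server.wait_for_termination()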
@ -65,3 +177,122 @@ class Hub(object):
result__pb2.Outputs.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Ping(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Ping',
hub__pb2.EmptyMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Hello(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Hello',
hub__pb2.HelloMessage.SerializeToString,
hub__pb2.HelloMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AddPeer(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/AddPeer',
hub__pb2.ServerMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PeerSubscribe(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/PeerSubscribe',
hub__pb2.ServerMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Version(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Version',
hub__pb2.EmptyMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Features(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Features',
hub__pb2.EmptyMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Broadcast(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Broadcast',
hub__pb2.EmptyMessage.SerializeToString,
hub__pb2.UInt32Value.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
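On the client side, the regenerated HubStub exposes the same RPC set. A minimal sketch of one Ping round trip, assuming the generated modules live under lbry.schema.types.v2 (as the imports above suggest) and a hub listens at a placeholder localhost:50051:
# Sketch only: one round trip through the new Ping RPC.
import grpc
from lbry.schema.types.v2 import hub_pb2, hub_pb2_grpc

with grpc.insecure_channel('localhost:50051') as channel:  # placeholder address
    stub = hub_pb2_grpc.HubStub(channel)
    reply = stub.Ping(hub_pb2.EmptyMessage())  # returns a pb.StringValue per the service definition above
    print(reply)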

View file

@ -1,13 +1,11 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: result.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
@ -19,9 +17,10 @@ DESCRIPTOR = _descriptor.FileDescriptor(
name='result.proto',
package='pb',
syntax='proto3',
serialized_pb=_b('\n\x0cresult.proto\x12\x02pb\"\x97\x01\n\x07Outputs\x12\x18\n\x04txos\x18\x01 \x03(\x0b\x32\n.pb.Output\x12\x1e\n\nextra_txos\x18\x02 \x03(\x0b\x32\n.pb.Output\x12\r\n\x05total\x18\x03 \x01(\r\x12\x0e\n\x06offset\x18\x04 \x01(\r\x12\x1c\n\x07\x62locked\x18\x05 \x03(\x0b\x32\x0b.pb.Blocked\x12\x15\n\rblocked_total\x18\x06 \x01(\r\"{\n\x06Output\x12\x0f\n\x07tx_hash\x18\x01 \x01(\x0c\x12\x0c\n\x04nout\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x1e\n\x05\x63laim\x18\x07 \x01(\x0b\x32\r.pb.ClaimMetaH\x00\x12\x1a\n\x05\x65rror\x18\x0f \x01(\x0b\x32\t.pb.ErrorH\x00\x42\x06\n\x04meta\"\xaf\x03\n\tClaimMeta\x12\x1b\n\x07\x63hannel\x18\x01 \x01(\x0b\x32\n.pb.Output\x12\x1a\n\x06repost\x18\x02 \x01(\x0b\x32\n.pb.Output\x12\x11\n\tshort_url\x18\x03 \x01(\t\x12\x15\n\rcanonical_url\x18\x04 \x01(\t\x12\x16\n\x0eis_controlling\x18\x05 \x01(\x08\x12\x18\n\x10take_over_height\x18\x06 \x01(\r\x12\x17\n\x0f\x63reation_height\x18\x07 \x01(\r\x12\x19\n\x11\x61\x63tivation_height\x18\x08 \x01(\r\x12\x19\n\x11\x65xpiration_height\x18\t \x01(\r\x12\x19\n\x11\x63laims_in_channel\x18\n \x01(\r\x12\x10\n\x08reposted\x18\x0b \x01(\r\x12\x18\n\x10\x65\x66\x66\x65\x63tive_amount\x18\x14 \x01(\x04\x12\x16\n\x0esupport_amount\x18\x15 \x01(\x04\x12\x16\n\x0etrending_group\x18\x16 \x01(\r\x12\x16\n\x0etrending_mixed\x18\x17 \x01(\x02\x12\x16\n\x0etrending_local\x18\x18 \x01(\x02\x12\x17\n\x0ftrending_global\x18\x19 \x01(\x02\"\x94\x01\n\x05\x45rror\x12\x1c\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x0e.pb.Error.Code\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x1c\n\x07\x62locked\x18\x03 \x01(\x0b\x32\x0b.pb.Blocked\"A\n\x04\x43ode\x12\x10\n\x0cUNKNOWN_CODE\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07\x42LOCKED\x10\x03\"5\n\x07\x42locked\x12\r\n\x05\x63ount\x18\x01 \x01(\r\x12\x1b\n\x07\x63hannel\x18\x02 \x01(\x0b\x32\n.pb.Outputb\x06proto3')
serialized_options=b'Z$github.com/lbryio/hub/protobuf/go/pb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0cresult.proto\x12\x02pb\"\x97\x01\n\x07Outputs\x12\x18\n\x04txos\x18\x01 \x03(\x0b\x32\n.pb.Output\x12\x1e\n\nextra_txos\x18\x02 \x03(\x0b\x32\n.pb.Output\x12\r\n\x05total\x18\x03 \x01(\r\x12\x0e\n\x06offset\x18\x04 \x01(\r\x12\x1c\n\x07\x62locked\x18\x05 \x03(\x0b\x32\x0b.pb.Blocked\x12\x15\n\rblocked_total\x18\x06 \x01(\r\"{\n\x06Output\x12\x0f\n\x07tx_hash\x18\x01 \x01(\x0c\x12\x0c\n\x04nout\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x1e\n\x05\x63laim\x18\x07 \x01(\x0b\x32\r.pb.ClaimMetaH\x00\x12\x1a\n\x05\x65rror\x18\x0f \x01(\x0b\x32\t.pb.ErrorH\x00\x42\x06\n\x04meta\"\xe6\x02\n\tClaimMeta\x12\x1b\n\x07\x63hannel\x18\x01 \x01(\x0b\x32\n.pb.Output\x12\x1a\n\x06repost\x18\x02 \x01(\x0b\x32\n.pb.Output\x12\x11\n\tshort_url\x18\x03 \x01(\t\x12\x15\n\rcanonical_url\x18\x04 \x01(\t\x12\x16\n\x0eis_controlling\x18\x05 \x01(\x08\x12\x18\n\x10take_over_height\x18\x06 \x01(\r\x12\x17\n\x0f\x63reation_height\x18\x07 \x01(\r\x12\x19\n\x11\x61\x63tivation_height\x18\x08 \x01(\r\x12\x19\n\x11\x65xpiration_height\x18\t \x01(\r\x12\x19\n\x11\x63laims_in_channel\x18\n \x01(\r\x12\x10\n\x08reposted\x18\x0b \x01(\r\x12\x18\n\x10\x65\x66\x66\x65\x63tive_amount\x18\x14 \x01(\x04\x12\x16\n\x0esupport_amount\x18\x15 \x01(\x04\x12\x16\n\x0etrending_score\x18\x16 \x01(\x01\"\x94\x01\n\x05\x45rror\x12\x1c\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x0e.pb.Error.Code\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x1c\n\x07\x62locked\x18\x03 \x01(\x0b\x32\x0b.pb.Blocked\"A\n\x04\x43ode\x12\x10\n\x0cUNKNOWN_CODE\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07\x42LOCKED\x10\x03\"5\n\x07\x42locked\x12\r\n\x05\x63ount\x18\x01 \x01(\r\x12\x1b\n\x07\x63hannel\x18\x02 \x01(\x0b\x32\n.pb.OutputB&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3'
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
@ -30,28 +29,33 @@ _ERROR_CODE = _descriptor.EnumDescriptor(
full_name='pb.Error.Code',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_CODE', index=0, number=0,
options=None,
type=None),
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='NOT_FOUND', index=1, number=1,
options=None,
type=None),
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVALID', index=2, number=2,
options=None,
type=None),
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BLOCKED', index=3, number=3,
options=None,
type=None),
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
options=None,
serialized_start=817,
serialized_end=882,
serialized_options=None,
serialized_start=744,
serialized_end=809,
)
_sym_db.RegisterEnumDescriptor(_ERROR_CODE)
@ -62,6 +66,7 @@ _OUTPUTS = _descriptor.Descriptor(
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='txos', full_name='pb.Outputs.txos', index=0,
@ -69,49 +74,49 @@ _OUTPUTS = _descriptor.Descriptor(
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='extra_txos', full_name='pb.Outputs.extra_txos', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total', full_name='pb.Outputs.total', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='offset', full_name='pb.Outputs.offset', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='blocked', full_name='pb.Outputs.blocked', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='blocked_total', full_name='pb.Outputs.blocked_total', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
@ -128,56 +133,59 @@ _OUTPUT = _descriptor.Descriptor(
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='tx_hash', full_name='pb.Output.tx_hash', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='nout', full_name='pb.Output.nout', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='height', full_name='pb.Output.height', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='claim', full_name='pb.Output.claim', index=3,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='pb.Output.error', index=4,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='meta', full_name='pb.Output.meta',
index=0, containing_type=None, fields=[]),
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=174,
serialized_end=297,
@ -190,6 +198,7 @@ _CLAIMMETA = _descriptor.Descriptor(
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='channel', full_name='pb.ClaimMeta.channel', index=0,
@ -197,133 +206,112 @@ _CLAIMMETA = _descriptor.Descriptor(
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='repost', full_name='pb.ClaimMeta.repost', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='short_url', full_name='pb.ClaimMeta.short_url', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='canonical_url', full_name='pb.ClaimMeta.canonical_url', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_controlling', full_name='pb.ClaimMeta.is_controlling', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='take_over_height', full_name='pb.ClaimMeta.take_over_height', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='creation_height', full_name='pb.ClaimMeta.creation_height', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='activation_height', full_name='pb.ClaimMeta.activation_height', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='expiration_height', full_name='pb.ClaimMeta.expiration_height', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='claims_in_channel', full_name='pb.ClaimMeta.claims_in_channel', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reposted', full_name='pb.ClaimMeta.reposted', index=10,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_amount', full_name='pb.ClaimMeta.effective_amount', index=11,
number=20, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='support_amount', full_name='pb.ClaimMeta.support_amount', index=12,
number=21, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trending_group', full_name='pb.ClaimMeta.trending_group', index=13,
number=22, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trending_mixed', full_name='pb.ClaimMeta.trending_mixed', index=14,
number=23, type=2, cpp_type=6, label=1,
name='trending_score', full_name='pb.ClaimMeta.trending_score', index=13,
number=22, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trending_local', full_name='pb.ClaimMeta.trending_local', index=15,
number=24, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trending_global', full_name='pb.ClaimMeta.trending_global', index=16,
number=25, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=300,
serialized_end=731,
serialized_end=658,
)
@ -333,6 +321,7 @@ _ERROR = _descriptor.Descriptor(
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='pb.Error.code', index=0,
@ -340,21 +329,21 @@ _ERROR = _descriptor.Descriptor(
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='text', full_name='pb.Error.text', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='blocked', full_name='pb.Error.blocked', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
@ -362,14 +351,14 @@ _ERROR = _descriptor.Descriptor(
enum_types=[
_ERROR_CODE,
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=734,
serialized_end=882,
serialized_start=661,
serialized_end=809,
)
@ -379,6 +368,7 @@ _BLOCKED = _descriptor.Descriptor(
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='count', full_name='pb.Blocked.count', index=0,
@ -386,28 +376,28 @@ _BLOCKED = _descriptor.Descriptor(
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='channel', full_name='pb.Blocked.channel', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=884,
serialized_end=937,
serialized_start=811,
serialized_end=864,
)
_OUTPUTS.fields_by_name['txos'].message_type = _OUTPUT
@ -432,41 +422,43 @@ DESCRIPTOR.message_types_by_name['Output'] = _OUTPUT
DESCRIPTOR.message_types_by_name['ClaimMeta'] = _CLAIMMETA
DESCRIPTOR.message_types_by_name['Error'] = _ERROR
DESCRIPTOR.message_types_by_name['Blocked'] = _BLOCKED
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Outputs = _reflection.GeneratedProtocolMessageType('Outputs', (_message.Message,), dict(
DESCRIPTOR = _OUTPUTS,
__module__ = 'result_pb2'
Outputs = _reflection.GeneratedProtocolMessageType('Outputs', (_message.Message,), {
'DESCRIPTOR' : _OUTPUTS,
'__module__' : 'result_pb2'
# @@protoc_insertion_point(class_scope:pb.Outputs)
))
})
_sym_db.RegisterMessage(Outputs)
Output = _reflection.GeneratedProtocolMessageType('Output', (_message.Message,), dict(
DESCRIPTOR = _OUTPUT,
__module__ = 'result_pb2'
Output = _reflection.GeneratedProtocolMessageType('Output', (_message.Message,), {
'DESCRIPTOR' : _OUTPUT,
'__module__' : 'result_pb2'
# @@protoc_insertion_point(class_scope:pb.Output)
))
})
_sym_db.RegisterMessage(Output)
ClaimMeta = _reflection.GeneratedProtocolMessageType('ClaimMeta', (_message.Message,), dict(
DESCRIPTOR = _CLAIMMETA,
__module__ = 'result_pb2'
ClaimMeta = _reflection.GeneratedProtocolMessageType('ClaimMeta', (_message.Message,), {
'DESCRIPTOR' : _CLAIMMETA,
'__module__' : 'result_pb2'
# @@protoc_insertion_point(class_scope:pb.ClaimMeta)
))
})
_sym_db.RegisterMessage(ClaimMeta)
Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), dict(
DESCRIPTOR = _ERROR,
__module__ = 'result_pb2'
Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), {
'DESCRIPTOR' : _ERROR,
'__module__' : 'result_pb2'
# @@protoc_insertion_point(class_scope:pb.Error)
))
})
_sym_db.RegisterMessage(Error)
Blocked = _reflection.GeneratedProtocolMessageType('Blocked', (_message.Message,), dict(
DESCRIPTOR = _BLOCKED,
__module__ = 'result_pb2'
Blocked = _reflection.GeneratedProtocolMessageType('Blocked', (_message.Message,), {
'DESCRIPTOR' : _BLOCKED,
'__module__' : 'result_pb2'
# @@protoc_insertion_point(class_scope:pb.Blocked)
))
})
_sym_db.RegisterMessage(Blocked)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)

View file

@ -0,0 +1,4 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

View file

@ -0,0 +1,30 @@
import asyncio
import logging
from lbry.stream.downloader import StreamDownloader
log = logging.getLogger(__name__)
class BackgroundDownloader:
def __init__(self, conf, storage, blob_manager, dht_node=None):
self.storage = storage
self.blob_manager = blob_manager
self.node = dht_node
self.conf = conf
async def download_blobs(self, sd_hash):
downloader = StreamDownloader(asyncio.get_running_loop(), self.conf, self.blob_manager, sd_hash)
try:
await downloader.start(self.node, save_stream=False)
for blob_info in downloader.descriptor.blobs[:-1]:
await downloader.download_stream_blob(blob_info)
except ValueError:
return
except asyncio.CancelledError:
raise
except Exception:
log.error("Unexpected download error on background downloader")
finally:
downloader.stop()
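A hedged usage sketch for the new class: a daemon component would pass in its existing config, storage, blob manager and (optional) DHT node, then call download_blobs for each stream it wants pre-fetched. All four arguments below are assumed to come from a running daemon.
# Sketch only, run inside an existing coroutine on the daemon's event loop.
downloader = BackgroundDownloader(conf, storage, blob_manager, dht_node)
await downloader.download_blobs(sd_hash)  # sd_hash: hex digest of the stream descriptor to pre-fetch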

View file

@ -4,6 +4,7 @@ import binascii
import logging
import typing
import asyncio
import time
import re
from collections import OrderedDict
from cryptography.hazmat.primitives.ciphers.algorithms import AES
@ -152,15 +153,19 @@ class StreamDescriptor:
h.update(self.old_sort_json())
return h.hexdigest()
async def make_sd_blob(self, blob_file_obj: typing.Optional[AbstractBlob] = None,
old_sort: typing.Optional[bool] = False,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None):
async def make_sd_blob(
self, blob_file_obj: typing.Optional[AbstractBlob] = None, old_sort: typing.Optional[bool] = False,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None,
added_on: float = None, is_mine: bool = False
):
sd_hash = self.calculate_sd_hash() if not old_sort else self.calculate_old_sort_sd_hash()
if not old_sort:
sd_data = self.as_json()
else:
sd_data = self.old_sort_json()
sd_blob = blob_file_obj or BlobFile(self.loop, sd_hash, len(sd_data), blob_completed_callback, self.blob_dir)
sd_blob = blob_file_obj or BlobFile(
self.loop, sd_hash, len(sd_data), blob_completed_callback, self.blob_dir, added_on, is_mine
)
if blob_file_obj:
blob_file_obj.set_length(len(sd_data))
if not sd_blob.get_is_verified():
@ -189,12 +194,13 @@ class StreamDescriptor:
raise InvalidStreamDescriptorError("Stream terminator blob should not have a hash")
if any(i != blob_info['blob_num'] for i, blob_info in enumerate(decoded['blobs'])):
raise InvalidStreamDescriptorError("Stream contains out of order or skipped blobs")
added_on = time.time()
descriptor = cls(
loop, blob_dir,
binascii.unhexlify(decoded['stream_name']).decode(),
decoded['key'],
binascii.unhexlify(decoded['suggested_file_name']).decode(),
[BlobInfo(info['blob_num'], info['length'], info['iv'], info.get('blob_hash'))
[BlobInfo(info['blob_num'], info['length'], info['iv'], added_on, info.get('blob_hash'))
for info in decoded['blobs']],
decoded['stream_hash'],
blob.blob_hash
@ -252,20 +258,25 @@ class StreamDescriptor:
iv_generator = iv_generator or random_iv_generator()
key = key or os.urandom(AES.block_size // 8)
blob_num = -1
added_on = time.time()
async for blob_bytes in file_reader(file_path):
blob_num += 1
blob_info = await BlobFile.create_from_unencrypted(
loop, blob_dir, key, next(iv_generator), blob_bytes, blob_num, blob_completed_callback
loop, blob_dir, key, next(iv_generator), blob_bytes, blob_num, added_on, True, blob_completed_callback
)
blobs.append(blob_info)
blobs.append(
BlobInfo(len(blobs), 0, binascii.hexlify(next(iv_generator)).decode())) # add the stream terminator
# add the stream terminator
BlobInfo(len(blobs), 0, binascii.hexlify(next(iv_generator)).decode(), added_on, None, True)
)
file_name = os.path.basename(file_path)
suggested_file_name = sanitize_file_name(file_name)
descriptor = cls(
loop, blob_dir, file_name, binascii.hexlify(key).decode(), suggested_file_name, blobs
)
sd_blob = await descriptor.make_sd_blob(old_sort=old_sort, blob_completed_callback=blob_completed_callback)
sd_blob = await descriptor.make_sd_blob(
old_sort=old_sort, blob_completed_callback=blob_completed_callback, added_on=added_on, is_mine=True
)
descriptor.sd_hash = sd_blob.blob_hash
return descriptor

View file

@ -3,9 +3,9 @@ import typing
import logging
import binascii
from lbry.dht.peer import make_kademlia_peer
from lbry.dht.node import get_kademlia_peers_from_hosts
from lbry.error import DownloadSDTimeoutError
from lbry.utils import resolve_host, lru_cache_concurrent
from lbry.utils import lru_cache_concurrent
from lbry.stream.descriptor import StreamDescriptor
from lbry.blob_exchange.downloader import BlobDownloader
if typing.TYPE_CHECKING:
@ -48,26 +48,19 @@ class StreamDownloader:
self.cached_read_blob = cached_read_blob
async def add_fixed_peers(self):
def _delayed_add_fixed_peers():
def _add_fixed_peers(fixed_peers):
self.peer_queue.put_nowait(fixed_peers)
self.added_fixed_peers = True
self.peer_queue.put_nowait([
make_kademlia_peer(None, address, None, tcp_port=port, allow_localhost=True)
for address, port in addresses
])
if not self.config.fixed_peers:
return
addresses = [
(await resolve_host(url, port, proto='tcp'), port)
for url, port in self.config.fixed_peers
]
if 'dht' in self.config.components_to_skip or not self.node or not \
len(self.node.protocol.routing_table.get_peers()) > 0:
self.fixed_peers_delay = 0.0
else:
self.fixed_peers_delay = self.config.fixed_peer_delay
self.fixed_peers_handle = self.loop.call_later(self.fixed_peers_delay, _delayed_add_fixed_peers)
fixed_peers = await get_kademlia_peers_from_hosts(self.config.fixed_peers)
self.fixed_peers_handle = self.loop.call_later(self.fixed_peers_delay, _add_fixed_peers, fixed_peers)
async def load_descriptor(self, connection_id: int = 0):
# download or get the sd blob
@ -90,7 +83,7 @@ class StreamDownloader:
)
log.info("loaded stream manifest %s", self.sd_hash)
async def start(self, node: typing.Optional['Node'] = None, connection_id: int = 0):
async def start(self, node: typing.Optional['Node'] = None, connection_id: int = 0, save_stream=True):
# set up peer accumulation
self.node = node or self.node # fixme: this shouldnt be set here!
if self.node:
@ -105,11 +98,7 @@ class StreamDownloader:
if not self.descriptor:
await self.load_descriptor(connection_id)
# add the head blob to the peer search
self.search_queue.put_nowait(self.descriptor.blobs[0].blob_hash)
log.info("added head blob to peer search for stream %s", self.sd_hash)
if not await self.blob_manager.storage.stream_exists(self.sd_hash):
if not await self.blob_manager.storage.stream_exists(self.sd_hash) and save_stream:
await self.blob_manager.storage.store_stream(
self.blob_manager.get_blob(self.sd_hash, length=self.descriptor.length), self.descriptor
)

View file

@ -70,6 +70,7 @@ class StreamManager(SourceManager):
async def recover_streams(self, file_infos: typing.List[typing.Dict]):
to_restore = []
to_check = []
async def recover_stream(sd_hash: str, stream_hash: str, stream_name: str,
suggested_file_name: str, key: str,
@ -82,6 +83,7 @@ class StreamManager(SourceManager):
if not descriptor:
return
to_restore.append((descriptor, sd_blob, content_fee))
to_check.extend([sd_blob.blob_hash] + [blob.blob_hash for blob in descriptor.blobs[:-1]])
await asyncio.gather(*[
recover_stream(
@ -93,6 +95,8 @@ class StreamManager(SourceManager):
if to_restore:
await self.storage.recover_streams(to_restore, self.config.download_dir)
if to_check:
await self.blob_manager.ensure_completed_blobs_status(to_check)
# if self.blob_manager._save_blobs:
# log.info("Recovered %i/%i attempted streams", len(to_restore), len(file_infos))
@ -228,6 +232,7 @@ class StreamManager(SourceManager):
while not stream.is_fully_reflected and stream.reflector_progress > 0 and len(sent) > 0:
stream.reflector_progress = 0
sent = await stream.upload_to_reflector(host, port)
return sent
async def create(self, file_path: str, key: Optional[bytes] = None,
iv_generator: Optional[typing.Generator[bytes, None, None]] = None) -> ManagedStream:
@ -236,7 +241,7 @@ class StreamManager(SourceManager):
blob_completed_callback=self.blob_manager.blob_completed
)
await self.storage.store_stream(
self.blob_manager.get_blob(descriptor.sd_hash), descriptor
self.blob_manager.get_blob(descriptor.sd_hash, is_mine=True), descriptor
)
row_id = await self.storage.save_published_file(
descriptor.stream_hash, os.path.basename(file_path), os.path.dirname(file_path), 0

View file

@ -132,17 +132,18 @@ class AsyncioTestCase(unittest.TestCase):
with outcome.testPartExecutor(self):
self.setUp()
self.add_timeout()
self.loop.run_until_complete(self.asyncSetUp())
if outcome.success:
outcome.expecting_failure = expecting_failure
with outcome.testPartExecutor(self, isTest=True):
maybe_coroutine = testMethod()
if asyncio.iscoroutine(maybe_coroutine):
if self.TIMEOUT:
self.loop.call_later(self.TIMEOUT, self.cancel)
self.add_timeout()
self.loop.run_until_complete(maybe_coroutine)
outcome.expecting_failure = False
with outcome.testPartExecutor(self):
self.add_timeout()
self.loop.run_until_complete(self.asyncTearDown())
self.tearDown()
@ -190,6 +191,7 @@ class AsyncioTestCase(unittest.TestCase):
with outcome.testPartExecutor(self):
maybe_coroutine = function(*args, **kwargs)
if asyncio.iscoroutine(maybe_coroutine):
self.add_timeout()
self.loop.run_until_complete(maybe_coroutine)
def cancel(self):
@ -198,6 +200,16 @@ class AsyncioTestCase(unittest.TestCase):
task.print_stack()
task.cancel()
def add_timeout(self):
if self.TIMEOUT:
self.loop.call_later(self.TIMEOUT, self.check_timeout, time())
def check_timeout(self, started):
if time() - started >= self.TIMEOUT:
self.cancel()
else:
self.loop.call_later(self.TIMEOUT, self.check_timeout, started)
class AdvanceTimeTestCase(AsyncioTestCase):
@ -490,13 +502,15 @@ class CommandTestCase(IntegrationTestCase):
""" Synchronous version of `out` method. """
return json.loads(jsonrpc_dumps_pretty(value, ledger=self.ledger))['result']
async def confirm_and_render(self, awaitable, confirm) -> Transaction:
async def confirm_and_render(self, awaitable, confirm, return_tx=False) -> Transaction:
tx = await awaitable
if confirm:
await self.ledger.wait(tx)
await self.generate(1)
await self.ledger.wait(tx, self.blockchain.block_expected)
return self.sout(tx)
if not return_tx:
return self.sout(tx)
return tx
def create_upload_file(self, data, prefix=None, suffix=None):
file_path = tempfile.mktemp(prefix=prefix or "tmp", suffix=suffix or "", dir=self.daemon.conf.upload_dir)
@ -507,19 +521,19 @@ class CommandTestCase(IntegrationTestCase):
async def stream_create(
self, name='hovercraft', bid='1.0', file_path=None,
data=b'hi!', confirm=True, prefix=None, suffix=None, **kwargs):
data=b'hi!', confirm=True, prefix=None, suffix=None, return_tx=False, **kwargs):
if file_path is None and data is not None:
file_path = self.create_upload_file(data=data, prefix=prefix, suffix=suffix)
return await self.confirm_and_render(
self.daemon.jsonrpc_stream_create(name, bid, file_path=file_path, **kwargs), confirm
self.daemon.jsonrpc_stream_create(name, bid, file_path=file_path, **kwargs), confirm, return_tx
)
async def stream_update(
self, claim_id, data=None, prefix=None, suffix=None, confirm=True, **kwargs):
self, claim_id, data=None, prefix=None, suffix=None, confirm=True, return_tx=False, **kwargs):
if data is not None:
file_path = self.create_upload_file(data=data, prefix=prefix, suffix=suffix)
return await self.confirm_and_render(
self.daemon.jsonrpc_stream_update(claim_id, file_path=file_path, **kwargs), confirm
self.daemon.jsonrpc_stream_update(claim_id, file_path=file_path, **kwargs), confirm, return_tx
)
return await self.confirm_and_render(
self.daemon.jsonrpc_stream_update(claim_id, **kwargs), confirm
@ -625,6 +639,9 @@ class CommandTestCase(IntegrationTestCase):
async def claim_search(self, **kwargs):
return (await self.out(self.daemon.jsonrpc_claim_search(**kwargs)))['items']
async def get_claim_by_claim_id(self, claim_id):
return await self.out(self.ledger.get_claim_by_claim_id(claim_id))
async def file_list(self, *args, **kwargs):
return (await self.out(self.daemon.jsonrpc_file_list(*args, **kwargs)))['items']
@ -649,6 +666,9 @@ class CommandTestCase(IntegrationTestCase):
async def transaction_list(self, *args, **kwargs):
return (await self.out(self.daemon.jsonrpc_transaction_list(*args, **kwargs)))['items']
async def blob_list(self, *args, **kwargs):
return (await self.out(self.daemon.jsonrpc_blob_list(*args, **kwargs)))['items']
@staticmethod
def get_claim_id(tx):
return tx['outputs'][0]['claim_id']
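With return_tx threaded through the helpers, an integration test can pick between the JSON-rendered result (the default) and the raw Transaction. A hypothetical test method, for illustration only:
# Sketch only: inside a CommandTestCase subclass.
async def test_stream_create_return_tx(self):
    tx = await self.stream_create('hovercraft', '1.0', data=b'hi!', return_tx=True)  # raw Transaction
    rendered = await self.stream_create('hovercraft2', '1.0', data=b'hi!')  # default: JSON-rendered dict
    claim_id = self.get_claim_id(rendered)
    self.assertEqual(len(claim_id), 40)  # claim ids are 20-byte hashes rendered as hex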

View file

@ -474,3 +474,18 @@ class LockWithMetrics(asyncio.Lock):
return super().release()
finally:
self._lock_held_time_metric.observe(time.perf_counter() - self._lock_acquired_time)
def get_colliding_prefix_bits(first_value: bytes, second_value: bytes):
"""
Calculate the number of colliding prefix bits between <first_value> and <second_value>.
This is the count of leading bits that match before the first differing bit (found via XOR),
going from the most significant bit to the least significant bit.
:param first_value: first value to compare; must have the same length as second_value.
:param second_value: second value to compare; must have the same length as first_value.
:return: number of colliding prefix bits.
"""
assert len(first_value) == len(second_value), "length should be the same"
size = len(first_value) * 8
first_value, second_value = int.from_bytes(first_value, "big"), int.from_bytes(second_value, "big")
return size - (first_value ^ second_value).bit_length()
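A quick worked example of the XOR arithmetic above (inputs chosen arbitrarily for illustration):
# 0xf0 = 1111 0000 and 0xe0 = 1110 0000 share their first three bits, then differ:
#   0xf0 ^ 0xe0 = 0x10, whose bit_length() is 5, so 8 - 5 == 3 colliding prefix bits.
assert get_colliding_prefix_bits(b'\xf0', b'\xe0') == 3
# identical inputs collide on every bit
assert get_colliding_prefix_bits(b'\xaa\xaa', b'\xaa\xaa') == 16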

Binary file not shown.

View file

@ -881,4 +881,187 @@ HASHES = {
879000: '0eb0810f4b81d1845b0a88f05449408df2e45715c9210a656f45278c5fdf7956',
880000: 'e7d613027e3b4ca38d09bbef07998b57db237c6d67f1e8ea50024d2e0d9a1a72',
881000: '21af4d355d8756b8bf0369b2d79b5c824148ae069026ba5c14f9dd6b7555e1db',
882000: 'bc26f028e547ec44fc3864925bd1493211773b5cb9a9583ba4c1909b89fe0d33',
883000: '170a624f4be04cd2fd435cfb6ba1f31b9ef5d7b084a25dfa23cd118c2752029e',
884000: '46cccb7a12b4d01d07c211b7b8db41321cd73f30069df27bcdb3bb600c0272b0',
885000: '7c27f79d5a99baf0f81f2b09eb5c1bf905976a0f872e02bd4ca9e82f0ed50cb0',
886000: '256e3e00cecc72dbbfef5cea627ecf1d43b56edd5fd1642a2bc4e97c17056f34',
887000: '658ebac7dfa62bc7a22b1a9ba4e5b425a866f7550a6b40fd07de47119fd1f7e8',
888000: '497a9d02868605b9ff6e7f15948a83a7e07606829107e63c2e091c90c7a7b4d4',
889000: '561daaa7ebc87e586d37a96ecfbc72484d7eb602824f38f484ed333e78208e9e',
890000: 'ab5a8cb625b28343f8fac858eab6576c856dab88bde8cda02b80b3edfd307d71',
891000: '2e81d9fc885ddc09222b298ac9efbb73638a5721802b9256de6505ecf122dbaa',
892000: '73be08881b8832e986c0bb9a06c70fff346edb2afaf69630e47e4a4a90c5fece',
893000: 'd39079dcaa4d8af1c26f0edf7e16df43cd857a31e0aa4c4123226793f1ab497f',
894000: '0a3b677d72c590d4b1ff7a9b4098d6b52d0dc10d64c30c2766d18e6eb02872cd',
895000: 'a3bbba831f48c5b68e494ee63015b487782c64c5c24bb29436283360c28fd1e0',
896000: '20af178a192ca43975ab6c838fe97ca42ba6c682682eddbc6481efd153ecb0a2',
897000: '8d0ee14b9fdb853a09ab2951d26b8f7cb8bc8038b09513bd330ee4b0bdcc4780',
898000: 'c97fbb70f804408b131a98f9fb4c04cdf2df1655d3e8ff2e0d58ed8537349f4e',
899000: 'eba2be80478e8dec2d66ca40b853580c5dad040351c64c177e3d8c25aff6c1b6',
900000: 'c4dc344a993558418b93b3f60aaef0030e2a4116086577fbf1e2f544bdbddae1',
901000: '36d84229afa63045875fc8fea0c55de8eb90694b3a37cceb825c87abf1fea998',
902000: '8ca4890ecfc5e3f9d767e4fcdf318a1e3e3597675bbcfe534d64e76bc4e8fbf4',
903000: '8b9f6a7514033c57668ca94fb3758cc6d1ef37ac982c2ff5a9f0f206fcd8d0a8',
904000: 'e9ae813991f35ca89af2fe1f1b6adf9e93c6b1dd6a74f003ebbe699a30b252ea',
905000: 'd426489d01d4f4c829f2eb68a67721d2c0e1c71e8c33ef9253593447e8603462',
906000: '63000bbed97451e68d64485c02c1c3d90b4156237dac315f4e012ffb538e375b',
907000: '96759653a4e514541effa7ef86d9f22a272ddde7b069149d17e9d9203a1edafb',
908000: 'eec6477d2f3b71bde76dc2380d6e06aa8aa306ca56ba1dd15a31c22ae0db501b',
909000: 'd5c2984cf130335aa29296ba5b17672d00360fe0ec73977326180014908c0b55',
910000: '7b99cb1c94144f606937903e173bd9ef63bfffd3db8110693fa4c2caa0abc21f',
911000: '95eed0d9dd9869ac6f83fa67863e77f24df69bcb90fef70918f30b2400e24ea8',
912000: '34c3c8780c54ecced50f0a6b394309d09ee6ce37cd98794699c63771d1d91144',
913000: '536052ddcd445702160288ef3f669ce56868c085315556c9f5ca081ef0c0b9e1',
914000: '1bcd1fe9632f93a0a1fe7d8a1891a4fc6ef1be40ccf887524a9095ed7aa9fa44',
915000: '139bad9fa12ec72a37b62ad8511300ebfda89330fa5d5a83861f864b6adeae67',
916000: '81d15282214ff83e2a034212eb58abeafcb5664d3734bff13b22b4c093b20fea',
917000: 'f31081031cebe450e4450ef397d91790fc0068e98e6746cd0aab86d17e4448f5',
918000: '4af8eb28616ef0e859b5471650c7f8e910cd692a6b4ff3a7171a709db2f18e4e',
919000: '78a197b5f9733e9e4dc9820e1c79bd335beb19f6b87056e48e8e21fbe27d83d6',
920000: '33d20f86d1367f07d6731e1e2cc9305252b281b1b092403133924cc1052f501d',
921000: '6926f1e31e7fe9b8f7a81efa73d5635f8f28c1db1708e4d57f6e7ead951a4beb',
922000: '811e2335798eb54696a4b11ca3a44b9d79486262119383d542491afa9ae80204',
923000: '8f47ac365bc380885db809f2818ffc7dd2076aaa0f9bf6c180df1b4358dc842e',
924000: '535e79802c10630c17fb8fddec3ba2bf85eedbc0c076f3575f8189fe887ba993',
925000: 'ca43bd24d17d75d55e72e45549384b395c62e1daf0d3f58f296e18168b918fbf',
926000: '9a03be89e0725877d42296e6c995d9c48bb5f4bbd971f5a9add191af2d1c144b',
927000: 'a14e0ef6bd1bc221dbba99031c16ddbbd76394186677c29bdf07b89fa2a6efac',
928000: 'b16931bd7392e9db26be975b072024210fb5fe6ee22fc0809d51980aa8068a98',
929000: '4da56a2e66fcd98a70039d9061ea5eb0fb6d9460b437d2191e47441182419a04',
930000: '87e820e2237a54c4ea100bdd0145598f05add92185cd3d0929aa2d5099f4d5e0',
931000: '515b22c91172157c443a47cf213014aff144181a77e276e291535ab3762bb1ae',
932000: 'e130c6a9eb416f96256d1f90256a148957daa32f56af228d2d9ce6ff27ce2011',
933000: '30c992ec7a9a320fb4db260373121efc7b5e7fc744f4b31defbe6a7608e0749e',
934000: 'ec490fa0de6b1d78a4121a5044f501bbb3bd9e448c18121cea87eb8e3cadba41',
935000: '603e4ae6a6d936c79b3f1c9f9e88305930953b9b390dac442976a6e8395fc520',
936000: '2b756fe2de4328e598ed511b8828e5c2c6b5cdda1b5e7c1c26f8e0424c81afa9',
937000: '1ae0f15f14a0d4819e34a6c18de9428a9e43e17d75383bffa9ffb18358e93b63',
938000: 'cbd7001825ec87b8c6917d6e9e7dc5c8d7767788b6ffd61a61d0c612dbe5de66',
939000: 'd770d0395aa79076044783fb37a1bb173cb95c93ff1ba82c34a72c4d8e425a03',
940000: '3341d0a0349d091d88d233cd6ea6e0ad553d52039b4d47af51b8a8e7573a7916',
941000: '16123b8758e99344ebe6670cd95826881b274c31d4da2a051052955a32bade3a',
942000: 'ac7430961e77f902918fe79a52cbf6b523e3f2804ec83d0b17908e131ea9ea68',
943000: '2ad08a6877e4687dcb7a623adeddc88403e8082efd6de28328b351282dc141e2',
944000: '81382e8c1f47fa7c03fa1726f9b09ed1cd38140fe50683896eaa1b403d7e5fe3',
945000: '152bfbb166da04dab16030af28ae65b3275819eed1d0bbfc11eba65616ebefd6',
946000: '25b3da0962f87a0d3e4aec8b16483efbcab9514893a42fd31f4cb544ddc45a1f',
947000: '2cb738ba342436628ff292797e3d36c4752d71bdc1af87fe758d469d06e36e0e',
948000: 'b3683e18570fcc8b986720514539181ec43fb5dbc20fe314c56ab6bd31ab766a',
949000: '94ced5bfba55ccffc909bf098d537e047d8d4cbb79f5e2a74146073f39804865',
950000: 'b11543cd2aedae27f6ddc3d2b431c897fdcfe59ed3c926b0777bc1e99de4d12a',
951000: '21508881a7f80fcd0b9b27bbcfba634b39c6525f5313968c4605cd55b4fec446',
952000: 'f9b3ed919c9ca20cd2927d899ee7a86c93c2dd919dafb6fdb792f2d9f1895cb0',
953000: 'cf578d8e80eec4102dc1b5321f10b36020b3b32f4b5d4664c90c412ca2ef6b42',
954000: 'ed17c919ae5c4be835966b47f667d6082c75917b95584b2d2aff0e32f5c8aa98',
955000: '948ea467fa01a20122e2146669214fdd3bb025038554609f7299ece5bca63e39',
956000: 'b50ff4c02957ed8764215d25f206f6f1fe6d0eb712a378b937ff952dd479afd2',
957000: '169922a3e51517ba6104a883d29aac03a9d20b4d448bd2773137b0d790e3db6b',
958000: '92258ac2e8b53167dc30436d93f385d432bd549711ab9790ba4e8263c5c54382',
959000: '7ca824697459eb302bcd7fba9d255fb269555abe7cf9d2dd5e54e196d751e682',
960000: '89f9ec925d23698076d84f9e852ab04fc956ac4465827303de0c3bb0b685eb32',
961000: '41cf75cd71bc12b93674c416e8b01b7410eb9e09eb8727ad93ff0b833c9966c9',
962000: '7db1f1dbff3e389713067879bfedf9513ec74bb1e128b13fc2fe23ad55fd0306',
963000: 'a35e71c611b2227adeac824d151d2f09bdbecd5765a4e62c6e74a3e4290abc66',
964000: 'dc1811130e249d2208d6f85838512b4e5482efb0bd2f619164a68a0c60d7f248',
965000: '92f5e25dd1c03102720dd0c3136b1a0769901bf89fcc0262a5e24405f349ca07',
966000: '08243d780d8ba96a940f409b87d9c6b8a95c92804173b9156ada0dad35b628dc',
967000: 'cb769a8935bb6faeb981da74f4079babbbb89476f825cc897f43e79790295260',
968000: 'ff3fc27d2998f4dc4ac1ff378afe14c7d0f43cc328deb9c978ec0e067d1dfaf9',
969000: 'e41a3452f45d5f025627d08c9c41017679e9c4804371dd1cc02f3ed49f85dbb2',
970000: 'f5eaaf7ba6b47245a4a8096a7785c7b25dc6db342ac2ccbba0c321e97ab58284',
971000: '75414062f1d4ed675dadc8f04ba10147a484aaca1ae316dc0b896a92809b3db6',
972000: '5bcf2ee00133774c7d060a1a1863dfccc20d5127ecb542470f607dec2504fe6f',
973000: '07d15b9656ecde2cd86a9d22c3de8b6505d6bab2aa5a94560b0db9119f1f6f6c',
974000: '2059e7924d7a210a88f5a65abc61152506a82edccd27416e796c81b9b8003f13',
975000: '7fcf5d8b2c0e51cfbdaa2502a9da0bdb323646899dad37dacc39af9f9e16fc5c',
976000: '02acb8cf87a0900436eccfca50371948531041d7b8b410a902205f84dd7fb88e',
977000: '2636dfd5a47016c893265473e78ecbf2000769d886f0d01ee7a91e9397210d15',
978000: 'ce92f52a35096b94bea73a7d4e113bc4564a4a589b66f1ab86f61c822cf9ee76',
979000: '21b8102f5b76be0c8e20d537ebc78ebe46bfcea6b6d2dda950ce5b48e85f72d7',
980000: 'f4df0bd63b36105705de62266d654612d9804bad7069d41344de269657e6f084',
981000: 'f006cd2718d98d774a5cd18394db7744c812fa149c8a63e76bab934aee89f571',
982000: 'da5d6609265d9153022d823b0260aa07e7511ceff7a3fd2ca7ce83cb3900a661',
983000: '3a26f3f02aa145fa8c5268fbe10dd9c3546d7dda57489ca5d4b161beb0d5a6e2',
984000: '968e8cd37a1137797d40f39f106cae62d1e252b46c7473b9434ad5f870ee88fb',
985000: '3129c3bf20deace1a9c92646a9d769da7a07f18dcd5b7a7b1e8cf5fd5390f8e1',
986000: '6ce830ca5da322ddbb97fc572ea03218913d070e5910516b33c6113b02b23c21',
987000: '7fb1a8635623847132ab766a99b792953379f782d1115b9649f5f9c5a742ca04',
988000: '5e8e6c6da7f271129c20c4dd891dcb1df4f9d690ee7cf391c6b7fbd028a0da4c',
989000: '12919e34bb9a9ac1d2a01e221eb8c511117fc4e1b3ae15355d95caf4673bdb08',
990000: '016f8b18227a0c09da55594a98638ad5b0fbb4896e2ab6163ac40b6015b2811e',
991000: 'ddf8cd6e2f4ee07530ae7567cef4fa2c2fd4a655cb20e20422e66fd49bde6489',
992000: 'dca77707c0caa3a9605f3dadf593402339c29448869907fb31f6c624e942dcbd',
993000: 'de9acc4c7c482ecac741fd6acbbc3a333afab52f3fe5eea4130c0770299a56dd',
994000: '54420631f8a801a1b8f391088f599ee22cedc06f24bf67f18272feb8fe70c682',
995000: '4b44b26e3e2495716dfd86fc42594cd4b1e4b70bdab4f0905cce4cb9556e008a',
996000: 'd6e41fd301fc5f519c343ceb39c9ff845656a4482e4e182abdcd3963fd5fde1c',
997000: 'd68b6a509d742b182ffb5a98b0e585a2320a5d3fe6977ad3e6cd06835ef2ea55',
998000: '1efcdcbadbec54ce3a93a1857253614536c34f05a0b1924f24bff194dc3392e1',
999000: '10a7713e46f47527f3819b4a9257a03f3e207d18e4917d6bcb43fdea3ba82b9a',
1000000: '1b4ddb1436df05f07807d6337b93ee1aa8b600fd6a910a8fd5313a39e0440eec',
1001000: 'cde0df1abdae26d2c2bdc111be15fb33231c5e167bb8b8f8eec667d71379fee4',
1002000: 'd7ce7a96a3ca73a4dfd6a1780e23f834f339142519ea7f45d256c113e27e4857',
1003000: 'b1a9b1c562ec62b9dd746d336b4211afc37482d0274ff692a44fa17ac9fe9a28',
1004000: '7afd6d0fb0014fbe16a31c84d3f1731736eaeef35e40bb1a1f232fb00345deae',
1005000: '4af61ce4cda5de58277f7a67cadea5d3f6ce56e54785b188e32306e00b0414df',
1006000: '08e1fb7295efd4a48cb999d899a3d481b682ddbce738fecd88a6d32cbe8234f0',
1007000: '14a367a41603dd690541daee8aa4a2882260059e3f85bd8978b7431e8f7db844',
1008000: 'e673230e62aaefad0678611f94ff35ee8a6e18eb96438bdfb4b614f54f54dba7',
1009000: 'e191af8fb71d0d91419abd19443af3d3f23ee4fe359bb8c390429cc838132bde',
1010000: 'ffdba58f184cf60838b75b7899b6633e7cfd34cf36eded572c0133d07387bc49',
1011000: '40801af3a5546cb9d53e05e21b74be09de9a421b762ca1d52d2266f5c2055ce8',
1012000: '552519acebed0e38102f5270dc60b1da7a123600b6b94169ae74462ae454693f',
1013000: '1eee96f48418929927eaa9642777bc806d326cfffaf077bc8695a7ecd438d631',
1014000: 'a471093e1de2a8db586412d7351c8d88e44ea890f46e9b43251af427a0a4a879',
1015000: '57532f5a522295cc139f008bdcb7a1e6d02e6035d5221b2687c7c216f06297a2',
1016000: 'ec46dba07addcb6e62f58456a53c513d876f1c49ae7d76d230adb8debd26027d',
1017000: '33ea8d25f342a7465ed71e4bab2b91007991e0994c61d321e3625301a1390322',
1018000: '4871c03cc95d4ce0a39bd2cebbb001b2ea1cce1b3561bb841d88f43bb9d12ffd',
1019000: 'f5248257576eb2ff4139d6374cc7ce34121cc942598cf9e04d2bd572e09189bb',
1020000: 'e7785286897c85cfb0276957bff216039eeb11bc1ebca89d0bb586022caa5750',
1021000: 'a30220f17d060634c5f6a1ddc5ea34b01c18fb5eb7e0e8267b66bf5a49525627',
1022000: '6083ea49e64ac0d4507c674237cf87d30b90b285ec63d082e626df0223eb7c9c',
1023000: '1dc5596d716bc33ee0f56fc40c1f073155a58a7692935c9e5854ef3b65b76828',
1024000: '065adfee40dc33abff07fb55339571712b959bc1830dc60b6691e36eab1508ae',
1025000: 'bb6903752d31278570e774b80a80782179c78f099e58c3dc4cba7afea7a471c4',
1026000: 'f3050f3c2f3a76f5084856b0f089383517caa3f51530fbc29335308f5f170625',
1027000: '746ed3701510d07958d11a06f22dbb839d9858373dc5a33249dd69e91bab01fd',
1028000: '43f7a96ea6a45b78c29ad4a2f8680ef184438c2bd3686172b0564e0ae6dd7ba1',
1029000: 'cbb9916099c59e14fe61d284374f4feaa3d43afec59e4698ed92143576f24b34',
1030000: '2e805fc2331e32e586ea692bc3d4e6b11e1ec3f1cab6e331b459f9f1ac9a1f1e',
1031000: '04f324f8f6d4f9901cf65f78dc91d6010ea6cf125f5ac0253b57b5f1f79e81e0',
1032000: '60ca62f52fdfd858b0ee0fdb380648bde85ca14e2a73565205ed4ee0bc861c77',
1033000: 'eb60aac23d599d3099cf98ed8fc3213f1bc06bc1c677429b303e9c81f79f1340',
1034000: 'f0328df2daf119ce673ddfa7a39a84576985f701f7a7dec3f56f58c2019ebd4d',
1035000: 'f9d3cbce3854de168d8835c96917c01be6244c8f82641e8d9398dfffec4e7107',
1036000: '7dca97e6e1d6ed70aa7805f74b768009a270e7ebe1dd951e8727d1d2f2d271f2',
1037000: '5329504126b2845b3044f423b521e77ff58d7d242f24bf87c87f4d8d4e03a947',
1038000: '5bad3ad55e3daa415f3182a1f2a099fe1767e8fae34e9bb95d47e242b8971434',
1039000: 'c29729b8ba49ac0043fe4aa6fc971f8ac3eda68ff92970957ada39a2989b2491',
1040000: 'f303aebfc9267600c081d0c021065743f93790df6f5c924a86b773788e0c45be',
1041000: 'a1cbe5059fa2275707785b77970c36d79b12c1ba93121bc9064ab9b64abacf7b',
1042000: '004b0dd4e438abc54ae832d733df32a6ba35b75e6d3e0c9c1dee5a7950507295',
1043000: '31893a3fe7bb4f6dd546c7a8de4a65990e94046aab442d18c68b6bf6acd54518',
1044000: '2c4dd479948acc42946f94050810000b0539864ad24a67a7251bff1c4971b035',
1045000: '1cea782d60df35a88b30ae205ce37e30abc7cad2b22181722be150bd92c53814',
1046000: 'ee808f0efb0f2ef93e8599d8b7f0e2e7c3cdc42353e4ea5165028b961f43d548',
1047000: '75f057e2a8cb1d46e5c943d63cc56936a6bac8b1cb89300593845a20baf39765',
1048000: '2abcd227f5314baed85e3c5b49d3888a60085c1845c955a8bf96aa3dd6394798',
1049000: '5d0ec24b9acd5ab21b42f68e1f3142b7bf83433b98f2fa9794586c8eff45893e',
1050000: '1d364b13a4c17bd67a6d1e5f77c26d02faa014d7cd152b4da70380f168b8e0ff',
1051000: 'b9a20cec21de84433be9b85817dd4803e875d9275dbc02907b29888431859bae',
1052000: '424cb56b00407d73b309b2081dd0bf89213cf024e3aafb3090506aa0ba10f835',
1053000: '6df3041a32fafd6a4e08778546d077cf591e1a2a16e77fe7a610efc2b542a9ff',
1054000: '78f8dee794f3d4366019339d7ba74ad2b543ecd25dc575620f66e1d535411971',
1055000: '43b8e9dae5addd58a7cccf62ba57ab46ffdaa2dcd113cc8ca537e9101b54c096',
1056000: '86b7f3741343f85d93410b78cc3fbf03d49b60a664e908703016aa56a206ae7e',
1057000: 'b033cf6ec622be6a99dff536a2cf73b36d3c3f8c3835ee17e0dd357403e85c41',
1058000: 'a65a6db692a8358e399a5ac3c818902fdb60595262ae05531084848febead249',
1059000: 'f6d781d2e2fdb4b7b074d1d8123875d899cdbd6be375cb4288e86f1d14a929f6',
1060000: 'cd9019bb1de4926cca16a7bef1a46786f10a3260d467cda0775f73361795abc9',
1061000: 'ed4f5dc6f475f95b40595632fafd9e7e5eef388b6cc15772204c0b0e9ee4e542',
1062000: 'c44d02a890aa66979b10d1cfa597c877f498841b4e12dd9a7bdf8d4a5fccab80',
1063000: '1c093734f5f241b36c1b9971e2759983f88f4033405a2588b4ebfd6998ac7465',
1064000: '9e354a83b71bbb9704053bfeea038a9c3d5daad080c6406c698b047c634706a6',
}

View file

@ -2,6 +2,7 @@ NULL_HASH32 = b'\x00'*32
CENT = 1000000
COIN = 100*CENT
DUST = 1000
TIMEOUT = 30.0

View file

@ -556,7 +556,7 @@ class Ledger(metaclass=LedgerRegistry):
log.info("Sync finished for address %s: %d/%d", address, len(pending_synced_history), len(to_request))
assert len(pending_synced_history) == len(remote_history), \
f"{len(pending_synced_history)} vs {len(remote_history)}"
f"{len(pending_synced_history)} vs {len(remote_history)} for {address}"
synced_history = ""
for remote_i, i in zip(range(len(remote_history)), sorted(pending_synced_history.keys())):
assert i == remote_i, f"{i} vs {remote_i}"
@ -894,9 +894,21 @@ class Ledger(metaclass=LedgerRegistry):
hub_server=new_sdk_server is not None
)
async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
for claim in (await self.claim_search(accounts, claim_id=claim_id, **kwargs))[0]:
return claim
# async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
# return await self.network.get_claim_by_id(claim_id)
async def get_claim_by_claim_id(self, claim_id, accounts=None, include_purchase_receipt=False,
include_is_my_output=False):
accounts = accounts or []
# return await self.network.get_claim_by_id(claim_id)
inflated = await self._inflate_outputs(
self.network.get_claim_by_id(claim_id), accounts,
include_purchase_receipt=include_purchase_receipt,
include_is_my_output=include_is_my_output,
)
txos = inflated[0]
if txos:
return txos[0]
async def _report_state(self):
try:

View file

@ -194,6 +194,8 @@ class WalletManager:
'data_path': config.wallet_dir,
'tx_cache_size': config.transaction_cache_size
}
if 'LBRY_FEE_PER_NAME_CHAR' in os.environ:
ledger_config['fee_per_name_char'] = int(os.environ.get('LBRY_FEE_PER_NAME_CHAR'))
wallets_directory = os.path.join(config.wallet_dir, 'wallets')
if not os.path.exists(wallets_directory):

View file

@ -238,7 +238,7 @@ class Network:
log.exception("error looking up dns for spv server %s:%i", server, port)
# accumulate the dns results
if self.config['explicit_servers']:
if self.config.get('explicit_servers', []):
hubs = self.config['explicit_servers']
elif self.known_hubs:
hubs = self.known_hubs
@ -254,7 +254,7 @@ class Network:
sent_ping_timestamps = {}
_, ip_to_hostnames = await self.resolve_spv_dns()
n = len(ip_to_hostnames)
log.info("%i possible spv servers to try (%i urls in config)", n, len(self.config['explicit_servers']))
log.info("%i possible spv servers to try (%i urls in config)", n, len(self.config.get('explicit_servers', [])))
pongs = {}
known_hubs = self.known_hubs
try:
@ -299,8 +299,8 @@ class Network:
if (pong is not None and self.jurisdiction is not None) and \
(pong.country_name != self.jurisdiction):
continue
client = ClientSession(network=self, server=(host, port), timeout=self.config['hub_timeout'],
concurrency=self.config['concurrent_hub_requests'])
client = ClientSession(network=self, server=(host, port), timeout=self.config.get('hub_timeout', 30),
concurrency=self.config.get('concurrent_hub_requests', 30))
try:
await client.create_connection()
log.warning("Connected to spv server %s:%i", host, port)
@ -465,6 +465,12 @@ class Network:
def get_server_features(self):
return self.rpc('server.features', (), restricted=True)
# def get_claims_by_ids(self, claim_ids):
# return self.rpc('blockchain.claimtrie.getclaimsbyids', claim_ids)
def get_claim_by_id(self, claim_id):
return self.rpc('blockchain.claimtrie.getclaimbyid', [claim_id])
def resolve(self, urls, session_override=None):
return self.rpc('blockchain.claimtrie.resolve', urls, False, session_override)

View file

@ -1,5 +1,5 @@
__hub_url__ = (
"https://github.com/lbryio/hub/releases/download/v0.2021.08.24-beta/hub"
"https://github.com/lbryio/hub/releases/download/v0.2022.01.21.1/hub"
)
from .node import Conductor
from .service import ConductorService

View file

@ -196,11 +196,10 @@ class SPVNode:
self.session_timeout = 600
self.rpc_port = '0' # disabled by default
self.stopped = False
self.index_name = None
self.index_name = uuid4().hex
async def start(self, blockchain_node: 'BlockchainNode', extraconf=None):
self.data_path = tempfile.mkdtemp()
self.index_name = uuid4().hex
conf = {
'DESCRIPTION': '',
'PAYMENT_ADDRESS': '',
@ -223,7 +222,7 @@ class SPVNode:
# TODO: don't use os.environ
os.environ.update(conf)
self.server = Server(Env(self.coin_class))
self.server.mempool.refresh_secs = self.server.bp.prefetcher.polling_delay = 0.5
self.server.bp.mempool.refresh_secs = self.server.bp.prefetcher.polling_delay = 0.5
await self.server.start()
async def stop(self, cleanup=True):
@ -474,6 +473,10 @@ class HubProcess(asyncio.SubprocessProtocol):
raise SystemError(data.decode())
if b'listening on' in data:
self.ready.set()
str_lines = str(data.decode()).split("\n")
for line in str_lines:
if 'releaseTime' in line:
print(line)
def process_exited(self):
self.stopped.set()

View file

@ -496,6 +496,17 @@ class RPCSession(SessionBase):
self.abort()
return False
async def send_notifications(self, notifications) -> bool:
"""Send an RPC notification over the network."""
message, _ = self.connection.send_batch(notifications)
try:
await self._send_message(message)
return True
except asyncio.TimeoutError:
self.logger.info("timeout sending address notification to %s", self.peer_address_str(for_log=True))
self.abort()
return False
def send_batch(self, raise_errors=False):
"""Return a BatchRequest. Intended to be used like so:

File diff suppressed because it is too large.

View file

@ -1,7 +1,6 @@
import logging
import traceback
import argparse
import importlib
from lbry.wallet.server.env import Env
from lbry.wallet.server.server import Server
@ -10,27 +9,19 @@ def get_argument_parser():
parser = argparse.ArgumentParser(
prog="lbry-hub"
)
parser.add_argument("spvserver", type=str, help="Python class path to SPV server implementation.",
nargs="?", default="lbry.wallet.server.coin.LBC")
Env.contribute_to_arg_parser(parser)
return parser
def get_coin_class(spvserver):
spvserver_path, coin_class_name = spvserver.rsplit('.', 1)
spvserver_module = importlib.import_module(spvserver_path)
return getattr(spvserver_module, coin_class_name)
def main():
parser = get_argument_parser()
args = parser.parse_args()
coin_class = get_coin_class(args.spvserver)
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
logging.info('lbry.server starting')
logging.getLogger('aiohttp').setLevel(logging.WARNING)
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
try:
server = Server(Env(coin_class))
server = Server(Env.from_arg_parser(args))
server.run()
except Exception:
traceback.print_exc()

View file

@ -14,8 +14,7 @@ from lbry.wallet.server.daemon import Daemon, LBCDaemon
from lbry.wallet.server.script import ScriptPubKey, OpCodes
from lbry.wallet.server.leveldb import LevelDB
from lbry.wallet.server.session import LBRYElectrumX, LBRYSessionManager
from lbry.wallet.server.db.writer import LBRYLevelDB
from lbry.wallet.server.block_processor import LBRYBlockProcessor
from lbry.wallet.server.block_processor import BlockProcessor
Block = namedtuple("Block", "raw header transactions")
@ -39,7 +38,7 @@ class Coin:
SESSIONCLS = LBRYElectrumX
DESERIALIZER = lib_tx.Deserializer
DAEMON = Daemon
BLOCK_PROCESSOR = LBRYBlockProcessor
BLOCK_PROCESSOR = BlockProcessor
SESSION_MANAGER = LBRYSessionManager
DB = LevelDB
HEADER_VALUES = [
@ -214,6 +213,11 @@ class Coin:
txs = cls.DESERIALIZER(raw_block, start=len(header)).read_tx_block()
return Block(raw_block, header, txs)
@classmethod
def transaction(cls, raw_tx: bytes):
"""Return a Block namedtuple given a raw block and its height."""
return cls.DESERIALIZER(raw_tx).read_tx()
@classmethod
def decimal_value(cls, value):
"""Return the number of standard coin units as a Decimal given a
@ -237,10 +241,9 @@ class Coin:
class LBC(Coin):
DAEMON = LBCDaemon
SESSIONCLS = LBRYElectrumX
BLOCK_PROCESSOR = LBRYBlockProcessor
SESSION_MANAGER = LBRYSessionManager
DESERIALIZER = DeserializerSegWit
DB = LBRYLevelDB
DB = LevelDB
NAME = "LBRY"
SHORTNAME = "LBC"
NET = "mainnet"
@ -258,6 +261,18 @@ class LBC(Coin):
TX_PER_BLOCK = 1
RPC_PORT = 9245
REORG_LIMIT = 200
nOriginalClaimExpirationTime = 262974
nExtendedClaimExpirationTime = 2102400
nExtendedClaimExpirationForkHeight = 400155
nNormalizedNameForkHeight = 539940 # targeting 21 March 2019
nMinTakeoverWorkaroundHeight = 496850
nMaxTakeoverWorkaroundHeight = 658300 # targeting 30 Oct 2019
nWitnessForkHeight = 680770 # targeting 11 Dec 2019
nAllClaimsInMerkleForkHeight = 658310 # targeting 30 Oct 2019
proportionalDelayFactor = 32
maxTakeoverDelay = 4032
PEERS = [
]
@ -335,6 +350,18 @@ class LBC(Coin):
else:
return sha256(script).digest()[:HASHX_LEN]
@classmethod
def get_expiration_height(cls, last_updated_height: int, extended: bool = False) -> int:
if extended:
return last_updated_height + cls.nExtendedClaimExpirationTime
if last_updated_height < cls.nExtendedClaimExpirationForkHeight:
return last_updated_height + cls.nOriginalClaimExpirationTime
return last_updated_height + cls.nExtendedClaimExpirationTime
@classmethod
def get_delay_for_name(cls, blocks_of_continuous_ownership: int) -> int:
return min(blocks_of_continuous_ownership // cls.proportionalDelayFactor, cls.maxTakeoverDelay)
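A worked example of the two helpers above, using the mainnet constants defined on LBC (the heights are made up for illustration):
assert LBC.get_expiration_height(300_000) == 300_000 + 262_974    # last updated before the expiration fork
assert LBC.get_expiration_height(500_000) == 500_000 + 2_102_400  # after the fork: extended lifetime
assert LBC.get_delay_for_name(100_000) == 3_125                   # min(100_000 // 32, 4_032)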
class LBCRegTest(LBC):
NET = "regtest"
@ -344,6 +371,15 @@ class LBCRegTest(LBC):
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = bytes.fromhex("c4")
nOriginalClaimExpirationTime = 500
nExtendedClaimExpirationTime = 600
nExtendedClaimExpirationForkHeight = 800
nNormalizedNameForkHeight = 250
nMinTakeoverWorkaroundHeight = -1
nMaxTakeoverWorkaroundHeight = -1
nWitnessForkHeight = 150
nAllClaimsInMerkleForkHeight = 350
class LBCTestNet(LBCRegTest):
NET = "testnet"

View file

@ -55,7 +55,7 @@ class Daemon:
self.available_rpcs = {}
self.connector = aiohttp.TCPConnector()
self._block_hash_cache = LRUCacheWithMetrics(100000)
self._block_cache = LRUCacheWithMetrics(2 ** 16, metric_name='block', namespace=NAMESPACE)
self._block_cache = LRUCacheWithMetrics(2 ** 13, metric_name='block', namespace=NAMESPACE)
async def close(self):
if self.connector:
@ -364,6 +364,11 @@ class LBCDaemon(Daemon):
'''Given a name, returns the winning claim value.'''
return await self._send_single('getvalueforname', (name,))
@handles_errors
async def getnamesintrie(self):
'''Returns the list of claim names currently in the trie.'''
return await self._send_single('getnamesintrie')
@handles_errors
async def claimname(self, name, hexvalue, amount):
'''Claim a name, used for functional tests only.'''

View file

@ -0,0 +1,42 @@
import enum
@enum.unique
class DB_PREFIXES(enum.Enum):
claim_to_support = b'K'
support_to_claim = b'L'
claim_to_txo = b'E'
txo_to_claim = b'G'
claim_to_channel = b'I'
channel_to_claim = b'J'
claim_short_id_prefix = b'F'
effective_amount = b'D'
claim_expiration = b'O'
claim_takeover = b'P'
pending_activation = b'Q'
activated_claim_and_support = b'R'
active_amount = b'S'
repost = b'V'
reposted_claim = b'W'
undo = b'M'
claim_diff = b'Y'
tx = b'B'
block_hash = b'C'
header = b'H'
tx_num = b'N'
tx_count = b'T'
tx_hash = b'X'
utxo = b'u'
hashx_utxo = b'h'
hashx_history = b'x'
db_state = b's'
channel_count = b'Z'
support_amount = b'a'
block_txs = b'b'
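These one-byte values namespace every key in the LevelDB. A minimal sketch of how a prefixed key could be composed (the big-endian tx-number suffix is an assumption for illustration; the real key layouts live in the prefixes module):
import struct
def tx_hash_key(tx_num: int) -> bytes:
    # hypothetical layout: prefix byte followed by a 4-byte big-endian tx number
    return DB_PREFIXES.tx_hash.value + struct.pack('>L', tx_num)
assert tx_hash_key(42) == b'X\x00\x00\x00*'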

View file

@ -1,22 +0,0 @@
class FindShortestID:
__slots__ = 'short_id', 'new_id'
def __init__(self):
self.short_id = ''
self.new_id = None
def step(self, other_id, new_id):
self.new_id = new_id
for i in range(len(self.new_id)):
if other_id[i] != self.new_id[i]:
if i > len(self.short_id)-1:
self.short_id = self.new_id[:i+1]
break
def finalize(self):
if self.short_id:
return '#'+self.short_id
def register_canonical_functions(connection):
connection.create_aggregate("shortest_id", 2, FindShortestID)

View file

@ -1,3 +1,5 @@
import typing
CLAIM_TYPES = {
'stream': 1,
'channel': 2,
@ -418,3 +420,28 @@ INDEXED_LANGUAGES = [
'zh',
'zu'
]
class ResolveResult(typing.NamedTuple):
name: str
normalized_name: str
claim_hash: bytes
tx_num: int
position: int
tx_hash: bytes
height: int
amount: int
short_url: str
is_controlling: bool
canonical_url: str
creation_height: int
activation_height: int
expiration_height: int
effective_amount: int
support_amount: int
reposted: int
last_takeover_height: typing.Optional[int]
claims_in_channel: typing.Optional[int]
channel_hash: typing.Optional[bytes]
reposted_claim_hash: typing.Optional[bytes]
signature_valid: typing.Optional[bool]

lbry/wallet/server/db/db.py (new file, 119 lines)
View file

@ -0,0 +1,119 @@
import struct
from typing import Optional
from lbry.wallet.server.db import DB_PREFIXES
from lbry.wallet.server.db.revertable import RevertableOpStack, RevertablePut, RevertableDelete
class KeyValueStorage:
def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]:
raise NotImplementedError()
def iterator(self, reverse=False, start=None, stop=None, include_start=True, include_stop=False, prefix=None,
include_key=True, include_value=True, fill_cache=True):
raise NotImplementedError()
def write_batch(self, transaction: bool = False):
raise NotImplementedError()
def close(self):
raise NotImplementedError()
@property
def closed(self) -> bool:
raise NotImplementedError()
class PrefixDB:
UNDO_KEY_STRUCT = struct.Struct(b'>Q')
def __init__(self, db: KeyValueStorage, max_undo_depth: int = 200, unsafe_prefixes=None):
self._db = db
self._op_stack = RevertableOpStack(db.get, unsafe_prefixes=unsafe_prefixes)
self._max_undo_depth = max_undo_depth
def unsafe_commit(self):
"""
Write staged changes to the database without keeping undo information
Changes written cannot be undone
"""
try:
with self._db.write_batch(transaction=True) as batch:
batch_put = batch.put
batch_delete = batch.delete
for staged_change in self._op_stack:
if staged_change.is_put:
batch_put(staged_change.key, staged_change.value)
else:
batch_delete(staged_change.key)
finally:
self._op_stack.clear()
def commit(self, height: int):
"""
Write changes for a block height to the database and keep undo information so that the changes can be reverted
"""
undo_ops = self._op_stack.get_undo_ops()
delete_undos = []
if height > self._max_undo_depth:
delete_undos.extend(self._db.iterator(
start=DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(0),
stop=DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height - self._max_undo_depth),
include_value=False
))
try:
with self._db.write_batch(transaction=True) as batch:
batch_put = batch.put
batch_delete = batch.delete
for staged_change in self._op_stack:
if staged_change.is_put:
batch_put(staged_change.key, staged_change.value)
else:
batch_delete(staged_change.key)
for undo_to_delete in delete_undos:
batch_delete(undo_to_delete)
batch_put(DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height), undo_ops)
finally:
self._op_stack.clear()
def rollback(self, height: int):
"""
Revert changes for a block height
"""
undo_key = DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height)
self._op_stack.apply_packed_undo_ops(self._db.get(undo_key))
try:
with self._db.write_batch(transaction=True) as batch:
batch_put = batch.put
batch_delete = batch.delete
for staged_change in self._op_stack:
if staged_change.is_put:
batch_put(staged_change.key, staged_change.value)
else:
batch_delete(staged_change.key)
batch_delete(undo_key)
finally:
self._op_stack.clear()
def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]:
return self._db.get(key, fill_cache=fill_cache)
def iterator(self, reverse=False, start=None, stop=None, include_start=True, include_stop=False, prefix=None,
include_key=True, include_value=True, fill_cache=True):
return self._db.iterator(
reverse=reverse, start=start, stop=stop, include_start=include_start, include_stop=include_stop,
prefix=prefix, include_key=include_key, include_value=include_value, fill_cache=fill_cache
)
def close(self):
if not self._db.closed:
self._db.close()
@property
def closed(self):
return self._db.closed
def stage_raw_put(self, key: bytes, value: bytes):
self._op_stack.append_op(RevertablePut(key, value))
def stage_raw_delete(self, key: bytes, value: bytes):
self._op_stack.append_op(RevertableDelete(key, value))
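A hedged sketch of the stage/commit/rollback flow above, backed by an in-memory KeyValueStorage written only for illustration (the real backend is the LevelDB wrapper; MemoryStorage and the example key are assumptions):
from contextlib import contextmanager

class MemoryStorage(KeyValueStorage):
    # dict-backed stand-in implementing just enough of the interface for PrefixDB
    def __init__(self):
        self._data, self._closed = {}, False

    def get(self, key: bytes, fill_cache: bool = True):
        return self._data.get(key)

    def iterator(self, reverse=False, start=None, stop=None, include_start=True, include_stop=False,
                 prefix=None, include_key=True, include_value=True, fill_cache=True):
        for key in sorted(self._data, reverse=reverse):
            if prefix is not None and not key.startswith(prefix):
                continue
            if start is not None and key < start:
                continue
            if stop is not None and key >= stop:
                continue
            yield key if not include_value else (key, self._data[key])

    @contextmanager
    def write_batch(self, transaction: bool = False):
        data = self._data
        class Batch:
            @staticmethod
            def put(key, value):
                data[key] = value
            @staticmethod
            def delete(key):
                data.pop(key, None)
        yield Batch()

    def close(self):
        self._closed = True

    @property
    def closed(self):
        return self._closed

prefix_db = PrefixDB(MemoryStorage())
key = DB_PREFIXES.tx.value + b'\x00' * 32   # made-up key for illustration
prefix_db.stage_raw_put(key, b'raw tx bytes')
prefix_db.commit(height=1)                  # writes the put and stores undo ops for height 1
assert prefix_db.get(key) == b'raw tx bytes'
prefix_db.rollback(height=1)                # replays the stored undo ops for height 1
assert prefix_db.get(key) is None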

View file

@ -8,7 +8,7 @@ INDEX_DEFAULT_SETTINGS = {
"number_of_shards": 1,
"number_of_replicas": 0,
"sort": {
"field": ["trending_mixed", "release_time"],
"field": ["trending_score", "release_time"],
"order": ["desc", "desc"]
}}
},
@ -27,11 +27,24 @@ INDEX_DEFAULT_SETTINGS = {
"max_chars": 10
}
},
"sd_hash": {
"fields": {
"keyword": {
"ignore_above": 96,
"type": "keyword"
}
},
"type": "text",
"index_prefixes": {
"min_chars": 1,
"max_chars": 4
}
},
"height": {"type": "integer"},
"claim_type": {"type": "byte"},
"censor_type": {"type": "byte"},
"trending_mixed": {"type": "float"},
"release_time": {"type": "long"},
"trending_score": {"type": "double"},
"release_time": {"type": "long"}
}
}
}
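With the sd_hash mapping above, a short hash prefix can be matched the same way expand_query handles partial claim_ids (sketch only; the host, index name and prefix value are assumptions):
from elasticsearch import AsyncElasticsearch

async def find_by_sd_hash_prefix(prefix: str):
    es = AsyncElasticsearch([{'host': 'localhost', 'port': 9200}])
    try:
        # expand_query appends '.keyword' for text fields, so mirror that here
        result = await es.search(index='claims', size=1,
                                 body={'query': {'prefix': {'sd_hash.keyword': prefix}}})
        hits = result['hits']['hits']
        return hits[0]['_source'] if hits else None
    finally:
        await es.close()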
@ -42,7 +55,7 @@ FIELDS = {
'tx_id', 'tx_nout', 'tx_position',
'short_url', 'canonical_url',
'is_controlling', 'last_take_over_height',
'public_key_bytes', 'public_key_id', 'claims_in_channel', 'channel_join_height',
'public_key_bytes', 'public_key_id', 'claims_in_channel',
'channel_id', 'signature', 'signature_digest', 'is_signature_valid',
'amount', 'effective_amount', 'support_amount',
'fee_amount', 'fee_currency',
@ -52,31 +65,36 @@ FIELDS = {
'timestamp', 'creation_timestamp',
'duration', 'release_time',
'tags', 'languages', 'has_source', 'reposted_claim_type',
'reposted_claim_id', 'repost_count',
'trending_group', 'trending_mixed', 'trending_local', 'trending_global',
'reposted_claim_id', 'repost_count', 'sd_hash',
'trending_score', 'tx_num'
}
TEXT_FIELDS = {'author', 'canonical_url', 'channel_id', 'claim_name', 'description', 'claim_id', 'censoring_channel_id',
TEXT_FIELDS = {'author', 'canonical_url', 'channel_id', 'description', 'claim_id', 'censoring_channel_id',
'media_type', 'normalized_name', 'public_key_bytes', 'public_key_id', 'short_url', 'signature',
'signature_digest', 'title', 'tx_id', 'fee_currency', 'reposted_claim_id', 'tags'}
'claim_name', 'signature_digest', 'title', 'tx_id', 'fee_currency', 'reposted_claim_id',
'tags', 'sd_hash'}
RANGE_FIELDS = {
'height', 'creation_height', 'activation_height', 'expiration_height',
'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount',
'tx_position', 'channel_join', 'repost_count', 'limit_claims_per_channel',
'tx_position', 'repost_count', 'limit_claims_per_channel',
'amount', 'effective_amount', 'support_amount',
'trending_group', 'trending_mixed', 'censor_type',
'trending_local', 'trending_global',
'trending_score', 'censor_type', 'tx_num'
}
ALL_FIELDS = RANGE_FIELDS | TEXT_FIELDS | FIELDS
REPLACEMENTS = {
'claim_name': 'normalized_name',
'name': 'normalized_name',
'txid': 'tx_id',
'nout': 'tx_nout',
'valid_channel_signature': 'is_signature_valid',
'trending_group': 'trending_score',
'trending_mixed': 'trending_score',
'trending_global': 'trending_score',
'trending_local': 'trending_score',
'reposted': 'repost_count',
'stream_types': 'stream_type',
'media_types': 'media_type',
'reposted': 'repost_count'
'valid_channel_signature': 'is_signature_valid'
}

View file

@ -1,3 +1,4 @@
import time
import asyncio
import struct
from binascii import unhexlify
@ -8,8 +9,6 @@ from typing import Optional, List, Iterable, Union
from elasticsearch import AsyncElasticsearch, NotFoundError, ConnectionError
from elasticsearch.helpers import async_streaming_bulk
from lbry.crypto.base58 import Base58
from lbry.error import ResolveCensoredError, TooManyClaimSearchParametersError
from lbry.schema.result import Outputs, Censor
from lbry.schema.tags import clean_tags
@ -19,6 +18,7 @@ from lbry.wallet.server.db.common import CLAIM_TYPES, STREAM_TYPES
from lbry.wallet.server.db.elasticsearch.constants import INDEX_DEFAULT_SETTINGS, REPLACEMENTS, FIELDS, TEXT_FIELDS, \
RANGE_FIELDS, ALL_FIELDS
from lbry.wallet.server.util import class_logger
from lbry.wallet.server.db.common import ResolveResult
class ChannelResolution(str):
@ -50,9 +50,7 @@ class SearchIndex:
self.index = index_prefix + 'claims'
self.logger = class_logger(__name__, self.__class__.__name__)
self.claim_cache = LRUCache(2 ** 15)
self.short_id_cache = LRUCache(2 ** 17)
self.search_cache = LRUCache(2 ** 17)
self.resolution_cache = LRUCache(2 ** 17)
self._elastic_host = elastic_host
self._elastic_port = elastic_port
@ -91,6 +89,7 @@ class SearchIndex:
if index_version != self.VERSION:
self.logger.error("es search index has an incompatible version: %s vs %s", index_version, self.VERSION)
raise IndexVersionMismatch(index_version, self.VERSION)
await self.sync_client.indices.refresh(self.index)
return acked
def stop(self):
@ -103,15 +102,28 @@ class SearchIndex:
async def _consume_claim_producer(self, claim_producer):
count = 0
for op, doc in claim_producer:
async for op, doc in claim_producer:
if op == 'delete':
yield {'_index': self.index, '_op_type': 'delete', '_id': doc}
yield {
'_index': self.index,
'_op_type': 'delete',
'_id': doc
}
else:
yield extract_doc(doc, self.index)
yield {
'doc': {key: value for key, value in doc.items() if key in ALL_FIELDS},
'_id': doc['claim_id'],
'_index': self.index,
'_op_type': 'update',
'doc_as_upsert': True
}
count += 1
if count % 100 == 0:
self.logger.info("Indexing in progress, %d claims.", count)
self.logger.info("Indexing done for %d claims.", count)
if count:
self.logger.info("Indexing done for %d claims.", count)
else:
self.logger.debug("Indexing done for %d claims.", count)
async def claim_consumer(self, claim_producer):
touched = set()
@ -123,22 +135,170 @@ class SearchIndex:
item = item.popitem()[1]
touched.add(item['_id'])
await self.sync_client.indices.refresh(self.index)
self.logger.info("Indexing done.")
self.logger.debug("Indexing done.")
def update_filter_query(self, censor_type, blockdict, channels=False):
blockdict = {key[::-1].hex(): value[::-1].hex() for key, value in blockdict.items()}
blockdict = {blocked.hex(): blocker.hex() for blocked, blocker in blockdict.items()}
if channels:
update = expand_query(channel_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
else:
update = expand_query(claim_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
key = 'channel_id' if channels else 'claim_id'
update['script'] = {
"source": f"ctx._source.censor_type={censor_type}; ctx._source.censoring_channel_id=params[ctx._source.{key}]",
"source": f"ctx._source.censor_type={censor_type}; "
f"ctx._source.censoring_channel_id=params[ctx._source.{key}];",
"lang": "painless",
"params": blockdict
}
return update
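A hedged example of the body this builds for a single blocked claim (search_index, the hashes and the censor_type value are assumptions for illustration):
blocked_claim_hash = bytes.fromhex('aa' * 20)
blocking_channel_hash = bytes.fromhex('bb' * 20)
update = search_index.update_filter_query(
    censor_type=2, blockdict={blocked_claim_hash: blocking_channel_hash}
)
# update['query'] matches claims with that claim_id and a censor_type below 2;
# update['script'] stamps censor_type and censoring_channel_id onto each match.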
async def update_trending_score(self, params):
update_trending_score_script = """
double softenLBC(double lbc) { return (Math.pow(lbc, 1.0 / 3.0)); }
double logsumexp(double x, double y)
{
double top;
if(x > y)
top = x;
else
top = y;
double result = top + Math.log(Math.exp(x-top) + Math.exp(y-top));
return(result);
}
double logdiffexp(double big, double small)
{
return big + Math.log(1.0 - Math.exp(small - big));
}
double squash(double x)
{
if(x < 0.0)
return -Math.log(1.0 - x);
else
return Math.log(x + 1.0);
}
double unsquash(double x)
{
if(x < 0.0)
return 1.0 - Math.exp(-x);
else
return Math.exp(x) - 1.0;
}
double log_to_squash(double x)
{
return logsumexp(x, 0.0);
}
double squash_to_log(double x)
{
//assert x > 0.0;
return logdiffexp(x, 0.0);
}
double squashed_add(double x, double y)
{
// squash(unsquash(x) + unsquash(y)) but avoiding overflow.
// Cases where the signs are the same
if (x < 0.0 && y < 0.0)
return -logsumexp(-x, logdiffexp(-y, 0.0));
if (x >= 0.0 && y >= 0.0)
return logsumexp(x, logdiffexp(y, 0.0));
// Where the signs differ
if (x >= 0.0 && y < 0.0)
if (Math.abs(x) >= Math.abs(y))
return logsumexp(0.0, logdiffexp(x, -y));
else
return -logsumexp(0.0, logdiffexp(-y, x));
if (x < 0.0 && y >= 0.0)
{
// Addition is commutative, hooray for new math
return squashed_add(y, x);
}
return 0.0;
}
double squashed_multiply(double x, double y)
{
// squash(unsquash(x)*unsquash(y)) but avoiding overflow.
int sign;
if(x*y >= 0.0)
sign = 1;
else
sign = -1;
return sign*logsumexp(squash_to_log(Math.abs(x))
+ squash_to_log(Math.abs(y)), 0.0);
}
// Squashed inflated units
double inflateUnits(int height) {
double timescale = 576.0; // Half life of 400 = e-folding time of a day
// by coincidence, so may as well go with it
return log_to_squash(height / timescale);
}
double spikePower(double newAmount) {
if (newAmount < 50.0) {
return(0.5);
} else if (newAmount < 85.0) {
return(newAmount / 100.0);
} else {
return(0.85);
}
}
double spikeMass(double oldAmount, double newAmount) {
double softenedChange = softenLBC(Math.abs(newAmount - oldAmount));
double changeInSoftened = Math.abs(softenLBC(newAmount) - softenLBC(oldAmount));
double power = spikePower(newAmount);
if (oldAmount > newAmount) {
return -1.0 * Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power);
} else {
return Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power);
}
}
for (i in params.src.changes) {
double units = inflateUnits(i.height);
if (ctx._source.trending_score == null) {
ctx._source.trending_score = 0.0;
}
double bigSpike = squashed_multiply(units, squash(spikeMass(i.prev_amount, i.new_amount)));
ctx._source.trending_score = squashed_add(ctx._source.trending_score, bigSpike);
}
"""
start = time.perf_counter()
def producer():
for claim_id, claim_updates in params.items():
yield {
'_id': claim_id,
'_index': self.index,
'_op_type': 'update',
'script': {
'lang': 'painless',
'source': update_trending_score_script,
'params': {'src': {
'changes': [
{
'height': p.height,
'prev_amount': p.prev_amount / 1E8,
'new_amount': p.new_amount / 1E8,
} for p in claim_updates
]
}}
},
}
if not params:
return
async for ok, item in async_streaming_bulk(self.sync_client, producer(), raise_on_error=False):
if not ok:
self.logger.warning("updating trending failed for an item: %s", item)
await self.sync_client.indices.refresh(self.index)
self.logger.info("updated trending scores in %ims", int((time.perf_counter() - start) * 1000))
async def apply_filters(self, blocked_streams, blocked_channels, filtered_streams, filtered_channels):
if filtered_streams:
await self.sync_client.update_by_query(
@ -166,56 +326,84 @@ class SearchIndex:
def clear_caches(self):
self.search_cache.clear()
self.short_id_cache.clear()
self.claim_cache.clear()
self.resolution_cache.clear()
async def session_query(self, query_name, kwargs):
offset, total = kwargs.get('offset', 0) if isinstance(kwargs, dict) else 0, 0
async def cached_search(self, kwargs):
total_referenced = []
if query_name == 'resolve':
total_referenced, response, censor = await self.resolve(*kwargs)
else:
cache_item = ResultCacheItem.from_cache(str(kwargs), self.search_cache)
if cache_item.result is not None:
cache_item = ResultCacheItem.from_cache(str(kwargs), self.search_cache)
if cache_item.result is not None:
return cache_item.result
async with cache_item.lock:
if cache_item.result:
return cache_item.result
async with cache_item.lock:
if cache_item.result:
return cache_item.result
censor = Censor(Censor.SEARCH)
if kwargs.get('no_totals'):
response, offset, total = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
else:
response, offset, total = await self.search(**kwargs)
censor.apply(response)
censor = Censor(Censor.SEARCH)
if kwargs.get('no_totals'):
response, offset, total = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
else:
response, offset, total = await self.search(**kwargs)
censor.apply(response)
total_referenced.extend(response)
if censor.censored:
response, _, _ = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
total_referenced.extend(response)
if censor.censored:
response, _, _ = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
total_referenced.extend(response)
result = Outputs.to_base64(
response, await self._get_referenced_rows(total_referenced), offset, total, censor
)
cache_item.result = result
return result
return Outputs.to_base64(response, await self._get_referenced_rows(total_referenced), offset, total, censor)
async def resolve(self, *urls):
censor = Censor(Censor.RESOLVE)
results = [await self.resolve_url(url) for url in urls]
# just heat the cache
await self.populate_claim_cache(*filter(lambda x: isinstance(x, str), results))
results = [self._get_from_cache_or_error(url, result) for url, result in zip(urls, results)]
censored = [
result if not isinstance(result, dict) or not censor.censor(result)
else ResolveCensoredError(url, result['censoring_channel_id'])
for url, result in zip(urls, results)
]
return results, censored, censor
def _get_from_cache_or_error(self, url: str, resolution: Union[LookupError, StreamResolution, ChannelResolution]):
cached = self.claim_cache.get(resolution)
return cached or (resolution if isinstance(resolution, LookupError) else resolution.lookup_error(url))
response = [
ResolveResult(
name=r['claim_name'],
normalized_name=r['normalized_name'],
claim_hash=r['claim_hash'],
tx_num=r['tx_num'],
position=r['tx_nout'],
tx_hash=r['tx_hash'],
height=r['height'],
amount=r['amount'],
short_url=r['short_url'],
is_controlling=r['is_controlling'],
canonical_url=r['canonical_url'],
creation_height=r['creation_height'],
activation_height=r['activation_height'],
expiration_height=r['expiration_height'],
effective_amount=r['effective_amount'],
support_amount=r['support_amount'],
last_takeover_height=r['last_take_over_height'],
claims_in_channel=r['claims_in_channel'],
channel_hash=r['channel_hash'],
reposted_claim_hash=r['reposted_claim_hash'],
reposted=r['reposted'],
signature_valid=r['signature_valid']
) for r in response
]
extra = [
ResolveResult(
name=r['claim_name'],
normalized_name=r['normalized_name'],
claim_hash=r['claim_hash'],
tx_num=r['tx_num'],
position=r['tx_nout'],
tx_hash=r['tx_hash'],
height=r['height'],
amount=r['amount'],
short_url=r['short_url'],
is_controlling=r['is_controlling'],
canonical_url=r['canonical_url'],
creation_height=r['creation_height'],
activation_height=r['activation_height'],
expiration_height=r['expiration_height'],
effective_amount=r['effective_amount'],
support_amount=r['support_amount'],
last_takeover_height=r['last_take_over_height'],
claims_in_channel=r['claims_in_channel'],
channel_hash=r['channel_hash'],
reposted_claim_hash=r['reposted_claim_hash'],
reposted=r['reposted'],
signature_valid=r['signature_valid']
) for r in await self._get_referenced_rows(total_referenced)
]
result = Outputs.to_base64(
response, extra, offset, total, censor
)
cache_item.result = result
return result
async def get_many(self, *claim_ids):
await self.populate_claim_cache(*claim_ids)
@ -230,32 +418,13 @@ class SearchIndex:
for result in expand_result(filter(lambda doc: doc['found'], results["docs"])):
self.claim_cache.set(result['claim_id'], result)
async def full_id_from_short_id(self, name, short_id, channel_id=None):
key = '#'.join((channel_id or '', name, short_id))
if key not in self.short_id_cache:
query = {'name': name, 'claim_id': short_id}
if channel_id:
query['channel_id'] = channel_id
query['order_by'] = ['^channel_join']
query['signature_valid'] = True
else:
query['order_by'] = '^creation_height'
result, _, _ = await self.search(**query, limit=1)
if len(result) == 1:
result = result[0]['claim_id']
self.short_id_cache[key] = result
return self.short_id_cache.get(key, None)
async def search(self, **kwargs):
if 'channel' in kwargs:
kwargs['channel_id'] = await self.resolve_url(kwargs.pop('channel'))
if not kwargs['channel_id'] or not isinstance(kwargs['channel_id'], str):
return [], 0, 0
try:
return await self.search_ahead(**kwargs)
except NotFoundError:
return [], 0, 0
return expand_result(result['hits']), 0, result.get('total', {}).get('value', 0)
# return expand_result(result['hits']), 0, result.get('total', {}).get('value', 0)
async def search_ahead(self, **kwargs):
# 'limit_claims_per_channel' case. Fetch 1000 results, reorder, slice, inflate and return
@ -335,78 +504,6 @@ class SearchIndex:
next_page_hits_maybe_check_later.append((hit_id, hit_channel_id))
return reordered_hits
async def resolve_url(self, raw_url):
if raw_url not in self.resolution_cache:
self.resolution_cache[raw_url] = await self._resolve_url(raw_url)
return self.resolution_cache[raw_url]
async def _resolve_url(self, raw_url):
try:
url = URL.parse(raw_url)
except ValueError as e:
return e
stream = LookupError(f'Could not find claim at "{raw_url}".')
channel_id = await self.resolve_channel_id(url)
if isinstance(channel_id, LookupError):
return channel_id
stream = (await self.resolve_stream(url, channel_id if isinstance(channel_id, str) else None)) or stream
if url.has_stream:
return StreamResolution(stream)
else:
return ChannelResolution(channel_id)
async def resolve_channel_id(self, url: URL):
if not url.has_channel:
return
if url.channel.is_fullid:
return url.channel.claim_id
if url.channel.is_shortid:
channel_id = await self.full_id_from_short_id(url.channel.name, url.channel.claim_id)
if not channel_id:
return LookupError(f'Could not find channel in "{url}".')
return channel_id
query = url.channel.to_dict()
if set(query) == {'name'}:
query['is_controlling'] = True
else:
query['order_by'] = ['^creation_height']
matches, _, _ = await self.search(**query, limit=1)
if matches:
channel_id = matches[0]['claim_id']
else:
return LookupError(f'Could not find channel in "{url}".')
return channel_id
async def resolve_stream(self, url: URL, channel_id: str = None):
if not url.has_stream:
return None
if url.has_channel and channel_id is None:
return None
query = url.stream.to_dict()
if url.stream.claim_id is not None:
if url.stream.is_fullid:
claim_id = url.stream.claim_id
else:
claim_id = await self.full_id_from_short_id(query['name'], query['claim_id'], channel_id)
return claim_id
if channel_id is not None:
if set(query) == {'name'}:
# temporarily emulate is_controlling for claims in channel
query['order_by'] = ['effective_amount', '^height']
else:
query['order_by'] = ['^channel_join']
query['channel_id'] = channel_id
query['signature_valid'] = True
elif set(query) == {'name'}:
query['is_controlling'] = True
matches, _, _ = await self.search(**query, limit=1)
if matches:
return matches[0]['claim_id']
async def _get_referenced_rows(self, txo_rows: List[dict]):
txo_rows = [row for row in txo_rows if isinstance(row, dict)]
referenced_ids = set(filter(None, map(itemgetter('reposted_claim_id'), txo_rows)))
@ -424,33 +521,6 @@ class SearchIndex:
return referenced_txos
def extract_doc(doc, index):
doc['claim_id'] = doc.pop('claim_hash')[::-1].hex()
if doc['reposted_claim_hash'] is not None:
doc['reposted_claim_id'] = doc.pop('reposted_claim_hash')[::-1].hex()
else:
doc['reposted_claim_id'] = None
channel_hash = doc.pop('channel_hash')
doc['channel_id'] = channel_hash[::-1].hex() if channel_hash else channel_hash
doc['censoring_channel_id'] = doc.get('censoring_channel_id')
txo_hash = doc.pop('txo_hash')
doc['tx_id'] = txo_hash[:32][::-1].hex()
doc['tx_nout'] = struct.unpack('<I', txo_hash[32:])[0]
doc['repost_count'] = doc.pop('reposted')
doc['is_controlling'] = bool(doc['is_controlling'])
doc['signature'] = (doc.pop('signature') or b'').hex() or None
doc['signature_digest'] = (doc.pop('signature_digest') or b'').hex() or None
doc['public_key_bytes'] = (doc.pop('public_key_bytes') or b'').hex() or None
doc['public_key_id'] = (doc.pop('public_key_hash') or b'').hex() or None
doc['is_signature_valid'] = bool(doc['signature_valid'])
doc['claim_type'] = doc.get('claim_type', 0) or 0
doc['stream_type'] = int(doc.get('stream_type', 0) or 0)
doc['has_source'] = bool(doc['has_source'])
doc['normalized_name'] = doc.pop('normalized')
doc = {key: value for key, value in doc.items() if key in ALL_FIELDS}
return {'doc': doc, '_id': doc['claim_id'], '_index': index, '_op_type': 'update', 'doc_as_upsert': True}
def expand_query(**kwargs):
if "amount_order" in kwargs:
kwargs["limit"] = 1
@ -462,6 +532,8 @@ def expand_query(**kwargs):
kwargs.pop('is_controlling')
query = {'must': [], 'must_not': []}
collapse = None
if 'fee_currency' in kwargs and kwargs['fee_currency'] is not None:
kwargs['fee_currency'] = kwargs['fee_currency'].upper()
for key, value in kwargs.items():
key = key.replace('claim.', '')
many = key.endswith('__in') or isinstance(value, list)
@ -481,29 +553,36 @@ def expand_query(**kwargs):
else:
value = [CLAIM_TYPES[claim_type] for claim_type in value]
elif key == 'stream_type':
value = STREAM_TYPES[value] if isinstance(value, str) else list(map(STREAM_TYPES.get, value))
value = [STREAM_TYPES[value]] if isinstance(value, str) else list(map(STREAM_TYPES.get, value))
if key == '_id':
if isinstance(value, Iterable):
value = [item[::-1].hex() for item in value]
else:
value = value[::-1].hex()
if not many and key in ('_id', 'claim_id') and len(value) < 20:
if not many and key in ('_id', 'claim_id', 'sd_hash') and len(value) < 20:
partial_id = True
if key == 'public_key_id':
value = Base58.decode(value)[1:21].hex()
if key in ('signature_valid', 'has_source'):
continue # handled later
if key in TEXT_FIELDS:
key += '.keyword'
ops = {'<=': 'lte', '>=': 'gte', '<': 'lt', '>': 'gt'}
if partial_id:
query['must'].append({"prefix": {"claim_id": value}})
query['must'].append({"prefix": {key: value}})
elif key in RANGE_FIELDS and isinstance(value, str) and value[0] in ops:
operator_length = 2 if value[:2] in ops else 1
operator, value = value[:operator_length], value[operator_length:]
if key == 'fee_amount':
value = str(Decimal(value)*1000)
query['must'].append({"range": {key: {ops[operator]: value}}})
elif key in RANGE_FIELDS and isinstance(value, list) and all(v[0] in ops for v in value):
range_constraints = []
for v in value:
operator_length = 2 if v[:2] in ops else 1
operator, stripped_op_v = v[:operator_length], v[operator_length:]
if key == 'fee_amount':
stripped_op_v = str(Decimal(stripped_op_v)*1000)
range_constraints.append((operator, stripped_op_v))
query['must'].append({"range": {key: {ops[operator]: v for operator, v in range_constraints}}})
elif many:
query['must'].append({"terms": {key: value}})
else:
@ -537,13 +616,13 @@ def expand_query(**kwargs):
elif key == 'limit_claims_per_channel':
collapse = ('channel_id.keyword', value)
if kwargs.get('has_channel_signature'):
query['must'].append({"exists": {"field": "signature_digest"}})
query['must'].append({"exists": {"field": "signature"}})
if 'signature_valid' in kwargs:
query['must'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
elif 'signature_valid' in kwargs:
query.setdefault('should', [])
query["minimum_should_match"] = 1
query['should'].append({"bool": {"must_not": {"exists": {"field": "signature_digest"}}}})
query['should'].append({"bool": {"must_not": {"exists": {"field": "signature"}}}})
query['should'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
if 'has_source' in kwargs:
query.setdefault('should', [])
@ -612,7 +691,9 @@ def expand_result(results):
result['tx_hash'] = unhexlify(result['tx_id'])[::-1]
result['reposted'] = result.pop('repost_count')
result['signature_valid'] = result.pop('is_signature_valid')
result['normalized'] = result.pop('normalized_name')
# result['normalized'] = result.pop('normalized_name')
# if result['censoring_channel_hash']:
# result['censoring_channel_hash'] = unhexlify(result['censoring_channel_hash'])[::-1]
expanded.append(result)
if inner_hits:
return expand_result(inner_hits)

View file

@ -1,100 +1,121 @@
import os
import argparse
import asyncio
import logging
import os
from collections import namedtuple
from multiprocessing import Process
import sqlite3
from elasticsearch import AsyncElasticsearch
from elasticsearch.helpers import async_bulk
from elasticsearch.helpers import async_streaming_bulk
from lbry.wallet.server.env import Env
from lbry.wallet.server.coin import LBC
from lbry.wallet.server.db.elasticsearch.search import extract_doc, SearchIndex, IndexVersionMismatch
from lbry.wallet.server.leveldb import LevelDB
from lbry.wallet.server.db.elasticsearch.search import SearchIndex, IndexVersionMismatch
from lbry.wallet.server.db.elasticsearch.constants import ALL_FIELDS
async def get_all(db, shard_num, shards_total, limit=0, index_name='claims'):
logging.info("shard %d starting", shard_num)
def namedtuple_factory(cursor, row):
Row = namedtuple('Row', (d[0] for d in cursor.description))
return Row(*row)
db.row_factory = namedtuple_factory
total = db.execute(f"select count(*) as total from claim where height % {shards_total} = {shard_num};").fetchone()[0]
for num, claim in enumerate(db.execute(f"""
SELECT claimtrie.claim_hash as is_controlling,
claimtrie.last_take_over_height,
(select group_concat(tag, ',,') from tag where tag.claim_hash in (claim.claim_hash, claim.reposted_claim_hash)) as tags,
(select group_concat(language, ' ') from language where language.claim_hash in (claim.claim_hash, claim.reposted_claim_hash)) as languages,
cr.has_source as reposted_has_source,
cr.claim_type as reposted_claim_type,
cr.stream_type as reposted_stream_type,
cr.media_type as reposted_media_type,
cr.duration as reposted_duration,
cr.fee_amount as reposted_fee_amount,
cr.fee_currency as reposted_fee_currency,
claim.*
FROM claim LEFT JOIN claimtrie USING (claim_hash) LEFT JOIN claim cr ON cr.claim_hash=claim.reposted_claim_hash
WHERE claim.height % {shards_total} = {shard_num}
ORDER BY claim.height desc
""")):
claim = dict(claim._asdict())
claim['has_source'] = bool(claim.pop('reposted_has_source') or claim['has_source'])
claim['stream_type'] = claim.pop('reposted_stream_type') or claim['stream_type']
claim['media_type'] = claim.pop('reposted_media_type') or claim['media_type']
claim['fee_amount'] = claim.pop('reposted_fee_amount') or claim['fee_amount']
claim['fee_currency'] = claim.pop('reposted_fee_currency') or claim['fee_currency']
claim['duration'] = claim.pop('reposted_duration') or claim['duration']
claim['censor_type'] = 0
claim['censoring_channel_id'] = None
claim['tags'] = claim['tags'].split(',,') if claim['tags'] else []
claim['languages'] = claim['languages'].split(' ') if claim['languages'] else []
if num % 10_000 == 0:
logging.info("%d/%d", num, total)
yield extract_doc(claim, index_name)
if 0 < limit <= num:
break
async def consume(producer, index_name):
env = Env(LBC)
logging.info("ES sync host: %s:%i", env.elastic_host, env.elastic_port)
es = AsyncElasticsearch([{'host': env.elastic_host, 'port': env.elastic_port}])
async def get_recent_claims(env, index_name='claims', db=None):
log = logging.getLogger()
need_open = db is None
db = db or LevelDB(env)
try:
await async_bulk(es, producer, request_timeout=120)
await es.indices.refresh(index=index_name)
if need_open:
db.open_db()
if db.es_sync_height == db.db_height or db.db_height <= 0:
return
if need_open:
await db.initialize_caches()
log.info(f"catching up ES ({db.es_sync_height}) to leveldb height: {db.db_height}")
cnt = 0
touched_claims = set()
deleted_claims = set()
for height in range(db.es_sync_height, db.db_height + 1):
touched_or_deleted = db.prefix_db.touched_or_deleted.get(height)
touched_claims.update(touched_or_deleted.touched_claims)
deleted_claims.update(touched_or_deleted.deleted_claims)
touched_claims.difference_update(deleted_claims)
for deleted in deleted_claims:
yield {
'_index': index_name,
'_op_type': 'delete',
'_id': deleted.hex()
}
for touched in touched_claims:
claim = db.claim_producer(touched)
if claim:
yield {
'doc': {key: value for key, value in claim.items() if key in ALL_FIELDS},
'_id': claim['claim_id'],
'_index': index_name,
'_op_type': 'update',
'doc_as_upsert': True
}
cnt += 1
else:
logging.warning("could not sync claim %s", touched.hex())
if cnt % 10000 == 0:
logging.info("%i claims sent to ES", cnt)
db.es_sync_height = db.db_height
db.write_db_state()
db.prefix_db.unsafe_commit()
db.assert_db_state()
logging.info("finished sending %i claims to ES, deleted %i", cnt, len(deleted_claims))
finally:
await es.close()
if need_open:
db.close()
async def make_es_index(index=None):
env = Env(LBC)
if index is None:
index = SearchIndex('', elastic_host=env.elastic_host, elastic_port=env.elastic_port)
async def get_all_claims(env, index_name='claims', db=None):
need_open = db is None
db = db or LevelDB(env)
if need_open:
db.open_db()
await db.initialize_caches()
logging.info("Fetching claims to send ES from leveldb")
try:
return await index.start()
cnt = 0
async for claim in db.all_claims_producer():
yield {
'doc': {key: value for key, value in claim.items() if key in ALL_FIELDS},
'_id': claim['claim_id'],
'_index': index_name,
'_op_type': 'update',
'doc_as_upsert': True
}
cnt += 1
if cnt % 10000 == 0:
logging.info("sent %i claims to ES", cnt)
finally:
if need_open:
db.close()
async def make_es_index_and_run_sync(env: Env, clients=32, force=False, db=None, index_name='claims'):
index = SearchIndex(env.es_index_prefix, elastic_host=env.elastic_host, elastic_port=env.elastic_port)
logging.info("ES sync host: %s:%i", env.elastic_host, env.elastic_port)
try:
created = await index.start()
except IndexVersionMismatch as err:
logging.info(
"dropping ES search index (version %s) for upgrade to version %s", err.got_version, err.expected_version
)
await index.delete_index()
await index.stop()
return await index.start()
created = await index.start()
finally:
index.stop()
async def run(db_path, clients, blocks, shard, index_name='claims'):
db = sqlite3.connect(db_path, isolation_level=None, check_same_thread=False, uri=True)
db.execute('pragma journal_mode=wal;')
db.execute('pragma temp_store=memory;')
producer = get_all(db, shard, clients, limit=blocks, index_name=index_name)
await asyncio.gather(*(consume(producer, index_name=index_name) for _ in range(min(8, clients))))
def __run(args, shard):
asyncio.run(run(args.db_path, args.clients, args.blocks, shard))
es = AsyncElasticsearch([{'host': env.elastic_host, 'port': env.elastic_port}])
if force or created:
claim_generator = get_all_claims(env, index_name=index_name, db=db)
else:
claim_generator = get_recent_claims(env, index_name=index_name, db=db)
try:
async for ok, item in async_streaming_bulk(es, claim_generator, request_timeout=600, raise_on_error=False):
if not ok:
logging.warning("indexing failed for an item: %s", item)
await es.indices.refresh(index=index_name)
finally:
await es.close()
def run_elastic_sync():
@@ -104,23 +125,14 @@ def run_elastic_sync():
logging.info('lbry.server starting')
parser = argparse.ArgumentParser(prog="lbry-hub-elastic-sync")
parser.add_argument("db_path", type=str)
parser.add_argument("-c", "--clients", type=int, default=16)
parser.add_argument("-b", "--blocks", type=int, default=0)
parser.add_argument("-c", "--clients", type=int, default=32)
parser.add_argument("-f", "--force", default=False, action='store_true')
Env.contribute_to_arg_parser(parser)
args = parser.parse_args()
processes = []
env = Env.from_arg_parser(args)
if not args.force and not os.path.exists(args.db_path):
logging.info("DB path doesnt exist")
if not os.path.exists(os.path.join(args.db_dir, 'lbry-leveldb')):
logging.info("DB path doesnt exist, nothing to sync to ES")
return
if not args.force and not asyncio.run(make_es_index()):
logging.info("ES is already initialized")
return
for i in range(args.clients):
processes.append(Process(target=__run, args=(args, i)))
processes[-1].start()
for process in processes:
process.join()
process.close()
asyncio.run(make_es_index_and_run_sync(env, clients=args.clients, force=args.force))

File diff suppressed because it is too large


@@ -0,0 +1,175 @@
import struct
import logging
from string import printable
from collections import defaultdict
from typing import Tuple, Iterable, Callable, Optional
from lbry.wallet.server.db import DB_PREFIXES
_OP_STRUCT = struct.Struct('>BLL')
log = logging.getLogger()
class RevertableOp:
__slots__ = [
'key',
'value',
]
is_put = 0
def __init__(self, key: bytes, value: bytes):
self.key = key
self.value = value
@property
def is_delete(self) -> bool:
return not self.is_put
def invert(self) -> 'RevertableOp':
raise NotImplementedError()
def pack(self) -> bytes:
"""
Serialize to bytes
"""
return struct.pack(
f'>BLL{len(self.key)}s{len(self.value)}s', int(self.is_put), len(self.key), len(self.value), self.key,
self.value
)
@classmethod
def unpack(cls, packed: bytes) -> Tuple['RevertableOp', bytes]:
"""
Deserialize from bytes
:param packed: bytes containing at least one packed revertable op
:return: tuple of the deserialized op (a put or a delete) and the remaining serialized bytes
"""
is_put, key_len, val_len = _OP_STRUCT.unpack(packed[:9])
key = packed[9:9 + key_len]
value = packed[9 + key_len:9 + key_len + val_len]
if is_put == 1:
return RevertablePut(key, value), packed[9 + key_len + val_len:]
return RevertableDelete(key, value), packed[9 + key_len + val_len:]
def __eq__(self, other: 'RevertableOp') -> bool:
return (self.is_put, self.key, self.value) == (other.is_put, other.key, other.value)
def __repr__(self) -> str:
return str(self)
def __str__(self) -> str:
from lbry.wallet.server.db.prefixes import auto_decode_item
k, v = auto_decode_item(self.key, self.value)
key = ''.join(c if c in printable else '.' for c in str(k))
val = ''.join(c if c in printable else '.' for c in str(v))
return f"{'PUT' if self.is_put else 'DELETE'} {DB_PREFIXES(self.key[:1]).name}: {key} | {val}"
class RevertableDelete(RevertableOp):
def invert(self):
return RevertablePut(self.key, self.value)
class RevertablePut(RevertableOp):
is_put = True
def invert(self):
return RevertableDelete(self.key, self.value)
class OpStackIntegrity(Exception):
pass
class RevertableOpStack:
def __init__(self, get_fn: Callable[[bytes], Optional[bytes]], unsafe_prefixes=None):
"""
This represents a sequence of revertable puts and deletes to a key-value database that checks for integrity
violations when applying the puts and deletes. The integrity checks ensure that keys that do not exist
are not deleted, and that when keys are deleted the current value is correctly known so that the delete
may be undone. When putting values, the integrity checks ensure that existing values are not overwritten
without first being deleted. Updates are performed by applying a delete op for the old value and a put op
for the new value.
:param get_fn: getter function from an object implementing `KeyValueStorage`
:param unsafe_prefixes: optional set of prefixes for which integrity errors are ignored (violations are still logged)
"""
self._get = get_fn
self._items = defaultdict(list)
self._unsafe_prefixes = unsafe_prefixes or set()
def append_op(self, op: RevertableOp):
"""
Apply a put or delete op, checking that it introduces no integrity errors
"""
inverted = op.invert()
if self._items[op.key] and inverted == self._items[op.key][-1]:
self._items[op.key].pop() # if the new op is the inverse of the last op, we can safely null both
return
elif self._items[op.key] and self._items[op.key][-1] == op: # duplicate of last op
return # raise an error?
stored_val = self._get(op.key)
has_stored_val = stored_val is not None
delete_stored_op = None if not has_stored_val else RevertableDelete(op.key, stored_val)
will_delete_existing_stored = False if delete_stored_op is None else (delete_stored_op in self._items[op.key])
try:
if op.is_put and has_stored_val and not will_delete_existing_stored:
raise OpStackIntegrity(
f"db op tries to add on top of existing key without deleting first: {op}"
)
elif op.is_delete and has_stored_val and stored_val != op.value and not will_delete_existing_stored:
# there is a value and we're not deleting it in this op
# check that a delete for the stored value is in the stack
raise OpStackIntegrity(f"db op tries to delete with incorrect existing value {op}")
elif op.is_delete and not has_stored_val:
raise OpStackIntegrity(f"db op tries to delete nonexistent key: {op}")
elif op.is_delete and stored_val != op.value:
raise OpStackIntegrity(f"db op tries to delete with incorrect value: {op}")
except OpStackIntegrity as err:
if op.key[:1] in self._unsafe_prefixes:
log.debug(f"skipping over integrity error: {err}")
else:
raise err
self._items[op.key].append(op)
def extend_ops(self, ops: Iterable[RevertableOp]):
"""
Apply a sequence of put or delete ops, checking that they introduce no integrity errors
"""
for op in ops:
self.append_op(op)
def clear(self):
self._items.clear()
def __len__(self):
return sum(map(len, self._items.values()))
def __iter__(self):
for key, ops in self._items.items():
for op in ops:
yield op
def __reversed__(self):
for key, ops in self._items.items():
for op in reversed(ops):
yield op
def get_undo_ops(self) -> bytes:
"""
Get the serialized bytes to undo all of the changes made by the pending ops
"""
return b''.join(op.invert().pack() for op in reversed(self))
def apply_packed_undo_ops(self, packed: bytes):
"""
Unpack and apply a sequence of undo ops from serialized undo bytes
"""
while packed:
op, packed = RevertableOp.unpack(packed)
self.append_op(op)
def get_last_op_for_key(self, key: bytes) -> Optional[RevertableOp]:
if key in self._items and self._items[key]:
return self._items[key][-1]
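A rough usage sketch of the classes above, assuming they are importable. The dict-backed getter and the key bytes are placeholders for illustration only (real keys begin with a DB_PREFIXES byte).
# stand-in for KeyValueStorage.get: returns the stored value or None
store = {}
stack = RevertableOpStack(store.get)

put = RevertablePut(b'\x01some-key', b'some-value')
stack.append_op(put)
assert len(stack) == 1

# the serialized undo for the pending ops is the packed inverse, in reverse order
undo_bytes = stack.get_undo_ops()
op, remainder = RevertableOp.unpack(undo_bytes)
assert isinstance(op, RevertableDelete) and remainder == b''

# appending the exact inverse of the last pending op cancels both out
stack.append_op(put.invert())
assert len(stack) == 0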


@@ -1,9 +0,0 @@
from . import zscore
from . import ar
from . import variable_decay
TRENDING_ALGORITHMS = {
'zscore': zscore,
'ar': ar,
'variable_decay': variable_decay
}

View file

@@ -1,265 +0,0 @@
import copy
import math
import time
# Half life in blocks
HALF_LIFE = 134
# Decay coefficient per block
DECAY = 0.5**(1.0/HALF_LIFE)
# How frequently to write trending values to the db
SAVE_INTERVAL = 10
# Renormalisation interval
RENORM_INTERVAL = 1000
# Assertion
assert RENORM_INTERVAL % SAVE_INTERVAL == 0
# Decay coefficient per renormalisation interval
DECAY_PER_RENORM = DECAY**(RENORM_INTERVAL)
# Log trending calculations?
TRENDING_LOG = True
def install(connection):
"""
Install the AR trending algorithm.
"""
check_trending_values(connection)
if TRENDING_LOG:
f = open("trending_ar.log", "a")
f.close()
# Stub
CREATE_TREND_TABLE = ""
def check_trending_values(connection):
"""
If the trending values appear to be based on the zscore algorithm,
reset them. This will allow resyncing from a standard snapshot.
"""
c = connection.cursor()
needs_reset = False
for row in c.execute("SELECT COUNT(*) num FROM claim WHERE trending_global <> 0;"):
if row[0] != 0:
needs_reset = True
break
if needs_reset:
print("Resetting some columns. This might take a while...", flush=True, end="")
c.execute(""" BEGIN;
UPDATE claim SET trending_group = 0;
UPDATE claim SET trending_mixed = 0;
UPDATE claim SET trending_global = 0;
UPDATE claim SET trending_local = 0;
COMMIT;""")
print("done.")
def spike_height(trending_score, x, x_old, time_boost=1.0):
"""
Compute the size of a trending spike.
"""
# Change in softened amount
change_in_softened_amount = x**0.25 - x_old**0.25
# Softened change in amount
delta = x - x_old
softened_change_in_amount = abs(delta)**0.25
# Softened change in amount counts more for minnows
if delta > 0.0:
if trending_score >= 0.0:
multiplier = 0.1/((trending_score/time_boost + softened_change_in_amount) + 1.0)
softened_change_in_amount *= multiplier
else:
softened_change_in_amount *= -1.0
return time_boost*(softened_change_in_amount + change_in_softened_amount)
def get_time_boost(height):
"""
Return the time boost at a given height.
"""
return 1.0/DECAY**(height % RENORM_INTERVAL)
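The constants and helpers above can be sanity-checked with a short standalone sketch (assuming HALF_LIFE, RENORM_INTERVAL, get_time_boost and spike_height from this module are in scope; the amounts are arbitrary):
# a trending score decays to half of its value over HALF_LIFE blocks
assert abs((0.5 ** (1.0 / HALF_LIFE)) ** HALF_LIFE - 0.5) < 1e-12

# the time boost grows through a renormalisation window to offset that decay
print(get_time_boost(0), get_time_boost(RENORM_INTERVAL - 1))

# new support on a claim with no trending score yet (time_boost defaults to 1.0)
print(spike_height(0.0, x=200.0, x_old=100.0))    # positive spike
# the same change on a claim that is already trending: the softened-change term is damped
print(spike_height(50.0, x=200.0, x_old=100.0))   # slightly smaller spike
# support being withdrawn produces a negative spike
print(spike_height(0.0, x=100.0, x_old=200.0))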
def trending_log(s):
"""
Log a string.
"""
if TRENDING_LOG:
fout = open("trending_ar.log", "a")
fout.write(s)
fout.flush()
fout.close()
class TrendingData:
"""
An object of this class holds trending data
"""
def __init__(self):
self.claims = {}
# Have all claims been read from db yet?
self.initialised = False
def insert_claim_from_load(self, claim_hash, trending_score, total_amount):
assert not self.initialised
self.claims[claim_hash] = {"trending_score": trending_score,
"total_amount": total_amount,
"changed": False}
def update_claim(self, claim_hash, total_amount, time_boost=1.0):
"""
Update trending data for a claim, given its new total amount.
"""
assert self.initialised
# Extract existing total amount and trending score
# or use starting values if the claim is new
if claim_hash in self.claims:
old_state = copy.deepcopy(self.claims[claim_hash])
else:
old_state = {"trending_score": 0.0,
"total_amount": 0.0,
"changed": False}
# Calculate LBC change
change = total_amount - old_state["total_amount"]
# Modify data if there was an LBC change
if change != 0.0:
spike = spike_height(old_state["trending_score"],
total_amount,
old_state["total_amount"],
time_boost)
trending_score = old_state["trending_score"] + spike
self.claims[claim_hash] = {"total_amount": total_amount,
"trending_score": trending_score,
"changed": True}
def test_trending():
"""
Quick trending test for something receiving 10 LBC per block
"""
data = TrendingData()
data.insert_claim_from_load("abc", 10.0, 1.0)
data.initialised = True
for height in range(1, 5000):
if height % RENORM_INTERVAL == 0:
data.claims["abc"]["trending_score"] *= DECAY_PER_RENORM
time_boost = get_time_boost(height)
data.update_claim("abc", data.claims["abc"]["total_amount"] + 10.0,
time_boost=time_boost)
print(str(height) + " " + str(time_boost) + " " \
+ str(data.claims["abc"]["trending_score"]))
# One global instance
# pylint: disable=C0103
trending_data = TrendingData()
def run(db, height, final_height, recalculate_claim_hashes):
if height < final_height - 5*HALF_LIFE:
trending_log("Skipping AR trending at block {h}.\n".format(h=height))
return
start = time.time()
trending_log("Calculating AR trending at block {h}.\n".format(h=height))
trending_log(" Length of trending data = {l}.\n"\
.format(l=len(trending_data.claims)))
# Renormalise trending scores and mark all as having changed
if height % RENORM_INTERVAL == 0:
trending_log(" Renormalising trending scores...")
keys = trending_data.claims.keys()
for key in keys:
if trending_data.claims[key]["trending_score"] != 0.0:
trending_data.claims[key]["trending_score"] *= DECAY_PER_RENORM
trending_data.claims[key]["changed"] = True
# Tiny becomes zero
if abs(trending_data.claims[key]["trending_score"]) < 1E-9:
trending_data.claims[key]["trending_score"] = 0.0
trending_log("done.\n")
# Regular message.
trending_log(" Reading total_amounts from db and updating"\
+ " trending scores in RAM...")
# Get the value of the time boost
time_boost = get_time_boost(height)
# Update claims from db
if not trending_data.initialised:
# On fresh launch
for row in db.execute("""
SELECT claim_hash, trending_mixed,
(amount + support_amount)
AS total_amount
FROM claim;
"""):
trending_data.insert_claim_from_load(row[0], row[1], 1E-8*row[2])
trending_data.initialised = True
else:
for row in db.execute(f"""
SELECT claim_hash,
(amount + support_amount)
AS total_amount
FROM claim
WHERE claim_hash IN
({','.join('?' for _ in recalculate_claim_hashes)});
""", list(recalculate_claim_hashes)):
trending_data.update_claim(row[0], 1E-8*row[1], time_boost)
trending_log("done.\n")
# Write trending scores to DB
if height % SAVE_INTERVAL == 0:
trending_log(" Writing trending scores to db...")
the_list = []
keys = trending_data.claims.keys()
for key in keys:
if trending_data.claims[key]["changed"]:
the_list.append((trending_data.claims[key]["trending_score"],
key))
trending_data.claims[key]["changed"] = False
trending_log("{n} scores to write...".format(n=len(the_list)))
db.executemany("UPDATE claim SET trending_mixed=? WHERE claim_hash=?;",
the_list)
trending_log("done.\n")
trending_log("Trending operations took {time} seconds.\n\n"\
.format(time=time.time() - start))
if __name__ == "__main__":
test_trending()


@@ -1,485 +0,0 @@
"""
AR-like trending with a delayed effect and a faster
decay rate for high valued claims.
"""
import math
import time
import sqlite3
# Half life in blocks *for lower LBC claims* (it's shorter for whale claims)
HALF_LIFE = 200
# Whale threshold, in LBC (higher -> less DB writing)
WHALE_THRESHOLD = 10000.0
# Decay coefficient per block
DECAY = 0.5**(1.0/HALF_LIFE)
# How frequently to write trending values to the db
SAVE_INTERVAL = 10
# Renormalisation interval
RENORM_INTERVAL = 1000
# Assertion
assert RENORM_INTERVAL % SAVE_INTERVAL == 0
# Decay coefficient per renormalisation interval
DECAY_PER_RENORM = DECAY**(RENORM_INTERVAL)
# Log trending calculations?
TRENDING_LOG = True
def install(connection):
"""
Install the trending algorithm.
"""
check_trending_values(connection)
trending_data.initialise(connection.cursor())
if TRENDING_LOG:
f = open("trending_variable_decay.log", "a")
f.close()
# Stub
CREATE_TREND_TABLE = ""
def check_trending_values(connection):
"""
If the trending values appear to be based on the zscore algorithm,
reset them. This will allow resyncing from a standard snapshot.
"""
c = connection.cursor()
needs_reset = False
for row in c.execute("SELECT COUNT(*) num FROM claim WHERE trending_global <> 0;"):
if row[0] != 0:
needs_reset = True
break
if needs_reset:
print("Resetting some columns. This might take a while...", flush=True,
end="")
c.execute(""" BEGIN;
UPDATE claim SET trending_group = 0;
UPDATE claim SET trending_mixed = 0;
COMMIT;""")
print("done.")
def trending_log(s):
"""
Log a string to the log file
"""
if TRENDING_LOG:
fout = open("trending_variable_decay.log", "a")
fout.write(s)
fout.flush()
fout.close()
def trending_unit(height):
"""
Return the trending score unit at a given height.
"""
# Round to the beginning of a SAVE_INTERVAL batch of blocks.
_height = height - (height % SAVE_INTERVAL)
return 1.0/DECAY**(height % RENORM_INTERVAL)
class TrendingDB:
"""
An in-memory database of trending scores
"""
def __init__(self):
self.conn = sqlite3.connect(":memory:", check_same_thread=False)
self.cursor = self.conn.cursor()
self.initialised = False
self.write_needed = set()
def execute(self, query, *args, **kwargs):
return self.conn.execute(query, *args, **kwargs)
def executemany(self, query, *args, **kwargs):
return self.conn.executemany(query, *args, **kwargs)
def begin(self):
self.execute("BEGIN;")
def commit(self):
self.execute("COMMIT;")
def initialise(self, db):
"""
Pass in claims.db
"""
if self.initialised:
return
trending_log("Initialising trending database...")
# The need for speed
self.execute("PRAGMA JOURNAL_MODE=OFF;")
self.execute("PRAGMA SYNCHRONOUS=0;")
self.begin()
# Create the tables
self.execute("""
CREATE TABLE IF NOT EXISTS claims
(claim_hash BYTES PRIMARY KEY,
lbc REAL NOT NULL DEFAULT 0.0,
trending_score REAL NOT NULL DEFAULT 0.0)
WITHOUT ROWID;""")
self.execute("""
CREATE TABLE IF NOT EXISTS spikes
(id INTEGER PRIMARY KEY,
claim_hash BYTES NOT NULL,
height INTEGER NOT NULL,
mass REAL NOT NULL,
FOREIGN KEY (claim_hash)
REFERENCES claims (claim_hash));""")
# Clear out any existing data
self.execute("DELETE FROM claims;")
self.execute("DELETE FROM spikes;")
# Create indexes
self.execute("CREATE INDEX idx1 ON spikes (claim_hash, height, mass);")
self.execute("CREATE INDEX idx2 ON spikes (claim_hash, height, mass DESC);")
self.execute("CREATE INDEX idx3 on claims (lbc DESC, claim_hash, trending_score);")
# Import data from claims.db
for row in db.execute("""
SELECT claim_hash,
1E-8*(amount + support_amount) AS lbc,
trending_mixed
FROM claim;
"""):
self.execute("INSERT INTO claims VALUES (?, ?, ?);", row)
self.commit()
self.initialised = True
trending_log("done.\n")
def apply_spikes(self, height):
"""
Apply spikes that are due. This occurs inside a transaction.
"""
spikes = []
unit = trending_unit(height)
for row in self.execute("""
SELECT SUM(mass), claim_hash FROM spikes
WHERE height = ?
GROUP BY claim_hash;
""", (height, )):
spikes.append((row[0]*unit, row[1]))
self.write_needed.add(row[1])
self.executemany("""
UPDATE claims
SET trending_score = (trending_score + ?)
WHERE claim_hash = ?;
""", spikes)
self.execute("DELETE FROM spikes WHERE height = ?;", (height, ))
def decay_whales(self, height):
"""
Occurs inside transaction.
"""
if height % SAVE_INTERVAL != 0:
return
whales = self.execute("""
SELECT trending_score, lbc, claim_hash
FROM claims
WHERE lbc >= ?;
""", (WHALE_THRESHOLD, )).fetchall()
whales2 = []
for whale in whales:
trending, lbc, claim_hash = whale
# Overall multiplication factor for decay rate
# At WHALE_THRESHOLD, this is 1
# At 10*WHALE_THRESHOLD, it is 3
decay_rate_factor = 1.0 + 2.0*math.log10(lbc/WHALE_THRESHOLD)
# The -1 is because this is just the *extra* part being applied
factor = (DECAY**SAVE_INTERVAL)**(decay_rate_factor - 1.0)
# Decay
trending *= factor
whales2.append((trending, claim_hash))
self.write_needed.add(claim_hash)
self.executemany("UPDATE claims SET trending_score=? WHERE claim_hash=?;",
whales2)
def renorm(self, height):
"""
Renormalise trending scores. Occurs inside a transaction.
"""
if height % RENORM_INTERVAL == 0:
threshold = 1.0E-3/DECAY_PER_RENORM
for row in self.execute("""SELECT claim_hash FROM claims
WHERE ABS(trending_score) >= ?;""",
(threshold, )):
self.write_needed.add(row[0])
self.execute("""UPDATE claims SET trending_score = ?*trending_score
WHERE ABS(trending_score) >= ?;""",
(DECAY_PER_RENORM, threshold))
def write_to_claims_db(self, db, height):
"""
Write changed trending scores to claims.db.
"""
if height % SAVE_INTERVAL != 0:
return
rows = self.execute(f"""
SELECT trending_score, claim_hash
FROM claims
WHERE claim_hash IN
({','.join('?' for _ in self.write_needed)});
""", list(self.write_needed)).fetchall()
db.executemany("""UPDATE claim SET trending_mixed = ?
WHERE claim_hash = ?;""", rows)
# Clear list of claims needing to be written to claims.db
self.write_needed = set()
def update(self, db, height, recalculate_claim_hashes):
"""
Update trending scores.
Input is a cursor to claims.db, the block height, and the list of
claims that changed.
"""
assert self.initialised
self.begin()
self.renorm(height)
# Fetch changed/new claims from claims.db
for row in db.execute(f"""
SELECT claim_hash,
1E-8*(amount + support_amount) AS lbc
FROM claim
WHERE claim_hash IN
({','.join('?' for _ in recalculate_claim_hashes)});
""", list(recalculate_claim_hashes)):
claim_hash, lbc = row
# Insert into trending db if it does not exist
self.execute("""
INSERT INTO claims (claim_hash)
VALUES (?)
ON CONFLICT (claim_hash) DO NOTHING;""",
(claim_hash, ))
# See if it was an LBC change
old = self.execute("SELECT * FROM claims WHERE claim_hash=?;",
(claim_hash, )).fetchone()
lbc_old = old[1]
# Save new LBC value into trending db
self.execute("UPDATE claims SET lbc = ? WHERE claim_hash = ?;",
(lbc, claim_hash))
if lbc > lbc_old:
# Schedule a future spike
delay = min(int((lbc + 1E-8)**0.4), HALF_LIFE)
spike = (claim_hash, height + delay, spike_mass(lbc, lbc_old))
self.execute("""INSERT INTO spikes
(claim_hash, height, mass)
VALUES (?, ?, ?);""", spike)
elif lbc < lbc_old:
# Subtract from future spikes
penalty = spike_mass(lbc_old, lbc)
spikes = self.execute("""
SELECT * FROM spikes
WHERE claim_hash = ?
ORDER BY height ASC, mass DESC;
""", (claim_hash, )).fetchall()
for spike in spikes:
spike_id, mass = spike[0], spike[3]
if mass > penalty:
# The entire penalty merely reduces this spike
self.execute("UPDATE spikes SET mass=? WHERE id=?;",
(mass - penalty, spike_id))
penalty = 0.0
else:
# Removing this spike entirely accounts for some (or
# all) of the penalty, then move on to other spikes
self.execute("DELETE FROM spikes WHERE id=?;",
(spike_id, ))
penalty -= mass
# If penalty remains, that's a negative spike to be applied
# immediately.
if penalty > 0.0:
self.execute("""
INSERT INTO spikes (claim_hash, height, mass)
VALUES (?, ?, ?);""",
(claim_hash, height, -penalty))
self.apply_spikes(height)
self.decay_whales(height)
self.commit()
self.write_to_claims_db(db, height)
# The "global" instance to work with
# pylint: disable=C0103
trending_data = TrendingDB()
def spike_mass(x, x_old):
"""
Compute the mass of a trending spike (normed - constant units).
x_old = old LBC value
x = new LBC value
"""
# Sign of trending spike
sign = 1.0
if x < x_old:
sign = -1.0
# Magnitude
mag = abs(x**0.25 - x_old**0.25)
# Minnow boost
mag *= 1.0 + 2E4/(x + 100.0)**2
return sign*mag
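A small illustration of spike_mass (assuming the function above is importable; amounts are arbitrary): the 2E4/(x + 100)^2 factor boosts small "minnow" claims, while a drop in LBC flips the sign.
# a 10 LBC bump on a tiny claim gets a noticeable minnow boost...
print(spike_mass(110.0, 100.0))
# ...while on a whale-sized claim the boost factor is effectively 1
print(spike_mass(100100.0, 100000.0))
# losing LBC produces a negative mass
print(spike_mass(100.0, 110.0))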
def run(db, height, final_height, recalculate_claim_hashes):
if height < final_height - 5*HALF_LIFE:
trending_log(f"Skipping trending calculations at block {height}.\n")
return
start = time.time()
trending_log(f"Calculating variable_decay trending at block {height}.\n")
trending_data.update(db, height, recalculate_claim_hashes)
end = time.time()
trending_log(f"Trending operations took {end - start} seconds.\n\n")
def test_trending():
"""
Quick trending test for claims with different support patterns.
Actually use the run() function.
"""
# Create a fake "claims.db" for testing
# pylint: disable=I1101
import apsw  # used only by this test helper; not among the module-level imports above
dbc = apsw.Connection(":memory:")
db = dbc.cursor()
# Create table
db.execute("""
BEGIN;
CREATE TABLE claim (claim_hash TEXT PRIMARY KEY,
amount REAL NOT NULL DEFAULT 0.0,
support_amount REAL NOT NULL DEFAULT 0.0,
trending_mixed REAL NOT NULL DEFAULT 0.0);
COMMIT;
""")
# Initialise trending data before anything happens with the claims
trending_data.initialise(db)
# Insert initial states of claims
everything = {"huge_whale": 0.01, "medium_whale": 0.01, "small_whale": 0.01,
"huge_whale_botted": 0.01, "minnow": 0.01}
def to_list_of_tuples(stuff):
l = []
for key in stuff:
l.append((key, stuff[key]))
return l
db.executemany("""
INSERT INTO claim (claim_hash, amount) VALUES (?, 1E8*?);
""", to_list_of_tuples(everything))
# Process block zero
height = 0
run(db, height, height, everything.keys())
# Save trajectories for plotting
trajectories = {}
for row in trending_data.execute("""
SELECT claim_hash, trending_score
FROM claims;
"""):
trajectories[row[0]] = [row[1]/trending_unit(height)]
# Main loop
for height in range(1, 1000):
# One-off supports
if height == 1:
everything["huge_whale"] += 5E5
everything["medium_whale"] += 5E4
everything["small_whale"] += 5E3
# Every block
if height < 500:
everything["huge_whale_botted"] += 5E5/500
everything["minnow"] += 1
# Remove supports
if height == 500:
for key in everything:
everything[key] = 0.01
# Whack into the db
db.executemany("""
UPDATE claim SET amount = 1E8*? WHERE claim_hash = ?;
""", [(y, x) for (x, y) in to_list_of_tuples(everything)])
# Call run()
run(db, height, height, everything.keys())
# Append current trending scores to trajectories
for row in db.execute("""
SELECT claim_hash, trending_mixed
FROM claim;
"""):
trajectories[row[0]].append(row[1]/trending_unit(height))
dbc.close()
# pylint: disable=C0415
import matplotlib.pyplot as plt
for key in trajectories:
plt.plot(trajectories[key], label=key)
plt.legend()
plt.show()
if __name__ == "__main__":
test_trending()


@@ -1,119 +0,0 @@
from math import sqrt
# TRENDING_WINDOW is the number of blocks in ~6hr period (21600 seconds / 161 seconds per block)
TRENDING_WINDOW = 134
# TRENDING_DATA_POINTS says how many samples to use for the trending algorithm
# i.e. only consider claims from the most recent (TRENDING_WINDOW * TRENDING_DATA_POINTS) blocks
TRENDING_DATA_POINTS = 28
CREATE_TREND_TABLE = """
create table if not exists trend (
claim_hash bytes not null,
height integer not null,
amount integer not null,
primary key (claim_hash, height)
) without rowid;
"""
class ZScore:
__slots__ = 'count', 'total', 'power', 'last'
def __init__(self):
self.count = 0
self.total = 0
self.power = 0
self.last = None
def step(self, value):
if self.last is not None:
self.count += 1
self.total += self.last
self.power += self.last ** 2
self.last = value
@property
def mean(self):
return self.total / self.count
@property
def standard_deviation(self):
value = (self.power / self.count) - self.mean ** 2
return sqrt(value) if value > 0 else 0
def finalize(self):
if self.count == 0:
return self.last
return (self.last - self.mean) / (self.standard_deviation or 1)
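Driven by hand outside of sqlite (it is normally registered via connection.create_aggregate, as below), the aggregate compares the most recent sample against the mean and standard deviation of the samples that came before it; a sketch assuming the class above is importable:
z = ZScore()
for amount in [10.0, 12.0, 11.0, 40.0]:
    z.step(amount)
# count/total/power only cover the first three samples; `last` holds 40.0,
# so finalize() returns how many standard deviations 40.0 sits above their mean
print(z.finalize())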
def install(connection):
connection.create_aggregate("zscore", 1, ZScore)
connection.executescript(CREATE_TREND_TABLE)
def run(db, height, final_height, affected_claims):
# don't start tracking until we're at the end of initial sync
if height < (final_height - (TRENDING_WINDOW * TRENDING_DATA_POINTS)):
return
if height % TRENDING_WINDOW != 0:
return
db.execute(f"""
DELETE FROM trend WHERE height < {height - (TRENDING_WINDOW * TRENDING_DATA_POINTS)}
""")
start = (height - TRENDING_WINDOW) + 1
db.execute(f"""
INSERT OR IGNORE INTO trend (claim_hash, height, amount)
SELECT claim_hash, {start}, COALESCE(
(SELECT SUM(amount) FROM support WHERE claim_hash=claim.claim_hash
AND height >= {start}), 0
) AS support_sum
FROM claim WHERE support_sum > 0
""")
zscore = ZScore()
for global_sum in db.execute("SELECT AVG(amount) AS avg_amount FROM trend GROUP BY height"):
zscore.step(global_sum.avg_amount)
global_mean, global_deviation = 0, 1
if zscore.count > 0:
global_mean = zscore.mean
global_deviation = zscore.standard_deviation
db.execute(f"""
UPDATE claim SET
trending_local = COALESCE((
SELECT zscore(amount) FROM trend
WHERE claim_hash=claim.claim_hash ORDER BY height DESC
), 0),
trending_global = COALESCE((
SELECT (amount - {global_mean}) / {global_deviation} FROM trend
WHERE claim_hash=claim.claim_hash AND height = {start}
), 0),
trending_group = 0,
trending_mixed = 0
""")
# trending_group and trending_mixed determine how trending will show in query results
# normally the SQL will be: "ORDER BY trending_group, trending_mixed"
# changing the trending_group will have significant impact on trending results
# changing the value used for trending_mixed will only impact trending within a trending_group
db.execute(f"""
UPDATE claim SET
trending_group = CASE
WHEN trending_local > 0 AND trending_global > 0 THEN 4
WHEN trending_local <= 0 AND trending_global > 0 THEN 3
WHEN trending_local > 0 AND trending_global <= 0 THEN 2
WHEN trending_local <= 0 AND trending_global <= 0 THEN 1
END,
trending_mixed = CASE
WHEN trending_local > 0 AND trending_global > 0 THEN trending_global
WHEN trending_local <= 0 AND trending_global > 0 THEN trending_local
WHEN trending_local > 0 AND trending_global <= 0 THEN trending_local
WHEN trending_local <= 0 AND trending_global <= 0 THEN trending_global
END
WHERE trending_local <> 0 OR trending_global <> 0
""")


@@ -1,994 +0,0 @@
import os
import sqlite3
from typing import Union, Tuple, Set, List
from itertools import chain
from decimal import Decimal
from collections import namedtuple
from binascii import unhexlify, hexlify
from lbry.wallet.server.leveldb import LevelDB
from lbry.wallet.server.util import class_logger
from lbry.wallet.database import query, constraints_to_sql
from lbry.schema.tags import clean_tags
from lbry.schema.mime_types import guess_stream_type
from lbry.wallet import Ledger, RegTestLedger
from lbry.wallet.transaction import Transaction, Output
from lbry.wallet.server.db.canonical import register_canonical_functions
from lbry.wallet.server.db.trending import TRENDING_ALGORITHMS
from .common import CLAIM_TYPES, STREAM_TYPES, COMMON_TAGS, INDEXED_LANGUAGES
from lbry.wallet.server.db.elasticsearch import SearchIndex
ATTRIBUTE_ARRAY_MAX_LENGTH = 100
sqlite3.enable_callback_tracebacks(True)
class SQLDB:
PRAGMAS = """
pragma journal_mode=WAL;
"""
CREATE_CLAIM_TABLE = """
create table if not exists claim (
claim_hash bytes primary key,
claim_id text not null,
claim_name text not null,
normalized text not null,
txo_hash bytes not null,
tx_position integer not null,
amount integer not null,
timestamp integer not null, -- last updated timestamp
creation_timestamp integer not null,
height integer not null, -- last updated height
creation_height integer not null,
activation_height integer,
expiration_height integer not null,
release_time integer not null,
short_url text not null, -- normalized#shortest-unique-claim_id
canonical_url text, -- channel's-short_url/normalized#shortest-unique-claim_id-within-channel
title text,
author text,
description text,
claim_type integer,
has_source bool,
reposted integer default 0,
-- streams
stream_type text,
media_type text,
fee_amount integer default 0,
fee_currency text,
duration integer,
-- reposts
reposted_claim_hash bytes,
-- claims which are channels
public_key_bytes bytes,
public_key_hash bytes,
claims_in_channel integer,
-- claims which are inside channels
channel_hash bytes,
channel_join integer, -- height at which claim got valid signature / joined channel
signature bytes,
signature_digest bytes,
signature_valid bool,
effective_amount integer not null default 0,
support_amount integer not null default 0,
trending_group integer not null default 0,
trending_mixed integer not null default 0,
trending_local integer not null default 0,
trending_global integer not null default 0
);
create index if not exists claim_normalized_idx on claim (normalized, activation_height);
create index if not exists claim_channel_hash_idx on claim (channel_hash, signature, claim_hash);
create index if not exists claim_claims_in_channel_idx on claim (signature_valid, channel_hash, normalized);
create index if not exists claim_txo_hash_idx on claim (txo_hash);
create index if not exists claim_activation_height_idx on claim (activation_height, claim_hash);
create index if not exists claim_expiration_height_idx on claim (expiration_height);
create index if not exists claim_reposted_claim_hash_idx on claim (reposted_claim_hash);
"""
CREATE_SUPPORT_TABLE = """
create table if not exists support (
txo_hash bytes primary key,
tx_position integer not null,
height integer not null,
claim_hash bytes not null,
amount integer not null
);
create index if not exists support_claim_hash_idx on support (claim_hash, height);
"""
CREATE_TAG_TABLE = """
create table if not exists tag (
tag text not null,
claim_hash bytes not null,
height integer not null
);
create unique index if not exists tag_claim_hash_tag_idx on tag (claim_hash, tag);
"""
CREATE_LANGUAGE_TABLE = """
create table if not exists language (
language text not null,
claim_hash bytes not null,
height integer not null
);
create unique index if not exists language_claim_hash_language_idx on language (claim_hash, language);
"""
CREATE_CLAIMTRIE_TABLE = """
create table if not exists claimtrie (
normalized text primary key,
claim_hash bytes not null,
last_take_over_height integer not null
);
create index if not exists claimtrie_claim_hash_idx on claimtrie (claim_hash);
"""
CREATE_CHANGELOG_TRIGGER = """
create table if not exists changelog (
claim_hash bytes primary key
);
create index if not exists claimtrie_claim_hash_idx on claimtrie (claim_hash);
create trigger if not exists claim_changelog after update on claim
begin
insert or ignore into changelog (claim_hash) values (new.claim_hash);
end;
create trigger if not exists claimtrie_changelog after update on claimtrie
begin
insert or ignore into changelog (claim_hash) values (new.claim_hash);
insert or ignore into changelog (claim_hash) values (old.claim_hash);
end;
"""
SEARCH_INDEXES = """
-- used by any tag clouds
create index if not exists tag_tag_idx on tag (tag, claim_hash);
-- naked order bys (no filters)
create unique index if not exists claim_release_idx on claim (release_time, claim_hash);
create unique index if not exists claim_trending_idx on claim (trending_group, trending_mixed, claim_hash);
create unique index if not exists claim_effective_amount_idx on claim (effective_amount, claim_hash);
-- claim_type filter + order by
create unique index if not exists claim_type_release_idx on claim (release_time, claim_type, claim_hash);
create unique index if not exists claim_type_trending_idx on claim (trending_group, trending_mixed, claim_type, claim_hash);
create unique index if not exists claim_type_effective_amount_idx on claim (effective_amount, claim_type, claim_hash);
-- stream_type filter + order by
create unique index if not exists stream_type_release_idx on claim (stream_type, release_time, claim_hash);
create unique index if not exists stream_type_trending_idx on claim (stream_type, trending_group, trending_mixed, claim_hash);
create unique index if not exists stream_type_effective_amount_idx on claim (stream_type, effective_amount, claim_hash);
-- channel_hash filter + order by
create unique index if not exists channel_hash_release_idx on claim (channel_hash, release_time, claim_hash);
create unique index if not exists channel_hash_trending_idx on claim (channel_hash, trending_group, trending_mixed, claim_hash);
create unique index if not exists channel_hash_effective_amount_idx on claim (channel_hash, effective_amount, claim_hash);
-- duration filter + order by
create unique index if not exists duration_release_idx on claim (duration, release_time, claim_hash);
create unique index if not exists duration_trending_idx on claim (duration, trending_group, trending_mixed, claim_hash);
create unique index if not exists duration_effective_amount_idx on claim (duration, effective_amount, claim_hash);
-- fee_amount + order by
create unique index if not exists fee_amount_release_idx on claim (fee_amount, release_time, claim_hash);
create unique index if not exists fee_amount_trending_idx on claim (fee_amount, trending_group, trending_mixed, claim_hash);
create unique index if not exists fee_amount_effective_amount_idx on claim (fee_amount, effective_amount, claim_hash);
-- TODO: verify that all indexes below are used
create index if not exists claim_height_normalized_idx on claim (height, normalized asc);
create index if not exists claim_resolve_idx on claim (normalized, claim_id);
create index if not exists claim_id_idx on claim (claim_id, claim_hash);
create index if not exists claim_timestamp_idx on claim (timestamp);
create index if not exists claim_public_key_hash_idx on claim (public_key_hash);
create index if not exists claim_signature_valid_idx on claim (signature_valid);
"""
TAG_INDEXES = '\n'.join(
f"create unique index if not exists tag_{tag_key}_idx on tag (tag, claim_hash) WHERE tag='{tag_value}';"
for tag_value, tag_key in COMMON_TAGS.items()
)
LANGUAGE_INDEXES = '\n'.join(
f"create unique index if not exists language_{language}_idx on language (language, claim_hash) WHERE language='{language}';"
for language in INDEXED_LANGUAGES
)
CREATE_TABLES_QUERY = (
CREATE_CLAIM_TABLE +
CREATE_SUPPORT_TABLE +
CREATE_CLAIMTRIE_TABLE +
CREATE_TAG_TABLE +
CREATE_CHANGELOG_TRIGGER +
CREATE_LANGUAGE_TABLE
)
def __init__(
self, main, path: str, blocking_channels: list, filtering_channels: list, trending: list):
self.main = main
self._db_path = path
self.db = None
self.logger = class_logger(__name__, self.__class__.__name__)
self.ledger = Ledger if main.coin.NET == 'mainnet' else RegTestLedger
self.blocked_streams = None
self.blocked_channels = None
self.blocking_channel_hashes = {
unhexlify(channel_id)[::-1] for channel_id in blocking_channels if channel_id
}
self.filtered_streams = None
self.filtered_channels = None
self.filtering_channel_hashes = {
unhexlify(channel_id)[::-1] for channel_id in filtering_channels if channel_id
}
self.trending = trending
self.pending_deletes = set()
def open(self):
self.db = sqlite3.connect(self._db_path, isolation_level=None, check_same_thread=False, uri=True)
def namedtuple_factory(cursor, row):
Row = namedtuple('Row', (d[0] for d in cursor.description))
return Row(*row)
self.db.row_factory = namedtuple_factory
self.db.executescript(self.PRAGMAS)
self.db.executescript(self.CREATE_TABLES_QUERY)
register_canonical_functions(self.db)
self.blocked_streams = {}
self.blocked_channels = {}
self.filtered_streams = {}
self.filtered_channels = {}
self.update_blocked_and_filtered_claims()
for algorithm in self.trending:
algorithm.install(self.db)
def close(self):
if self.db is not None:
self.db.close()
def update_blocked_and_filtered_claims(self):
self.update_claims_from_channel_hashes(
self.blocked_streams, self.blocked_channels, self.blocking_channel_hashes
)
self.update_claims_from_channel_hashes(
self.filtered_streams, self.filtered_channels, self.filtering_channel_hashes
)
self.filtered_streams.update(self.blocked_streams)
self.filtered_channels.update(self.blocked_channels)
def update_claims_from_channel_hashes(self, shared_streams, shared_channels, channel_hashes):
streams, channels = {}, {}
if channel_hashes:
sql = query(
"SELECT repost.channel_hash, repost.reposted_claim_hash, target.claim_type "
"FROM claim as repost JOIN claim AS target ON (target.claim_hash=repost.reposted_claim_hash)", **{
'repost.reposted_claim_hash__is_not_null': 1,
'repost.channel_hash__in': channel_hashes
}
)
for blocked_claim in self.execute(*sql):
if blocked_claim.claim_type == CLAIM_TYPES['stream']:
streams[blocked_claim.reposted_claim_hash] = blocked_claim.channel_hash
elif blocked_claim.claim_type == CLAIM_TYPES['channel']:
channels[blocked_claim.reposted_claim_hash] = blocked_claim.channel_hash
shared_streams.clear()
shared_streams.update(streams)
shared_channels.clear()
shared_channels.update(channels)
@staticmethod
def _insert_sql(table: str, data: dict) -> Tuple[str, list]:
columns, values = [], []
for column, value in data.items():
columns.append(column)
values.append(value)
sql = (
f"INSERT INTO {table} ({', '.join(columns)}) "
f"VALUES ({', '.join(['?'] * len(values))})"
)
return sql, values
@staticmethod
def _update_sql(table: str, data: dict, where: str,
constraints: Union[list, tuple]) -> Tuple[str, list]:
columns, values = [], []
for column, value in data.items():
columns.append(f"{column} = ?")
values.append(value)
values.extend(constraints)
return f"UPDATE {table} SET {', '.join(columns)} WHERE {where}", values
@staticmethod
def _delete_sql(table: str, constraints: dict) -> Tuple[str, dict]:
where, values = constraints_to_sql(constraints)
return f"DELETE FROM {table} WHERE {where}", values
def execute(self, *args):
return self.db.execute(*args)
def executemany(self, *args):
return self.db.executemany(*args)
def begin(self):
self.execute('begin;')
def commit(self):
self.execute('commit;')
def _upsertable_claims(self, txos: List[Output], header, clear_first=False):
claim_hashes, claims, tags, languages = set(), [], {}, {}
for txo in txos:
tx = txo.tx_ref.tx
try:
assert txo.claim_name
assert txo.normalized_name
except:
#self.logger.exception(f"Could not decode claim name for {tx.id}:{txo.position}.")
continue
language = 'none'
try:
if txo.claim.is_stream and txo.claim.stream.languages:
language = txo.claim.stream.languages[0].language
except:
pass
claim_hash = txo.claim_hash
claim_hashes.add(claim_hash)
claim_record = {
'claim_hash': claim_hash,
'claim_id': txo.claim_id,
'claim_name': txo.claim_name,
'normalized': txo.normalized_name,
'txo_hash': txo.ref.hash,
'tx_position': tx.position,
'amount': txo.amount,
'timestamp': header['timestamp'],
'height': tx.height,
'title': None,
'description': None,
'author': None,
'duration': None,
'claim_type': None,
'has_source': False,
'stream_type': None,
'media_type': None,
'release_time': None,
'fee_currency': None,
'fee_amount': 0,
'reposted_claim_hash': None
}
claims.append(claim_record)
try:
claim = txo.claim
except:
#self.logger.exception(f"Could not parse claim protobuf for {tx.id}:{txo.position}.")
continue
if claim.is_stream:
claim_record['claim_type'] = CLAIM_TYPES['stream']
claim_record['has_source'] = claim.stream.has_source
claim_record['media_type'] = claim.stream.source.media_type
claim_record['stream_type'] = STREAM_TYPES[guess_stream_type(claim_record['media_type'])]
claim_record['title'] = claim.stream.title
claim_record['description'] = claim.stream.description
claim_record['author'] = claim.stream.author
if claim.stream.video and claim.stream.video.duration:
claim_record['duration'] = claim.stream.video.duration
if claim.stream.audio and claim.stream.audio.duration:
claim_record['duration'] = claim.stream.audio.duration
if claim.stream.release_time:
claim_record['release_time'] = claim.stream.release_time
if claim.stream.has_fee:
fee = claim.stream.fee
if isinstance(fee.currency, str):
claim_record['fee_currency'] = fee.currency.lower()
if isinstance(fee.amount, Decimal):
if fee.amount >= 0 and int(fee.amount*1000) < 9223372036854775807:
claim_record['fee_amount'] = int(fee.amount*1000)
elif claim.is_repost:
claim_record['claim_type'] = CLAIM_TYPES['repost']
claim_record['reposted_claim_hash'] = claim.repost.reference.claim_hash
elif claim.is_channel:
claim_record['claim_type'] = CLAIM_TYPES['channel']
elif claim.is_collection:
claim_record['claim_type'] = CLAIM_TYPES['collection']
languages[(language, claim_hash)] = (language, claim_hash, tx.height)
for tag in clean_tags(claim.message.tags):
tags[(tag, claim_hash)] = (tag, claim_hash, tx.height)
if clear_first:
self._clear_claim_metadata(claim_hashes)
if tags:
self.executemany(
"INSERT OR IGNORE INTO tag (tag, claim_hash, height) VALUES (?, ?, ?)", tags.values()
)
if languages:
self.executemany(
"INSERT OR IGNORE INTO language (language, claim_hash, height) VALUES (?, ?, ?)", languages.values()
)
return claims
def insert_claims(self, txos: List[Output], header):
claims = self._upsertable_claims(txos, header)
if claims:
self.executemany("""
INSERT OR REPLACE INTO claim (
claim_hash, claim_id, claim_name, normalized, txo_hash, tx_position, amount,
claim_type, media_type, stream_type, timestamp, creation_timestamp, has_source,
fee_currency, fee_amount, title, description, author, duration, height, reposted_claim_hash,
creation_height, release_time, activation_height, expiration_height, short_url)
VALUES (
:claim_hash, :claim_id, :claim_name, :normalized, :txo_hash, :tx_position, :amount,
:claim_type, :media_type, :stream_type, :timestamp, :timestamp, :has_source,
:fee_currency, :fee_amount, :title, :description, :author, :duration, :height, :reposted_claim_hash, :height,
CASE WHEN :release_time IS NOT NULL THEN :release_time ELSE :timestamp END,
CASE WHEN :normalized NOT IN (SELECT normalized FROM claimtrie) THEN :height END,
CASE WHEN :height >= 137181 THEN :height+2102400 ELSE :height+262974 END,
:claim_name||COALESCE(
(SELECT shortest_id(claim_id, :claim_id) FROM claim WHERE normalized = :normalized),
'#'||substr(:claim_id, 1, 1)
)
)""", claims)
def update_claims(self, txos: List[Output], header):
claims = self._upsertable_claims(txos, header, clear_first=True)
if claims:
self.executemany("""
UPDATE claim SET
txo_hash=:txo_hash, tx_position=:tx_position, amount=:amount, height=:height,
claim_type=:claim_type, media_type=:media_type, stream_type=:stream_type,
timestamp=:timestamp, fee_amount=:fee_amount, fee_currency=:fee_currency, has_source=:has_source,
title=:title, duration=:duration, description=:description, author=:author, reposted_claim_hash=:reposted_claim_hash,
release_time=CASE WHEN :release_time IS NOT NULL THEN :release_time ELSE release_time END
WHERE claim_hash=:claim_hash;
""", claims)
def delete_claims(self, claim_hashes: Set[bytes]):
""" Deletes claim supports and from claimtrie in case of an abandon. """
if claim_hashes:
affected_channels = self.execute(*query(
"SELECT channel_hash FROM claim", channel_hash__is_not_null=1, claim_hash__in=claim_hashes
)).fetchall()
for table in ('claim', 'support', 'claimtrie'):
self.execute(*self._delete_sql(table, {'claim_hash__in': claim_hashes}))
self._clear_claim_metadata(claim_hashes)
return {r.channel_hash for r in affected_channels}
return set()
def delete_claims_above_height(self, height: int):
claim_hashes = [x[0] for x in self.execute(
"SELECT claim_hash FROM claim WHERE height>?", (height, )
).fetchall()]
while claim_hashes:
batch = set(claim_hashes[:500])
claim_hashes = claim_hashes[500:]
self.delete_claims(batch)
def _clear_claim_metadata(self, claim_hashes: Set[bytes]):
if claim_hashes:
for table in ('tag',): # 'language', 'location', etc
self.execute(*self._delete_sql(table, {'claim_hash__in': claim_hashes}))
def split_inputs_into_claims_supports_and_other(self, txis):
txo_hashes = {txi.txo_ref.hash for txi in txis}
claims = self.execute(*query(
"SELECT txo_hash, claim_hash, normalized FROM claim", txo_hash__in=txo_hashes
)).fetchall()
txo_hashes -= {r.txo_hash for r in claims}
supports = {}
if txo_hashes:
supports = self.execute(*query(
"SELECT txo_hash, claim_hash FROM support", txo_hash__in=txo_hashes
)).fetchall()
txo_hashes -= {r.txo_hash for r in supports}
return claims, supports, txo_hashes
def insert_supports(self, txos: List[Output]):
supports = []
for txo in txos:
tx = txo.tx_ref.tx
supports.append((
txo.ref.hash, tx.position, tx.height,
txo.claim_hash, txo.amount
))
if supports:
self.executemany(
"INSERT OR IGNORE INTO support ("
" txo_hash, tx_position, height, claim_hash, amount"
") "
"VALUES (?, ?, ?, ?, ?)", supports
)
def delete_supports(self, txo_hashes: Set[bytes]):
if txo_hashes:
self.execute(*self._delete_sql('support', {'txo_hash__in': txo_hashes}))
def calculate_reposts(self, txos: List[Output]):
targets = set()
for txo in txos:
try:
claim = txo.claim
except:
continue
if claim.is_repost:
targets.add((claim.repost.reference.claim_hash,))
if targets:
self.executemany(
"""
UPDATE claim SET reposted = (
SELECT count(*) FROM claim AS repost WHERE repost.reposted_claim_hash = claim.claim_hash
)
WHERE claim_hash = ?
""", targets
)
return {target[0] for target in targets}
def validate_channel_signatures(self, height, new_claims, updated_claims, spent_claims, affected_channels, timer):
if not new_claims and not updated_claims and not spent_claims:
return
sub_timer = timer.add_timer('segregate channels and signables')
sub_timer.start()
channels, new_channel_keys, signables = {}, {}, {}
for txo in chain(new_claims, updated_claims):
try:
claim = txo.claim
except:
continue
if claim.is_channel:
channels[txo.claim_hash] = txo
new_channel_keys[txo.claim_hash] = claim.channel.public_key_bytes
else:
signables[txo.claim_hash] = txo
sub_timer.stop()
sub_timer = timer.add_timer('make list of channels we need to lookup')
sub_timer.start()
missing_channel_keys = set()
for txo in signables.values():
claim = txo.claim
if claim.is_signed and claim.signing_channel_hash not in new_channel_keys:
missing_channel_keys.add(claim.signing_channel_hash)
sub_timer.stop()
sub_timer = timer.add_timer('lookup missing channels')
sub_timer.start()
all_channel_keys = {}
if new_channel_keys or missing_channel_keys or affected_channels:
all_channel_keys = dict(self.execute(*query(
"SELECT claim_hash, public_key_bytes FROM claim",
claim_hash__in=set(new_channel_keys) | missing_channel_keys | affected_channels
)))
sub_timer.stop()
sub_timer = timer.add_timer('prepare for updating claims')
sub_timer.start()
changed_channel_keys = {}
for claim_hash, new_key in new_channel_keys.items():
if claim_hash not in all_channel_keys or all_channel_keys[claim_hash] != new_key:
all_channel_keys[claim_hash] = new_key
changed_channel_keys[claim_hash] = new_key
claim_updates = []
for claim_hash, txo in signables.items():
claim = txo.claim
update = {
'claim_hash': claim_hash,
'channel_hash': None,
'signature': None,
'signature_digest': None,
'signature_valid': None
}
if claim.is_signed:
update.update({
'channel_hash': claim.signing_channel_hash,
'signature': txo.get_encoded_signature(),
'signature_digest': txo.get_signature_digest(self.ledger),
'signature_valid': 0
})
claim_updates.append(update)
sub_timer.stop()
sub_timer = timer.add_timer('find claims affected by a change in channel key')
sub_timer.start()
if changed_channel_keys:
sql = f"""
SELECT * FROM claim WHERE
channel_hash IN ({','.join('?' for _ in changed_channel_keys)}) AND
signature IS NOT NULL
"""
for affected_claim in self.execute(sql, list(changed_channel_keys.keys())):
if affected_claim.claim_hash not in signables:
claim_updates.append({
'claim_hash': affected_claim.claim_hash,
'channel_hash': affected_claim.channel_hash,
'signature': affected_claim.signature,
'signature_digest': affected_claim.signature_digest,
'signature_valid': 0
})
sub_timer.stop()
sub_timer = timer.add_timer('verify signatures')
sub_timer.start()
for update in claim_updates:
channel_pub_key = all_channel_keys.get(update['channel_hash'])
if channel_pub_key and update['signature']:
update['signature_valid'] = Output.is_signature_valid(
bytes(update['signature']), bytes(update['signature_digest']), channel_pub_key
)
sub_timer.stop()
sub_timer = timer.add_timer('update claims')
sub_timer.start()
if claim_updates:
self.executemany(f"""
UPDATE claim SET
channel_hash=:channel_hash, signature=:signature, signature_digest=:signature_digest,
signature_valid=:signature_valid,
channel_join=CASE
WHEN signature_valid=1 AND :signature_valid=1 AND channel_hash=:channel_hash THEN channel_join
WHEN :signature_valid=1 THEN {height}
END,
canonical_url=CASE
WHEN signature_valid=1 AND :signature_valid=1 AND channel_hash=:channel_hash THEN canonical_url
WHEN :signature_valid=1 THEN
(SELECT short_url FROM claim WHERE claim_hash=:channel_hash)||'/'||
claim_name||COALESCE(
(SELECT shortest_id(other_claim.claim_id, claim.claim_id) FROM claim AS other_claim
WHERE other_claim.signature_valid = 1 AND
other_claim.channel_hash = :channel_hash AND
other_claim.normalized = claim.normalized),
'#'||substr(claim_id, 1, 1)
)
END
WHERE claim_hash=:claim_hash;
""", claim_updates)
sub_timer.stop()
sub_timer = timer.add_timer('update claims affected by spent channels')
sub_timer.start()
if spent_claims:
self.execute(
f"""
UPDATE claim SET
signature_valid=CASE WHEN signature IS NOT NULL THEN 0 END,
channel_join=NULL, canonical_url=NULL
WHERE channel_hash IN ({','.join('?' for _ in spent_claims)})
""", list(spent_claims)
)
sub_timer.stop()
sub_timer = timer.add_timer('update channels')
sub_timer.start()
if channels:
self.executemany(
"""
UPDATE claim SET
public_key_bytes=:public_key_bytes,
public_key_hash=:public_key_hash
WHERE claim_hash=:claim_hash""", [{
'claim_hash': claim_hash,
'public_key_bytes': txo.claim.channel.public_key_bytes,
'public_key_hash': self.ledger.address_to_hash160(
self.ledger.public_key_to_address(txo.claim.channel.public_key_bytes)
)
} for claim_hash, txo in channels.items()]
)
sub_timer.stop()
sub_timer = timer.add_timer('update claims_in_channel counts')
sub_timer.start()
if all_channel_keys:
self.executemany(f"""
UPDATE claim SET
claims_in_channel=(
SELECT COUNT(*) FROM claim AS claim_in_channel
WHERE claim_in_channel.signature_valid=1 AND
claim_in_channel.channel_hash=claim.claim_hash
)
WHERE claim_hash = ?
""", [(channel_hash,) for channel_hash in all_channel_keys])
sub_timer.stop()
sub_timer = timer.add_timer('update blocked claims list')
sub_timer.start()
if (self.blocking_channel_hashes.intersection(all_channel_keys) or
self.filtering_channel_hashes.intersection(all_channel_keys)):
self.update_blocked_and_filtered_claims()
sub_timer.stop()
def _update_support_amount(self, claim_hashes):
if claim_hashes:
self.execute(f"""
UPDATE claim SET
support_amount = COALESCE(
(SELECT SUM(amount) FROM support WHERE support.claim_hash=claim.claim_hash), 0
)
WHERE claim_hash IN ({','.join('?' for _ in claim_hashes)})
""", claim_hashes)
def _update_effective_amount(self, height, claim_hashes=None):
self.execute(
f"UPDATE claim SET effective_amount = amount + support_amount "
f"WHERE activation_height = {height}"
)
if claim_hashes:
self.execute(
f"UPDATE claim SET effective_amount = amount + support_amount "
f"WHERE activation_height < {height} "
f" AND claim_hash IN ({','.join('?' for _ in claim_hashes)})",
claim_hashes
)
def _calculate_activation_height(self, height):
last_take_over_height = f"""COALESCE(
(SELECT last_take_over_height FROM claimtrie
WHERE claimtrie.normalized=claim.normalized),
{height}
)
"""
self.execute(f"""
UPDATE claim SET activation_height =
{height} + min(4032, cast(({height} - {last_take_over_height}) / 32 AS INT))
WHERE activation_height IS NULL
""")
def _perform_overtake(self, height, changed_claim_hashes, deleted_names):
deleted_names_sql = claim_hashes_sql = ""
if changed_claim_hashes:
claim_hashes_sql = f"OR claim_hash IN ({','.join('?' for _ in changed_claim_hashes)})"
if deleted_names:
deleted_names_sql = f"OR normalized IN ({','.join('?' for _ in deleted_names)})"
overtakes = self.execute(f"""
SELECT winner.normalized, winner.claim_hash,
claimtrie.claim_hash AS current_winner,
MAX(winner.effective_amount) AS max_winner_effective_amount
FROM (
SELECT normalized, claim_hash, effective_amount FROM claim
WHERE normalized IN (
SELECT normalized FROM claim WHERE activation_height={height} {claim_hashes_sql}
) {deleted_names_sql}
ORDER BY effective_amount DESC, height ASC, tx_position ASC
) AS winner LEFT JOIN claimtrie USING (normalized)
GROUP BY winner.normalized
HAVING current_winner IS NULL OR current_winner <> winner.claim_hash
""", list(changed_claim_hashes)+deleted_names)
for overtake in overtakes:
if overtake.current_winner:
self.execute(
f"UPDATE claimtrie SET claim_hash = ?, last_take_over_height = {height} "
f"WHERE normalized = ?",
(overtake.claim_hash, overtake.normalized)
)
else:
self.execute(
f"INSERT INTO claimtrie (claim_hash, normalized, last_take_over_height) "
f"VALUES (?, ?, {height})",
(overtake.claim_hash, overtake.normalized)
)
self.execute(
f"UPDATE claim SET activation_height = {height} WHERE normalized = ? "
f"AND (activation_height IS NULL OR activation_height > {height})",
(overtake.normalized,)
)
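The ORDER BY in the subquery above is what decides the winner of each contested name: highest effective_amount, with ties broken by lower height and then lower tx_position. A toy sketch of that selection rule (names and tuple layout are illustrative):

def pick_winner(claims):
    # claims: iterable of (claim_hash, effective_amount, height, tx_position) tuples
    return max(claims, key=lambda c: (c[1], -c[2], -c[3]))[0]

assert pick_winner([(b'a', 5, 10, 0), (b'b', 7, 12, 1)]) == b'b'   # larger stake wins
assert pick_winner([(b'a', 7, 10, 2), (b'b', 7, 10, 1)]) == b'b'   # tie: earlier tx_position wins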
def _copy(self, height):
if height > 50:
self.execute(f"DROP TABLE claimtrie{height-50}")
self.execute(f"CREATE TABLE claimtrie{height} AS SELECT * FROM claimtrie")
def update_claimtrie(self, height, changed_claim_hashes, deleted_names, timer):
r = timer.run
binary_claim_hashes = list(changed_claim_hashes)
r(self._calculate_activation_height, height)
r(self._update_support_amount, binary_claim_hashes)
r(self._update_effective_amount, height, binary_claim_hashes)
r(self._perform_overtake, height, binary_claim_hashes, list(deleted_names))
r(self._update_effective_amount, height)
r(self._perform_overtake, height, [], [])
def get_expiring(self, height):
return self.execute(
f"SELECT claim_hash, normalized FROM claim WHERE expiration_height = {height}"
)
def enqueue_changes(self):
query = """
SELECT claimtrie.claim_hash as is_controlling,
claimtrie.last_take_over_height,
(select group_concat(tag, ',,') from tag where tag.claim_hash in (claim.claim_hash, claim.reposted_claim_hash)) as tags,
(select group_concat(language, ' ') from language where language.claim_hash in (claim.claim_hash, claim.reposted_claim_hash)) as languages,
cr.has_source as reposted_has_source,
cr.claim_type as reposted_claim_type,
cr.stream_type as reposted_stream_type,
cr.media_type as reposted_media_type,
cr.duration as reposted_duration,
cr.fee_amount as reposted_fee_amount,
cr.fee_currency as reposted_fee_currency,
claim.*
FROM claim LEFT JOIN claimtrie USING (claim_hash) LEFT JOIN claim cr ON cr.claim_hash=claim.reposted_claim_hash
WHERE claim.claim_hash in (SELECT claim_hash FROM changelog)
"""
for claim in self.execute(query):
claim = claim._asdict()
id_set = set(filter(None, (claim['claim_hash'], claim['channel_hash'], claim['reposted_claim_hash'])))
claim['censor_type'] = 0
censoring_channel_hash = None
claim['has_source'] = bool(claim.pop('reposted_has_source') or claim['has_source'])
claim['stream_type'] = claim.pop('reposted_stream_type') or claim['stream_type']
claim['media_type'] = claim.pop('reposted_media_type') or claim['media_type']
claim['fee_amount'] = claim.pop('reposted_fee_amount') or claim['fee_amount']
claim['fee_currency'] = claim.pop('reposted_fee_currency') or claim['fee_currency']
claim['duration'] = claim.pop('reposted_duration') or claim['duration']
for reason_id in id_set:
if reason_id in self.blocked_streams:
claim['censor_type'] = 2
censoring_channel_hash = self.blocked_streams.get(reason_id)
elif reason_id in self.blocked_channels:
claim['censor_type'] = 2
censoring_channel_hash = self.blocked_channels.get(reason_id)
elif reason_id in self.filtered_streams:
claim['censor_type'] = 1
censoring_channel_hash = self.filtered_streams.get(reason_id)
elif reason_id in self.filtered_channels:
claim['censor_type'] = 1
censoring_channel_hash = self.filtered_channels.get(reason_id)
claim['censoring_channel_id'] = censoring_channel_hash[::-1].hex() if censoring_channel_hash else None
claim['tags'] = claim['tags'].split(',,') if claim['tags'] else []
claim['languages'] = claim['languages'].split(' ') if claim['languages'] else []
yield 'update', claim
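For each claim, blocking (censor_type=2) is checked before filtering (censor_type=1), and the censoring channel hash is exposed as a reversed-hex claim id. A small sketch of that id conversion (the sample hash is made up):

def censoring_channel_id(channel_hash: bytes) -> str:
    # the stored hash bytes are reversed to produce the hex claim id
    return channel_hash[::-1].hex()

assert censoring_channel_id(bytes.fromhex('aabbcc')) == 'ccbbaa'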
def clear_changelog(self):
self.execute("delete from changelog;")
def claim_producer(self):
while self.pending_deletes:
claim_hash = self.pending_deletes.pop()
yield 'delete', hexlify(claim_hash[::-1]).decode()
for claim in self.enqueue_changes():
yield claim
self.clear_changelog()
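claim_producer() yields ('delete', claim_id_hex) pairs for abandoned claims followed by ('update', claim_dict) pairs for changed ones, which the ES sync layer consumes. A hypothetical consumer illustrating just that protocol (the actual bulk-indexing call would depend on the SearchIndex API and is not shown):

def partition_changes(producer):
    deletes, updates = [], []
    for op, payload in producer:
        if op == 'delete':
            deletes.append(payload)   # payload is a hex claim id to remove from the index
        else:
            updates.append(payload)   # payload is a claim dict ready to be indexed
    return deletes, updates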
def advance_txs(self, height, all_txs, header, daemon_height, timer):
insert_claims = []
update_claims = []
update_claim_hashes = set()
delete_claim_hashes = self.pending_deletes
insert_supports = []
delete_support_txo_hashes = set()
recalculate_claim_hashes = set() # added/deleted supports, added/updated claim
deleted_claim_names = set()
delete_others = set()
body_timer = timer.add_timer('body')
for position, (etx, txid) in enumerate(all_txs):
tx = timer.run(
Transaction, etx.raw, height=height, position=position
)
# Inputs
spent_claims, spent_supports, spent_others = timer.run(
self.split_inputs_into_claims_supports_and_other, tx.inputs
)
body_timer.start()
delete_claim_hashes.update({r.claim_hash for r in spent_claims})
deleted_claim_names.update({r.normalized for r in spent_claims})
delete_support_txo_hashes.update({r.txo_hash for r in spent_supports})
recalculate_claim_hashes.update({r.claim_hash for r in spent_supports})
delete_others.update(spent_others)
# Outputs
for output in tx.outputs:
if output.is_support:
insert_supports.append(output)
recalculate_claim_hashes.add(output.claim_hash)
elif output.script.is_claim_name:
insert_claims.append(output)
recalculate_claim_hashes.add(output.claim_hash)
elif output.script.is_update_claim:
claim_hash = output.claim_hash
update_claims.append(output)
recalculate_claim_hashes.add(claim_hash)
body_timer.stop()
skip_update_claim_timer = timer.add_timer('skip update of abandoned claims')
skip_update_claim_timer.start()
for updated_claim in list(update_claims):
if updated_claim.ref.hash in delete_others:
update_claims.remove(updated_claim)
for updated_claim in update_claims:
claim_hash = updated_claim.claim_hash
delete_claim_hashes.discard(claim_hash)
update_claim_hashes.add(claim_hash)
skip_update_claim_timer.stop()
skip_insert_claim_timer = timer.add_timer('skip insertion of abandoned claims')
skip_insert_claim_timer.start()
for new_claim in list(insert_claims):
if new_claim.ref.hash in delete_others:
if new_claim.claim_hash not in update_claim_hashes:
insert_claims.remove(new_claim)
skip_insert_claim_timer.stop()
skip_insert_support_timer = timer.add_timer('skip insertion of abandoned supports')
skip_insert_support_timer.start()
for new_support in list(insert_supports):
if new_support.ref.hash in delete_others:
insert_supports.remove(new_support)
skip_insert_support_timer.stop()
expire_timer = timer.add_timer('recording expired claims')
expire_timer.start()
for expired in self.get_expiring(height):
delete_claim_hashes.add(expired.claim_hash)
deleted_claim_names.add(expired.normalized)
expire_timer.stop()
r = timer.run
affected_channels = r(self.delete_claims, delete_claim_hashes)
r(self.delete_supports, delete_support_txo_hashes)
r(self.insert_claims, insert_claims, header)
r(self.calculate_reposts, insert_claims)
r(self.update_claims, update_claims, header)
r(self.validate_channel_signatures, height, insert_claims,
update_claims, delete_claim_hashes, affected_channels, forward_timer=True)
r(self.insert_supports, insert_supports)
r(self.update_claimtrie, height, recalculate_claim_hashes, deleted_claim_names, forward_timer=True)
for algorithm in self.trending:
r(algorithm.run, self.db.cursor(), height, daemon_height, recalculate_claim_hashes)
class LBRYLevelDB(LevelDB):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
path = os.path.join(self.env.db_dir, 'claims.db')
trending = []
for algorithm_name in self.env.trending_algorithms:
if algorithm_name in TRENDING_ALGORITHMS:
trending.append(TRENDING_ALGORITHMS[algorithm_name])
if self.env.es_mode == 'reader':
self.logger.info('Index mode: reader')
self.sql = None
else:
self.logger.info('Index mode: writer. Using SQLite db to sync ES')
self.sql = SQLDB(
self, path,
self.env.default('BLOCKING_CHANNEL_IDS', '').split(' '),
self.env.default('FILTERING_CHANNEL_IDS', '').split(' '),
trending
)
# Search index
self.search_index = SearchIndex(
self.env.es_index_prefix, self.env.database_query_timeout, self.env.elastic_host, self.env.elastic_port
)
def close(self):
super().close()
if self.sql:
self.sql.close()
async def _open_dbs(self, *args, **kwargs):
await self.search_index.start()
await super()._open_dbs(*args, **kwargs)
if self.sql:
self.sql.open()


@ -5,7 +5,7 @@
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
import math
import re
import resource
from os import environ
@ -13,7 +13,7 @@ from collections import namedtuple
from ipaddress import ip_address
from lbry.wallet.server.util import class_logger
from lbry.wallet.server.coin import Coin
from lbry.wallet.server.coin import Coin, LBC, LBCTestNet, LBCRegTest
import lbry.wallet.server.util as lib_util
@ -28,73 +28,84 @@ class Env:
class Error(Exception):
pass
def __init__(self, coin=None):
def __init__(self, coin=None, db_dir=None, daemon_url=None, host=None, rpc_host=None, elastic_host=None,
elastic_port=None, loop_policy=None, max_query_workers=None, websocket_host=None, websocket_port=None,
chain=None, es_index_prefix=None, es_mode=None, cache_MB=None, reorg_limit=None, tcp_port=None,
udp_port=None, ssl_port=None, ssl_certfile=None, ssl_keyfile=None, rpc_port=None,
prometheus_port=None, max_subscriptions=None, banner_file=None, anon_logs=None, log_sessions=None,
allow_lan_udp=None, cache_all_tx_hashes=None, cache_all_claim_txos=None, country=None,
payment_address=None, donation_address=None, max_send=None, max_receive=None, max_sessions=None,
session_timeout=None, drop_client=None, description=None, daily_fee=None,
database_query_timeout=None, db_max_open_files=512):
self.logger = class_logger(__name__, self.__class__.__name__)
self.allow_root = self.boolean('ALLOW_ROOT', False)
self.host = self.default('HOST', 'localhost')
self.rpc_host = self.default('RPC_HOST', 'localhost')
self.elastic_host = self.default('ELASTIC_HOST', 'localhost')
self.elastic_port = self.integer('ELASTIC_PORT', 9200)
self.loop_policy = self.set_event_loop_policy()
self.db_dir = db_dir if db_dir is not None else self.required('DB_DIRECTORY')
self.daemon_url = daemon_url if daemon_url is not None else self.required('DAEMON_URL')
self.db_max_open_files = db_max_open_files
self.host = host if host is not None else self.default('HOST', 'localhost')
self.rpc_host = rpc_host if rpc_host is not None else self.default('RPC_HOST', 'localhost')
self.elastic_host = elastic_host if elastic_host is not None else self.default('ELASTIC_HOST', 'localhost')
self.elastic_port = elastic_port if elastic_port is not None else self.integer('ELASTIC_PORT', 9200)
self.loop_policy = self.set_event_loop_policy(
loop_policy if loop_policy is not None else self.default('EVENT_LOOP_POLICY', None)
)
self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK'])
self.db_dir = self.required('DB_DIRECTORY')
self.db_engine = self.default('DB_ENGINE', 'leveldb')
self.trending_algorithms = [
trending for trending in set(self.default('TRENDING_ALGORITHMS', 'zscore').split(' ')) if trending
]
self.max_query_workers = self.integer('MAX_QUERY_WORKERS', None)
self.individual_tag_indexes = self.boolean('INDIVIDUAL_TAG_INDEXES', True)
self.track_metrics = self.boolean('TRACK_METRICS', False)
self.websocket_host = self.default('WEBSOCKET_HOST', self.host)
self.websocket_port = self.integer('WEBSOCKET_PORT', None)
self.daemon_url = self.required('DAEMON_URL')
self.max_query_workers = max_query_workers if max_query_workers is not None else self.integer('MAX_QUERY_WORKERS', 4)
self.websocket_host = websocket_host if websocket_host is not None else self.default('WEBSOCKET_HOST', self.host)
self.websocket_port = websocket_port if websocket_port is not None else self.integer('WEBSOCKET_PORT', None)
if coin is not None:
assert issubclass(coin, Coin)
self.coin = coin
else:
coin_name = self.required('COIN').strip()
network = self.default('NET', 'mainnet').strip()
self.coin = Coin.lookup_coin_class(coin_name, network)
self.es_index_prefix = self.default('ES_INDEX_PREFIX', '')
self.es_mode = self.default('ES_MODE', 'writer')
self.cache_MB = self.integer('CACHE_MB', 1200)
self.reorg_limit = self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
chain = chain if chain is not None else self.default('NET', 'mainnet').strip().lower()
if chain == 'mainnet':
self.coin = LBC
elif chain == 'testnet':
self.coin = LBCTestNet
else:
self.coin = LBCRegTest
self.es_index_prefix = es_index_prefix if es_index_prefix is not None else self.default('ES_INDEX_PREFIX', '')
self.es_mode = es_mode if es_mode is not None else self.default('ES_MODE', 'writer')
self.cache_MB = cache_MB if cache_MB is not None else self.integer('CACHE_MB', 1024)
self.reorg_limit = reorg_limit if reorg_limit is not None else self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
# Server stuff
self.tcp_port = self.integer('TCP_PORT', None)
self.udp_port = self.integer('UDP_PORT', self.tcp_port)
self.ssl_port = self.integer('SSL_PORT', None)
self.tcp_port = tcp_port if tcp_port is not None else self.integer('TCP_PORT', None)
self.udp_port = udp_port if udp_port is not None else self.integer('UDP_PORT', self.tcp_port)
self.ssl_port = ssl_port if ssl_port is not None else self.integer('SSL_PORT', None)
if self.ssl_port:
self.ssl_certfile = self.required('SSL_CERTFILE')
self.ssl_keyfile = self.required('SSL_KEYFILE')
self.rpc_port = self.integer('RPC_PORT', 8000)
self.prometheus_port = self.integer('PROMETHEUS_PORT', 0)
self.max_subscriptions = self.integer('MAX_SUBSCRIPTIONS', 10000)
self.banner_file = self.default('BANNER_FILE', None)
self.tor_banner_file = self.default('TOR_BANNER_FILE', self.banner_file)
self.anon_logs = self.boolean('ANON_LOGS', False)
self.log_sessions = self.integer('LOG_SESSIONS', 3600)
self.allow_lan_udp = self.boolean('ALLOW_LAN_UDP', False)
self.country = self.default('COUNTRY', 'US')
self.ssl_certfile = ssl_certfile if ssl_certfile is not None else self.required('SSL_CERTFILE')
self.ssl_keyfile = ssl_keyfile if ssl_keyfile is not None else self.required('SSL_KEYFILE')
self.rpc_port = rpc_port if rpc_port is not None else self.integer('RPC_PORT', 8000)
self.prometheus_port = prometheus_port if prometheus_port is not None else self.integer('PROMETHEUS_PORT', 0)
self.max_subscriptions = max_subscriptions if max_subscriptions is not None else self.integer('MAX_SUBSCRIPTIONS', 10000)
self.banner_file = banner_file if banner_file is not None else self.default('BANNER_FILE', None)
# self.tor_banner_file = self.default('TOR_BANNER_FILE', self.banner_file)
self.anon_logs = anon_logs if anon_logs is not None else self.boolean('ANON_LOGS', False)
self.log_sessions = log_sessions if log_sessions is not None else self.integer('LOG_SESSIONS', 3600)
self.allow_lan_udp = allow_lan_udp if allow_lan_udp is not None else self.boolean('ALLOW_LAN_UDP', False)
self.cache_all_tx_hashes = cache_all_tx_hashes if cache_all_tx_hashes is not None else self.boolean('CACHE_ALL_TX_HASHES', False)
self.cache_all_claim_txos = cache_all_claim_txos if cache_all_claim_txos is not None else self.boolean('CACHE_ALL_CLAIM_TXOS', False)
self.country = country if country is not None else self.default('COUNTRY', 'US')
# Peer discovery
self.peer_discovery = self.peer_discovery_enum()
self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
self.peer_hubs = self.extract_peer_hubs()
self.force_proxy = self.boolean('FORCE_PROXY', False)
self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
# self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
# self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
# The electrum client takes the empty string as unspecified
self.payment_address = self.default('PAYMENT_ADDRESS', '')
self.donation_address = self.default('DONATION_ADDRESS', '')
self.payment_address = payment_address if payment_address is not None else self.default('PAYMENT_ADDRESS', '')
self.donation_address = donation_address if donation_address is not None else self.default('DONATION_ADDRESS', '')
# Server limits to help prevent DoS
self.max_send = self.integer('MAX_SEND', 1000000)
self.max_receive = self.integer('MAX_RECEIVE', 1000000)
self.max_subs = self.integer('MAX_SUBS', 250000)
self.max_sessions = self.sane_max_sessions()
self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
self.session_timeout = self.integer('SESSION_TIMEOUT', 600)
self.drop_client = self.custom("DROP_CLIENT", None, re.compile)
self.description = self.default('DESCRIPTION', '')
self.daily_fee = self.string_amount('DAILY_FEE', '0')
self.max_send = max_send if max_send is not None else self.integer('MAX_SEND', 1000000)
self.max_receive = max_receive if max_receive is not None else self.integer('MAX_RECEIVE', 1000000)
# self.max_subs = self.integer('MAX_SUBS', 250000)
self.max_sessions = max_sessions if max_sessions is not None else self.sane_max_sessions()
# self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
self.session_timeout = session_timeout if session_timeout is not None else self.integer('SESSION_TIMEOUT', 600)
self.drop_client = drop_client if drop_client is not None else self.custom("DROP_CLIENT", None, re.compile)
self.description = description if description is not None else self.default('DESCRIPTION', '')
self.daily_fee = daily_fee if daily_fee is not None else self.string_amount('DAILY_FEE', '0')
# Identities
clearnet_identity = self.clearnet_identity()
@ -102,7 +113,8 @@ class Env:
self.identities = [identity
for identity in (clearnet_identity, tor_identity)
if identity is not None]
self.database_query_timeout = float(self.integer('QUERY_TIMEOUT_MS', 3000)) / 1000.0
self.database_query_timeout = database_query_timeout if database_query_timeout is not None else \
(float(self.integer('QUERY_TIMEOUT_MS', 10000)) / 1000.0)
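Every setting above now follows the same precedence: an explicit constructor argument wins, otherwise the environment variable (or its default) is consulted. A standalone illustration of that pattern, not the Env class itself (names are made up):

import os

def resolve(arg, envvar, default):
    # an explicit argument beats the environment variable, which beats the default
    if arg is not None:
        return arg
    return os.environ.get(envvar, default)

os.environ['ELASTIC_PORT'] = '9201'
assert resolve(9300, 'ELASTIC_PORT', 9200) == 9300     # keyword argument wins
assert resolve(None, 'ELASTIC_PORT', 9200) == '9201'   # env var used next (note: a string)
assert resolve(None, 'MISSING_VAR', 9200) == 9200      # default as a last resort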
@classmethod
def default(cls, envvar, default):
@ -154,9 +166,9 @@ class Env:
if bad:
raise cls.Error(f'remove obsolete environment variables {bad}')
def set_event_loop_policy(self):
policy_name = self.default('EVENT_LOOP_POLICY', None)
if not policy_name:
@classmethod
def set_event_loop_policy(cls, policy_name: str = None):
if not policy_name or policy_name == 'default':
import asyncio
return asyncio.get_event_loop_policy()
elif policy_name == 'uvloop':
@ -165,7 +177,7 @@ class Env:
loop_policy = uvloop.EventLoopPolicy()
asyncio.set_event_loop_policy(loop_policy)
return loop_policy
raise self.Error(f'unknown event loop policy "{policy_name}"')
raise cls.Error(f'unknown event loop policy "{policy_name}"')
def cs_host(self, *, for_rpc):
"""Returns the 'host' argument to pass to asyncio's create_server
@ -274,3 +286,99 @@ class Env:
def extract_peer_hubs(self):
return [hub.strip() for hub in self.default('PEER_HUBS', '').split(',') if hub.strip()]
@classmethod
def contribute_to_arg_parser(cls, parser):
parser.add_argument('--db_dir', type=str, help='path of the directory containing lbry-leveldb',
default=cls.default('DB_DIRECTORY', None))
parser.add_argument('--daemon_url',
help='URL for rpc from lbrycrd, <rpcuser>:<rpcpassword>@<lbrycrd rpc ip><lbrycrd rpc port>',
default=cls.default('DAEMON_URL', None))
parser.add_argument('--db_max_open_files', type=int, default=512,
help='number of files leveldb can have open at a time')
parser.add_argument('--host', type=str, default=cls.default('HOST', 'localhost'),
help='Interface for hub server to listen on')
parser.add_argument('--tcp_port', type=int, default=cls.integer('TCP_PORT', 50001),
help='TCP port to listen on for hub server')
parser.add_argument('--udp_port', type=int, default=cls.integer('UDP_PORT', 50001),
help='UDP port to listen on for hub server')
parser.add_argument('--rpc_host', default=cls.default('RPC_HOST', 'localhost'), type=str,
help='Listening interface for admin rpc')
parser.add_argument('--rpc_port', default=cls.integer('RPC_PORT', 8000), type=int,
help='Listening port for admin rpc')
parser.add_argument('--websocket_host', default=cls.default('WEBSOCKET_HOST', 'localhost'), type=str,
help='Listening interface for websocket')
parser.add_argument('--websocket_port', default=cls.integer('WEBSOCKET_PORT', None), type=int,
help='Listening port for websocket')
parser.add_argument('--ssl_port', default=cls.integer('SSL_PORT', None), type=int,
help='SSL port to listen on for hub server')
parser.add_argument('--ssl_certfile', default=cls.default('SSL_CERTFILE', None), type=str,
help='Path to SSL cert file')
parser.add_argument('--ssl_keyfile', default=cls.default('SSL_KEYFILE', None), type=str,
help='Path to SSL key file')
parser.add_argument('--reorg_limit', default=cls.integer('REORG_LIMIT', 200), type=int, help='Max reorg depth')
parser.add_argument('--elastic_host', default=cls.default('ELASTIC_HOST', 'localhost'), type=str,
help='elasticsearch host')
parser.add_argument('--elastic_port', default=cls.integer('ELASTIC_PORT', 9200), type=int,
help='elasticsearch port')
parser.add_argument('--es_mode', default=cls.default('ES_MODE', 'writer'), type=str,
choices=['reader', 'writer'])
parser.add_argument('--es_index_prefix', default=cls.default('ES_INDEX_PREFIX', ''), type=str)
parser.add_argument('--loop_policy', default=cls.default('EVENT_LOOP_POLICY', 'default'), type=str,
choices=['default', 'uvloop'])
parser.add_argument('--max_query_workers', type=int, default=cls.integer('MAX_QUERY_WORKERS', 4),
help='number of threads used by the request handler to read the database')
parser.add_argument('--cache_MB', type=int, default=cls.integer('CACHE_MB', 1024),
help='size of the leveldb lru cache, in megabytes')
parser.add_argument('--cache_all_tx_hashes', type=bool,
help='Load all tx hashes into memory. This will make address subscriptions and sync, '
'resolve, transaction fetching, and block sync all faster at the expense of higher '
'memory usage')
parser.add_argument('--cache_all_claim_txos', type=bool,
help='Load all claim txos into memory. This will make address subscriptions and sync, '
'resolve, transaction fetching, and block sync all faster at the expense of higher '
'memory usage')
parser.add_argument('--prometheus_port', type=int, default=cls.integer('PROMETHEUS_PORT', 0),
help='port for hub prometheus metrics to listen on, disabled by default')
parser.add_argument('--max_subscriptions', type=int, default=cls.integer('MAX_SUBSCRIPTIONS', 10000),
help='max subscriptions per connection')
parser.add_argument('--banner_file', type=str, default=cls.default('BANNER_FILE', None),
help='path to file containing banner text')
parser.add_argument('--anon_logs', type=bool, default=cls.boolean('ANON_LOGS', False),
help="don't log ip addresses")
parser.add_argument('--allow_lan_udp', type=bool, default=cls.boolean('ALLOW_LAN_UDP', False),
help='reply to hub UDP ping messages from LAN ip addresses')
parser.add_argument('--country', type=str, default=cls.default('COUNTRY', 'US'), help='')
parser.add_argument('--max_send', type=int, default=cls.default('MAX_SEND', 1000000), help='')
parser.add_argument('--max_receive', type=int, default=cls.default('MAX_RECEIVE', 1000000), help='')
parser.add_argument('--max_sessions', type=int, default=cls.default('MAX_SESSIONS', 1000), help='')
parser.add_argument('--session_timeout', type=int, default=cls.default('SESSION_TIMEOUT', 600), help='')
parser.add_argument('--drop_client', type=str, default=cls.default('DROP_CLIENT', None), help='')
parser.add_argument('--description', type=str, default=cls.default('DESCRIPTION', ''), help='')
parser.add_argument('--daily_fee', type=float, default=cls.default('DAILY_FEE', 0.0), help='')
parser.add_argument('--payment_address', type=str, default=cls.default('PAYMENT_ADDRESS', ''), help='')
parser.add_argument('--donation_address', type=str, default=cls.default('DONATION_ADDRESS', ''), help='')
parser.add_argument('--chain', type=str, default=cls.default('NET', 'mainnet'),
help="Which chain to use, default is mainnet")
parser.add_argument('--query_timeout_ms', type=int, default=cls.integer('QUERY_TIMEOUT_MS', 10000),
help="elasticsearch query timeout")
@classmethod
def from_arg_parser(cls, args):
return cls(
db_dir=args.db_dir, daemon_url=args.daemon_url, db_max_open_files=args.db_max_open_files,
host=args.host, rpc_host=args.rpc_host, elastic_host=args.elastic_host, elastic_port=args.elastic_port,
loop_policy=args.loop_policy, max_query_workers=args.max_query_workers, websocket_host=args.websocket_host,
websocket_port=args.websocket_port, chain=args.chain, es_index_prefix=args.es_index_prefix,
es_mode=args.es_mode, cache_MB=args.cache_MB, reorg_limit=args.reorg_limit, tcp_port=args.tcp_port,
udp_port=args.udp_port, ssl_port=args.ssl_port, ssl_certfile=args.ssl_certfile,
ssl_keyfile=args.ssl_keyfile, rpc_port=args.rpc_port, prometheus_port=args.prometheus_port,
max_subscriptions=args.max_subscriptions, banner_file=args.banner_file, anon_logs=args.anon_logs,
log_sessions=None, allow_lan_udp=args.allow_lan_udp,
cache_all_tx_hashes=args.cache_all_tx_hashes, cache_all_claim_txos=args.cache_all_claim_txos,
country=args.country, payment_address=args.payment_address, donation_address=args.donation_address,
max_send=args.max_send, max_receive=args.max_receive, max_sessions=args.max_sessions,
session_timeout=args.session_timeout, drop_client=args.drop_client, description=args.description,
daily_fee=args.daily_fee, database_query_timeout=(args.query_timeout_ms / 1000)
)
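contribute_to_arg_parser() and from_arg_parser() are meant to be used together by a hub entry point; a hypothetical wiring of the two (the parser description is illustrative, and required settings such as --db_dir and --daemon_url still have to come from arguments or environment variables):

import argparse
from lbry.wallet.server.env import Env

def make_env(argv=None):
    parser = argparse.ArgumentParser(description='lbry hub server')
    Env.contribute_to_arg_parser(parser)
    return Env.from_arg_parser(parser.parse_args(argv))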


@ -36,6 +36,7 @@ _sha512 = hashlib.sha512
_new_hash = hashlib.new
_new_hmac = hmac.new
HASHX_LEN = 11
CLAIM_HASH_LEN = 20
def sha256(x):


@ -1,349 +0,0 @@
# Copyright (c) 2016-2018, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
"""History by script hash (address)."""
import array
import ast
import bisect
import time
from collections import defaultdict
from functools import partial
from lbry.wallet.server import util
from lbry.wallet.server.util import pack_be_uint32, unpack_be_uint32_from, unpack_be_uint16_from
from lbry.wallet.server.hash import hash_to_hex_str, HASHX_LEN
class History:
DB_VERSIONS = [0, 1]
def __init__(self):
self.logger = util.class_logger(__name__, self.__class__.__name__)
# For history compaction
self.max_hist_row_entries = 12500
self.unflushed = defaultdict(partial(array.array, 'I'))
self.unflushed_count = 0
self.db = None
@property
def needs_migration(self):
return self.db_version != max(self.DB_VERSIONS)
def migrate(self):
# 0 -> 1: flush_count from 16 to 32 bits
self.logger.warning("HISTORY MIGRATION IN PROGRESS. Please avoid shutting down before it finishes.")
with self.db.write_batch() as batch:
for key, value in self.db.iterator(prefix=b''):
if len(key) != 13:
continue
flush_id, = unpack_be_uint16_from(key[-2:])
new_key = key[:-2] + pack_be_uint32(flush_id)
batch.put(new_key, value)
self.logger.warning("history migration: new keys added, removing old ones.")
for key, value in self.db.iterator(prefix=b''):
if len(key) == 13:
batch.delete(key)
self.logger.warning("history migration: writing new state.")
self.db_version = 1
self.write_state(batch)
self.logger.warning("history migration: done.")
def open_db(self, db_class, for_sync, utxo_flush_count, compacting):
self.db = db_class('hist', for_sync)
self.read_state()
if self.needs_migration:
self.migrate()
self.clear_excess(utxo_flush_count)
# An incomplete compaction needs to be cancelled otherwise
# restarting it will corrupt the history
if not compacting:
self._cancel_compaction()
return self.flush_count
def close_db(self):
if self.db:
self.db.close()
self.db = None
def read_state(self):
state = self.db.get(b'state\0\0')
if state:
state = ast.literal_eval(state.decode())
if not isinstance(state, dict):
raise RuntimeError('failed reading state from history DB')
self.flush_count = state['flush_count']
self.comp_flush_count = state.get('comp_flush_count', -1)
self.comp_cursor = state.get('comp_cursor', -1)
self.db_version = state.get('db_version', 0)
else:
self.flush_count = 0
self.comp_flush_count = -1
self.comp_cursor = -1
self.db_version = max(self.DB_VERSIONS)
self.logger.info(f'history DB version: {self.db_version}')
if self.db_version not in self.DB_VERSIONS:
msg = f'this software only handles DB versions {self.DB_VERSIONS}'
self.logger.error(msg)
raise RuntimeError(msg)
self.logger.info(f'flush count: {self.flush_count:,d}')
def clear_excess(self, utxo_flush_count):
# < might happen at end of compaction as both DBs cannot be
# updated atomically
if self.flush_count <= utxo_flush_count:
return
self.logger.info('DB shut down uncleanly. Scanning for '
'excess history flushes...')
keys = []
for key, hist in self.db.iterator(prefix=b''):
flush_id, = unpack_be_uint32_from(key[-4:])
if flush_id > utxo_flush_count:
keys.append(key)
self.logger.info(f'deleting {len(keys):,d} history entries')
self.flush_count = utxo_flush_count
with self.db.write_batch() as batch:
for key in keys:
batch.delete(key)
self.write_state(batch)
self.logger.info('deleted excess history entries')
def write_state(self, batch):
"""Write state to the history DB."""
state = {
'flush_count': self.flush_count,
'comp_flush_count': self.comp_flush_count,
'comp_cursor': self.comp_cursor,
'db_version': self.db_version,
}
# History entries are not prefixed; the suffix \0\0 ensures we
# look similar to other entries and aren't interfered with
batch.put(b'state\0\0', repr(state).encode())
def add_unflushed(self, hashXs_by_tx, first_tx_num):
unflushed = self.unflushed
count = 0
for tx_num, hashXs in enumerate(hashXs_by_tx, start=first_tx_num):
hashXs = set(hashXs)
for hashX in hashXs:
unflushed[hashX].append(tx_num)
count += len(hashXs)
self.unflushed_count += count
def unflushed_memsize(self):
return len(self.unflushed) * 180 + self.unflushed_count * 4
def assert_flushed(self):
assert not self.unflushed
def flush(self):
start_time = time.time()
self.flush_count += 1
flush_id = pack_be_uint32(self.flush_count)
unflushed = self.unflushed
with self.db.write_batch() as batch:
for hashX in sorted(unflushed):
key = hashX + flush_id
batch.put(key, unflushed[hashX].tobytes())
self.write_state(batch)
count = len(unflushed)
unflushed.clear()
self.unflushed_count = 0
if self.db.for_sync:
elapsed = time.time() - start_time
self.logger.info(f'flushed history in {elapsed:.1f}s '
f'for {count:,d} addrs')
def backup(self, hashXs, tx_count):
# Not certain this is needed, but it doesn't hurt
self.flush_count += 1
nremoves = 0
bisect_left = bisect.bisect_left
with self.db.write_batch() as batch:
for hashX in sorted(hashXs):
deletes = []
puts = {}
for key, hist in self.db.iterator(prefix=hashX, reverse=True):
a = array.array('I')
a.frombytes(hist)
# Remove all history entries >= tx_count
idx = bisect_left(a, tx_count)
nremoves += len(a) - idx
if idx > 0:
puts[key] = a[:idx].tobytes()
break
deletes.append(key)
for key in deletes:
batch.delete(key)
for key, value in puts.items():
batch.put(key, value)
self.write_state(batch)
self.logger.info(f'backing up removed {nremoves:,d} history entries')
# def get_txnums(self, hashX, limit=1000):
# """Generator that returns an unpruned, sorted list of tx_nums in the
# history of a hashX. Includes both spending and receiving
# transactions. By default yields at most 1000 entries. Set
# limit to None to get them all. """
# limit = util.resolve_limit(limit)
# for key, hist in self.db.iterator(prefix=hashX):
# a = array.array('I')
# a.frombytes(hist)
# for tx_num in a:
# if limit == 0:
# return
# yield tx_num
# limit -= 1
#
# History compaction
#
# comp_cursor is a cursor into compaction progress.
# -1: no compaction in progress
# 0-65535: Compaction in progress; all prefixes < comp_cursor have
# been compacted, and later ones have not.
# 65536: compaction complete in-memory but not flushed
#
# comp_flush_count applies during compaction, and is a flush count
# for history with prefix < comp_cursor. flush_count applies
# to still uncompacted history. It is -1 when no compaction is
# taking place. Key suffixes up to and including comp_flush_count
# are used, so a parallel history flush must first increment this
#
# When compaction is complete and the final flush takes place,
# flush_count is reset to comp_flush_count, and comp_flush_count to -1
def _flush_compaction(self, cursor, write_items, keys_to_delete):
"""Flush a single compaction pass as a batch."""
# Update compaction state
if cursor == 65536:
self.flush_count = self.comp_flush_count
self.comp_cursor = -1
self.comp_flush_count = -1
else:
self.comp_cursor = cursor
# History DB. Flush compacted history and updated state
with self.db.write_batch() as batch:
# Important: delete first! The keyspace may overlap.
for key in keys_to_delete:
batch.delete(key)
for key, value in write_items:
batch.put(key, value)
self.write_state(batch)
def _compact_hashX(self, hashX, hist_map, hist_list,
write_items, keys_to_delete):
"""Compress history for a hashX. hist_list is an ordered list of
the histories to be compressed."""
# History entries (tx numbers) are 4 bytes each. Distribute
# over rows of up to 50KB in size. A fixed row size means
# future compactions will not need to update the first N - 1
# rows.
max_row_size = self.max_hist_row_entries * 4
full_hist = b''.join(hist_list)
nrows = (len(full_hist) + max_row_size - 1) // max_row_size
if nrows > 4:
self.logger.info('hashX {} is large: {:,d} entries across '
'{:,d} rows'
.format(hash_to_hex_str(hashX),
len(full_hist) // 4, nrows))
# Find what history needs to be written, and what keys need to
# be deleted. Start by assuming all keys are to be deleted,
# and then remove those that are the same on-disk as when
# compacted.
write_size = 0
keys_to_delete.update(hist_map)
for n, chunk in enumerate(util.chunks(full_hist, max_row_size)):
key = hashX + pack_be_uint32(n)
if hist_map.get(key) == chunk:
keys_to_delete.remove(key)
else:
write_items.append((key, chunk))
write_size += len(chunk)
assert n + 1 == nrows
self.comp_flush_count = max(self.comp_flush_count, n)
return write_size
def _compact_prefix(self, prefix, write_items, keys_to_delete):
"""Compact all history entries for hashXs beginning with the
given prefix. Update keys_to_delete and write."""
prior_hashX = None
hist_map = {}
hist_list = []
key_len = HASHX_LEN + 2
write_size = 0
for key, hist in self.db.iterator(prefix=prefix):
# Ignore non-history entries
if len(key) != key_len:
continue
hashX = key[:-2]
if hashX != prior_hashX and prior_hashX:
write_size += self._compact_hashX(prior_hashX, hist_map,
hist_list, write_items,
keys_to_delete)
hist_map.clear()
hist_list.clear()
prior_hashX = hashX
hist_map[key] = hist
hist_list.append(hist)
if prior_hashX:
write_size += self._compact_hashX(prior_hashX, hist_map, hist_list,
write_items, keys_to_delete)
return write_size
def _compact_history(self, limit):
"""Inner loop of history compaction. Loops until limit bytes have
been processed.
"""
keys_to_delete = set()
write_items = [] # A list of (key, value) pairs
write_size = 0
# Loop over 2-byte prefixes
cursor = self.comp_cursor
while write_size < limit and cursor < (1 << 32):
prefix = pack_be_uint32(cursor)
write_size += self._compact_prefix(prefix, write_items,
keys_to_delete)
cursor += 1
max_rows = self.comp_flush_count + 1
self._flush_compaction(cursor, write_items, keys_to_delete)
self.logger.info('history compaction: wrote {:,d} rows ({:.1f} MB), '
'removed {:,d} rows, largest: {:,d}, {:.1f}% complete'
.format(len(write_items), write_size / 1000000,
len(keys_to_delete), max_rows,
100 * cursor / 65536))
return write_size
def _cancel_compaction(self):
if self.comp_cursor != -1:
self.logger.warning('cancelling in-progress history compaction')
self.comp_flush_count = -1
self.comp_cursor = -1

File diff suppressed because it is too large


@ -9,15 +9,16 @@
import asyncio
import itertools
import time
from abc import ABC, abstractmethod
import attr
import typing
from typing import Set, Optional, Callable, Awaitable
from collections import defaultdict
from prometheus_client import Histogram
import attr
from lbry.wallet.server.hash import hash_to_hex_str, hex_str_to_hash
from lbry.wallet.server.util import class_logger, chunks
from lbry.wallet.server.leveldb import UTXO
if typing.TYPE_CHECKING:
from lbry.wallet.server.session import LBRYSessionManager
@attr.s(slots=True)
@ -28,6 +29,7 @@ class MemPoolTx:
out_pairs = attr.ib()
fee = attr.ib()
size = attr.ib()
raw_tx = attr.ib()
@attr.s(slots=True)
@ -37,47 +39,6 @@ class MemPoolTxSummary:
has_unconfirmed_inputs = attr.ib()
class MemPoolAPI(ABC):
"""A concrete instance of this class is passed to the MemPool object
and used by it to query DB and blockchain state."""
@abstractmethod
async def height(self):
"""Query bitcoind for its height."""
@abstractmethod
def cached_height(self):
"""Return the height of bitcoind the last time it was queried,
for any reason, without actually querying it.
"""
@abstractmethod
async def mempool_hashes(self):
"""Query bitcoind for the hashes of all transactions in its
mempool, returned as a list."""
@abstractmethod
async def raw_transactions(self, hex_hashes):
"""Query bitcoind for the serialized raw transactions with the given
hashes. Missing transactions are returned as None.
hex_hashes is an iterable of hexadecimal hash strings."""
@abstractmethod
async def lookup_utxos(self, prevouts):
"""Return a list of (hashX, value) pairs each prevout if unspent,
otherwise return None if spent or not found.
prevouts - an iterable of (hash, index) pairs
"""
@abstractmethod
async def on_mempool(self, touched, new_touched, height):
"""Called each time the mempool is synchronized. touched is a set of
hashXs touched since the previous call. height is the
daemon's height at the time the mempool was obtained."""
NAMESPACE = "wallet_server"
HISTOGRAM_BUCKETS = (
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf')
@ -89,23 +50,14 @@ mempool_process_time_metric = Histogram(
class MemPool:
"""Representation of the daemon's mempool.
coin - a coin class from coins.py
api - an object implementing MemPoolAPI
Updated regularly in caught-up state. Goal is to enable efficient
response to the calls in the external interface. To that end we
maintain the following maps:
tx: tx_hash -> MemPoolTx
hashXs: hashX -> set of all hashes of txs touching the hashX
"""
def __init__(self, coin, api, refresh_secs=1.0, log_status_secs=120.0):
assert isinstance(api, MemPoolAPI)
def __init__(self, coin, daemon, db, state_lock: asyncio.Lock, refresh_secs=1.0, log_status_secs=120.0):
self.coin = coin
self.api = api
self._daemon = daemon
self._db = db
self._touched_mp = {}
self._touched_bp = {}
self._highest_block = -1
self.logger = class_logger(__name__, self.__class__.__name__)
self.txs = {}
self.hashXs = defaultdict(set) # None can be a key
@ -113,10 +65,11 @@ class MemPool:
self.refresh_secs = refresh_secs
self.log_status_secs = log_status_secs
# Prevents mempool refreshes during fee histogram calculation
self.lock = asyncio.Lock()
self.lock = state_lock
self.wakeup = asyncio.Event()
self.mempool_process_time_metric = mempool_process_time_metric
self.notified_mempool_txs = set()
self.notify_sessions: Optional[Callable[[int, Set[bytes], Set[bytes]], Awaitable[None]]] = None
async def _logging(self, synchronized_event):
"""Print regular logs of mempool stats."""
@ -132,40 +85,6 @@ class MemPool:
await asyncio.sleep(self.log_status_secs)
await synchronized_event.wait()
async def _refresh_histogram(self, synchronized_event):
while True:
await synchronized_event.wait()
async with self.lock:
self._update_histogram(100_000)
await asyncio.sleep(self.coin.MEMPOOL_HISTOGRAM_REFRESH_SECS)
def _update_histogram(self, bin_size):
# Build a histogram by fee rate
histogram = defaultdict(int)
for tx in self.txs.values():
histogram[tx.fee // tx.size] += tx.size
# Now compact it. For efficiency, get_fees returns a
# compact histogram with variable bin size. The compact
# histogram is an array of (fee_rate, vsize) values.
# vsize_n is the cumulative virtual size of mempool
# transactions with a fee rate in the interval
# [rate_(n-1), rate_n)], and rate_(n-1) > rate_n.
# Intervals are chosen to create tranches containing at
# least 100kb of transactions
compact = []
cum_size = 0
r = 0 # ?
for fee_rate, size in sorted(histogram.items(), reverse=True):
cum_size += size
if cum_size + r > bin_size:
compact.append((fee_rate, cum_size))
r += cum_size - bin_size
cum_size = 0
bin_size *= 1.1
self.logger.info(f'compact fee histogram: {compact}')
self.cached_compact_histogram = compact
def _accept_transactions(self, tx_map, utxo_map, touched):
"""Accept transactions in tx_map to the mempool if all their inputs
can be found in the existing mempool or a utxo_map from the
@ -223,9 +142,9 @@ class MemPool:
"""Refresh our view of the daemon's mempool."""
while True:
start = time.perf_counter()
height = self.api.cached_height()
hex_hashes = await self.api.mempool_hashes()
if height != await self.api.height():
height = self._daemon.cached_height()
hex_hashes = await self._daemon.mempool_hashes()
if height != await self._daemon.height():
continue
hashes = {hex_str_to_hash(hh) for hh in hex_hashes}
async with self.lock:
@ -237,7 +156,7 @@ class MemPool:
}
synchronized_event.set()
synchronized_event.clear()
await self.api.on_mempool(touched, new_touched, height)
await self.on_mempool(touched, new_touched, height)
duration = time.perf_counter() - start
self.mempool_process_time_metric.observe(duration)
try:
@ -292,8 +211,7 @@ class MemPool:
async def _fetch_and_accept(self, hashes, all_hashes, touched):
"""Fetch a list of mempool transactions."""
hex_hashes_iter = (hash_to_hex_str(hash) for hash in hashes)
raw_txs = await self.api.raw_transactions(hex_hashes_iter)
raw_txs = await self._daemon.getrawtransactions((hash_to_hex_str(hash) for hash in hashes))
to_hashX = self.coin.hashX_from_script
deserializer = self.coin.DESERIALIZER
@ -313,7 +231,7 @@ class MemPool:
txout_pairs = tuple((to_hashX(txout.pk_script), txout.value)
for txout in tx.outputs)
tx_map[hash] = MemPoolTx(txin_pairs, None, txout_pairs,
0, tx_size)
0, tx_size, raw_tx)
# Determine all prevouts not in the mempool, and fetch the
# UTXO information from the database. Failed prevout lookups
@ -323,7 +241,7 @@ class MemPool:
prevouts = tuple(prevout for tx in tx_map.values()
for prevout in tx.prevouts
if prevout[0] not in all_hashes)
utxos = await self.api.lookup_utxos(prevouts)
utxos = await self._db.lookup_utxos(prevouts)
utxo_map = dict(zip(prevouts, utxos))
return self._accept_transactions(tx_map, utxo_map, touched)
@ -407,3 +325,37 @@ class MemPool:
if unspent_inputs:
return -1
return 0
async def _maybe_notify(self, new_touched):
tmp, tbp = self._touched_mp, self._touched_bp
common = set(tmp).intersection(tbp)
if common:
height = max(common)
elif tmp and max(tmp) == self._highest_block:
height = self._highest_block
else:
# Either we are processing a block and waiting for it to
# come in, or we have not yet had a mempool update for the
# new block height
return
touched = tmp.pop(height)
for old in [h for h in tmp if h <= height]:
del tmp[old]
for old in [h for h in tbp if h <= height]:
touched.update(tbp.pop(old))
# print("notify", height, len(touched), len(new_touched))
await self.notify_sessions(height, touched, new_touched)
async def start(self, height, session_manager: 'LBRYSessionManager'):
self._highest_block = height
self.notify_sessions = session_manager._notify_sessions
await self.notify_sessions(height, set(), set())
async def on_mempool(self, touched, new_touched, height):
self._touched_mp[height] = touched
await self._maybe_notify(new_touched)
async def on_block(self, touched, height):
self._touched_bp[height] = touched
self._highest_block = height
await self._maybe_notify(set())
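_maybe_notify() defers client notifications so a transaction is not announced twice, once from a mempool refresh and again when its block is processed: a height is only flushed once both sources have reported it, or once the mempool has caught up to the newest block. A toy sketch of that height-selection rule:

def pick_notify_height(touched_mp, touched_bp, highest_block):
    # touched_mp / touched_bp map heights to hashXs touched by mempool / block processing
    common = set(touched_mp) & set(touched_bp)
    if common:
        return max(common)
    if touched_mp and max(touched_mp) == highest_block:
        return highest_block
    return None   # still waiting for the other source to catch up

assert pick_notify_height({10: set()}, {10: set()}, 10) == 10   # both sources saw height 10
assert pick_notify_height({9: set()}, {10: set()}, 10) is None  # mempool not yet at height 10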


@ -43,10 +43,12 @@ class Merkle:
def __init__(self, hash_func=double_sha256):
self.hash_func = hash_func
def tree_depth(self, hash_count):
return self.branch_length(hash_count) + 1
@staticmethod
def tree_depth(hash_count):
return Merkle.branch_length(hash_count) + 1
def branch_length(self, hash_count):
@staticmethod
def branch_length(hash_count):
"""Return the length of a merkle branch given the number of hashes."""
if not isinstance(hash_count, int):
raise TypeError('hash_count must be an integer')
@ -54,7 +56,8 @@ class Merkle:
raise ValueError('hash_count must be at least 1')
return ceil(log(hash_count, 2))
def branch_and_root(self, hashes, index, length=None):
@staticmethod
def branch_and_root(hashes, index, length=None, hash_func=double_sha256):
"""Return a (merkle branch, merkle_root) pair given hashes, and the
index of one of those hashes.
"""
@ -64,7 +67,7 @@ class Merkle:
# This also asserts hashes is not empty
if not 0 <= index < len(hashes):
raise ValueError(f"index '{index}/{len(hashes)}' out of range")
natural_length = self.branch_length(len(hashes))
natural_length = Merkle.branch_length(len(hashes))
if length is None:
length = natural_length
else:
@ -73,7 +76,6 @@ class Merkle:
if length < natural_length:
raise ValueError('length out of range')
hash_func = self.hash_func
branch = []
for _ in range(length):
if len(hashes) & 1:
@ -85,44 +87,47 @@ class Merkle:
return branch, hashes[0]
def root(self, hashes, length=None):
@staticmethod
def root(hashes, length=None):
"""Return the merkle root of a non-empty iterable of binary hashes."""
branch, root = self.branch_and_root(hashes, 0, length)
branch, root = Merkle.branch_and_root(hashes, 0, length)
return root
def root_from_proof(self, hash, branch, index):
"""Return the merkle root given a hash, a merkle branch to it, and
its index in the hashes array.
# @staticmethod
# def root_from_proof(hash, branch, index, hash_func=double_sha256):
# """Return the merkle root given a hash, a merkle branch to it, and
# its index in the hashes array.
#
# branch is an iterable sorted deepest to shallowest. If the
# returned root is the expected value then the merkle proof is
# verified.
#
# The caller should have confirmed the length of the branch with
# branch_length(). Unfortunately this is not easily done for
# bitcoin transactions as the number of transactions in a block
# is unknown to an SPV client.
# """
# for elt in branch:
# if index & 1:
# hash = hash_func(elt + hash)
# else:
# hash = hash_func(hash + elt)
# index >>= 1
# if index:
# raise ValueError('index out of range for branch')
# return hash
branch is an iterable sorted deepest to shallowest. If the
returned root is the expected value then the merkle proof is
verified.
The caller should have confirmed the length of the branch with
branch_length(). Unfortunately this is not easily done for
bitcoin transactions as the number of transactions in a block
is unknown to an SPV client.
"""
hash_func = self.hash_func
for elt in branch:
if index & 1:
hash = hash_func(elt + hash)
else:
hash = hash_func(hash + elt)
index >>= 1
if index:
raise ValueError('index out of range for branch')
return hash
def level(self, hashes, depth_higher):
@staticmethod
def level(hashes, depth_higher):
"""Return a level of the merkle tree of hashes the given depth
higher than the bottom row of the original tree."""
size = 1 << depth_higher
root = self.root
root = Merkle.root
return [root(hashes[n: n + size], depth_higher)
for n in range(0, len(hashes), size)]
def branch_and_root_from_level(self, level, leaf_hashes, index,
@staticmethod
def branch_and_root_from_level(level, leaf_hashes, index,
depth_higher):
"""Return a (merkle branch, merkle_root) pair when a merkle-tree has a
level cached.
@ -146,10 +151,10 @@ class Merkle:
if not isinstance(leaf_hashes, list):
raise TypeError("leaf_hashes must be a list")
leaf_index = (index >> depth_higher) << depth_higher
leaf_branch, leaf_root = self.branch_and_root(
leaf_branch, leaf_root = Merkle.branch_and_root(
leaf_hashes, index - leaf_index, depth_higher)
index >>= depth_higher
level_branch, root = self.branch_and_root(level, index)
level_branch, root = Merkle.branch_and_root(level, index)
# Check last so that we know index is in-range
if leaf_root != level[index]:
raise ValueError('leaf hashes inconsistent with level')
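With the helpers converted to static methods they can be exercised without a Merkle instance. A small self-check, assuming the usual import paths for Merkle and double_sha256:

from lbry.wallet.server.hash import double_sha256
from lbry.wallet.server.merkle import Merkle

leaves = [double_sha256(bytes([i])) for i in range(4)]
branch, root = Merkle.branch_and_root(leaves, 2)      # branch for the third leaf
assert root == Merkle.root(leaves)                    # same root either way
assert len(branch) == Merkle.branch_length(4) == 2    # four leaves -> branch of length 2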


@ -5,66 +5,13 @@ from concurrent.futures.thread import ThreadPoolExecutor
import typing
import lbry
from lbry.wallet.server.mempool import MemPool, MemPoolAPI
from lbry.wallet.server.mempool import MemPool
from lbry.wallet.server.block_processor import BlockProcessor
from lbry.wallet.server.leveldb import LevelDB
from lbry.wallet.server.session import LBRYSessionManager
from lbry.prometheus import PrometheusServer
class Notifications:
# hashX notifications come from two sources: new blocks and
# mempool refreshes.
#
# A user with a pending transaction is notified after the block it
# gets in is processed. Block processing can take an extended
# time, and the prefetcher might poll the daemon after the mempool
# code in any case. In such cases the transaction will not be in
# the mempool after the mempool refresh. We want to avoid
# notifying clients twice - for the mempool refresh and when the
# block is done. This object handles that logic by deferring
# notifications appropriately.
def __init__(self):
self._touched_mp = {}
self._touched_bp = {}
self.notified_mempool_txs = set()
self._highest_block = -1
async def _maybe_notify(self, new_touched):
tmp, tbp = self._touched_mp, self._touched_bp
common = set(tmp).intersection(tbp)
if common:
height = max(common)
elif tmp and max(tmp) == self._highest_block:
height = self._highest_block
else:
# Either we are processing a block and waiting for it to
# come in, or we have not yet had a mempool update for the
# new block height
return
touched = tmp.pop(height)
for old in [h for h in tmp if h <= height]:
del tmp[old]
for old in [h for h in tbp if h <= height]:
touched.update(tbp.pop(old))
await self.notify(height, touched, new_touched)
async def notify(self, height, touched, new_touched):
pass
async def start(self, height, notify_func):
self._highest_block = height
self.notify = notify_func
await self.notify(height, set(), set())
async def on_mempool(self, touched, new_touched, height):
self._touched_mp[height] = touched
await self._maybe_notify(new_touched)
async def on_block(self, touched, height):
self._touched_bp[height] = touched
self._highest_block = height
await self._maybe_notify(set())
class Server:
def __init__(self, env):
@ -73,26 +20,13 @@ class Server:
self.shutdown_event = asyncio.Event()
self.cancellable_tasks = []
self.notifications = notifications = Notifications()
self.daemon = daemon = env.coin.DAEMON(env.coin, env.daemon_url)
self.db = db = env.coin.DB(env)
self.bp = bp = env.coin.BLOCK_PROCESSOR(env, db, daemon, notifications)
self.db = db = LevelDB(env)
self.bp = bp = BlockProcessor(env, db, daemon, self.shutdown_event)
self.prometheus_server: typing.Optional[PrometheusServer] = None
# Set notifications up to implement the MemPoolAPI
notifications.height = daemon.height
notifications.cached_height = daemon.cached_height
notifications.mempool_hashes = daemon.mempool_hashes
notifications.raw_transactions = daemon.getrawtransactions
notifications.lookup_utxos = db.lookup_utxos
MemPoolAPI.register(Notifications)
self.mempool = mempool = MemPool(env.coin, notifications)
notifications.notified_mempool_txs = self.mempool.notified_mempool_txs
self.session_mgr = env.coin.SESSION_MANAGER(
env, db, bp, daemon, mempool, self.shutdown_event
self.session_mgr = LBRYSessionManager(
env, db, bp, daemon, self.shutdown_event
)
self._indexer_task = None
@ -120,8 +54,8 @@ class Server:
await _start_cancellable(self.bp.fetch_and_process_blocks)
await self.db.populate_header_merkle_cache()
await _start_cancellable(self.mempool.keep_synchronized)
await _start_cancellable(self.session_mgr.serve, self.notifications)
await _start_cancellable(self.bp.mempool.keep_synchronized)
await _start_cancellable(self.session_mgr.serve, self.bp.mempool)
async def stop(self):
for task in reversed(self.cancellable_tasks):
@ -135,7 +69,7 @@ class Server:
def run(self):
loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor(1)
executor = ThreadPoolExecutor(self.env.max_query_workers, thread_name_prefix='hub-worker')
loop.set_default_executor(executor)
def __exit():


@ -2,8 +2,6 @@ import os
import ssl
import math
import time
import json
import base64
import codecs
import typing
import asyncio
@ -15,32 +13,29 @@ from asyncio import Event, sleep
from collections import defaultdict
from functools import partial
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from elasticsearch import ConnectionTimeout
from prometheus_client import Counter, Info, Histogram, Gauge
import lbry
from lbry.error import TooManyClaimSearchParametersError
from lbry.error import ResolveCensoredError, TooManyClaimSearchParametersError
from lbry.build_info import BUILD, COMMIT_HASH, DOCKER_TAG
from lbry.wallet.server.block_processor import LBRYBlockProcessor
from lbry.wallet.server.db.writer import LBRYLevelDB
from lbry.schema.result import Outputs
from lbry.wallet.server.block_processor import BlockProcessor
from lbry.wallet.server.leveldb import LevelDB
from lbry.wallet.server.websocket import AdminWebSocket
from lbry.wallet.server.metrics import ServerLoadData, APICallMetrics
from lbry.wallet.rpc.framing import NewlineFramer
import lbry.wallet.server.version as VERSION
from lbry.wallet.rpc import (
RPCSession, JSONRPCAutoDetect, JSONRPCConnection,
handler_invocation, RPCError, Request, JSONRPC
handler_invocation, RPCError, Request, JSONRPC, Notification, Batch
)
from lbry.wallet.server import text
from lbry.wallet.server import util
from lbry.wallet.server.hash import sha256, hash_to_hex_str, hex_str_to_hash, HASHX_LEN, Base58Error
from lbry.wallet.server.daemon import DaemonError
if typing.TYPE_CHECKING:
from lbry.wallet.server.env import Env
from lbry.wallet.server.mempool import MemPool
from lbry.wallet.server.daemon import Daemon
BAD_REQUEST = 1
@ -175,14 +170,13 @@ class SessionManager:
namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS
)
def __init__(self, env: 'Env', db: LBRYLevelDB, bp: LBRYBlockProcessor, daemon: 'Daemon', mempool: 'MemPool',
shutdown_event: asyncio.Event):
def __init__(self, env: 'Env', db: LevelDB, bp: BlockProcessor, daemon: 'Daemon', shutdown_event: asyncio.Event):
env.max_send = max(350000, env.max_send)
self.env = env
self.db = db
self.bp = bp
self.daemon = daemon
self.mempool = mempool
self.mempool = bp.mempool
self.shutdown_event = shutdown_event
self.logger = util.class_logger(__name__, self.__class__.__name__)
self.servers: typing.Dict[str, asyncio.AbstractServer] = {}
@ -263,17 +257,6 @@ class SessionManager:
await self._start_external_servers()
paused = False
async def _log_sessions(self):
"""Periodically log sessions."""
log_interval = self.env.log_sessions
if log_interval:
while True:
await sleep(log_interval)
data = self._session_data(for_log=True)
for line in text.sessions_lines(data):
self.logger.info(line)
self.logger.info(json.dumps(self._get_info()))
def _group_map(self):
group_map = defaultdict(list)
for session in self.sessions.values():
@ -376,23 +359,6 @@ class SessionManager:
'version': lbry.__version__,
}
def _session_data(self, for_log):
"""Returned to the RPC 'sessions' call."""
now = time.time()
sessions = sorted(self.sessions.values(), key=lambda s: s.start_time)
return [(session.session_id,
session.flags(),
session.peer_address_str(for_log=for_log),
session.client_version,
session.protocol_version_string(),
session.count_pending_items(),
session.txs_sent,
session.sub_count(),
session.recv_count, session.recv_size,
session.send_count, session.send_size,
now - session.start_time)
for session in sessions]
def _group_data(self):
"""Returned to the RPC 'groups' call."""
result = []
@ -537,23 +503,19 @@ class SessionManager:
return lines
async def rpc_sessions(self):
"""Return statistics about connected sessions."""
return self._session_data(for_log=False)
async def rpc_reorg(self, count):
"""Force a reorg of the given number of blocks.
count: number of blocks to reorg
"""
count = non_negative_integer(count)
if not self.bp.force_chain_reorg(count):
raise RPCError(BAD_REQUEST, 'still catching up with daemon')
return f'scheduled a reorg of {count:,d} blocks'
# async def rpc_reorg(self, count):
# """Force a reorg of the given number of blocks.
#
# count: number of blocks to reorg
# """
# count = non_negative_integer(count)
# if not self.bp.force_chain_reorg(count):
# raise RPCError(BAD_REQUEST, 'still catching up with daemon')
# return f'scheduled a reorg of {count:,d} blocks'
# --- External Interface
async def serve(self, notifications, server_listening_event):
async def serve(self, mempool, server_listening_event):
"""Start the RPC server if enabled. When the event is triggered,
start TCP and SSL servers."""
try:
@ -567,7 +529,7 @@ class SessionManager:
if self.env.drop_client is not None:
self.logger.info(f'drop clients matching: {self.env.drop_client.pattern}')
# Start notifications; initialize hsub_results
await notifications.start(self.db.db_height, self._notify_sessions)
await mempool.start(self.db.db_height, self)
await self.start_other()
await self._start_external_servers()
server_listening_event.set()
@ -576,9 +538,12 @@ class SessionManager:
# because we connect to ourself
await asyncio.wait([
self._clear_stale_sessions(),
self._log_sessions(),
self._manage_servers()
])
except Exception as err:
if not isinstance(err, asyncio.CancelledError):
log.exception("hub server died")
raise err
finally:
await self._close_servers(list(self.servers.keys()))
log.warning("disconnect %i sessions", len(self.sessions))
@ -663,19 +628,25 @@ class SessionManager:
for hashX in touched.intersection(self.mempool_statuses.keys()):
self.mempool_statuses.pop(hashX, None)
touched.intersection_update(self.hashx_subscriptions_by_session.keys())
await asyncio.get_event_loop().run_in_executor(
self.bp._chain_executor, touched.intersection_update, self.hashx_subscriptions_by_session.keys()
)
if touched or (height_changed and self.mempool_statuses):
if touched or new_touched or (height_changed and self.mempool_statuses):
notified_hashxs = 0
notified_sessions = 0
session_hashxes_to_notify = defaultdict(list)
to_notify = touched if height_changed else new_touched
for hashX in to_notify:
if hashX not in self.hashx_subscriptions_by_session:
continue
for session_id in self.hashx_subscriptions_by_session[hashX]:
asyncio.create_task(self.sessions[session_id].send_history_notification(hashX))
notified_sessions += 1
notified_hashxs += 1
if notified_sessions:
self.logger.info(f'notified {notified_sessions} sessions/{notified_hashxs:,d} touched addresses')
session_hashxes_to_notify[session_id].append(hashX)
notified_hashxs += 1
for session_id, hashXes in session_hashxes_to_notify.items():
asyncio.create_task(self.sessions[session_id].send_history_notifications(*hashXes))
if session_hashxes_to_notify:
self.logger.info(f'notified {len(session_hashxes_to_notify)} sessions/{notified_hashxs:,d} touched addresses')
def add_session(self, session):
self.sessions[id(session)] = session
@ -746,16 +717,6 @@ class SessionBase(RPCSession):
def toggle_logging(self):
self.log_me = not self.log_me
def flags(self):
"""Status flags."""
status = self.kind[0]
if self.is_closing():
status += 'C'
if self.log_me:
status += 'L'
status += str(self._concurrency.max_concurrent)
return status
def connection_made(self, transport):
"""Handle an incoming client connection."""
super().connection_made(transport)
@ -810,44 +771,32 @@ class LBRYSessionManager(SessionManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.query_executor = None
self.websocket = None
self.metrics = ServerLoadData()
self.metrics_loop = None
# self.metrics = ServerLoadData()
# self.metrics_loop = None
self.running = False
if self.env.websocket_host is not None and self.env.websocket_port is not None:
self.websocket = AdminWebSocket(self)
async def process_metrics(self):
while self.running:
data = self.metrics.to_json_and_reset({
'sessions': self.session_count(),
'height': self.db.db_height,
})
if self.websocket is not None:
self.websocket.send_message(data)
await asyncio.sleep(1)
# async def process_metrics(self):
# while self.running:
# data = self.metrics.to_json_and_reset({
# 'sessions': self.session_count(),
# 'height': self.db.db_height,
# })
# if self.websocket is not None:
# self.websocket.send_message(data)
# await asyncio.sleep(1)
async def start_other(self):
self.running = True
if self.env.max_query_workers is not None and self.env.max_query_workers == 0:
self.query_executor = ThreadPoolExecutor(max_workers=1)
else:
self.query_executor = ProcessPoolExecutor(
max_workers=self.env.max_query_workers or max(os.cpu_count(), 4)
)
if self.websocket is not None:
await self.websocket.start()
if self.env.track_metrics:
self.metrics_loop = asyncio.create_task(self.process_metrics())
async def stop_other(self):
self.running = False
if self.env.track_metrics:
self.metrics_loop.cancel()
if self.websocket is not None:
await self.websocket.stop()
self.query_executor.shutdown()
class LBRYElectrumX(SessionBase):
@ -887,6 +836,8 @@ class LBRYElectrumX(SessionBase):
'blockchain.transaction.get_height': cls.transaction_get_height,
'blockchain.claimtrie.search': cls.claimtrie_search,
'blockchain.claimtrie.resolve': cls.claimtrie_resolve,
'blockchain.claimtrie.getclaimbyid': cls.claimtrie_getclaimbyid,
# 'blockchain.claimtrie.getclaimsbyids': cls.claimtrie_getclaimsbyids,
'blockchain.block.get_server_height': cls.get_server_height,
'mempool.get_fee_histogram': cls.mempool_compact_histogram,
'blockchain.block.headers': cls.block_headers,
@ -915,8 +866,8 @@ class LBRYElectrumX(SessionBase):
self.protocol_tuple = self.PROTOCOL_MIN
self.protocol_string = None
self.daemon = self.session_mgr.daemon
self.bp: LBRYBlockProcessor = self.session_mgr.bp
self.db: LBRYLevelDB = self.bp.db
self.bp: BlockProcessor = self.session_mgr.bp
self.db: LevelDB = self.bp.db
@classmethod
def protocol_min_max_strings(cls):
@ -939,7 +890,7 @@ class LBRYElectrumX(SessionBase):
'donation_address': env.donation_address,
'daily_fee': env.daily_fee,
'hash_function': 'sha256',
'trending_algorithm': env.trending_algorithms[0]
'trending_algorithm': 'fast_ar'
})
async def server_features_async(self):
@ -956,93 +907,152 @@ class LBRYElectrumX(SessionBase):
def sub_count(self):
return len(self.hashX_subs)
async def send_history_notification(self, hashX):
start = time.perf_counter()
alias = self.hashX_subs[hashX]
if len(alias) == 64:
method = 'blockchain.scripthash.subscribe'
else:
method = 'blockchain.address.subscribe'
try:
self.session_mgr.notifications_in_flight_metric.inc()
status = await self.address_status(hashX)
self.session_mgr.address_history_metric.observe(time.perf_counter() - start)
async def send_history_notifications(self, *hashXes: typing.Iterable[bytes]):
notifications = []
for hashX in hashXes:
alias = self.hashX_subs[hashX]
if len(alias) == 64:
method = 'blockchain.scripthash.subscribe'
else:
method = 'blockchain.address.subscribe'
start = time.perf_counter()
await self.send_notification(method, (alias, status))
db_history = await self.session_mgr.limited_history(hashX)
mempool = self.mempool.transaction_summaries(hashX)
status = ''.join(f'{hash_to_hex_str(tx_hash)}:'
f'{height:d}:'
for tx_hash, height in db_history)
status += ''.join(f'{hash_to_hex_str(tx.hash)}:'
f'{-tx.has_unconfirmed_inputs:d}:'
for tx in mempool)
if status:
status = sha256(status.encode()).hex()
else:
status = None
if mempool:
self.session_mgr.mempool_statuses[hashX] = status
else:
self.session_mgr.mempool_statuses.pop(hashX, None)
self.session_mgr.address_history_metric.observe(time.perf_counter() - start)
notifications.append((method, (alias, status)))
start = time.perf_counter()
self.session_mgr.notifications_in_flight_metric.inc()
for method, args in notifications:
self.NOTIFICATION_COUNT.labels(method=method, version=self.client_version).inc()
try:
await self.send_notifications(
Batch([Notification(method, (alias, status)) for (method, (alias, status)) in notifications])
)
self.session_mgr.notifications_sent_metric.observe(time.perf_counter() - start)
finally:
self.session_mgr.notifications_in_flight_metric.dec()
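The batched path above recomputes each subscribed address's status inline before sending the notifications. As a point of reference, a minimal standalone sketch of that Electrum-style status (a hypothetical helper, not code from this diff): confirmed history entries serialize as "txid:height:", mempool entries as "txid:-1:" or "txid:0:" depending on whether they spend unconfirmed inputs, and the concatenation is hashed with sha256, or left as None when there is no history at all.
from hashlib import sha256
from typing import List, Optional, Tuple
def address_status(db_history: List[Tuple[str, int]],
                   mempool: List[Tuple[str, bool]]) -> Optional[str]:
    # confirmed history: one "txid:height:" entry per transaction, in order
    status = ''.join(f'{tx_hash}:{height:d}:' for tx_hash, height in db_history)
    # mempool: height is -1 if the tx spends unconfirmed inputs, otherwise 0
    status += ''.join(f'{tx_hash}:{-has_unconfirmed_inputs:d}:'
                      for tx_hash, has_unconfirmed_inputs in mempool)
    return sha256(status.encode()).hex() if status else None
# example: one confirmed transaction plus one mempool transaction
print(address_status([('ab' * 32, 1049658)], [('cd' * 32, False)]))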
def get_metrics_or_placeholder_for_api(self, query_name):
""" Do not hold on to a reference to the metrics
returned by this method past an `await` or
you may be working with a stale metrics object.
"""
if self.env.track_metrics:
return self.session_mgr.metrics.for_api(query_name)
else:
return APICallMetrics(query_name)
# def get_metrics_or_placeholder_for_api(self, query_name):
# """ Do not hold on to a reference to the metrics
# returned by this method past an `await` or
# you may be working with a stale metrics object.
# """
# if self.env.track_metrics:
# # return self.session_mgr.metrics.for_api(query_name)
# else:
# return APICallMetrics(query_name)
async def run_in_executor(self, query_name, func, kwargs):
start = time.perf_counter()
try:
self.session_mgr.pending_query_metric.inc()
result = await asyncio.get_running_loop().run_in_executor(
self.session_mgr.query_executor, func, kwargs
)
except asyncio.CancelledError:
raise
except Exception:
log.exception("dear devs, please handle this exception better")
metrics = self.get_metrics_or_placeholder_for_api(query_name)
metrics.query_error(start, {})
self.session_mgr.db_error_metric.inc()
raise RPCError(JSONRPC.INTERNAL_ERROR, 'unknown server error')
else:
if self.env.track_metrics:
metrics = self.get_metrics_or_placeholder_for_api(query_name)
(result, metrics_data) = result
metrics.query_response(start, metrics_data)
return base64.b64encode(result).decode()
finally:
self.session_mgr.pending_query_metric.dec()
self.session_mgr.executor_time_metric.observe(time.perf_counter() - start)
async def run_and_cache_query(self, query_name, kwargs):
start = time.perf_counter()
if isinstance(kwargs, dict):
kwargs['release_time'] = format_release_time(kwargs.get('release_time'))
try:
self.session_mgr.pending_query_metric.inc()
return await self.db.search_index.session_query(query_name, kwargs)
except ConnectionTimeout:
self.session_mgr.interrupt_count_metric.inc()
raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out')
finally:
self.session_mgr.pending_query_metric.dec()
self.session_mgr.executor_time_metric.observe(time.perf_counter() - start)
# async def run_and_cache_query(self, query_name, kwargs):
# start = time.perf_counter()
# if isinstance(kwargs, dict):
# kwargs['release_time'] = format_release_time(kwargs.get('release_time'))
# try:
# self.session_mgr.pending_query_metric.inc()
# return await self.db.search_index.session_query(query_name, kwargs)
# except ConnectionTimeout:
# self.session_mgr.interrupt_count_metric.inc()
# raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out')
# finally:
# self.session_mgr.pending_query_metric.dec()
# self.session_mgr.executor_time_metric.observe(time.perf_counter() - start)
async def mempool_compact_histogram(self):
return self.mempool.compact_fee_histogram()
async def claimtrie_search(self, **kwargs):
if kwargs:
start = time.perf_counter()
if 'release_time' in kwargs:
release_time = kwargs.pop('release_time')
release_times = release_time if isinstance(release_time, list) else [release_time]
try:
return await self.run_and_cache_query('search', kwargs)
except TooManyClaimSearchParametersError as err:
await asyncio.sleep(2)
self.logger.warning("Got an invalid query from %s, for %s with more than %d elements.",
self.peer_address()[0], err.key, err.limit)
return RPCError(1, str(err))
kwargs['release_time'] = [format_release_time(release_time) for release_time in release_times]
except ValueError:
pass
try:
self.session_mgr.pending_query_metric.inc()
if 'channel' in kwargs:
channel_url = kwargs.pop('channel')
_, channel_claim, _, _ = await self.db.resolve(channel_url)
if not channel_claim or isinstance(channel_claim, (ResolveCensoredError, LookupError, ValueError)):
return Outputs.to_base64([], [], 0, None, None)
kwargs['channel_id'] = channel_claim.claim_hash.hex()
return await self.db.search_index.cached_search(kwargs)
except ConnectionTimeout:
self.session_mgr.interrupt_count_metric.inc()
raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out')
except TooManyClaimSearchParametersError as err:
await asyncio.sleep(2)
self.logger.warning("Got an invalid query from %s, for %s with more than %d elements.",
self.peer_address()[0], err.key, err.limit)
return RPCError(1, str(err))
finally:
self.session_mgr.pending_query_metric.dec()
self.session_mgr.executor_time_metric.observe(time.perf_counter() - start)
async def claimtrie_resolve(self, *urls):
if urls:
count = len(urls)
try:
self.session_mgr.urls_to_resolve_count_metric.inc(count)
return await self.run_and_cache_query('resolve', urls)
finally:
self.session_mgr.resolved_url_count_metric.inc(count)
async def _cached_resolve_url(self, url):
if url not in self.bp.resolve_cache:
self.bp.resolve_cache[url] = await self.loop.run_in_executor(None, self.db._resolve, url)
return self.bp.resolve_cache[url]
async def claimtrie_resolve(self, *urls) -> str:
sorted_urls = tuple(sorted(urls))
self.session_mgr.urls_to_resolve_count_metric.inc(len(sorted_urls))
try:
if sorted_urls in self.bp.resolve_outputs_cache:
return self.bp.resolve_outputs_cache[sorted_urls]
rows, extra = [], []
for url in urls:
if url not in self.bp.resolve_cache:
self.bp.resolve_cache[url] = await self._cached_resolve_url(url)
stream, channel, repost, reposted_channel = self.bp.resolve_cache[url]
if isinstance(channel, ResolveCensoredError):
rows.append(channel)
extra.append(channel.censor_row)
elif isinstance(stream, ResolveCensoredError):
rows.append(stream)
extra.append(stream.censor_row)
elif channel and not stream:
rows.append(channel)
# print("resolved channel", channel.name.decode())
if repost:
extra.append(repost)
if reposted_channel:
extra.append(reposted_channel)
elif stream:
# print("resolved stream", stream.name.decode())
rows.append(stream)
if channel:
# print("and channel", channel.name.decode())
extra.append(channel)
if repost:
extra.append(repost)
if reposted_channel:
extra.append(reposted_channel)
await asyncio.sleep(0)
self.bp.resolve_outputs_cache[sorted_urls] = result = await self.loop.run_in_executor(
None, Outputs.to_base64, rows, extra, 0, None, None
)
return result
finally:
self.session_mgr.resolved_url_count_metric.inc(len(sorted_urls))
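A rough sketch of the two-level caching used by claimtrie_resolve above (the names below are illustrative helpers, not the session API): per-URL resolutions are memoized, and the final encoded payload is memoized under the sorted tuple of requested URLs so that any permutation of the same batch reuses a single entry.
from typing import Callable, Dict, List, Tuple
resolve_cache: Dict[str, object] = {}                     # url -> resolved row
resolve_outputs_cache: Dict[Tuple[str, ...], bytes] = {}  # sorted urls -> encoded payload
def cached_batch_resolve(urls: List[str],
                         resolve_one: Callable[[str], object],
                         encode: Callable[[List[object]], bytes]) -> bytes:
    key = tuple(sorted(urls))
    if key in resolve_outputs_cache:
        return resolve_outputs_cache[key]
    rows = []
    for url in urls:
        if url not in resolve_cache:
            resolve_cache[url] = resolve_one(url)  # the expensive db lookup in the real code
        rows.append(resolve_cache[url])
    payload = encode(rows)
    resolve_outputs_cache[key] = payload
    return payload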
async def get_server_height(self):
return self.bp.height
@ -1057,6 +1067,15 @@ class LBRYElectrumX(SessionBase):
return -1
return None
async def claimtrie_getclaimbyid(self, claim_id):
rows = []
extra = []
stream = await self.db.fs_getclaimbyid(claim_id)
if not stream:
stream = LookupError(f"Could not find claim at {claim_id}")
rows.append(stream)
return Outputs.to_base64(rows, extra, 0, None, None)
def assert_tx_hash(self, value):
'''Raise an RPCError if the value is not a valid transaction
hash.'''
@ -1184,9 +1203,11 @@ class LBRYElectrumX(SessionBase):
address: the address to subscribe to"""
if len(addresses) > 1000:
raise RPCError(BAD_REQUEST, f'too many addresses in subscription request: {len(addresses)}')
return [
await self.hashX_subscribe(self.address_to_hashX(address), address) for address in addresses
]
results = []
for address in addresses:
results.append(await self.hashX_subscribe(self.address_to_hashX(address), address))
await asyncio.sleep(0)
return results
async def address_unsubscribe(self, address):
"""Unsubscribe an address.
@ -1435,15 +1456,20 @@ class LBRYElectrumX(SessionBase):
raise RPCError(BAD_REQUEST, f'too many tx hashes in request: {len(tx_hashes)}')
for tx_hash in tx_hashes:
assert_tx_hash(tx_hash)
batch_result = await self.db.fs_transactions(tx_hashes)
batch_result = await self.db.get_transactions_and_merkles(tx_hashes)
needed_merkles = {}
for tx_hash in tx_hashes:
if tx_hash in batch_result and batch_result[tx_hash][0]:
continue
tx_info = await self.daemon_request('getrawtransaction', tx_hash, True)
raw_tx = tx_info['hex']
block_hash = tx_info.get('blockhash')
tx_hash_bytes = bytes.fromhex(tx_hash)[::-1]
mempool_tx = self.mempool.txs.get(tx_hash_bytes, None)
if mempool_tx:
raw_tx, block_hash = mempool_tx.raw_tx.hex(), None
else:
tx_info = await self.daemon_request('getrawtransaction', tx_hash, True)
raw_tx = tx_info['hex']
block_hash = tx_info.get('blockhash')
if block_hash:
block = await self.daemon.deserialised_block(block_hash)
height = block['height']


@ -1,167 +0,0 @@
# Copyright (c) 2016-2017, the ElectrumX authors
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
"""Backend database abstraction."""
import os
from functools import partial
from lbry.wallet.server import util
def db_class(db_dir, name):
"""Returns a DB engine class."""
for db_class in util.subclasses(Storage):
if db_class.__name__.lower() == name.lower():
db_class.import_module()
return partial(db_class, db_dir)
raise RuntimeError(f'unrecognised DB engine "{name}"')
class Storage:
"""Abstract base class of the DB backend abstraction."""
def __init__(self, db_dir, name, for_sync):
self.db_dir = db_dir
self.is_new = not os.path.exists(os.path.join(db_dir, name))
self.for_sync = for_sync or self.is_new
self.open(name, create=self.is_new)
@classmethod
def import_module(cls):
"""Import the DB engine module."""
raise NotImplementedError
def open(self, name, create):
"""Open an existing database or create a new one."""
raise NotImplementedError
def close(self):
"""Close an existing database."""
raise NotImplementedError
def get(self, key):
raise NotImplementedError
def put(self, key, value):
raise NotImplementedError
def write_batch(self):
"""Return a context manager that provides `put` and `delete`.
Changes should only be committed when the context manager
closes without an exception.
"""
raise NotImplementedError
def iterator(self, prefix=b'', reverse=False):
"""Return an iterator that yields (key, value) pairs from the
database sorted by key.
If `prefix` is set, only keys starting with `prefix` will be
included. If `reverse` is True the items are returned in
reverse order.
"""
raise NotImplementedError
class LevelDB(Storage):
"""LevelDB database engine."""
@classmethod
def import_module(cls):
import plyvel
cls.module = plyvel
def open(self, name, create, lru_cache_size=None):
mof = 10000
path = os.path.join(self.db_dir, name)
# Use snappy compression (the default)
self.db = self.module.DB(path, create_if_missing=create, max_open_files=mof)
self.close = self.db.close
self.get = self.db.get
self.put = self.db.put
self.iterator = self.db.iterator
self.write_batch = partial(self.db.write_batch, transaction=True, sync=True)
class RocksDB(Storage):
"""RocksDB database engine."""
@classmethod
def import_module(cls):
import rocksdb
cls.module = rocksdb
def open(self, name, create):
mof = 512 if self.for_sync else 128
path = os.path.join(self.db_dir, name)
# Use snappy compression (the default)
options = self.module.Options(create_if_missing=create,
use_fsync=True,
target_file_size_base=33554432,
max_open_files=mof)
self.db = self.module.DB(path, options)
self.get = self.db.get
self.put = self.db.put
def close(self):
# PyRocksDB doesn't provide a close method; hopefully this is enough
self.db = self.get = self.put = None
import gc
gc.collect()
def write_batch(self):
return RocksDBWriteBatch(self.db)
def iterator(self, prefix=b'', reverse=False):
return RocksDBIterator(self.db, prefix, reverse)
class RocksDBWriteBatch:
"""A write batch for RocksDB."""
def __init__(self, db):
self.batch = RocksDB.module.WriteBatch()
self.db = db
def __enter__(self):
return self.batch
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_val:
self.db.write(self.batch)
class RocksDBIterator:
"""An iterator for RocksDB."""
def __init__(self, db, prefix, reverse):
self.prefix = prefix
if reverse:
self.iterator = reversed(db.iteritems())
nxt_prefix = util.increment_byte_string(prefix)
if nxt_prefix:
self.iterator.seek(nxt_prefix)
try:
next(self.iterator)
except StopIteration:
self.iterator.seek(nxt_prefix)
else:
self.iterator.seek_to_last()
else:
self.iterator = db.iteritems()
self.iterator.seek(prefix)
def __iter__(self):
return self
def __next__(self):
k, v = next(self.iterator)
if not k.startswith(self.prefix):
raise StopIteration
return k, v


@ -1,82 +0,0 @@
import time
from lbry.wallet.server import util
def sessions_lines(data):
"""A generator returning lines for a list of sessions.
data is the return value of rpc_sessions()."""
fmt = ('{:<6} {:<5} {:>17} {:>5} {:>5} {:>5} '
'{:>7} {:>7} {:>7} {:>7} {:>7} {:>9} {:>21}')
yield fmt.format('ID', 'Flags', 'Client', 'Proto',
'Reqs', 'Txs', 'Subs',
'Recv', 'Recv KB', 'Sent', 'Sent KB', 'Time', 'Peer')
for (id_, flags, peer, client, proto, reqs, txs_sent, subs,
recv_count, recv_size, send_count, send_size, time) in data:
yield fmt.format(id_, flags, client, proto,
f'{reqs:,d}',
f'{txs_sent:,d}',
f'{subs:,d}',
f'{recv_count:,d}',
'{:,d}'.format(recv_size // 1024),
f'{send_count:,d}',
'{:,d}'.format(send_size // 1024),
util.formatted_time(time, sep=''), peer)
def groups_lines(data):
"""A generator returning lines for a list of groups.
data is the return value of rpc_groups()."""
fmt = ('{:<6} {:>9} {:>9} {:>6} {:>6} {:>8}'
'{:>7} {:>9} {:>7} {:>9}')
yield fmt.format('ID', 'Sessions', 'Bwidth KB', 'Reqs', 'Txs', 'Subs',
'Recv', 'Recv KB', 'Sent', 'Sent KB')
for (id_, session_count, bandwidth, reqs, txs_sent, subs,
recv_count, recv_size, send_count, send_size) in data:
yield fmt.format(id_,
f'{session_count:,d}',
'{:,d}'.format(bandwidth // 1024),
f'{reqs:,d}',
f'{txs_sent:,d}',
f'{subs:,d}',
f'{recv_count:,d}',
'{:,d}'.format(recv_size // 1024),
f'{send_count:,d}',
'{:,d}'.format(send_size // 1024))
def peers_lines(data):
"""A generator returning lines for a list of peers.
data is the return value of rpc_peers()."""
def time_fmt(t):
if not t:
return 'Never'
return util.formatted_time(now - t)
now = time.time()
fmt = ('{:<30} {:<6} {:>5} {:>5} {:<17} {:>4} '
'{:>4} {:>8} {:>11} {:>11} {:>5} {:>20} {:<15}')
yield fmt.format('Host', 'Status', 'TCP', 'SSL', 'Server', 'Min',
'Max', 'Pruning', 'Last Good', 'Last Try',
'Tries', 'Source', 'IP Address')
for item in data:
features = item['features']
hostname = item['host']
host = features['hosts'][hostname]
yield fmt.format(hostname[:30],
item['status'],
host.get('tcp_port') or '',
host.get('ssl_port') or '',
features['server_version'] or 'unknown',
features['protocol_min'],
features['protocol_max'],
features['pruning'] or '',
time_fmt(item['last_good']),
time_fmt(item['last_try']),
item['try_count'],
item['source'][:20],
item['ip_addr'] or '')


@ -26,7 +26,7 @@
# and warranty status of this software.
"""Transaction-related classes and functions."""
import typing
from collections import namedtuple
from lbry.wallet.server.hash import sha256, double_sha256, hash_to_hex_str
@ -41,11 +41,20 @@ ZERO = bytes(32)
MINUS_1 = 4294967295
class Tx(namedtuple("Tx", "version inputs outputs locktime raw")):
"""Class representing a transaction."""
class Tx(typing.NamedTuple):
version: int
inputs: typing.List['TxInput']
outputs: typing.List['TxOutput']
locktime: int
raw: bytes
class TxInput(namedtuple("TxInput", "prev_hash prev_idx script sequence")):
class TxInput(typing.NamedTuple):
prev_hash: bytes
prev_idx: int
script: bytes
sequence: int
"""Class representing a transaction input."""
def __str__(self):
script = self.script.hex()
@ -65,7 +74,9 @@ class TxInput(namedtuple("TxInput", "prev_hash prev_idx script sequence")):
))
class TxOutput(namedtuple("TxOutput", "value pk_script")):
class TxOutput(typing.NamedTuple):
value: int
pk_script: bytes
def serialize(self):
return b''.join((

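The transaction hunk above swaps collections.namedtuple for typing.NamedTuple. A small hedged illustration of why this is a drop-in change: the annotated class keeps the same tuple behaviour and field names, it just carries type information as well.
import typing
from collections import namedtuple
OldTxOutput = namedtuple("TxOutput", "value pk_script")
class TxOutput(typing.NamedTuple):
    value: int
    pk_script: bytes
old = OldTxOutput(1000, b'\x76\xa9')
new = TxOutput(1000, b'\x76\xa9')
assert tuple(old) == tuple(new) and old._fields == new._fields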

@ -340,7 +340,7 @@ pack_le_int64 = struct_le_q.pack
pack_le_uint16 = struct_le_H.pack
pack_le_uint32 = struct_le_I.pack
pack_be_uint64 = lambda x: x.to_bytes(8, byteorder='big')
pack_be_uint16 = struct_be_H.pack
pack_be_uint16 = lambda x: x.to_bytes(2, byteorder='big')
pack_be_uint32 = struct_be_I.pack
pack_byte = structB.pack
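For the packing change above, a quick sanity-check sketch: int.to_bytes(2, byteorder='big') yields the same bytes as the previous pre-compiled struct packer for every value that fits in 16 bits.
import struct
struct_be_H = struct.Struct('>H')
pack_be_uint16 = lambda x: x.to_bytes(2, byteorder='big')
for value in (0, 1, 0x1234, 0xffff):
    assert struct_be_H.pack(value) == pack_be_uint16(value)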


@ -24,7 +24,7 @@ from lbry.schema.purchase import Purchase
from lbry.schema.support import Support
from .script import InputScript, OutputScript
from .constants import COIN, NULL_HASH32
from .constants import COIN, DUST, NULL_HASH32
from .bcd_data_stream import BCDataStream
from .hash import TXRef, TXRefImmutable
from .util import ReadOnlyList
@ -838,10 +838,10 @@ class Transaction:
)
if payment > cost:
change = payment - cost
if change > cost_of_change:
change_amount = change - cost_of_change
if change_amount > DUST:
change_address = await change_account.change.get_or_create_usable_address()
change_hash160 = change_account.ledger.address_to_hash160(change_address)
change_amount = change - cost_of_change
change_output = Output.pay_pubkey_hash(change_amount, change_hash160)
change_output.is_internal_transfer = True
tx.add_outputs([Output.pay_pubkey_hash(change_amount, change_hash160)])
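A hedged sketch of the change-output rule introduced above (the DUST value here is a placeholder for illustration, not the library's constant): change is returned to the wallet only when the remainder, after subtracting the cost of the change output itself, exceeds the dust threshold; otherwise it is simply left as extra fee.
DUST = 1000  # placeholder threshold, for illustration only
def change_amount_or_none(payment: int, cost: int, cost_of_change: int):
    if payment <= cost:
        return None
    change_amount = (payment - cost) - cost_of_change
    return change_amount if change_amount > DUST else None
assert change_amount_or_none(150_000, 100_000, 5_000) == 45_000
assert change_amount_or_none(100_500, 100_000, 400) is None  # 100 <= DUST, no change output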

112 scripts/dht_node.py Normal file

@ -0,0 +1,112 @@
import asyncio
import argparse
import logging
import csv
import os.path
from io import StringIO
from typing import Optional
from aiohttp import web
from prometheus_client import generate_latest as prom_generate_latest
from lbry.dht.constants import generate_id
from lbry.dht.node import Node
from lbry.dht.peer import PeerManager
from lbry.extras.daemon.storage import SQLiteStorage
from lbry.conf import Config
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
log = logging.getLogger(__name__)
class SimpleMetrics:
def __init__(self, port, node):
self.prometheus_port = port
self.dht_node: Node = node
async def handle_metrics_get_request(self, _):
try:
return web.Response(
text=prom_generate_latest().decode(),
content_type='text/plain; version=0.0.4'
)
except Exception:
log.exception('could not generate prometheus data')
raise
async def handle_peers_csv(self, _):
out = StringIO()
writer = csv.DictWriter(out, fieldnames=["ip", "port", "dht_id"])
writer.writeheader()
for peer in self.dht_node.protocol.routing_table.get_peers():
writer.writerow({"ip": peer.address, "port": peer.udp_port, "dht_id": peer.node_id.hex()})
return web.Response(text=out.getvalue(), content_type='text/csv')
async def handle_blobs_csv(self, _):
out = StringIO()
writer = csv.DictWriter(out, fieldnames=["blob_hash"])
writer.writeheader()
for blob in self.dht_node.protocol.data_store.keys():
writer.writerow({"blob_hash": blob.hex()})
return web.Response(text=out.getvalue(), content_type='text/csv')
async def start(self):
prom_app = web.Application()
prom_app.router.add_get('/metrics', self.handle_metrics_get_request)
if self.dht_node:
prom_app.router.add_get('/peers.csv', self.handle_peers_csv)
prom_app.router.add_get('/blobs.csv', self.handle_blobs_csv)
metrics_runner = web.AppRunner(prom_app)
await metrics_runner.setup()
prom_site = web.TCPSite(metrics_runner, "0.0.0.0", self.prometheus_port)
await prom_site.start()
async def main(host: str, port: int, db_file_path: str, bootstrap_node: Optional[str], prometheus_port: int, export: bool):
loop = asyncio.get_event_loop()
conf = Config()
if not db_file_path.startswith(':memory:'):
node_id_file_path = db_file_path + 'node_id'
if os.path.exists(node_id_file_path):
with open(node_id_file_path, 'rb') as node_id_file:
node_id = node_id_file.read()
else:
with open(node_id_file_path, 'wb') as node_id_file:
node_id = generate_id()
node_id_file.write(node_id)
storage = SQLiteStorage(conf, db_file_path, loop, loop.time)
if bootstrap_node:
nodes = bootstrap_node.split(':')
nodes = [(nodes[0], int(nodes[1]))]
else:
nodes = conf.known_dht_nodes
await storage.open()
node = Node(
loop, PeerManager(loop), node_id, port, port, 3333, None,
storage=storage
)
if prometheus_port > 0:
metrics = SimpleMetrics(prometheus_port, node if export else None)
await metrics.start()
node.start(host, nodes)
log.info("Peer with id %s started", node_id.hex())
while True:
await asyncio.sleep(10)
log.info("Known peers: %d. Storing contact information for %d blobs from %d peers.",
len(node.protocol.routing_table.get_peers()), len(node.protocol.data_store),
len(node.protocol.data_store.get_storing_contacts()))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Starts a single DHT node, which then can be used as a seed node or just a contributing node.")
parser.add_argument("--host", default='0.0.0.0', type=str, help="Host to listen for requests. Default: 0.0.0.0")
parser.add_argument("--port", default=4444, type=int, help="Port to listen for requests. Default: 4444")
parser.add_argument("--db_file", default='/tmp/dht.db', type=str, help="DB file to save peers. Default: /tmp/dht.db")
parser.add_argument("--bootstrap_node", default=None, type=str,
help="Node to connect for bootstraping this node. Leave unset to use the default ones. "
"Format: host:port Example: lbrynet1.lbry.com:4444")
parser.add_argument("--metrics_port", default=0, type=int, help="Port for Prometheus metrics. 0 to disable. Default: 0")
parser.add_argument("--enable_csv_export", action='store_true', help="Enable CSV endpoints on metrics server.")
args = parser.parse_args()
asyncio.run(main(args.host, args.port, args.db_file, args.bootstrap_node, args.metrics_port, args.enable_csv_export))
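Once the new scripts/dht_node.py above is running with --metrics_port and --enable_csv_export, its routing table can be inspected over HTTP. A hedged usage sketch; the host and port below are assumptions for illustration:
import csv
import io
import urllib.request
# peers.csv is served by the SimpleMetrics app when CSV export is enabled
with urllib.request.urlopen("http://127.0.0.1:2112/peers.csv") as response:
    peers = list(csv.DictReader(io.TextIOWrapper(response, encoding="utf-8")))
for peer in peers[:5]:
    print(peer["ip"], peer["port"], peer["dht_id"][:16])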


@ -0,0 +1,33 @@
#!/bin/bash
SNAPSHOT_HEIGHT="1049658"
HUB_VOLUME_PATH="/var/lib/docker/volumes/${USER}_wallet_server"
ES_VOLUME_PATH="/var/lib/docker/volumes/${USER}_es01"
SNAPSHOT_TAR_NAME="wallet_server_snapshot_${SNAPSHOT_HEIGHT}.tar"
ES_SNAPSHOT_TAR_NAME="es_snapshot_${SNAPSHOT_HEIGHT}.tar"
SNAPSHOT_URL="https://snapshots.lbry.com/hub/${SNAPSHOT_TAR_NAME}"
ES_SNAPSHOT_URL="https://snapshots.lbry.com/hub/${ES_SNAPSHOT_TAR_NAME}"
echo "fetching wallet server snapshot"
wget $SNAPSHOT_URL
echo "decompressing wallet server snapshot"
tar -xf $SNAPSHOT_TAR_NAME
sudo mkdir -p $HUB_VOLUME_PATH
sudo rm -rf "${HUB_VOLUME_PATH}/_data"
sudo chown -R 999:999 "snapshot_${SNAPSHOT_HEIGHT}"
sudo mv "snapshot_${SNAPSHOT_HEIGHT}" "${HUB_VOLUME_PATH}/_data"
echo "finished setting up wallet server snapshot"
echo "fetching elasticsearch snapshot"
wget $ES_SNAPSHOT_URL
echo "decompressing elasticsearch snapshot"
tar -xf $ES_SNAPSHOT_TAR_NAME
sudo chown -R $USER:root "snapshot_es_${SNAPSHOT_HEIGHT}"
sudo chmod -R 775 "snapshot_es_${SNAPSHOT_HEIGHT}"
sudo mkdir -p $ES_VOLUME_PATH
sudo rm -rf "${ES_VOLUME_PATH}/_data"
sudo mv "snapshot_es_${SNAPSHOT_HEIGHT}" "${ES_VOLUME_PATH}/_data"
echo "finished setting up elasticsearch snapshot"


@ -6,7 +6,8 @@ source =
lbry
.tox/*/lib/python*/site-packages/lbry
omit =
lbry/wallet/orchstr8/*
lbry/wallet/orchstr8/
.tox/*/lib/python*/site-packages/lbry/wallet/orchstr8/node.py
[cryptography.*,coincurve.*,pbkdf2, libtorrent]
ignore_missing_imports = True


@ -9,7 +9,7 @@ with open(os.path.join(BASE, 'README.md'), encoding='utf-8') as fh:
PLYVEL = []
if sys.platform.startswith('linux'):
PLYVEL.append('plyvel==1.0.5')
PLYVEL.append('plyvel==1.3.0')
setup(
name=__name__,
@ -56,7 +56,8 @@ setup(
'attrs==18.2.0',
'pylru==1.1.0',
'elasticsearch==7.10.1',
'grpcio==1.38.0'
'grpcio==1.38.0',
'filetype==1.0.9'
] + PLYVEL,
extras_require={
'torrent': ['lbry-libtorrent'],


@ -9,7 +9,7 @@ if typing.TYPE_CHECKING:
def get_time_accelerator(loop: asyncio.AbstractEventLoop,
now: typing.Optional[float] = None) -> typing.Callable[[float], typing.Awaitable[None]]:
instant_step: bool = False) -> typing.Callable[[float], typing.Awaitable[None]]:
"""
Returns an async advance() function
@ -17,32 +17,22 @@ def get_time_accelerator(loop: asyncio.AbstractEventLoop,
made by call_later, call_at, and call_soon.
"""
_time = now or loop.time()
loop.time = functools.wraps(loop.time)(lambda: _time)
original = loop.time
_drift = 0
loop.time = functools.wraps(loop.time)(lambda: original() + _drift)
async def accelerate_time(seconds: float) -> None:
nonlocal _time
nonlocal _drift
if seconds < 0:
raise ValueError(f'Cannot go back in time ({seconds} seconds)')
_time += seconds
await past_events()
_drift += seconds
await asyncio.sleep(0)
async def past_events() -> None:
while loop._scheduled:
timer: asyncio.TimerHandle = loop._scheduled[0]
if timer not in loop._ready and timer._when <= _time:
loop._scheduled.remove(timer)
loop._ready.append(timer)
if timer._when > _time:
break
await asyncio.sleep(0)
async def accelerator(seconds: float):
steps = seconds * 10.0
steps = seconds * 10.0 if not instant_step else 1
for _ in range(max(int(steps), 1)):
await accelerate_time(0.1)
await accelerate_time(seconds/steps)
return accelerator
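The accelerator change above stops rewriting the loop's scheduled-timer queue and instead adds a drift to loop.time(), letting the event loop's own scheduling fire timers that have become due. A minimal standalone sketch of that mechanism (not the test helper itself):
import asyncio
async def demo():
    loop = asyncio.get_running_loop()
    original_time = loop.time
    drift = 0.0
    loop.time = lambda: original_time() + drift  # monkeypatch the clock
    fired = asyncio.Event()
    loop.call_later(10, fired.set)  # would normally wait 10 real seconds
    drift += 10                     # "advance" time instead of sleeping
    await fired.wait()              # the timer is now due and fires immediately
    loop.time = original_time       # restore the real clock
asyncio.run(demo())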


@ -22,8 +22,8 @@ class BlockchainReorganizationTests(CommandTestCase):
self.assertEqual(block_hash, (await self.ledger.headers.hash(height)).decode())
self.assertEqual(block_hash, (await bp.db.fs_block_hashes(height, 1))[0][::-1].hex())
txids = await asyncio.get_event_loop().run_in_executor(bp.db.executor, get_txids)
txs = await bp.db.fs_transactions(txids)
txids = await asyncio.get_event_loop().run_in_executor(None, get_txids)
txs = await bp.db.get_transactions_and_merkles(txids)
block_txs = (await bp.daemon.deserialised_block(block_hash))['tx']
self.assertSetEqual(set(block_txs), set(txs.keys()), msg='leveldb/lbrycrd is missing transactions')
self.assertListEqual(block_txs, list(txs.keys()), msg='leveldb/lbrycrd transactions are of order')
@ -57,11 +57,29 @@ class BlockchainReorganizationTests(CommandTestCase):
await self.assertBlockHash(209)
await self.assertBlockHash(210)
await self.assertBlockHash(211)
still_valid = await self.daemon.jsonrpc_stream_create(
'still-valid', '1.0', file_path=self.create_upload_file(data=b'hi!')
)
await self.ledger.wait(still_valid)
await self.blockchain.generate(1)
await self.ledger.on_header.where(lambda e: e.height == 212)
claim_id = still_valid.outputs[0].claim_id
c1 = (await self.resolve(f'still-valid#{claim_id}'))['claim_id']
c2 = (await self.resolve(f'still-valid#{claim_id[:2]}'))['claim_id']
c3 = (await self.resolve(f'still-valid'))['claim_id']
self.assertTrue(c1 == c2 == c3)
abandon_tx = await self.daemon.jsonrpc_stream_abandon(claim_id=claim_id)
await self.blockchain.generate(1)
await self.ledger.on_header.where(lambda e: e.height == 213)
c1 = await self.resolve(f'still-valid#{still_valid.outputs[0].claim_id}')
c2 = await self.daemon.jsonrpc_resolve([f'still-valid#{claim_id[:2]}'])
c3 = await self.daemon.jsonrpc_resolve([f'still-valid'])
async def test_reorg_change_claim_height(self):
# sanity check
txos, _, _, _ = await self.ledger.claim_search([], name='hovercraft')
self.assertListEqual(txos, [])
result = await self.resolve('hovercraft') # TODO: do these for claim_search and resolve both
self.assertIn('error', result)
still_valid = await self.daemon.jsonrpc_stream_create(
'still-valid', '1.0', file_path=self.create_upload_file(data=b'hi!')
@ -82,17 +100,15 @@ class BlockchainReorganizationTests(CommandTestCase):
self.assertEqual(self.ledger.headers.height, 208)
await self.assertBlockHash(208)
txos, _, _, _ = await self.ledger.claim_search([], name='hovercraft')
self.assertEqual(1, len(txos))
txo = txos[0]
self.assertEqual(txo.tx_ref.id, broadcast_tx.id)
self.assertEqual(txo.tx_ref.height, 208)
claim = await self.resolve('hovercraft')
self.assertEqual(claim['txid'], broadcast_tx.id)
self.assertEqual(claim['height'], 208)
# check that our tx is in block 208 as returned by lbrycrdd
invalidated_block_hash = (await self.ledger.headers.hash(208)).decode()
block_207 = await self.blockchain.get_block(invalidated_block_hash)
self.assertIn(txo.tx_ref.id, block_207['tx'])
self.assertEqual(208, txos[0].tx_ref.height)
self.assertIn(claim['txid'], block_207['tx'])
self.assertEqual(208, claim['height'])
# reorg the last block dropping our claim tx
await self.blockchain.invalidate_block(invalidated_block_hash)
@ -109,11 +125,20 @@ class BlockchainReorganizationTests(CommandTestCase):
reorg_block_hash = await self.blockchain.get_block_hash(208)
self.assertNotEqual(invalidated_block_hash, reorg_block_hash)
block_207 = await self.blockchain.get_block(reorg_block_hash)
self.assertNotIn(txo.tx_ref.id, block_207['tx'])
self.assertNotIn(claim['txid'], block_207['tx'])
client_reorg_block_hash = (await self.ledger.headers.hash(208)).decode()
self.assertEqual(client_reorg_block_hash, reorg_block_hash)
# verify the dropped claim is no longer returned by claim search
self.assertDictEqual(
{'error': {'name': 'NOT_FOUND', 'text': 'Could not find claim at "hovercraft".'}},
await self.resolve('hovercraft')
)
# verify the claim published a block earlier wasn't also reverted
self.assertEqual(207, (await self.resolve('still-valid'))['height'])
# broadcast the claim in a different block
new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode())
self.assertEqual(broadcast_tx.id, new_txid)
@ -123,14 +148,88 @@ class BlockchainReorganizationTests(CommandTestCase):
await asyncio.wait_for(self.on_header(210), 1.0)
# verify the claim is in the new block and that it is returned by claim_search
block_210 = await self.blockchain.get_block((await self.ledger.headers.hash(210)).decode())
self.assertIn(txo.tx_ref.id, block_210['tx'])
txos, _, _, _ = await self.ledger.claim_search([], name='hovercraft')
self.assertEqual(1, len(txos))
self.assertEqual(txos[0].tx_ref.id, new_txid)
self.assertEqual(210, txos[0].tx_ref.height)
republished = await self.resolve('hovercraft')
self.assertEqual(210, republished['height'])
self.assertEqual(claim['claim_id'], republished['claim_id'])
# this should still be unchanged
txos, _, _, _ = await self.ledger.claim_search([], name='still-valid')
self.assertEqual(1, len(txos))
self.assertEqual(207, txos[0].tx_ref.height)
self.assertEqual(207, (await self.resolve('still-valid'))['height'])
async def test_reorg_drop_claim(self):
# sanity check
result = await self.resolve('hovercraft') # TODO: do these for claim_search and resolve both
self.assertIn('error', result)
still_valid = await self.daemon.jsonrpc_stream_create(
'still-valid', '1.0', file_path=self.create_upload_file(data=b'hi!')
)
await self.ledger.wait(still_valid)
await self.generate(1)
# create a claim and verify it's returned by claim_search
self.assertEqual(self.ledger.headers.height, 207)
await self.assertBlockHash(207)
broadcast_tx = await self.daemon.jsonrpc_stream_create(
'hovercraft', '1.0', file_path=self.create_upload_file(data=b'hi!')
)
await self.ledger.wait(broadcast_tx)
await self.generate(1)
await self.ledger.wait(broadcast_tx, self.blockchain.block_expected)
self.assertEqual(self.ledger.headers.height, 208)
await self.assertBlockHash(208)
claim = await self.resolve('hovercraft')
self.assertEqual(claim['txid'], broadcast_tx.id)
self.assertEqual(claim['height'], 208)
# check that our tx is in block 208 as returned by lbrycrdd
invalidated_block_hash = (await self.ledger.headers.hash(208)).decode()
block_207 = await self.blockchain.get_block(invalidated_block_hash)
self.assertIn(claim['txid'], block_207['tx'])
self.assertEqual(208, claim['height'])
# reorg the last block dropping our claim tx
await self.blockchain.invalidate_block(invalidated_block_hash)
await self.blockchain.clear_mempool()
await self.blockchain.generate(2)
# wait for the client to catch up and verify the reorg
await asyncio.wait_for(self.on_header(209), 3.0)
await self.assertBlockHash(207)
await self.assertBlockHash(208)
await self.assertBlockHash(209)
# verify the claim was dropped from block 208 as returned by lbrycrdd
reorg_block_hash = await self.blockchain.get_block_hash(208)
self.assertNotEqual(invalidated_block_hash, reorg_block_hash)
block_207 = await self.blockchain.get_block(reorg_block_hash)
self.assertNotIn(claim['txid'], block_207['tx'])
client_reorg_block_hash = (await self.ledger.headers.hash(208)).decode()
self.assertEqual(client_reorg_block_hash, reorg_block_hash)
# verify the dropped claim is no longer returned by claim search
self.assertDictEqual(
{'error': {'name': 'NOT_FOUND', 'text': 'Could not find claim at "hovercraft".'}},
await self.resolve('hovercraft')
)
# verify the claim published a block earlier wasn't also reverted
self.assertEqual(207, (await self.resolve('still-valid'))['height'])
# broadcast the claim in a different block
new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode())
self.assertEqual(broadcast_tx.id, new_txid)
await self.blockchain.generate(1)
# wait for the client to catch up
await asyncio.wait_for(self.on_header(210), 1.0)
# verify the claim is in the new block and that it is returned by claim_search
republished = await self.resolve('hovercraft')
self.assertEqual(210, republished['height'])
self.assertEqual(claim['claim_id'], republished['claim_id'])
# this should still be unchanged
self.assertEqual(207, (await self.resolve('still-valid'))['height'])


@ -33,7 +33,7 @@ class NetworkTests(IntegrationTestCase):
'donation_address': '',
'daily_fee': '0',
'server_version': lbry.__version__,
'trending_algorithm': 'zscore',
'trending_algorithm': 'fast_ar',
}, await self.ledger.network.get_server_features())
# await self.conductor.spv_node.stop()
payment_address, donation_address = await self.account.get_addresses(limit=2)
@ -58,7 +58,7 @@ class NetworkTests(IntegrationTestCase):
'donation_address': donation_address,
'daily_fee': '42',
'server_version': lbry.__version__,
'trending_algorithm': 'zscore',
'trending_algorithm': 'fast_ar',
}, await self.ledger.network.get_server_features())
@ -176,10 +176,19 @@ class UDPServerFailDiscoveryTest(AsyncioTestCase):
class ServerPickingTestCase(AsyncioTestCase):
async def _make_udp_server(self, port):
async def _make_udp_server(self, port, latency) -> StatusServer:
s = StatusServer()
await s.start(0, b'\x00' * 32, '127.0.0.1', port)
await s.start(0, b'\x00' * 32, 'US', '127.0.0.1', port, True)
s.set_available()
sendto = s._protocol.transport.sendto
def mock_sendto(data, addr):
self.loop.call_later(latency, sendto, data, addr)
s._protocol.transport.sendto = mock_sendto
self.addCleanup(s.stop)
return s
async def _make_fake_server(self, latency=1.0, port=1):
# local fake server with artificial latency
@ -191,23 +200,24 @@ class ServerPickingTestCase(AsyncioTestCase):
return {'height': 1}
server = await self.loop.create_server(lambda: FakeSession(), host='127.0.0.1', port=port)
self.addCleanup(server.close)
await self._make_udp_server(port)
await self._make_udp_server(port, latency)
return '127.0.0.1', port
async def _make_bad_server(self, port=42420):
async def echo(reader, writer):
while True:
writer.write(await reader.read())
server = await asyncio.start_server(echo, host='127.0.0.1', port=port)
self.addCleanup(server.close)
await self._make_udp_server(port)
await self._make_udp_server(port, 0)
return '127.0.0.1', port
async def _test_pick_fastest(self):
async def test_pick_fastest(self):
ledger = Mock(config={
'default_servers': [
# fast but unhealthy, should be discarded
await self._make_bad_server(),
# await self._make_bad_server(),
('localhost', 1),
('example.that.doesnt.resolve', 9000),
await self._make_fake_server(latency=1.0, port=1340),
@ -223,7 +233,7 @@ class ServerPickingTestCase(AsyncioTestCase):
await asyncio.wait_for(network.on_connected.first, timeout=10)
self.assertTrue(network.is_connected)
self.assertTupleEqual(network.client.server, ('127.0.0.1', 1337))
self.assertTrue(all([not session.is_closing() for session in network.session_pool.available_sessions]))
# self.assertTrue(all([not session.is_closing() for session in network.session_pool.available_sessions]))
# ensure we are connected to all of them after a while
await asyncio.sleep(1)
self.assertEqual(len(list(network.session_pool.available_sessions)), 3)
# await asyncio.sleep(1)
# self.assertEqual(len(list(network.session_pool.available_sessions)), 3)


@ -1,410 +0,0 @@
import asyncio
import json
import hashlib
from binascii import hexlify, unhexlify
from lbry.testcase import CommandTestCase
from lbry.wallet.transaction import Transaction, Output
from lbry.schema.compat import OldClaimMessage
from lbry.crypto.hash import sha256
from lbry.crypto.base58 import Base58
class BaseResolveTestCase(CommandTestCase):
async def assertResolvesToClaimId(self, name, claim_id):
other = await self.resolve(name)
if claim_id is None:
self.assertIn('error', other)
self.assertEqual(other['error']['name'], 'NOT_FOUND')
else:
self.assertEqual(claim_id, other['claim_id'])
class ResolveCommand(BaseResolveTestCase):
async def test_resolve_response(self):
channel_id = self.get_claim_id(
await self.channel_create('@abc', '0.01')
)
# resolving a channel @abc
response = await self.resolve('lbry://@abc')
self.assertEqual(response['name'], '@abc')
self.assertEqual(response['value_type'], 'channel')
self.assertEqual(response['meta']['claims_in_channel'], 0)
await self.stream_create('foo', '0.01', channel_id=channel_id)
await self.stream_create('foo2', '0.01', channel_id=channel_id)
# resolving a channel @abc with some claims in it
response['confirmations'] += 2
response['meta']['claims_in_channel'] = 2
self.assertEqual(response, await self.resolve('lbry://@abc'))
# resolving claim foo within channel @abc
claim = await self.resolve('lbry://@abc/foo')
self.assertEqual(claim['name'], 'foo')
self.assertEqual(claim['value_type'], 'stream')
self.assertEqual(claim['signing_channel']['name'], '@abc')
self.assertTrue(claim['is_channel_signature_valid'])
self.assertEqual(
claim['timestamp'],
self.ledger.headers.estimated_timestamp(claim['height'])
)
self.assertEqual(
claim['signing_channel']['timestamp'],
self.ledger.headers.estimated_timestamp(claim['signing_channel']['height'])
)
# resolving claim foo by itself
self.assertEqual(claim, await self.resolve('lbry://foo'))
# resolving from the given permanent url
self.assertEqual(claim, await self.resolve(claim['permanent_url']))
# resolving multiple at once
response = await self.out(self.daemon.jsonrpc_resolve(['lbry://foo', 'lbry://foo2']))
self.assertSetEqual({'lbry://foo', 'lbry://foo2'}, set(response))
claim = response['lbry://foo2']
self.assertEqual(claim['name'], 'foo2')
self.assertEqual(claim['value_type'], 'stream')
self.assertEqual(claim['signing_channel']['name'], '@abc')
self.assertTrue(claim['is_channel_signature_valid'])
# resolve has correct confirmations
tx_details = await self.blockchain.get_raw_transaction(claim['txid'])
self.assertEqual(claim['confirmations'], json.loads(tx_details)['confirmations'])
# resolve handles invalid data
await self.blockchain_claim_name("gibberish", hexlify(b"{'invalid':'json'}").decode(), "0.1")
await self.generate(1)
response = await self.out(self.daemon.jsonrpc_resolve("lbry://gibberish"))
self.assertSetEqual({'lbry://gibberish'}, set(response))
claim = response['lbry://gibberish']
self.assertEqual(claim['name'], 'gibberish')
self.assertNotIn('value', claim)
# resolve retries
await self.conductor.spv_node.stop()
resolve_task = asyncio.create_task(self.resolve('foo'))
await self.conductor.spv_node.start(self.conductor.blockchain_node)
self.assertIsNotNone((await resolve_task)['claim_id'])
async def test_winning_by_effective_amount(self):
# first one remains winner unless something else changes
claim_id1 = self.get_claim_id(
await self.channel_create('@foo', allow_duplicate_name=True))
await self.assertResolvesToClaimId('@foo', claim_id1)
claim_id2 = self.get_claim_id(
await self.channel_create('@foo', allow_duplicate_name=True))
await self.assertResolvesToClaimId('@foo', claim_id1)
claim_id3 = self.get_claim_id(
await self.channel_create('@foo', allow_duplicate_name=True))
await self.assertResolvesToClaimId('@foo', claim_id1)
# supports change the winner
await self.support_create(claim_id3, '0.09')
await self.assertResolvesToClaimId('@foo', claim_id3)
await self.support_create(claim_id2, '0.19')
await self.assertResolvesToClaimId('@foo', claim_id2)
await self.support_create(claim_id1, '0.29')
await self.assertResolvesToClaimId('@foo', claim_id1)
async def test_advanced_resolve(self):
claim_id1 = self.get_claim_id(
await self.stream_create('foo', '0.7', allow_duplicate_name=True))
claim_id2 = self.get_claim_id(
await self.stream_create('foo', '0.8', allow_duplicate_name=True))
claim_id3 = self.get_claim_id(
await self.stream_create('foo', '0.9', allow_duplicate_name=True))
# plain winning claim
await self.assertResolvesToClaimId('foo', claim_id3)
# amount order resolution
await self.assertResolvesToClaimId('foo$1', claim_id3)
await self.assertResolvesToClaimId('foo$2', claim_id2)
await self.assertResolvesToClaimId('foo$3', claim_id1)
await self.assertResolvesToClaimId('foo$4', None)
async def test_partial_claim_id_resolve(self):
# add some noise
await self.channel_create('@abc', '0.1', allow_duplicate_name=True)
await self.channel_create('@abc', '0.2', allow_duplicate_name=True)
await self.channel_create('@abc', '1.0', allow_duplicate_name=True)
channel_id = self.get_claim_id(
await self.channel_create('@abc', '1.1', allow_duplicate_name=True))
await self.assertResolvesToClaimId(f'@abc', channel_id)
await self.assertResolvesToClaimId(f'@abc#{channel_id[:10]}', channel_id)
await self.assertResolvesToClaimId(f'@abc#{channel_id}', channel_id)
channel = (await self.claim_search(claim_id=channel_id))[0]
await self.assertResolvesToClaimId(channel['short_url'], channel_id)
await self.assertResolvesToClaimId(channel['canonical_url'], channel_id)
await self.assertResolvesToClaimId(channel['permanent_url'], channel_id)
# add some noise
await self.stream_create('foo', '0.1', allow_duplicate_name=True, channel_id=channel['claim_id'])
await self.stream_create('foo', '0.2', allow_duplicate_name=True, channel_id=channel['claim_id'])
await self.stream_create('foo', '0.3', allow_duplicate_name=True, channel_id=channel['claim_id'])
claim_id1 = self.get_claim_id(
await self.stream_create('foo', '0.7', allow_duplicate_name=True, channel_id=channel['claim_id']))
claim1 = (await self.claim_search(claim_id=claim_id1))[0]
await self.assertResolvesToClaimId('foo', claim_id1)
await self.assertResolvesToClaimId('@abc/foo', claim_id1)
await self.assertResolvesToClaimId(claim1['short_url'], claim_id1)
await self.assertResolvesToClaimId(claim1['canonical_url'], claim_id1)
await self.assertResolvesToClaimId(claim1['permanent_url'], claim_id1)
claim_id2 = self.get_claim_id(
await self.stream_create('foo', '0.8', allow_duplicate_name=True, channel_id=channel['claim_id']))
claim2 = (await self.claim_search(claim_id=claim_id2))[0]
await self.assertResolvesToClaimId('foo', claim_id2)
await self.assertResolvesToClaimId('@abc/foo', claim_id2)
await self.assertResolvesToClaimId(claim2['short_url'], claim_id2)
await self.assertResolvesToClaimId(claim2['canonical_url'], claim_id2)
await self.assertResolvesToClaimId(claim2['permanent_url'], claim_id2)
async def test_abandoned_channel_with_signed_claims(self):
channel = (await self.channel_create('@abc', '1.0'))['outputs'][0]
orphan_claim = await self.stream_create('on-channel-claim', '0.0001', channel_id=channel['claim_id'])
abandoned_channel_id = channel['claim_id']
await self.channel_abandon(txid=channel['txid'], nout=0)
channel = (await self.channel_create('@abc', '1.0'))['outputs'][0]
orphan_claim_id = self.get_claim_id(orphan_claim)
# Original channel doesn't exists anymore, so the signature is invalid. For invalid signatures, resolution is
# only possible outside a channel
self.assertEqual(
{'error': {
'name': 'NOT_FOUND',
'text': 'Could not find claim at "lbry://@abc/on-channel-claim".',
}},
await self.resolve('lbry://@abc/on-channel-claim')
)
response = await self.resolve('lbry://on-channel-claim')
self.assertFalse(response['is_channel_signature_valid'])
self.assertEqual({'channel_id': abandoned_channel_id}, response['signing_channel'])
direct_uri = 'lbry://on-channel-claim#' + orphan_claim_id
response = await self.resolve(direct_uri)
self.assertFalse(response['is_channel_signature_valid'])
self.assertEqual({'channel_id': abandoned_channel_id}, response['signing_channel'])
await self.stream_abandon(claim_id=orphan_claim_id)
uri = 'lbry://@abc/on-channel-claim'
# now, claim something on this channel (it will update the invalid claim, but we save and forcefully restore)
valid_claim = await self.stream_create('on-channel-claim', '0.00000001', channel_id=channel['claim_id'])
# resolves normally
response = await self.resolve(uri)
self.assertTrue(response['is_channel_signature_valid'])
# ooops! claimed a valid conflict! (this happens on the wild, mostly by accident or race condition)
await self.stream_create(
'on-channel-claim', '0.00000001', channel_id=channel['claim_id'], allow_duplicate_name=True
)
# it still resolves! but to the older claim
response = await self.resolve(uri)
self.assertTrue(response['is_channel_signature_valid'])
self.assertEqual(response['txid'], valid_claim['txid'])
claims = await self.claim_search(name='on-channel-claim')
self.assertEqual(2, len(claims))
self.assertEqual(
{channel['claim_id']}, {claim['signing_channel']['claim_id'] for claim in claims}
)
async def test_normalization_resolution(self):
one = 'ΣίσυφοςfiÆ'
two = 'ΣΊΣΥΦΟσFIæ'
_ = await self.stream_create(one, '0.1')
c = await self.stream_create(two, '0.2')
winner_id = self.get_claim_id(c)
r1 = await self.resolve(f'lbry://{one}')
r2 = await self.resolve(f'lbry://{two}')
self.assertEqual(winner_id, r1['claim_id'])
self.assertEqual(winner_id, r2['claim_id'])
async def test_resolve_old_claim(self):
channel = await self.daemon.jsonrpc_channel_create('@olds', '1.0')
await self.confirm_tx(channel.id)
address = channel.outputs[0].get_address(self.account.ledger)
claim = generate_signed_legacy(address, channel.outputs[0])
tx = await Transaction.claim_create('example', claim.SerializeToString(), 1, address, [self.account], self.account)
await tx.sign([self.account])
await self.broadcast(tx)
await self.confirm_tx(tx.id)
response = await self.resolve('@olds/example')
self.assertTrue(response['is_channel_signature_valid'])
claim.publisherSignature.signature = bytes(reversed(claim.publisherSignature.signature))
tx = await Transaction.claim_create(
'bad_example', claim.SerializeToString(), 1, address, [self.account], self.account
)
await tx.sign([self.account])
await self.broadcast(tx)
await self.confirm_tx(tx.id)
response = await self.resolve('bad_example')
self.assertFalse(response['is_channel_signature_valid'])
self.assertEqual(
{'error': {
'name': 'NOT_FOUND',
'text': 'Could not find claim at "@olds/bad_example".',
}},
await self.resolve('@olds/bad_example')
)
async def test_resolve_with_includes(self):
wallet2 = await self.daemon.jsonrpc_wallet_create('wallet2', create_account=True)
address2 = await self.daemon.jsonrpc_address_unused(wallet_id=wallet2.id)
await self.wallet_send('1.0', address2)
stream = await self.stream_create(
'priced', '0.1', wallet_id=wallet2.id,
fee_amount='0.5', fee_currency='LBC', fee_address=address2
)
stream_id = self.get_claim_id(stream)
resolve = await self.resolve('priced')
self.assertNotIn('is_my_output', resolve)
self.assertNotIn('purchase_receipt', resolve)
self.assertNotIn('sent_supports', resolve)
self.assertNotIn('sent_tips', resolve)
self.assertNotIn('received_tips', resolve)
# is_my_output
resolve = await self.resolve('priced', include_is_my_output=True)
self.assertFalse(resolve['is_my_output'])
resolve = await self.resolve('priced', wallet_id=wallet2.id, include_is_my_output=True)
self.assertTrue(resolve['is_my_output'])
# purchase receipt
resolve = await self.resolve('priced', include_purchase_receipt=True)
self.assertNotIn('purchase_receipt', resolve)
await self.purchase_create(stream_id)
resolve = await self.resolve('priced', include_purchase_receipt=True)
self.assertEqual('0.5', resolve['purchase_receipt']['amount'])
# my supports and my tips
resolve = await self.resolve(
'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True
)
self.assertEqual('0.0', resolve['sent_supports'])
self.assertEqual('0.0', resolve['sent_tips'])
self.assertEqual('0.0', resolve['received_tips'])
await self.support_create(stream_id, '0.3')
await self.support_create(stream_id, '0.2')
await self.support_create(stream_id, '0.4', tip=True)
await self.support_create(stream_id, '0.5', tip=True)
resolve = await self.resolve(
'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True
)
self.assertEqual('0.5', resolve['sent_supports'])
self.assertEqual('0.9', resolve['sent_tips'])
self.assertEqual('0.0', resolve['received_tips'])
resolve = await self.resolve(
'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True,
wallet_id=wallet2.id
)
self.assertEqual('0.0', resolve['sent_supports'])
self.assertEqual('0.0', resolve['sent_tips'])
self.assertEqual('0.9', resolve['received_tips'])
self.assertEqual('1.4', resolve['meta']['support_amount'])
# make sure nothing is leaked between wallets through cached tx/txos
resolve = await self.resolve('priced')
self.assertNotIn('is_my_output', resolve)
self.assertNotIn('purchase_receipt', resolve)
self.assertNotIn('sent_supports', resolve)
self.assertNotIn('sent_tips', resolve)
self.assertNotIn('received_tips', resolve)
class ResolveAfterReorg(BaseResolveTestCase):
async def reorg(self, start):
blocks = self.ledger.headers.height - start
self.blockchain.block_expected = start - 1
# go back to start
await self.blockchain.invalidate_block((await self.ledger.headers.hash(start)).decode())
# go to previous + 1
await self.generate(blocks + 2)
async def test_reorg(self):
self.assertEqual(self.ledger.headers.height, 206)
channel_name = '@abc'
channel_id = self.get_claim_id(
await self.channel_create(channel_name, '0.01')
)
self.assertNotIn('error', await self.resolve(channel_name))
await self.reorg(206)
self.assertNotIn('error', await self.resolve(channel_name))
stream_name = 'foo'
stream_id = self.get_claim_id(
await self.stream_create(stream_name, '0.01', channel_id=channel_id)
)
self.assertNotIn('error', await self.resolve(stream_name))
await self.reorg(206)
self.assertNotIn('error', await self.resolve(stream_name))
await self.support_create(stream_id, '0.01')
self.assertNotIn('error', await self.resolve(stream_name))
await self.reorg(206)
self.assertNotIn('error', await self.resolve(stream_name))
await self.stream_abandon(stream_id)
self.assertNotIn('error', await self.resolve(channel_name))
self.assertIn('error', await self.resolve(stream_name))
await self.reorg(206)
self.assertNotIn('error', await self.resolve(channel_name))
self.assertIn('error', await self.resolve(stream_name))
await self.channel_abandon(channel_id)
self.assertIn('error', await self.resolve(channel_name))
self.assertIn('error', await self.resolve(stream_name))
await self.reorg(206)
self.assertIn('error', await self.resolve(channel_name))
self.assertIn('error', await self.resolve(stream_name))
def generate_signed_legacy(address: bytes, output: Output):
decoded_address = Base58.decode(address)
claim = OldClaimMessage()
claim.ParseFromString(unhexlify(
'080110011aee04080112a604080410011a2b4865726520617265203520526561736f6e73204920e29da4e'
'fb88f204e657874636c6f7564207c20544c4722920346696e64206f7574206d6f72652061626f7574204e'
'657874636c6f75643a2068747470733a2f2f6e657874636c6f75642e636f6d2f0a0a596f752063616e206'
'6696e64206d65206f6e20746865736520736f6369616c733a0a202a20466f72756d733a2068747470733a'
'2f2f666f72756d2e6865617679656c656d656e742e696f2f0a202a20506f64636173743a2068747470733'
'a2f2f6f6666746f706963616c2e6e65740a202a2050617472656f6e3a2068747470733a2f2f7061747265'
'6f6e2e636f6d2f7468656c696e757867616d65720a202a204d657263683a2068747470733a2f2f7465657'
'37072696e672e636f6d2f73746f7265732f6f6666696369616c2d6c696e75782d67616d65720a202a2054'
'77697463683a2068747470733a2f2f7477697463682e74762f786f6e64616b0a202a20547769747465723'
'a2068747470733a2f2f747769747465722e636f6d2f7468656c696e757867616d65720a0a2e2e2e0a6874'
'7470733a2f2f7777772e796f75747562652e636f6d2f77617463683f763d4672546442434f535f66632a0'
'f546865204c696e75782047616d6572321c436f7079726967687465642028636f6e746163742061757468'
'6f722938004a2968747470733a2f2f6265726b2e6e696e6a612f7468756d626e61696c732f46725464424'
'34f535f666352005a001a41080110011a30040e8ac6e89c061f982528c23ad33829fd7146435bf7a4cc22'
'f0bff70c4fe0b91fd36da9a375e3e1c171db825bf5d1f32209766964656f2f6d70342a5c080110031a406'
'2b2dd4c45e364030fbfad1a6fefff695ebf20ea33a5381b947753e2a0ca359989a5cc7d15e5392a0d354c'
'0b68498382b2701b22c03beb8dcb91089031b871e72214feb61536c007cdf4faeeaab4876cb397feaf6b51'
))
claim.ClearField("publisherSignature")
digest = sha256(b''.join([
decoded_address,
claim.SerializeToString(),
output.claim_hash[::-1]
]))
signature = output.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
claim.publisherSignature.version = 1
claim.publisherSignature.signatureType = 1
claim.publisherSignature.signature = signature
claim.publisherSignature.certificateId = output.claim_hash[::-1]
return claim
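
For context on the helper above: the digest it signs is sha256 of the base58-decoded address, the claim serialized without its signature field, and the reversed claim hash, signed deterministically with the output's ECDSA key. A minimal verification sketch, assuming the key pair is python-ecdsa (verify_legacy_signature and its arguments are illustrative and not part of the test suite; import paths are assumed from the surrounding modules):
from ecdsa import BadSignatureError
from lbry.crypto.base58 import Base58
from lbry.crypto.hash import sha256
def verify_legacy_signature(claim, address, claim_hash: bytes, verifying_key) -> bool:
    # rebuild the digest exactly as generate_signed_legacy does
    unsigned = type(claim)()
    unsigned.CopyFrom(claim)
    unsigned.ClearField("publisherSignature")
    digest = sha256(b''.join([
        Base58.decode(address),
        unsigned.SerializeToString(),
        claim_hash[::-1],
    ]))
    try:
        # raw r||s signature, matching sign_digest_deterministic above
        return verifying_key.verify_digest(claim.publisherSignature.signature, digest)
    except BadSignatureError:
        return False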

View file

@ -5,7 +5,7 @@ import lbry.wallet
from lbry.error import ServerPaymentFeeAboveMaxAllowedError
from lbry.wallet.network import ClientSession
from lbry.wallet.rpc import RPCError
from lbry.wallet.server.db.elasticsearch.sync import run as run_sync, make_es_index
from lbry.wallet.server.db.elasticsearch.sync import make_es_index_and_run_sync
from lbry.wallet.server.session import LBRYElectrumX
from lbry.testcase import IntegrationTestCase, CommandTestCase
from lbry.wallet.orchstr8.node import SPVNode
@ -95,25 +95,32 @@ class TestESSync(CommandTestCase):
await self.generate(1)
self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
db = self.conductor.spv_node.server.db
env = self.conductor.spv_node.server.env
await db.search_index.delete_index()
db.search_index.clear_caches()
self.assertEqual(0, len(await self.claim_search(order_by=['height'])))
await db.search_index.stop()
self.assertTrue(await make_es_index(db.search_index))
async def resync():
await db.search_index.start()
db.search_index.clear_caches()
await run_sync(db.sql._db_path, 1, 0, 0, index_name=db.search_index.index)
await make_es_index_and_run_sync(env, db=db, index_name=db.search_index.index, force=True)
self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
self.assertEqual(0, len(await self.claim_search(order_by=['height'])))
await resync()
# this time we will test a migration from unversioned to v1
await db.search_index.sync_client.indices.delete_template(db.search_index.index)
await db.search_index.stop()
self.assertTrue(await make_es_index(db.search_index))
await make_es_index_and_run_sync(env, db=db, index_name=db.search_index.index, force=True)
await db.search_index.start()
await resync()
self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
class TestHubDiscovery(CommandTestCase):
@ -192,17 +199,18 @@ class TestHubDiscovery(CommandTestCase):
)
class TestStress(CommandTestCase):
async def test_flush_over_66_thousand(self):
history = self.conductor.spv_node.server.db.history
history.flush_count = 66_000
history.flush()
self.assertEqual(history.flush_count, 66_001)
await self.generate(1)
self.assertEqual(history.flush_count, 66_002)
class TestStressFlush(CommandTestCase):
# async def test_flush_over_66_thousand(self):
# history = self.conductor.spv_node.server.db.history
# history.flush_count = 66_000
# history.flush()
# self.assertEqual(history.flush_count, 66_001)
# await self.generate(1)
# self.assertEqual(history.flush_count, 66_002)
async def test_thousands_claim_ids_on_search(self):
await self.stream_create()
with self.assertRaises(RPCError) as err:
await self.claim_search(not_channel_ids=[("%040x" % i) for i in range(8196)])
self.assertEqual(err.exception.message, 'not_channel_ids cant have more than 2048 items.')
# in the go hub this doesn't have a `.` at the end, in python it does
self.assertTrue(err.exception.message.startswith('not_channel_ids cant have more than 2048 items'))
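
The assertion above pins the hub-side cap of 2048 entries for not_channel_ids in a single claim_search call. A hedged sketch of respecting that cap by splitting a large exclusion list into separate searches (the helper and its name are illustrative, not SDK API):
MAX_NOT_CHANNEL_IDS = 2048  # limit enforced by the hub, per the assertion above
def not_channel_id_batches(channel_ids, size=MAX_NOT_CHANNEL_IDS):
    # yield slices small enough for one claim_search call each
    for start in range(0, len(channel_ids), size):
        yield channel_ids[start:start + size]
# e.g. pages = [await self.claim_search(not_channel_ids=batch) for batch in not_channel_id_batches(excluded)]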

View file

@ -12,6 +12,7 @@ from lbry.error import InsufficientFundsError
from lbry.extras.daemon.daemon import DEFAULT_PAGE_SIZE
from lbry.testcase import CommandTestCase
from lbry.wallet.orchstr8.node import SPVNode
from lbry.wallet.server.db.common import STREAM_TYPES
from lbry.wallet.transaction import Transaction, Output
from lbry.wallet.util import satoshis_to_coins as lbc
from lbry.crypto.hash import sha256
@ -182,6 +183,9 @@ class ClaimSearchCommand(ClaimTestCase):
claims = [three, two, signed]
await self.assertFindsClaims(claims, channel_ids=[self.channel_id])
await self.assertFindsClaims(claims, channel=f"@abc#{self.channel_id}")
await self.assertFindsClaims(claims, channel=f"@abc#{self.channel_id}", valid_channel_signature=True)
await self.assertFindsClaims(claims, channel=f"@abc#{self.channel_id}", has_channel_signature=True, valid_channel_signature=True)
await self.assertFindsClaims([], channel=f"@abc#{self.channel_id}", has_channel_signature=True, invalid_channel_signature=True) # fixme
await self.assertFindsClaims([], channel=f"@inexistent")
await self.assertFindsClaims([three, two, signed2, signed], channel_ids=[channel_id2, self.channel_id])
await self.channel_abandon(claim_id=self.channel_id)
@ -210,6 +214,10 @@ class ClaimSearchCommand(ClaimTestCase):
await self.assertFindsClaims([three, two], claim_ids=[self.get_claim_id(three), self.get_claim_id(two)])
await self.assertFindsClaims([three], claim_id=self.get_claim_id(three))
await self.assertFindsClaims([three], claim_id=self.get_claim_id(three), text='*')
# resolve by sd hash
two_sd_hash = two['outputs'][0]['value']['source']['sd_hash']
await self.assertFindsClaims([two], sd_hash=two_sd_hash)
await self.assertFindsClaims([two], sd_hash=two_sd_hash[:2])
async def test_source_filter(self):
channel = await self.channel_create('@abc')
@ -224,6 +232,7 @@ class ClaimSearchCommand(ClaimTestCase):
await self.assertListsClaims([channel_repost, no_source_repost, normal_repost, normal], has_source=True)
await self.assertFindsClaims([channel_repost, no_source_repost, normal_repost, normal, no_source, channel])
await self.assertListsClaims([channel_repost, no_source_repost, normal_repost, normal, no_source, channel])
await self.assertFindsClaims([normal_repost, normal], stream_types=list(STREAM_TYPES.keys()))
async def test_pagination(self):
await self.create_channel()
@ -810,10 +819,15 @@ class TransactionOutputCommands(ClaimTestCase):
stream_id = self.get_claim_id(await self.stream_create())
await self.support_create(stream_id, '0.3')
await self.support_create(stream_id, '0.2')
await self.generate(day_blocks)
await self.generate(day_blocks // 2)
await self.stream_update(stream_id)
await self.generate(day_blocks // 2)
await self.support_create(stream_id, '0.4')
await self.support_create(stream_id, '0.5')
await self.generate(day_blocks)
await self.stream_update(stream_id)
await self.generate(day_blocks // 2)
await self.stream_update(stream_id)
await self.generate(day_blocks // 2)
await self.support_create(stream_id, '0.6')
plot = await self.txo_plot(type='support')
@ -1225,6 +1239,8 @@ class ChannelCommands(CommandTestCase):
signature2 = await self.out(self.daemon.jsonrpc_channel_sign(channel_id=channel.claim_id, hexdata=data_to_sign))
self.assertTrue(verify(channel, unhexlify(data_to_sign), signature1))
self.assertTrue(verify(channel, unhexlify(data_to_sign), signature2))
signature3 = await self.out(self.daemon.jsonrpc_channel_sign(channel_id=channel.claim_id, hexdata=99))
self.assertTrue(verify(channel, unhexlify('99'), signature3))
async def test_channel_export_import_before_sending_channel(self):
# export
@ -1425,7 +1441,11 @@ class StreamCommands(ClaimTestCase):
self.assertTrue(signed['outputs'][0]['is_channel_signature_valid'])
async def test_repost(self):
await self.channel_create('@goodies', '1.0')
tx = await self.channel_create('@goodies', '1.0')
goodies_claim_id = self.get_claim_id(tx)
tx = await self.channel_create('@spam', '1.0')
spam_claim_id = self.get_claim_id(tx)
tx = await self.stream_create('newstuff', '1.1', channel_name='@goodies', tags=['foo', 'gaming'])
claim_id = self.get_claim_id(tx)
@ -1433,8 +1453,18 @@ class StreamCommands(ClaimTestCase):
self.assertItemCount(await self.daemon.jsonrpc_txo_list(reposted_claim_id=claim_id), 0)
self.assertItemCount(await self.daemon.jsonrpc_txo_list(type='repost'), 0)
tx = await self.stream_repost(claim_id, 'newstuff-again', '1.1')
tx = await self.stream_repost(claim_id, 'newstuff-again', '1.1', channel_name='@spam')
repost_id = self.get_claim_id(tx)
# test that inflating reposted channels works
repost_url = f'newstuff-again:{repost_id}'
self.ledger._tx_cache.clear()
self.assertEqual(
goodies_claim_id,
(await self.out(self.daemon.jsonrpc_resolve(repost_url))
)[repost_url]['reposted_claim']['signing_channel']['claim_id']
)
self.assertItemCount(await self.daemon.jsonrpc_claim_list(claim_type='repost'), 1)
self.assertEqual((await self.claim_search(name='newstuff'))[0]['meta']['reposted'], 1)
self.assertEqual((await self.claim_search(reposted_claim_id=claim_id))[0]['claim_id'], repost_id)
@ -1475,6 +1505,11 @@ class StreamCommands(ClaimTestCase):
self.assertEqual(resolved['@reposting-goodies/repost-on-channel'], search)
self.assertEqual(resolved['newstuff-again']['reposted_claim']['name'], 'newstuff')
await self.stream_update(repost_id, bid='0.42')
searched_repost = (await self.claim_search(claim_id=repost_id))[0]
self.assertEqual(searched_repost['amount'], '0.42')
self.assertEqual(searched_repost['signing_channel']['claim_id'], spam_claim_id)
async def test_filtering_channels_for_removing_content(self):
await self.channel_create('@some_channel', '0.1')
await self.stream_create('good_content', '0.1', channel_name='@some_channel', tags=['good'])
@ -1484,12 +1519,10 @@ class StreamCommands(ClaimTestCase):
filtering_channel_id = self.get_claim_id(
await self.channel_create('@filtering', '0.1')
)
self.conductor.spv_node.server.db.sql.filtering_channel_hashes.add(
unhexlify(filtering_channel_id)[::-1]
)
self.assertEqual(0, len(self.conductor.spv_node.server.db.sql.filtered_streams))
self.conductor.spv_node.server.db.filtering_channel_hashes.add(bytes.fromhex(filtering_channel_id))
self.assertEqual(0, len(self.conductor.spv_node.server.db.filtered_streams))
await self.stream_repost(bad_content_id, 'filter1', '0.1', channel_name='@filtering')
self.assertEqual(1, len(self.conductor.spv_node.server.db.sql.filtered_streams))
self.assertEqual(1, len(self.conductor.spv_node.server.db.filtered_streams))
# search for filtered content directly
result = await self.out(self.daemon.jsonrpc_claim_search(name='bad_content'))
@ -1531,12 +1564,16 @@ class StreamCommands(ClaimTestCase):
blocking_channel_id = self.get_claim_id(
await self.channel_create('@blocking', '0.1')
)
self.conductor.spv_node.server.db.sql.blocking_channel_hashes.add(
unhexlify(blocking_channel_id)[::-1]
)
self.assertEqual(0, len(self.conductor.spv_node.server.db.sql.blocked_streams))
# test setting from env vars and starting from scratch
await self.conductor.spv_node.stop(False)
await self.conductor.spv_node.start(self.conductor.blockchain_node,
extraconf={'BLOCKING_CHANNEL_IDS': blocking_channel_id,
'FILTERING_CHANNEL_IDS': filtering_channel_id})
await self.daemon.wallet_manager.reset()
self.assertEqual(0, len(self.conductor.spv_node.server.db.blocked_streams))
await self.stream_repost(bad_content_id, 'block1', '0.1', channel_name='@blocking')
self.assertEqual(1, len(self.conductor.spv_node.server.db.sql.blocked_streams))
self.assertEqual(1, len(self.conductor.spv_node.server.db.blocked_streams))
# blocked content is not resolvable
error = (await self.resolve('lbry://@some_channel/bad_content'))['error']
@ -1559,9 +1596,9 @@ class StreamCommands(ClaimTestCase):
self.assertEqual('@bad_channel', result['items'][1]['name'])
# filter channel out
self.assertEqual(0, len(self.conductor.spv_node.server.db.sql.filtered_channels))
self.assertEqual(0, len(self.conductor.spv_node.server.db.filtered_channels))
await self.stream_repost(bad_channel_id, 'filter2', '0.1', channel_name='@filtering')
self.assertEqual(1, len(self.conductor.spv_node.server.db.sql.filtered_channels))
self.assertEqual(1, len(self.conductor.spv_node.server.db.filtered_channels))
# same claim search as previous now returns 0 results
result = await self.out(self.daemon.jsonrpc_claim_search(any_tags=['bad-stuff'], order_by=['height']))
@ -1586,9 +1623,9 @@ class StreamCommands(ClaimTestCase):
self.assertEqual(worse_content_id, result['claim_id'])
# block channel
self.assertEqual(0, len(self.conductor.spv_node.server.db.sql.blocked_channels))
self.assertEqual(0, len(self.conductor.spv_node.server.db.blocked_channels))
await self.stream_repost(bad_channel_id, 'block2', '0.1', channel_name='@blocking')
self.assertEqual(1, len(self.conductor.spv_node.server.db.sql.blocked_channels))
self.assertEqual(1, len(self.conductor.spv_node.server.db.blocked_channels))
# channel, claim in channel or claim individually no longer resolve
self.assertEqual((await self.resolve('lbry://@bad_channel'))['error']['name'], 'BLOCKED')
@ -1760,6 +1797,24 @@ class StreamCommands(ClaimTestCase):
self.assertItemCount(await self.daemon.jsonrpc_claim_list(account_id=self.account.id), 3)
self.assertItemCount(await self.daemon.jsonrpc_claim_list(account_id=account2_id), 1)
self.assertEqual(3, len(await self.claim_search(release_time='>0', order_by=['release_time'])))
self.assertEqual(3, len(await self.claim_search(release_time='>=0', order_by=['release_time'])))
self.assertEqual(4, len(await self.claim_search(order_by=['release_time'])))
self.assertEqual(3, len(await self.claim_search(claim_type='stream', order_by=['release_time'])))
self.assertEqual(1, len(await self.claim_search(claim_type='channel', order_by=['release_time'])))
self.assertEqual(1, len(await self.claim_search(release_time='>=123456', order_by=['release_time'])))
self.assertEqual(1, len(await self.claim_search(release_time='>123456', order_by=['release_time'])))
self.assertEqual(2, len(await self.claim_search(release_time='<123457', order_by=['release_time'])))
self.assertEqual(2, len(await self.claim_search(release_time=['<123457'], order_by=['release_time'])))
self.assertEqual(2, len(await self.claim_search(release_time=['>0', '<123457'], order_by=['release_time'])))
self.assertEqual(
2, len(await self.claim_search(release_time=['>=123097', '<123457'], order_by=['release_time']))
)
self.assertEqual(
2, len(await self.claim_search(release_time=['<123457', '>0'], order_by=['release_time']))
)
async def test_setting_fee_fields(self):
tx = await self.out(self.stream_create('paid-stream'))
txo = tx['outputs'][0]
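
A note on the release_time assertions added above: claim_search takes either a single comparison string or a list of comparisons, and a list is applied conjunctively. A minimal sketch to run inside an async test method (the timestamps are the ones the test uses and are illustrative):
# single bound: streams released strictly after timestamp 123456
await self.claim_search(release_time='>123456', order_by=['release_time'])
# a list of bounds is ANDed together: the half-open window [123097, 123457)
await self.claim_search(release_time=['>=123097', '<123457'], order_by=['release_time'])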

View file

@ -4,8 +4,10 @@ import os
from binascii import hexlify
from lbry.schema import Claim
from lbry.stream.background_downloader import BackgroundDownloader
from lbry.stream.descriptor import StreamDescriptor
from lbry.testcase import CommandTestCase
from lbry.extras.daemon.components import TorrentSession
from lbry.extras.daemon.components import TorrentSession, BACKGROUND_DOWNLOADER_COMPONENT
from lbry.wallet import Transaction
@ -69,6 +71,16 @@ class FileCommands(CommandTestCase):
t = await self.stream_create(f'Stream_{i}', '0.00001')
self.stream_claim_ids.append(t['outputs'][0]['claim_id'])
async def test_file_reflect(self):
tx = await self.stream_create('mirror', '0.01')
sd_hash = tx['outputs'][0]['value']['source']['sd_hash']
self.assertEqual([], await self.daemon.jsonrpc_file_reflect(sd_hash=sd_hash))
all_except_sd = [
blob_hash for blob_hash in self.server.blob_manager.completed_blob_hashes if blob_hash != sd_hash
]
await self.reflector.blob_manager.delete_blobs(all_except_sd)
self.assertEqual(all_except_sd, await self.daemon.jsonrpc_file_reflect(sd_hash=sd_hash))
async def test_file_management(self):
await self.stream_create('foo', '0.01')
await self.stream_create('foo2', '0.01')
@ -95,18 +107,12 @@ class FileCommands(CommandTestCase):
self.assertEqual(await self.daemon.storage.get_blobs_to_announce(), [])
await self.stream_create('foo', '0.01')
stream = (await self.daemon.jsonrpc_file_list())["items"][0]
self.assertSetEqual(
set(await self.daemon.storage.get_blobs_to_announce()),
{stream.sd_hash, stream.descriptor.blobs[0].blob_hash}
)
self.assertSetEqual(set(await self.daemon.storage.get_blobs_to_announce()), {stream.sd_hash})
self.assertTrue(await self.daemon.jsonrpc_file_delete(delete_all=True))
# announces on download
self.assertEqual(await self.daemon.storage.get_blobs_to_announce(), [])
stream = await self.daemon.jsonrpc_get('foo')
self.assertSetEqual(
set(await self.daemon.storage.get_blobs_to_announce()),
{stream.sd_hash, stream.descriptor.blobs[0].blob_hash}
)
self.assertSetEqual(set(await self.daemon.storage.get_blobs_to_announce()), {stream.sd_hash})
async def _purge_file(self, claim_name, full_path):
self.assertTrue(
@ -312,10 +318,11 @@ class FileCommands(CommandTestCase):
tx = await self.stream_create('foo', '0.01', data=b'deadbeef' * 1000000)
sd_hash = tx['outputs'][0]['value']['source']['sd_hash']
file_info = (await self.file_list())[0]
await self.daemon.jsonrpc_file_delete(claim_name='foo')
blobs = await self.server_storage.get_blobs_for_stream(
await self.server_storage.get_stream_hash_for_sd_hash(sd_hash)
blobs = await self.daemon.storage.get_blobs_for_stream(
await self.daemon.storage.get_stream_hash_for_sd_hash(sd_hash)
)
await self.daemon.jsonrpc_file_delete(claim_name='foo')
self.assertEqual(5, len(blobs))
all_except_sd_and_head = [
blob.blob_hash for blob in blobs[1:-1]
]
@ -332,10 +339,11 @@ class FileCommands(CommandTestCase):
async def test_incomplete_downloads_retry(self):
tx = await self.stream_create('foo', '0.01', data=b'deadbeef' * 1000000)
sd_hash = tx['outputs'][0]['value']['source']['sd_hash']
await self.daemon.jsonrpc_file_delete(claim_name='foo')
blobs = await self.server_storage.get_blobs_for_stream(
await self.server_storage.get_stream_hash_for_sd_hash(sd_hash)
blobs = await self.daemon.storage.get_blobs_for_stream(
await self.daemon.storage.get_stream_hash_for_sd_hash(sd_hash)
)
self.assertEqual(5, len(blobs))
await self.daemon.jsonrpc_file_delete(claim_name='foo')
all_except_sd_and_head = [
blob.blob_hash for blob in blobs[1:-1]
]
@ -353,7 +361,6 @@ class FileCommands(CommandTestCase):
self.assertNotIn('error', resp)
self.assertItemCount(await self.daemon.jsonrpc_file_list(), 1)
self.assertEqual('running', (await self.file_list())[0]['status'])
await self.daemon.jsonrpc_file_set_status('stop', claim_name='foo')
# recover blobs
for blob_hash in all_except_sd_and_head:
@ -362,7 +369,6 @@ class FileCommands(CommandTestCase):
self.server_blob_manager.blobs.clear()
await self.server_blob_manager.blob_completed(self.server_blob_manager.get_blob(blob_hash))
await self.daemon.jsonrpc_file_set_status('start', claim_name='foo')
await asyncio.wait_for(self.wait_files_to_complete(), timeout=5)
file_info = (await self.file_list())[0]
self.assertEqual(file_info['blobs_completed'], file_info['blobs_in_stream'])
@ -515,16 +521,141 @@ class FileCommands(CommandTestCase):
class DiskSpaceManagement(CommandTestCase):
async def get_referenced_blobs(self, tx):
sd_hash = tx['outputs'][0]['value']['source']['sd_hash']
stream_hash = await self.daemon.storage.get_stream_hash_for_sd_hash(sd_hash)
return tx['outputs'][0]['value']['source']['sd_hash'], set(await self.blob_list(
stream_hash=stream_hash
))
async def test_file_management(self):
status = await self.status()
self.assertIn('disk_space', status)
self.assertEqual('0', status['disk_space']['space_used'])
self.assertEqual(0, status['disk_space']['total_used_mb'])
self.assertEqual(True, status['disk_space']['running'])
await self.stream_create('foo1', '0.01', data=('0' * 3 * 1024 * 1024).encode())
await self.stream_create('foo2', '0.01', data=('0' * 2 * 1024 * 1024).encode())
self.assertEqual('5', (await self.status())['disk_space']['space_used'])
sd_hash1, blobs1 = await self.get_referenced_blobs(
await self.stream_create('foo1', '0.01', data=('0' * 2 * 1024 * 1024).encode())
)
sd_hash2, blobs2 = await self.get_referenced_blobs(
await self.stream_create('foo2', '0.01', data=('0' * 3 * 1024 * 1024).encode())
)
sd_hash3, blobs3 = await self.get_referenced_blobs(
await self.stream_create('foo3', '0.01', data=('0' * 3 * 1024 * 1024).encode())
)
sd_hash4, blobs4 = await self.get_referenced_blobs(
await self.stream_create('foo4', '0.01', data=('0' * 2 * 1024 * 1024).encode())
)
await self.daemon.storage.update_blob_ownership(sd_hash1, False)
await self.daemon.storage.update_blob_ownership(sd_hash3, False)
await self.daemon.storage.update_blob_ownership(sd_hash4, False)
await self.blob_clean() # just to refresh caches, has no effect
self.assertEqual(7, (await self.status())['disk_space']['content_blobs_storage_used_mb'])
self.assertEqual(10, (await self.status())['disk_space']['total_used_mb'])
self.assertEqual(blobs1 | blobs2 | blobs3 | blobs4, set(await self.blob_list()))
await self.blob_clean()
self.assertEqual('5', (await self.status())['disk_space']['space_used'])
self.daemon.conf.blob_storage_limit = 3
self.assertEqual(10, (await self.status())['disk_space']['total_used_mb'])
self.assertEqual(7, (await self.status())['disk_space']['content_blobs_storage_used_mb'])
self.assertEqual(3, (await self.status())['disk_space']['published_blobs_storage_used_mb'])
self.assertEqual(blobs1 | blobs2 | blobs3 | blobs4, set(await self.blob_list()))
self.daemon.conf.blob_storage_limit = 6
await self.blob_clean()
self.assertEqual('3', (await self.status())['disk_space']['space_used'])
self.assertEqual(5, (await self.status())['disk_space']['total_used_mb'])
self.assertEqual(2, (await self.status())['disk_space']['content_blobs_storage_used_mb'])
self.assertEqual(3, (await self.status())['disk_space']['published_blobs_storage_used_mb'])
blobs = set(await self.blob_list())
self.assertFalse(blobs1.issubset(blobs))
self.assertTrue(blobs2.issubset(blobs))
self.assertFalse(blobs3.issubset(blobs))
self.assertTrue(blobs4.issubset(blobs))
# check that added_on gets set on downloads (was a bug)
self.assertLess(0, await self.daemon.storage.run_and_return_one_or_none("select min(added_on) from blob"))
await self.daemon.jsonrpc_file_delete(delete_all=True)
await self.daemon.jsonrpc_get("foo4", save_file=False)
self.assertLess(0, await self.daemon.storage.run_and_return_one_or_none("select min(added_on) from blob"))
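# For readers tracking the counters asserted in test_file_management above: a small
# reference sketch of the disk_space section of status(); field names are the ones the
# assertions use and the meanings are inferred from those assertions (sketch, not SDK docs).
status = await self.status()                              # inside an async test method
disk = status['disk_space']
is_running = disk['running']                              # True while the disk space manager is active
total_mb = disk['total_used_mb']                          # everything tracked on disk, in MB
content_mb = disk['content_blobs_storage_used_mb']        # blobs from streams we downloaded
published_mb = disk['published_blobs_storage_used_mb']    # blobs from streams we published
seeded_mb = disk['seed_blobs_storage_used_mb']            # network blobs held only for seeding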
class TestBackgroundDownloaderComponent(CommandTestCase):
async def get_blobs_from_sd_blob(self, sd_blob):
descriptor = await StreamDescriptor.from_stream_descriptor_blob(
asyncio.get_running_loop(), self.daemon.blob_manager.blob_dir, sd_blob
)
return descriptor.blobs
async def assertBlobs(self, *sd_hashes, no_files=True):
# checks that we have only the finished blobs needed for the referenced streams
seen = set(sd_hashes)
for sd_hash in sd_hashes:
sd_blob = self.daemon.blob_manager.get_blob(sd_hash)
self.assertTrue(sd_blob.get_is_verified())
blobs = await self.get_blobs_from_sd_blob(sd_blob)
for blob in blobs[:-1]:
self.assertTrue(self.daemon.blob_manager.get_blob(blob.blob_hash).get_is_verified())
seen.update(blob.blob_hash for blob in blobs if blob.blob_hash)
if no_files:
self.assertEqual(seen, self.daemon.blob_manager.completed_blob_hashes)
self.assertEqual(0, len(await self.file_list()))
async def clear(self):
await self.daemon.jsonrpc_file_delete(delete_all=True)
self.assertEqual(0, len(await self.file_list()))
await self.daemon.blob_manager.delete_blobs(list(self.daemon.blob_manager.completed_blob_hashes), True)
self.assertEqual(0, len((await self.daemon.jsonrpc_blob_list())['items']))
async def test_download(self):
content1 = await self.stream_create('content1', '0.01', data=bytes([0] * 32 * 1024 * 1024))
content1 = content1['outputs'][0]['value']['source']['sd_hash']
content2 = await self.stream_create('content2', '0.01', data=bytes([0] * 16 * 1024 * 1024))
content2 = content2['outputs'][0]['value']['source']['sd_hash']
self.assertEqual(48, (await self.status())['disk_space']['published_blobs_storage_used_mb'])
self.assertEqual(0, (await self.status())['disk_space']['content_blobs_storage_used_mb'])
background_downloader = BackgroundDownloader(self.daemon.conf, self.daemon.storage, self.daemon.blob_manager)
self.daemon.conf.network_storage_limit = 32
await self.clear()
await self.blob_clean()
self.assertEqual(0, (await self.status())['disk_space']['total_used_mb'])
await background_downloader.download_blobs(content1)
await self.assertBlobs(content1)
await self.blob_clean()
self.assertEqual(0, (await self.status())['disk_space']['content_blobs_storage_used_mb'])
self.assertEqual(32, (await self.status())['disk_space']['seed_blobs_storage_used_mb'])
self.daemon.conf.network_storage_limit = 48
await background_downloader.download_blobs(content2)
await self.assertBlobs(content1, content2)
await self.blob_clean()
self.assertEqual(0, (await self.status())['disk_space']['content_blobs_storage_used_mb'])
self.assertEqual(48, (await self.status())['disk_space']['seed_blobs_storage_used_mb'])
await self.clear()
await background_downloader.download_blobs(content2)
await self.assertBlobs(content2)
await self.blob_clean()
self.assertEqual(0, (await self.status())['disk_space']['content_blobs_storage_used_mb'])
self.assertEqual(16, (await self.status())['disk_space']['seed_blobs_storage_used_mb'])
# tests that an attempt to download something that isn't an sd blob will download the single blob and stop
blobs = await self.get_blobs_from_sd_blob(self.reflector.blob_manager.get_blob(content1))
await self.clear()
await background_downloader.download_blobs(blobs[0].blob_hash)
self.assertEqual({blobs[0].blob_hash}, self.daemon.blob_manager.completed_blob_hashes)
# test that disk space manager doesn't delete orphan network blobs
await background_downloader.download_blobs(content1)
await self.daemon.storage.db.execute_fetchall("update blob set added_on=0") # so it is preferred for cleaning
await self.daemon.jsonrpc_get("content2", save_file=False)
while (await self.file_list())[0]['status'] != 'stopped':
await asyncio.sleep(0.5)
await self.assertBlobs(content1, no_files=False)
self.daemon.conf.blob_storage_limit = 1
await self.blob_clean()
await self.assertBlobs(content1, no_files=False)
self.daemon.conf.network_storage_limit = 0
await self.blob_clean()
self.assertEqual(0, (await self.status())['disk_space']['seed_blobs_storage_used_mb'])
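
As the calls above show, the background downloader is constructed directly from the daemon's conf, storage and blob manager, and download_blobs takes an sd hash (or a plain blob hash, in which case it stops after that one blob). A minimal usage sketch mirroring those calls (fixture names are the test's own; this is a sketch, not documented API):
downloader = BackgroundDownloader(self.daemon.conf, self.daemon.storage, self.daemon.blob_manager)
self.daemon.conf.network_storage_limit = 32     # MB reserved for blobs seeded for the network
await downloader.download_blobs(sd_hash)        # fetch the descriptor and all of its content blobs
await self.blob_clean()                         # let the disk space manager enforce the limits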

View file

@ -199,5 +199,6 @@ class EpicAdventuresOfChris45(CommandTestCase):
# He closes and opens the wallet server databases to see how horribly they break
db = self.conductor.spv_node.server.db
db.close()
await db.open_for_serving()
db.open_db()
await db.initialize_caches()
# They didn't! (error would be AssertionError: 276 vs 266 (264 counts) on startup)
