forked from LBRYCommunity/lbry-sdk

Compare commits: no_determi...master (796 commits)
183 changed files with 8279 additions and 15601 deletions
.github/workflows/main.yml (vendored, 80 changes)

@@ -1,24 +1,24 @@
name: ci
on: ["push", "pull_request"]
on: ["push", "pull_request", "workflow_dispatch"]
jobs:
lint:
name: lint
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v1
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.7'
python-version: '3.9'
- name: extract pip cache
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
restore-keys: ${{ runner.os }}-pip-
- run: pip install --user --upgrade pip wheel
- run: pip install -e .[torrent,lint]
- run: pip install -e .[lint]
- run: make lint
tests-unit:

@@ -26,31 +26,31 @@ jobs:
strategy:
matrix:
os:
- ubuntu-latest
- ubuntu-20.04
- macos-latest
- windows-latest
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v1
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.7'
python-version: '3.9'
- name: set pip cache dir
id: pip-cache
run: echo "::set-output name=dir::$(pip cache dir)"
shell: bash
run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
- name: extract pip cache
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ${{ steps.pip-cache.outputs.dir }}
path: ${{ env.PIP_CACHE_DIR }}
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
restore-keys: ${{ runner.os }}-pip-
- id: os-name
uses: ASzc/change-string-case-action@v1
uses: ASzc/change-string-case-action@v5
with:
string: ${{ runner.os }}
- run: pip install --user --upgrade pip wheel
- run: python -m pip install --user --upgrade pip wheel
- if: startsWith(runner.os, 'linux')
run: pip install -e .[torrent,test]
run: pip install -e .[test]
- if: startsWith(runner.os, 'linux')
env:
HOME: /tmp

@@ -72,13 +72,15 @@ jobs:
tests-integration:
name: "tests / integration"
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
strategy:
matrix:
test:
- datanetwork
- blockchain
- blockchain_legacy_search
- claims
- takeovers
- transactions
- other
steps:
- name: Configure sysctl limits

@@ -90,22 +92,24 @@ jobs:
- name: Runs Elasticsearch
uses: elastic/elastic-github-actions/elasticsearch@master
with:
stack-version: 7.6.0
- uses: actions/checkout@v2
- uses: actions/setup-python@v1
stack-version: 7.12.1
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.7'
python-version: '3.9'
- if: matrix.test == 'other'
run: |
sudo apt-get update
sudo apt-get install -y --no-install-recommends ffmpeg
- name: extract pip cache
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ./.tox
key: tox-integration-${{ matrix.test }}-${{ hashFiles('setup.py') }}
restore-keys: txo-integration-${{ matrix.test }}-
- run: pip install tox coverage coveralls
- if: matrix.test == 'claims'
run: rm -rf .tox
- run: tox -e ${{ matrix.test }}
- name: submit coverage report
env:

@@ -119,7 +123,7 @@ jobs:
coverage:
needs: ["tests-unit", "tests-integration"]
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- name: finalize coverage report submission
env:

@@ -134,29 +138,29 @@ jobs:
strategy:
matrix:
os:
- ubuntu-16.04
- ubuntu-20.04
- macos-latest
- windows-latest
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v1
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.7'
python-version: '3.9'
- id: os-name
uses: ASzc/change-string-case-action@v1
uses: ASzc/change-string-case-action@v5
with:
string: ${{ runner.os }}
- name: set pip cache dir
id: pip-cache
run: echo "::set-output name=dir::$(pip cache dir)"
shell: bash
run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
- name: extract pip cache
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ${{ steps.pip-cache.outputs.dir }}
path: ${{ env.PIP_CACHE_DIR }}
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
restore-keys: ${{ runner.os }}-pip-
- run: pip install pyinstaller==4.4
- run: pip install pyinstaller==4.6
- run: pip install -e .
- if: startsWith(github.ref, 'refs/tags/v')
run: python docker/set_build.py

@@ -171,7 +175,7 @@ jobs:
pip install pywin32==301
pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py
dist/lbrynet.exe --version
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: lbrynet-${{ steps.os-name.outputs.lowercase }}
path: dist/

@@ -180,7 +184,7 @@ jobs:
name: "release"
if: startsWith(github.ref, 'refs/tags/v')
needs: ["build"]
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v1
- uses: actions/download-artifact@v2
.github/workflows/release.yml (vendored, 2 changes)

@@ -7,7 +7,7 @@ on:
jobs:
release:
name: "slack notification"
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- uses: LoveToKnow/slackify-markdown-action@v1.0.0
id: markdown
.gitignore (vendored, 2 changes)

@@ -13,7 +13,7 @@ __pycache__
_trial_temp/
trending*.log
/tests/integration/blockchain/files
/tests/integration/claims/files
/tests/.coverage.*
/lbry/wallet/bin
LICENSE (2 changes)

@@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2015-2020 LBRY Inc
Copyright (c) 2015-2022 LBRY Inc
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,
docker/Dockerfile.dht_node (new file, 43 changes)

@@ -0,0 +1,43 @@
FROM debian:10-slim
ARG user=lbry
ARG projects_dir=/home/$user
ARG db_dir=/database
ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
RUN apt-get update && \
apt-get -y --no-install-recommends install \
wget \
automake libtool \
tar unzip \
build-essential \
pkg-config \
libleveldb-dev \
python3.7 \
python3-dev \
python3-pip \
python3-wheel \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
COPY . $projects_dir
RUN chown -R $user:$user $projects_dir
RUN mkdir -p $db_dir
RUN chown -R $user:$user $db_dir
USER $user
WORKDIR $projects_dir
RUN python3 -m pip install -U setuptools pip
RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf
VOLUME $db_dir
ENTRYPOINT ["python3", "scripts/dht_node.py"]
@@ -20,6 +20,7 @@ RUN apt-get update && \
python3-dev \
python3-pip \
python3-wheel \
python3-cffi \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*
@@ -18,23 +18,27 @@ services:
- "wallet_server:/database"
environment:
- DAEMON_URL=http://lbry:lbry@127.0.0.1:9245
- MAX_QUERY_WORKERS=4
- CACHE_MB=1024
- CACHE_ALL_TX_HASHES=
- CACHE_ALL_CLAIM_TXOS=
- MAX_SEND=1000000000000000000
- MAX_RECEIVE=1000000000000000000
- MAX_SESSIONS=100000
- HOST=0.0.0.0
- TCP_PORT=50001
- PROMETHEUS_PORT=2112
- QUERY_TIMEOUT_MS=3000 # how long search queries allowed to run before cancelling, in milliseconds
- TRENDING_ALGORITHMS=variable_decay
- MAX_SEND=10000000000000 # deprecated. leave it high until its removed
- MAX_SUBS=1000000000000 # deprecated. leave it high until its removed
- FILTERING_CHANNEL_IDS=770bd7ecba84fd2f7607fb15aedd2b172c2e153f 95e5db68a3101df19763f3a5182e4b12ba393ee8
- BLOCKING_CHANNEL_IDS=dd687b357950f6f271999971f43c785e8067c3a9 06871aa438032244202840ec59a469b303257cad b4a2528f436eca1bf3bf3e10ff3f98c57bd6c4c6 e4e230b131082f6b10c8f7994bbb83f29e8e6fb9
- BLOCKING_CHANNEL_IDS=dd687b357950f6f271999971f43c785e8067c3a9 06871aa438032244202840ec59a469b303257cad b4a2528f436eca1bf3bf3e10ff3f98c57bd6c4c6
es01:
image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0
container_name: es01
environment:
- node.name=es01
- discovery.type=single-node
- indices.query.bool.max_clause_count=4096
- indices.query.bool.max_clause_count=8192
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms8g -Xmx8g" # no more than 32, remember to disable swap
- "ES_JAVA_OPTS=-Xms4g -Xmx4g" # no more than 32, remember to disable swap
ulimits:
memlock:
soft: -1
@@ -6,7 +6,7 @@ set -euo pipefail
SNAPSHOT_URL="${SNAPSHOT_URL:-}" #off by default. latest snapshot at https://lbry.com/snapshot/wallet
if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/claims.db ]]; then
if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/lbry-leveldb ]]; then
files="$(ls)"
echo "Downloading wallet snapshot from $SNAPSHOT_URL"
wget --no-verbose --trust-server-names --content-disposition "$SNAPSHOT_URL"

@@ -20,6 +20,6 @@ if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/claims.db ]]; then
rm "$filename"
fi
/home/lbry/.local/bin/lbry-hub-elastic-sync /database/claims.db
/home/lbry/.local/bin/lbry-hub-elastic-sync
echo 'starting server'
/home/lbry/.local/bin/lbry-hub "$@"
docs/api.json (350 changes)
File diff suppressed because one or more lines are too long
@@ -1,2 +1,2 @@
__version__ = "0.102.0"
__version__ = "0.113.0"
version = tuple(map(int, __version__.split('.')))  # pylint: disable=invalid-name
@@ -1,5 +1,6 @@
import os
import re
import time
import asyncio
import binascii
import logging

@@ -70,21 +71,27 @@ class AbstractBlob:
'writers',
'verified',
'writing',
'readers'
'readers',
'added_on',
'is_mine',
]
def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None):
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False,
):
self.loop = loop
self.blob_hash = blob_hash
self.length = length
self.blob_completed_callback = blob_completed_callback
self.blob_directory = blob_directory
self.writers: typing.Dict[typing.Tuple[typing.Optional[str], typing.Optional[int]], HashBlobWriter] = {}
self.verified: asyncio.Event = asyncio.Event(loop=self.loop)
self.writing: asyncio.Event = asyncio.Event(loop=self.loop)
self.verified: asyncio.Event = asyncio.Event()
self.writing: asyncio.Event = asyncio.Event()
self.readers: typing.List[typing.BinaryIO] = []
self.added_on = added_on or time.time()
self.is_mine = is_mine
if not is_valid_blobhash(blob_hash):
raise InvalidBlobHashError(blob_hash)

@@ -180,20 +187,21 @@ class AbstractBlob:
@classmethod
async def create_from_unencrypted(
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None) -> BlobInfo:
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int, added_on: int, is_mine: bool,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None,
) -> BlobInfo:
"""
Create an encrypted BlobFile from plaintext bytes
"""
blob_bytes, blob_hash = encrypt_blob_bytes(key, iv, unencrypted)
length = len(blob_bytes)
blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir)
blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir, added_on, is_mine)
writer = blob.get_blob_writer()
writer.write(blob_bytes)
await blob.verified.wait()
return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), blob_hash)
return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), added_on, blob_hash, is_mine)
def save_verified_blob(self, verified_bytes: bytes):
if self.verified.is_set():

@@ -214,7 +222,7 @@ class AbstractBlob:
peer_port: typing.Optional[int] = None) -> HashBlobWriter:
if (peer_address, peer_port) in self.writers and not self.writers[(peer_address, peer_port)].closed():
raise OSError(f"attempted to download blob twice from {peer_address}:{peer_port}")
fut = asyncio.Future(loop=self.loop)
fut = asyncio.Future()
writer = HashBlobWriter(self.blob_hash, self.get_length, fut)
self.writers[(peer_address, peer_port)] = writer

@@ -248,11 +256,13 @@ class BlobBuffer(AbstractBlob):
"""
An in-memory only blob
"""
def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None):
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
):
self._verified_bytes: typing.Optional[BytesIO] = None
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory)
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)
@contextlib.contextmanager
def _reader_context(self) -> typing.ContextManager[typing.BinaryIO]:

@@ -289,10 +299,12 @@ class BlobFile(AbstractBlob):
"""
A blob existing on the local file system
"""
def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None):
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory)
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
):
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)
if not blob_directory or not os.path.isdir(blob_directory):
raise OSError(f"invalid blob directory '{blob_directory}'")
self.file_path = os.path.join(self.blob_directory, self.blob_hash)

@@ -343,12 +355,12 @@ class BlobFile(AbstractBlob):
@classmethod
async def create_from_unencrypted(
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'],
asyncio.Task]] = None) -> BlobInfo:
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int, added_on: float, is_mine: bool,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None
) -> BlobInfo:
if not blob_dir or not os.path.isdir(blob_dir):
raise OSError(f"cannot create blob in directory: '{blob_dir}'")
return await super().create_from_unencrypted(
loop, blob_dir, key, iv, unencrypted, blob_num, blob_completed_callback
loop, blob_dir, key, iv, unencrypted, blob_num, added_on, is_mine, blob_completed_callback
)
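A recurring change in this file (and in the ones below) is dropping the explicit loop= argument when constructing asyncio.Event and asyncio.Future objects and when calling asyncio.wait_for; that argument was deprecated in Python 3.8 and removed in 3.10, so the primitives now bind to the running loop implicitly. A minimal, self-contained sketch of the newer pattern (illustrative only, not code from this repository):

import asyncio

async def wait_for_flag() -> str:
    flag = asyncio.Event()                               # no loop= argument
    result = asyncio.get_running_loop().create_future()  # futures come from the running loop

    async def worker():
        await asyncio.sleep(0.1)
        flag.set()                  # signal the waiter
        result.set_result("done")   # complete the future

    task = asyncio.create_task(worker())
    await asyncio.wait_for(flag.wait(), timeout=5)       # timeout only, no loop=
    await task
    return await result

if __name__ == "__main__":
    print(asyncio.run(wait_for_flag()))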
@@ -7,13 +7,19 @@ class BlobInfo:
'blob_num',
'length',
'iv',
'added_on',
'is_mine'
]
def __init__(self, blob_num: int, length: int, iv: str, blob_hash: typing.Optional[str] = None):
def __init__(
self, blob_num: int, length: int, iv: str, added_on,
blob_hash: typing.Optional[str] = None, is_mine=False):
self.blob_hash = blob_hash
self.blob_num = blob_num
self.length = length
self.iv = iv
self.added_on = added_on
self.is_mine = is_mine
def as_dict(self) -> typing.Dict:
d = {
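The constructor above gains added_on and is_mine, so each blob record can carry when it was stored and whether it was produced locally rather than downloaded. A rough usage sketch, assuming the signature shown in the hunk (the class below is a hypothetical stand-in with the same fields, and every value is a placeholder):

import time

class BlobInfo:  # hypothetical stand-in mirroring the fields in the hunk above
    def __init__(self, blob_num, length, iv, added_on, blob_hash=None, is_mine=False):
        self.blob_num = blob_num
        self.length = length
        self.iv = iv
        self.added_on = added_on
        self.blob_hash = blob_hash
        self.is_mine = is_mine

info = BlobInfo(
    blob_num=0,
    length=2 * 1024 * 1024,      # a full 2 MiB blob
    iv="00" * 16,                # hex-encoded initialization vector
    added_on=int(time.time()),   # when the blob was written to disk
    blob_hash="ab" * 48,         # placeholder 96-character hex hash
    is_mine=True,                # created locally, not fetched from a peer
)
print(info.blob_num, info.is_mine)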
@@ -36,30 +36,30 @@ class BlobManager:
self.config.blob_lru_cache_size)
self.connection_manager = ConnectionManager(loop)
def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None):
def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None, is_mine: bool = False):
if self.config.save_blobs or (
is_valid_blobhash(blob_hash) and os.path.isfile(os.path.join(self.blob_dir, blob_hash))):
return BlobFile(
self.loop, blob_hash, length, self.blob_completed, self.blob_dir
self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
)
return BlobBuffer(
self.loop, blob_hash, length, self.blob_completed, self.blob_dir
self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
)
def get_blob(self, blob_hash, length: typing.Optional[int] = None):
def get_blob(self, blob_hash, length: typing.Optional[int] = None, is_mine: bool = False):
if blob_hash in self.blobs:
if self.config.save_blobs and isinstance(self.blobs[blob_hash], BlobBuffer):
buffer = self.blobs.pop(blob_hash)
if blob_hash in self.completed_blob_hashes:
self.completed_blob_hashes.remove(blob_hash)
self.blobs[blob_hash] = self._get_blob(blob_hash, length)
self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
if buffer.is_readable():
with buffer.reader_context() as reader:
self.blobs[blob_hash].write_blob(reader.read())
if length and self.blobs[blob_hash].length is None:
self.blobs[blob_hash].set_length(length)
else:
self.blobs[blob_hash] = self._get_blob(blob_hash, length)
self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
return self.blobs[blob_hash]
def is_blob_verified(self, blob_hash: str, length: typing.Optional[int] = None) -> bool:

@@ -83,6 +83,8 @@ class BlobManager:
to_add = await self.storage.sync_missing_blobs(in_blobfiles_dir)
if to_add:
self.completed_blob_hashes.update(to_add)
# check blobs that aren't set as finished but were seen on disk
await self.ensure_completed_blobs_status(in_blobfiles_dir - to_add)
if self.config.track_bandwidth:
self.connection_manager.start()
return True

@@ -105,13 +107,26 @@ class BlobManager:
if isinstance(blob, BlobFile):
if blob.blob_hash not in self.completed_blob_hashes:
self.completed_blob_hashes.add(blob.blob_hash)
return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=True))
return self.loop.create_task(self.storage.add_blobs(
(blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=True)
)
else:
return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=False))
return self.loop.create_task(self.storage.add_blobs(
(blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=False)
)
def check_completed_blobs(self, blob_hashes: typing.List[str]) -> typing.List[str]:
"""Returns of the blobhashes_to_check, which are valid"""
return [blob_hash for blob_hash in blob_hashes if self.is_blob_verified(blob_hash)]
async def ensure_completed_blobs_status(self, blob_hashes: typing.Iterable[str]):
"""Ensures that completed blobs from a given list of blob hashes are set as 'finished' in the database."""
to_add = []
for blob_hash in blob_hashes:
if not self.is_blob_verified(blob_hash):
continue
blob = self.get_blob(blob_hash)
to_add.append((blob.blob_hash, blob.length, blob.added_on, blob.is_mine))
if len(to_add) > 500:
await self.storage.add_blobs(*to_add, finished=True)
to_add.clear()
return await self.storage.add_blobs(*to_add, finished=True)
def delete_blob(self, blob_hash: str):
if not is_valid_blobhash(blob_hash):
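The ensure_completed_blobs_status shown above writes (blob_hash, length, added_on, is_mine) rows to the database in chunks rather than one row at a time, flushing whenever more than 500 rows have accumulated. The same batching idea in isolation (a generic helper, not repository code):

import asyncio

async def flush_in_batches(rows, write_batch, batch_size=500):
    # write_batch is any coroutine that persists a list of rows
    batch = []
    for row in rows:
        batch.append(row)
        if len(batch) > batch_size:   # flush once the batch grows past the limit
            await write_batch(batch)
            batch.clear()
    if batch:                         # flush whatever is left over
        await write_batch(batch)

async def main():
    async def write_batch(batch):
        print(f"wrote {len(batch)} rows")
    await flush_in_batches(range(1234), write_batch)

asyncio.run(main())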
@@ -1,4 +1,3 @@
import os
import asyncio
import logging

@@ -7,51 +6,65 @@ log = logging.getLogger(__name__)
class DiskSpaceManager:
def __init__(self, config, cleaning_interval=30 * 60):
def __init__(self, config, db, blob_manager, cleaning_interval=30 * 60, analytics=None):
self.config = config
self.db = db
self.blob_manager = blob_manager
self.cleaning_interval = cleaning_interval
self.running = False
self.task = None
self.analytics = analytics
self._used_space_bytes = None
@property
def space_used_bytes(self):
used = 0
data_dir = os.path.join(self.config.data_dir, 'blobfiles')
for item in os.scandir(data_dir):
if item.is_file:
used += item.stat().st_size
return used
async def get_free_space_mb(self, is_network_blob=False):
limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
space_used_mb = await self.get_space_used_mb()
space_used_mb = space_used_mb['network_storage'] if is_network_blob else space_used_mb['content_storage']
return max(0, limit_mb - space_used_mb)
@property
def space_used_mb(self):
return int(self.space_used_bytes/1024.0/1024.0)
async def get_space_used_bytes(self):
self._used_space_bytes = await self.db.get_stored_blob_disk_usage()
return self._used_space_bytes
def clean(self):
if not self.config.blob_storage_limit:
async def get_space_used_mb(self, cached=True):
cached = cached and self._used_space_bytes is not None
space_used_bytes = self._used_space_bytes if cached else await self.get_space_used_bytes()
return {key: int(value/1024.0/1024.0) for key, value in space_used_bytes.items()}
async def clean(self):
await self._clean(False)
await self._clean(True)
async def _clean(self, is_network_blob=False):
space_used_mb = await self.get_space_used_mb(cached=False)
if is_network_blob:
space_used_mb = space_used_mb['network_storage']
else:
space_used_mb = space_used_mb['content_storage'] + space_used_mb['private_storage']
storage_limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
if self.analytics:
asyncio.create_task(
self.analytics.send_disk_space_used(space_used_mb, storage_limit_mb, is_network_blob)
)
delete = []
available = storage_limit_mb - space_used_mb
if storage_limit_mb == 0 if not is_network_blob else available >= 0:
return 0
used = 0
files = []
data_dir = os.path.join(self.config.data_dir, 'blobfiles')
for file in os.scandir(data_dir):
if file.is_file:
file_stats = file.stat()
used += file_stats.st_size
files.append((file_stats.st_mtime, file_stats.st_size, file.path))
files.sort()
available = (self.config.blob_storage_limit*1024*1024) - used
cleaned = 0
for _, file_size, file in files:
available += file_size
if available > 0:
for blob_hash, file_size, _ in await self.db.get_stored_blobs(is_mine=False, is_network_blob=is_network_blob):
delete.append(blob_hash)
available += int(file_size/1024.0/1024.0)
if available >= 0:
break
os.remove(file)
cleaned += 1
return cleaned
if delete:
await self.db.stop_all_files()
await self.blob_manager.delete_blobs(delete, delete_from_db=True)
self._used_space_bytes = None
return len(delete)
async def cleaning_loop(self):
while self.running:
await asyncio.sleep(self.cleaning_interval)
await asyncio.get_event_loop().run_in_executor(None, self.clean)
await self.clean()
async def start(self):
self.running = True
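The rewritten manager stops scanning the blobfiles directory itself: it asks the database how much space stored blobs use, converts sizes to MB, and deletes blobs until usage drops back under the configured limit. A simplified sketch of that accounting loop (the limit/usage naming follows the hunk; the function itself is illustrative):

def pick_blobs_to_delete(stored_blobs, space_used_mb, limit_mb):
    # stored_blobs: iterable of (blob_hash, size_bytes), least valuable first
    # returns the hashes to delete so usage falls back under limit_mb
    if limit_mb == 0:            # 0 is treated as "no limit" for content storage
        return []
    available = limit_mb - space_used_mb
    if available >= 0:
        return []
    delete = []
    for blob_hash, size_bytes in stored_blobs:
        delete.append(blob_hash)
        available += int(size_bytes / 1024.0 / 1024.0)
        if available >= 0:
            break
    return delete

# 900 MB used against a 512 MB limit, three 200 MB blobs available for deletion
blobs = [("a" * 96, 200 * 1024 * 1024), ("b" * 96, 200 * 1024 * 1024), ("c" * 96, 200 * 1024 * 1024)]
print(len(pick_blobs_to_delete(blobs, 900, 512)))   # -> 2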
@@ -32,7 +32,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
self.buf = b''
# this is here to handle the race when the downloader is closed right as response_fut gets a result
self.closed = asyncio.Event(loop=self.loop)
self.closed = asyncio.Event()
def data_received(self, data: bytes):
if self.connection_manager:

@@ -111,7 +111,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
self.transport.write(msg)
if self.connection_manager:
self.connection_manager.sent_data(f"{self.peer_address}:{self.peer_port}", len(msg))
response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout, loop=self.loop)
response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout)
availability_response = response.get_availability_response()
price_response = response.get_price_response()
blob_response = response.get_blob_response()

@@ -151,7 +151,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
f" timeout in {self.peer_timeout}"
log.debug(msg)
msg = f"downloaded {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}"
await asyncio.wait_for(self.writer.finished, self.peer_timeout, loop=self.loop)
await asyncio.wait_for(self.writer.finished, self.peer_timeout)
# wait for the io to finish
await self.blob.verified.wait()
log.info("%s at %fMB/s", msg,

@@ -187,7 +187,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
try:
self._blob_bytes_received = 0
self.blob, self.writer = blob, blob.get_blob_writer(self.peer_address, self.peer_port)
self._response_fut = asyncio.Future(loop=self.loop)
self._response_fut = asyncio.Future()
return await self._download_blob()
except OSError:
# i'm not sure how to fix this race condition - jack

@@ -244,7 +244,7 @@ async def request_blob(loop: asyncio.AbstractEventLoop, blob: Optional['Abstract
try:
if not connected_protocol:
await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port),
peer_connect_timeout, loop=loop)
peer_connect_timeout)
connected_protocol = protocol
if blob is None or blob.get_is_verified() or not blob.is_writeable():
# blob is None happens when we are just opening a connection
@@ -3,6 +3,7 @@ import typing
import logging
from lbry.utils import cache_concurrent
from lbry.blob_exchange.client import request_blob
from lbry.dht.node import get_kademlia_peers_from_hosts
if typing.TYPE_CHECKING:
from lbry.conf import Config
from lbry.dht.node import Node

@@ -29,7 +30,7 @@ class BlobDownloader:
self.failures: typing.Dict['KademliaPeer', int] = {}
self.connection_failures: typing.Set['KademliaPeer'] = set()
self.connections: typing.Dict['KademliaPeer', 'BlobExchangeClientProtocol'] = {}
self.is_running = asyncio.Event(loop=self.loop)
self.is_running = asyncio.Event()
def should_race_continue(self, blob: 'AbstractBlob'):
max_probes = self.config.max_connections_per_download * (1 if self.connections else 10)

@@ -63,8 +64,8 @@ class BlobDownloader:
self.scores[peer] = bytes_received / elapsed if bytes_received and elapsed else 1
async def new_peer_or_finished(self):
active_tasks = list(self.active_connections.values()) + [asyncio.sleep(1)]
await asyncio.wait(active_tasks, loop=self.loop, return_when='FIRST_COMPLETED')
active_tasks = list(self.active_connections.values()) + [asyncio.create_task(asyncio.sleep(1))]
await asyncio.wait(active_tasks, return_when='FIRST_COMPLETED')
def cleanup_active(self):
if not self.active_connections and not self.connections:

@@ -87,7 +88,6 @@ class BlobDownloader:
if blob.get_is_verified():
return blob
self.is_running.set()
tried_for_this_blob: typing.Set['KademliaPeer'] = set()
try:
while not blob.get_is_verified() and self.is_running.is_set():
batch: typing.Set['KademliaPeer'] = set(self.connections.keys())

@@ -97,24 +97,15 @@ class BlobDownloader:
"%s running, %d peers, %d ignored, %d active, %s connections", blob_hash[:6],
len(batch), len(self.ignored), len(self.active_connections), len(self.connections)
)
re_add: typing.Set['KademliaPeer'] = set()
for peer in sorted(batch, key=lambda peer: self.scores.get(peer, 0), reverse=True):
if peer in self.ignored:
continue
if peer in tried_for_this_blob:
if peer in self.active_connections or not self.should_race_continue(blob):
continue
if peer in self.active_connections:
if peer not in re_add:
re_add.add(peer)
continue
if not self.should_race_continue(blob):
break
log.debug("request %s from %s:%i", blob_hash[:8], peer.address, peer.tcp_port)
t = self.loop.create_task(self.request_blob_from_peer(blob, peer, connection_id))
self.active_connections[peer] = t
tried_for_this_blob.add(peer)
if not re_add:
self.peer_queue.put_nowait(list(batch))
self.peer_queue.put_nowait(list(batch))
await self.new_peer_or_finished()
self.cleanup_active()
log.debug("downloaded %s", blob_hash[:8])

@@ -133,11 +124,14 @@ class BlobDownloader:
protocol.close()
async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', node: 'Node',
async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node',
blob_hash: str) -> 'AbstractBlob':
search_queue = asyncio.Queue(loop=loop, maxsize=config.max_connections_per_download)
search_queue = asyncio.Queue(maxsize=config.max_connections_per_download)
search_queue.put_nowait(blob_hash)
peer_queue, accumulate_task = node.accumulate_peers(search_queue)
peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue)
fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers)
if fixed_peers:
loop.call_later(config.fixed_peer_delay, peer_queue.put_nowait, fixed_peers)
downloader = BlobDownloader(loop, config, blob_manager, peer_queue)
try:
return await downloader.download_blob(blob_hash)
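One of the changes above wraps the bare asyncio.sleep(1) coroutine in asyncio.create_task(...) before handing it to asyncio.wait; newer Python versions expect asyncio.wait to receive tasks or futures rather than raw coroutines. The pattern on its own (illustrative helper, not repository code):

import asyncio

async def first_of(*coros):
    # wrap coroutines in tasks first, then race them
    tasks = [asyncio.create_task(c) for c in coros]
    done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
    for task in pending:
        task.cancel()               # drop the losers
    return [t.result() for t in done]

async def main():
    async def slow():
        await asyncio.sleep(1)
        return "slow"
    async def fast():
        await asyncio.sleep(0.01)
        return "fast"
    print(await first_of(slow(), fast()))   # -> ['fast']

asyncio.run(main())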
@@ -1,6 +1,7 @@
import asyncio
import binascii
import logging
import socket
import typing
from json.decoder import JSONDecodeError
from lbry.blob_exchange.serialization import BlobResponse, BlobRequest, blob_response_types

@@ -24,19 +25,19 @@ class BlobServerProtocol(asyncio.Protocol):
self.idle_timeout = idle_timeout
self.transfer_timeout = transfer_timeout
self.server_task: typing.Optional[asyncio.Task] = None
self.started_listening = asyncio.Event(loop=self.loop)
self.started_listening = asyncio.Event()
self.buf = b''
self.transport: typing.Optional[asyncio.Transport] = None
self.lbrycrd_address = lbrycrd_address
self.peer_address_and_port: typing.Optional[str] = None
self.started_transfer = asyncio.Event(loop=self.loop)
self.transfer_finished = asyncio.Event(loop=self.loop)
self.started_transfer = asyncio.Event()
self.transfer_finished = asyncio.Event()
self.close_on_idle_task: typing.Optional[asyncio.Task] = None
async def close_on_idle(self):
while self.transport:
try:
await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout, loop=self.loop)
await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout)
except asyncio.TimeoutError:
log.debug("closing idle connection from %s", self.peer_address_and_port)
return self.close()

@@ -100,7 +101,7 @@ class BlobServerProtocol(asyncio.Protocol):
log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
self.started_transfer.set()
try:
sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout, loop=self.loop)
sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout)
if sent and sent > 0:
self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent)
log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port)

@@ -137,7 +138,7 @@ class BlobServerProtocol(asyncio.Protocol):
try:
request = BlobRequest.deserialize(self.buf + data)
self.buf = remainder
except JSONDecodeError:
except (UnicodeDecodeError, JSONDecodeError):
log.error("request from %s is not valid json (%i bytes): %s", self.peer_address_and_port,
len(self.buf + data), '' if not data else binascii.hexlify(self.buf + data).decode())
self.close()

@@ -156,7 +157,7 @@ class BlobServer:
self.loop = loop
self.blob_manager = blob_manager
self.server_task: typing.Optional[asyncio.Task] = None
self.started_listening = asyncio.Event(loop=self.loop)
self.started_listening = asyncio.Event()
self.lbrycrd_address = lbrycrd_address
self.idle_timeout = idle_timeout
self.transfer_timeout = transfer_timeout

@@ -167,6 +168,13 @@ class BlobServer:
raise Exception("already running")
async def _start_server():
# checking if the port is in use
# thx https://stackoverflow.com/a/52872579
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if s.connect_ex(('localhost', port)) == 0:
# the port is already in use!
log.error("Failed to bind TCP %s:%d", interface, port)
server = await self.loop.create_server(
lambda: self.server_protocol_class(self.loop, self.blob_manager, self.lbrycrd_address,
self.idle_timeout, self.transfer_timeout),
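The server change adds a pre-flight check (credited in its comment to a Stack Overflow answer) that tries to connect to the port before binding, so a clearer error can be logged when another process already holds it. In isolation the check looks roughly like this (host and port below are placeholders):

import socket

def port_in_use(host: str, port: int) -> bool:
    # connect_ex returns 0 when something is already accepting connections there
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(1.0)
        return s.connect_ex((host, port)) == 0

if port_in_use("localhost", 3333):
    print("Failed to bind TCP localhost:3333 (already in use)")
else:
    print("port 3333 looks free")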
lbry/conf.py (30 changes)

@@ -613,7 +613,7 @@ class Config(CLIConfig):
"ports or have firewall rules you likely want to disable this.", True
)
udp_port = Integer("UDP port for communicating on the LBRY DHT", 4444, previous_names=['dht_node_port'])
tcp_port = Integer("TCP port to listen for incoming blob requests", 3333, previous_names=['peer_port'])
tcp_port = Integer("TCP port to listen for incoming blob requests", 4444, previous_names=['peer_port'])
prometheus_port = Integer("Port to expose prometheus metrics (off by default)", 0)
network_interface = String("Interface to use for the DHT and blob exchange", '0.0.0.0')

@@ -622,7 +622,11 @@ class Config(CLIConfig):
"Routing table bucket index below which we always split the bucket if given a new key to add to it and "
"the bucket is full. As this value is raised the depth of the routing table (and number of peers in it) "
"will increase. This setting is used by seed nodes, you probably don't want to change it during normal "
"use.", 1
"use.", 2
)
is_bootstrap_node = Toggle(
"When running as a bootstrap node, disable all logic related to balancing the routing table, so we can "
"add as many peers as possible and better help first-runs.", False
)
# protocol timeouts

@@ -634,6 +638,7 @@ class Config(CLIConfig):
# blob announcement and download
save_blobs = Toggle("Save encrypted blob files for hosting, otherwise download blobs to memory only.", True)
network_storage_limit = Integer("Disk space in MB to be allocated for helping the P2P network. 0 = disable", 0)
blob_storage_limit = Integer("Disk space in MB to be allocated for blob storage. 0 = no limit", 0)
blob_lru_cache_size = Integer(
"LRU cache size for decrypted downloaded blobs used to minimize re-downloading the same blobs when "

@@ -680,6 +685,14 @@ class Config(CLIConfig):
('cdn.reflector.lbry.com', 5567)
])
tracker_servers = Servers("BitTorrent-compatible (BEP15) UDP trackers for helping P2P discovery", [
('tracker.lbry.com', 9252),
('tracker.lbry.grin.io', 9252),
('tracker.lbry.pigg.es', 9252),
('tracker.lizard.technology', 9252),
('s1.lbry.network', 9252),
])
lbryum_servers = Servers("SPV wallet servers", [
('spv11.lbry.com', 50001),
('spv12.lbry.com', 50001),

@@ -690,12 +703,20 @@ class Config(CLIConfig):
('spv17.lbry.com', 50001),
('spv18.lbry.com', 50001),
('spv19.lbry.com', 50001),
('hub.lbry.grin.io', 50001),
('hub.lizard.technology', 50001),
('s1.lbry.network', 50001),
])
known_dht_nodes = Servers("Known nodes for bootstrapping connection to the DHT", [
('dht.lbry.grin.io', 4444),  # Grin
('dht.lbry.madiator.com', 4444),  # Madiator
('dht.lbry.pigg.es', 4444),  # Pigges
('lbrynet1.lbry.com', 4444),  # US EAST
('lbrynet2.lbry.com', 4444),  # US WEST
('lbrynet3.lbry.com', 4444),  # EU
('lbrynet4.lbry.com', 4444)  # ASIA
('lbrynet4.lbry.com', 4444),  # ASIA
('dht.lizard.technology', 4444),  # Jack
('s2.lbry.network', 4444),
])
# blockchain

@@ -721,7 +742,7 @@ class Config(CLIConfig):
coin_selection_strategy = StringChoice(
"Strategy to use when selecting UTXOs for a transaction",
STRATEGIES, "standard")
STRATEGIES, "prefer_confirmed"
)
transaction_cache_size = Integer("Transaction cache size", 2 ** 17)
save_resolved_claims = Toggle(
@@ -67,7 +67,7 @@ class ConnectionManager:
while True:
last = time.perf_counter()
await asyncio.sleep(0.1, loop=self.loop)
await asyncio.sleep(0.1)
self._status['incoming_bps'].clear()
self._status['outgoing_bps'].clear()
now = time.perf_counter()
@ -1,6 +1,9 @@
|
|||
import asyncio
|
||||
import typing
|
||||
import logging
|
||||
|
||||
from prometheus_client import Counter, Gauge
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.dht.node import Node
|
||||
from lbry.extras.daemon.storage import SQLiteStorage
|
||||
|
@@ -9,45 +12,59 @@ log = logging.getLogger(__name__)

class BlobAnnouncer:
announcements_sent_metric = Counter(
"announcements_sent", "Number of announcements sent and their respective status.", namespace="dht_node",
labelnames=("peers", "error"),
)
announcement_queue_size_metric = Gauge(
"announcement_queue_size", "Number of hashes waiting to be announced.", namespace="dht_node",
labelnames=("scope",)
)

def __init__(self, loop: asyncio.AbstractEventLoop, node: 'Node', storage: 'SQLiteStorage'):
self.loop = loop
self.node = node
self.storage = storage
self.announce_task: asyncio.Task = None
self.announce_queue: typing.List[str] = []
self._done = asyncio.Event()
self.announced = set()

async def _submit_announcement(self, blob_hash):
try:
peers = len(await self.node.announce_blob(blob_hash))
if peers > 4:
return blob_hash
else:
log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise err
log.warning("error announcing %s: %s", blob_hash[:8], str(err))
async def _run_consumer(self):
while self.announce_queue:
try:
blob_hash = self.announce_queue.pop()
peers = len(await self.node.announce_blob(blob_hash))
self.announcements_sent_metric.labels(peers=peers, error=False).inc()
if peers > 4:
self.announced.add(blob_hash)
else:
log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
except Exception as err:
self.announcements_sent_metric.labels(peers=0, error=True).inc()
log.warning("error announcing %s: %s", blob_hash[:8], str(err))

async def _announce(self, batch_size: typing.Optional[int] = 10):
while batch_size:
if not self.node.joined.is_set():
await self.node.joined.wait()
await asyncio.sleep(60, loop=self.loop)
await asyncio.sleep(60)
if not self.node.protocol.routing_table.get_peers():
log.warning("No peers in DHT, announce round skipped")
continue
self.announce_queue.extend(await self.storage.get_blobs_to_announce())
self.announcement_queue_size_metric.labels(scope="global").set(len(self.announce_queue))
log.debug("announcer task wake up, %d blobs to announce", len(self.announce_queue))
while len(self.announce_queue) > 0:
log.info("%i blobs to announce", len(self.announce_queue))
announced = await asyncio.gather(*[
self._submit_announcement(
self.announce_queue.pop()) for _ in range(batch_size) if self.announce_queue
], loop=self.loop)
announced = list(filter(None, announced))
await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)])
announced = list(filter(None, self.announced))
if announced:
await self.storage.update_last_announced_blobs(announced)
log.info("announced %i blobs", len(announced))
self.announced.clear()
self._done.set()
self._done.clear()

def start(self, batch_size: typing.Optional[int] = 10):
assert not self.announce_task or self.announce_task.done(), "already running"
@@ -56,3 +73,6 @@ class BlobAnnouncer:
def stop(self):
if self.announce_task and not self.announce_task.done():
self.announce_task.cancel()

def wait(self):
return self._done.wait()
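The hunk above replaces the per-batch gather of _submit_announcement() calls with a pool of _run_consumer() coroutines that drain self.announce_queue and collect successful hashes in self.announced. A minimal sketch of that consumer-pool pattern in isolation follows; announce() is a stand-in, not lbry's DHT call, and the >4 success threshold mirrors the code above.

# Illustrative sketch of the consumer-pool announce pattern.
import asyncio
import random

async def announce(blob_hash):
    await asyncio.sleep(random.random())  # pretend to contact DHT peers
    return random.randint(0, 8)           # pretend number of peers that stored it

async def consumer(queue, announced):
    while queue:
        blob_hash = queue.pop()
        if await announce(blob_hash) > 4:
            announced.add(blob_hash)

async def announce_all(hashes, concurrency=10):
    queue, announced = list(hashes), set()
    await asyncio.gather(*(consumer(queue, announced) for _ in range(concurrency)))
    return announced

# asyncio.run(announce_all([f"hash{i}" for i in range(50)]))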
@@ -20,7 +20,6 @@ MAYBE_PING_DELAY = 300 # 5 minutes
CHECK_REFRESH_INTERVAL = REFRESH_INTERVAL / 5
RPC_ID_LENGTH = 20
PROTOCOL_VERSION = 1
BOTTOM_OUT_LIMIT = 3
MSG_SIZE_LIMIT = 1400
146  lbry/dht/node.py
@@ -1,9 +1,11 @@
import logging
import asyncio
import typing
import binascii
import socket
from lbry.utils import resolve_host

from prometheus_client import Gauge

from lbry.utils import aclosing, resolve_host
from lbry.dht import constants
from lbry.dht.peer import make_kademlia_peer
from lbry.dht.protocol.distance import Distance
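The new aclosing import is used below to wrap the iterative node and value finders so that their aclose() cleanup runs even when the caller stops iterating early. The sketch shows the same pattern with the stdlib contextlib.aclosing (Python 3.10+), which behaves like the helper imported from lbry.utils here.

# Illustrative: aclosing() guarantees cleanup of an async generator on early exit.
import asyncio
from contextlib import aclosing

async def numbers():
    try:
        for i in range(100):
            yield i
    finally:
        print("generator cleaned up")

async def main():
    async with aclosing(numbers()) as gen:
        async for n in gen:
            if n == 3:
                break  # aclose() still runs when the with-block exits

asyncio.run(main())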
@ -18,20 +20,32 @@ log = logging.getLogger(__name__)
|
|||
|
||||
|
||||
class Node:
|
||||
storing_peers_metric = Gauge(
|
||||
"storing_peers", "Number of peers storing blobs announced to this node", namespace="dht_node",
|
||||
labelnames=("scope",),
|
||||
)
|
||||
stored_blob_with_x_bytes_colliding = Gauge(
|
||||
"stored_blobs_x_bytes_colliding", "Number of blobs with at least X bytes colliding with this node id prefix",
|
||||
namespace="dht_node", labelnames=("amount",)
|
||||
)
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, udp_port: int,
|
||||
internal_udp_port: int, peer_port: int, external_ip: str, rpc_timeout: float = constants.RPC_TIMEOUT,
|
||||
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX,
|
||||
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False,
|
||||
storage: typing.Optional['SQLiteStorage'] = None):
|
||||
self.loop = loop
|
||||
self.internal_udp_port = internal_udp_port
|
||||
self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout,
|
||||
split_buckets_under_index)
|
||||
split_buckets_under_index, is_bootstrap_node)
|
||||
self.listening_port: asyncio.DatagramTransport = None
|
||||
self.joined = asyncio.Event(loop=self.loop)
|
||||
self.joined = asyncio.Event()
|
||||
self._join_task: asyncio.Task = None
|
||||
self._refresh_task: asyncio.Task = None
|
||||
self._storage = storage
|
||||
|
||||
@property
|
||||
def stored_blob_hashes(self):
|
||||
return self.protocol.data_store.keys()
|
||||
|
||||
async def refresh_node(self, force_once=False):
|
||||
while True:
|
||||
# remove peers with expired blob announcements from the datastore
|
||||
|
@ -41,17 +55,21 @@ class Node:
|
|||
# add all peers in the routing table
|
||||
total_peers.extend(self.protocol.routing_table.get_peers())
|
||||
# add all the peers who have announced blobs to us
|
||||
total_peers.extend(self.protocol.data_store.get_storing_contacts())
|
||||
storing_peers = self.protocol.data_store.get_storing_contacts()
|
||||
self.storing_peers_metric.labels("global").set(len(storing_peers))
|
||||
total_peers.extend(storing_peers)
|
||||
|
||||
counts = {0: 0, 1: 0, 2: 0}
|
||||
node_id = self.protocol.node_id
|
||||
for blob_hash in self.protocol.data_store.keys():
|
||||
bytes_colliding = 0 if blob_hash[0] != node_id[0] else 2 if blob_hash[1] == node_id[1] else 1
|
||||
counts[bytes_colliding] += 1
|
||||
self.stored_blob_with_x_bytes_colliding.labels(amount=0).set(counts[0])
|
||||
self.stored_blob_with_x_bytes_colliding.labels(amount=1).set(counts[1])
|
||||
self.stored_blob_with_x_bytes_colliding.labels(amount=2).set(counts[2])
|
||||
|
||||
# get ids falling in the midpoint of each bucket that hasn't been recently updated
|
||||
node_ids = self.protocol.routing_table.get_refresh_list(0, True)
|
||||
# if we have 3 or fewer populated buckets get two random ids in the range of each to try and
|
||||
# populate/split the buckets further
|
||||
buckets_with_contacts = self.protocol.routing_table.buckets_with_contacts()
|
||||
if buckets_with_contacts <= 3:
|
||||
for i in range(buckets_with_contacts):
|
||||
node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
|
||||
node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
|
||||
|
||||
if self.protocol.routing_table.get_peers():
|
||||
# if we have node ids to look up, perform the iterative search until we have k results
|
||||
|
@ -61,7 +79,7 @@ class Node:
|
|||
else:
|
||||
if force_once:
|
||||
break
|
||||
fut = asyncio.Future(loop=self.loop)
|
||||
fut = asyncio.Future()
|
||||
self.loop.call_later(constants.REFRESH_INTERVAL // 4, fut.set_result, None)
|
||||
await fut
|
||||
continue
|
||||
|
@ -75,12 +93,12 @@ class Node:
|
|||
if force_once:
|
||||
break
|
||||
|
||||
fut = asyncio.Future(loop=self.loop)
|
||||
fut = asyncio.Future()
|
||||
self.loop.call_later(constants.REFRESH_INTERVAL, fut.set_result, None)
|
||||
await fut
|
||||
|
||||
async def announce_blob(self, blob_hash: str) -> typing.List[bytes]:
|
||||
hash_value = binascii.unhexlify(blob_hash.encode())
|
||||
hash_value = bytes.fromhex(blob_hash)
|
||||
assert len(hash_value) == constants.HASH_LENGTH
|
||||
peers = await self.peer_search(hash_value)
|
||||
|
||||
|
@ -90,12 +108,12 @@ class Node:
|
|||
for peer in peers:
|
||||
log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
|
||||
stored_to_tup = await asyncio.gather(
|
||||
*(self.protocol.store_to_peer(hash_value, peer) for peer in peers), loop=self.loop
|
||||
*(self.protocol.store_to_peer(hash_value, peer) for peer in peers)
|
||||
)
|
||||
stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
|
||||
if stored_to:
|
||||
log.debug(
|
||||
"Stored %s to %i of %i attempted peers", binascii.hexlify(hash_value).decode()[:8],
|
||||
"Stored %s to %i of %i attempted peers", hash_value.hex()[:8],
|
||||
len(stored_to), len(peers)
|
||||
)
|
||||
else:
|
||||
|
@ -164,39 +182,36 @@ class Node:
|
|||
for address, udp_port in known_node_urls or []
|
||||
]))
|
||||
except socket.gaierror:
|
||||
await asyncio.sleep(30, loop=self.loop)
|
||||
await asyncio.sleep(30)
|
||||
continue
|
||||
|
||||
self.protocol.peer_manager.reset()
|
||||
self.protocol.ping_queue.enqueue_maybe_ping(*seed_peers, delay=0.0)
|
||||
await self.peer_search(self.protocol.node_id, shortlist=seed_peers, count=32)
|
||||
|
||||
await asyncio.sleep(1, loop=self.loop)
|
||||
await asyncio.sleep(1)
|
||||
|
||||
def start(self, interface: str, known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
|
||||
self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls))
|
||||
|
||||
def get_iterative_node_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
|
||||
bottom_out_limit: int = constants.BOTTOM_OUT_LIMIT,
|
||||
max_results: int = constants.K) -> IterativeNodeFinder:
|
||||
|
||||
return IterativeNodeFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
|
||||
key, bottom_out_limit, max_results, None, shortlist)
|
||||
shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
|
||||
return IterativeNodeFinder(self.loop, self.protocol, key, max_results, shortlist)
|
||||
|
||||
def get_iterative_value_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
|
||||
bottom_out_limit: int = 40,
|
||||
max_results: int = -1) -> IterativeValueFinder:
|
||||
|
||||
return IterativeValueFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
|
||||
key, bottom_out_limit, max_results, None, shortlist)
|
||||
shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
|
||||
return IterativeValueFinder(self.loop, self.protocol, key, max_results, shortlist)
|
||||
|
||||
async def peer_search(self, node_id: bytes, count=constants.K, max_results=constants.K * 2,
|
||||
bottom_out_limit=20, shortlist: typing.Optional[typing.List['KademliaPeer']] = None
|
||||
shortlist: typing.Optional[typing.List['KademliaPeer']] = None
|
||||
) -> typing.List['KademliaPeer']:
|
||||
peers = []
|
||||
async for iteration_peers in self.get_iterative_node_finder(
|
||||
node_id, shortlist=shortlist, bottom_out_limit=bottom_out_limit, max_results=max_results):
|
||||
peers.extend(iteration_peers)
|
||||
async with aclosing(self.get_iterative_node_finder(
|
||||
node_id, shortlist=shortlist, max_results=max_results)) as node_finder:
|
||||
async for iteration_peers in node_finder:
|
||||
peers.extend(iteration_peers)
|
||||
distance = Distance(node_id)
|
||||
peers.sort(key=lambda peer: distance(peer.node_id))
|
||||
return peers[:count]
|
||||
|
@ -222,39 +237,46 @@ class Node:
|
|||
|
||||
# prioritize peers who reply to a dht ping first
|
||||
# this minimizes attempting to make tcp connections that won't work later to dead or unreachable peers
|
||||
|
||||
async for results in self.get_iterative_value_finder(binascii.unhexlify(blob_hash.encode())):
|
||||
to_put = []
|
||||
for peer in results:
|
||||
if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
|
||||
continue
|
||||
is_good = self.protocol.peer_manager.peer_is_good(peer)
|
||||
if is_good:
|
||||
# the peer has replied recently over UDP, it can probably be reached on the TCP port
|
||||
to_put.append(peer)
|
||||
elif is_good is None:
|
||||
if not peer.udp_port:
|
||||
# TODO: use the same port for TCP and UDP
|
||||
# the udp port must be guessed
|
||||
# default to the ports being the same. if the TCP port appears to be <=0.48.0 default,
|
||||
# including on a network with several nodes, then assume the udp port is proportionately
|
||||
# based on a starting port of 4444
|
||||
udp_port_to_try = peer.tcp_port
|
||||
if 3400 > peer.tcp_port > 3332:
|
||||
udp_port_to_try = (peer.tcp_port - 3333) + 4444
|
||||
self.loop.create_task(put_into_result_queue_after_pong(
|
||||
make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)
|
||||
))
|
||||
async with aclosing(self.get_iterative_value_finder(bytes.fromhex(blob_hash))) as value_finder:
|
||||
async for results in value_finder:
|
||||
to_put = []
|
||||
for peer in results:
|
||||
if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
|
||||
continue
|
||||
is_good = self.protocol.peer_manager.peer_is_good(peer)
|
||||
if is_good:
|
||||
# the peer has replied recently over UDP, it can probably be reached on the TCP port
|
||||
to_put.append(peer)
|
||||
elif is_good is None:
|
||||
if not peer.udp_port:
|
||||
# TODO: use the same port for TCP and UDP
|
||||
# the udp port must be guessed
|
||||
# default to the ports being the same. if the TCP port appears to be <=0.48.0 default,
|
||||
# including on a network with several nodes, then assume the udp port is proportionately
|
||||
# based on a starting port of 4444
|
||||
udp_port_to_try = peer.tcp_port
|
||||
if 3400 > peer.tcp_port > 3332:
|
||||
udp_port_to_try = (peer.tcp_port - 3333) + 4444
|
||||
self.loop.create_task(put_into_result_queue_after_pong(
|
||||
make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)
|
||||
))
|
||||
else:
|
||||
self.loop.create_task(put_into_result_queue_after_pong(peer))
|
||||
else:
|
||||
self.loop.create_task(put_into_result_queue_after_pong(peer))
|
||||
else:
|
||||
# the peer is known to be bad/unreachable, skip trying to connect to it over TCP
|
||||
log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
|
||||
if to_put:
|
||||
result_queue.put_nowait(to_put)
|
||||
# the peer is known to be bad/unreachable, skip trying to connect to it over TCP
|
||||
log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
|
||||
if to_put:
|
||||
result_queue.put_nowait(to_put)
|
||||
|
||||
def accumulate_peers(self, search_queue: asyncio.Queue,
|
||||
peer_queue: typing.Optional[asyncio.Queue] = None
|
||||
) -> typing.Tuple[asyncio.Queue, asyncio.Task]:
|
||||
queue = peer_queue or asyncio.Queue(loop=self.loop)
|
||||
queue = peer_queue or asyncio.Queue()
|
||||
return queue, self.loop.create_task(self._accumulate_peers_for_value(search_queue, queue))
|
||||
|
||||
|
||||
async def get_kademlia_peers_from_hosts(peer_list: typing.List[typing.Tuple[str, int]]) -> typing.List['KademliaPeer']:
|
||||
peer_address_list = [(await resolve_host(url, port, proto='tcp'), port) for url, port in peer_list]
|
||||
kademlia_peer_list = [make_kademlia_peer(None, address, None, tcp_port=port, allow_localhost=True)
|
||||
for address, port in peer_address_list]
|
||||
return kademlia_peer_list
|
||||
|
|
|
@@ -1,18 +1,21 @@
import typing
import asyncio
import logging
from binascii import hexlify
from dataclasses import dataclass, field
from functools import lru_cache

from prometheus_client import Gauge

from lbry.utils import is_valid_public_ipv4 as _is_valid_public_ipv4, LRUCache
from lbry.dht import constants
from lbry.dht.serialization.datagram import make_compact_address, make_compact_ip, decode_compact_address

ALLOW_LOCALHOST = False
CACHE_SIZE = 16384
log = logging.getLogger(__name__)


@lru_cache(1024)
@lru_cache(CACHE_SIZE)
def make_kademlia_peer(node_id: typing.Optional[bytes], address: typing.Optional[str],
udp_port: typing.Optional[int] = None,
tcp_port: typing.Optional[int] = None,
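make_kademlia_peer is memoized with lru_cache, and this hunk raises its cache from 1024 entries to the new CACHE_SIZE of 16384 shared with the PeerManager caches, so identical (node_id, address, port) arguments keep returning the same object. Below is a small self-contained sketch of the memoized-factory idea; Peer and make_peer are illustrative stand-ins, not lbry's classes.

# Illustrative: memoizing a peer factory so equal arguments yield the same instance.
from functools import lru_cache
from typing import NamedTuple, Optional

class Peer(NamedTuple):
    node_id: Optional[bytes]
    address: str
    udp_port: Optional[int]
    tcp_port: Optional[int]

@lru_cache(16384)
def make_peer(node_id, address, udp_port=None, tcp_port=None):
    return Peer(node_id, address, udp_port, tcp_port)

assert make_peer(b"x" * 48, "1.2.3.4", 4444) is make_peer(b"x" * 48, "1.2.3.4", 4444)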
@ -26,17 +29,26 @@ def is_valid_public_ipv4(address, allow_localhost: bool = False):
|
|||
|
||||
|
||||
class PeerManager:
|
||||
peer_manager_keys_metric = Gauge(
|
||||
"peer_manager_keys", "Number of keys tracked by PeerManager dicts (sum)", namespace="dht_node",
|
||||
labelnames=("scope",)
|
||||
)
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop):
|
||||
self._loop = loop
|
||||
self._rpc_failures: typing.Dict[
|
||||
typing.Tuple[str, int], typing.Tuple[typing.Optional[float], typing.Optional[float]]
|
||||
] = {}
|
||||
self._last_replied: typing.Dict[typing.Tuple[str, int], float] = LRUCache(2048)
|
||||
self._last_sent: typing.Dict[typing.Tuple[str, int], float] = LRUCache(2048)
|
||||
self._last_requested: typing.Dict[typing.Tuple[str, int], float] = LRUCache(2048)
|
||||
self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = LRUCache(2048)
|
||||
self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = LRUCache(2048)
|
||||
self._node_tokens: typing.Dict[bytes, (float, bytes)] = LRUCache(2048)
|
||||
] = LRUCache(CACHE_SIZE)
|
||||
self._last_replied: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
|
||||
self._last_sent: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
|
||||
self._last_requested: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
|
||||
self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = LRUCache(CACHE_SIZE)
|
||||
self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = LRUCache(CACHE_SIZE)
|
||||
self._node_tokens: typing.Dict[bytes, (float, bytes)] = LRUCache(CACHE_SIZE)
|
||||
|
||||
def count_cache_keys(self):
|
||||
return len(self._rpc_failures) + len(self._last_replied) + len(self._last_sent) + len(
|
||||
self._last_requested) + len(self._node_id_mapping) + len(self._node_id_reverse_mapping) + len(
|
||||
self._node_tokens)
|
||||
|
||||
def reset(self):
|
||||
for statistic in (self._rpc_failures, self._last_replied, self._last_sent, self._last_requested):
|
||||
|
@ -86,6 +98,10 @@ class PeerManager:
|
|||
self._node_id_mapping.pop(self._node_id_reverse_mapping.pop(node_id))
|
||||
self._node_id_mapping[(address, udp_port)] = node_id
|
||||
self._node_id_reverse_mapping[node_id] = (address, udp_port)
|
||||
self.peer_manager_keys_metric.labels("global").set(self.count_cache_keys())
|
||||
|
||||
def get_node_id_for_endpoint(self, address, port):
|
||||
return self._node_id_mapping.get((address, port))
|
||||
|
||||
def prune(self): # TODO: periodically call this
|
||||
now = self._loop.time()
|
||||
|
@ -137,9 +153,10 @@ class PeerManager:
|
|||
def peer_is_good(self, peer: 'KademliaPeer'):
|
||||
return self.contact_triple_is_good(peer.node_id, peer.address, peer.udp_port)
|
||||
|
||||
def decode_tcp_peer_from_compact_address(self, compact_address: bytes) -> 'KademliaPeer': # pylint: disable=no-self-use
|
||||
node_id, address, tcp_port = decode_compact_address(compact_address)
|
||||
return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)
|
||||
|
||||
def decode_tcp_peer_from_compact_address(compact_address: bytes) -> 'KademliaPeer': # pylint: disable=no-self-use
|
||||
node_id, address, tcp_port = decode_compact_address(compact_address)
|
||||
return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)
|
||||
|
||||
|
||||
@dataclass(unsafe_hash=True)
|
||||
|
@ -154,7 +171,7 @@ class KademliaPeer:
|
|||
def __post_init__(self):
|
||||
if self._node_id is not None:
|
||||
if not len(self._node_id) == constants.HASH_LENGTH:
|
||||
raise ValueError("invalid node_id: {}".format(hexlify(self._node_id).decode()))
|
||||
raise ValueError("invalid node_id: {}".format(self._node_id.hex()))
|
||||
if self.udp_port is not None and not 1024 <= self.udp_port <= 65535:
|
||||
raise ValueError(f"invalid udp port: {self.address}:{self.udp_port}")
|
||||
if self.tcp_port is not None and not 1024 <= self.tcp_port <= 65535:
|
||||
|
@ -177,3 +194,6 @@ class KademliaPeer:
|
|||
|
||||
def compact_ip(self):
|
||||
return make_compact_ip(self.address)
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.__class__.__name__}({self.node_id.hex()[:8]}@{self.address}:{self.udp_port}-{self.tcp_port})"
|
||||
|
|
|
@@ -16,6 +16,12 @@ class DictDataStore:
self._peer_manager = peer_manager
self.completed_blobs: typing.Set[str] = set()

def keys(self):
return self._data_store.keys()

def __len__(self):
return self._data_store.__len__()

def removed_expired_peers(self):
now = self.loop.time()
keys = list(self._data_store.keys())
@ -1,18 +1,17 @@
|
|||
import asyncio
|
||||
from binascii import hexlify
|
||||
from itertools import chain
|
||||
from collections import defaultdict
|
||||
from collections import defaultdict, OrderedDict
|
||||
from collections.abc import AsyncIterator
|
||||
import typing
|
||||
import logging
|
||||
from typing import TYPE_CHECKING
|
||||
from lbry.dht import constants
|
||||
from lbry.dht.error import RemoteException, TransportNotConnected
|
||||
from lbry.dht.protocol.distance import Distance
|
||||
from lbry.dht.peer import make_kademlia_peer
|
||||
from lbry.dht.peer import make_kademlia_peer, decode_tcp_peer_from_compact_address
|
||||
from lbry.dht.serialization.datagram import PAGE_KEY
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from lbry.dht.protocol.routing_table import TreeRoutingTable
|
||||
from lbry.dht.protocol.protocol import KademliaProtocol
|
||||
from lbry.dht.peer import PeerManager, KademliaPeer
|
||||
|
||||
|
@ -27,6 +26,15 @@ class FindResponse:
|
|||
def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_close_kademlia_peers(self, peer_info) -> typing.Generator[typing.Iterator['KademliaPeer'], None, None]:
|
||||
for contact_triple in self.get_close_triples():
|
||||
node_id, address, udp_port = contact_triple
|
||||
try:
|
||||
yield make_kademlia_peer(node_id, address, udp_port)
|
||||
except ValueError:
|
||||
log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer_info.address,
|
||||
peer_info.udp_port, address, udp_port)
|
||||
|
||||
|
||||
class FindNodeResponse(FindResponse):
|
||||
def __init__(self, key: bytes, close_triples: typing.List[typing.Tuple[bytes, str, int]]):
|
||||
|
@ -57,57 +65,33 @@ class FindValueResponse(FindResponse):
|
|||
return [(node_id, address.decode(), port) for node_id, address, port in self.close_triples]
|
||||
|
||||
|
||||
def get_shortlist(routing_table: 'TreeRoutingTable', key: bytes,
|
||||
shortlist: typing.Optional[typing.List['KademliaPeer']]) -> typing.List['KademliaPeer']:
|
||||
"""
|
||||
If not provided, initialize the shortlist of peers to probe to the (up to) k closest peers in the routing table
|
||||
|
||||
:param routing_table: a TreeRoutingTable
|
||||
:param key: a 48 byte hash
|
||||
:param shortlist: optional manually provided shortlist, this is done during bootstrapping when there are no
|
||||
peers in the routing table. During bootstrap the shortlist is set to be the seed nodes.
|
||||
"""
|
||||
if len(key) != constants.HASH_LENGTH:
|
||||
raise ValueError("invalid key length: %i" % len(key))
|
||||
return shortlist or routing_table.find_close_peers(key)
|
||||
|
||||
|
||||
class IterativeFinder:
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
|
||||
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
|
||||
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
|
||||
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
|
||||
class IterativeFinder(AsyncIterator):
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop,
|
||||
protocol: 'KademliaProtocol', key: bytes,
|
||||
max_results: typing.Optional[int] = constants.K,
|
||||
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
|
||||
if len(key) != constants.HASH_LENGTH:
|
||||
raise ValueError("invalid key length: %i" % len(key))
|
||||
self.loop = loop
|
||||
self.peer_manager = peer_manager
|
||||
self.routing_table = routing_table
|
||||
self.peer_manager = protocol.peer_manager
|
||||
self.protocol = protocol
|
||||
|
||||
self.key = key
|
||||
self.bottom_out_limit = bottom_out_limit
|
||||
self.max_results = max_results
|
||||
self.exclude = exclude or []
|
||||
self.max_results = max(constants.K, max_results)
|
||||
|
||||
self.active: typing.Set['KademliaPeer'] = set()
|
||||
self.active: typing.Dict['KademliaPeer', int] = OrderedDict() # peer: distance, sorted
|
||||
self.contacted: typing.Set['KademliaPeer'] = set()
|
||||
self.distance = Distance(key)
|
||||
|
||||
self.closest_peer: typing.Optional['KademliaPeer'] = None
|
||||
self.prev_closest_peer: typing.Optional['KademliaPeer'] = None
|
||||
self.iteration_queue = asyncio.Queue()
|
||||
|
||||
self.iteration_queue = asyncio.Queue(loop=self.loop)
|
||||
|
||||
self.running_probes: typing.Set[asyncio.Task] = set()
|
||||
self.running_probes: typing.Dict['KademliaPeer', asyncio.Task] = {}
|
||||
self.iteration_count = 0
|
||||
self.bottom_out_count = 0
|
||||
self.running = False
|
||||
self.tasks: typing.List[asyncio.Task] = []
|
||||
self.delayed_calls: typing.List[asyncio.Handle] = []
|
||||
for peer in get_shortlist(routing_table, key, shortlist):
|
||||
for peer in shortlist:
|
||||
if peer.node_id:
|
||||
self._add_active(peer)
|
||||
self._add_active(peer, force=True)
|
||||
else:
|
||||
# seed nodes
|
||||
self._schedule_probe(peer)
|
||||
|
@ -139,66 +123,79 @@ class IterativeFinder:
|
|||
"""
|
||||
return []
|
||||
|
||||
def _is_closer(self, peer: 'KademliaPeer') -> bool:
|
||||
return not self.closest_peer or self.distance.is_closer(peer.node_id, self.closest_peer.node_id)
|
||||
|
||||
def _add_active(self, peer):
|
||||
def _add_active(self, peer, force=False):
|
||||
if not force and self.peer_manager.peer_is_good(peer) is False:
|
||||
return
|
||||
if peer in self.contacted:
|
||||
return
|
||||
if peer not in self.active and peer.node_id and peer.node_id != self.protocol.node_id:
|
||||
self.active.add(peer)
|
||||
if self._is_closer(peer):
|
||||
self.prev_closest_peer = self.closest_peer
|
||||
self.closest_peer = peer
|
||||
self.active[peer] = self.distance(peer.node_id)
|
||||
self.active = OrderedDict(sorted(self.active.items(), key=lambda item: item[1]))
|
||||
|
||||
async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
|
||||
self._add_active(peer)
|
||||
for contact_triple in response.get_close_triples():
|
||||
node_id, address, udp_port = contact_triple
|
||||
try:
|
||||
self._add_active(make_kademlia_peer(node_id, address, udp_port))
|
||||
except ValueError:
|
||||
log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer.address,
|
||||
peer.udp_port, address, udp_port)
|
||||
for new_peer in response.get_close_kademlia_peers(peer):
|
||||
self._add_active(new_peer)
|
||||
self.check_result_ready(response)
|
||||
self._log_state(reason="check result")
|
||||
|
||||
def _reset_closest(self, peer):
|
||||
if peer in self.active:
|
||||
del self.active[peer]
|
||||
|
||||
async def _send_probe(self, peer: 'KademliaPeer'):
|
||||
try:
|
||||
response = await self.send_probe(peer)
|
||||
except asyncio.TimeoutError:
|
||||
self.active.discard(peer)
|
||||
self._reset_closest(peer)
|
||||
return
|
||||
except asyncio.CancelledError:
|
||||
log.debug("%s[%x] cancelled probe",
|
||||
type(self).__name__, id(self))
|
||||
raise
|
||||
except ValueError as err:
|
||||
log.warning(str(err))
|
||||
self.active.discard(peer)
|
||||
self._reset_closest(peer)
|
||||
return
|
||||
except TransportNotConnected:
|
||||
return self.aclose()
|
||||
await self._aclose(reason="not connected")
|
||||
return
|
||||
except RemoteException:
|
||||
self._reset_closest(peer)
|
||||
return
|
||||
return await self._handle_probe_result(peer, response)
|
||||
|
||||
async def _search_round(self):
|
||||
def _search_round(self):
|
||||
"""
|
||||
Send up to constants.alpha (5) probes to closest active peers
|
||||
"""
|
||||
|
||||
added = 0
|
||||
to_probe = list(self.active - self.contacted)
|
||||
to_probe.sort(key=lambda peer: self.distance(self.key))
|
||||
for peer in to_probe:
|
||||
if added >= constants.ALPHA:
|
||||
for index, peer in enumerate(self.active.keys()):
|
||||
if index == 0:
|
||||
log.debug("%s[%x] closest to probe: %s",
|
||||
type(self).__name__, id(self),
|
||||
peer.node_id.hex()[:8])
|
||||
if peer in self.contacted:
|
||||
continue
|
||||
if len(self.running_probes) >= constants.ALPHA:
|
||||
break
|
||||
if index > (constants.K + len(self.running_probes)):
|
||||
break
|
||||
origin_address = (peer.address, peer.udp_port)
|
||||
if origin_address in self.exclude:
|
||||
continue
|
||||
if peer.node_id == self.protocol.node_id:
|
||||
continue
|
||||
if origin_address == (self.protocol.external_ip, self.protocol.udp_port):
|
||||
continue
|
||||
self._schedule_probe(peer)
|
||||
added += 1
|
||||
log.debug("running %d probes", len(self.running_probes))
|
||||
log.debug("%s[%x] running %d probes for key %s",
|
||||
type(self).__name__, id(self),
|
||||
len(self.running_probes), self.key.hex()[:8])
|
||||
if not added and not self.running_probes:
|
||||
log.debug("search for %s exhausted", hexlify(self.key)[:8])
|
||||
log.debug("%s[%x] search for %s exhausted",
|
||||
type(self).__name__, id(self),
|
||||
self.key.hex()[:8])
|
||||
self.search_exhausted()
|
||||
|
||||
def _schedule_probe(self, peer: 'KademliaPeer'):
|
||||
|
@ -207,33 +204,24 @@ class IterativeFinder:
|
|||
t = self.loop.create_task(self._send_probe(peer))
|
||||
|
||||
def callback(_):
|
||||
self.running_probes.difference_update({
|
||||
probe for probe in self.running_probes if probe.done() or probe == t
|
||||
})
|
||||
if not self.running_probes:
|
||||
self.tasks.append(self.loop.create_task(self._search_task(0.0)))
|
||||
self.running_probes.pop(peer, None)
|
||||
if self.running:
|
||||
self._search_round()
|
||||
|
||||
t.add_done_callback(callback)
|
||||
self.running_probes.add(t)
|
||||
self.running_probes[peer] = t
|
||||
|
||||
async def _search_task(self, delay: typing.Optional[float] = constants.ITERATIVE_LOOKUP_DELAY):
|
||||
try:
|
||||
if self.running:
|
||||
await self._search_round()
|
||||
if self.running:
|
||||
self.delayed_calls.append(self.loop.call_later(delay, self._search))
|
||||
except (asyncio.CancelledError, StopAsyncIteration, TransportNotConnected):
|
||||
if self.running:
|
||||
self.loop.call_soon(self.aclose)
|
||||
|
||||
def _search(self):
|
||||
self.tasks.append(self.loop.create_task(self._search_task()))
|
||||
def _log_state(self, reason="?"):
|
||||
log.debug("%s[%x] [%s] %s: %i active nodes %i contacted %i produced %i queued",
|
||||
type(self).__name__, id(self), self.key.hex()[:8],
|
||||
reason, len(self.active), len(self.contacted),
|
||||
self.iteration_count, self.iteration_queue.qsize())
|
||||
|
||||
def __aiter__(self):
|
||||
if self.running:
|
||||
raise Exception("already running")
|
||||
self.running = True
|
||||
self._search()
|
||||
self.loop.call_soon(self._search_round)
|
||||
return self
|
||||
|
||||
async def __anext__(self) -> typing.List['KademliaPeer']:
|
||||
|
@ -246,47 +234,57 @@ class IterativeFinder:
|
|||
raise StopAsyncIteration
|
||||
self.iteration_count += 1
|
||||
return result
|
||||
except (asyncio.CancelledError, StopAsyncIteration):
|
||||
self.loop.call_soon(self.aclose)
|
||||
except asyncio.CancelledError:
|
||||
await self._aclose(reason="cancelled")
|
||||
raise
|
||||
except StopAsyncIteration:
|
||||
await self._aclose(reason="no more results")
|
||||
raise
|
||||
|
||||
def aclose(self):
|
||||
async def _aclose(self, reason="?"):
|
||||
log.debug("%s[%x] [%s] shutdown because %s: %i active nodes %i contacted %i produced %i queued",
|
||||
type(self).__name__, id(self), self.key.hex()[:8],
|
||||
reason, len(self.active), len(self.contacted),
|
||||
self.iteration_count, self.iteration_queue.qsize())
|
||||
self.running = False
|
||||
self.iteration_queue.put_nowait(None)
|
||||
for task in chain(self.tasks, self.running_probes, self.delayed_calls):
|
||||
for task in chain(self.tasks, self.running_probes.values()):
|
||||
task.cancel()
|
||||
self.tasks.clear()
|
||||
self.running_probes.clear()
|
||||
self.delayed_calls.clear()
|
||||
|
||||
async def aclose(self):
|
||||
if self.running:
|
||||
await self._aclose(reason="aclose")
|
||||
log.debug("%s[%x] [%s] async close completed",
|
||||
type(self).__name__, id(self), self.key.hex()[:8])
|
||||
|
||||
class IterativeNodeFinder(IterativeFinder):
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
|
||||
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
|
||||
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
|
||||
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop,
|
||||
protocol: 'KademliaProtocol', key: bytes,
|
||||
max_results: typing.Optional[int] = constants.K,
|
||||
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
|
||||
super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
|
||||
shortlist)
|
||||
super().__init__(loop, protocol, key, max_results, shortlist)
|
||||
self.yielded_peers: typing.Set['KademliaPeer'] = set()
|
||||
|
||||
async def send_probe(self, peer: 'KademliaPeer') -> FindNodeResponse:
|
||||
log.debug("probing %s:%d %s", peer.address, peer.udp_port, hexlify(peer.node_id)[:8] if peer.node_id else '')
|
||||
log.debug("probe %s:%d (%s) for NODE %s",
|
||||
peer.address, peer.udp_port, peer.node_id.hex()[:8] if peer.node_id else '', self.key.hex()[:8])
|
||||
response = await self.protocol.get_rpc_peer(peer).find_node(self.key)
|
||||
return FindNodeResponse(self.key, response)
|
||||
|
||||
def search_exhausted(self):
|
||||
self.put_result(self.active, finish=True)
|
||||
self.put_result(self.active.keys(), finish=True)
|
||||
|
||||
def put_result(self, from_iter: typing.Iterable['KademliaPeer'], finish=False):
|
||||
not_yet_yielded = [
|
||||
peer for peer in from_iter
|
||||
if peer not in self.yielded_peers
|
||||
and peer.node_id != self.protocol.node_id
|
||||
and self.peer_manager.peer_is_good(peer) is not False
|
||||
and self.peer_manager.peer_is_good(peer) is True # return only peers who answered
|
||||
]
|
||||
not_yet_yielded.sort(key=lambda peer: self.distance(peer.node_id))
|
||||
to_yield = not_yet_yielded[:min(constants.K, len(not_yet_yielded))]
|
||||
to_yield = not_yet_yielded[:max(constants.K, self.max_results)]
|
||||
if to_yield:
|
||||
self.yielded_peers.update(to_yield)
|
||||
self.iteration_queue.put_nowait(to_yield)
|
||||
|
@ -298,27 +296,15 @@ class IterativeNodeFinder(IterativeFinder):
|
|||
|
||||
if found:
|
||||
log.debug("found")
|
||||
return self.put_result(self.active, finish=True)
|
||||
if self.prev_closest_peer and self.closest_peer and not self._is_closer(self.prev_closest_peer):
|
||||
# log.info("improving, %i %i %i %i %i", len(self.shortlist), len(self.active), len(self.contacted),
|
||||
# self.bottom_out_count, self.iteration_count)
|
||||
self.bottom_out_count = 0
|
||||
elif self.prev_closest_peer and self.closest_peer:
|
||||
self.bottom_out_count += 1
|
||||
log.info("bottom out %i %i %i", len(self.active), len(self.contacted), self.bottom_out_count)
|
||||
if self.bottom_out_count >= self.bottom_out_limit or self.iteration_count >= self.bottom_out_limit:
|
||||
log.info("limit hit")
|
||||
self.put_result(self.active, True)
|
||||
return self.put_result(self.active.keys(), finish=True)
|
||||
|
||||
|
||||
class IterativeValueFinder(IterativeFinder):
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
|
||||
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
|
||||
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
|
||||
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop,
|
||||
protocol: 'KademliaProtocol', key: bytes,
|
||||
max_results: typing.Optional[int] = constants.K,
|
||||
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
|
||||
super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
|
||||
shortlist)
|
||||
super().__init__(loop, protocol, key, max_results, shortlist)
|
||||
self.blob_peers: typing.Set['KademliaPeer'] = set()
|
||||
# this tracks the index of the most recent page we requested from each peer
|
||||
self.peer_pages: typing.DefaultDict['KademliaPeer', int] = defaultdict(int)
|
||||
|
@ -326,6 +312,8 @@ class IterativeValueFinder(IterativeFinder):
|
|||
self.discovered_peers: typing.Dict['KademliaPeer', typing.Set['KademliaPeer']] = defaultdict(set)
|
||||
|
||||
async def send_probe(self, peer: 'KademliaPeer') -> FindValueResponse:
|
||||
log.debug("probe %s:%d (%s) for VALUE %s",
|
||||
peer.address, peer.udp_port, peer.node_id.hex()[:8], self.key.hex()[:8])
|
||||
page = self.peer_pages[peer]
|
||||
response = await self.protocol.get_rpc_peer(peer).find_value(self.key, page=page)
|
||||
parsed = FindValueResponse(self.key, response)
|
||||
|
@ -335,7 +323,7 @@ class IterativeValueFinder(IterativeFinder):
|
|||
decoded_peers = set()
|
||||
for compact_addr in parsed.found_compact_addresses:
|
||||
try:
|
||||
decoded_peers.add(self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr))
|
||||
decoded_peers.add(decode_tcp_peer_from_compact_address(compact_addr))
|
||||
except ValueError:
|
||||
log.warning("misbehaving peer %s:%i returned invalid peer for blob",
|
||||
peer.address, peer.udp_port)
|
||||
|
@ -347,7 +335,6 @@ class IterativeValueFinder(IterativeFinder):
|
|||
already_known + len(parsed.found_compact_addresses))
|
||||
if len(self.discovered_peers[peer]) != already_known + len(parsed.found_compact_addresses):
|
||||
log.warning("misbehaving peer %s:%i returned duplicate peers for blob", peer.address, peer.udp_port)
|
||||
parsed.found_compact_addresses.clear()
|
||||
elif len(parsed.found_compact_addresses) >= constants.K and self.peer_pages[peer] < parsed.pages:
|
||||
# the peer returned a full page and indicates it has more
|
||||
self.peer_pages[peer] += 1
|
||||
|
@ -358,26 +345,15 @@ class IterativeValueFinder(IterativeFinder):
|
|||
|
||||
def check_result_ready(self, response: FindValueResponse):
|
||||
if response.found:
|
||||
blob_peers = [self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr)
|
||||
blob_peers = [decode_tcp_peer_from_compact_address(compact_addr)
|
||||
for compact_addr in response.found_compact_addresses]
|
||||
to_yield = []
|
||||
self.bottom_out_count = 0
|
||||
for blob_peer in blob_peers:
|
||||
if blob_peer not in self.blob_peers:
|
||||
self.blob_peers.add(blob_peer)
|
||||
to_yield.append(blob_peer)
|
||||
if to_yield:
|
||||
# log.info("found %i new peers for blob", len(to_yield))
|
||||
self.iteration_queue.put_nowait(to_yield)
|
||||
# if self.max_results and len(self.blob_peers) >= self.max_results:
|
||||
# log.info("enough blob peers found")
|
||||
# if not self.finished.is_set():
|
||||
# self.finished.set()
|
||||
elif self.prev_closest_peer and self.closest_peer:
|
||||
self.bottom_out_count += 1
|
||||
if self.bottom_out_count >= self.bottom_out_limit:
|
||||
log.info("blob peer search bottomed out")
|
||||
self.iteration_queue.put_nowait(None)
|
||||
|
||||
def get_initial_result(self) -> typing.List['KademliaPeer']:
|
||||
if self.protocol.data_store.has_peers_for_blob(self.key):
|
||||
|
|
|
@ -3,12 +3,14 @@ import socket
|
|||
import functools
|
||||
import hashlib
|
||||
import asyncio
|
||||
import time
|
||||
import typing
|
||||
import binascii
|
||||
import random
|
||||
from asyncio.protocols import DatagramProtocol
|
||||
from asyncio.transports import DatagramTransport
|
||||
|
||||
from prometheus_client import Gauge, Counter, Histogram
|
||||
|
||||
from lbry.dht import constants
|
||||
from lbry.dht.serialization.bencoding import DecodeError
|
||||
from lbry.dht.serialization.datagram import decode_datagram, ErrorDatagram, ResponseDatagram, RequestDatagram
|
||||
|
@ -31,6 +33,11 @@ OLD_PROTOCOL_ERRORS = {
|
|||
|
||||
|
||||
class KademliaRPC:
|
||||
stored_blob_metric = Gauge(
|
||||
"stored_blobs", "Number of blobs announced by other peers", namespace="dht_node",
|
||||
labelnames=("scope",),
|
||||
)
|
||||
|
||||
def __init__(self, protocol: 'KademliaProtocol', loop: asyncio.AbstractEventLoop, peer_port: int = 3333):
|
||||
self.protocol = protocol
|
||||
self.loop = loop
|
||||
|
@ -62,6 +69,7 @@ class KademliaRPC:
|
|||
self.protocol.data_store.add_peer_to_blob(
|
||||
rpc_contact, blob_hash
|
||||
)
|
||||
self.stored_blob_metric.labels("global").set(len(self.protocol.data_store))
|
||||
return b'OK'
|
||||
|
||||
def find_node(self, rpc_contact: 'KademliaPeer', key: bytes) -> typing.List[typing.Tuple[bytes, str, int]]:
|
||||
|
@ -97,7 +105,7 @@ class KademliaRPC:
|
|||
if not rpc_contact.tcp_port or peer.compact_address_tcp() != rpc_contact.compact_address_tcp()
|
||||
]
|
||||
# if we don't have k storing peers to return and we have this hash locally, include our contact information
|
||||
if len(peers) < constants.K and binascii.hexlify(key).decode() in self.protocol.data_store.completed_blobs:
|
||||
if len(peers) < constants.K and key.hex() in self.protocol.data_store.completed_blobs:
|
||||
peers.append(self.compact_address())
|
||||
if not peers:
|
||||
response[PAGE_KEY] = 0
|
||||
|
@ -210,6 +218,10 @@ class PingQueue:
|
|||
def running(self):
|
||||
return self._running
|
||||
|
||||
@property
|
||||
def busy(self):
|
||||
return self._running and (any(self._running_pings) or any(self._pending_contacts))
|
||||
|
||||
def enqueue_maybe_ping(self, *peers: 'KademliaPeer', delay: typing.Optional[float] = None):
|
||||
delay = delay if delay is not None else self._default_delay
|
||||
now = self._loop.time()
|
||||
|
@ -221,7 +233,7 @@ class PingQueue:
|
|||
async def ping_task():
|
||||
try:
|
||||
if self._protocol.peer_manager.peer_is_good(peer):
|
||||
if peer not in self._protocol.routing_table.get_peers():
|
||||
if not self._protocol.routing_table.get_peer(peer.node_id):
|
||||
self._protocol.add_peer(peer)
|
||||
return
|
||||
await self._protocol.get_rpc_peer(peer).ping()
|
||||
|
@ -241,7 +253,7 @@ class PingQueue:
|
|||
del self._pending_contacts[peer]
|
||||
self.maybe_ping(peer)
|
||||
break
|
||||
await asyncio.sleep(1, loop=self._loop)
|
||||
await asyncio.sleep(1)
|
||||
|
||||
def start(self):
|
||||
assert not self._running
|
||||
|
@ -260,9 +272,33 @@ class PingQueue:
|
|||
|
||||
|
||||
class KademliaProtocol(DatagramProtocol):
|
||||
request_sent_metric = Counter(
|
||||
"request_sent", "Number of requests send from DHT RPC protocol", namespace="dht_node",
|
||||
labelnames=("method",),
|
||||
)
|
||||
request_success_metric = Counter(
|
||||
"request_success", "Number of successful requests", namespace="dht_node",
|
||||
labelnames=("method",),
|
||||
)
|
||||
request_error_metric = Counter(
|
||||
"request_error", "Number of errors returned from request to other peers", namespace="dht_node",
|
||||
labelnames=("method",),
|
||||
)
|
||||
HISTOGRAM_BUCKETS = (
|
||||
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 3.0, 3.5, 4.0, 4.50, 5.0, 5.50, 6.0, float('inf')
|
||||
)
|
||||
response_time_metric = Histogram(
|
||||
"response_time", "Response times of DHT RPC requests", namespace="dht_node", buckets=HISTOGRAM_BUCKETS,
|
||||
labelnames=("method",)
|
||||
)
|
||||
received_request_metric = Counter(
|
||||
"received_request", "Number of received DHT RPC requests", namespace="dht_node",
|
||||
labelnames=("method",),
|
||||
)
|
||||
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, external_ip: str,
|
||||
udp_port: int, peer_port: int, rpc_timeout: float = constants.RPC_TIMEOUT,
|
||||
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
|
||||
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_boostrap_node: bool = False):
|
||||
self.peer_manager = peer_manager
|
||||
self.loop = loop
|
||||
self.node_id = node_id
|
||||
|
@ -277,15 +313,16 @@ class KademliaProtocol(DatagramProtocol):
|
|||
self.transport: DatagramTransport = None
|
||||
self.old_token_secret = constants.generate_id()
|
||||
self.token_secret = constants.generate_id()
|
||||
self.routing_table = TreeRoutingTable(self.loop, self.peer_manager, self.node_id, split_buckets_under_index)
|
||||
self.routing_table = TreeRoutingTable(
|
||||
self.loop, self.peer_manager, self.node_id, split_buckets_under_index, is_bootstrap_node=is_boostrap_node)
|
||||
self.data_store = DictDataStore(self.loop, self.peer_manager)
|
||||
self.ping_queue = PingQueue(self.loop, self)
|
||||
self.node_rpc = KademliaRPC(self, self.loop, self.peer_port)
|
||||
self.rpc_timeout = rpc_timeout
|
||||
self._split_lock = asyncio.Lock(loop=self.loop)
|
||||
self._split_lock = asyncio.Lock()
|
||||
self._to_remove: typing.Set['KademliaPeer'] = set()
|
||||
self._to_add: typing.Set['KademliaPeer'] = set()
|
||||
self._wakeup_routing_task = asyncio.Event(loop=self.loop)
|
||||
self._wakeup_routing_task = asyncio.Event()
|
||||
self.maintaing_routing_task: typing.Optional[asyncio.Task] = None
|
||||
|
||||
@functools.lru_cache(128)
|
||||
|
@ -324,72 +361,10 @@ class KademliaProtocol(DatagramProtocol):
|
|||
return args, {}
|
||||
|
||||
async def _add_peer(self, peer: 'KademliaPeer'):
|
||||
if not peer.node_id:
|
||||
log.warning("Tried adding a peer with no node id!")
|
||||
return False
|
||||
for my_peer in self.routing_table.get_peers():
|
||||
if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
|
||||
self.routing_table.remove_peer(my_peer)
|
||||
self.routing_table.join_buckets()
|
||||
bucket_index = self.routing_table.kbucket_index(peer.node_id)
|
||||
if self.routing_table.buckets[bucket_index].add_peer(peer):
|
||||
return True
|
||||
|
||||
# The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
|
||||
if self.routing_table.should_split(bucket_index, peer.node_id):
|
||||
self.routing_table.split_bucket(bucket_index)
|
||||
# Retry the insertion attempt
|
||||
result = await self._add_peer(peer)
|
||||
self.routing_table.join_buckets()
|
||||
return result
|
||||
else:
|
||||
# We can't split the k-bucket
|
||||
#
|
||||
# The 13 page kademlia paper specifies that the least recently contacted node in the bucket
|
||||
# shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
|
||||
# the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
|
||||
#
|
||||
# A reasonable extension to this is BEP 0005, which extends the above:
|
||||
#
|
||||
# Not all nodes that we learn about are equal. Some are "good" and some are not.
|
||||
# Many nodes using the DHT are able to send queries and receive responses,
|
||||
# but are not able to respond to queries from other nodes. It is important that
|
||||
# each node's routing table must contain only known good nodes. A good node is
|
||||
# a node has responded to one of our queries within the last 15 minutes. A node
|
||||
# is also good if it has ever responded to one of our queries and has sent us a
|
||||
# query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
|
||||
# questionable. Nodes become bad when they fail to respond to multiple queries
|
||||
# in a row. Nodes that we know are good are given priority over nodes with unknown status.
|
||||
#
|
||||
# When there are bad or questionable nodes in the bucket, the least recent is selected for
|
||||
# potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
|
||||
# contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
|
||||
# is ignored if the pinged node replies.
|
||||
|
||||
not_good_contacts = self.routing_table.buckets[bucket_index].get_bad_or_unknown_peers()
|
||||
not_recently_replied = []
|
||||
for my_peer in not_good_contacts:
|
||||
last_replied = self.peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
|
||||
if not last_replied or last_replied + 60 < self.loop.time():
|
||||
not_recently_replied.append(my_peer)
|
||||
if not_recently_replied:
|
||||
to_replace = not_recently_replied[0]
|
||||
else:
|
||||
to_replace = self.routing_table.buckets[bucket_index].peers[0]
|
||||
last_replied = self.peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
|
||||
if last_replied and last_replied + 60 > self.loop.time():
|
||||
return False
|
||||
log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
|
||||
try:
|
||||
to_replace_rpc = self.get_rpc_peer(to_replace)
|
||||
await to_replace_rpc.ping()
|
||||
return False
|
||||
except asyncio.TimeoutError:
|
||||
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
|
||||
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
|
||||
if to_replace in self.routing_table.buckets[bucket_index]:
|
||||
self.routing_table.buckets[bucket_index].remove_peer(to_replace)
|
||||
return await self._add_peer(peer)
|
||||
async def probe(some_peer: 'KademliaPeer'):
|
||||
rpc_peer = self.get_rpc_peer(some_peer)
|
||||
await rpc_peer.ping()
|
||||
return await self.routing_table.add_peer(peer, probe)
|
||||
|
||||
def add_peer(self, peer: 'KademliaPeer'):
|
||||
if peer.node_id == self.node_id:
|
||||
|
@ -407,16 +382,15 @@ class KademliaProtocol(DatagramProtocol):
|
|||
async with self._split_lock:
|
||||
peer = self._to_remove.pop()
|
||||
self.routing_table.remove_peer(peer)
|
||||
self.routing_table.join_buckets()
|
||||
while self._to_add:
|
||||
async with self._split_lock:
|
||||
await self._add_peer(self._to_add.pop())
|
||||
await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1, loop=self.loop), loop=self.loop)
|
||||
await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1))
|
||||
self._wakeup_routing_task.clear()
|
||||
|
||||
def _handle_rpc(self, sender_contact: 'KademliaPeer', message: RequestDatagram):
|
||||
assert sender_contact.node_id != self.node_id, (binascii.hexlify(sender_contact.node_id)[:8].decode(),
|
||||
binascii.hexlify(self.node_id)[:8].decode())
|
||||
assert sender_contact.node_id != self.node_id, (sender_contact.node_id.hex()[:8],
|
||||
self.node_id.hex()[:8])
|
||||
method = message.method
|
||||
if method not in [b'ping', b'store', b'findNode', b'findValue']:
|
||||
raise AttributeError('Invalid method: %s' % message.method.decode())
|
||||
|
@ -448,10 +422,10 @@ class KademliaProtocol(DatagramProtocol):
|
|||
|
||||
def handle_request_datagram(self, address: typing.Tuple[str, int], request_datagram: RequestDatagram):
|
||||
# This is an RPC method request
|
||||
self.received_request_metric.labels(method=request_datagram.method).inc()
|
||||
self.peer_manager.report_last_requested(address[0], address[1])
|
||||
try:
|
||||
peer = self.routing_table.get_peer(request_datagram.node_id)
|
||||
except IndexError:
|
||||
peer = self.routing_table.get_peer(request_datagram.node_id)
|
||||
if not peer:
|
||||
try:
|
||||
peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
|
||||
except ValueError as err:
|
||||
|
@ -561,7 +535,7 @@ class KademliaProtocol(DatagramProtocol):
|
|||
message = decode_datagram(datagram)
|
||||
except (ValueError, TypeError, DecodeError):
|
||||
self.peer_manager.report_failure(address[0], address[1])
|
||||
log.warning("Couldn't decode dht datagram from %s: %s", address, binascii.hexlify(datagram).decode())
|
||||
log.warning("Couldn't decode dht datagram from %s: %s", address, datagram.hex())
|
||||
return
|
||||
|
||||
if isinstance(message, RequestDatagram):
|
||||
|
@ -576,14 +550,19 @@ class KademliaProtocol(DatagramProtocol):
|
|||
self._send(peer, request)
|
||||
response_fut = self.sent_messages[request.rpc_id][1]
|
||||
try:
|
||||
self.request_sent_metric.labels(method=request.method).inc()
|
||||
start = time.perf_counter()
|
||||
response = await asyncio.wait_for(response_fut, self.rpc_timeout)
|
||||
self.response_time_metric.labels(method=request.method).observe(time.perf_counter() - start)
|
||||
self.peer_manager.report_last_replied(peer.address, peer.udp_port)
|
||||
self.request_success_metric.labels(method=request.method).inc()
|
||||
return response
|
||||
except asyncio.CancelledError:
|
||||
if not response_fut.done():
|
||||
response_fut.cancel()
|
||||
raise
|
||||
except (asyncio.TimeoutError, RemoteException):
|
||||
self.request_error_metric.labels(method=request.method).inc()
|
||||
self.peer_manager.report_failure(peer.address, peer.udp_port)
|
||||
if self.peer_manager.peer_is_good(peer) is False:
|
||||
self.remove_peer(peer)
|
||||
|
@ -603,7 +582,7 @@ class KademliaProtocol(DatagramProtocol):
|
|||
if len(data) > constants.MSG_SIZE_LIMIT:
|
||||
log.warning("cannot send datagram larger than %i bytes (packet is %i bytes)",
|
||||
constants.MSG_SIZE_LIMIT, len(data))
|
||||
log.debug("Packet is too large to send: %s", binascii.hexlify(data[:3500]).decode())
|
||||
log.debug("Packet is too large to send: %s", data[:3500].hex())
|
||||
raise ValueError(
|
||||
f"cannot send datagram larger than {constants.MSG_SIZE_LIMIT} bytes (packet is {len(data)} bytes)"
|
||||
)
|
||||
|
@ -663,13 +642,13 @@ class KademliaProtocol(DatagramProtocol):
|
|||
res = await self.get_rpc_peer(peer).store(hash_value)
|
||||
if res != b"OK":
|
||||
raise ValueError(res)
|
||||
log.debug("Stored %s to %s", binascii.hexlify(hash_value).decode()[:8], peer)
|
||||
log.debug("Stored %s to %s", hash_value.hex()[:8], peer)
|
||||
return peer.node_id, True
|
||||
|
||||
try:
|
||||
return await __store()
|
||||
except asyncio.TimeoutError:
|
||||
log.debug("Timeout while storing blob_hash %s at %s", binascii.hexlify(hash_value).decode()[:8], peer)
|
||||
log.debug("Timeout while storing blob_hash %s at %s", hash_value.hex()[:8], peer)
|
||||
return peer.node_id, False
|
||||
except ValueError as err:
|
||||
log.error("Unexpected response: %s", err)
|
||||
|
|
|
@@ -4,7 +4,11 @@ import logging
import typing
import itertools

from prometheus_client import Gauge

from lbry import utils
from lbry.dht import constants
from lbry.dht.error import RemoteException
from lbry.dht.protocol.distance import Distance
if typing.TYPE_CHECKING:
from lbry.dht.peer import KademliaPeer, PeerManager
@@ -13,10 +17,20 @@ log = logging.getLogger(__name__)

class KBucket:
""" Description - later
"""
Kademlia K-bucket implementation.
"""
peer_in_routing_table_metric = Gauge(
"peers_in_routing_table", "Number of peers on routing table", namespace="dht_node",
labelnames=("scope",)
)
peer_with_x_bit_colliding_metric = Gauge(
"peer_x_bit_colliding", "Number of peers with at least X bits colliding with this node id",
namespace="dht_node", labelnames=("amount",)
)

def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int, node_id: bytes):
def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int,
node_id: bytes, capacity: int = constants.K):
"""
@param range_min: The lower boundary for the range in the n-bit ID
space covered by this k-bucket
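KBucket now carries labeled prometheus_client gauges, matching the metrics added to Node, PeerManager, and KademliaProtocol earlier in the diff. A short standalone example of that labeled-Gauge pattern follows (requires the prometheus_client package; run it on its own, since metric names must be unique per registry).

# Illustrative: defining and updating a labeled Gauge with prometheus_client.
from prometheus_client import Gauge

peers_in_routing_table = Gauge(
    "peers_in_routing_table", "Number of peers on routing table",
    namespace="dht_node", labelnames=("scope",),
)

peers_in_routing_table.labels("global").inc()          # a peer entered a bucket
peers_in_routing_table.labels("global").dec()          # a peer was removed
peers_in_routing_table.labels(scope="global").set(42)  # or set an absolute value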
@ -24,12 +38,12 @@ class KBucket:
|
|||
covered by this k-bucket
|
||||
"""
|
||||
self._peer_manager = peer_manager
|
||||
self.last_accessed = 0
|
||||
self.range_min = range_min
|
||||
self.range_max = range_max
|
||||
self.peers: typing.List['KademliaPeer'] = []
|
||||
self._node_id = node_id
|
||||
self._distance_to_self = Distance(node_id)
|
||||
self.capacity = capacity
|
||||
|
||||
def add_peer(self, peer: 'KademliaPeer') -> bool:
|
||||
""" Add contact to _contact list in the right order. This will move the
|
||||
|
@ -56,18 +70,19 @@ class KBucket:
|
|||
self.peers.remove(local_peer)
|
||||
self.peers.append(peer)
|
||||
return True
|
||||
if len(self.peers) < constants.K:
|
||||
if len(self.peers) < self.capacity:
|
||||
self.peers.append(peer)
|
||||
self.peer_in_routing_table_metric.labels("global").inc()
|
||||
bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
|
||||
self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).inc()
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
# raise BucketFull("No space in bucket to insert contact")
|
||||
|
||||
def get_peer(self, node_id: bytes) -> 'KademliaPeer':
|
||||
for peer in self.peers:
|
||||
if peer.node_id == node_id:
|
||||
return peer
|
||||
raise IndexError(node_id)
|
||||
|
||||
def get_peers(self, count=-1, exclude_contact=None, sort_distance_to=None) -> typing.List['KademliaPeer']:
|
||||
""" Returns a list containing up to the first count number of contacts
|
||||
|
@ -124,6 +139,9 @@ class KBucket:
|
|||
|
||||
def remove_peer(self, peer: 'KademliaPeer') -> None:
|
||||
self.peers.remove(peer)
|
||||
self.peer_in_routing_table_metric.labels("global").dec()
|
||||
bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
|
||||
self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).dec()
|
||||
|
||||
def key_in_range(self, key: bytes) -> bool:
|
||||
""" Tests whether the specified key (i.e. node ID) is in the range
|
||||
|
@ -161,24 +179,36 @@ class TreeRoutingTable:
|
|||
version of the Kademlia paper, in section 2.4. It does, however, use the
|
||||
ping RPC-based k-bucket eviction algorithm described in section 2.2 of
|
||||
that paper.
|
||||
|
||||
BOOTSTRAP MODE: if set to True, we always add all peers. This keeps a
|
||||
bootstrap node from being biased towards its own node id, so its replies are
|
||||
the best it can provide (a joining peer learns its neighbors immediately).
|
||||
Over time, this will need to be optimized to use the disk, since holding
|
||||
everything in memory won't be feasible anymore.
|
||||
See: https://github.com/bittorrent/bootstrap-dht
|
||||
"""
|
||||
bucket_in_routing_table_metric = Gauge(
|
||||
"buckets_in_routing_table", "Number of buckets on routing table", namespace="dht_node",
|
||||
labelnames=("scope",)
|
||||
)
|
||||
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', parent_node_id: bytes,
|
||||
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
|
||||
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False):
|
||||
self._loop = loop
|
||||
self._peer_manager = peer_manager
|
||||
self._parent_node_id = parent_node_id
|
||||
self._split_buckets_under_index = split_buckets_under_index
|
||||
self.buckets: typing.List[KBucket] = [
|
||||
KBucket(
|
||||
self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id
|
||||
self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id,
|
||||
capacity=1 << 32 if is_bootstrap_node else constants.K
|
||||
)
|
||||
]
|
||||
|
||||
def get_peers(self) -> typing.List['KademliaPeer']:
|
||||
return list(itertools.chain.from_iterable(map(lambda bucket: bucket.peers, self.buckets)))
|
||||
|
||||
def should_split(self, bucket_index: int, to_add: bytes) -> bool:
|
||||
def _should_split(self, bucket_index: int, to_add: bytes) -> bool:
|
||||
# https://stackoverflow.com/questions/32129978/highly-unbalanced-kademlia-routing-table/32187456#32187456
|
||||
if bucket_index < self._split_buckets_under_index:
|
||||
return True
|
||||
|
@ -203,39 +233,32 @@ class TreeRoutingTable:
|
|||
return []
|
||||
|
||||
def get_peer(self, contact_id: bytes) -> 'KademliaPeer':
|
||||
"""
|
||||
@raise IndexError: No contact with the specified contact ID is known
|
||||
by this node
|
||||
"""
|
||||
return self.buckets[self.kbucket_index(contact_id)].get_peer(contact_id)
|
||||
return self.buckets[self._kbucket_index(contact_id)].get_peer(contact_id)
|
||||
|
||||
def get_refresh_list(self, start_index: int = 0, force: bool = False) -> typing.List[bytes]:
|
||||
bucket_index = start_index
|
||||
refresh_ids = []
|
||||
now = int(self._loop.time())
|
||||
for bucket in self.buckets[start_index:]:
|
||||
if force or now - bucket.last_accessed >= constants.REFRESH_INTERVAL:
|
||||
to_search = self.midpoint_id_in_bucket_range(bucket_index)
|
||||
refresh_ids.append(to_search)
|
||||
bucket_index += 1
|
||||
for offset, _ in enumerate(self.buckets[start_index:]):
|
||||
refresh_ids.append(self._midpoint_id_in_bucket_range(start_index + offset))
|
||||
# if we have 3 or fewer populated buckets get two random ids in the range of each to try and
|
||||
# populate/split the buckets further
|
||||
buckets_with_contacts = self.buckets_with_contacts()
|
||||
if buckets_with_contacts <= 3:
|
||||
for i in range(buckets_with_contacts):
|
||||
refresh_ids.append(self._random_id_in_bucket_range(i))
|
||||
refresh_ids.append(self._random_id_in_bucket_range(i))
|
||||
return refresh_ids
|
||||
|
||||
def remove_peer(self, peer: 'KademliaPeer') -> None:
|
||||
if not peer.node_id:
|
||||
return
|
||||
bucket_index = self.kbucket_index(peer.node_id)
|
||||
bucket_index = self._kbucket_index(peer.node_id)
|
||||
try:
|
||||
self.buckets[bucket_index].remove_peer(peer)
|
||||
self._join_buckets()
|
||||
except ValueError:
|
||||
return
|
||||
|
||||
def touch_kbucket(self, key: bytes) -> None:
|
||||
self.touch_kbucket_by_index(self.kbucket_index(key))
|
||||
|
||||
def touch_kbucket_by_index(self, bucket_index: int):
|
||||
self.buckets[bucket_index].last_accessed = int(self._loop.time())
|
||||
|
||||
def kbucket_index(self, key: bytes) -> int:
|
||||
def _kbucket_index(self, key: bytes) -> int:
|
||||
i = 0
|
||||
for bucket in self.buckets:
|
||||
if bucket.key_in_range(key):
|
||||
|
@ -244,19 +267,19 @@ class TreeRoutingTable:
|
|||
i += 1
|
||||
return i
|
||||
|
||||
def random_id_in_bucket_range(self, bucket_index: int) -> bytes:
|
||||
def _random_id_in_bucket_range(self, bucket_index: int) -> bytes:
|
||||
random_id = int(random.randrange(self.buckets[bucket_index].range_min, self.buckets[bucket_index].range_max))
|
||||
return Distance(
|
||||
self._parent_node_id
|
||||
)(random_id.to_bytes(constants.HASH_LENGTH, 'big')).to_bytes(constants.HASH_LENGTH, 'big')
|
||||
|
||||
def midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
|
||||
def _midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
|
||||
half = int((self.buckets[bucket_index].range_max - self.buckets[bucket_index].range_min) // 2)
|
||||
return Distance(self._parent_node_id)(
|
||||
int(self.buckets[bucket_index].range_min + half).to_bytes(constants.HASH_LENGTH, 'big')
|
||||
).to_bytes(constants.HASH_LENGTH, 'big')
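Distance(a)(b) is the Kademlia XOR metric, so the id produced here is the parent node id XOR'd with the midpoint (or, in the method above, a random id) of the bucket's range. A one-byte illustration (real ids are constants.HASH_LENGTH bytes):

parent_id = 0b10110000
range_min, range_max = 0b01000000, 0b10000000  # one bucket's slice of the id space
midpoint = range_min + (range_max - range_min) // 2  # 0b01100000
refresh_target = parent_id ^ midpoint  # XOR distance applied to the midpoint
print(bin(refresh_target))  # 0b11010000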
|
||||
|
||||
def split_bucket(self, old_bucket_index: int) -> None:
|
||||
def _split_bucket(self, old_bucket_index: int) -> None:
|
||||
""" Splits the specified k-bucket into two new buckets which together
|
||||
cover the same range in the key/ID space
|
||||
|
||||
|
@ -279,8 +302,9 @@ class TreeRoutingTable:
|
|||
# ...and remove them from the old bucket
|
||||
for contact in new_bucket.peers:
|
||||
old_bucket.remove_peer(contact)
|
||||
self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
|
||||
|
||||
def join_buckets(self):
|
||||
def _join_buckets(self):
|
||||
if len(self.buckets) == 1:
|
||||
return
|
||||
to_pop = [i for i, bucket in enumerate(self.buckets) if len(bucket) == 0]
|
||||
|
@ -302,14 +326,8 @@ class TreeRoutingTable:
|
|||
elif can_go_higher:
|
||||
self.buckets[bucket_index_to_pop + 1].range_min = bucket.range_min
|
||||
self.buckets.remove(bucket)
|
||||
return self.join_buckets()
|
||||
|
||||
def contact_in_routing_table(self, address_tuple: typing.Tuple[str, int]) -> bool:
|
||||
for bucket in self.buckets:
|
||||
for contact in bucket.get_peers(sort_distance_to=False):
|
||||
if address_tuple[0] == contact.address and address_tuple[1] == contact.udp_port:
|
||||
return True
|
||||
return False
|
||||
self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
|
||||
return self._join_buckets()
|
||||
|
||||
def buckets_with_contacts(self) -> int:
|
||||
count = 0
|
||||
|
@ -317,3 +335,70 @@ class TreeRoutingTable:
|
|||
if len(bucket) > 0:
|
||||
count += 1
|
||||
return count
|
||||
|
||||
async def add_peer(self, peer: 'KademliaPeer', probe: typing.Callable[['KademliaPeer'], typing.Awaitable]):
|
||||
if not peer.node_id:
|
||||
log.warning("Tried adding a peer with no node id!")
|
||||
return False
|
||||
for my_peer in self.get_peers():
|
||||
if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
|
||||
self.remove_peer(my_peer)
|
||||
self._join_buckets()
|
||||
bucket_index = self._kbucket_index(peer.node_id)
|
||||
if self.buckets[bucket_index].add_peer(peer):
|
||||
return True
|
||||
|
||||
# The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
|
||||
if self._should_split(bucket_index, peer.node_id):
|
||||
self._split_bucket(bucket_index)
|
||||
# Retry the insertion attempt
|
||||
result = await self.add_peer(peer, probe)
|
||||
self._join_buckets()
|
||||
return result
|
||||
else:
|
||||
# We can't split the k-bucket
|
||||
#
|
||||
# The 13-page kademlia paper specifies that the least recently contacted node in the bucket
|
||||
# shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
|
||||
# the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
|
||||
#
|
||||
# A reasonable extension to this is BEP 0005, which extends the above:
|
||||
#
|
||||
# Not all nodes that we learn about are equal. Some are "good" and some are not.
|
||||
# Many nodes using the DHT are able to send queries and receive responses,
|
||||
# but are not able to respond to queries from other nodes. It is important that
|
||||
# each node's routing table contains only known good nodes. A good node is
|
||||
# a node that has responded to one of our queries within the last 15 minutes. A node
|
||||
# is also good if it has ever responded to one of our queries and has sent us a
|
||||
# query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
|
||||
# questionable. Nodes become bad when they fail to respond to multiple queries
|
||||
# in a row. Nodes that we know are good are given priority over nodes with unknown status.
|
||||
#
|
||||
# When there are bad or questionable nodes in the bucket, the least recent is selected for
|
||||
# potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
|
||||
# contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
|
||||
# is ignored if the pinged node replies.
|
||||
|
||||
not_good_contacts = self.buckets[bucket_index].get_bad_or_unknown_peers()
|
||||
not_recently_replied = []
|
||||
for my_peer in not_good_contacts:
|
||||
last_replied = self._peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
|
||||
if not last_replied or last_replied + 60 < self._loop.time():
|
||||
not_recently_replied.append(my_peer)
|
||||
if not_recently_replied:
|
||||
to_replace = not_recently_replied[0]
|
||||
else:
|
||||
to_replace = self.buckets[bucket_index].peers[0]
|
||||
last_replied = self._peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
|
||||
if last_replied and last_replied + 60 > self._loop.time():
|
||||
return False
|
||||
log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
|
||||
try:
|
||||
await probe(to_replace)
|
||||
return False
|
||||
except (asyncio.TimeoutError, RemoteException):
|
||||
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
|
||||
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
|
||||
if to_replace in self.buckets[bucket_index]:
|
||||
self.buckets[bucket_index].remove_peer(to_replace)
|
||||
return await self.add_peer(peer, probe)
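The probe argument is expected to be a coroutine that contacts the eviction candidate and raises asyncio.TimeoutError or RemoteException when it is unreachable. A hedged sketch of how a caller could wire this up (the ping call is illustrative, not necessarily the SDK's exact method name):

async def maybe_add(routing_table, protocol, new_peer):
    async def probe(peer):
        # A timeout or RemoteException here is how add_peer() learns that the
        # least-recently-seen contact is dead and can be replaced.
        await protocol.get_rpc_peer(peer).ping()
    return await routing_table.add_peer(new_peer, probe)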
|
||||
|
|
|
@ -35,6 +35,10 @@ Code | Name | Message
|
|||
111 | GenericInputValue | The value '{value}' for argument '{argument}' is not valid.
|
||||
112 | InputValueIsNone | None or null is not valid value for argument '{argument}'.
|
||||
113 | ConflictingInputValue | Only '{first_argument}' or '{second_argument}' is allowed, not both.
|
||||
114 | InputStringIsBlank | {argument} cannot be blank.
|
||||
115 | EmptyPublishedFile | Cannot publish empty file: {file_path}
|
||||
116 | MissingPublishedFile | File does not exist: {file_path}
|
||||
117 | InvalidStreamURL | Invalid LBRY stream URL: '{url}' -- When a URL cannot be downloaded, such as '@Channel/' or a collection
|
||||
**2xx** | Configuration | Configuration errors.
|
||||
201 | ConfigWrite | Cannot write configuration file '{path}'. -- When writing the default config fails on startup, such as due to permission issues.
|
||||
202 | ConfigRead | Cannot find provided configuration file '{path}'. -- Can't open the config file user provided via command line args.
|
||||
|
@ -52,6 +56,7 @@ Code | Name | Message
|
|||
405 | ChannelKeyNotFound | Channel signing key not found.
|
||||
406 | ChannelKeyInvalid | Channel signing key is out of date. -- For example, channel was updated but you don't have the updated key.
|
||||
407 | DataDownload | Failed to download blob. *generic*
|
||||
408 | PrivateKeyNotFound | Couldn't find private key for {key} '{value}'.
|
||||
410 | Resolve | Failed to resolve '{url}'.
|
||||
411 | ResolveTimeout | Failed to resolve '{url}' within the timeout.
|
||||
411 | ResolveCensored | Resolve of '{url}' was censored by channel with claim id '{censor_id}'.
|
||||
|
@ -59,6 +64,7 @@ Code | Name | Message
|
|||
421 | InvalidPassword | Password is invalid.
|
||||
422 | IncompatibleWalletServer | '{server}:{port}' has an incompatibly old version.
|
||||
423 | TooManyClaimSearchParameters | {key} can't have more than {limit} items.
|
||||
424 | AlreadyPurchased | You already have a purchase for claim_id '{claim_id_hex}'. Use --allow-duplicate-purchase flag to override.
|
||||
431 | ServerPaymentInvalidAddress | Invalid address from wallet server: '{address}' - skipping payment round.
|
||||
432 | ServerPaymentWalletLocked | Cannot spend funds with locked wallet, skipping payment round.
|
||||
433 | ServerPaymentFeeAboveMaxAllowed | Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.
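Each row added above corresponds to an exception class added to lbry.error later in this diff, so callers raise the class and API consumers can match on either the numeric code or the class hierarchy. A brief illustrative sketch (the claim id is a placeholder):

from lbry.error import AlreadyPurchasedError, WalletError

try:
    raise AlreadyPurchasedError("abcd1234")  # code 424 in the table above
except WalletError as err:  # the 4xx family derives from WalletError
    print(type(err).__name__, err)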
|
||||
|
|
|
@ -84,6 +84,37 @@ class ConflictingInputValueError(InputValueError):
|
|||
super().__init__(f"Only '{first_argument}' or '{second_argument}' is allowed, not both.")
|
||||
|
||||
|
||||
class InputStringIsBlankError(InputValueError):
|
||||
|
||||
def __init__(self, argument):
|
||||
self.argument = argument
|
||||
super().__init__(f"{argument} cannot be blank.")
|
||||
|
||||
|
||||
class EmptyPublishedFileError(InputValueError):
|
||||
|
||||
def __init__(self, file_path):
|
||||
self.file_path = file_path
|
||||
super().__init__(f"Cannot publish empty file: {file_path}")
|
||||
|
||||
|
||||
class MissingPublishedFileError(InputValueError):
|
||||
|
||||
def __init__(self, file_path):
|
||||
self.file_path = file_path
|
||||
super().__init__(f"File does not exist: {file_path}")
|
||||
|
||||
|
||||
class InvalidStreamURLError(InputValueError):
|
||||
"""
|
||||
When a URL cannot be downloaded, such as '@Channel/' or a collection
|
||||
"""
|
||||
|
||||
def __init__(self, url):
|
||||
self.url = url
|
||||
super().__init__(f"Invalid LBRY stream URL: '{url}'")
|
||||
|
||||
|
||||
class ConfigurationError(BaseError):
|
||||
"""
|
||||
Configuration errors.
|
||||
|
@ -207,6 +238,14 @@ class DataDownloadError(WalletError):
|
|||
super().__init__("Failed to download blob. *generic*")
|
||||
|
||||
|
||||
class PrivateKeyNotFoundError(WalletError):
|
||||
|
||||
def __init__(self, key, value):
|
||||
self.key = key
|
||||
self.value = value
|
||||
super().__init__(f"Couldn't find private key for {key} '{value}'.")
|
||||
|
||||
|
||||
class ResolveError(WalletError):
|
||||
|
||||
def __init__(self, url):
|
||||
|
@ -223,9 +262,10 @@ class ResolveTimeoutError(WalletError):
|
|||
|
||||
class ResolveCensoredError(WalletError):
|
||||
|
||||
def __init__(self, url, censor_id):
|
||||
def __init__(self, url, censor_id, censor_row):
|
||||
self.url = url
|
||||
self.censor_id = censor_id
|
||||
self.censor_row = censor_row
|
||||
super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{censor_id}'.")
|
||||
|
||||
|
||||
|
@ -258,6 +298,16 @@ class TooManyClaimSearchParametersError(WalletError):
|
|||
super().__init__(f"{key} cant have more than {limit} items.")
|
||||
|
||||
|
||||
class AlreadyPurchasedError(WalletError):
|
||||
"""
|
||||
allow-duplicate-purchase flag to override.
|
||||
"""
|
||||
|
||||
def __init__(self, claim_id_hex):
|
||||
self.claim_id_hex = claim_id_hex
|
||||
super().__init__(f"You already have a purchase for claim_id '{claim_id_hex}'. Use")
|
||||
|
||||
|
||||
class ServerPaymentInvalidAddressError(WalletError):
|
||||
|
||||
def __init__(self, address):
|
||||
|
|
|
@ -226,6 +226,9 @@ def get_argument_parser():
|
|||
def ensure_directory_exists(path: str):
|
||||
if not os.path.isdir(path):
|
||||
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
|
||||
use_effective_ids = os.access in os.supports_effective_ids
|
||||
if not os.access(path, os.W_OK, effective_ids=use_effective_ids):
|
||||
raise PermissionError(f"The following directory is not writable: {path}")
|
||||
|
||||
|
||||
LOG_MODULES = 'lbry', 'aioupnp'
|
||||
|
|
|
@ -18,6 +18,7 @@ DOWNLOAD_STARTED = 'Download Started'
|
|||
DOWNLOAD_ERRORED = 'Download Errored'
|
||||
DOWNLOAD_FINISHED = 'Download Finished'
|
||||
HEARTBEAT = 'Heartbeat'
|
||||
DISK_SPACE = 'Disk Space'
|
||||
CLAIM_ACTION = 'Claim Action' # publish/create/update/abandon
|
||||
NEW_CHANNEL = 'New Channel'
|
||||
CREDITS_SENT = 'Credits Sent'
|
||||
|
@ -169,6 +170,15 @@ class AnalyticsManager:
|
|||
})
|
||||
)
|
||||
|
||||
async def send_disk_space_used(self, storage_used, storage_limit, is_from_network_quota):
|
||||
await self.track(
|
||||
self._event(DISK_SPACE, {
|
||||
'used': storage_used,
|
||||
'limit': storage_limit,
|
||||
'from_network_quota': is_from_network_quota
|
||||
})
|
||||
)
|
||||
|
||||
async def send_server_startup(self):
|
||||
await self.track(self._event(SERVER_STARTUP))
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
from lbry.conf import Config
|
||||
from lbry.extras.cli import execute_command
|
||||
from lbry.conf import Config
|
||||
|
||||
|
||||
def daemon_rpc(conf: Config, method: str, **kwargs):
|
||||
|
|
|
@ -37,7 +37,7 @@ class Component(metaclass=ComponentType):
|
|||
def running(self):
|
||||
return self._running
|
||||
|
||||
async def get_status(self):
|
||||
async def get_status(self): # pylint: disable=no-self-use
|
||||
return
|
||||
|
||||
async def start(self):
|
||||
|
|
|
@ -42,7 +42,7 @@ class ComponentManager:
|
|||
self.analytics_manager = analytics_manager
|
||||
self.component_classes = {}
|
||||
self.components = set()
|
||||
self.started = asyncio.Event(loop=self.loop)
|
||||
self.started = asyncio.Event()
|
||||
self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop())
|
||||
|
||||
for component_name, component_class in self.default_component_classes.items():
|
||||
|
@ -118,7 +118,7 @@ class ComponentManager:
|
|||
component._setup() for component in stage if not component.running
|
||||
]
|
||||
if needing_start:
|
||||
await asyncio.wait(needing_start)
|
||||
await asyncio.wait(map(asyncio.create_task, needing_start))
|
||||
self.started.set()
|
||||
|
||||
async def stop(self):
|
||||
|
@ -131,7 +131,7 @@ class ComponentManager:
|
|||
component._stop() for component in stage if component.running
|
||||
]
|
||||
if needing_stop:
|
||||
await asyncio.wait(needing_stop)
|
||||
await asyncio.wait(map(asyncio.create_task, needing_stop))
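These asyncio.wait() changes (like the asyncio.Event() change above) track newer asyncio behavior: passing bare coroutines to asyncio.wait() was deprecated in Python 3.8 and is rejected by newer releases, and the loop= keyword has been dropped from most APIs, so coroutines are now wrapped in tasks explicitly. A minimal standalone illustration:

import asyncio

async def work(n):
    await asyncio.sleep(0)
    return n

async def main():
    coros = [work(i) for i in range(3)]
    # asyncio.wait() needs tasks, not bare coroutines, on modern Python.
    done, _pending = await asyncio.wait(map(asyncio.create_task, coros))
    return sorted(task.result() for task in done)

print(asyncio.run(main()))  # [0, 1, 2]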
|
||||
|
||||
def all_components_running(self, *component_names):
|
||||
"""
|
||||
|
|
|
@ -4,6 +4,7 @@ import asyncio
|
|||
import logging
|
||||
import binascii
|
||||
import typing
|
||||
|
||||
import base58
|
||||
|
||||
from aioupnp import __version__ as aioupnp_version
|
||||
|
@ -17,6 +18,7 @@ from lbry.dht.blob_announcer import BlobAnnouncer
|
|||
from lbry.blob.blob_manager import BlobManager
|
||||
from lbry.blob.disk_space_manager import DiskSpaceManager
|
||||
from lbry.blob_exchange.server import BlobServer
|
||||
from lbry.stream.background_downloader import BackgroundDownloader
|
||||
from lbry.stream.stream_manager import StreamManager
|
||||
from lbry.file.file_manager import FileManager
|
||||
from lbry.extras.daemon.component import Component
|
||||
|
@ -25,10 +27,8 @@ from lbry.extras.daemon.storage import SQLiteStorage
|
|||
from lbry.torrent.torrent_manager import TorrentManager
|
||||
from lbry.wallet import WalletManager
|
||||
from lbry.wallet.usage_payment import WalletServerPayer
|
||||
try:
|
||||
from lbry.torrent.session import TorrentSession
|
||||
except ImportError:
|
||||
TorrentSession = None
|
||||
from lbry.torrent.tracker import TrackerClient
|
||||
from lbry.torrent.session import TorrentSession
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
@ -42,9 +42,11 @@ DHT_COMPONENT = "dht"
|
|||
HASH_ANNOUNCER_COMPONENT = "hash_announcer"
|
||||
FILE_MANAGER_COMPONENT = "file_manager"
|
||||
DISK_SPACE_COMPONENT = "disk_space"
|
||||
BACKGROUND_DOWNLOADER_COMPONENT = "background_downloader"
|
||||
PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
|
||||
UPNP_COMPONENT = "upnp"
|
||||
EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
|
||||
TRACKER_ANNOUNCER_COMPONENT = "tracker_announcer_component"
|
||||
LIBTORRENT_COMPONENT = "libtorrent_component"
|
||||
|
||||
|
||||
|
@ -61,7 +63,7 @@ class DatabaseComponent(Component):
|
|||
|
||||
@staticmethod
|
||||
def get_current_db_revision():
|
||||
return 14
|
||||
return 15
|
||||
|
||||
@property
|
||||
def revision_filename(self):
|
||||
|
@ -291,6 +293,7 @@ class DHTComponent(Component):
|
|||
peer_port=self.external_peer_port,
|
||||
rpc_timeout=self.conf.node_rpc_timeout,
|
||||
split_buckets_under_index=self.conf.split_buckets_under_index,
|
||||
is_bootstrap_node=self.conf.is_bootstrap_node,
|
||||
storage=storage
|
||||
)
|
||||
self.dht_node.start(self.conf.network_interface, self.conf.known_dht_nodes)
|
||||
|
@ -354,10 +357,6 @@ class FileManagerComponent(Component):
|
|||
wallet = self.component_manager.get_component(WALLET_COMPONENT)
|
||||
node = self.component_manager.get_component(DHT_COMPONENT) \
|
||||
if self.component_manager.has_component(DHT_COMPONENT) else None
|
||||
try:
|
||||
torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT) if TorrentSession else None
|
||||
except NameError:
|
||||
torrent = None
|
||||
log.info('Starting the file manager')
|
||||
loop = asyncio.get_event_loop()
|
||||
self.file_manager = FileManager(
|
||||
|
@ -366,7 +365,8 @@ class FileManagerComponent(Component):
|
|||
self.file_manager.source_managers['stream'] = StreamManager(
|
||||
loop, self.conf, blob_manager, wallet, storage, node,
|
||||
)
|
||||
if TorrentSession and LIBTORRENT_COMPONENT not in self.conf.components_to_skip:
|
||||
if self.component_manager.has_component(LIBTORRENT_COMPONENT):
|
||||
torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT)
|
||||
self.file_manager.source_managers['torrent'] = TorrentManager(
|
||||
loop, self.conf, torrent, storage, self.component_manager.analytics_manager
|
||||
)
|
||||
|
@ -374,27 +374,102 @@ class FileManagerComponent(Component):
|
|||
log.info('Done setting up file manager')
|
||||
|
||||
async def stop(self):
|
||||
self.file_manager.stop()
|
||||
await self.file_manager.stop()
|
||||
|
||||
|
||||
class BackgroundDownloaderComponent(Component):
|
||||
MIN_PREFIX_COLLIDING_BITS = 8
|
||||
component_name = BACKGROUND_DOWNLOADER_COMPONENT
|
||||
depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT, DISK_SPACE_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.background_task: typing.Optional[asyncio.Task] = None
|
||||
self.download_loop_delay_seconds = 60
|
||||
self.ongoing_download: typing.Optional[asyncio.Task] = None
|
||||
self.space_manager: typing.Optional[DiskSpaceManager] = None
|
||||
self.blob_manager: typing.Optional[BlobManager] = None
|
||||
self.background_downloader: typing.Optional[BackgroundDownloader] = None
|
||||
self.dht_node: typing.Optional[Node] = None
|
||||
self.space_available: typing.Optional[int] = None
|
||||
|
||||
@property
|
||||
def is_busy(self):
|
||||
return bool(self.ongoing_download and not self.ongoing_download.done())
|
||||
|
||||
@property
|
||||
def component(self) -> 'BackgroundDownloaderComponent':
|
||||
return self
|
||||
|
||||
async def get_status(self):
|
||||
return {'running': self.background_task is not None and not self.background_task.done(),
|
||||
'available_free_space_mb': self.space_available,
|
||||
'ongoing_download': self.is_busy}
|
||||
|
||||
async def download_blobs_in_background(self):
|
||||
while True:
|
||||
self.space_available = await self.space_manager.get_free_space_mb(True)
|
||||
if not self.is_busy and self.space_available > 10:
|
||||
self._download_next_close_blob_hash()
|
||||
await asyncio.sleep(self.download_loop_delay_seconds)
|
||||
|
||||
def _download_next_close_blob_hash(self):
|
||||
node_id = self.dht_node.protocol.node_id
|
||||
for blob_hash in self.dht_node.stored_blob_hashes:
|
||||
if blob_hash.hex() in self.blob_manager.completed_blob_hashes:
|
||||
continue
|
||||
if utils.get_colliding_prefix_bits(node_id, blob_hash) >= self.MIN_PREFIX_COLLIDING_BITS:
|
||||
self.ongoing_download = asyncio.create_task(self.background_downloader.download_blobs(blob_hash.hex()))
|
||||
return
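With MIN_PREFIX_COLLIDING_BITS = 8 the loop above only queues announced blobs whose hash shares at least the first 8 bits with this node's id, i.e. roughly 1 in 2**8 = 256 of the hashes it sees. A toy check of that filter (ids are illustrative; the real comparison uses utils.get_colliding_prefix_bits over full-length hashes):

node_id = bytes.fromhex("ab") + bytes(47)    # toy 48-byte node id
blob_a = bytes.fromhex("abcd") + bytes(46)   # same first byte -> at least 8 colliding bits
blob_b = bytes.fromhex("1bcd") + bytes(46)   # first byte differs -> fewer than 8

def shared_prefix_bits(a: bytes, b: bytes) -> int:
    x = a[0] ^ b[0]
    return 8 if x == 0 else 8 - x.bit_length()  # only the first byte matters for the >= 8 test

assert shared_prefix_bits(node_id, blob_a) >= 8  # would be handed to the background downloader
assert shared_prefix_bits(node_id, blob_b) < 8   # skipped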
|
||||
|
||||
async def start(self):
|
||||
self.space_manager: DiskSpaceManager = self.component_manager.get_component(DISK_SPACE_COMPONENT)
|
||||
if not self.component_manager.has_component(DHT_COMPONENT):
|
||||
return
|
||||
self.dht_node = self.component_manager.get_component(DHT_COMPONENT)
|
||||
self.blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
|
||||
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
||||
self.background_downloader = BackgroundDownloader(self.conf, storage, self.blob_manager, self.dht_node)
|
||||
self.background_task = asyncio.create_task(self.download_blobs_in_background())
|
||||
|
||||
async def stop(self):
|
||||
if self.ongoing_download and not self.ongoing_download.done():
|
||||
self.ongoing_download.cancel()
|
||||
if self.background_task:
|
||||
self.background_task.cancel()
|
||||
|
||||
|
||||
class DiskSpaceComponent(Component):
|
||||
component_name = DISK_SPACE_COMPONENT
|
||||
depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.disk_space_manager = DiskSpaceManager(self.conf)
|
||||
self.disk_space_manager: typing.Optional[DiskSpaceManager] = None
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[DiskSpaceManager]:
|
||||
return self.disk_space_manager
|
||||
|
||||
async def get_status(self):
|
||||
return {
|
||||
'space_used': str(self.disk_space_manager.space_used_mb),
|
||||
'running': self.disk_space_manager.running,
|
||||
}
|
||||
if self.disk_space_manager:
|
||||
space_used = await self.disk_space_manager.get_space_used_mb(cached=True)
|
||||
return {
|
||||
'total_used_mb': space_used['total'],
|
||||
'published_blobs_storage_used_mb': space_used['private_storage'],
|
||||
'content_blobs_storage_used_mb': space_used['content_storage'],
|
||||
'seed_blobs_storage_used_mb': space_used['network_storage'],
|
||||
'running': self.disk_space_manager.running,
|
||||
}
|
||||
return {'space_used': '0', 'network_seeding_space_used': '0', 'running': False}
|
||||
|
||||
async def start(self):
|
||||
db = self.component_manager.get_component(DATABASE_COMPONENT)
|
||||
blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
|
||||
self.disk_space_manager = DiskSpaceManager(
|
||||
self.conf, db, blob_manager,
|
||||
analytics=self.component_manager.analytics_manager
|
||||
)
|
||||
await self.disk_space_manager.start()
|
||||
|
||||
async def stop(self):
|
||||
|
@ -420,9 +495,8 @@ class TorrentComponent(Component):
|
|||
}
|
||||
|
||||
async def start(self):
|
||||
if TorrentSession:
|
||||
self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
|
||||
await self.torrent_session.bind() # TODO: specify host/port
|
||||
self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
|
||||
await self.torrent_session.bind() # TODO: specify host/port
|
||||
|
||||
async def stop(self):
|
||||
if self.torrent_session:
|
||||
|
@ -477,7 +551,7 @@ class UPnPComponent(Component):
|
|||
while True:
|
||||
if now:
|
||||
await self._maintain_redirects()
|
||||
await asyncio.sleep(360, loop=self.component_manager.loop)
|
||||
await asyncio.sleep(360)
|
||||
|
||||
async def _maintain_redirects(self):
|
||||
# setup the gateway if necessary
|
||||
|
@ -486,8 +560,6 @@ class UPnPComponent(Component):
|
|||
self.upnp = await UPnP.discover(loop=self.component_manager.loop)
|
||||
log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
|
||||
except Exception as err:
|
||||
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
|
||||
raise
|
||||
log.warning("upnp discovery failed: %s", err)
|
||||
self.upnp = None
|
||||
|
||||
|
@ -599,7 +671,7 @@ class UPnPComponent(Component):
|
|||
log.info("Removing upnp redirects: %s", self.upnp_redirects)
|
||||
await asyncio.wait([
|
||||
self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
|
||||
], loop=self.component_manager.loop)
|
||||
])
|
||||
if self._maintain_redirects_task and not self._maintain_redirects_task.done():
|
||||
self._maintain_redirects_task.cancel()
|
||||
|
||||
|
@ -630,3 +702,49 @@ class ExchangeRateManagerComponent(Component):
|
|||
|
||||
async def stop(self):
|
||||
self.exchange_rate_manager.stop()
|
||||
|
||||
|
||||
class TrackerAnnouncerComponent(Component):
|
||||
component_name = TRACKER_ANNOUNCER_COMPONENT
|
||||
depends_on = [FILE_MANAGER_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.file_manager = None
|
||||
self.announce_task = None
|
||||
self.tracker_client: typing.Optional[TrackerClient] = None
|
||||
|
||||
@property
|
||||
def component(self):
|
||||
return self.tracker_client
|
||||
|
||||
@property
|
||||
def running(self):
|
||||
return self._running and self.announce_task and not self.announce_task.done()
|
||||
|
||||
async def announce_forever(self):
|
||||
while True:
|
||||
sleep_seconds = 60.0
|
||||
announce_sd_hashes = []
|
||||
for file in self.file_manager.get_filtered():
|
||||
if not file.downloader:
|
||||
continue
|
||||
announce_sd_hashes.append(bytes.fromhex(file.sd_hash))
|
||||
await self.tracker_client.announce_many(*announce_sd_hashes)
|
||||
await asyncio.sleep(sleep_seconds)
|
||||
|
||||
async def start(self):
|
||||
node = self.component_manager.get_component(DHT_COMPONENT) \
|
||||
if self.component_manager.has_component(DHT_COMPONENT) else None
|
||||
node_id = node.protocol.node_id if node else None
|
||||
self.tracker_client = TrackerClient(node_id, self.conf.tcp_port, lambda: self.conf.tracker_servers)
|
||||
await self.tracker_client.start()
|
||||
self.file_manager = self.component_manager.get_component(FILE_MANAGER_COMPONENT)
|
||||
self.announce_task = asyncio.create_task(self.announce_forever())
|
||||
|
||||
async def stop(self):
|
||||
self.file_manager = None
|
||||
if self.announce_task and not self.announce_task.done():
|
||||
self.announce_task.cancel()
|
||||
self.announce_task = None
|
||||
self.tracker_client.stop()
|
||||
|
|
|
@ -8,16 +8,14 @@ import time
|
|||
import inspect
|
||||
import typing
|
||||
import random
|
||||
import hashlib
|
||||
import tracemalloc
|
||||
from decimal import Decimal
|
||||
import itertools
|
||||
from urllib.parse import urlencode, quote
|
||||
from typing import Callable, Optional, List
|
||||
from binascii import hexlify, unhexlify
|
||||
from traceback import format_exc
|
||||
from functools import wraps, partial
|
||||
|
||||
import ecdsa
|
||||
import base58
|
||||
from aiohttp import web
|
||||
from prometheus_client import generate_latest as prom_generate_latest, Gauge, Histogram, Counter
|
||||
|
@ -29,6 +27,8 @@ from lbry.wallet import (
|
|||
)
|
||||
from lbry.wallet.dewies import dewies_to_lbc, lbc_to_dewies, dict_values_to_lbc
|
||||
from lbry.wallet.constants import TXO_TYPES, CLAIM_TYPE_NAMES
|
||||
from lbry.wallet.bip32 import PrivateKey
|
||||
from lbry.crypto.base58 import Base58
|
||||
|
||||
from lbry import utils
|
||||
from lbry.conf import Config, Setting, NOT_SET
|
||||
|
@ -38,12 +38,13 @@ from lbry.dht.peer import make_kademlia_peer
|
|||
from lbry.error import (
|
||||
DownloadSDTimeoutError, ComponentsNotStartedError, ComponentStartConditionNotMetError,
|
||||
CommandDoesNotExistError, BaseError, WalletNotFoundError, WalletAlreadyLoadedError, WalletAlreadyExistsError,
|
||||
ConflictingInputValueError
|
||||
ConflictingInputValueError, AlreadyPurchasedError, PrivateKeyNotFoundError, InputStringIsBlankError,
|
||||
InputValueError
|
||||
)
|
||||
from lbry.extras import system_info
|
||||
from lbry.extras.daemon import analytics
|
||||
from lbry.extras.daemon.components import WALLET_COMPONENT, DATABASE_COMPONENT, DHT_COMPONENT, BLOB_COMPONENT
|
||||
from lbry.extras.daemon.components import FILE_MANAGER_COMPONENT, DISK_SPACE_COMPONENT
|
||||
from lbry.extras.daemon.components import FILE_MANAGER_COMPONENT, DISK_SPACE_COMPONENT, TRACKER_ANNOUNCER_COMPONENT
|
||||
from lbry.extras.daemon.components import EXCHANGE_RATE_MANAGER_COMPONENT, UPNP_COMPONENT
|
||||
from lbry.extras.daemon.componentmanager import RequiredCondition
|
||||
from lbry.extras.daemon.componentmanager import ComponentManager
|
||||
|
@ -52,9 +53,8 @@ from lbry.extras.daemon.undecorated import undecorated
|
|||
from lbry.extras.daemon.security import ensure_request_allowed
|
||||
from lbry.file_analysis import VideoFileAnalyzer
|
||||
from lbry.schema.claim import Claim
|
||||
from lbry.schema.url import URL, normalize_name
|
||||
from lbry.wallet.server.db.elasticsearch.constants import RANGE_FIELDS, REPLACEMENTS
|
||||
MY_RANGE_FIELDS = RANGE_FIELDS - {"limit_claims_per_channel"}
|
||||
from lbry.schema.url import URL
|
||||
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.blob.blob_manager import BlobManager
|
||||
|
@ -67,6 +67,29 @@ if typing.TYPE_CHECKING:
|
|||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
RANGE_FIELDS = {
|
||||
'height', 'creation_height', 'activation_height', 'expiration_height',
|
||||
'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount',
|
||||
'tx_position', 'repost_count', 'limit_claims_per_channel',
|
||||
'amount', 'effective_amount', 'support_amount',
|
||||
'trending_score', 'censor_type', 'tx_num'
|
||||
}
|
||||
MY_RANGE_FIELDS = RANGE_FIELDS - {"limit_claims_per_channel"}
|
||||
REPLACEMENTS = {
|
||||
'claim_name': 'normalized_name',
|
||||
'name': 'normalized_name',
|
||||
'txid': 'tx_id',
|
||||
'nout': 'tx_nout',
|
||||
'trending_group': 'trending_score',
|
||||
'trending_mixed': 'trending_score',
|
||||
'trending_global': 'trending_score',
|
||||
'trending_local': 'trending_score',
|
||||
'reposted': 'repost_count',
|
||||
'stream_types': 'stream_type',
|
||||
'media_types': 'media_type',
|
||||
'valid_channel_signature': 'is_signature_valid'
|
||||
}
|
||||
|
||||
|
||||
def is_transactional_function(name):
|
||||
for action in ('create', 'update', 'abandon', 'send', 'fund'):
|
||||
|
@ -172,48 +195,6 @@ def paginate_list(items: List, page: Optional[int], page_size: Optional[int]):
|
|||
}
|
||||
|
||||
|
||||
def fix_kwargs_for_hub(**kwargs):
|
||||
repeated_fields = {"media_type", "stream_type", "claim_type"}
|
||||
value_fields = {"tx_nout", "has_source", "is_signature_valid"}
|
||||
opcodes = {'=': 0, '<=': 1, '>=': 2, '<': 3, '>': 4}
|
||||
for key, value in list(kwargs.items()):
|
||||
if value in (None, [], False):
|
||||
kwargs.pop(key)
|
||||
continue
|
||||
if key in REPLACEMENTS:
|
||||
kwargs[REPLACEMENTS[key]] = kwargs.pop(key)
|
||||
key = REPLACEMENTS[key]
|
||||
|
||||
if key == "normalized_name":
|
||||
kwargs[key] = normalize_name(value)
|
||||
if key == "limit_claims_per_channel":
|
||||
value = kwargs.pop("limit_claims_per_channel") or 0
|
||||
if value > 0:
|
||||
kwargs["limit_claims_per_channel"] = value
|
||||
elif key == "invalid_channel_signature":
|
||||
kwargs["is_signature_valid"] = {"value": not kwargs.pop("invalid_channel_signature")}
|
||||
elif key == "has_no_source":
|
||||
kwargs["has_source"] = {"value": not kwargs.pop("has_no_source")}
|
||||
elif key in value_fields:
|
||||
kwargs[key] = {"value": value} if not isinstance(value, dict) else value
|
||||
elif key in repeated_fields and isinstance(value, str):
|
||||
kwargs[key] = [value]
|
||||
elif key in ("claim_id", "channel_id"):
|
||||
kwargs[key] = {"invert": False, "value": [kwargs[key]]}
|
||||
elif key in ("claim_ids", "channel_ids"):
|
||||
kwargs[key[:-1]] = {"invert": False, "value": kwargs.pop(key)}
|
||||
elif key == "not_channel_ids":
|
||||
kwargs["channel_id"] = {"invert": True, "value": kwargs.pop("not_channel_ids")}
|
||||
elif key in MY_RANGE_FIELDS:
|
||||
operator = '='
|
||||
if isinstance(value, str) and value[0] in opcodes:
|
||||
operator_length = 2 if value[:2] in opcodes else 1
|
||||
operator, value = value[:operator_length], value[operator_length:]
|
||||
value = [str(value if key != 'fee_amount' else Decimal(value)*1000)]
|
||||
kwargs[key] = {"op": opcodes[operator], "value": value}
|
||||
return kwargs
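For reference, the fix_kwargs_for_hub helper removed here translated CLI-style range constraints into the hub's op/value form using the opcodes table above; for example --height=">400000" becomes operator 4 applied to ["400000"]. A worked example of that translation:

opcodes = {'=': 0, '<=': 1, '>=': 2, '<': 3, '>': 4}

value = ">400000"
operator_length = 2 if value[:2] in opcodes else 1
operator, value = value[:operator_length], value[operator_length:]
# height is not fee_amount, so no Decimal scaling applies
print({"height": {"op": opcodes[operator], "value": [value]}})
# -> {'height': {'op': 4, 'value': ['400000']}}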
|
||||
|
||||
|
||||
DHT_HAS_CONTACTS = "dht_has_contacts"
|
||||
|
||||
|
||||
|
@ -633,7 +614,8 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
content_type='application/json'
|
||||
)
|
||||
|
||||
async def handle_metrics_get_request(self, request: web.Request):
|
||||
@staticmethod
|
||||
async def handle_metrics_get_request(request: web.Request):
|
||||
try:
|
||||
return web.Response(
|
||||
text=prom_generate_latest().decode(),
|
||||
|
@ -951,7 +933,12 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
},
|
||||
'total_outgoing_mps': (float) megabytes per second sent,
|
||||
'total_incoming_mps': (float) megabytes per second received,
|
||||
'time': (float) timestamp
|
||||
'max_outgoing_mbs': (float) maximum bandwidth (megabytes per second) sent, since the
|
||||
daemon was started
|
||||
'max_incoming_mbs': (float) maximum bandwidth (megabytes per second) received, since the
|
||||
daemon was started
|
||||
'total_sent' : (int) total number of bytes sent since the daemon was started
|
||||
'total_received' : (int) total number of bytes received since the daemon was started
|
||||
}
|
||||
},
|
||||
'hash_announcer': {
|
||||
|
@ -1341,6 +1328,65 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
c.wallets += [wallet_id]
|
||||
return wallet
|
||||
|
||||
@requires("wallet")
|
||||
async def jsonrpc_wallet_export(self, password=None, wallet_id=None):
|
||||
"""
|
||||
Exports encrypted wallet data if password is supplied; otherwise plain JSON.
|
||||
|
||||
Wallet must be unlocked to perform this operation.
|
||||
|
||||
Usage:
|
||||
wallet_export [--password=<password>] [--wallet_id=<wallet_id>]
|
||||
|
||||
Options:
|
||||
--password=<password> : (str) password to encrypt outgoing data
|
||||
--wallet_id=<wallet_id> : (str) wallet being exported
|
||||
|
||||
Returns:
|
||||
(str) data: base64-encoded encrypted wallet, or cleartext JSON
|
||||
|
||||
"""
|
||||
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
|
||||
if password is None:
|
||||
return wallet.to_json()
|
||||
return wallet.pack(password).decode()
|
||||
|
||||
@requires("wallet")
|
||||
async def jsonrpc_wallet_import(self, data, password=None, wallet_id=None, blocking=False):
|
||||
"""
|
||||
Import wallet data and merge accounts and preferences. Data is expected to be JSON if
|
||||
password is not supplied.
|
||||
|
||||
Wallet must be unlocked to perform this operation.
|
||||
|
||||
Usage:
|
||||
wallet_import (<data> | --data=<data>) [<password> | --password=<password>]
|
||||
[--wallet_id=<wallet_id>] [--blocking]
|
||||
|
||||
Options:
|
||||
--data=<data> : (str) incoming wallet data
|
||||
--password=<password> : (str) password to decrypt incoming data
|
||||
--wallet_id=<wallet_id> : (str) wallet being merged into
|
||||
--blocking : (bool) wait until any new accounts have merged
|
||||
|
||||
Returns:
|
||||
(str) base64-encoded encrypted wallet, or cleartext JSON
|
||||
"""
|
||||
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
|
||||
added_accounts, merged_accounts = wallet.merge(self.wallet_manager, password, data)
|
||||
for new_account in itertools.chain(added_accounts, merged_accounts):
|
||||
await new_account.maybe_migrate_certificates()
|
||||
if added_accounts and self.ledger.network.is_connected:
|
||||
if blocking:
|
||||
await asyncio.wait([
|
||||
a.ledger.subscribe_account(a) for a in added_accounts
|
||||
])
|
||||
else:
|
||||
for new_account in added_accounts:
|
||||
asyncio.create_task(self.ledger.subscribe_account(new_account))
|
||||
wallet.save()
|
||||
return await self.jsonrpc_wallet_export(password=password, wallet_id=wallet_id)
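Taken together, wallet_export and wallet_import allow a wallet to be moved between SDK instances: export returns cleartext JSON when no password is given or a base64 blob when one is, and import merges the accounts and echoes the merged wallet back in the same encoding. A hedged sketch of the round trip (daemon_a and daemon_b stand in for two running Daemon objects; the password is a placeholder):

async def copy_wallet(daemon_a, daemon_b, password="hunter2"):
    # Export the default wallet of daemon_a, encrypted because a password is given...
    data = await daemon_a.jsonrpc_wallet_export(password=password)
    # ...then merge it into daemon_b's default wallet, waiting for new accounts to sync.
    return await daemon_b.jsonrpc_wallet_import(data, password=password, blocking=True)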
|
||||
|
||||
@requires("wallet")
|
||||
async def jsonrpc_wallet_add(self, wallet_id):
|
||||
"""
|
||||
|
@ -1809,7 +1855,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
Options:
|
||||
--to_account=<to_account> : (str) send to this account
|
||||
--from_account=<from_account> : (str) spend from this account
|
||||
--amount=<amount> : (str) the amount to transfer lbc
|
||||
--amount=<amount> : (decimal) the amount of lbc to transfer
|
||||
--everything : (bool) transfer everything (excluding claims), default: false.
|
||||
--outputs=<outputs> : (int) split payment across many outputs, default: 1.
|
||||
--wallet_id=<wallet_id> : (str) limit operation to specific wallet.
|
||||
|
@ -1832,6 +1878,48 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
outputs=outputs, broadcast=broadcast
|
||||
)
|
||||
|
||||
@requires("wallet")
|
||||
async def jsonrpc_account_deposit(
|
||||
self, txid, nout, redeem_script, private_key,
|
||||
to_account=None, wallet_id=None, preview=False, blocking=False
|
||||
):
|
||||
"""
|
||||
Spend a time locked transaction into your account.
|
||||
|
||||
Usage:
|
||||
account_deposit <txid> <nout> <redeem_script> <private_key>
|
||||
[<to_account> | --to_account=<to_account>]
|
||||
[--wallet_id=<wallet_id>] [--preview] [--blocking]
|
||||
|
||||
Options:
|
||||
--txid=<txid> : (str) id of the transaction
|
||||
--nout=<nout> : (int) output number in the transaction
|
||||
--redeem_script=<redeem_script> : (str) redeem script for output
|
||||
--private_key=<private_key> : (str) private key to sign transaction
|
||||
--to_account=<to_account> : (str) deposit to this account
|
||||
--wallet_id=<wallet_id> : (str) limit operation to specific wallet.
|
||||
--preview : (bool) do not broadcast the transaction
|
||||
--blocking : (bool) wait until tx has synced
|
||||
|
||||
Returns: {Transaction}
|
||||
"""
|
||||
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
|
||||
account = wallet.get_account_or_default(to_account)
|
||||
other_tx = await self.wallet_manager.get_transaction(txid)
|
||||
tx = await Transaction.spend_time_lock(
|
||||
other_tx.outputs[nout], unhexlify(redeem_script), account
|
||||
)
|
||||
pk = PrivateKey.from_bytes(
|
||||
account.ledger, Base58.decode_check(private_key)[1:-1]
|
||||
)
|
||||
await tx.sign([account], {pk.address: pk})
|
||||
if not preview:
|
||||
await self.broadcast_or_release(tx, blocking)
|
||||
self.component_manager.loop.create_task(self.analytics_manager.send_credits_sent())
|
||||
else:
|
||||
await self.ledger.release_tx(tx)
|
||||
return tx
|
||||
|
||||
@requires(WALLET_COMPONENT)
|
||||
def jsonrpc_account_send(self, amount, addresses, account_id=None, wallet_id=None, preview=False, blocking=False):
|
||||
"""
|
||||
|
@ -1903,7 +1991,9 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
|
||||
wallet_changed = False
|
||||
if data is not None:
|
||||
added_accounts = wallet.merge(self.wallet_manager, password, data)
|
||||
added_accounts, merged_accounts = wallet.merge(self.wallet_manager, password, data)
|
||||
for new_account in itertools.chain(added_accounts, merged_accounts):
|
||||
await new_account.maybe_migrate_certificates()
|
||||
if added_accounts and self.ledger.network.is_connected:
|
||||
if blocking:
|
||||
await asyncio.wait([
|
||||
|
@ -2040,7 +2130,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
--channel_claim_id=<channel_claim_id> : (str) get file with matching channel claim id(s)
|
||||
--channel_name=<channel_name> : (str) get file with matching channel name
|
||||
--claim_name=<claim_name> : (str) get file with matching claim name
|
||||
--blobs_in_stream<blobs_in_stream> : (int) get file with matching blobs in stream
|
||||
--blobs_in_stream=<blobs_in_stream> : (int) get file with matching blobs in stream
|
||||
--download_path=<download_path> : (str) get file with matching download path
|
||||
--uploading_to_reflector=<uploading_to_reflector> : (bool) get files currently uploading to reflector
|
||||
--is_fully_reflected=<is_fully_reflected> : (bool) get files that have been uploaded to reflector
|
||||
|
@ -2282,7 +2372,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
accounts = wallet.get_accounts_or_all(funding_account_ids)
|
||||
txo = None
|
||||
if claim_id:
|
||||
txo = await self.ledger.get_claim_by_claim_id(accounts, claim_id, include_purchase_receipt=True)
|
||||
txo = await self.ledger.get_claim_by_claim_id(claim_id, accounts, include_purchase_receipt=True)
|
||||
if not isinstance(txo, Output) or not txo.is_claim:
|
||||
# TODO: use error from lbry.error
|
||||
raise Exception(f"Could not find claim with claim_id '{claim_id}'.")
|
||||
|
@ -2295,11 +2385,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
# TODO: use error from lbry.error
|
||||
raise Exception("Missing argument claim_id or url.")
|
||||
if not allow_duplicate_purchase and txo.purchase_receipt:
|
||||
# TODO: use error from lbry.error
|
||||
raise Exception(
|
||||
f"You already have a purchase for claim_id '{claim_id}'. "
|
||||
f"Use --allow-duplicate-purchase flag to override."
|
||||
)
|
||||
raise AlreadyPurchasedError(claim_id)
|
||||
claim = txo.claim
|
||||
if not claim.is_stream or not claim.stream.has_fee:
|
||||
# TODO: use error from lbry.error
|
||||
|
@ -2324,6 +2410,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
|
||||
Usage:
|
||||
claim_list [--claim_type=<claim_type>...] [--claim_id=<claim_id>...] [--name=<name>...] [--is_spent]
|
||||
[--reposted_claim_id=<reposted_claim_id>...]
|
||||
[--channel_id=<channel_id>...] [--account_id=<account_id>] [--wallet_id=<wallet_id>]
|
||||
[--has_source | --has_no_source] [--page=<page>] [--page_size=<page_size>]
|
||||
[--resolve] [--order_by=<order_by>] [--no_totals] [--include_received_tips]
|
||||
|
@ -2334,6 +2421,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
--channel_id=<channel_id> : (str or list) streams in this channel
|
||||
--name=<name> : (str or list) claim name
|
||||
--is_spent : (bool) shows previous claim updates and abandons
|
||||
--reposted_claim_id=<reposted_claim_id> : (str or list) reposted claim id
|
||||
--account_id=<account_id> : (str) id of the account to query
|
||||
--wallet_id=<wallet_id> : (str) restrict results to specific wallet
|
||||
--has_source : (bool) list claims containing a source field
|
||||
|
@ -2397,6 +2485,9 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
value with an equality constraint such as '>', '>=', '<' and '<='
|
||||
e.g. --height=">400000" would limit results to only claims above 400k block height.
|
||||
|
||||
They also support multiple constraints passed as a list of the args described above.
|
||||
e.g. --release_time=[">1000000", "<2000000"]
|
||||
|
||||
Usage:
|
||||
claim_search [<name> | --name=<name>] [--text=<text>] [--txid=<txid>] [--nout=<nout>]
|
||||
[--claim_id=<claim_id> | --claim_ids=<claim_ids>...]
|
||||
|
@ -2411,7 +2502,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
[--amount=<amount>] [--effective_amount=<effective_amount>]
|
||||
[--support_amount=<support_amount>] [--trending_group=<trending_group>]
|
||||
[--trending_mixed=<trending_mixed>] [--trending_local=<trending_local>]
|
||||
[--trending_global=<trending_global]
|
||||
[--trending_global=<trending_global>] [--trending_score=<trending_score>]
|
||||
[--reposted_claim_id=<reposted_claim_id>] [--reposted=<reposted>]
|
||||
[--claim_type=<claim_type>] [--stream_types=<stream_types>...] [--media_types=<media_types>...]
|
||||
[--fee_currency=<fee_currency>] [--fee_amount=<fee_amount>]
|
||||
|
@ -2423,7 +2514,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
[--not_locations=<not_locations>...]
|
||||
[--order_by=<order_by>...] [--no_totals] [--page=<page>] [--page_size=<page_size>]
|
||||
[--wallet_id=<wallet_id>] [--include_purchase_receipt] [--include_is_my_output]
|
||||
[--remove_duplicates] [--has_source | --has_no_source]
|
||||
[--remove_duplicates] [--has_source | --has_no_source] [--sd_hash=<sd_hash>]
|
||||
[--new_sdk_server=<new_sdk_server>]
|
||||
|
||||
Options:
|
||||
|
@ -2479,25 +2570,11 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
all tips and supports received), this amount is
|
||||
blank until claim has reached activation height
|
||||
(supports equality constraints)
|
||||
--trending_group=<trending_group>: (int) group numbers 1 through 4 representing the
|
||||
trending groups of the content: 4 means
|
||||
content is trending globally and independently,
|
||||
3 means content is not trending globally but is
|
||||
trending independently (locally), 2 means it is
|
||||
trending globally but not independently and 1
|
||||
means it's not trending globally or locally
|
||||
(supports equality constraints)
|
||||
--trending_mixed=<trending_mixed>: (int) trending amount taken from the global or local
|
||||
value depending on the trending group:
|
||||
4 - global value, 3 - local value, 2 - global
|
||||
value, 1 - local value (supports equality
|
||||
constraints)
|
||||
--trending_local=<trending_local>: (int) trending value calculated relative only to
|
||||
the individual contents past history (supports
|
||||
equality constraints)
|
||||
--trending_global=<trending_global>: (int) trending value calculated relative to all
|
||||
trending content globally (supports
|
||||
equality constraints)
|
||||
--trending_score=<trending_score>: (int) limit by trending score (supports equality constraints)
|
||||
--trending_group=<trending_group>: (int) DEPRECATED - instead please use trending_score
|
||||
--trending_mixed=<trending_mixed>: (int) DEPRECATED - instead please use trending_score
|
||||
--trending_local=<trending_local>: (int) DEPRECATED - instead please use trending_score
|
||||
--trending_global=<trending_global>: (int) DEPRECATED - instead please use trending_score
|
||||
--reposted_claim_id=<reposted_claim_id>: (str) all reposts of the specified original claim id
|
||||
--reposted=<reposted> : (int) claims reposted this many times (supports
|
||||
equality constraints)
|
||||
|
@ -2535,36 +2612,34 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
--remove_duplicates : (bool) removes duplicated content from search by picking either the
|
||||
original claim or the oldest matching repost
|
||||
--has_source : (bool) find claims containing a source field
|
||||
--sd_hash=<sd_hash> : (str) find claims where the source stream descriptor hash matches
|
||||
(partially or completely) the given hexadecimal string
|
||||
--has_no_source : (bool) find claims not containing a source field
|
||||
--new_sdk_server=<new_sdk_server> : (str) URL of the new SDK server (EXPERIMENTAL)
|
||||
|
||||
Returns: {Paginated[Output]}
|
||||
"""
|
||||
if self.ledger.config.get('use_go_hub'):
|
||||
host = self.ledger.network.client.server[0]
|
||||
port = "50051"
|
||||
kwargs['new_sdk_server'] = f"{host}:{port}"
|
||||
if kwargs.get("channel"):
|
||||
channel = kwargs.pop("channel")
|
||||
channel_obj = (await self.jsonrpc_resolve(channel))[channel]
|
||||
if isinstance(channel_obj, dict):
|
||||
# This happens when the channel doesn't exist
|
||||
kwargs["channel_id"] = ""
|
||||
else:
|
||||
kwargs["channel_id"] = channel_obj.claim_id
|
||||
kwargs = fix_kwargs_for_hub(**kwargs)
|
||||
else:
|
||||
# Don't do this if using the hub server, it screws everything up
|
||||
if "claim_ids" in kwargs and not kwargs["claim_ids"]:
|
||||
kwargs.pop("claim_ids")
|
||||
if {'claim_id', 'claim_ids'}.issubset(kwargs):
|
||||
raise ConflictingInputValueError('claim_id', 'claim_ids')
|
||||
if kwargs.pop('valid_channel_signature', False):
|
||||
kwargs['signature_valid'] = 1
|
||||
if kwargs.pop('invalid_channel_signature', False):
|
||||
kwargs['signature_valid'] = 0
|
||||
if 'has_no_source' in kwargs:
|
||||
kwargs['has_source'] = not kwargs.pop('has_no_source')
|
||||
if "claim_ids" in kwargs and not kwargs["claim_ids"]:
|
||||
kwargs.pop("claim_ids")
|
||||
if {'claim_id', 'claim_ids'}.issubset(kwargs):
|
||||
raise ConflictingInputValueError('claim_id', 'claim_ids')
|
||||
if kwargs.pop('valid_channel_signature', False):
|
||||
kwargs['signature_valid'] = 1
|
||||
if kwargs.pop('invalid_channel_signature', False):
|
||||
kwargs['signature_valid'] = 0
|
||||
if 'has_no_source' in kwargs:
|
||||
kwargs['has_source'] = not kwargs.pop('has_no_source')
|
||||
if 'order_by' in kwargs: # TODO: remove this after removing support for old trending args from the api
|
||||
value = kwargs.pop('order_by')
|
||||
value = value if isinstance(value, list) else [value]
|
||||
new_value = []
|
||||
for new_v in value:
|
||||
migrated = new_v if new_v not in (
|
||||
'trending_mixed', 'trending_local', 'trending_global', 'trending_group'
|
||||
) else 'trending_score'
|
||||
if migrated not in new_value:
|
||||
new_value.append(migrated)
|
||||
kwargs['order_by'] = new_value
|
||||
page_num, page_size = abs(kwargs.pop('page', 1)), min(abs(kwargs.pop('page_size', DEFAULT_PAGE_SIZE)), 50)
|
||||
wallet = self.wallet_manager.get_wallet_or_default(kwargs.pop('wallet_id', None))
|
||||
kwargs.update({'offset': page_size * (page_num - 1), 'limit': page_size})
|
||||
|
@ -2690,16 +2765,17 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
name, claim, amount, claim_address, funding_accounts, funding_accounts[0]
|
||||
)
|
||||
txo = tx.outputs[0]
|
||||
await txo.generate_channel_private_key()
|
||||
txo.set_channel_private_key(
|
||||
await funding_accounts[0].generate_channel_private_key()
|
||||
)
|
||||
|
||||
await tx.sign(funding_accounts)
|
||||
|
||||
if not preview:
|
||||
account.add_channel_private_key(txo.private_key)
|
||||
wallet.save()
|
||||
await self.broadcast_or_release(tx, blocking)
|
||||
self.component_manager.loop.create_task(self.storage.save_claims([self._old_get_temp_claim_info(
|
||||
tx, txo, claim_address, claim, name, dewies_to_lbc(amount)
|
||||
tx, txo, claim_address, claim, name
|
||||
)]))
|
||||
self.component_manager.loop.create_task(self.analytics_manager.send_new_channel())
|
||||
else:
|
||||
|
@ -2844,7 +2920,9 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
new_txo = tx.outputs[0]
|
||||
|
||||
if new_signing_key:
|
||||
await new_txo.generate_channel_private_key()
|
||||
new_txo.set_channel_private_key(
|
||||
await funding_accounts[0].generate_channel_private_key()
|
||||
)
|
||||
else:
|
||||
new_txo.private_key = old_txo.private_key
|
||||
|
||||
|
@ -2853,11 +2931,10 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
await tx.sign(funding_accounts)
|
||||
|
||||
if not preview:
|
||||
account.add_channel_private_key(new_txo.private_key)
|
||||
wallet.save()
|
||||
await self.broadcast_or_release(tx, blocking)
|
||||
self.component_manager.loop.create_task(self.storage.save_claims([self._old_get_temp_claim_info(
|
||||
tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name, dewies_to_lbc(amount)
|
||||
tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name
|
||||
)]))
|
||||
self.component_manager.loop.create_task(self.analytics_manager.send_new_channel())
|
||||
else:
|
||||
|
@ -2867,19 +2944,21 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
|
||||
@requires(WALLET_COMPONENT)
|
||||
async def jsonrpc_channel_sign(
|
||||
self, channel_name=None, channel_id=None, hexdata=None, channel_account_id=None, wallet_id=None):
|
||||
self, channel_name=None, channel_id=None, hexdata=None, salt=None,
|
||||
channel_account_id=None, wallet_id=None):
|
||||
"""
|
||||
Signs data using the specified channel signing key.
|
||||
|
||||
Usage:
|
||||
channel_sign [<channel_name> | --channel_name=<channel_name>]
|
||||
[<channel_id> | --channel_id=<channel_id>] [<hexdata> | --hexdata=<hexdata>]
|
||||
channel_sign [<channel_name> | --channel_name=<channel_name>] [<channel_id> | --channel_id=<channel_id>]
|
||||
[<hexdata> | --hexdata=<hexdata>] [<salt> | --salt=<salt>]
|
||||
[--channel_account_id=<channel_account_id>...] [--wallet_id=<wallet_id>]
|
||||
|
||||
Options:
|
||||
--channel_name=<channel_name> : (str) name of channel used to sign (or use channel id)
|
||||
--channel_id=<channel_id> : (str) claim id of channel used to sign (or use channel name)
|
||||
--hexdata=<hexdata> : (str) data to sign, encoded as hexadecimal
|
||||
--salt=<salt> : (str) salt to use for signing, default is to use timestamp
|
||||
--channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
|
||||
for channel certificates, defaults to all accounts.
|
||||
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
|
||||
|
@ -2896,11 +2975,13 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
signing_channel = await self.get_channel_or_error(
|
||||
wallet, channel_account_id, channel_id, channel_name, for_signing=True
|
||||
)
|
||||
timestamp = str(int(time.time()))
|
||||
signature = signing_channel.sign_data(unhexlify(hexdata), timestamp)
|
||||
if salt is None:
|
||||
salt = str(int(time.time()))
|
||||
signature = signing_channel.sign_data(unhexlify(str(hexdata)), salt)
|
||||
return {
|
||||
'signature': signature,
|
||||
'signing_ts': timestamp
|
||||
'signing_ts': salt, # DEPRECATED
|
||||
'salt': salt,
|
||||
}
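With the salt parameter added above, a caller can supply its own nonce and read it back from the response; 'signing_ts' is kept only for backward compatibility. A hedged usage sketch, assuming the daemon's default JSON-RPC endpoint on localhost:5279 and the requests library:

    import requests
    from binascii import hexlify

    payload = {
        "method": "channel_sign",
        "params": {
            "channel_name": "@example",                   # hypothetical channel
            "hexdata": hexlify(b"hello world").decode(),  # data must be hex encoded
            "salt": "my-nonce",                           # optional; a timestamp is used if omitted
        },
    }
    result = requests.post("http://localhost:5279", json=payload).json()["result"]
    signature, salt = result["signature"], result["salt"]  # 'signing_ts' mirrors 'salt' (deprecated)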
|
||||
|
||||
@requires(WALLET_COMPONENT)
|
||||
|
@ -3025,7 +3106,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
'channel_id': channel.claim_id,
|
||||
'holding_address': address,
|
||||
'holding_public_key': public_key.extended_key_string(),
|
||||
'signing_private_key': channel.private_key.to_pem().decode()
|
||||
'signing_private_key': channel.private_key.signing_key.to_pem().decode()
|
||||
}
|
||||
return base58.b58encode(json.dumps(export, separators=(',', ':')))
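The export produced above is just base58-encoded compact JSON, so it can be unpacked without the daemon. A small sketch (the function name is illustrative):

    import json
    import base58

    def inspect_channel_export(exported: str) -> dict:
        data = json.loads(base58.b58decode(exported))
        # keys written by the export: channel_id, holding_address,
        # holding_public_key, signing_private_key (PEM text)
        return {
            'channel_id': data['channel_id'],
            'holding_address': data['holding_address'],
            'has_private_key': bool(data.get('signing_private_key')),
        }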
|
||||
|
||||
|
@ -3048,15 +3129,14 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
|
||||
decoded = base58.b58decode(channel_data)
|
||||
data = json.loads(decoded)
|
||||
channel_private_key = ecdsa.SigningKey.from_pem(
|
||||
data['signing_private_key'], hashfunc=hashlib.sha256
|
||||
channel_private_key = PrivateKey.from_pem(
|
||||
self.ledger, data['signing_private_key']
|
||||
)
|
||||
public_key_der = channel_private_key.get_verifying_key().to_der()
|
||||
|
||||
# check that the holding_address hasn't changed since the export was made
|
||||
holding_address = data['holding_address']
|
||||
channels, _, _, _ = await self.ledger.claim_search(
|
||||
wallet.accounts, public_key_id=self.ledger.public_key_to_address(public_key_der)
|
||||
wallet.accounts, public_key_id=channel_private_key.address
|
||||
)
|
||||
if channels and channels[0].get_address(self.ledger) != holding_address:
|
||||
holding_address = channels[0].get_address(self.ledger)
|
||||
|
@ -3219,15 +3299,17 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
)
|
||||
|
||||
@requires(WALLET_COMPONENT, FILE_MANAGER_COMPONENT, BLOB_COMPONENT, DATABASE_COMPONENT)
|
||||
async def jsonrpc_stream_repost(self, name, bid, claim_id, allow_duplicate_name=False, channel_id=None,
|
||||
channel_name=None, channel_account_id=None, account_id=None, wallet_id=None,
|
||||
claim_address=None, funding_account_ids=None, preview=False, blocking=False):
|
||||
async def jsonrpc_stream_repost(
|
||||
self, name, bid, claim_id, allow_duplicate_name=False, channel_id=None,
|
||||
channel_name=None, channel_account_id=None, account_id=None, wallet_id=None,
|
||||
claim_address=None, funding_account_ids=None, preview=False, blocking=False, **kwargs):
|
||||
"""
|
||||
Creates a claim that references an existing stream by its claim id.
|
||||
|
||||
Usage:
|
||||
stream_repost (<name> | --name=<name>) (<bid> | --bid=<bid>) (<claim_id> | --claim_id=<claim_id>)
|
||||
[--allow_duplicate_name=<allow_duplicate_name>]
|
||||
[--title=<title>] [--description=<description>] [--tags=<tags>...]
|
||||
[--channel_id=<channel_id> | --channel_name=<channel_name>]
|
||||
[--channel_account_id=<channel_account_id>...]
|
||||
[--account_id=<account_id>] [--wallet_id=<wallet_id>]
|
||||
|
@ -3240,6 +3322,9 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
--claim_id=<claim_id> : (str) id of the claim being reposted
|
||||
--allow_duplicate_name=<allow_duplicate_name> : (bool) create new claim even if one already exists with
|
||||
given name. default: false.
|
||||
--title=<title> : (str) title of the repost
|
||||
--description=<description> : (str) description of the repost
|
||||
--tags=<tags> : (list) add repost tags
|
||||
--channel_id=<channel_id> : (str) claim id of the publisher channel
|
||||
--channel_name=<channel_name> : (str) name of the publisher channel
|
||||
--channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
|
||||
|
@ -3274,6 +3359,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
raise Exception('Invalid claim id. It is expected to be a 40 characters long hexadecimal string.')
|
||||
|
||||
claim = Claim()
|
||||
claim.repost.update(**kwargs)
|
||||
claim.repost.reference.claim_id = claim_id
|
||||
tx = await Transaction.claim_create(
|
||||
name, claim, amount, claim_address, funding_accounts, funding_accounts[0], channel
|
||||
|
@ -3451,7 +3537,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
|
||||
async def save_claims():
|
||||
await self.storage.save_claims([self._old_get_temp_claim_info(
|
||||
tx, new_txo, claim_address, claim, name, dewies_to_lbc(amount)
|
||||
tx, new_txo, claim_address, claim, name
|
||||
)])
|
||||
if file_path is not None:
|
||||
await self.storage.save_content_claim(file_stream.stream_hash, new_txo.id)
|
||||
|
@ -3598,15 +3684,17 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
)
|
||||
if len(existing_claims) != 1:
|
||||
account_ids = ', '.join(f"'{account.id}'" for account in accounts)
|
||||
# TODO: use error from lbry.error
|
||||
raise Exception(
|
||||
raise InputValueError(
|
||||
f"Can't find the stream '{claim_id}' in account(s) {account_ids}."
|
||||
)
|
||||
|
||||
old_txo = existing_claims[0]
|
||||
if not old_txo.claim.is_stream:
|
||||
# TODO: use error from lbry.error
|
||||
raise Exception(
|
||||
f"A claim with id '{claim_id}' was found but it is not a stream claim."
|
||||
if not old_txo.claim.is_stream and not old_txo.claim.is_repost:
|
||||
# in principle it should work with any type of claim, but it's safer to
|
||||
# limit it to ones we know won't be broken. in the future we can expand
|
||||
# this if we have a test case for e.g. channel or support claims
|
||||
raise InputValueError(
|
||||
f"A claim with id '{claim_id}' was found but it is not a stream or repost claim."
|
||||
)
|
||||
|
||||
if bid is not None:
|
||||
|
@ -3620,7 +3708,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
claim_address = old_txo.get_address(account.ledger)
|
||||
|
||||
channel = None
|
||||
if channel_id or channel_name:
|
||||
if not clear_channel and (channel_id or channel_name):
|
||||
channel = await self.get_channel_or_error(
|
||||
wallet, channel_account_id, channel_id, channel_name, for_signing=True)
|
||||
elif old_txo.claim.is_signed and not clear_channel and not replace:
|
||||
|
@ -3637,26 +3725,34 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
|
||||
if replace:
|
||||
claim = Claim()
|
||||
if old_txo.claim.stream.has_source:
|
||||
claim.stream.message.source.CopyFrom(
|
||||
old_txo.claim.stream.message.source
|
||||
)
|
||||
stream_type = old_txo.claim.stream.stream_type
|
||||
if stream_type:
|
||||
old_stream_type = getattr(old_txo.claim.stream.message, stream_type)
|
||||
new_stream_type = getattr(claim.stream.message, stream_type)
|
||||
new_stream_type.CopyFrom(old_stream_type)
|
||||
claim.stream.update(file_path=file_path, **kwargs)
|
||||
if old_txo.claim.is_stream:
|
||||
if old_txo.claim.stream.has_source:
|
||||
claim.stream.message.source.CopyFrom(
|
||||
old_txo.claim.stream.message.source
|
||||
)
|
||||
stream_type = old_txo.claim.stream.stream_type
|
||||
if stream_type:
|
||||
old_stream_type = getattr(old_txo.claim.stream.message, stream_type)
|
||||
new_stream_type = getattr(claim.stream.message, stream_type)
|
||||
new_stream_type.CopyFrom(old_stream_type)
|
||||
else:
|
||||
claim = Claim.from_bytes(old_txo.claim.to_bytes())
|
||||
claim.stream.update(file_path=file_path, **kwargs)
|
||||
tx = await Transaction.claim_update(
|
||||
old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0], channel
|
||||
)
|
||||
new_txo = tx.outputs[0]
|
||||
|
||||
if old_txo.claim.is_stream:
|
||||
claim.stream.update(file_path=file_path, **kwargs)
|
||||
elif old_txo.claim.is_repost:
|
||||
claim.repost.update(**kwargs)
|
||||
|
||||
if clear_channel:
|
||||
claim.clear_signature()
|
||||
tx = await Transaction.claim_update(
|
||||
old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0],
|
||||
channel if not clear_channel else None
|
||||
)
|
||||
|
||||
new_txo = tx.outputs[0]
|
||||
stream_hash = None
|
||||
if not preview:
|
||||
if not preview and old_txo.claim.is_stream:
|
||||
old_stream = self.file_manager.get_filtered(sd_hash=old_txo.claim.stream.source.sd_hash)
|
||||
old_stream = old_stream[0] if old_stream else None
|
||||
if file_path is not None:
|
||||
|
@ -3678,7 +3774,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
|
||||
async def save_claims():
|
||||
await self.storage.save_claims([self._old_get_temp_claim_info(
|
||||
tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name, dewies_to_lbc(amount)
|
||||
tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name
|
||||
)])
|
||||
if stream_hash:
|
||||
await self.storage.save_content_claim(stream_hash, new_txo.id)
|
||||
|
@ -3940,6 +4036,8 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
[--languages=<languages>...] [--clear_languages]
|
||||
[--locations=<locations>...] [--clear_locations]
|
||||
[--thumbnail_url=<thumbnail_url>] [--cover_url=<cover_url>]
|
||||
[--channel_id=<channel_id> | --channel_name=<channel_name>]
|
||||
[--channel_account_id=<channel_account_id>...]
|
||||
[--account_id=<account_id>] [--wallet_id=<wallet_id>]
|
||||
[--claim_address=<claim_address>]
|
||||
[--funding_account_ids=<funding_account_ids>...]
|
||||
|
@ -3995,6 +4093,10 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
|
||||
--clear_locations : (bool) clear existing locations (prior to adding new ones)
|
||||
--thumbnail_url=<thumbnail_url>: (str) thumbnail url
|
||||
--channel_id=<channel_id> : (str) claim id of the publisher channel
|
||||
--channel_name=<channel_name> : (str) name of the publisher channel
|
||||
--channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
|
||||
for channel certificates, defaults to all accounts.
|
||||
--account_id=<account_id> : (str) account in which to look for collection (default: all)
|
||||
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
|
||||
--funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
|
||||
|
@ -4152,7 +4254,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
|
||||
|
||||
if claim_id:
|
||||
txo = await self.ledger.get_claim_by_claim_id(wallet.accounts, claim_id)
|
||||
txo = await self.ledger.get_claim_by_claim_id(claim_id, wallet.accounts)
|
||||
if not isinstance(txo, Output) or not txo.is_claim:
|
||||
# TODO: use error from lbry.error
|
||||
raise Exception(f"Could not find collection with claim_id '{claim_id}'.")
|
||||
|
@ -4219,7 +4321,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
|
||||
channel = await self.get_channel_or_none(wallet, channel_account_id, channel_id, channel_name, for_signing=True)
|
||||
amount = self.get_dewies_or_error("amount", amount)
|
||||
claim = await self.ledger.get_claim_by_claim_id(wallet.accounts, claim_id)
|
||||
claim = await self.ledger.get_claim_by_claim_id(claim_id)
|
||||
claim_address = claim.get_address(self.ledger)
|
||||
if not tip:
|
||||
account = wallet.get_account_or_default(account_id)
|
||||
|
@ -4242,7 +4344,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
'nout': tx.position,
|
||||
'address': claim_address,
|
||||
'claim_id': claim_id,
|
||||
'amount': dewies_to_lbc(amount)
|
||||
'amount': dewies_to_lbc(new_txo.amount)
|
||||
}]})
|
||||
self.component_manager.loop.create_task(self.analytics_manager.send_claim_action('new_support'))
|
||||
else:
|
||||
|
@ -4859,21 +4961,16 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
DHT / Blob Exchange peer commands.
|
||||
"""
|
||||
|
||||
@requires(DHT_COMPONENT)
|
||||
async def jsonrpc_peer_list(self, blob_hash, search_bottom_out_limit=None, page=None, page_size=None):
|
||||
async def jsonrpc_peer_list(self, blob_hash, page=None, page_size=None):
|
||||
"""
|
||||
Get peers for blob hash
|
||||
|
||||
Usage:
|
||||
peer_list (<blob_hash> | --blob_hash=<blob_hash>)
|
||||
[<search_bottom_out_limit> | --search_bottom_out_limit=<search_bottom_out_limit>]
|
||||
[--page=<page>] [--page_size=<page_size>]
|
||||
|
||||
Options:
|
||||
--blob_hash=<blob_hash> : (str) find available peers for this blob hash
|
||||
--search_bottom_out_limit=<search_bottom_out_limit> : (int) the number of search probes in a row
|
||||
that don't find any new peers
|
||||
before giving up and returning
|
||||
--page=<page> : (int) page to return during paginating
|
||||
--page_size=<page_size> : (int) number of items on page during pagination
|
||||
|
||||
|
@ -4885,28 +4982,29 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
if not is_valid_blobhash(blob_hash):
|
||||
# TODO: use error from lbry.error
|
||||
raise Exception("invalid blob hash")
|
||||
if search_bottom_out_limit is not None:
|
||||
search_bottom_out_limit = int(search_bottom_out_limit)
|
||||
if search_bottom_out_limit <= 0:
|
||||
# TODO: use error from lbry.error
|
||||
raise Exception("invalid bottom out limit")
|
||||
else:
|
||||
search_bottom_out_limit = 4
|
||||
peers = []
|
||||
peer_q = asyncio.Queue(loop=self.component_manager.loop)
|
||||
await self.dht_node._peers_for_value_producer(blob_hash, peer_q)
|
||||
if self.component_manager.has_component(TRACKER_ANNOUNCER_COMPONENT):
|
||||
tracker = self.component_manager.get_component(TRACKER_ANNOUNCER_COMPONENT)
|
||||
tracker_peers = await tracker.get_kademlia_peer_list(bytes.fromhex(blob_hash))
|
||||
log.info("Found %d peers for %s from trackers.", len(tracker_peers), blob_hash[:8])
|
||||
peer_q.put_nowait(tracker_peers)
|
||||
elif not self.component_manager.has_component(DHT_COMPONENT):
|
||||
raise Exception("Peer list needs, at least, either a DHT component or a Tracker component for discovery.")
|
||||
peers = []
|
||||
if self.component_manager.has_component(DHT_COMPONENT):
|
||||
await self.dht_node._peers_for_value_producer(blob_hash, peer_q)
|
||||
while not peer_q.empty():
|
||||
peers.extend(peer_q.get_nowait())
|
||||
results = [
|
||||
{
|
||||
"node_id": hexlify(peer.node_id).decode(),
|
||||
results = {
|
||||
(peer.address, peer.tcp_port): {
|
||||
"node_id": hexlify(peer.node_id).decode() if peer.node_id else None,
|
||||
"address": peer.address,
|
||||
"udp_port": peer.udp_port,
|
||||
"tcp_port": peer.tcp_port,
|
||||
}
|
||||
for peer in peers
|
||||
]
|
||||
return paginate_list(results, page, page_size)
|
||||
}
|
||||
return paginate_list(list(results.values()), page, page_size)
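The rewritten peer_list keys results by (address, tcp_port), so a peer reported by both the tracker (no node_id) and the DHT shows up once. A stand-alone restatement of that de-duplication, with Peer as a stand-in for the real peer objects:

    from binascii import hexlify
    from collections import namedtuple

    Peer = namedtuple('Peer', 'node_id address udp_port tcp_port')

    def merge_peers(peers):
        results = {}
        for peer in peers:  # later entries for the same (address, tcp_port) overwrite earlier ones
            results[(peer.address, peer.tcp_port)] = {
                'node_id': hexlify(peer.node_id).decode() if peer.node_id else None,
                'address': peer.address,
                'udp_port': peer.udp_port,
                'tcp_port': peer.tcp_port,
            }
        return list(results.values())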
|
||||
|
||||
@requires(DATABASE_COMPONENT)
|
||||
async def jsonrpc_blob_announce(self, blob_hash=None, stream_hash=None, sd_hash=None):
|
||||
|
@ -4961,7 +5059,8 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
--finished : (bool) only return finished blobs
|
||||
--uri=<uri> : (str) filter blobs by stream in a uri
|
||||
--stream_hash=<stream_hash> : (str) filter blobs by stream hash
|
||||
--sd_hash=<sd_hash> : (str) filter blobs by sd hash
|
||||
--sd_hash=<sd_hash>                                   : (str) filter blobs in a stream by sd hash, i.e. the hash of the stream
|
||||
descriptor blob for a stream that has been downloaded
|
||||
--page=<page> : (int) page to return during paginating
|
||||
--page_size=<page_size> : (int) number of items on page during pagination
|
||||
|
||||
|
@ -5041,7 +5140,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
Returns:
|
||||
(bool) true if successful
|
||||
"""
|
||||
return self.disk_space_manager.clean()
|
||||
return await self.disk_space_manager.clean()
|
||||
|
||||
@requires(FILE_MANAGER_COMPONENT)
|
||||
async def jsonrpc_file_reflect(self, **kwargs):
|
||||
|
@ -5072,8 +5171,8 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
else:
|
||||
server, port = random.choice(self.conf.reflector_servers)
|
||||
reflected = await asyncio.gather(*[
|
||||
self.file_manager['stream'].reflect_stream(stream, server, port)
|
||||
for stream in self.file_manager.get_filtered_streams(**kwargs)
|
||||
self.file_manager.source_managers['stream'].reflect_stream(stream, server, port)
|
||||
for stream in self.file_manager.get_filtered(**kwargs)
|
||||
])
|
||||
total = []
|
||||
for reflected_for_stream in reflected:
|
||||
|
@ -5130,10 +5229,12 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
]
|
||||
},
|
||||
"node_id": (str) the local dht node id
|
||||
"prefix_neighbors_count": (int) the amount of peers sharing the same byte prefix of the local node id
|
||||
}
|
||||
"""
|
||||
result = {
|
||||
'buckets': {}
|
||||
'buckets': {},
|
||||
'prefix_neighbors_count': 0
|
||||
}
|
||||
|
||||
for i, _ in enumerate(self.dht_node.protocol.routing_table.buckets):
|
||||
|
@ -5146,6 +5247,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
"node_id": hexlify(peer.node_id).decode(),
|
||||
}
|
||||
result['buckets'][i].append(host)
|
||||
result['prefix_neighbors_count'] += 1 if peer.node_id[0] == self.dht_node.protocol.node_id[0] else 0
|
||||
|
||||
result['node_id'] = hexlify(self.dht_node.protocol.node_id).decode()
|
||||
return result
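The prefix_neighbors_count added above counts routing-table peers whose node id starts with the same byte as the local node id; a tiny illustration:

    def count_prefix_neighbors(local_node_id: bytes, peer_node_ids) -> int:
        # a peer is a "prefix neighbor" when the first byte of its id matches ours
        return sum(1 for node_id in peer_node_ids if node_id[0] == local_node_id[0])

    # count_prefix_neighbors(b'\xab\x01', [b'\xab\x02', b'\xcd\x03']) == 1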
|
||||
|
@ -5246,8 +5348,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
def valid_stream_name_or_error(name: str):
|
||||
try:
|
||||
if not name:
|
||||
# TODO: use error from lbry.error
|
||||
raise Exception('Stream name cannot be blank.')
|
||||
raise InputStringIsBlankError('Stream name')
|
||||
parsed = URL.parse(name)
|
||||
if parsed.has_channel:
|
||||
# TODO: use error from lbry.error
|
||||
|
@ -5337,7 +5438,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
if len(channels) == 1:
|
||||
if for_signing and not channels[0].has_private_key:
|
||||
# TODO: use error from lbry.error
|
||||
raise Exception(f"Couldn't find private key for {key} '{value}'. ")
|
||||
raise PrivateKeyNotFoundError(key, value)
|
||||
return channels[0]
|
||||
elif len(channels) > 1:
|
||||
# TODO: use error from lbry.error
|
||||
|
@ -5373,11 +5474,11 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
return results
|
||||
|
||||
@staticmethod
|
||||
def _old_get_temp_claim_info(tx, txo, address, claim_dict, name, bid):
|
||||
def _old_get_temp_claim_info(tx, txo, address, claim_dict, name):
|
||||
return {
|
||||
"claim_id": txo.claim_id,
|
||||
"name": name,
|
||||
"amount": bid,
|
||||
"amount": dewies_to_lbc(txo.amount),
|
||||
"address": address,
|
||||
"txid": tx.id,
|
||||
"nout": txo.position,
|
||||
|
|
|
@ -80,8 +80,6 @@ class MarketFeed:
|
|||
self.rate = ExchangeRate(self.market, rate, int(time.time()))
|
||||
self.last_check = time.time()
|
||||
return self.rate
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
except asyncio.TimeoutError:
|
||||
log.warning("Timed out fetching exchange rate from %s.", self.name)
|
||||
except json.JSONDecodeError as e:
|
||||
|
@ -196,9 +194,9 @@ FEEDS: Iterable[Type[MarketFeed]] = (
|
|||
BittrexUSDFeed,
|
||||
CoinExBTCFeed,
|
||||
CoinExUSDFeed,
|
||||
HotbitBTCFeed,
|
||||
HotbitUSDFeed,
|
||||
UPbitBTCFeed,
|
||||
# HotbitBTCFeed,
|
||||
# HotbitUSDFeed,
|
||||
# UPbitBTCFeed,
|
||||
)
|
||||
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ from lbry.schema.claim import Claim
|
|||
from lbry.schema.support import Support
|
||||
from lbry.torrent.torrent_manager import TorrentSource
|
||||
from lbry.wallet import Wallet, Ledger, Account, Transaction, Output
|
||||
from lbry.wallet.bip32 import PubKey
|
||||
from lbry.wallet.bip32 import PublicKey
|
||||
from lbry.wallet.dewies import dewies_to_lbc
|
||||
from lbry.stream.managed_stream import ManagedStream
|
||||
|
||||
|
@ -138,7 +138,7 @@ class JSONResponseEncoder(JSONEncoder):
|
|||
return self.encode_claim(obj)
|
||||
if isinstance(obj, Support):
|
||||
return obj.to_dict()
|
||||
if isinstance(obj, PubKey):
|
||||
if isinstance(obj, PublicKey):
|
||||
return obj.extended_key_string()
|
||||
if isinstance(obj, datetime):
|
||||
return obj.strftime("%Y%m%dT%H:%M:%S")
|
||||
|
@ -328,8 +328,8 @@ class JSONResponseEncoder(JSONEncoder):
|
|||
result.update({
|
||||
'streaming_url': managed_stream.stream_url,
|
||||
'stream_hash': managed_stream.stream_hash,
|
||||
'stream_name': managed_stream.descriptor.stream_name,
|
||||
'suggested_file_name': managed_stream.descriptor.suggested_file_name,
|
||||
'stream_name': managed_stream.stream_name,
|
||||
'suggested_file_name': managed_stream.suggested_file_name,
|
||||
'sd_hash': managed_stream.descriptor.sd_hash,
|
||||
'mime_type': managed_stream.mime_type,
|
||||
'key': managed_stream.descriptor.key,
|
||||
|
|
|
@ -35,6 +35,10 @@ def migrate_db(conf, start, end):
|
|||
from .migrate12to13 import do_migration
|
||||
elif current == 13:
|
||||
from .migrate13to14 import do_migration
|
||||
elif current == 14:
|
||||
from .migrate14to15 import do_migration
|
||||
elif current == 15:
|
||||
from .migrate15to16 import do_migration
|
||||
else:
|
||||
raise Exception(f"DB migration of version {current} to {current+1} is not available")
|
||||
try:
|
||||
|
|
16
lbry/extras/daemon/migrator/migrate14to15.py
Normal file
|
@ -0,0 +1,16 @@
|
|||
import os
|
||||
import sqlite3
|
||||
|
||||
|
||||
def do_migration(conf):
|
||||
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
||||
connection = sqlite3.connect(db_path)
|
||||
cursor = connection.cursor()
|
||||
|
||||
cursor.executescript("""
|
||||
alter table blob add column added_on integer not null default 0;
|
||||
alter table blob add column is_mine integer not null default 1;
|
||||
""")
|
||||
|
||||
connection.commit()
|
||||
connection.close()
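A hedged verification sketch (not part of the migration itself): after do_migration runs, the blob table should expose the two new columns, which can be checked with sqlite3's PRAGMA table_info:

    import sqlite3

    def blob_columns(db_path: str) -> set:
        connection = sqlite3.connect(db_path)
        try:
            return {row[1] for row in connection.execute("pragma table_info(blob)")}
        finally:
            connection.close()

    # expected after migrating: {'added_on', 'is_mine'} <= blob_columns(db_path)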
|
17
lbry/extras/daemon/migrator/migrate15to16.py
Normal file
|
@ -0,0 +1,17 @@
|
|||
import os
|
||||
import sqlite3
|
||||
|
||||
|
||||
def do_migration(conf):
|
||||
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
||||
connection = sqlite3.connect(db_path)
|
||||
cursor = connection.cursor()
|
||||
|
||||
cursor.executescript("""
|
||||
update blob set should_announce=0
|
||||
where should_announce=1 and
|
||||
blob.blob_hash in (select stream_blob.blob_hash from stream_blob where position=0);
|
||||
""")
|
||||
|
||||
connection.commit()
|
||||
connection.close()
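A hedged companion sketch: the rows this migration clears are head content blobs (stream position 0) that were still flagged for announcing, and they can be counted before migrating:

    import sqlite3

    def rows_to_unannounce(db_path: str) -> int:
        connection = sqlite3.connect(db_path)
        try:
            return connection.execute(
                "select count(*) from blob where should_announce=1 and blob_hash in "
                "(select blob_hash from stream_blob where position=0)"
            ).fetchone()[0]
        finally:
            connection.close()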
|
|
@ -20,7 +20,7 @@ def do_migration(conf):
|
|||
"left outer join blob b ON b.blob_hash=s.blob_hash order by s.position").fetchall()
|
||||
blobs_by_stream = {}
|
||||
for stream_hash, position, iv, blob_hash, blob_length in blobs:
|
||||
blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, blob_hash))
|
||||
blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, 0, blob_hash))
|
||||
|
||||
for stream_name, stream_key, suggested_filename, sd_hash, stream_hash in streams:
|
||||
sd = StreamDescriptor(None, blob_dir, stream_name, stream_key, suggested_filename,
|
||||
|
|
|
@ -170,8 +170,8 @@ def get_all_lbry_files(transaction: sqlite3.Connection) -> typing.List[typing.Di
|
|||
def store_stream(transaction: sqlite3.Connection, sd_blob: 'BlobFile', descriptor: 'StreamDescriptor'):
|
||||
# add all blobs, except the last one, which is empty
|
||||
transaction.executemany(
|
||||
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)",
|
||||
((blob.blob_hash, blob.length, 0, 0, "pending", 0, 0)
|
||||
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
|
||||
((blob.blob_hash, blob.length, 0, 0, "pending", 0, 0, blob.added_on, blob.is_mine)
|
||||
for blob in (descriptor.blobs[:-1] if len(descriptor.blobs) > 1 else descriptor.blobs) + [sd_blob])
|
||||
).fetchall()
|
||||
# associate the blobs to the stream
|
||||
|
@ -187,8 +187,8 @@ def store_stream(transaction: sqlite3.Connection, sd_blob: 'BlobFile', descripto
|
|||
).fetchall()
|
||||
# ensure should_announce is set regardless if insert was ignored
|
||||
transaction.execute(
|
||||
"update blob set should_announce=1 where blob_hash in (?, ?)",
|
||||
(sd_blob.blob_hash, descriptor.blobs[0].blob_hash,)
|
||||
"update blob set should_announce=1 where blob_hash in (?)",
|
||||
(sd_blob.blob_hash,)
|
||||
).fetchall()
|
||||
|
||||
|
||||
|
@ -242,7 +242,9 @@ class SQLiteStorage(SQLiteMixin):
|
|||
should_announce integer not null default 0,
|
||||
status text not null,
|
||||
last_announced_time integer,
|
||||
single_announce integer
|
||||
single_announce integer,
|
||||
added_on integer not null,
|
||||
is_mine integer not null default 0
|
||||
);
|
||||
|
||||
create table if not exists stream (
|
||||
|
@ -335,6 +337,7 @@ class SQLiteStorage(SQLiteMixin):
|
|||
tcp_port integer,
|
||||
unique (address, udp_port)
|
||||
);
|
||||
create index if not exists blob_data on blob(blob_hash, blob_length, is_mine);
|
||||
"""
|
||||
|
||||
def __init__(self, conf: Config, path, loop=None, time_getter: typing.Optional[typing.Callable[[], float]] = None):
|
||||
|
@ -356,19 +359,19 @@ class SQLiteStorage(SQLiteMixin):
|
|||
|
||||
# # # # # # # # # blob functions # # # # # # # # #
|
||||
|
||||
async def add_blobs(self, *blob_hashes_and_lengths: typing.Tuple[str, int], finished=False):
|
||||
async def add_blobs(self, *blob_hashes_and_lengths: typing.Tuple[str, int, int, int], finished=False):
|
||||
def _add_blobs(transaction: sqlite3.Connection):
|
||||
transaction.executemany(
|
||||
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)",
|
||||
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
|
||||
(
|
||||
(blob_hash, length, 0, 0, "pending" if not finished else "finished", 0, 0)
|
||||
for blob_hash, length in blob_hashes_and_lengths
|
||||
(blob_hash, length, 0, 0, "pending" if not finished else "finished", 0, 0, added_on, is_mine)
|
||||
for blob_hash, length, added_on, is_mine in blob_hashes_and_lengths
|
||||
)
|
||||
).fetchall()
|
||||
if finished:
|
||||
transaction.executemany(
|
||||
"update blob set status='finished' where blob.blob_hash=?", (
|
||||
(blob_hash, ) for blob_hash, _ in blob_hashes_and_lengths
|
||||
(blob_hash, ) for blob_hash, _, _, _ in blob_hashes_and_lengths
|
||||
)
|
||||
).fetchall()
|
||||
return await self.db.run(_add_blobs)
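With the widened signature above, callers now pass 4-tuples of (blob_hash, length, added_on, is_mine) instead of 2-tuples. A usage sketch with placeholder hashes, assuming an initialized SQLiteStorage instance:

    import time

    async def record_blobs(storage):
        await storage.add_blobs(
            ("ab" * 48, 2097152, int(time.time()), 1),  # is_mine=1: a blob produced locally
            ("cd" * 48, 2097152, int(time.time()), 0),  # is_mine=0: a blob fetched from the network
            finished=True,
        )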
|
||||
|
@ -378,6 +381,11 @@ class SQLiteStorage(SQLiteMixin):
|
|||
"select status from blob where blob_hash=?", blob_hash
|
||||
)
|
||||
|
||||
def set_announce(self, *blob_hashes):
|
||||
return self.db.execute_fetchall(
|
||||
"update blob set should_announce=1 where blob_hash in (?, ?)", blob_hashes
|
||||
)
|
||||
|
||||
def update_last_announced_blobs(self, blob_hashes: typing.List[str]):
|
||||
def _update_last_announced_blobs(transaction: sqlite3.Connection):
|
||||
last_announced = self.time_getter()
|
||||
|
@ -435,6 +443,62 @@ class SQLiteStorage(SQLiteMixin):
|
|||
def get_all_blob_hashes(self):
|
||||
return self.run_and_return_list("select blob_hash from blob")
|
||||
|
||||
async def get_stored_blobs(self, is_mine: bool, is_network_blob=False):
|
||||
is_mine = 1 if is_mine else 0
|
||||
if is_network_blob:
|
||||
return await self.db.execute_fetchall(
|
||||
"select blob.blob_hash, blob.blob_length, blob.added_on "
|
||||
"from blob left join stream_blob using (blob_hash) "
|
||||
"where stream_blob.stream_hash is null and blob.is_mine=? and blob.status='finished'"
|
||||
"order by blob.blob_length desc, blob.added_on asc",
|
||||
(is_mine,)
|
||||
)
|
||||
|
||||
sd_blobs = await self.db.execute_fetchall(
|
||||
"select blob.blob_hash, blob.blob_length, blob.added_on "
|
||||
"from blob join stream on blob.blob_hash=stream.sd_hash join file using (stream_hash) "
|
||||
"where blob.is_mine=? order by blob.added_on asc",
|
||||
(is_mine,)
|
||||
)
|
||||
content_blobs = await self.db.execute_fetchall(
|
||||
"select blob.blob_hash, blob.blob_length, blob.added_on "
|
||||
"from blob join stream_blob using (blob_hash) cross join stream using (stream_hash)"
|
||||
"cross join file using (stream_hash)"
|
||||
"where blob.is_mine=? and blob.status='finished' order by blob.added_on asc, blob.blob_length asc",
|
||||
(is_mine,)
|
||||
)
|
||||
return content_blobs + sd_blobs
|
||||
|
||||
async def get_stored_blob_disk_usage(self):
|
||||
total, network_size, content_size, private_size = await self.db.execute_fetchone("""
|
||||
select coalesce(sum(blob_length), 0) as total,
|
||||
coalesce(sum(case when
|
||||
stream_blob.stream_hash is null
|
||||
then blob_length else 0 end), 0) as network_storage,
|
||||
coalesce(sum(case when
|
||||
stream_blob.blob_hash is not null and is_mine=0
|
||||
then blob_length else 0 end), 0) as content_storage,
|
||||
coalesce(sum(case when
|
||||
is_mine=1
|
||||
then blob_length else 0 end), 0) as private_storage
|
||||
from blob left join stream_blob using (blob_hash)
|
||||
where blob_hash not in (select sd_hash from stream) and blob.status="finished"
|
||||
""")
|
||||
return {
|
||||
'network_storage': network_size,
|
||||
'content_storage': content_size,
|
||||
'private_storage': private_size,
|
||||
'total': total
|
||||
}
|
||||
|
||||
async def update_blob_ownership(self, sd_hash, is_mine: bool):
|
||||
is_mine = 1 if is_mine else 0
|
||||
await self.db.execute_fetchall(
|
||||
"update blob set is_mine = ? where blob_hash in ("
|
||||
" select blob_hash from blob natural join stream_blob natural join stream where sd_hash = ?"
|
||||
") OR blob_hash = ?", (is_mine, sd_hash, sd_hash)
|
||||
)
|
||||
|
||||
def sync_missing_blobs(self, blob_files: typing.Set[str]) -> typing.Awaitable[typing.Set[str]]:
|
||||
def _sync_blobs(transaction: sqlite3.Connection) -> typing.Set[str]:
|
||||
finished_blob_hashes = tuple(
|
||||
|
@ -470,7 +534,8 @@ class SQLiteStorage(SQLiteMixin):
|
|||
def _get_blobs_for_stream(transaction):
|
||||
crypt_blob_infos = []
|
||||
stream_blobs = transaction.execute(
|
||||
"select blob_hash, position, iv from stream_blob where stream_hash=? "
|
||||
"select s.blob_hash, s.position, s.iv, b.added_on "
|
||||
"from stream_blob s left outer join blob b on b.blob_hash=s.blob_hash where stream_hash=? "
|
||||
"order by position asc", (stream_hash, )
|
||||
).fetchall()
|
||||
if only_completed:
|
||||
|
@ -490,9 +555,10 @@ class SQLiteStorage(SQLiteMixin):
|
|||
for blob_hash, length in lengths:
|
||||
blob_length_dict[blob_hash] = length
|
||||
|
||||
for blob_hash, position, iv in stream_blobs:
|
||||
current_time = time.time()
|
||||
for blob_hash, position, iv, added_on in stream_blobs:
|
||||
blob_length = blob_length_dict.get(blob_hash, 0)
|
||||
crypt_blob_infos.append(BlobInfo(position, blob_length, iv, blob_hash))
|
||||
crypt_blob_infos.append(BlobInfo(position, blob_length, iv, added_on or current_time, blob_hash))
|
||||
if not blob_hash:
|
||||
break
|
||||
return crypt_blob_infos
|
||||
|
@ -570,6 +636,10 @@ class SQLiteStorage(SQLiteMixin):
|
|||
log.debug("update file status %s -> %s", stream_hash, new_status)
|
||||
return self.db.execute_fetchall("update file set status=? where stream_hash=?", (new_status, stream_hash))
|
||||
|
||||
def stop_all_files(self):
|
||||
log.debug("stopping all files")
|
||||
return self.db.execute_fetchall("update file set status=?", ("stopped",))
|
||||
|
||||
async def change_file_download_dir_and_file_name(self, stream_hash: str, download_dir: typing.Optional[str],
|
||||
file_name: typing.Optional[str]):
|
||||
if not file_name or not download_dir:
|
||||
|
@ -723,7 +793,7 @@ class SQLiteStorage(SQLiteMixin):
|
|||
|
||||
await self.db.run(_save_claims)
|
||||
if update_file_callbacks:
|
||||
await asyncio.wait(update_file_callbacks)
|
||||
await asyncio.wait(map(asyncio.create_task, update_file_callbacks))
|
||||
if claim_id_to_supports:
|
||||
await self.save_supports(claim_id_to_supports)
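The asyncio.wait change a few lines up reflects that wait() no longer accepts bare coroutines (deprecated in Python 3.8, removed in 3.11), so each callback is wrapped in a task first. A minimal stand-alone restatement of the pattern:

    import asyncio

    async def run_callbacks(callbacks):
        if callbacks:  # asyncio.wait() requires a non-empty collection
            await asyncio.wait([asyncio.create_task(cb) for cb in callbacks])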
|
||||
|
||||
|
|
|
@ -5,6 +5,7 @@ from typing import Optional
|
|||
from aiohttp.web import Request
|
||||
from lbry.error import ResolveError, DownloadSDTimeoutError, InsufficientFundsError
|
||||
from lbry.error import ResolveTimeoutError, DownloadDataTimeoutError, KeyFeeAboveMaxAllowedError
|
||||
from lbry.error import InvalidStreamURLError
|
||||
from lbry.stream.managed_stream import ManagedStream
|
||||
from lbry.torrent.torrent_manager import TorrentSource
|
||||
from lbry.utils import cache_concurrent
|
||||
|
@ -12,11 +13,12 @@ from lbry.schema.url import URL
|
|||
from lbry.wallet.dewies import dewies_to_lbc
|
||||
from lbry.file.source_manager import SourceManager
|
||||
from lbry.file.source import ManagedDownloadSource
|
||||
from lbry.extras.daemon.storage import StoredContentClaim
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.conf import Config
|
||||
from lbry.extras.daemon.analytics import AnalyticsManager
|
||||
from lbry.extras.daemon.storage import SQLiteStorage
|
||||
from lbry.wallet import WalletManager, Output
|
||||
from lbry.wallet import WalletManager
|
||||
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
@ -48,10 +50,10 @@ class FileManager:
|
|||
await manager.started.wait()
|
||||
self.started.set()
|
||||
|
||||
def stop(self):
|
||||
async def stop(self):
|
||||
for manager in self.source_managers.values():
|
||||
# fixme: pop or not?
|
||||
manager.stop()
|
||||
await manager.stop()
|
||||
self.started.clear()
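Since stop(), stop_tasks() and remove() are coroutines now, shutdown has to be awaited all the way down. A hedged sketch where file_manager stands in for an already constructed FileManager:

    import asyncio

    async def shutdown(file_manager):
        # FileManager.stop() awaits each SourceManager.stop(), which in turn
        # awaits stop_tasks() on every managed source before clearing 'started'
        await file_manager.stop()

    # asyncio.run(shutdown(file_manager)) from synchronous shutdown code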
|
||||
|
||||
@cache_concurrent
|
||||
|
@ -81,8 +83,11 @@ class FileManager:
|
|||
payment = None
|
||||
try:
|
||||
# resolve the claim
|
||||
if not URL.parse(uri).has_stream:
|
||||
raise ResolveError("cannot download a channel claim, specify a /path")
|
||||
try:
|
||||
if not URL.parse(uri).has_stream:
|
||||
raise InvalidStreamURLError(uri)
|
||||
except ValueError:
|
||||
raise InvalidStreamURLError(uri)
|
||||
try:
|
||||
resolved_result = await asyncio.wait_for(
|
||||
self.wallet_manager.ledger.resolve(
|
||||
|
@ -94,8 +99,6 @@ class FileManager:
|
|||
except asyncio.TimeoutError:
|
||||
raise ResolveTimeoutError(uri)
|
||||
except Exception as err:
|
||||
if isinstance(err, asyncio.CancelledError):
|
||||
raise
|
||||
log.exception("Unexpected error resolving stream:")
|
||||
raise ResolveError(f"Unexpected error resolving stream: {str(err)}")
|
||||
if 'error' in resolved_result:
|
||||
|
@ -190,21 +193,24 @@ class FileManager:
|
|||
####################
|
||||
# make downloader and wait for start
|
||||
####################
|
||||
# temporary claim info with the fields we already know, so the downloader can start; missing fields are populated later.
|
||||
stored_claim = StoredContentClaim(outpoint=outpoint, claim_id=txo.claim_id, name=txo.claim_name,
|
||||
amount=txo.amount, height=txo.tx_ref.height,
|
||||
serialized=claim.to_bytes().hex())
|
||||
|
||||
if not claim.stream.source.bt_infohash:
|
||||
# fixme: this shouldn't be here
|
||||
stream = ManagedStream(
|
||||
self.loop, self.config, source_manager.blob_manager, claim.stream.source.sd_hash,
|
||||
download_directory, file_name, ManagedStream.STATUS_RUNNING, content_fee=payment,
|
||||
analytics_manager=self.analytics_manager
|
||||
analytics_manager=self.analytics_manager, claim=stored_claim
|
||||
)
|
||||
stream.downloader.node = source_manager.node
|
||||
else:
|
||||
stream = TorrentSource(
|
||||
self.loop, self.config, self.storage, identifier=claim.stream.source.bt_infohash,
|
||||
file_name=file_name, download_directory=download_directory or self.config.download_dir,
|
||||
status=ManagedStream.STATUS_RUNNING,
|
||||
analytics_manager=self.analytics_manager,
|
||||
status=ManagedStream.STATUS_RUNNING, claim=stored_claim, analytics_manager=self.analytics_manager,
|
||||
torrent_session=source_manager.torrent_session
|
||||
)
|
||||
log.info("starting download for %s", uri)
|
||||
|
@ -236,15 +242,14 @@ class FileManager:
|
|||
claim_info = await self.storage.get_content_claim_for_torrent(stream.identifier)
|
||||
stream.set_claim(claim_info, claim)
|
||||
if save_file:
|
||||
await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download),
|
||||
loop=self.loop)
|
||||
await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download))
|
||||
return stream
|
||||
except asyncio.TimeoutError:
|
||||
error = DownloadDataTimeoutError(stream.sd_hash)
|
||||
raise error
|
||||
except Exception as err: # forgive data timeout, don't delete stream
|
||||
except (Exception, asyncio.CancelledError) as err: # forgive data timeout, don't delete stream
|
||||
expected = (DownloadSDTimeoutError, DownloadDataTimeoutError, InsufficientFundsError,
|
||||
KeyFeeAboveMaxAllowedError)
|
||||
KeyFeeAboveMaxAllowedError, ResolveError, InvalidStreamURLError)
|
||||
if isinstance(err, expected):
|
||||
log.warning("Failed to download %s: %s", uri, str(err))
|
||||
elif isinstance(err, asyncio.CancelledError):
|
||||
|
|
|
@ -45,11 +45,12 @@ class ManagedDownloadSource:
|
|||
self.purchase_receipt = None
|
||||
self._added_on = added_on
|
||||
self.analytics_manager = analytics_manager
|
||||
self.downloader = None
|
||||
|
||||
self.saving = asyncio.Event(loop=self.loop)
|
||||
self.finished_writing = asyncio.Event(loop=self.loop)
|
||||
self.started_writing = asyncio.Event(loop=self.loop)
|
||||
self.finished_write_attempt = asyncio.Event(loop=self.loop)
|
||||
self.saving = asyncio.Event()
|
||||
self.finished_writing = asyncio.Event()
|
||||
self.started_writing = asyncio.Event()
|
||||
self.finished_write_attempt = asyncio.Event()
|
||||
|
||||
# @classmethod
|
||||
# async def create(cls, loop: asyncio.AbstractEventLoop, config: 'Config', file_path: str,
|
||||
|
@ -66,7 +67,7 @@ class ManagedDownloadSource:
|
|||
async def save_file(self, file_name: Optional[str] = None, download_directory: Optional[str] = None):
|
||||
raise NotImplementedError()
|
||||
|
||||
def stop_tasks(self):
|
||||
async def stop_tasks(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def set_claim(self, claim_info: typing.Dict, claim: 'Claim'):
|
||||
|
|
|
@ -54,16 +54,16 @@ class SourceManager:
|
|||
self.storage = storage
|
||||
self.analytics_manager = analytics_manager
|
||||
self._sources: typing.Dict[str, ManagedDownloadSource] = {}
|
||||
self.started = asyncio.Event(loop=self.loop)
|
||||
self.started = asyncio.Event()
|
||||
|
||||
def add(self, source: ManagedDownloadSource):
|
||||
self._sources[source.identifier] = source
|
||||
|
||||
def remove(self, source: ManagedDownloadSource):
|
||||
async def remove(self, source: ManagedDownloadSource):
|
||||
if source.identifier not in self._sources:
|
||||
return
|
||||
self._sources.pop(source.identifier)
|
||||
source.stop_tasks()
|
||||
await source.stop_tasks()
|
||||
|
||||
async def initialize_from_database(self):
|
||||
raise NotImplementedError()
|
||||
|
@ -72,10 +72,10 @@ class SourceManager:
|
|||
await self.initialize_from_database()
|
||||
self.started.set()
|
||||
|
||||
def stop(self):
|
||||
async def stop(self):
|
||||
while self._sources:
|
||||
_, source = self._sources.popitem()
|
||||
source.stop_tasks()
|
||||
await source.stop_tasks()
|
||||
self.started.clear()
|
||||
|
||||
async def create(self, file_path: str, key: Optional[bytes] = None,
|
||||
|
@ -83,7 +83,7 @@ class SourceManager:
|
|||
raise NotImplementedError()
|
||||
|
||||
async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):
|
||||
self.remove(source)
|
||||
await self.remove(source)
|
||||
if delete_file and source.output_file_exists:
|
||||
os.remove(source.full_path)
|
||||
|
||||
|
|
|
@ -2,4 +2,5 @@ build:
|
|||
rm types/v2/* -rf
|
||||
touch types/v2/__init__.py
|
||||
cd types/v2/ && protoc --python_out=. -I ../../../../../types/v2/proto/ ../../../../../types/v2/proto/*.proto
|
||||
cd types/v2/ && cp ../../../../../types/jsonschema/* ./
|
||||
sed -e 's/^import\ \(.*\)_pb2\ /from . import\ \1_pb2\ /g' -i types/v2/*.py
|
||||
|
|
|
@ -10,6 +10,7 @@ from google.protobuf.json_format import MessageToDict
|
|||
|
||||
from lbry.crypto.base58 import Base58
|
||||
from lbry.constants import COIN
|
||||
from lbry.error import MissingPublishedFileError, EmptyPublishedFileError
|
||||
|
||||
from lbry.schema.mime_types import guess_media_type
|
||||
from lbry.schema.base import Metadata, BaseMessageList
|
||||
|
@ -139,10 +140,10 @@ class Source(Metadata):
|
|||
self.name = os.path.basename(file_path)
|
||||
self.media_type, stream_type = guess_media_type(file_path)
|
||||
if not os.path.isfile(file_path):
|
||||
raise Exception(f"File does not exist: {file_path}")
|
||||
raise MissingPublishedFileError(file_path)
|
||||
self.size = os.path.getsize(file_path)
|
||||
if self.size == 0:
|
||||
raise Exception(f"Cannot publish empty file: {file_path}")
|
||||
raise EmptyPublishedFileError(file_path)
|
||||
self.file_hash_bytes = calculate_sha384_file_hash(file_path)
|
||||
return stream_type
|
||||
|
||||
|
|
|
@ -2,6 +2,9 @@ import logging
|
|||
from typing import List
|
||||
from binascii import hexlify, unhexlify
|
||||
|
||||
from asn1crypto.keys import PublicKeyInfo
|
||||
from coincurve import PublicKey as cPublicKey
|
||||
|
||||
from google.protobuf.json_format import MessageToDict
|
||||
from google.protobuf.message import DecodeError
|
||||
from hachoir.core.log import log as hachoir_log
|
||||
|
@ -346,7 +349,7 @@ class Channel(BaseClaim):
|
|||
|
||||
@property
|
||||
def public_key(self) -> str:
|
||||
return hexlify(self.message.public_key).decode()
|
||||
return hexlify(self.public_key_bytes).decode()
|
||||
|
||||
@public_key.setter
|
||||
def public_key(self, sd_public_key: str):
|
||||
|
@ -354,7 +357,11 @@ class Channel(BaseClaim):
|
|||
|
||||
@property
|
||||
def public_key_bytes(self) -> bytes:
|
||||
return self.message.public_key
|
||||
if len(self.message.public_key) == 33:
|
||||
return self.message.public_key
|
||||
public_key_info = PublicKeyInfo.load(self.message.public_key)
|
||||
public_key = cPublicKey(public_key_info.native['public_key'])
|
||||
return public_key.format(compressed=True)
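A stand-alone restatement of the normalization above: older channels stored a DER SubjectPublicKeyInfo blob while newer ones store a 33-byte compressed secp256k1 point, and both are reduced to the compressed form:

    from asn1crypto.keys import PublicKeyInfo
    from coincurve import PublicKey as cPublicKey

    def compress_channel_public_key(stored: bytes) -> bytes:
        if len(stored) == 33:  # already a compressed point
            return stored
        info = PublicKeyInfo.load(stored)  # parse the DER structure
        return cPublicKey(info.native['public_key']).format(compressed=True)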
|
||||
|
||||
@public_key_bytes.setter
|
||||
def public_key_bytes(self, public_key: bytes):
|
||||
|
@ -391,6 +398,12 @@ class Repost(BaseClaim):
|
|||
|
||||
claim_type = Claim.REPOST
|
||||
|
||||
def to_dict(self):
|
||||
claim = super().to_dict()
|
||||
if claim.pop('claim_hash', None):
|
||||
claim['claim_id'] = self.reference.claim_id
|
||||
return claim
|
||||
|
||||
@property
|
||||
def reference(self) -> ClaimReference:
|
||||
return ClaimReference(self.message)
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
import os
|
||||
import filetype
|
||||
import logging
|
||||
|
||||
types_map = {
|
||||
# http://www.iana.org/assignments/media-types
|
||||
|
@ -166,10 +168,38 @@ types_map = {
|
|||
'.wmv': ('video/x-ms-wmv', 'video')
|
||||
}
|
||||
|
||||
# maps detected extensions to the possible analogs
|
||||
# i.e. .cbz file is actually a .zip
|
||||
synonyms_map = {
|
||||
'.zip': ['.cbz'],
|
||||
'.rar': ['.cbr'],
|
||||
'.ar': ['.a']
|
||||
}
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def guess_media_type(path):
|
||||
_, ext = os.path.splitext(path)
|
||||
extension = ext.strip().lower()
|
||||
|
||||
try:
|
||||
kind = filetype.guess(path)
|
||||
if kind:
|
||||
real_extension = f".{kind.extension}"
|
||||
|
||||
if extension != real_extension:
|
||||
if extension:
|
||||
log.warning(f"file extension does not match it's contents: {path}, identified as {real_extension}")
|
||||
else:
|
||||
log.debug(f"file {path} does not have extension, identified by it's contents as {real_extension}")
|
||||
|
||||
if extension not in synonyms_map.get(real_extension, []):
|
||||
extension = real_extension
|
||||
|
||||
except OSError as error:
|
||||
pass
|
||||
|
||||
if extension[1:]:
|
||||
if extension in types_map:
|
||||
return types_map[extension]
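A hedged usage note for the sniffing added above (the path is hypothetical): when the declared extension and the detected contents disagree, a warning is logged and the detected type wins, unless the declared extension is a known synonym of the detected container (e.g. .cbz for .zip):

    from lbry.schema.mime_types import guess_media_type

    media_type, stream_type = guess_media_type("video.mp4")
    # if "video.mp4" is really, say, a zip archive, a warning is logged and the
    # sniffed type is used; a .cbz file keeps its declared extension because
    # .cbz is listed as a synonym of .zip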
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
import base64
|
||||
import struct
|
||||
from typing import List
|
||||
from typing import List, Union, Optional, NamedTuple
|
||||
from binascii import hexlify
|
||||
from itertools import chain
|
||||
|
||||
|
@ -13,11 +12,39 @@ NOT_FOUND = ErrorMessage.Code.Name(ErrorMessage.NOT_FOUND)
|
|||
BLOCKED = ErrorMessage.Code.Name(ErrorMessage.BLOCKED)
|
||||
|
||||
|
||||
def set_reference(reference, txo_row):
|
||||
if txo_row:
|
||||
reference.tx_hash = txo_row['txo_hash'][:32]
|
||||
reference.nout = struct.unpack('<I', txo_row['txo_hash'][32:])[0]
|
||||
reference.height = txo_row['height']
|
||||
def set_reference(reference, claim_hash, rows):
|
||||
if claim_hash:
|
||||
for txo in rows:
|
||||
if claim_hash == txo.claim_hash:
|
||||
reference.tx_hash = txo.tx_hash
|
||||
reference.nout = txo.position
|
||||
reference.height = txo.height
|
||||
return
|
||||
|
||||
|
||||
class ResolveResult(NamedTuple):
|
||||
name: str
|
||||
normalized_name: str
|
||||
claim_hash: bytes
|
||||
tx_num: int
|
||||
position: int
|
||||
tx_hash: bytes
|
||||
height: int
|
||||
amount: int
|
||||
short_url: str
|
||||
is_controlling: bool
|
||||
canonical_url: str
|
||||
creation_height: int
|
||||
activation_height: int
|
||||
expiration_height: int
|
||||
effective_amount: int
|
||||
support_amount: int
|
||||
reposted: int
|
||||
last_takeover_height: Optional[int]
|
||||
claims_in_channel: Optional[int]
|
||||
channel_hash: Optional[bytes]
|
||||
reposted_claim_hash: Optional[bytes]
|
||||
signature_valid: Optional[bool]
|
||||
|
||||
|
||||
class Censor:
|
||||
|
@ -38,19 +65,19 @@ class Censor:
|
|||
def apply(self, rows):
|
||||
return [row for row in rows if not self.censor(row)]
|
||||
|
||||
def censor(self, row) -> bool:
|
||||
def censor(self, row) -> Optional[bytes]:
|
||||
if self.is_censored(row):
|
||||
censoring_channel_hash = bytes.fromhex(row['censoring_channel_id'])[::-1]
|
||||
self.censored.setdefault(censoring_channel_hash, set())
|
||||
self.censored[censoring_channel_hash].add(row['tx_hash'])
|
||||
return True
|
||||
return False
|
||||
return censoring_channel_hash
|
||||
return None
|
||||
|
||||
def to_message(self, outputs: OutputsMessage, extra_txo_rows: dict):
|
||||
for censoring_channel_hash, count in self.censored.items():
|
||||
blocked = outputs.blocked.add()
|
||||
blocked.count = len(count)
|
||||
set_reference(blocked.channel, extra_txo_rows.get(censoring_channel_hash))
|
||||
set_reference(blocked.channel, censoring_channel_hash, extra_txo_rows)
|
||||
outputs.blocked_total += len(count)
|
||||
|
||||
|
||||
|
@ -115,10 +142,10 @@ class Outputs:
|
|||
'expiration_height': claim.expiration_height,
|
||||
'effective_amount': claim.effective_amount,
|
||||
'support_amount': claim.support_amount,
|
||||
'trending_group': claim.trending_group,
|
||||
'trending_mixed': claim.trending_mixed,
|
||||
'trending_local': claim.trending_local,
|
||||
'trending_global': claim.trending_global,
|
||||
# 'trending_group': claim.trending_group,
|
||||
# 'trending_mixed': claim.trending_mixed,
|
||||
# 'trending_local': claim.trending_local,
|
||||
# 'trending_global': claim.trending_global,
|
||||
}
|
||||
if claim.HasField('channel'):
|
||||
txo.channel = tx_map[claim.channel.tx_hash].outputs[claim.channel.nout]
|
||||
|
@ -150,70 +177,66 @@ class Outputs:
|
|||
outputs.blocked, outputs.blocked_total
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def from_grpc(cls, outputs: OutputsMessage) -> 'Outputs':
|
||||
txs = set()
|
||||
for txo_message in chain(outputs.txos, outputs.extra_txos):
|
||||
if txo_message.WhichOneof('meta') == 'error':
|
||||
continue
|
||||
txs.add((hexlify(txo_message.tx_hash[::-1]).decode(), txo_message.height))
|
||||
return cls(
|
||||
outputs.txos, outputs.extra_txos, txs,
|
||||
outputs.offset, outputs.total,
|
||||
outputs.blocked, outputs.blocked_total
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def to_base64(cls, txo_rows, extra_txo_rows, offset=0, total=None, blocked=None) -> str:
|
||||
return base64.b64encode(cls.to_bytes(txo_rows, extra_txo_rows, offset, total, blocked)).decode()
|
||||
|
||||
@classmethod
|
||||
def to_bytes(cls, txo_rows, extra_txo_rows, offset=0, total=None, blocked: Censor = None) -> bytes:
|
||||
extra_txo_rows = {row['claim_hash']: row for row in extra_txo_rows}
|
||||
page = OutputsMessage()
|
||||
page.offset = offset
|
||||
if total is not None:
|
||||
page.total = total
|
||||
if blocked is not None:
|
||||
blocked.to_message(page, extra_txo_rows)
|
||||
for row in extra_txo_rows:
|
||||
txo_message: 'OutputsMessage' = page.extra_txos.add()
|
||||
if not isinstance(row, Exception):
|
||||
if row.channel_hash:
|
||||
set_reference(txo_message.claim.channel, row.channel_hash, extra_txo_rows)
|
||||
if row.reposted_claim_hash:
|
||||
set_reference(txo_message.claim.repost, row.reposted_claim_hash, extra_txo_rows)
|
||||
cls.encode_txo(txo_message, row)
|
||||
|
||||
for row in txo_rows:
|
||||
cls.row_to_message(row, page.txos.add(), extra_txo_rows)
|
||||
for row in extra_txo_rows.values():
|
||||
cls.row_to_message(row, page.extra_txos.add(), extra_txo_rows)
|
||||
# cls.row_to_message(row, page.txos.add(), extra_txo_rows)
|
||||
txo_message: 'OutputsMessage' = page.txos.add()
|
||||
cls.encode_txo(txo_message, row)
|
||||
if not isinstance(row, Exception):
|
||||
if row.channel_hash:
|
||||
set_reference(txo_message.claim.channel, row.channel_hash, extra_txo_rows)
|
||||
if row.reposted_claim_hash:
|
||||
set_reference(txo_message.claim.repost, row.reposted_claim_hash, extra_txo_rows)
|
||||
elif isinstance(row, ResolveCensoredError):
|
||||
set_reference(txo_message.error.blocked.channel, row.censor_id, extra_txo_rows)
|
||||
return page.SerializeToString()
|
||||
|
||||
@classmethod
|
||||
def row_to_message(cls, txo, txo_message, extra_row_dict: dict):
|
||||
if isinstance(txo, Exception):
|
||||
txo_message.error.text = txo.args[0]
|
||||
if isinstance(txo, ValueError):
|
||||
def encode_txo(cls, txo_message, resolve_result: Union['ResolveResult', Exception]):
|
||||
if isinstance(resolve_result, Exception):
|
||||
txo_message.error.text = resolve_result.args[0]
|
||||
if isinstance(resolve_result, ValueError):
|
||||
txo_message.error.code = ErrorMessage.INVALID
|
||||
elif isinstance(txo, LookupError):
|
||||
elif isinstance(resolve_result, LookupError):
|
||||
txo_message.error.code = ErrorMessage.NOT_FOUND
|
||||
elif isinstance(txo, ResolveCensoredError):
|
||||
elif isinstance(resolve_result, ResolveCensoredError):
|
||||
txo_message.error.code = ErrorMessage.BLOCKED
|
||||
set_reference(txo_message.error.blocked.channel, extra_row_dict.get(bytes.fromhex(txo.censor_id)[::-1]))
|
||||
return
|
||||
txo_message.tx_hash = txo['txo_hash'][:32]
|
||||
txo_message.nout, = struct.unpack('<I', txo['txo_hash'][32:])
|
||||
txo_message.height = txo['height']
|
||||
txo_message.claim.short_url = txo['short_url']
|
||||
txo_message.claim.reposted = txo['reposted']
|
||||
if txo['canonical_url'] is not None:
|
||||
txo_message.claim.canonical_url = txo['canonical_url']
|
||||
txo_message.claim.is_controlling = bool(txo['is_controlling'])
|
||||
if txo['last_take_over_height'] is not None:
|
||||
txo_message.claim.take_over_height = txo['last_take_over_height']
|
||||
txo_message.claim.creation_height = txo['creation_height']
|
||||
txo_message.claim.activation_height = txo['activation_height']
|
||||
txo_message.claim.expiration_height = txo['expiration_height']
|
||||
if txo['claims_in_channel'] is not None:
|
||||
txo_message.claim.claims_in_channel = txo['claims_in_channel']
|
||||
txo_message.claim.effective_amount = txo['effective_amount']
|
||||
txo_message.claim.support_amount = txo['support_amount']
|
||||
txo_message.claim.trending_group = txo['trending_group']
|
||||
txo_message.claim.trending_mixed = txo['trending_mixed']
|
||||
txo_message.claim.trending_local = txo['trending_local']
|
||||
txo_message.claim.trending_global = txo['trending_global']
|
||||
set_reference(txo_message.claim.channel, extra_row_dict.get(txo['channel_hash']))
|
||||
set_reference(txo_message.claim.repost, extra_row_dict.get(txo['reposted_claim_hash']))
|
||||
txo_message.tx_hash = resolve_result.tx_hash
|
||||
txo_message.nout = resolve_result.position
|
||||
txo_message.height = resolve_result.height
|
||||
txo_message.claim.short_url = resolve_result.short_url
|
||||
txo_message.claim.reposted = resolve_result.reposted
|
||||
txo_message.claim.is_controlling = resolve_result.is_controlling
|
||||
txo_message.claim.creation_height = resolve_result.creation_height
|
||||
txo_message.claim.activation_height = resolve_result.activation_height
|
||||
txo_message.claim.expiration_height = resolve_result.expiration_height
|
||||
txo_message.claim.effective_amount = resolve_result.effective_amount
|
||||
txo_message.claim.support_amount = resolve_result.support_amount
|
||||
|
||||
if resolve_result.canonical_url is not None:
|
||||
txo_message.claim.canonical_url = resolve_result.canonical_url
|
||||
if resolve_result.last_takeover_height is not None:
|
||||
txo_message.claim.take_over_height = resolve_result.last_takeover_height
|
||||
if resolve_result.claims_in_channel is not None:
|
||||
txo_message.claim.claims_in_channel = resolve_result.claims_in_channel
|
||||
|
|
|
@ -1,740 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# source: hub.proto
|
||||
"""Generated protocol buffer code."""
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import message as _message
|
||||
from google.protobuf import reflection as _reflection
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
import lbry.schema.types.v2.result_pb2 as result__pb2
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor.FileDescriptor(
|
||||
name='hub.proto',
|
||||
package='pb',
|
||||
syntax='proto3',
|
||||
serialized_options=b'Z$github.com/lbryio/hub/protobuf/go/pb',
|
||||
create_key=_descriptor._internal_create_key,
|
||||
serialized_pb=b'\n\thub.proto\x12\x02pb\x1a\x0cresult.proto\"0\n\x0fInvertibleField\x12\x0e\n\x06invert\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x03(\t\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"j\n\nRangeField\x12\x1d\n\x02op\x18\x01 \x01(\x0e\x32\x11.pb.RangeField.Op\x12\r\n\x05value\x18\x02 \x03(\t\".\n\x02Op\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03LTE\x10\x01\x12\x07\n\x03GTE\x10\x02\x12\x06\n\x02LT\x10\x03\x12\x06\n\x02GT\x10\x04\"\x9c\r\n\rSearchRequest\x12%\n\x08\x63laim_id\x18\x01 \x01(\x0b\x32\x13.pb.InvertibleField\x12\'\n\nchannel_id\x18\x02 \x01(\x0b\x32\x13.pb.InvertibleField\x12\x0c\n\x04text\x18\x03 \x01(\t\x12\r\n\x05limit\x18\x04 \x01(\r\x12\x10\n\x08order_by\x18\x05 \x03(\t\x12\x0e\n\x06offset\x18\x06 \x01(\r\x12\x16\n\x0eis_controlling\x18\x07 \x01(\x08\x12\x1d\n\x15last_take_over_height\x18\x08 \x01(\t\x12\x12\n\nclaim_name\x18\t \x01(\t\x12\x17\n\x0fnormalized_name\x18\n \x01(\t\x12#\n\x0btx_position\x18\x0b \x01(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06\x61mount\x18\x0c \x01(\x0b\x32\x0e.pb.RangeField\x12!\n\ttimestamp\x18\r \x01(\x0b\x32\x0e.pb.RangeField\x12*\n\x12\x63reation_timestamp\x18\x0e \x01(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06height\x18\x0f \x01(\x0b\x32\x0e.pb.RangeField\x12\'\n\x0f\x63reation_height\x18\x10 \x01(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x61\x63tivation_height\x18\x11 \x01(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x65xpiration_height\x18\x12 \x01(\x0b\x32\x0e.pb.RangeField\x12$\n\x0crelease_time\x18\x13 \x01(\x0b\x32\x0e.pb.RangeField\x12\x11\n\tshort_url\x18\x14 \x01(\t\x12\x15\n\rcanonical_url\x18\x15 \x01(\t\x12\r\n\x05title\x18\x16 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x17 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x18 \x01(\t\x12\x12\n\nclaim_type\x18\x19 \x03(\t\x12$\n\x0crepost_count\x18\x1a \x01(\x0b\x32\x0e.pb.RangeField\x12\x13\n\x0bstream_type\x18\x1b \x03(\t\x12\x12\n\nmedia_type\x18\x1c \x03(\t\x12\"\n\nfee_amount\x18\x1d \x01(\x0b\x32\x0e.pb.RangeField\x12\x14\n\x0c\x66\x65\x65_currency\x18\x1e \x01(\t\x12 \n\x08\x64uration\x18\x1f \x01(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11reposted_claim_id\x18 \x01(\t\x12#\n\x0b\x63\x65nsor_type\x18! \x01(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11\x63laims_in_channel\x18\" \x01(\t\x12$\n\x0c\x63hannel_join\x18# \x01(\x0b\x32\x0e.pb.RangeField\x12)\n\x12is_signature_valid\x18$ \x01(\x0b\x32\r.pb.BoolValue\x12(\n\x10\x65\x66\x66\x65\x63tive_amount\x18% \x01(\x0b\x32\x0e.pb.RangeField\x12&\n\x0esupport_amount\x18& \x01(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_group\x18\' \x01(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_mixed\x18( \x01(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_local\x18) \x01(\x0b\x32\x0e.pb.RangeField\x12\'\n\x0ftrending_global\x18* \x01(\x0b\x32\x0e.pb.RangeField\x12\r\n\x05tx_id\x18+ \x01(\t\x12 \n\x07tx_nout\x18, \x01(\x0b\x32\x0f.pb.UInt32Value\x12\x11\n\tsignature\x18- \x01(\t\x12\x18\n\x10signature_digest\x18. 
\x01(\t\x12\x18\n\x10public_key_bytes\x18/ \x01(\t\x12\x15\n\rpublic_key_id\x18\x30 \x01(\t\x12\x10\n\x08\x61ny_tags\x18\x31 \x03(\t\x12\x10\n\x08\x61ll_tags\x18\x32 \x03(\t\x12\x10\n\x08not_tags\x18\x33 \x03(\t\x12\x1d\n\x15has_channel_signature\x18\x34 \x01(\x08\x12!\n\nhas_source\x18\x35 \x01(\x0b\x32\r.pb.BoolValue\x12 \n\x18limit_claims_per_channel\x18\x36 \x01(\r\x12\x15\n\rany_languages\x18\x37 \x03(\t\x12\x15\n\rall_languages\x18\x38 \x03(\t\x12\x19\n\x11remove_duplicates\x18\x39 \x01(\x08\x12\x11\n\tno_totals\x18: \x01(\x08\x32\x31\n\x03Hub\x12*\n\x06Search\x12\x11.pb.SearchRequest\x1a\x0b.pb.Outputs\"\x00\x42&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3'
  ,
  dependencies=[result__pb2.DESCRIPTOR,])


_RANGEFIELD_OP = _descriptor.EnumDescriptor(
  name='Op',
  full_name='pb.RangeField.Op',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='EQ', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LTE', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='GTE', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='LT', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='GT', index=4, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=199,
  serialized_end=245,
)
_sym_db.RegisterEnumDescriptor(_RANGEFIELD_OP)


_INVERTIBLEFIELD = _descriptor.Descriptor(
  name='InvertibleField',
  full_name='pb.InvertibleField',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='invert', full_name='pb.InvertibleField.invert', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='pb.InvertibleField.value', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=31,
  serialized_end=79,
)


_BOOLVALUE = _descriptor.Descriptor(
  name='BoolValue',
  full_name='pb.BoolValue',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='value', full_name='pb.BoolValue.value', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=81,
  serialized_end=107,
)


_UINT32VALUE = _descriptor.Descriptor(
  name='UInt32Value',
  full_name='pb.UInt32Value',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='value', full_name='pb.UInt32Value.value', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=109,
  serialized_end=137,
)


_RANGEFIELD = _descriptor.Descriptor(
  name='RangeField',
  full_name='pb.RangeField',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='op', full_name='pb.RangeField.op', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='pb.RangeField.value', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _RANGEFIELD_OP,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=139,
  serialized_end=245,
)


_SEARCHREQUEST = _descriptor.Descriptor(
  name='SearchRequest',
  full_name='pb.SearchRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
_descriptor.FieldDescriptor(
|
||||
name='claim_id', full_name='pb.SearchRequest.claim_id', index=0,
|
||||
number=1, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='channel_id', full_name='pb.SearchRequest.channel_id', index=1,
|
||||
number=2, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='text', full_name='pb.SearchRequest.text', index=2,
|
||||
number=3, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='limit', full_name='pb.SearchRequest.limit', index=3,
|
||||
number=4, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='order_by', full_name='pb.SearchRequest.order_by', index=4,
|
||||
number=5, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='offset', full_name='pb.SearchRequest.offset', index=5,
|
||||
number=6, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='is_controlling', full_name='pb.SearchRequest.is_controlling', index=6,
|
||||
number=7, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='last_take_over_height', full_name='pb.SearchRequest.last_take_over_height', index=7,
|
||||
number=8, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='claim_name', full_name='pb.SearchRequest.claim_name', index=8,
|
||||
number=9, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='normalized_name', full_name='pb.SearchRequest.normalized_name', index=9,
|
||||
number=10, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='tx_position', full_name='pb.SearchRequest.tx_position', index=10,
|
||||
number=11, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='amount', full_name='pb.SearchRequest.amount', index=11,
|
||||
number=12, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='timestamp', full_name='pb.SearchRequest.timestamp', index=12,
|
||||
number=13, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='creation_timestamp', full_name='pb.SearchRequest.creation_timestamp', index=13,
|
||||
number=14, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='height', full_name='pb.SearchRequest.height', index=14,
|
||||
number=15, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='creation_height', full_name='pb.SearchRequest.creation_height', index=15,
|
||||
number=16, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='activation_height', full_name='pb.SearchRequest.activation_height', index=16,
|
||||
number=17, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='expiration_height', full_name='pb.SearchRequest.expiration_height', index=17,
|
||||
number=18, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='release_time', full_name='pb.SearchRequest.release_time', index=18,
|
||||
number=19, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='short_url', full_name='pb.SearchRequest.short_url', index=19,
|
||||
number=20, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='canonical_url', full_name='pb.SearchRequest.canonical_url', index=20,
|
||||
number=21, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='title', full_name='pb.SearchRequest.title', index=21,
|
||||
number=22, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='author', full_name='pb.SearchRequest.author', index=22,
|
||||
number=23, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='description', full_name='pb.SearchRequest.description', index=23,
|
||||
number=24, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='claim_type', full_name='pb.SearchRequest.claim_type', index=24,
|
||||
number=25, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='repost_count', full_name='pb.SearchRequest.repost_count', index=25,
|
||||
number=26, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='stream_type', full_name='pb.SearchRequest.stream_type', index=26,
|
||||
number=27, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='media_type', full_name='pb.SearchRequest.media_type', index=27,
|
||||
number=28, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='fee_amount', full_name='pb.SearchRequest.fee_amount', index=28,
|
||||
number=29, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='fee_currency', full_name='pb.SearchRequest.fee_currency', index=29,
|
||||
number=30, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='duration', full_name='pb.SearchRequest.duration', index=30,
|
||||
number=31, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='reposted_claim_id', full_name='pb.SearchRequest.reposted_claim_id', index=31,
|
||||
number=32, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='censor_type', full_name='pb.SearchRequest.censor_type', index=32,
|
||||
number=33, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='claims_in_channel', full_name='pb.SearchRequest.claims_in_channel', index=33,
|
||||
number=34, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='channel_join', full_name='pb.SearchRequest.channel_join', index=34,
|
||||
number=35, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='is_signature_valid', full_name='pb.SearchRequest.is_signature_valid', index=35,
|
||||
number=36, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='effective_amount', full_name='pb.SearchRequest.effective_amount', index=36,
|
||||
number=37, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='support_amount', full_name='pb.SearchRequest.support_amount', index=37,
|
||||
number=38, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_group', full_name='pb.SearchRequest.trending_group', index=38,
|
||||
number=39, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_mixed', full_name='pb.SearchRequest.trending_mixed', index=39,
|
||||
number=40, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_local', full_name='pb.SearchRequest.trending_local', index=40,
|
||||
number=41, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_global', full_name='pb.SearchRequest.trending_global', index=41,
|
||||
number=42, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='tx_id', full_name='pb.SearchRequest.tx_id', index=42,
|
||||
number=43, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='tx_nout', full_name='pb.SearchRequest.tx_nout', index=43,
|
||||
number=44, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='signature', full_name='pb.SearchRequest.signature', index=44,
|
||||
number=45, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='signature_digest', full_name='pb.SearchRequest.signature_digest', index=45,
|
||||
number=46, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='public_key_bytes', full_name='pb.SearchRequest.public_key_bytes', index=46,
|
||||
number=47, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='public_key_id', full_name='pb.SearchRequest.public_key_id', index=47,
|
||||
number=48, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='any_tags', full_name='pb.SearchRequest.any_tags', index=48,
|
||||
number=49, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='all_tags', full_name='pb.SearchRequest.all_tags', index=49,
|
||||
number=50, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='not_tags', full_name='pb.SearchRequest.not_tags', index=50,
|
||||
number=51, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='has_channel_signature', full_name='pb.SearchRequest.has_channel_signature', index=51,
|
||||
number=52, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='has_source', full_name='pb.SearchRequest.has_source', index=52,
|
||||
number=53, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='limit_claims_per_channel', full_name='pb.SearchRequest.limit_claims_per_channel', index=53,
|
||||
number=54, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='any_languages', full_name='pb.SearchRequest.any_languages', index=54,
|
||||
number=55, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='all_languages', full_name='pb.SearchRequest.all_languages', index=55,
|
||||
number=56, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='remove_duplicates', full_name='pb.SearchRequest.remove_duplicates', index=56,
|
||||
number=57, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='no_totals', full_name='pb.SearchRequest.no_totals', index=57,
|
||||
number=58, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=248,
  serialized_end=1940,
)

_RANGEFIELD.fields_by_name['op'].enum_type = _RANGEFIELD_OP
_RANGEFIELD_OP.containing_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['claim_id'].message_type = _INVERTIBLEFIELD
_SEARCHREQUEST.fields_by_name['channel_id'].message_type = _INVERTIBLEFIELD
_SEARCHREQUEST.fields_by_name['tx_position'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['timestamp'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['creation_timestamp'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['height'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['creation_height'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['activation_height'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['expiration_height'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['release_time'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['repost_count'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['fee_amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['duration'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['censor_type'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['channel_join'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['is_signature_valid'].message_type = _BOOLVALUE
_SEARCHREQUEST.fields_by_name['effective_amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['support_amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_group'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_mixed'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_local'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_global'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['tx_nout'].message_type = _UINT32VALUE
_SEARCHREQUEST.fields_by_name['has_source'].message_type = _BOOLVALUE
DESCRIPTOR.message_types_by_name['InvertibleField'] = _INVERTIBLEFIELD
DESCRIPTOR.message_types_by_name['BoolValue'] = _BOOLVALUE
DESCRIPTOR.message_types_by_name['UInt32Value'] = _UINT32VALUE
DESCRIPTOR.message_types_by_name['RangeField'] = _RANGEFIELD
DESCRIPTOR.message_types_by_name['SearchRequest'] = _SEARCHREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

InvertibleField = _reflection.GeneratedProtocolMessageType('InvertibleField', (_message.Message,), {
  'DESCRIPTOR' : _INVERTIBLEFIELD,
  '__module__' : 'hub_pb2'
  # @@protoc_insertion_point(class_scope:pb.InvertibleField)
  })
_sym_db.RegisterMessage(InvertibleField)

BoolValue = _reflection.GeneratedProtocolMessageType('BoolValue', (_message.Message,), {
  'DESCRIPTOR' : _BOOLVALUE,
  '__module__' : 'hub_pb2'
  # @@protoc_insertion_point(class_scope:pb.BoolValue)
  })
_sym_db.RegisterMessage(BoolValue)

UInt32Value = _reflection.GeneratedProtocolMessageType('UInt32Value', (_message.Message,), {
  'DESCRIPTOR' : _UINT32VALUE,
  '__module__' : 'hub_pb2'
  # @@protoc_insertion_point(class_scope:pb.UInt32Value)
  })
_sym_db.RegisterMessage(UInt32Value)

RangeField = _reflection.GeneratedProtocolMessageType('RangeField', (_message.Message,), {
  'DESCRIPTOR' : _RANGEFIELD,
  '__module__' : 'hub_pb2'
  # @@protoc_insertion_point(class_scope:pb.RangeField)
  })
_sym_db.RegisterMessage(RangeField)

SearchRequest = _reflection.GeneratedProtocolMessageType('SearchRequest', (_message.Message,), {
  'DESCRIPTOR' : _SEARCHREQUEST,
  '__module__' : 'hub_pb2'
  # @@protoc_insertion_point(class_scope:pb.SearchRequest)
  })
_sym_db.RegisterMessage(SearchRequest)


DESCRIPTOR._options = None

_HUB = _descriptor.ServiceDescriptor(
  name='Hub',
  full_name='pb.Hub',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=1942,
  serialized_end=1991,
  methods=[
  _descriptor.MethodDescriptor(
    name='Search',
    full_name='pb.Hub.Search',
    index=0,
    containing_service=None,
    input_type=_SEARCHREQUEST,
    output_type=result__pb2._OUTPUTS,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_HUB)

DESCRIPTOR.services_by_name['Hub'] = _HUB

# @@protoc_insertion_point(module_scope)
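A minimal usage sketch, not part of the generated output, showing how the message classes registered above are typically built; it assumes the module is importable as lbry.schema.types.v2.hub_pb2 (the path used by the gRPC stubs below), and all field values are illustrative placeholders.

# Illustrative sketch only: construct and round-trip a SearchRequest.
from lbry.schema.types.v2.hub_pb2 import SearchRequest, RangeField, InvertibleField

request = SearchRequest(
    text='science',
    claim_type=['stream'],                                    # repeated string field
    channel_id=InvertibleField(invert=False, value=['abc']),  # placeholder channel id, not excluded
    height=RangeField(op=RangeField.GTE, value=['800000']),   # range filter; values are strings
    order_by=['effective_amount'],
    limit=20,
    no_totals=True,
)

# Standard protobuf round-trip: serialize to bytes and parse back.
data = request.SerializeToString()
assert SearchRequest.FromString(data).text == 'science'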
@@ -1,67 +0,0 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

import lbry.schema.types.v2.hub_pb2 as hub__pb2
import lbry.schema.types.v2.result_pb2 as result__pb2


class HubStub(object):
    """Missing associated documentation comment in .proto file."""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.Search = channel.unary_unary(
                '/pb.Hub/Search',
                request_serializer=hub__pb2.SearchRequest.SerializeToString,
                response_deserializer=result__pb2.Outputs.FromString,
                )


class HubServicer(object):
    """Missing associated documentation comment in .proto file."""

    def Search(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_HubServicer_to_server(servicer, server):
    rpc_method_handlers = {
            'Search': grpc.unary_unary_rpc_method_handler(
                    servicer.Search,
                    request_deserializer=hub__pb2.SearchRequest.FromString,
                    response_serializer=result__pb2.Outputs.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'pb.Hub', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


# This class is part of an EXPERIMENTAL API.
class Hub(object):
    """Missing associated documentation comment in .proto file."""

    @staticmethod
    def Search(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/pb.Hub/Search',
            hub__pb2.SearchRequest.SerializeToString,
            result__pb2.Outputs.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
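A hedged client sketch of how the HubStub above (removed in this diff) was typically driven; the hub_pb2_grpc module path and the server address are assumptions made for illustration, not values taken from this repository.

import grpc
import lbry.schema.types.v2.hub_pb2 as hub_pb2
import lbry.schema.types.v2.hub_pb2_grpc as hub_pb2_grpc  # assumed import path for this generated module

def search_hub(address):
    # Open a plaintext channel and issue the unary-unary pb.Hub/Search RPC.
    with grpc.insecure_channel(address) as channel:
        stub = hub_pb2_grpc.HubStub(channel)
        request = hub_pb2.SearchRequest(text='science', limit=10)
        return stub.Search(request)  # returns a result_pb2.Outputs message

outputs = search_hub('localhost:50051')  # placeholder address
print(outputs.total, len(outputs.txos))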
@@ -1,13 +1,11 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: result.proto

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()
@@ -19,9 +17,10 @@ DESCRIPTOR = _descriptor.FileDescriptor(
name='result.proto',
|
||||
package='pb',
|
||||
syntax='proto3',
|
||||
serialized_pb=_b('\n\x0cresult.proto\x12\x02pb\"\x97\x01\n\x07Outputs\x12\x18\n\x04txos\x18\x01 \x03(\x0b\x32\n.pb.Output\x12\x1e\n\nextra_txos\x18\x02 \x03(\x0b\x32\n.pb.Output\x12\r\n\x05total\x18\x03 \x01(\r\x12\x0e\n\x06offset\x18\x04 \x01(\r\x12\x1c\n\x07\x62locked\x18\x05 \x03(\x0b\x32\x0b.pb.Blocked\x12\x15\n\rblocked_total\x18\x06 \x01(\r\"{\n\x06Output\x12\x0f\n\x07tx_hash\x18\x01 \x01(\x0c\x12\x0c\n\x04nout\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x1e\n\x05\x63laim\x18\x07 \x01(\x0b\x32\r.pb.ClaimMetaH\x00\x12\x1a\n\x05\x65rror\x18\x0f \x01(\x0b\x32\t.pb.ErrorH\x00\x42\x06\n\x04meta\"\xaf\x03\n\tClaimMeta\x12\x1b\n\x07\x63hannel\x18\x01 \x01(\x0b\x32\n.pb.Output\x12\x1a\n\x06repost\x18\x02 \x01(\x0b\x32\n.pb.Output\x12\x11\n\tshort_url\x18\x03 \x01(\t\x12\x15\n\rcanonical_url\x18\x04 \x01(\t\x12\x16\n\x0eis_controlling\x18\x05 \x01(\x08\x12\x18\n\x10take_over_height\x18\x06 \x01(\r\x12\x17\n\x0f\x63reation_height\x18\x07 \x01(\r\x12\x19\n\x11\x61\x63tivation_height\x18\x08 \x01(\r\x12\x19\n\x11\x65xpiration_height\x18\t \x01(\r\x12\x19\n\x11\x63laims_in_channel\x18\n \x01(\r\x12\x10\n\x08reposted\x18\x0b \x01(\r\x12\x18\n\x10\x65\x66\x66\x65\x63tive_amount\x18\x14 \x01(\x04\x12\x16\n\x0esupport_amount\x18\x15 \x01(\x04\x12\x16\n\x0etrending_group\x18\x16 \x01(\r\x12\x16\n\x0etrending_mixed\x18\x17 \x01(\x02\x12\x16\n\x0etrending_local\x18\x18 \x01(\x02\x12\x17\n\x0ftrending_global\x18\x19 \x01(\x02\"\x94\x01\n\x05\x45rror\x12\x1c\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x0e.pb.Error.Code\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x1c\n\x07\x62locked\x18\x03 \x01(\x0b\x32\x0b.pb.Blocked\"A\n\x04\x43ode\x12\x10\n\x0cUNKNOWN_CODE\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07\x42LOCKED\x10\x03\"5\n\x07\x42locked\x12\r\n\x05\x63ount\x18\x01 \x01(\r\x12\x1b\n\x07\x63hannel\x18\x02 \x01(\x0b\x32\n.pb.Outputb\x06proto3')
|
||||
serialized_options=b'Z$github.com/lbryio/hub/protobuf/go/pb',
|
||||
create_key=_descriptor._internal_create_key,
|
||||
serialized_pb=b'\n\x0cresult.proto\x12\x02pb\"\x97\x01\n\x07Outputs\x12\x18\n\x04txos\x18\x01 \x03(\x0b\x32\n.pb.Output\x12\x1e\n\nextra_txos\x18\x02 \x03(\x0b\x32\n.pb.Output\x12\r\n\x05total\x18\x03 \x01(\r\x12\x0e\n\x06offset\x18\x04 \x01(\r\x12\x1c\n\x07\x62locked\x18\x05 \x03(\x0b\x32\x0b.pb.Blocked\x12\x15\n\rblocked_total\x18\x06 \x01(\r\"{\n\x06Output\x12\x0f\n\x07tx_hash\x18\x01 \x01(\x0c\x12\x0c\n\x04nout\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x1e\n\x05\x63laim\x18\x07 \x01(\x0b\x32\r.pb.ClaimMetaH\x00\x12\x1a\n\x05\x65rror\x18\x0f \x01(\x0b\x32\t.pb.ErrorH\x00\x42\x06\n\x04meta\"\xe6\x02\n\tClaimMeta\x12\x1b\n\x07\x63hannel\x18\x01 \x01(\x0b\x32\n.pb.Output\x12\x1a\n\x06repost\x18\x02 \x01(\x0b\x32\n.pb.Output\x12\x11\n\tshort_url\x18\x03 \x01(\t\x12\x15\n\rcanonical_url\x18\x04 \x01(\t\x12\x16\n\x0eis_controlling\x18\x05 \x01(\x08\x12\x18\n\x10take_over_height\x18\x06 \x01(\r\x12\x17\n\x0f\x63reation_height\x18\x07 \x01(\r\x12\x19\n\x11\x61\x63tivation_height\x18\x08 \x01(\r\x12\x19\n\x11\x65xpiration_height\x18\t \x01(\r\x12\x19\n\x11\x63laims_in_channel\x18\n \x01(\r\x12\x10\n\x08reposted\x18\x0b \x01(\r\x12\x18\n\x10\x65\x66\x66\x65\x63tive_amount\x18\x14 \x01(\x04\x12\x16\n\x0esupport_amount\x18\x15 \x01(\x04\x12\x16\n\x0etrending_score\x18\x16 \x01(\x01\"\x94\x01\n\x05\x45rror\x12\x1c\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x0e.pb.Error.Code\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x1c\n\x07\x62locked\x18\x03 \x01(\x0b\x32\x0b.pb.Blocked\"A\n\x04\x43ode\x12\x10\n\x0cUNKNOWN_CODE\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07\x42LOCKED\x10\x03\"5\n\x07\x42locked\x12\r\n\x05\x63ount\x18\x01 \x01(\r\x12\x1b\n\x07\x63hannel\x18\x02 \x01(\x0b\x32\n.pb.OutputB&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3'
|
||||
)
|
||||
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
|
||||
|
||||
|
||||
|
||||
|
@@ -30,28 +29,33 @@ _ERROR_CODE = _descriptor.EnumDescriptor(
full_name='pb.Error.Code',
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
values=[
|
||||
_descriptor.EnumValueDescriptor(
|
||||
name='UNKNOWN_CODE', index=0, number=0,
|
||||
options=None,
|
||||
type=None),
|
||||
serialized_options=None,
|
||||
type=None,
|
||||
create_key=_descriptor._internal_create_key),
|
||||
_descriptor.EnumValueDescriptor(
|
||||
name='NOT_FOUND', index=1, number=1,
|
||||
options=None,
|
||||
type=None),
|
||||
serialized_options=None,
|
||||
type=None,
|
||||
create_key=_descriptor._internal_create_key),
|
||||
_descriptor.EnumValueDescriptor(
|
||||
name='INVALID', index=2, number=2,
|
||||
options=None,
|
||||
type=None),
|
||||
serialized_options=None,
|
||||
type=None,
|
||||
create_key=_descriptor._internal_create_key),
|
||||
_descriptor.EnumValueDescriptor(
|
||||
name='BLOCKED', index=3, number=3,
|
||||
options=None,
|
||||
type=None),
|
||||
serialized_options=None,
|
||||
type=None,
|
||||
create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
containing_type=None,
|
||||
options=None,
|
||||
serialized_start=817,
|
||||
serialized_end=882,
|
||||
serialized_options=None,
|
||||
serialized_start=744,
|
||||
serialized_end=809,
|
||||
)
|
||||
_sym_db.RegisterEnumDescriptor(_ERROR_CODE)
|
||||
|
||||
|
@@ -62,6 +66,7 @@ _OUTPUTS = _descriptor.Descriptor(
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='txos', full_name='pb.Outputs.txos', index=0,
|
||||
|
@@ -69,49 +74,49 @@ _OUTPUTS = _descriptor.Descriptor(
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='extra_txos', full_name='pb.Outputs.extra_txos', index=1,
|
||||
number=2, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='total', full_name='pb.Outputs.total', index=2,
|
||||
number=3, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='offset', full_name='pb.Outputs.offset', index=3,
|
||||
number=4, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='blocked', full_name='pb.Outputs.blocked', index=4,
|
||||
number=5, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='blocked_total', full_name='pb.Outputs.blocked_total', index=5,
|
||||
number=6, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
options=None,
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
|
@@ -128,56 +133,59 @@ _OUTPUT = _descriptor.Descriptor(
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='tx_hash', full_name='pb.Output.tx_hash', index=0,
|
||||
number=1, type=12, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=_b(""),
|
||||
has_default_value=False, default_value=b"",
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='nout', full_name='pb.Output.nout', index=1,
|
||||
number=2, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='height', full_name='pb.Output.height', index=2,
|
||||
number=3, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='claim', full_name='pb.Output.claim', index=3,
|
||||
number=7, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='error', full_name='pb.Output.error', index=4,
|
||||
number=15, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
options=None,
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
_descriptor.OneofDescriptor(
|
||||
name='meta', full_name='pb.Output.meta',
|
||||
index=0, containing_type=None, fields=[]),
|
||||
index=0, containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[]),
|
||||
],
|
||||
serialized_start=174,
|
||||
serialized_end=297,
|
||||
|
@@ -190,6 +198,7 @@ _CLAIMMETA = _descriptor.Descriptor(
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='channel', full_name='pb.ClaimMeta.channel', index=0,
|
||||
|
@@ -197,133 +206,112 @@ _CLAIMMETA = _descriptor.Descriptor(
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='repost', full_name='pb.ClaimMeta.repost', index=1,
|
||||
number=2, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='short_url', full_name='pb.ClaimMeta.short_url', index=2,
|
||||
number=3, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=_b("").decode('utf-8'),
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='canonical_url', full_name='pb.ClaimMeta.canonical_url', index=3,
|
||||
number=4, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=_b("").decode('utf-8'),
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='is_controlling', full_name='pb.ClaimMeta.is_controlling', index=4,
|
||||
number=5, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='take_over_height', full_name='pb.ClaimMeta.take_over_height', index=5,
|
||||
number=6, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='creation_height', full_name='pb.ClaimMeta.creation_height', index=6,
|
||||
number=7, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='activation_height', full_name='pb.ClaimMeta.activation_height', index=7,
|
||||
number=8, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='expiration_height', full_name='pb.ClaimMeta.expiration_height', index=8,
|
||||
number=9, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='claims_in_channel', full_name='pb.ClaimMeta.claims_in_channel', index=9,
|
||||
number=10, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='reposted', full_name='pb.ClaimMeta.reposted', index=10,
|
||||
number=11, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='effective_amount', full_name='pb.ClaimMeta.effective_amount', index=11,
|
||||
number=20, type=4, cpp_type=4, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='support_amount', full_name='pb.ClaimMeta.support_amount', index=12,
|
||||
number=21, type=4, cpp_type=4, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_group', full_name='pb.ClaimMeta.trending_group', index=13,
|
||||
number=22, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_mixed', full_name='pb.ClaimMeta.trending_mixed', index=14,
|
||||
number=23, type=2, cpp_type=6, label=1,
|
||||
name='trending_score', full_name='pb.ClaimMeta.trending_score', index=13,
|
||||
number=22, type=1, cpp_type=5, label=1,
|
||||
has_default_value=False, default_value=float(0),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_local', full_name='pb.ClaimMeta.trending_local', index=15,
|
||||
number=24, type=2, cpp_type=6, label=1,
|
||||
has_default_value=False, default_value=float(0),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_global', full_name='pb.ClaimMeta.trending_global', index=16,
|
||||
number=25, type=2, cpp_type=6, label=1,
|
||||
has_default_value=False, default_value=float(0),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
options=None,
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=300,
|
||||
serialized_end=731,
|
||||
serialized_end=658,
|
||||
)
|
||||
|
||||
|
||||
|
@ -333,6 +321,7 @@ _ERROR = _descriptor.Descriptor(
|
|||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='code', full_name='pb.Error.code', index=0,
|
||||
|
@ -340,21 +329,21 @@ _ERROR = _descriptor.Descriptor(
|
|||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='text', full_name='pb.Error.text', index=1,
|
||||
number=2, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=_b("").decode('utf-8'),
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='blocked', full_name='pb.Error.blocked', index=2,
|
||||
number=3, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
|
@ -362,14 +351,14 @@ _ERROR = _descriptor.Descriptor(
|
|||
enum_types=[
|
||||
_ERROR_CODE,
|
||||
],
|
||||
options=None,
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=734,
|
||||
serialized_end=882,
|
||||
serialized_start=661,
|
||||
serialized_end=809,
|
||||
)
|
||||
|
||||
|
||||
|
@ -379,6 +368,7 @@ _BLOCKED = _descriptor.Descriptor(
|
|||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='count', full_name='pb.Blocked.count', index=0,
|
||||
|
@ -386,28 +376,28 @@ _BLOCKED = _descriptor.Descriptor(
|
|||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='channel', full_name='pb.Blocked.channel', index=1,
|
||||
number=2, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
options=None,
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=884,
|
||||
serialized_end=937,
|
||||
serialized_start=811,
|
||||
serialized_end=864,
|
||||
)
|
||||
|
||||
_OUTPUTS.fields_by_name['txos'].message_type = _OUTPUT
|
||||
|
@ -432,41 +422,43 @@ DESCRIPTOR.message_types_by_name['Output'] = _OUTPUT
|
|||
DESCRIPTOR.message_types_by_name['ClaimMeta'] = _CLAIMMETA
|
||||
DESCRIPTOR.message_types_by_name['Error'] = _ERROR
|
||||
DESCRIPTOR.message_types_by_name['Blocked'] = _BLOCKED
|
||||
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
|
||||
|
||||
Outputs = _reflection.GeneratedProtocolMessageType('Outputs', (_message.Message,), dict(
|
||||
DESCRIPTOR = _OUTPUTS,
|
||||
__module__ = 'result_pb2'
|
||||
Outputs = _reflection.GeneratedProtocolMessageType('Outputs', (_message.Message,), {
|
||||
'DESCRIPTOR' : _OUTPUTS,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.Outputs)
|
||||
))
|
||||
})
|
||||
_sym_db.RegisterMessage(Outputs)
|
||||
|
||||
Output = _reflection.GeneratedProtocolMessageType('Output', (_message.Message,), dict(
|
||||
DESCRIPTOR = _OUTPUT,
|
||||
__module__ = 'result_pb2'
|
||||
Output = _reflection.GeneratedProtocolMessageType('Output', (_message.Message,), {
|
||||
'DESCRIPTOR' : _OUTPUT,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.Output)
|
||||
))
|
||||
})
|
||||
_sym_db.RegisterMessage(Output)
|
||||
|
||||
ClaimMeta = _reflection.GeneratedProtocolMessageType('ClaimMeta', (_message.Message,), dict(
|
||||
DESCRIPTOR = _CLAIMMETA,
|
||||
__module__ = 'result_pb2'
|
||||
ClaimMeta = _reflection.GeneratedProtocolMessageType('ClaimMeta', (_message.Message,), {
|
||||
'DESCRIPTOR' : _CLAIMMETA,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.ClaimMeta)
|
||||
))
|
||||
})
|
||||
_sym_db.RegisterMessage(ClaimMeta)
|
||||
|
||||
Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), dict(
|
||||
DESCRIPTOR = _ERROR,
|
||||
__module__ = 'result_pb2'
|
||||
Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), {
|
||||
'DESCRIPTOR' : _ERROR,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.Error)
|
||||
))
|
||||
})
|
||||
_sym_db.RegisterMessage(Error)
|
||||
|
||||
Blocked = _reflection.GeneratedProtocolMessageType('Blocked', (_message.Message,), dict(
|
||||
DESCRIPTOR = _BLOCKED,
|
||||
__module__ = 'result_pb2'
|
||||
Blocked = _reflection.GeneratedProtocolMessageType('Blocked', (_message.Message,), {
|
||||
'DESCRIPTOR' : _BLOCKED,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.Blocked)
|
||||
))
|
||||
})
|
||||
_sym_db.RegisterMessage(Blocked)
|
||||
|
||||
|
||||
DESCRIPTOR._options = None
|
||||
# @@protoc_insertion_point(module_scope)
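For readers wiring the regenerated module up, here is a minimal round-trip sketch. The import path is an assumption inferred from the neighbouring lbry/schema/types/v2/wallet.json path (it is not shown in this hunk), and only fields visible in this diff are used.

# assumed location: lbry/schema/types/v2/result_pb2.py
from lbry.schema.types.v2 import result_pb2

meta = result_pb2.ClaimMeta(
    is_controlling=True,
    take_over_height=1000,
    effective_amount=5_000_000,
    trending_score=0.25,   # single double field replacing the old trending_* group
)
data = meta.SerializeToString()                      # protobuf wire bytes
assert result_pb2.ClaimMeta.FromString(data).trending_score == 0.25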
139
lbry/schema/types/v2/wallet.json
Normal file
|
@ -0,0 +1,139 @@
|
|||
{
|
||||
"title": "Wallet",
|
||||
"description": "An LBC wallet",
|
||||
"type": "object",
|
||||
"required": ["name", "version", "accounts", "preferences"],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "Human readable name for this wallet",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Wallet spec version",
|
||||
"type": "integer",
|
||||
"$comment": "Should this be a string? We may need some sort of decimal type if we want exact decimal versions."
|
||||
},
|
||||
"accounts": {
|
||||
"description": "Accounts associated with this wallet",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"required": ["address_generator", "certificates", "encrypted", "ledger", "modified_on", "name", "private_key", "public_key", "seed"],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"address_generator": {
|
||||
"description": "Higher level manager of either singular or deterministically generated addresses",
|
||||
"type": "object",
|
||||
"oneOf": [
|
||||
{
|
||||
"required": ["name", "change", "receiving"],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "type of address generator: a deterministic chain of addresses",
|
||||
"enum": ["deterministic-chain"],
|
||||
"type": "string"
|
||||
},
|
||||
"change": {
|
||||
"$ref": "#/$defs/address_manager",
|
||||
"description": "Manager for deterministically generated change address (not used for single address)"
|
||||
},
|
||||
"receiving": {
|
||||
"$ref": "#/$defs/address_manager",
|
||||
"description": "Manager for deterministically generated receiving address (not used for single address)"
|
||||
}
|
||||
}
|
||||
}, {
|
||||
"required": ["name"],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "type of address generator: a single address",
|
||||
"enum": ["single-address"],
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"certificates": {
|
||||
"type": "object",
|
||||
"description": "Channel keys. Mapping from public key address to pem-formatted private key.",
|
||||
"additionalProperties": {"type": "string"}
|
||||
},
|
||||
"encrypted": {
|
||||
"type": "boolean",
|
||||
"description": "Whether private key and seed are encrypted with a password"
|
||||
},
|
||||
"ledger": {
|
||||
"description": "Which network to use",
|
||||
"type": "string",
|
||||
"examples": [
|
||||
"lbc_mainnet",
|
||||
"lbc_testnet"
|
||||
]
|
||||
},
|
||||
"modified_on": {
|
||||
"description": "last modified time in Unix Time",
|
||||
"type": "integer"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name for account, possibly human readable",
|
||||
"type": "string"
|
||||
},
|
||||
"private_key": {
|
||||
"description": "Private key for address if `address_generator` is a single address. Root of chain of private keys for addresses if `address_generator` is a deterministic chain of addresses. Encrypted if `encrypted` is true.",
|
||||
"type": "string"
|
||||
},
|
||||
"public_key": {
|
||||
"description": "Public key for address if `address_generator` is a single address. Root of chain of public keys for addresses if `address_generator` is a deterministic chain of addresses.",
|
||||
"type": "string"
|
||||
},
|
||||
"seed": {
|
||||
"description": "Human readable representation of `private_key`. encrypted if `encrypted` is set to `true`",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"preferences": {
|
||||
"description": "Timestamped application-level preferences. Values can be objects or of a primitive type.",
|
||||
"$comment": "enable-sync is seen in example wallet. encrypt-on-disk is seen in example wallet. they both have a boolean `value` field. Do we want them explicitly defined here? local and shared seem to have at least a similar structure (type, value [yes, again], version), value being the free-form part. Should we define those here? Or can there be any key under preferences, and `value` be literally be anything in any form?",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"required": ["ts", "value"],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"ts": {
|
||||
"type": "number",
|
||||
"description": "When the item was set, in Unix time format.",
|
||||
"$comment": "Do we want a string (decimal)?"
|
||||
},
|
||||
"value": {
|
||||
"$comment": "Sometimes this has been an object, sometimes just a boolean. I don't want to prescribe anything."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"$defs": {
|
||||
"address_manager": {
|
||||
"description": "Manager for deterministically generated addresses",
|
||||
"type": "object",
|
||||
"required": ["gap", "maximum_uses_per_address"],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"gap": {
|
||||
"description": "Maximum allowed consecutive generated addresses with no transactions",
|
||||
"type": "integer"
|
||||
},
|
||||
"maximum_uses_per_address": {
|
||||
"description": "Maximum number of uses for each generated address",
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
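The schema above is plain JSON Schema, so a wallet file can be checked against it directly. A minimal sketch using the third-party jsonschema package; the wallet file path here is hypothetical.

import json
from jsonschema import validate, ValidationError

with open('lbry/schema/types/v2/wallet.json') as f:
    schema = json.load(f)
with open('default_wallet.json') as f:   # hypothetical wallet file to check
    wallet = json.load(f)

try:
    validate(instance=wallet, schema=schema)
    print('wallet file conforms to the spec')
except ValidationError as err:
    print(f'wallet file is invalid: {err.message}')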
|
31
lbry/stream/background_downloader.py
Normal file
@ -0,0 +1,31 @@
import asyncio
import logging

from lbry.stream.downloader import StreamDownloader


log = logging.getLogger(__name__)


class BackgroundDownloader:
    def __init__(self, conf, storage, blob_manager, dht_node=None):
        self.storage = storage
        self.blob_manager = blob_manager
        self.node = dht_node
        self.conf = conf

    async def download_blobs(self, sd_hash):
        downloader = StreamDownloader(asyncio.get_running_loop(), self.conf, self.blob_manager, sd_hash)
        try:
            await downloader.start(self.node, save_stream=False)
            for blob_info in downloader.descriptor.blobs[:-1]:
                await downloader.download_stream_blob(blob_info)
        except ValueError:
            return
        except asyncio.CancelledError:
            log.debug("Cancelled background downloader")
            raise
        except Exception:
            log.error("Unexpected download error on background downloader")
        finally:
            downloader.stop()
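A sketch of how a caller might drive this class; the conf, storage, blob_manager and node objects are assumed to come from the running daemon's components.

downloader = BackgroundDownloader(conf, storage, blob_manager, dht_node=node)
# fetches the sd blob plus every data blob except the stream terminator,
# without registering the stream as a saved file
await downloader.download_blobs(sd_hash)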
|
|
@ -4,6 +4,7 @@ import binascii
|
|||
import logging
|
||||
import typing
|
||||
import asyncio
|
||||
import time
|
||||
import re
|
||||
from collections import OrderedDict
|
||||
from cryptography.hazmat.primitives.ciphers.algorithms import AES
|
||||
|
@ -152,15 +153,19 @@ class StreamDescriptor:
|
|||
h.update(self.old_sort_json())
|
||||
return h.hexdigest()
|
||||
|
||||
async def make_sd_blob(self, blob_file_obj: typing.Optional[AbstractBlob] = None,
|
||||
old_sort: typing.Optional[bool] = False,
|
||||
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None):
|
||||
async def make_sd_blob(
|
||||
self, blob_file_obj: typing.Optional[AbstractBlob] = None, old_sort: typing.Optional[bool] = False,
|
||||
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None,
|
||||
added_on: float = None, is_mine: bool = False
|
||||
):
|
||||
sd_hash = self.calculate_sd_hash() if not old_sort else self.calculate_old_sort_sd_hash()
|
||||
if not old_sort:
|
||||
sd_data = self.as_json()
|
||||
else:
|
||||
sd_data = self.old_sort_json()
|
||||
sd_blob = blob_file_obj or BlobFile(self.loop, sd_hash, len(sd_data), blob_completed_callback, self.blob_dir)
|
||||
sd_blob = blob_file_obj or BlobFile(
|
||||
self.loop, sd_hash, len(sd_data), blob_completed_callback, self.blob_dir, added_on, is_mine
|
||||
)
|
||||
if blob_file_obj:
|
||||
blob_file_obj.set_length(len(sd_data))
|
||||
if not sd_blob.get_is_verified():
|
||||
|
@ -189,12 +194,13 @@ class StreamDescriptor:
|
|||
raise InvalidStreamDescriptorError("Stream terminator blob should not have a hash")
|
||||
if any(i != blob_info['blob_num'] for i, blob_info in enumerate(decoded['blobs'])):
|
||||
raise InvalidStreamDescriptorError("Stream contains out of order or skipped blobs")
|
||||
added_on = time.time()
|
||||
descriptor = cls(
|
||||
loop, blob_dir,
|
||||
binascii.unhexlify(decoded['stream_name']).decode(),
|
||||
decoded['key'],
|
||||
binascii.unhexlify(decoded['suggested_file_name']).decode(),
|
||||
[BlobInfo(info['blob_num'], info['length'], info['iv'], info.get('blob_hash'))
|
||||
[BlobInfo(info['blob_num'], info['length'], info['iv'], added_on, info.get('blob_hash'))
|
||||
for info in decoded['blobs']],
|
||||
decoded['stream_hash'],
|
||||
blob.blob_hash
|
||||
|
@ -252,20 +258,25 @@ class StreamDescriptor:
|
|||
iv_generator = iv_generator or random_iv_generator()
|
||||
key = key or os.urandom(AES.block_size // 8)
|
||||
blob_num = -1
|
||||
added_on = time.time()
|
||||
async for blob_bytes in file_reader(file_path):
|
||||
blob_num += 1
|
||||
blob_info = await BlobFile.create_from_unencrypted(
|
||||
loop, blob_dir, key, next(iv_generator), blob_bytes, blob_num, blob_completed_callback
|
||||
loop, blob_dir, key, next(iv_generator), blob_bytes, blob_num, added_on, True, blob_completed_callback
|
||||
)
|
||||
blobs.append(blob_info)
|
||||
blobs.append(
|
||||
BlobInfo(len(blobs), 0, binascii.hexlify(next(iv_generator)).decode())) # add the stream terminator
|
||||
# add the stream terminator
|
||||
BlobInfo(len(blobs), 0, binascii.hexlify(next(iv_generator)).decode(), added_on, None, True)
|
||||
)
|
||||
file_name = os.path.basename(file_path)
|
||||
suggested_file_name = sanitize_file_name(file_name)
|
||||
descriptor = cls(
|
||||
loop, blob_dir, file_name, binascii.hexlify(key).decode(), suggested_file_name, blobs
|
||||
)
|
||||
sd_blob = await descriptor.make_sd_blob(old_sort=old_sort, blob_completed_callback=blob_completed_callback)
|
||||
sd_blob = await descriptor.make_sd_blob(
|
||||
old_sort=old_sort, blob_completed_callback=blob_completed_callback, added_on=added_on, is_mine=True
|
||||
)
|
||||
descriptor.sd_hash = sd_blob.blob_hash
|
||||
return descriptor
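With the new parameters, callers can stamp ownership metadata onto the sd blob when they create it. A minimal sketch of the new signature; descriptor and blob_manager are assumed to already exist.

import time

sd_blob = await descriptor.make_sd_blob(
    old_sort=False,
    blob_completed_callback=blob_manager.blob_completed,  # callback used elsewhere in this diff
    added_on=time.time(),   # creation timestamp now carried on the blob
    is_mine=True,           # marks the blob as locally created
)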
|
||||
|
||||
|
|
|
@ -3,11 +3,13 @@ import typing
|
|||
import logging
|
||||
import binascii
|
||||
|
||||
from lbry.dht.peer import make_kademlia_peer
|
||||
from lbry.dht.node import get_kademlia_peers_from_hosts
|
||||
from lbry.error import DownloadSDTimeoutError
|
||||
from lbry.utils import resolve_host, lru_cache_concurrent
|
||||
from lbry.utils import lru_cache_concurrent
|
||||
from lbry.stream.descriptor import StreamDescriptor
|
||||
from lbry.blob_exchange.downloader import BlobDownloader
|
||||
from lbry.torrent.tracker import enqueue_tracker_search
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.conf import Config
|
||||
from lbry.dht.node import Node
|
||||
|
@ -25,8 +27,8 @@ class StreamDownloader:
|
|||
self.config = config
|
||||
self.blob_manager = blob_manager
|
||||
self.sd_hash = sd_hash
|
||||
self.search_queue = asyncio.Queue(loop=loop) # blob hashes to feed into the iterative finder
|
||||
self.peer_queue = asyncio.Queue(loop=loop) # new peers to try
|
||||
self.search_queue = asyncio.Queue() # blob hashes to feed into the iterative finder
|
||||
self.peer_queue = asyncio.Queue() # new peers to try
|
||||
self.blob_downloader = BlobDownloader(self.loop, self.config, self.blob_manager, self.peer_queue)
|
||||
self.descriptor: typing.Optional[StreamDescriptor] = descriptor
|
||||
self.node: typing.Optional['Node'] = None
|
||||
|
@ -48,26 +50,19 @@ class StreamDownloader:
|
|||
self.cached_read_blob = cached_read_blob
|
||||
|
||||
async def add_fixed_peers(self):
|
||||
def _delayed_add_fixed_peers():
|
||||
def _add_fixed_peers(fixed_peers):
|
||||
self.peer_queue.put_nowait(fixed_peers)
|
||||
self.added_fixed_peers = True
|
||||
self.peer_queue.put_nowait([
|
||||
make_kademlia_peer(None, address, None, tcp_port=port, allow_localhost=True)
|
||||
for address, port in addresses
|
||||
])
|
||||
|
||||
if not self.config.fixed_peers:
|
||||
return
|
||||
addresses = [
|
||||
(await resolve_host(url, port, proto='tcp'), port)
|
||||
for url, port in self.config.fixed_peers
|
||||
]
|
||||
if 'dht' in self.config.components_to_skip or not self.node or not \
|
||||
len(self.node.protocol.routing_table.get_peers()) > 0:
|
||||
self.fixed_peers_delay = 0.0
|
||||
else:
|
||||
self.fixed_peers_delay = self.config.fixed_peer_delay
|
||||
|
||||
self.fixed_peers_handle = self.loop.call_later(self.fixed_peers_delay, _delayed_add_fixed_peers)
|
||||
fixed_peers = await get_kademlia_peers_from_hosts(self.config.fixed_peers)
|
||||
self.fixed_peers_handle = self.loop.call_later(self.fixed_peers_delay, _add_fixed_peers, fixed_peers)
|
||||
|
||||
async def load_descriptor(self, connection_id: int = 0):
|
||||
# download or get the sd blob
|
||||
|
@ -77,7 +72,7 @@ class StreamDownloader:
|
|||
now = self.loop.time()
|
||||
sd_blob = await asyncio.wait_for(
|
||||
self.blob_downloader.download_blob(self.sd_hash, connection_id),
|
||||
self.config.blob_download_timeout, loop=self.loop
|
||||
self.config.blob_download_timeout
|
||||
)
|
||||
log.info("downloaded sd blob %s", self.sd_hash)
|
||||
self.time_to_descriptor = self.loop.time() - now
|
||||
|
@ -90,7 +85,7 @@ class StreamDownloader:
|
|||
)
|
||||
log.info("loaded stream manifest %s", self.sd_hash)
|
||||
|
||||
async def start(self, node: typing.Optional['Node'] = None, connection_id: int = 0):
|
||||
async def start(self, node: typing.Optional['Node'] = None, connection_id: int = 0, save_stream=True):
|
||||
# set up peer accumulation
|
||||
self.node = node or self.node # fixme: this shouldnt be set here!
|
||||
if self.node:
|
||||
|
@ -98,6 +93,7 @@ class StreamDownloader:
|
|||
self.accumulate_task.cancel()
|
||||
_, self.accumulate_task = self.node.accumulate_peers(self.search_queue, self.peer_queue)
|
||||
await self.add_fixed_peers()
|
||||
enqueue_tracker_search(bytes.fromhex(self.sd_hash), self.peer_queue)
|
||||
# start searching for peers for the sd hash
|
||||
self.search_queue.put_nowait(self.sd_hash)
|
||||
log.info("searching for peers for stream %s", self.sd_hash)
|
||||
|
@ -105,11 +101,7 @@ class StreamDownloader:
|
|||
if not self.descriptor:
|
||||
await self.load_descriptor(connection_id)
|
||||
|
||||
# add the head blob to the peer search
|
||||
self.search_queue.put_nowait(self.descriptor.blobs[0].blob_hash)
|
||||
log.info("added head blob to peer search for stream %s", self.sd_hash)
|
||||
|
||||
if not await self.blob_manager.storage.stream_exists(self.sd_hash):
|
||||
if not await self.blob_manager.storage.stream_exists(self.sd_hash) and save_stream:
|
||||
await self.blob_manager.storage.store_stream(
|
||||
self.blob_manager.get_blob(self.sd_hash, length=self.descriptor.length), self.descriptor
|
||||
)
|
||||
|
@ -119,7 +111,7 @@ class StreamDownloader:
|
|||
raise ValueError(f"blob {blob_info.blob_hash} is not part of stream with sd hash {self.sd_hash}")
|
||||
blob = await asyncio.wait_for(
|
||||
self.blob_downloader.download_blob(blob_info.blob_hash, blob_info.length, connection_id),
|
||||
self.config.blob_download_timeout * 10, loop=self.loop
|
||||
self.config.blob_download_timeout * 10
|
||||
)
|
||||
return blob
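Most of the changes in this file (and the files below) simply drop the loop= keyword: current asyncio primitives bind to the running loop themselves, and the argument was removed in Python 3.10. A self-contained illustration of the replacement style:

import asyncio

async def main():
    queue = asyncio.Queue()    # previously asyncio.Queue(loop=loop)
    event = asyncio.Event()    # previously asyncio.Event(loop=loop)
    queue.put_nowait('sd_hash')
    print(await asyncio.wait_for(queue.get(), timeout=5))
    event.set()
    await event.wait()

asyncio.run(main())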
|
||||
|
||||
|
|
|
@ -16,10 +16,8 @@ from lbry.file.source import ManagedDownloadSource
|
|||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.conf import Config
|
||||
from lbry.schema.claim import Claim
|
||||
from lbry.blob.blob_manager import BlobManager
|
||||
from lbry.blob.blob_info import BlobInfo
|
||||
from lbry.dht.node import Node
|
||||
from lbry.extras.daemon.analytics import AnalyticsManager
|
||||
from lbry.wallet.transaction import Transaction
|
||||
|
||||
|
@ -62,9 +60,9 @@ class ManagedStream(ManagedDownloadSource):
|
|||
self.file_output_task: typing.Optional[asyncio.Task] = None
|
||||
self.delayed_stop_task: typing.Optional[asyncio.Task] = None
|
||||
self.streaming_responses: typing.List[typing.Tuple[Request, StreamResponse]] = []
|
||||
self.fully_reflected = asyncio.Event(loop=self.loop)
|
||||
self.streaming = asyncio.Event(loop=self.loop)
|
||||
self._running = asyncio.Event(loop=self.loop)
|
||||
self.fully_reflected = asyncio.Event()
|
||||
self.streaming = asyncio.Event()
|
||||
self._running = asyncio.Event()
|
||||
|
||||
@property
|
||||
def sd_hash(self) -> str:
|
||||
|
@ -84,7 +82,19 @@ class ManagedStream(ManagedDownloadSource):
|
|||
|
||||
@property
|
||||
def file_name(self) -> Optional[str]:
|
||||
return self._file_name or (self.descriptor.suggested_file_name if self.descriptor else None)
|
||||
return self._file_name or self.suggested_file_name
|
||||
|
||||
@property
|
||||
def suggested_file_name(self) -> Optional[str]:
|
||||
first_option = ((self.descriptor and self.descriptor.suggested_file_name) or '').strip()
|
||||
return sanitize_file_name(first_option or (self.stream_claim_info and self.stream_claim_info.claim and
|
||||
self.stream_claim_info.claim.stream.source.name))
|
||||
|
||||
@property
|
||||
def stream_name(self) -> Optional[str]:
|
||||
first_option = ((self.descriptor and self.descriptor.stream_name) or '').strip()
|
||||
return first_option or (self.stream_claim_info and self.stream_claim_info.claim and
|
||||
self.stream_claim_info.claim.stream.source.name)
|
||||
|
||||
@property
|
||||
def written_bytes(self) -> int:
|
||||
|
@ -118,7 +128,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
|
||||
@property
|
||||
def mime_type(self):
|
||||
return guess_media_type(os.path.basename(self.descriptor.suggested_file_name))[0]
|
||||
return guess_media_type(os.path.basename(self.suggested_file_name))[0]
|
||||
|
||||
@property
|
||||
def download_path(self):
|
||||
|
@ -151,7 +161,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
log.info("start downloader for stream (sd hash: %s)", self.sd_hash)
|
||||
self._running.set()
|
||||
try:
|
||||
await asyncio.wait_for(self.downloader.start(), timeout, loop=self.loop)
|
||||
await asyncio.wait_for(self.downloader.start(), timeout)
|
||||
except asyncio.TimeoutError:
|
||||
self._running.clear()
|
||||
raise DownloadSDTimeoutError(self.sd_hash)
|
||||
|
@ -164,7 +174,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
if not self._file_name:
|
||||
self._file_name = await get_next_available_file_name(
|
||||
self.loop, self.download_directory,
|
||||
self._file_name or sanitize_file_name(self.descriptor.suggested_file_name)
|
||||
self._file_name or sanitize_file_name(self.suggested_file_name)
|
||||
)
|
||||
file_name, download_dir = self._file_name, self.download_directory
|
||||
else:
|
||||
|
@ -181,7 +191,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
Stop any running save/stream tasks as well as the downloader and update the status in the database
|
||||
"""
|
||||
|
||||
self.stop_tasks()
|
||||
await self.stop_tasks()
|
||||
if (finished and self.status != self.STATUS_FINISHED) or self.status == self.STATUS_RUNNING:
|
||||
await self.update_status(self.STATUS_FINISHED if finished else self.STATUS_STOPPED)
|
||||
|
||||
|
@ -269,7 +279,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
log.info("finished saving file for lbry://%s#%s (sd hash %s...) -> %s", self.claim_name, self.claim_id,
|
||||
self.sd_hash[:6], self.full_path)
|
||||
await self.blob_manager.storage.set_saved_file(self.stream_hash)
|
||||
except Exception as err:
|
||||
except (Exception, asyncio.CancelledError) as err:
|
||||
if os.path.isfile(output_path):
|
||||
log.warning("removing incomplete download %s for %s", output_path, self.sd_hash)
|
||||
os.remove(output_path)
|
||||
|
@ -296,14 +306,14 @@ class ManagedStream(ManagedDownloadSource):
|
|||
self.download_directory = download_directory or self.download_directory or self.config.download_dir
|
||||
if not self.download_directory:
|
||||
raise ValueError("no directory to download to")
|
||||
if not (file_name or self._file_name or self.descriptor.suggested_file_name):
|
||||
if not (file_name or self._file_name or self.suggested_file_name):
|
||||
raise ValueError("no file name to download to")
|
||||
if not os.path.isdir(self.download_directory):
|
||||
log.warning("download directory '%s' does not exist, attempting to make it", self.download_directory)
|
||||
os.mkdir(self.download_directory)
|
||||
self._file_name = await get_next_available_file_name(
|
||||
self.loop, self.download_directory,
|
||||
file_name or self._file_name or sanitize_file_name(self.descriptor.suggested_file_name)
|
||||
file_name or self._file_name or sanitize_file_name(self.suggested_file_name)
|
||||
)
|
||||
await self.blob_manager.storage.change_file_download_dir_and_file_name(
|
||||
self.stream_hash, self.download_directory, self.file_name
|
||||
|
@ -311,15 +321,16 @@ class ManagedStream(ManagedDownloadSource):
|
|||
await self.update_status(ManagedStream.STATUS_RUNNING)
|
||||
self.file_output_task = self.loop.create_task(self._save_file(self.full_path))
|
||||
try:
|
||||
await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout, loop=self.loop)
|
||||
await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout)
|
||||
except asyncio.TimeoutError:
|
||||
log.warning("timeout starting to write data for lbry://%s#%s", self.claim_name, self.claim_id)
|
||||
self.stop_tasks()
|
||||
await self.stop_tasks()
|
||||
await self.update_status(ManagedStream.STATUS_STOPPED)
|
||||
|
||||
def stop_tasks(self):
|
||||
async def stop_tasks(self):
|
||||
if self.file_output_task and not self.file_output_task.done():
|
||||
self.file_output_task.cancel()
|
||||
await asyncio.gather(self.file_output_task, return_exceptions=True)
|
||||
self.file_output_task = None
|
||||
while self.streaming_responses:
|
||||
req, response = self.streaming_responses.pop()
|
||||
|
@ -356,7 +367,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
return sent
|
||||
except ConnectionError:
|
||||
return sent
|
||||
except (OSError, Exception) as err:
|
||||
except (OSError, Exception, asyncio.CancelledError) as err:
|
||||
if isinstance(err, asyncio.CancelledError):
|
||||
log.warning("stopped uploading %s#%s to reflector", self.claim_name, self.claim_id)
|
||||
elif isinstance(err, OSError):
|
||||
|
@ -391,7 +402,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
self.sd_hash[:6])
|
||||
await self.stop()
|
||||
return
|
||||
await asyncio.sleep(1, loop=self.loop)
|
||||
await asyncio.sleep(1)
|
||||
|
||||
def _prepare_range_response_headers(self, get_range: str) -> typing.Tuple[typing.Dict[str, str], int, int, int]:
|
||||
if '=' in get_range:
|
||||
|
|
|
@ -21,7 +21,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
|
|||
self.loop = asyncio.get_event_loop()
|
||||
self.blob_manager = blob_manager
|
||||
self.server_task: asyncio.Task = None
|
||||
self.started_listening = asyncio.Event(loop=self.loop)
|
||||
self.started_listening = asyncio.Event()
|
||||
self.buf = b''
|
||||
self.transport: asyncio.StreamWriter = None
|
||||
self.writer: typing.Optional['HashBlobWriter'] = None
|
||||
|
@ -29,9 +29,9 @@ class ReflectorServerProtocol(asyncio.Protocol):
|
|||
self.descriptor: typing.Optional['StreamDescriptor'] = None
|
||||
self.sd_blob: typing.Optional['BlobFile'] = None
|
||||
self.received = []
|
||||
self.incoming = incoming_event or asyncio.Event(loop=self.loop)
|
||||
self.not_incoming = not_incoming_event or asyncio.Event(loop=self.loop)
|
||||
self.stop_event = stop_event or asyncio.Event(loop=self.loop)
|
||||
self.incoming = incoming_event or asyncio.Event()
|
||||
self.not_incoming = not_incoming_event or asyncio.Event()
|
||||
self.stop_event = stop_event or asyncio.Event()
|
||||
self.chunk_size = response_chunk_size
|
||||
self.wait_for_stop_task: typing.Optional[asyncio.Task] = None
|
||||
self.partial_event = partial_event
|
||||
|
@ -94,7 +94,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
|
|||
self.incoming.set()
|
||||
self.send_response({"send_sd_blob": True})
|
||||
try:
|
||||
await asyncio.wait_for(self.sd_blob.verified.wait(), 30, loop=self.loop)
|
||||
await asyncio.wait_for(self.sd_blob.verified.wait(), 30)
|
||||
self.descriptor = await StreamDescriptor.from_stream_descriptor_blob(
|
||||
self.loop, self.blob_manager.blob_dir, self.sd_blob
|
||||
)
|
||||
|
@ -140,7 +140,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
|
|||
self.incoming.set()
|
||||
self.send_response({"send_blob": True})
|
||||
try:
|
||||
await asyncio.wait_for(blob.verified.wait(), 30, loop=self.loop)
|
||||
await asyncio.wait_for(blob.verified.wait(), 30)
|
||||
self.send_response({"received_blob": True})
|
||||
except asyncio.TimeoutError:
|
||||
self.send_response({"received_blob": False})
|
||||
|
@ -162,10 +162,10 @@ class ReflectorServer:
|
|||
self.loop = asyncio.get_event_loop()
|
||||
self.blob_manager = blob_manager
|
||||
self.server_task: typing.Optional[asyncio.Task] = None
|
||||
self.started_listening = asyncio.Event(loop=self.loop)
|
||||
self.stopped_listening = asyncio.Event(loop=self.loop)
|
||||
self.incoming_event = incoming_event or asyncio.Event(loop=self.loop)
|
||||
self.not_incoming_event = not_incoming_event or asyncio.Event(loop=self.loop)
|
||||
self.started_listening = asyncio.Event()
|
||||
self.stopped_listening = asyncio.Event()
|
||||
self.incoming_event = incoming_event or asyncio.Event()
|
||||
self.not_incoming_event = not_incoming_event or asyncio.Event()
|
||||
self.response_chunk_size = response_chunk_size
|
||||
self.stop_event = stop_event
|
||||
self.partial_needs = partial_needs # for testing cases where it doesn't know what it wants
|
||||
|
|
|
@ -54,7 +54,7 @@ class StreamManager(SourceManager):
|
|||
self.re_reflect_task: Optional[asyncio.Task] = None
|
||||
self.update_stream_finished_futs: typing.List[asyncio.Future] = []
|
||||
self.running_reflector_uploads: typing.Dict[str, asyncio.Task] = {}
|
||||
self.started = asyncio.Event(loop=self.loop)
|
||||
self.started = asyncio.Event()
|
||||
|
||||
@property
|
||||
def streams(self):
|
||||
|
@ -70,6 +70,7 @@ class StreamManager(SourceManager):
|
|||
|
||||
async def recover_streams(self, file_infos: typing.List[typing.Dict]):
|
||||
to_restore = []
|
||||
to_check = []
|
||||
|
||||
async def recover_stream(sd_hash: str, stream_hash: str, stream_name: str,
|
||||
suggested_file_name: str, key: str,
|
||||
|
@ -82,6 +83,7 @@ class StreamManager(SourceManager):
|
|||
if not descriptor:
|
||||
return
|
||||
to_restore.append((descriptor, sd_blob, content_fee))
|
||||
to_check.extend([sd_blob.blob_hash] + [blob.blob_hash for blob in descriptor.blobs[:-1]])
|
||||
|
||||
await asyncio.gather(*[
|
||||
recover_stream(
|
||||
|
@ -93,6 +95,8 @@ class StreamManager(SourceManager):
|
|||
|
||||
if to_restore:
|
||||
await self.storage.recover_streams(to_restore, self.config.download_dir)
|
||||
if to_check:
|
||||
await self.blob_manager.ensure_completed_blobs_status(to_check)
|
||||
|
||||
# if self.blob_manager._save_blobs:
|
||||
# log.info("Recovered %i/%i attempted streams", len(to_restore), len(file_infos))
|
||||
|
@ -146,7 +150,7 @@ class StreamManager(SourceManager):
|
|||
file_info['added_on'], file_info['fully_reflected']
|
||||
)))
|
||||
if add_stream_tasks:
|
||||
await asyncio.gather(*add_stream_tasks, loop=self.loop)
|
||||
await asyncio.gather(*add_stream_tasks)
|
||||
log.info("Started stream manager with %i files", len(self._sources))
|
||||
if not self.node:
|
||||
log.info("no DHT node given, resuming downloads trusting that we can contact reflector")
|
||||
|
@ -155,14 +159,11 @@ class StreamManager(SourceManager):
|
|||
self.resume_saving_task = asyncio.ensure_future(asyncio.gather(
|
||||
*(self._sources[sd_hash].save_file(file_name, download_directory)
|
||||
for (file_name, download_directory, sd_hash) in to_resume_saving),
|
||||
loop=self.loop
|
||||
))
|
||||
|
||||
async def reflect_streams(self):
|
||||
try:
|
||||
return await self._reflect_streams()
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
except Exception:
|
||||
log.exception("reflector task encountered an unexpected error!")
|
||||
|
||||
|
@ -182,21 +183,21 @@ class StreamManager(SourceManager):
|
|||
batch.append(self.reflect_stream(stream))
|
||||
if len(batch) >= self.config.concurrent_reflector_uploads:
|
||||
log.debug("waiting for batch of %s reflecting streams", len(batch))
|
||||
await asyncio.gather(*batch, loop=self.loop)
|
||||
await asyncio.gather(*batch)
|
||||
log.debug("done processing %s streams", len(batch))
|
||||
batch = []
|
||||
if batch:
|
||||
log.debug("waiting for batch of %s reflecting streams", len(batch))
|
||||
await asyncio.gather(*batch, loop=self.loop)
|
||||
await asyncio.gather(*batch)
|
||||
log.debug("done processing %s streams", len(batch))
|
||||
await asyncio.sleep(300, loop=self.loop)
|
||||
await asyncio.sleep(300)
|
||||
|
||||
async def start(self):
|
||||
await super().start()
|
||||
self.re_reflect_task = self.loop.create_task(self.reflect_streams())
|
||||
|
||||
def stop(self):
|
||||
super().stop()
|
||||
async def stop(self):
|
||||
await super().stop()
|
||||
if self.resume_saving_task and not self.resume_saving_task.done():
|
||||
self.resume_saving_task.cancel()
|
||||
if self.re_reflect_task and not self.re_reflect_task.done():
|
||||
|
@ -223,11 +224,13 @@ class StreamManager(SourceManager):
|
|||
)
|
||||
return task
|
||||
|
||||
async def _retriable_reflect_stream(self, stream, host, port):
|
||||
@staticmethod
|
||||
async def _retriable_reflect_stream(stream, host, port):
|
||||
sent = await stream.upload_to_reflector(host, port)
|
||||
while not stream.is_fully_reflected and stream.reflector_progress > 0 and len(sent) > 0:
|
||||
stream.reflector_progress = 0
|
||||
sent = await stream.upload_to_reflector(host, port)
|
||||
return sent
|
||||
|
||||
async def create(self, file_path: str, key: Optional[bytes] = None,
|
||||
iv_generator: Optional[typing.Generator[bytes, None, None]] = None) -> ManagedStream:
|
||||
|
@ -236,7 +239,7 @@ class StreamManager(SourceManager):
|
|||
blob_completed_callback=self.blob_manager.blob_completed
|
||||
)
|
||||
await self.storage.store_stream(
|
||||
self.blob_manager.get_blob(descriptor.sd_hash), descriptor
|
||||
self.blob_manager.get_blob(descriptor.sd_hash, is_mine=True), descriptor
|
||||
)
|
||||
row_id = await self.storage.save_published_file(
|
||||
descriptor.stream_hash, os.path.basename(file_path), os.path.dirname(file_path), 0
|
||||
|
@ -257,7 +260,7 @@ class StreamManager(SourceManager):
|
|||
return
|
||||
if source.identifier in self.running_reflector_uploads:
|
||||
self.running_reflector_uploads[source.identifier].cancel()
|
||||
source.stop_tasks()
|
||||
await source.stop_tasks()
|
||||
if source.identifier in self.streams:
|
||||
del self.streams[source.identifier]
|
||||
blob_hashes = [source.identifier] + [b.blob_hash for b in source.descriptor.blobs[:-1]]
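The _reflect_streams changes above now await each batch with plain asyncio.gather(). A generic sketch of that batching pattern, using placeholder names of my own rather than code from this file:

import asyncio

async def run_batched(coros, limit):
    batch = []
    for coro in coros:
        batch.append(coro)
        if len(batch) >= limit:
            await asyncio.gather(*batch)   # flush a full batch before queueing more
            batch = []
    if batch:
        await asyncio.gather(*batch)       # flush the remainder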
136
lbry/testcase.py
|
@ -17,8 +17,10 @@ from functools import partial
|
|||
from lbry.wallet import WalletManager, Wallet, Ledger, Account, Transaction
|
||||
from lbry.conf import Config
|
||||
from lbry.wallet.util import satoshis_to_coins
|
||||
from lbry.wallet.dewies import lbc_to_dewies
|
||||
from lbry.wallet.orchstr8 import Conductor
|
||||
from lbry.wallet.orchstr8.node import BlockchainNode, WalletNode, HubNode
|
||||
from lbry.wallet.orchstr8.node import LBCWalletNode, WalletNode
|
||||
from lbry.schema.claim import Claim
|
||||
|
||||
from lbry.extras.daemon.daemon import Daemon, jsonrpc_dumps_pretty
|
||||
from lbry.extras.daemon.components import Component, WalletComponent
|
||||
|
@ -132,17 +134,18 @@ class AsyncioTestCase(unittest.TestCase):
|
|||
|
||||
with outcome.testPartExecutor(self):
|
||||
self.setUp()
|
||||
self.add_timeout()
|
||||
self.loop.run_until_complete(self.asyncSetUp())
|
||||
if outcome.success:
|
||||
outcome.expecting_failure = expecting_failure
|
||||
with outcome.testPartExecutor(self, isTest=True):
|
||||
maybe_coroutine = testMethod()
|
||||
if asyncio.iscoroutine(maybe_coroutine):
|
||||
if self.TIMEOUT:
|
||||
self.loop.call_later(self.TIMEOUT, self.cancel)
|
||||
self.add_timeout()
|
||||
self.loop.run_until_complete(maybe_coroutine)
|
||||
outcome.expecting_failure = False
|
||||
with outcome.testPartExecutor(self):
|
||||
self.add_timeout()
|
||||
self.loop.run_until_complete(self.asyncTearDown())
|
||||
self.tearDown()
|
||||
|
||||
|
@ -190,6 +193,7 @@ class AsyncioTestCase(unittest.TestCase):
|
|||
with outcome.testPartExecutor(self):
|
||||
maybe_coroutine = function(*args, **kwargs)
|
||||
if asyncio.iscoroutine(maybe_coroutine):
|
||||
self.add_timeout()
|
||||
self.loop.run_until_complete(maybe_coroutine)
|
||||
|
||||
def cancel(self):
|
||||
|
@ -198,6 +202,16 @@ class AsyncioTestCase(unittest.TestCase):
|
|||
task.print_stack()
|
||||
task.cancel()
|
||||
|
||||
def add_timeout(self):
|
||||
if self.TIMEOUT:
|
||||
self.loop.call_later(self.TIMEOUT, self.check_timeout, time())
|
||||
|
||||
def check_timeout(self, started):
|
||||
if time() - started >= self.TIMEOUT:
|
||||
self.cancel()
|
||||
else:
|
||||
self.loop.call_later(self.TIMEOUT, self.check_timeout, started)
|
||||
|
||||
|
||||
class AdvanceTimeTestCase(AsyncioTestCase):
|
||||
|
||||
|
@ -222,8 +236,7 @@ class IntegrationTestCase(AsyncioTestCase):
|
|||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.conductor: Optional[Conductor] = None
|
||||
self.blockchain: Optional[BlockchainNode] = None
|
||||
self.hub: Optional[HubNode] = None
|
||||
self.blockchain: Optional[LBCWalletNode] = None
|
||||
self.wallet_node: Optional[WalletNode] = None
|
||||
self.manager: Optional[WalletManager] = None
|
||||
self.ledger: Optional[Ledger] = None
|
||||
|
@ -232,16 +245,15 @@ class IntegrationTestCase(AsyncioTestCase):
|
|||
|
||||
async def asyncSetUp(self):
|
||||
self.conductor = Conductor(seed=self.SEED)
|
||||
await self.conductor.start_blockchain()
|
||||
self.addCleanup(self.conductor.stop_blockchain)
|
||||
await self.conductor.start_lbcd()
|
||||
self.addCleanup(self.conductor.stop_lbcd)
|
||||
await self.conductor.start_lbcwallet()
|
||||
self.addCleanup(self.conductor.stop_lbcwallet)
|
||||
await self.conductor.start_spv()
|
||||
self.addCleanup(self.conductor.stop_spv)
|
||||
await self.conductor.start_wallet()
|
||||
self.addCleanup(self.conductor.stop_wallet)
|
||||
await self.conductor.start_hub()
|
||||
self.addCleanup(self.conductor.stop_hub)
|
||||
self.blockchain = self.conductor.blockchain_node
|
||||
self.hub = self.conductor.hub_node
|
||||
self.blockchain = self.conductor.lbcwallet_node
|
||||
self.wallet_node = self.conductor.wallet_node
|
||||
self.manager = self.wallet_node.manager
|
||||
self.ledger = self.wallet_node.ledger
|
||||
|
@ -255,6 +267,13 @@ class IntegrationTestCase(AsyncioTestCase):
|
|||
def broadcast(self, tx):
|
||||
return self.ledger.broadcast(tx)
|
||||
|
||||
async def broadcast_and_confirm(self, tx, ledger=None):
|
||||
ledger = ledger or self.ledger
|
||||
notifications = asyncio.create_task(ledger.wait(tx))
|
||||
await ledger.broadcast(tx)
|
||||
await notifications
|
||||
await self.generate_and_wait(1, [tx.id], ledger)
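A sketch of how a test can lean on the helper above; the transaction-building call is a hypothetical stand-in for whatever produces a signed transaction in the test.

tx = await build_signed_transaction()   # hypothetical helper
await self.broadcast_and_confirm(tx)    # broadcast, wait for the wallet, mine a block, wait again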
|
||||
|
||||
async def on_header(self, height):
|
||||
if self.ledger.headers.height < height:
|
||||
await self.ledger.on_header.where(
|
||||
|
@ -262,11 +281,29 @@ class IntegrationTestCase(AsyncioTestCase):
|
|||
)
|
||||
return True
|
||||
|
||||
def on_transaction_id(self, txid, ledger=None):
|
||||
return (ledger or self.ledger).on_transaction.where(
|
||||
lambda e: e.tx.id == txid
|
||||
async def send_to_address_and_wait(self, address, amount, blocks_to_generate=0, ledger=None):
|
||||
tx_watch = []
|
||||
txid = None
|
||||
done = False
|
||||
watcher = (ledger or self.ledger).on_transaction.where(
|
||||
lambda e: e.tx.id == txid or done or tx_watch.append(e.tx.id)
|
||||
)
|
||||
|
||||
txid = await self.blockchain.send_to_address(address, amount)
|
||||
done = txid in tx_watch
|
||||
await watcher
|
||||
|
||||
await self.generate_and_wait(blocks_to_generate, [txid], ledger)
|
||||
return txid
|
||||
|
||||
async def generate_and_wait(self, blocks_to_generate, txids, ledger=None):
|
||||
if blocks_to_generate > 0:
|
||||
watcher = (ledger or self.ledger).on_transaction.where(
|
||||
lambda e: ((e.tx.id in txids and txids.remove(e.tx.id)), len(txids) <= 0)[-1] # multi-statement lambda
|
||||
)
|
||||
await self.generate(blocks_to_generate)
|
||||
await watcher
|
||||
|
||||
def on_address_update(self, address):
|
||||
return self.ledger.on_transaction.where(
|
||||
lambda e: e.address == address
|
||||
|
@ -277,6 +314,22 @@ class IntegrationTestCase(AsyncioTestCase):
|
|||
lambda e: e.tx.id == tx.id and e.address == address
|
||||
)
|
||||
|
||||
async def generate(self, blocks):
|
||||
""" Ask lbrycrd to generate some blocks and wait until ledger has them. """
|
||||
prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
|
||||
self.conductor.spv_node.server.synchronized.clear()
|
||||
await self.blockchain.generate(blocks)
|
||||
height = self.blockchain.block_expected
|
||||
await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate
|
||||
while True:
|
||||
await self.conductor.spv_node.server.synchronized.wait()
|
||||
self.conductor.spv_node.server.synchronized.clear()
|
||||
if self.conductor.spv_node.server.db.db_height < height:
|
||||
continue
|
||||
if self.conductor.spv_node.server._es_height < height:
|
||||
continue
|
||||
break
|
||||
|
||||
|
||||
class FakeExchangeRateManager(ExchangeRateManager):
|
||||
|
||||
|
@ -337,20 +390,19 @@ class CommandTestCase(IntegrationTestCase):
|
|||
self.skip_libtorrent = True
|
||||
|
||||
async def asyncSetUp(self):
|
||||
await super().asyncSetUp()
|
||||
|
||||
logging.getLogger('lbry.blob_exchange').setLevel(self.VERBOSITY)
|
||||
logging.getLogger('lbry.daemon').setLevel(self.VERBOSITY)
|
||||
logging.getLogger('lbry.stream').setLevel(self.VERBOSITY)
|
||||
logging.getLogger('lbry.wallet').setLevel(self.VERBOSITY)
|
||||
|
||||
await super().asyncSetUp()
|
||||
|
||||
self.daemon = await self.add_daemon(self.wallet_node)
|
||||
|
||||
await self.account.ensure_address_gap()
|
||||
address = (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0]
|
||||
sendtxid = await self.blockchain.send_to_address(address, 10)
|
||||
await self.confirm_tx(sendtxid)
|
||||
await self.generate(5)
|
||||
await self.send_to_address_and_wait(address, 10, 6)
|
||||
|
||||
server_tmp_dir = tempfile.mkdtemp()
|
||||
self.addCleanup(shutil.rmtree, server_tmp_dir)
|
||||
|
@ -447,9 +499,14 @@ class CommandTestCase(IntegrationTestCase):
|
|||
|
||||
async def confirm_tx(self, txid, ledger=None):
|
||||
""" Wait for tx to be in mempool, then generate a block, wait for tx to be in a block. """
|
||||
await self.on_transaction_id(txid, ledger)
|
||||
await self.generate(1)
|
||||
await self.on_transaction_id(txid, ledger)
|
||||
# await (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid)
|
||||
on_tx = (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid)
|
||||
await asyncio.wait([self.generate(1), on_tx], timeout=5)
|
||||
|
||||
# # actually, if it's in the mempool or in the block we're fine
|
||||
# await self.generate_and_wait(1, [txid], ledger=ledger)
|
||||
# return txid
|
||||
|
||||
return txid
|
||||
|
||||
async def on_transaction_dict(self, tx):
|
||||
|
@ -464,12 +521,6 @@ class CommandTestCase(IntegrationTestCase):
|
|||
addresses.add(txo['address'])
|
||||
return list(addresses)
|
||||
|
||||
async def generate(self, blocks):
|
||||
""" Ask lbrycrd to generate some blocks and wait until ledger has them. """
|
||||
prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
|
||||
await self.blockchain.generate(blocks)
|
||||
await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate
|
||||
|
||||
async def blockchain_claim_name(self, name: str, value: str, amount: str, confirm=True):
|
||||
txid = await self.blockchain._cli_cmnd('claimname', name, value, amount)
|
||||
if confirm:
|
||||
|
@ -490,12 +541,27 @@ class CommandTestCase(IntegrationTestCase):
|
|||
""" Synchronous version of `out` method. """
|
||||
return json.loads(jsonrpc_dumps_pretty(value, ledger=self.ledger))['result']
|
||||
|
||||
async def confirm_and_render(self, awaitable, confirm) -> Transaction:
|
||||
async def confirm_and_render(self, awaitable, confirm, return_tx=False) -> Transaction:
|
||||
tx = await awaitable
|
||||
if confirm:
|
||||
await self.ledger.wait(tx)
|
||||
await self.generate(1)
|
||||
await self.ledger.wait(tx, self.blockchain.block_expected)
|
||||
if not return_tx:
|
||||
return self.sout(tx)
|
||||
return tx
|
||||
|
||||
async def create_nondeterministic_channel(self, name, price, pubkey_bytes, daemon=None, blocking=False):
|
||||
account = (daemon or self.daemon).wallet_manager.default_account
|
||||
claim_address = await account.receiving.get_or_create_usable_address()
|
||||
claim = Claim()
|
||||
claim.channel.public_key_bytes = pubkey_bytes
|
||||
tx = await Transaction.claim_create(
|
||||
name, claim, lbc_to_dewies(price),
|
||||
claim_address, [self.account], self.account
|
||||
)
|
||||
await tx.sign([self.account])
|
||||
await (daemon or self.daemon).broadcast_or_release(tx, blocking)
|
||||
return self.sout(tx)
|
||||
|
||||
def create_upload_file(self, data, prefix=None, suffix=None):
|
||||
|
@ -507,19 +573,19 @@ class CommandTestCase(IntegrationTestCase):
|
|||
|
||||
async def stream_create(
|
||||
self, name='hovercraft', bid='1.0', file_path=None,
|
||||
data=b'hi!', confirm=True, prefix=None, suffix=None, **kwargs):
|
||||
data=b'hi!', confirm=True, prefix=None, suffix=None, return_tx=False, **kwargs):
|
||||
if file_path is None and data is not None:
|
||||
file_path = self.create_upload_file(data=data, prefix=prefix, suffix=suffix)
|
||||
return await self.confirm_and_render(
|
||||
self.daemon.jsonrpc_stream_create(name, bid, file_path=file_path, **kwargs), confirm
|
||||
self.daemon.jsonrpc_stream_create(name, bid, file_path=file_path, **kwargs), confirm, return_tx
|
||||
)
|
||||
|
||||
async def stream_update(
|
||||
self, claim_id, data=None, prefix=None, suffix=None, confirm=True, **kwargs):
|
||||
self, claim_id, data=None, prefix=None, suffix=None, confirm=True, return_tx=False, **kwargs):
|
||||
if data is not None:
|
||||
file_path = self.create_upload_file(data=data, prefix=prefix, suffix=suffix)
|
||||
return await self.confirm_and_render(
|
||||
self.daemon.jsonrpc_stream_update(claim_id, file_path=file_path, **kwargs), confirm
|
||||
self.daemon.jsonrpc_stream_update(claim_id, file_path=file_path, **kwargs), confirm, return_tx
|
||||
)
|
||||
return await self.confirm_and_render(
|
||||
self.daemon.jsonrpc_stream_update(claim_id, **kwargs), confirm
|
||||
|
@ -625,6 +691,9 @@ class CommandTestCase(IntegrationTestCase):
|
|||
async def claim_search(self, **kwargs):
|
||||
return (await self.out(self.daemon.jsonrpc_claim_search(**kwargs)))['items']
|
||||
|
||||
async def get_claim_by_claim_id(self, claim_id):
|
||||
return await self.out(self.ledger.get_claim_by_claim_id(claim_id))
|
||||
|
||||
async def file_list(self, *args, **kwargs):
|
||||
return (await self.out(self.daemon.jsonrpc_file_list(*args, **kwargs)))['items']
|
||||
|
||||
|
@ -649,6 +718,9 @@ class CommandTestCase(IntegrationTestCase):
|
|||
async def transaction_list(self, *args, **kwargs):
|
||||
return (await self.out(self.daemon.jsonrpc_transaction_list(*args, **kwargs)))['items']
|
||||
|
||||
async def blob_list(self, *args, **kwargs):
|
||||
return (await self.out(self.daemon.jsonrpc_blob_list(*args, **kwargs)))['items']
|
||||
|
||||
@staticmethod
|
||||
def get_claim_id(tx):
|
||||
return tx['outputs'][0]['claim_id']
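The helpers added above collapse the old send / confirm / generate dance in tests. A minimal, illustrative sketch of a test built on CommandTestCase using them; the address lookup mirrors calls that appear elsewhere in this diff.

class ExampleFundingTest(CommandTestCase):
    async def test_fund_address(self):
        address = await self.account.receiving.get_or_create_usable_address()
        # sends 10 LBC, waits for the wallet to see the tx, then mines 6 blocks
        txid = await self.send_to_address_and_wait(address, 10, blocks_to_generate=6)
        self.assertTrue(txid)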
@ -10,47 +10,13 @@ from typing import Optional
|
|||
import libtorrent
|
||||
|
||||
|
||||
NOTIFICATION_MASKS = [
|
||||
"error",
|
||||
"peer",
|
||||
"port_mapping",
|
||||
"storage",
|
||||
"tracker",
|
||||
"debug",
|
||||
"status",
|
||||
"progress",
|
||||
"ip_block",
|
||||
"dht",
|
||||
"stats",
|
||||
"session_log",
|
||||
"torrent_log",
|
||||
"peer_log",
|
||||
"incoming_request",
|
||||
"dht_log",
|
||||
"dht_operation",
|
||||
"port_mapping_log",
|
||||
"picker_log",
|
||||
"file_progress",
|
||||
"piece_progress",
|
||||
"upload",
|
||||
"block_progress"
|
||||
]
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
DEFAULT_FLAGS = ( # fixme: somehow the logic here is inverted?
|
||||
libtorrent.add_torrent_params_flags_t.flag_auto_managed
|
||||
| libtorrent.add_torrent_params_flags_t.flag_update_subscribe
|
||||
)
|
||||
|
||||
|
||||
def get_notification_type(notification) -> str:
|
||||
for i, notification_type in enumerate(NOTIFICATION_MASKS):
|
||||
if (1 << i) & notification:
|
||||
return notification_type
|
||||
raise ValueError("unrecognized notification type")
|
||||
|
||||
|
||||
class TorrentHandle:
|
||||
def __init__(self, loop, executor, handle):
|
||||
self._loop = loop
|
||||
|
@ -121,7 +87,7 @@ class TorrentHandle:
|
|||
self._show_status()
|
||||
if self.finished.is_set():
|
||||
break
|
||||
await asyncio.sleep(0.1, loop=self._loop)
|
||||
await asyncio.sleep(0.1)
|
||||
|
||||
async def pause(self):
|
||||
await self._loop.run_in_executor(
|
||||
|
@ -156,10 +122,8 @@ class TorrentSession:
|
|||
async def bind(self, interface: str = '0.0.0.0', port: int = 10889):
|
||||
settings = {
|
||||
'listen_interfaces': f"{interface}:{port}",
|
||||
'enable_outgoing_utp': True,
|
||||
'enable_incoming_utp': True,
|
||||
'enable_outgoing_tcp': False,
|
||||
'enable_incoming_tcp': False
|
||||
'enable_natpmp': False,
|
||||
'enable_upnp': False
|
||||
}
|
||||
self._session = await self._loop.run_in_executor(
|
||||
self._executor, libtorrent.session, settings # pylint: disable=c-extension-no-member
|
||||
|
@ -186,7 +150,7 @@ class TorrentSession:
|
|||
await self._loop.run_in_executor(
|
||||
self._executor, self._pop_alerts
|
||||
)
|
||||
await asyncio.sleep(1, loop=self._loop)
|
||||
await asyncio.sleep(1)
|
||||
|
||||
async def pause(self):
|
||||
await self._loop.run_in_executor(
|
||||
|
|
|
@ -36,7 +36,7 @@ class Torrent:
|
|||
def __init__(self, loop, handle):
|
||||
self._loop = loop
|
||||
self._handle = handle
|
||||
self.finished = asyncio.Event(loop=loop)
|
||||
self.finished = asyncio.Event()
|
||||
|
||||
def _threaded_update_status(self):
|
||||
status = self._handle.status()
|
||||
|
@ -58,7 +58,7 @@ class Torrent:
|
|||
log.info("finished downloading torrent!")
|
||||
await self.pause()
|
||||
break
|
||||
await asyncio.sleep(1, loop=self._loop)
|
||||
await asyncio.sleep(1)
|
||||
|
||||
async def pause(self):
|
||||
log.info("pause torrent")
|
||||
|
|
|
@ -74,7 +74,7 @@ class TorrentSource(ManagedDownloadSource):
|
|||
def bt_infohash(self):
|
||||
return self.identifier
|
||||
|
||||
def stop_tasks(self):
|
||||
async def stop_tasks(self):
|
||||
pass
|
||||
|
||||
@property
|
||||
|
@ -118,8 +118,8 @@ class TorrentManager(SourceManager):
|
|||
async def start(self):
|
||||
await super().start()
|
||||
|
||||
def stop(self):
|
||||
super().stop()
|
||||
async def stop(self):
|
||||
await super().stop()
|
||||
log.info("finished stopping the torrent manager")
|
||||
|
||||
async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):
|
||||
|
|
285
lbry/torrent/tracker.py
Normal file
|
@ -0,0 +1,285 @@
|
|||
import random
import socket
import string
import struct
import asyncio
import logging
import time
import ipaddress
from collections import namedtuple
from functools import reduce
from typing import Optional

from lbry.dht.node import get_kademlia_peers_from_hosts
from lbry.utils import resolve_host, async_timed_cache, cache_concurrent
from lbry.wallet.stream import StreamController
from lbry import version

log = logging.getLogger(__name__)
CONNECTION_EXPIRES_AFTER_SECONDS = 50
PREFIX = 'LB'  # todo: PR BEP20 to add ourselves
DEFAULT_TIMEOUT_SECONDS = 10.0
DEFAULT_CONCURRENCY_LIMIT = 100
# see: http://bittorrent.org/beps/bep_0015.html and http://xbtt.sourceforge.net/udp_tracker_protocol.html
ConnectRequest = namedtuple("ConnectRequest", ["connection_id", "action", "transaction_id"])
ConnectResponse = namedtuple("ConnectResponse", ["action", "transaction_id", "connection_id"])
AnnounceRequest = namedtuple("AnnounceRequest",
                             ["connection_id", "action", "transaction_id", "info_hash", "peer_id", "downloaded", "left",
                              "uploaded", "event", "ip_addr", "key", "num_want", "port"])
AnnounceResponse = namedtuple("AnnounceResponse",
                              ["action", "transaction_id", "interval", "leechers", "seeders", "peers"])
CompactIPv4Peer = namedtuple("CompactPeer", ["address", "port"])
ScrapeRequest = namedtuple("ScrapeRequest", ["connection_id", "action", "transaction_id", "infohashes"])
ScrapeResponse = namedtuple("ScrapeResponse", ["action", "transaction_id", "items"])
ScrapeResponseItem = namedtuple("ScrapeResponseItem", ["seeders", "completed", "leechers"])
ErrorResponse = namedtuple("ErrorResponse", ["action", "transaction_id", "message"])
structs = {
    ConnectRequest: struct.Struct(">QII"),
    ConnectResponse: struct.Struct(">IIQ"),
    AnnounceRequest: struct.Struct(">QII20s20sQQQIIIiH"),
    AnnounceResponse: struct.Struct(">IIIII"),
    CompactIPv4Peer: struct.Struct(">IH"),
    ScrapeRequest: struct.Struct(">QII"),
    ScrapeResponse: struct.Struct(">II"),
    ScrapeResponseItem: struct.Struct(">III"),
    ErrorResponse: struct.Struct(">II")
}


def decode(cls, data, offset=0):
    decoder = structs[cls]
    if cls is AnnounceResponse:
        return AnnounceResponse(*decoder.unpack_from(data, offset),
                                peers=[decode(CompactIPv4Peer, data, index) for index in range(20, len(data), 6)])
    elif cls is ScrapeResponse:
        return ScrapeResponse(*decoder.unpack_from(data, offset),
                              items=[decode(ScrapeResponseItem, data, index) for index in range(8, len(data), 12)])
    elif cls is ErrorResponse:
        return ErrorResponse(*decoder.unpack_from(data, offset), data[decoder.size:])
    return cls(*decoder.unpack_from(data, offset))


def encode(obj):
    if isinstance(obj, ScrapeRequest):
        return structs[ScrapeRequest].pack(*obj[:-1]) + b''.join(obj.infohashes)
    elif isinstance(obj, ErrorResponse):
        return structs[ErrorResponse].pack(*obj[:-1]) + obj.message
    elif isinstance(obj, AnnounceResponse):
        return structs[AnnounceResponse].pack(*obj[:-1]) + b''.join([encode(peer) for peer in obj.peers])
    return structs[type(obj)].pack(*obj)
|
||||
|
||||
|
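A hedged round-trip check (not part of the file) showing how encode() and decode() pair up for the fixed-size BEP 15 messages; the transaction id below is arbitrary.

# the connect request is 16 bytes: the magic connection id, action 0, and a transaction id
req = ConnectRequest(0x41727101980, 0, 0x1234)
wire = encode(req)                      # packed with struct ">QII"
assert len(wire) == 16
assert decode(ConnectRequest, wire) == req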


def make_peer_id(random_part: Optional[str] = None) -> bytes:
    # see https://wiki.theory.org/BitTorrentSpecification#peer_id and https://www.bittorrent.org/beps/bep_0020.html
    # not to confuse with node id; peer id identifies uniquely the software, version and instance
    random_part = random_part or ''.join(random.choice(string.ascii_letters) for _ in range(20))
    return f"{PREFIX}-{'-'.join(map(str, version))}-{random_part}"[:20].encode()


class UDPTrackerClientProtocol(asyncio.DatagramProtocol):
    def __init__(self, timeout: float = DEFAULT_TIMEOUT_SECONDS):
        self.transport = None
        self.data_queue = {}
        self.timeout = timeout
        self.semaphore = asyncio.Semaphore(DEFAULT_CONCURRENCY_LIMIT)

    def connection_made(self, transport: asyncio.DatagramTransport) -> None:
        self.transport = transport

    async def request(self, obj, tracker_ip, tracker_port):
        self.data_queue[obj.transaction_id] = asyncio.get_running_loop().create_future()
        try:
            async with self.semaphore:
                self.transport.sendto(encode(obj), (tracker_ip, tracker_port))
                return await asyncio.wait_for(self.data_queue[obj.transaction_id], self.timeout)
        finally:
            self.data_queue.pop(obj.transaction_id, None)

    async def connect(self, tracker_ip, tracker_port):
        transaction_id = random.getrandbits(32)
        return decode(ConnectResponse,
                      await self.request(ConnectRequest(0x41727101980, 0, transaction_id), tracker_ip, tracker_port))

    @cache_concurrent
    @async_timed_cache(CONNECTION_EXPIRES_AFTER_SECONDS)
    async def ensure_connection_id(self, peer_id, tracker_ip, tracker_port):
        # peer_id is just to ensure cache coherency
        return (await self.connect(tracker_ip, tracker_port)).connection_id

    async def announce(self, info_hash, peer_id, port, tracker_ip, tracker_port, stopped=False):
        connection_id = await self.ensure_connection_id(peer_id, tracker_ip, tracker_port)
        # this should make the key deterministic but unique per info hash + peer id
        key = int.from_bytes(info_hash[:4], "big") ^ int.from_bytes(peer_id[:4], "big") ^ port
        transaction_id = random.getrandbits(32)
        req = AnnounceRequest(
            connection_id, 1, transaction_id, info_hash, peer_id, 0, 0, 0, 3 if stopped else 1, 0, key, -1, port)
        return decode(AnnounceResponse, await self.request(req, tracker_ip, tracker_port))

    async def scrape(self, infohashes, tracker_ip, tracker_port, connection_id=None):
        connection_id = await self.ensure_connection_id(None, tracker_ip, tracker_port)
        transaction_id = random.getrandbits(32)
        reply = await self.request(
            ScrapeRequest(connection_id, 2, transaction_id, infohashes), tracker_ip, tracker_port)
        return decode(ScrapeResponse, reply), connection_id

    def datagram_received(self, data: bytes, addr: (str, int)) -> None:
        if len(data) < 8:
            return
        transaction_id = int.from_bytes(data[4:8], byteorder="big", signed=False)
        if transaction_id in self.data_queue:
            if not self.data_queue[transaction_id].done():
                if data[3] == 3:
                    return self.data_queue[transaction_id].set_exception(Exception(decode(ErrorResponse, data).message))
                return self.data_queue[transaction_id].set_result(data)
        log.debug("unexpected packet (can be a response for a previously timed out request): %s", data.hex())

    def connection_lost(self, exc: Exception = None) -> None:
        self.transport = None
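A hedged usage sketch of the protocol class above, mirroring what TrackerClient.start() does further down; the tracker host and both ports are placeholders, not values taken from the diff.

async def announce_once(info_hash: bytes, tracker_host: str = "tracker.example.com", tracker_port: int = 6969):
    client = UDPTrackerClientProtocol()
    transport, _ = await asyncio.get_running_loop().create_datagram_endpoint(
        lambda: client, local_addr=("0.0.0.0", 0))
    try:
        tracker_ip = await resolve_host(tracker_host, tracker_port, 'udp')
        # 3333 is an arbitrary example port to advertise for incoming peer connections
        return await client.announce(info_hash, make_peer_id(), 3333, tracker_ip, tracker_port)
    finally:
        transport.close()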


class TrackerClient:
    event_controller = StreamController()

    def __init__(self, node_id, announce_port, get_servers, timeout=10.0):
        self.client = UDPTrackerClientProtocol(timeout=timeout)
        self.transport = None
        self.peer_id = make_peer_id(node_id.hex() if node_id else None)
        self.announce_port = announce_port
        self._get_servers = get_servers
        self.results = {}  # we can't probe the server before the interval, so we keep the result here until it expires
        self.tasks = {}

    async def start(self):
        self.transport, _ = await asyncio.get_running_loop().create_datagram_endpoint(
            lambda: self.client, local_addr=("0.0.0.0", 0))
        self.event_controller.stream.listen(
            lambda request: self.on_hash(request[1], request[2]) if request[0] == 'search' else None)

    def stop(self):
        while self.tasks:
            self.tasks.popitem()[1].cancel()
        if self.transport is not None:
            self.transport.close()
        self.client = None
        self.transport = None
        self.event_controller.close()

    def on_hash(self, info_hash, on_announcement=None):
        if info_hash not in self.tasks:
            task = asyncio.create_task(self.get_peer_list(info_hash, on_announcement=on_announcement))
            task.add_done_callback(lambda *_: self.tasks.pop(info_hash, None))
            self.tasks[info_hash] = task

    async def announce_many(self, *info_hashes, stopped=False):
        await asyncio.gather(
            *[self._announce_many(server, info_hashes, stopped=stopped) for server in self._get_servers()],
            return_exceptions=True)

    async def _announce_many(self, server, info_hashes, stopped=False):
        tracker_ip = await resolve_host(*server, 'udp')
        still_good_info_hashes = {
            info_hash for (info_hash, (next_announcement, _)) in self.results.get(tracker_ip, {}).items()
            if time.time() < next_announcement
        }
        results = await asyncio.gather(
            *[self._probe_server(info_hash, tracker_ip, server[1], stopped=stopped)
              for info_hash in info_hashes if info_hash not in still_good_info_hashes],
            return_exceptions=True)
        if results:
            errors = sum([1 for result in results if result is None or isinstance(result, Exception)])
            log.info("Tracker: finished announcing %d files to %s:%d, %d errors", len(results), *server, errors)

    async def get_peer_list(self, info_hash, stopped=False, on_announcement=None, no_port=False):
        found = []
        probes = [self._probe_server(info_hash, *server, stopped, no_port) for server in self._get_servers()]
        for done in asyncio.as_completed(probes):
            result = await done
            if result is not None:
                await asyncio.gather(*filter(asyncio.iscoroutine, [on_announcement(result)] if on_announcement else []))
                found.append(result)
        return found

    async def get_kademlia_peer_list(self, info_hash):
        responses = await self.get_peer_list(info_hash, no_port=True)
        return await announcement_to_kademlia_peers(*responses)

    async def _probe_server(self, info_hash, tracker_host, tracker_port, stopped=False, no_port=False):
        result = None
        try:
            tracker_host = await resolve_host(tracker_host, tracker_port, 'udp')
        except socket.error:
            log.warning("DNS failure while resolving tracker host: %s, skipping.", tracker_host)
            return
        self.results.setdefault(tracker_host, {})
        if info_hash in self.results[tracker_host]:
            next_announcement, result = self.results[tracker_host][info_hash]
            if time.time() < next_announcement:
                return result
        try:
            result = await self.client.announce(
                info_hash, self.peer_id, 0 if no_port else self.announce_port, tracker_host, tracker_port, stopped)
            self.results[tracker_host][info_hash] = (time.time() + result.interval, result)
        except asyncio.TimeoutError:  # todo: this is UDP, timeout is common, we need a better metric for failures
            self.results[tracker_host][info_hash] = (time.time() + 60.0, result)
            log.debug("Tracker timed out: %s:%d", tracker_host, tracker_port)
            return None
        log.debug("Announced: %s found %d peers for %s", tracker_host, len(result.peers), info_hash.hex()[:8])
        return result


def enqueue_tracker_search(info_hash: bytes, peer_q: asyncio.Queue):
    async def on_announcement(announcement: AnnounceResponse):
        peers = await announcement_to_kademlia_peers(announcement)
        log.info("Found %d peers from tracker for %s", len(peers), info_hash.hex()[:8])
        peer_q.put_nowait(peers)
    TrackerClient.event_controller.add(('search', info_hash, on_announcement))


def announcement_to_kademlia_peers(*announcements: AnnounceResponse):
    peers = [
        (str(ipaddress.ip_address(peer.address)), peer.port)
        for announcement in announcements for peer in announcement.peers if peer.port > 1024  # no privileged or 0
    ]
    return get_kademlia_peers_from_hosts(peers)


class UDPTrackerServerProtocol(asyncio.DatagramProtocol):  # for testing. Not suitable for production
    def __init__(self):
        self.transport = None
        self.known_conns = set()
        self.peers = {}

    def connection_made(self, transport: asyncio.DatagramTransport) -> None:
        self.transport = transport

    def add_peer(self, info_hash, ip_address: str, port: int):
        self.peers.setdefault(info_hash, [])
        self.peers[info_hash].append(encode_peer(ip_address, port))

    def datagram_received(self, data: bytes, addr: (str, int)) -> None:
        if len(data) < 16:
            return
        action = int.from_bytes(data[8:12], "big", signed=False)
        if action == 0:
            req = decode(ConnectRequest, data)
            connection_id = random.getrandbits(32)
            self.known_conns.add(connection_id)
            return self.transport.sendto(encode(ConnectResponse(0, req.transaction_id, connection_id)), addr)
        elif action == 1:
            req = decode(AnnounceRequest, data)
            if req.connection_id not in self.known_conns:
                resp = encode(ErrorResponse(3, req.transaction_id, b'Connection ID missmatch.\x00'))
            else:
                compact_address = encode_peer(addr[0], req.port)
                if req.event != 3:
                    self.add_peer(req.info_hash, addr[0], req.port)
                elif compact_address in self.peers.get(req.info_hash, []):
                    self.peers[req.info_hash].remove(compact_address)
                peers = [decode(CompactIPv4Peer, peer) for peer in self.peers[req.info_hash]]
                resp = encode(AnnounceResponse(1, req.transaction_id, 1700, 0, len(peers), peers))
            return self.transport.sendto(resp, addr)


def encode_peer(ip_address: str, port: int):
    compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), ip_address.split('.'), bytearray())
    return compact_ip + port.to_bytes(2, "big", signed=False)
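A small worked example (not part of the file) of the compact peer format shared by encode_peer() and CompactIPv4Peer: four raw address bytes followed by a big-endian port.

packed = encode_peer("127.0.0.1", 8080)
assert bytes(packed) == b'\x7f\x00\x00\x01\x1f\x90'
peer = decode(CompactIPv4Peer, bytes(packed))
assert str(ipaddress.ip_address(peer.address)) == "127.0.0.1" and peer.port == 8080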

@@ -130,21 +130,16 @@ def get_sd_hash(stream_info):
def json_dumps_pretty(obj, **kwargs):
    return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '), **kwargs)


def cancel_task(task: typing.Optional[asyncio.Task]):
    if task and not task.done():
        task.cancel()


def cancel_tasks(tasks: typing.List[typing.Optional[asyncio.Task]]):
    for task in tasks:
        cancel_task(task)


def drain_tasks(tasks: typing.List[typing.Optional[asyncio.Task]]):
    while tasks:
        cancel_task(tasks.pop())

try:
    # the standard contextlib.aclosing() is available in 3.10+
    from contextlib import aclosing  # pylint: disable=unused-import
except ImportError:
    @contextlib.asynccontextmanager
    async def aclosing(thing):
        try:
            yield thing
        finally:
            await thing.aclose()
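A hedged usage sketch of the aclosing() helper above (whichever implementation was selected): it guarantees an async generator's cleanup runs even when the consumer stops iterating early.

async def countdown(n):  # illustrative generator, not from the diff
    try:
        while n:
            yield n
            n -= 1
    finally:
        log.debug("countdown generator closed")

async def first_value():
    async with aclosing(countdown(10)) as gen:
        async for value in gen:
            return value  # gen.aclose() still runs when the block exits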

def async_timed_cache(duration: int):
    def wrapper(func):
@@ -405,7 +400,7 @@ async def fallback_get_external_ip():  # used if spv servers can't be used for i

async def _get_external_ip(default_servers) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]:
    # used if upnp is disabled or non-functioning
    from lbry.wallet.server.udp import SPVStatusClientProtocol  # pylint: disable=C0415
    from lbry.wallet.udp import SPVStatusClientProtocol  # pylint: disable=C0415

    hostname_to_ip = {}
    ip_to_hostnames = collections.defaultdict(list)
@@ -455,8 +450,8 @@ def is_running_from_bundle():


class LockWithMetrics(asyncio.Lock):
    def __init__(self, acquire_metric, held_time_metric, loop=None):
        super().__init__(loop=loop)
    def __init__(self, acquire_metric, held_time_metric):
        super().__init__()
        self._acquire_metric = acquire_metric
        self._lock_held_time_metric = held_time_metric
        self._lock_acquired_time = None
@@ -474,3 +469,18 @@ class LockWithMetrics(asyncio.Lock):
            return super().release()
        finally:
            self._lock_held_time_metric.observe(time.perf_counter() - self._lock_acquired_time)


def get_colliding_prefix_bits(first_value: bytes, second_value: bytes):
    """
    Calculates the amount of colliding prefix bits between <first_value> and <second_value>.
    This is given by the amount of bits that are the same until the first different one (via XOR),
    starting from the most significant bit to the least significant bit.
    :param first_value: first value to compare, bigger than size.
    :param second_value: second value to compare, bigger than size.
    :return: amount of prefix colliding bits.
    """
    assert len(first_value) == len(second_value), "length should be the same"
    size = len(first_value) * 8
    first_value, second_value = int.from_bytes(first_value, "big"), int.from_bytes(second_value, "big")
    return size - (first_value ^ second_value).bit_length()
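Two worked checks (not from the diff) of get_colliding_prefix_bits(): counting from the most significant bit, 0xF0 and 0x80 share only the first bit, while identical inputs collide on every bit.

assert get_colliding_prefix_bits(b'\xf0', b'\x80') == 1    # XOR = 0x70, bit_length 7, so 8 - 7 = 1
assert get_colliding_prefix_bits(b'\x00\x00', b'\x00\x00') == 16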

@@ -1,17 +1,23 @@
__node_daemon__ = 'lbrycrdd'
__node_cli__ = 'lbrycrd-cli'
__node_bin__ = ''
__node_url__ = (
    'https://github.com/lbryio/lbrycrd/releases/download/v0.17.4.6/lbrycrd-linux-1746.zip'
__lbcd__ = 'lbcd'
__lbcctl__ = 'lbcctl'
__lbcwallet__ = 'lbcwallet'
__lbcd_url__ = (
    'https://github.com/lbryio/lbcd/releases/download/' +
    'v0.22.100-rc.0/lbcd_0.22.100-rc.0_TARGET_PLATFORM.tar.gz'
)
__lbcwallet_url__ = (
    'https://github.com/lbryio/lbcwallet/releases/download/' +
    'v0.13.100-alpha.0/lbcwallet_0.13.100-alpha.0_TARGET_PLATFORM.tar.gz'
)
__spvserver__ = 'lbry.wallet.server.coin.LBCRegTest'

from .wallet import Wallet, WalletStorage, TimestampedPreferences, ENCRYPT_ON_DISK
from .manager import WalletManager
from .network import Network
from .ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
from .account import Account, AddressManager, SingleKey, HierarchicalDeterministic
from .transaction import Transaction, Output, Input
from .script import OutputScript, InputScript
from .database import SQLiteMixin, Database
from .header import Headers
from lbry.wallet.wallet import Wallet, WalletStorage, TimestampedPreferences, ENCRYPT_ON_DISK
from lbry.wallet.manager import WalletManager
from lbry.wallet.network import Network
from lbry.wallet.ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
from lbry.wallet.account import Account, AddressManager, SingleKey, HierarchicalDeterministic, \
    DeterministicChannelKeyManager
from lbry.wallet.transaction import Transaction, Output, Input
from lbry.wallet.script import OutputScript, InputScript
from lbry.wallet.database import SQLiteMixin, Database
from lbry.wallet.header import Headers
Binary file not shown.

@@ -9,11 +9,10 @@ from hashlib import sha256
from string import hexdigits
from typing import Type, Dict, Tuple, Optional, Any, List

import ecdsa
from lbry.error import InvalidPasswordError
from lbry.crypto.crypt import aes_encrypt, aes_decrypt

from .bip32 import PrivateKey, PubKey, from_extended_key_string
from .bip32 import PrivateKey, PublicKey, KeyPath, from_extended_key_string
from .mnemonic import Mnemonic
from .constants import COIN, TXO_TYPES
from .transaction import Transaction, Input, Output
@@ -34,6 +33,49 @@ def validate_claim_id(claim_id):
        raise Exception("Claim id is not hex encoded")


class DeterministicChannelKeyManager:

    def __init__(self, account: 'Account'):
        self.account = account
        self.last_known = 0
        self.cache = {}
        self._private_key: Optional[PrivateKey] = None

    @property
    def private_key(self):
        if self._private_key is None:
            if self.account.private_key is not None:
                self._private_key = self.account.private_key.child(KeyPath.CHANNEL)
        return self._private_key

    def maybe_generate_deterministic_key_for_channel(self, txo):
        if self.private_key is None:
            return
        next_private_key = self.private_key.child(self.last_known)
        public_key = next_private_key.public_key
        public_key_bytes = public_key.pubkey_bytes
        if txo.claim.channel.public_key_bytes == public_key_bytes:
            self.cache[public_key.address] = next_private_key
            self.last_known += 1

    async def ensure_cache_primed(self):
        if self.private_key is not None:
            await self.generate_next_key()

    async def generate_next_key(self) -> PrivateKey:
        db = self.account.ledger.db
        while True:
            next_private_key = self.private_key.child(self.last_known)
            public_key = next_private_key.public_key
            self.cache[public_key.address] = next_private_key
            if not await db.is_channel_key_used(self.account, public_key):
                return next_private_key
            self.last_known += 1

    def get_private_key_from_pubkey_hash(self, pubkey_hash) -> PrivateKey:
        return self.cache.get(pubkey_hash)
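A hedged sketch (not from the diff) of what the deterministic scheme above implies: channel keys hang off the account key at the KeyPath.CHANNEL branch, so the n-th channel key can always be re-derived from the seed alone.

# illustrative only: `account` is assumed to be a hydrated lbry.wallet Account
# with an unencrypted private key; the indexes mirror DeterministicChannelKeyManager
channel_root = account.private_key.child(KeyPath.CHANNEL)
first_channel_key = channel_root.child(0)   # the same key generate_next_key() would hand out first
print(first_channel_key.address)            # the pubkey hash used as the cache key above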


class AddressManager:

    name: str
@@ -79,7 +121,7 @@ class AddressManager:
    def get_private_key(self, index: int) -> PrivateKey:
        raise NotImplementedError

    def get_public_key(self, index: int) -> PubKey:
    def get_public_key(self, index: int) -> PublicKey:
        raise NotImplementedError

    async def get_max_gap(self):
@@ -119,8 +161,8 @@ class HierarchicalDeterministic(AddressManager):
    @classmethod
    def from_dict(cls, account: 'Account', d: dict) -> Tuple[AddressManager, AddressManager]:
        return (
            cls(account, 0, **d.get('receiving', {'gap': 20, 'maximum_uses_per_address': 1})),
            cls(account, 1, **d.get('change', {'gap': 6, 'maximum_uses_per_address': 1}))
            cls(account, KeyPath.RECEIVE, **d.get('receiving', {'gap': 20, 'maximum_uses_per_address': 1})),
            cls(account, KeyPath.CHANGE, **d.get('change', {'gap': 6, 'maximum_uses_per_address': 1}))
        )

    def merge(self, d: dict):
@@ -133,7 +175,7 @@ class HierarchicalDeterministic(AddressManager):
    def get_private_key(self, index: int) -> PrivateKey:
        return self.account.private_key.child(self.chain_number).child(index)

    def get_public_key(self, index: int) -> PubKey:
    def get_public_key(self, index: int) -> PublicKey:
        return self.account.public_key.child(self.chain_number).child(index)

    async def get_max_gap(self) -> int:
@@ -193,7 +235,7 @@ class SingleKey(AddressManager):
    @classmethod
    def from_dict(cls, account: 'Account', d: dict) \
            -> Tuple[AddressManager, AddressManager]:
        same_address_manager = cls(account, account.public_key, 0)
        same_address_manager = cls(account, account.public_key, KeyPath.RECEIVE)
        return same_address_manager, same_address_manager

    def to_dict_instance(self):
@@ -202,7 +244,7 @@ class SingleKey(AddressManager):
    def get_private_key(self, index: int) -> PrivateKey:
        return self.account.private_key

    def get_public_key(self, index: int) -> PubKey:
    def get_public_key(self, index: int) -> PublicKey:
        return self.account.public_key

    async def get_max_gap(self) -> int:
@@ -224,9 +266,6 @@ class SingleKey(AddressManager):

class Account:

    mnemonic_class = Mnemonic
    private_key_class = PrivateKey
    public_key_class = PubKey
    address_generators: Dict[str, Type[AddressManager]] = {
        SingleKey.name: SingleKey,
        HierarchicalDeterministic.name: HierarchicalDeterministic,
@@ -234,7 +273,7 @@ class Account:

    def __init__(self, ledger: 'Ledger', wallet: 'Wallet', name: str,
                 seed: str, private_key_string: str, encrypted: bool,
                 private_key: Optional[PrivateKey], public_key: PubKey,
                 private_key: Optional[PrivateKey], public_key: PublicKey,
                 address_generator: dict, modified_on: float, channel_keys: dict) -> None:
        self.ledger = ledger
        self.wallet = wallet
@@ -245,13 +284,14 @@ class Account:
        self.private_key_string = private_key_string
        self.init_vectors: Dict[str, bytes] = {}
        self.encrypted = encrypted
        self.private_key = private_key
        self.public_key = public_key
        self.private_key: Optional[PrivateKey] = private_key
        self.public_key: PublicKey = public_key
        generator_name = address_generator.get('name', HierarchicalDeterministic.name)
        self.address_generator = self.address_generators[generator_name]
        self.receiving, self.change = self.address_generator.from_dict(self, address_generator)
        self.address_managers = {am.chain_number: am for am in (self.receiving, self.change)}
        self.channel_keys = channel_keys
        self.deterministic_channel_keys = DeterministicChannelKeyManager(self)
        ledger.add_account(self)
        wallet.add_account(self)

@@ -266,19 +306,19 @@ class Account:
                   name: str = None, address_generator: dict = None):
        return cls.from_dict(ledger, wallet, {
            'name': name,
            'seed': cls.mnemonic_class().make_seed(),
            'seed': Mnemonic().make_seed(),
            'address_generator': address_generator or {}
        })

    @classmethod
    def get_private_key_from_seed(cls, ledger: 'Ledger', seed: str, password: str):
        return cls.private_key_class.from_seed(
            ledger, cls.mnemonic_class.mnemonic_to_seed(seed, password or 'lbryum')
        return PrivateKey.from_seed(
            ledger, Mnemonic.mnemonic_to_seed(seed, password or 'lbryum')
        )

    @classmethod
    def keys_from_dict(cls, ledger: 'Ledger', d: dict) \
            -> Tuple[str, Optional[PrivateKey], PubKey]:
            -> Tuple[str, Optional[PrivateKey], PublicKey]:
        seed = d.get('seed', '')
        private_key_string = d.get('private_key', '')
        private_key = None
@@ -449,7 +489,7 @@ class Account:
        assert not self.encrypted, "Cannot get private key on encrypted wallet account."
        return self.address_managers[chain].get_private_key(index)

    def get_public_key(self, chain: int, index: int) -> PubKey:
    def get_public_key(self, chain: int, index: int) -> PublicKey:
        return self.address_managers[chain].get_public_key(index)

    def get_balance(self, confirmations=0, include_claims=False, read_only=False, **constraints):
@@ -520,33 +560,30 @@ class Account:

        return tx

    def add_channel_private_key(self, private_key):
        public_key_bytes = private_key.get_verifying_key().to_der()
        channel_pubkey_hash = self.ledger.public_key_to_address(public_key_bytes)
        self.channel_keys[channel_pubkey_hash] = private_key.to_pem().decode()
    async def generate_channel_private_key(self):
        return await self.deterministic_channel_keys.generate_next_key()

    async def get_channel_private_key(self, public_key_bytes):
    def add_channel_private_key(self, private_key: PrivateKey):
        self.channel_keys[private_key.address] = private_key.to_pem().decode()

    async def get_channel_private_key(self, public_key_bytes) -> PrivateKey:
        channel_pubkey_hash = self.ledger.public_key_to_address(public_key_bytes)
        private_key_pem = self.channel_keys.get(channel_pubkey_hash)
        if private_key_pem:
            return await asyncio.get_event_loop().run_in_executor(
                None, ecdsa.SigningKey.from_pem, private_key_pem, sha256
            )
            return PrivateKey.from_pem(self.ledger, private_key_pem)
        return self.deterministic_channel_keys.get_private_key_from_pubkey_hash(channel_pubkey_hash)

    async def maybe_migrate_certificates(self):
        def to_der(private_key_pem):
            return ecdsa.SigningKey.from_pem(private_key_pem, hashfunc=sha256).get_verifying_key().to_der()

        if not self.channel_keys:
            return
        channel_keys = {}
        for private_key_pem in self.channel_keys.values():
            if not isinstance(private_key_pem, str):
                continue
            if "-----BEGIN EC PRIVATE KEY-----" not in private_key_pem:
            if not private_key_pem.startswith("-----BEGIN"):
                continue
            public_key_der = await asyncio.get_event_loop().run_in_executor(None, to_der, private_key_pem)
            channel_keys[self.ledger.public_key_to_address(public_key_der)] = private_key_pem
            private_key = PrivateKey.from_pem(self.ledger, private_key_pem)
            channel_keys[private_key.address] = private_key_pem
        if self.channel_keys != channel_keys:
            self.channel_keys = channel_keys
            self.wallet.save()

@@ -1,10 +1,21 @@
from coincurve import PublicKey, PrivateKey as _PrivateKey
from asn1crypto.keys import PrivateKeyInfo, ECPrivateKey
from coincurve import PublicKey as cPublicKey, PrivateKey as cPrivateKey
from coincurve.utils import (
    pem_to_der, lib as libsecp256k1, ffi as libsecp256k1_ffi
)
from coincurve.ecdsa import CDATA_SIG_LENGTH

from lbry.crypto.hash import hmac_sha512, hash160, double_sha256
from lbry.crypto.base58 import Base58
from .util import cachedproperty


class KeyPath:
    RECEIVE = 0
    CHANGE = 1
    CHANNEL = 2


class DerivationError(Exception):
    """ Raised when an invalid derivation occurs. """

@@ -71,26 +82,30 @@ class _KeyBase:
        return Base58.encode_check(self.extended_key())


class PubKey(_KeyBase):
class PublicKey(_KeyBase):
    """ A BIP32 public key. """

    def __init__(self, ledger, pubkey, chain_code, n, depth, parent=None):
        super().__init__(ledger, chain_code, n, depth, parent)
        if isinstance(pubkey, PublicKey):
        if isinstance(pubkey, cPublicKey):
            self.verifying_key = pubkey
        else:
            self.verifying_key = self._verifying_key_from_pubkey(pubkey)

    @classmethod
    def from_compressed(cls, public_key_bytes, ledger=None) -> 'PublicKey':
        return cls(ledger, public_key_bytes, bytes((0,)*32), 0, 0)

    @classmethod
    def _verifying_key_from_pubkey(cls, pubkey):
        """ Converts a 33-byte compressed pubkey into an PublicKey object. """
        """ Converts a 33-byte compressed pubkey into an coincurve.PublicKey object. """
        if not isinstance(pubkey, (bytes, bytearray)):
            raise TypeError('pubkey must be raw bytes')
        if len(pubkey) != 33:
            raise ValueError('pubkey must be 33 bytes')
        if pubkey[0] not in (2, 3):
            raise ValueError('invalid pubkey prefix byte')
        return PublicKey(pubkey)
        return cPublicKey(pubkey)

    @cachedproperty
    def pubkey_bytes(self):
@@ -105,7 +120,7 @@ class PubKey(_KeyBase):
    def ec_point(self):
        return self.verifying_key.point()

    def child(self, n: int):
    def child(self, n: int) -> 'PublicKey':
        """ Return the derived child extended pubkey at index N. """
        if not 0 <= n < (1 << 31):
            raise ValueError('invalid BIP32 public key child number')
@@ -113,7 +128,7 @@ class PubKey(_KeyBase):
        msg = self.pubkey_bytes + n.to_bytes(4, 'big')
        L_b, R_b = self._hmac_sha512(msg)  # pylint: disable=invalid-name
        derived_key = self.verifying_key.add(L_b)
        return PubKey(self.ledger, derived_key, R_b, n, self.depth + 1, self)
        return PublicKey(self.ledger, derived_key, R_b, n, self.depth + 1, self)

    def identifier(self):
        """ Return the key's identifier as 20 bytes. """
@@ -126,6 +141,36 @@ class PubKey(_KeyBase):
            self.pubkey_bytes
        )

    def verify(self, signature, digest) -> bool:
        """ Verify that a signature is valid for a 32 byte digest. """

        if len(signature) != 64:
            raise ValueError('Signature must be 64 bytes long.')

        if len(digest) != 32:
            raise ValueError('Digest must be 32 bytes long.')

        key = self.verifying_key

        raw_signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')

        parsed = libsecp256k1.secp256k1_ecdsa_signature_parse_compact(
            key.context.ctx, raw_signature, signature
        )
        assert parsed == 1

        normalized_signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')

        libsecp256k1.secp256k1_ecdsa_signature_normalize(
            key.context.ctx, normalized_signature, raw_signature
        )

        verified = libsecp256k1.secp256k1_ecdsa_verify(
            key.context.ctx, normalized_signature, digest, key.public_key
        )

        return bool(verified)


class PrivateKey(_KeyBase):
    """A BIP32 private key."""
@@ -134,7 +179,7 @@ class PrivateKey(_KeyBase):

    def __init__(self, ledger, privkey, chain_code, n, depth, parent=None):
        super().__init__(ledger, chain_code, n, depth, parent)
        if isinstance(privkey, _PrivateKey):
        if isinstance(privkey, cPrivateKey):
            self.signing_key = privkey
        else:
            self.signing_key = self._signing_key_from_privkey(privkey)
@@ -142,7 +187,7 @@ class PrivateKey(_KeyBase):
    @classmethod
    def _signing_key_from_privkey(cls, private_key):
        """ Converts a 32-byte private key into an coincurve.PrivateKey object. """
        return _PrivateKey.from_int(PrivateKey._private_key_secret_exponent(private_key))
        return cPrivateKey.from_int(PrivateKey._private_key_secret_exponent(private_key))

    @classmethod
    def _private_key_secret_exponent(cls, private_key):
@@ -154,24 +199,40 @@ class PrivateKey(_KeyBase):
        return int.from_bytes(private_key, 'big')

    @classmethod
    def from_seed(cls, ledger, seed):
    def from_seed(cls, ledger, seed) -> 'PrivateKey':
        # This hard-coded message string seems to be coin-independent...
        hmac = hmac_sha512(b'Bitcoin seed', seed)
        privkey, chain_code = hmac[:32], hmac[32:]
        return cls(ledger, privkey, chain_code, 0, 0)

    @classmethod
    def from_pem(cls, ledger, pem) -> 'PrivateKey':
        der = pem_to_der(pem.encode())
        try:
            key_int = ECPrivateKey.load(der).native['private_key']
        except ValueError:
            key_int = PrivateKeyInfo.load(der).native['private_key']['private_key']
        private_key = cPrivateKey.from_int(key_int)
        return cls(ledger, private_key, bytes((0,)*32), 0, 0)

    @classmethod
    def from_bytes(cls, ledger, key_bytes) -> 'PrivateKey':
        return cls(ledger, cPrivateKey(key_bytes), bytes((0,)*32), 0, 0)

    @cachedproperty
    def private_key_bytes(self):
        """ Return the serialized private key (no leading zero byte). """
        return self.signing_key.secret

    @cachedproperty
    def public_key(self):
    def public_key(self) -> PublicKey:
        """ Return the corresponding extended public key. """
        verifying_key = self.signing_key.public_key
        parent_pubkey = self.parent.public_key if self.parent else None
        return PubKey(self.ledger, verifying_key, self.chain_code, self.n, self.depth,
                      parent_pubkey)
        return PublicKey(
            self.ledger, verifying_key, self.chain_code,
            self.n, self.depth, parent_pubkey
        )

    def ec_point(self):
        return self.public_key.ec_point()
@@ -184,11 +245,12 @@ class PrivateKey(_KeyBase):
        """ Return the private key encoded in Wallet Import Format. """
        return self.ledger.private_key_to_wif(self.private_key_bytes)

    @property
    def address(self):
        """ The public key as a P2PKH address. """
        return self.public_key.address

    def child(self, n):
    def child(self, n) -> 'PrivateKey':
        """ Return the derived child extended private key at index N."""
        if not 0 <= n < (1 << 32):
            raise ValueError('invalid BIP32 private key child number')
@@ -207,6 +269,28 @@ class PrivateKey(_KeyBase):
        """ Produce a signature for piece of data by double hashing it and signing the hash. """
        return self.signing_key.sign(data, hasher=double_sha256)

    def sign_compact(self, digest):
        """ Produce a compact signature. """
        key = self.signing_key

        signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')
        signed = libsecp256k1.secp256k1_ecdsa_sign(
            key.context.ctx, signature, digest, key.secret,
            libsecp256k1_ffi.NULL, libsecp256k1_ffi.NULL
        )

        if not signed:
            raise ValueError('The private key was invalid.')

        serialized = libsecp256k1_ffi.new('unsigned char[%d]' % CDATA_SIG_LENGTH)
        compacted = libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(
            key.context.ctx, serialized, signature
        )
        if compacted != 1:
            raise ValueError('The signature could not be compacted.')

        return bytes(libsecp256k1_ffi.buffer(serialized, CDATA_SIG_LENGTH))

    def identifier(self):
        """Return the key's identifier as 20 bytes."""
        return self.public_key.identifier()
@@ -218,9 +302,12 @@ class PrivateKey(_KeyBase):
            b'\0' + self.private_key_bytes
        )

    def to_pem(self):
        return self.signing_key.to_pem()
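A hedged sign/verify round trip (not part of the file) tying PrivateKey.sign_compact() to PublicKey.verify(); the seed material below is purely illustrative, and the ledger argument is left as None since neither call touches it.

seed = hmac_sha512(b'example', b'not a real wallet seed')   # any 64 bytes of entropy
private_key = PrivateKey.from_seed(None, seed)              # ledger is unused here
digest = double_sha256(b'payload')
signature = private_key.sign_compact(digest)
assert private_key.public_key.verify(signature, digest)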


def _from_extended_key(ledger, ekey):
    """Return a PubKey or PrivateKey from an extended key raw bytes."""
    """Return a PublicKey or PrivateKey from an extended key raw bytes."""
    if not isinstance(ekey, (bytes, bytearray)):
        raise TypeError('extended key must be raw bytes')
    if len(ekey) != 78:
@@ -232,7 +319,7 @@ def _from_extended_key(ledger, ekey):

    if ekey[:4] == ledger.extended_public_key_prefix:
        pubkey = ekey[45:]
        key = PubKey(ledger, pubkey, chain_code, n, depth)
        key = PublicKey(ledger, pubkey, chain_code, n, depth)
    elif ekey[:4] == ledger.extended_private_key_prefix:
        if ekey[45] != 0:
            raise ValueError('invalid extended private key prefix byte')
@@ -250,6 +337,6 @@ def from_extended_key_string(ledger, ekey_str):
        xpub6BsnM1W2Y7qLMiuhi7f7dbAwQZ5Cz5gYJCRzTNainXzQXYjFwtuQXHd
        3qfi3t3KJtHxshXezfjft93w4UE7BGMtKwhqEHae3ZA7d823DVrL

    return a PubKey or PrivateKey.
    return a PublicKey or PrivateKey.
    """
    return _from_extended_key(ledger, Base58.decode_check(ekey_str))
@@ -881,4 +881,365 @@ HASHES = {
|
|||
879000: '0eb0810f4b81d1845b0a88f05449408df2e45715c9210a656f45278c5fdf7956',
|
||||
880000: 'e7d613027e3b4ca38d09bbef07998b57db237c6d67f1e8ea50024d2e0d9a1a72',
|
||||
881000: '21af4d355d8756b8bf0369b2d79b5c824148ae069026ba5c14f9dd6b7555e1db',
|
||||
882000: 'bc26f028e547ec44fc3864925bd1493211773b5cb9a9583ba4c1909b89fe0d33',
|
||||
883000: '170a624f4be04cd2fd435cfb6ba1f31b9ef5d7b084a25dfa23cd118c2752029e',
|
||||
884000: '46cccb7a12b4d01d07c211b7b8db41321cd73f30069df27bcdb3bb600c0272b0',
|
||||
885000: '7c27f79d5a99baf0f81f2b09eb5c1bf905976a0f872e02bd4ca9e82f0ed50cb0',
|
||||
886000: '256e3e00cecc72dbbfef5cea627ecf1d43b56edd5fd1642a2bc4e97c17056f34',
|
||||
887000: '658ebac7dfa62bc7a22b1a9ba4e5b425a866f7550a6b40fd07de47119fd1f7e8',
|
||||
888000: '497a9d02868605b9ff6e7f15948a83a7e07606829107e63c2e091c90c7a7b4d4',
|
||||
889000: '561daaa7ebc87e586d37a96ecfbc72484d7eb602824f38f484ed333e78208e9e',
|
||||
890000: 'ab5a8cb625b28343f8fac858eab6576c856dab88bde8cda02b80b3edfd307d71',
|
||||
891000: '2e81d9fc885ddc09222b298ac9efbb73638a5721802b9256de6505ecf122dbaa',
|
||||
892000: '73be08881b8832e986c0bb9a06c70fff346edb2afaf69630e47e4a4a90c5fece',
|
||||
893000: 'd39079dcaa4d8af1c26f0edf7e16df43cd857a31e0aa4c4123226793f1ab497f',
|
||||
894000: '0a3b677d72c590d4b1ff7a9b4098d6b52d0dc10d64c30c2766d18e6eb02872cd',
|
||||
895000: 'a3bbba831f48c5b68e494ee63015b487782c64c5c24bb29436283360c28fd1e0',
|
||||
896000: '20af178a192ca43975ab6c838fe97ca42ba6c682682eddbc6481efd153ecb0a2',
|
||||
897000: '8d0ee14b9fdb853a09ab2951d26b8f7cb8bc8038b09513bd330ee4b0bdcc4780',
|
||||
898000: 'c97fbb70f804408b131a98f9fb4c04cdf2df1655d3e8ff2e0d58ed8537349f4e',
|
||||
899000: 'eba2be80478e8dec2d66ca40b853580c5dad040351c64c177e3d8c25aff6c1b6',
|
||||
900000: 'c4dc344a993558418b93b3f60aaef0030e2a4116086577fbf1e2f544bdbddae1',
|
||||
901000: '36d84229afa63045875fc8fea0c55de8eb90694b3a37cceb825c87abf1fea998',
|
||||
902000: '8ca4890ecfc5e3f9d767e4fcdf318a1e3e3597675bbcfe534d64e76bc4e8fbf4',
|
||||
903000: '8b9f6a7514033c57668ca94fb3758cc6d1ef37ac982c2ff5a9f0f206fcd8d0a8',
|
||||
904000: 'e9ae813991f35ca89af2fe1f1b6adf9e93c6b1dd6a74f003ebbe699a30b252ea',
|
||||
905000: 'd426489d01d4f4c829f2eb68a67721d2c0e1c71e8c33ef9253593447e8603462',
|
||||
906000: '63000bbed97451e68d64485c02c1c3d90b4156237dac315f4e012ffb538e375b',
|
||||
907000: '96759653a4e514541effa7ef86d9f22a272ddde7b069149d17e9d9203a1edafb',
|
||||
908000: 'eec6477d2f3b71bde76dc2380d6e06aa8aa306ca56ba1dd15a31c22ae0db501b',
|
||||
909000: 'd5c2984cf130335aa29296ba5b17672d00360fe0ec73977326180014908c0b55',
|
||||
910000: '7b99cb1c94144f606937903e173bd9ef63bfffd3db8110693fa4c2caa0abc21f',
|
||||
911000: '95eed0d9dd9869ac6f83fa67863e77f24df69bcb90fef70918f30b2400e24ea8',
|
||||
912000: '34c3c8780c54ecced50f0a6b394309d09ee6ce37cd98794699c63771d1d91144',
|
||||
913000: '536052ddcd445702160288ef3f669ce56868c085315556c9f5ca081ef0c0b9e1',
|
||||
914000: '1bcd1fe9632f93a0a1fe7d8a1891a4fc6ef1be40ccf887524a9095ed7aa9fa44',
|
||||
915000: '139bad9fa12ec72a37b62ad8511300ebfda89330fa5d5a83861f864b6adeae67',
|
||||
916000: '81d15282214ff83e2a034212eb58abeafcb5664d3734bff13b22b4c093b20fea',
|
||||
917000: 'f31081031cebe450e4450ef397d91790fc0068e98e6746cd0aab86d17e4448f5',
|
||||
918000: '4af8eb28616ef0e859b5471650c7f8e910cd692a6b4ff3a7171a709db2f18e4e',
|
||||
919000: '78a197b5f9733e9e4dc9820e1c79bd335beb19f6b87056e48e8e21fbe27d83d6',
|
||||
920000: '33d20f86d1367f07d6731e1e2cc9305252b281b1b092403133924cc1052f501d',
|
||||
921000: '6926f1e31e7fe9b8f7a81efa73d5635f8f28c1db1708e4d57f6e7ead951a4beb',
|
||||
922000: '811e2335798eb54696a4b11ca3a44b9d79486262119383d542491afa9ae80204',
|
||||
923000: '8f47ac365bc380885db809f2818ffc7dd2076aaa0f9bf6c180df1b4358dc842e',
|
||||
924000: '535e79802c10630c17fb8fddec3ba2bf85eedbc0c076f3575f8189fe887ba993',
|
||||
925000: 'ca43bd24d17d75d55e72e45549384b395c62e1daf0d3f58f296e18168b918fbf',
|
||||
926000: '9a03be89e0725877d42296e6c995d9c48bb5f4bbd971f5a9add191af2d1c144b',
|
||||
927000: 'a14e0ef6bd1bc221dbba99031c16ddbbd76394186677c29bdf07b89fa2a6efac',
|
||||
928000: 'b16931bd7392e9db26be975b072024210fb5fe6ee22fc0809d51980aa8068a98',
|
||||
929000: '4da56a2e66fcd98a70039d9061ea5eb0fb6d9460b437d2191e47441182419a04',
|
||||
930000: '87e820e2237a54c4ea100bdd0145598f05add92185cd3d0929aa2d5099f4d5e0',
|
||||
931000: '515b22c91172157c443a47cf213014aff144181a77e276e291535ab3762bb1ae',
|
||||
932000: 'e130c6a9eb416f96256d1f90256a148957daa32f56af228d2d9ce6ff27ce2011',
|
||||
933000: '30c992ec7a9a320fb4db260373121efc7b5e7fc744f4b31defbe6a7608e0749e',
|
||||
934000: 'ec490fa0de6b1d78a4121a5044f501bbb3bd9e448c18121cea87eb8e3cadba41',
|
||||
935000: '603e4ae6a6d936c79b3f1c9f9e88305930953b9b390dac442976a6e8395fc520',
|
||||
936000: '2b756fe2de4328e598ed511b8828e5c2c6b5cdda1b5e7c1c26f8e0424c81afa9',
|
||||
937000: '1ae0f15f14a0d4819e34a6c18de9428a9e43e17d75383bffa9ffb18358e93b63',
|
||||
938000: 'cbd7001825ec87b8c6917d6e9e7dc5c8d7767788b6ffd61a61d0c612dbe5de66',
|
||||
939000: 'd770d0395aa79076044783fb37a1bb173cb95c93ff1ba82c34a72c4d8e425a03',
|
||||
940000: '3341d0a0349d091d88d233cd6ea6e0ad553d52039b4d47af51b8a8e7573a7916',
|
||||
941000: '16123b8758e99344ebe6670cd95826881b274c31d4da2a051052955a32bade3a',
|
||||
942000: 'ac7430961e77f902918fe79a52cbf6b523e3f2804ec83d0b17908e131ea9ea68',
|
||||
943000: '2ad08a6877e4687dcb7a623adeddc88403e8082efd6de28328b351282dc141e2',
|
||||
944000: '81382e8c1f47fa7c03fa1726f9b09ed1cd38140fe50683896eaa1b403d7e5fe3',
|
||||
945000: '152bfbb166da04dab16030af28ae65b3275819eed1d0bbfc11eba65616ebefd6',
|
||||
946000: '25b3da0962f87a0d3e4aec8b16483efbcab9514893a42fd31f4cb544ddc45a1f',
|
||||
947000: '2cb738ba342436628ff292797e3d36c4752d71bdc1af87fe758d469d06e36e0e',
|
||||
948000: 'b3683e18570fcc8b986720514539181ec43fb5dbc20fe314c56ab6bd31ab766a',
|
||||
949000: '94ced5bfba55ccffc909bf098d537e047d8d4cbb79f5e2a74146073f39804865',
|
||||
950000: 'b11543cd2aedae27f6ddc3d2b431c897fdcfe59ed3c926b0777bc1e99de4d12a',
|
||||
951000: '21508881a7f80fcd0b9b27bbcfba634b39c6525f5313968c4605cd55b4fec446',
|
||||
952000: 'f9b3ed919c9ca20cd2927d899ee7a86c93c2dd919dafb6fdb792f2d9f1895cb0',
|
||||
953000: 'cf578d8e80eec4102dc1b5321f10b36020b3b32f4b5d4664c90c412ca2ef6b42',
|
||||
954000: 'ed17c919ae5c4be835966b47f667d6082c75917b95584b2d2aff0e32f5c8aa98',
|
||||
955000: '948ea467fa01a20122e2146669214fdd3bb025038554609f7299ece5bca63e39',
|
||||
956000: 'b50ff4c02957ed8764215d25f206f6f1fe6d0eb712a378b937ff952dd479afd2',
|
||||
957000: '169922a3e51517ba6104a883d29aac03a9d20b4d448bd2773137b0d790e3db6b',
|
||||
958000: '92258ac2e8b53167dc30436d93f385d432bd549711ab9790ba4e8263c5c54382',
|
||||
959000: '7ca824697459eb302bcd7fba9d255fb269555abe7cf9d2dd5e54e196d751e682',
|
||||
960000: '89f9ec925d23698076d84f9e852ab04fc956ac4465827303de0c3bb0b685eb32',
|
||||
961000: '41cf75cd71bc12b93674c416e8b01b7410eb9e09eb8727ad93ff0b833c9966c9',
|
||||
962000: '7db1f1dbff3e389713067879bfedf9513ec74bb1e128b13fc2fe23ad55fd0306',
|
||||
963000: 'a35e71c611b2227adeac824d151d2f09bdbecd5765a4e62c6e74a3e4290abc66',
|
||||
964000: 'dc1811130e249d2208d6f85838512b4e5482efb0bd2f619164a68a0c60d7f248',
|
||||
965000: '92f5e25dd1c03102720dd0c3136b1a0769901bf89fcc0262a5e24405f349ca07',
|
||||
966000: '08243d780d8ba96a940f409b87d9c6b8a95c92804173b9156ada0dad35b628dc',
|
||||
967000: 'cb769a8935bb6faeb981da74f4079babbbb89476f825cc897f43e79790295260',
|
||||
968000: 'ff3fc27d2998f4dc4ac1ff378afe14c7d0f43cc328deb9c978ec0e067d1dfaf9',
|
||||
969000: 'e41a3452f45d5f025627d08c9c41017679e9c4804371dd1cc02f3ed49f85dbb2',
|
||||
970000: 'f5eaaf7ba6b47245a4a8096a7785c7b25dc6db342ac2ccbba0c321e97ab58284',
|
||||
971000: '75414062f1d4ed675dadc8f04ba10147a484aaca1ae316dc0b896a92809b3db6',
|
||||
972000: '5bcf2ee00133774c7d060a1a1863dfccc20d5127ecb542470f607dec2504fe6f',
|
||||
973000: '07d15b9656ecde2cd86a9d22c3de8b6505d6bab2aa5a94560b0db9119f1f6f6c',
|
||||
974000: '2059e7924d7a210a88f5a65abc61152506a82edccd27416e796c81b9b8003f13',
|
||||
975000: '7fcf5d8b2c0e51cfbdaa2502a9da0bdb323646899dad37dacc39af9f9e16fc5c',
|
||||
976000: '02acb8cf87a0900436eccfca50371948531041d7b8b410a902205f84dd7fb88e',
|
||||
977000: '2636dfd5a47016c893265473e78ecbf2000769d886f0d01ee7a91e9397210d15',
|
||||
978000: 'ce92f52a35096b94bea73a7d4e113bc4564a4a589b66f1ab86f61c822cf9ee76',
|
||||
979000: '21b8102f5b76be0c8e20d537ebc78ebe46bfcea6b6d2dda950ce5b48e85f72d7',
|
||||
980000: 'f4df0bd63b36105705de62266d654612d9804bad7069d41344de269657e6f084',
|
||||
981000: 'f006cd2718d98d774a5cd18394db7744c812fa149c8a63e76bab934aee89f571',
|
||||
982000: 'da5d6609265d9153022d823b0260aa07e7511ceff7a3fd2ca7ce83cb3900a661',
|
||||
983000: '3a26f3f02aa145fa8c5268fbe10dd9c3546d7dda57489ca5d4b161beb0d5a6e2',
|
||||
984000: '968e8cd37a1137797d40f39f106cae62d1e252b46c7473b9434ad5f870ee88fb',
|
||||
985000: '3129c3bf20deace1a9c92646a9d769da7a07f18dcd5b7a7b1e8cf5fd5390f8e1',
|
||||
986000: '6ce830ca5da322ddbb97fc572ea03218913d070e5910516b33c6113b02b23c21',
|
||||
987000: '7fb1a8635623847132ab766a99b792953379f782d1115b9649f5f9c5a742ca04',
|
||||
988000: '5e8e6c6da7f271129c20c4dd891dcb1df4f9d690ee7cf391c6b7fbd028a0da4c',
|
||||
989000: '12919e34bb9a9ac1d2a01e221eb8c511117fc4e1b3ae15355d95caf4673bdb08',
|
||||
990000: '016f8b18227a0c09da55594a98638ad5b0fbb4896e2ab6163ac40b6015b2811e',
|
||||
991000: 'ddf8cd6e2f4ee07530ae7567cef4fa2c2fd4a655cb20e20422e66fd49bde6489',
|
||||
992000: 'dca77707c0caa3a9605f3dadf593402339c29448869907fb31f6c624e942dcbd',
|
||||
993000: 'de9acc4c7c482ecac741fd6acbbc3a333afab52f3fe5eea4130c0770299a56dd',
|
||||
994000: '54420631f8a801a1b8f391088f599ee22cedc06f24bf67f18272feb8fe70c682',
|
||||
995000: '4b44b26e3e2495716dfd86fc42594cd4b1e4b70bdab4f0905cce4cb9556e008a',
|
||||
996000: 'd6e41fd301fc5f519c343ceb39c9ff845656a4482e4e182abdcd3963fd5fde1c',
|
||||
997000: 'd68b6a509d742b182ffb5a98b0e585a2320a5d3fe6977ad3e6cd06835ef2ea55',
|
||||
998000: '1efcdcbadbec54ce3a93a1857253614536c34f05a0b1924f24bff194dc3392e1',
|
||||
999000: '10a7713e46f47527f3819b4a9257a03f3e207d18e4917d6bcb43fdea3ba82b9a',
|
||||
1000000: '1b4ddb1436df05f07807d6337b93ee1aa8b600fd6a910a8fd5313a39e0440eec',
|
||||
1001000: 'cde0df1abdae26d2c2bdc111be15fb33231c5e167bb8b8f8eec667d71379fee4',
|
||||
1002000: 'd7ce7a96a3ca73a4dfd6a1780e23f834f339142519ea7f45d256c113e27e4857',
|
||||
1003000: 'b1a9b1c562ec62b9dd746d336b4211afc37482d0274ff692a44fa17ac9fe9a28',
|
||||
1004000: '7afd6d0fb0014fbe16a31c84d3f1731736eaeef35e40bb1a1f232fb00345deae',
|
||||
1005000: '4af61ce4cda5de58277f7a67cadea5d3f6ce56e54785b188e32306e00b0414df',
|
||||
1006000: '08e1fb7295efd4a48cb999d899a3d481b682ddbce738fecd88a6d32cbe8234f0',
|
||||
1007000: '14a367a41603dd690541daee8aa4a2882260059e3f85bd8978b7431e8f7db844',
|
||||
1008000: 'e673230e62aaefad0678611f94ff35ee8a6e18eb96438bdfb4b614f54f54dba7',
|
||||
1009000: 'e191af8fb71d0d91419abd19443af3d3f23ee4fe359bb8c390429cc838132bde',
|
||||
1010000: 'ffdba58f184cf60838b75b7899b6633e7cfd34cf36eded572c0133d07387bc49',
|
||||
1011000: '40801af3a5546cb9d53e05e21b74be09de9a421b762ca1d52d2266f5c2055ce8',
|
||||
1012000: '552519acebed0e38102f5270dc60b1da7a123600b6b94169ae74462ae454693f',
|
||||
1013000: '1eee96f48418929927eaa9642777bc806d326cfffaf077bc8695a7ecd438d631',
|
||||
1014000: 'a471093e1de2a8db586412d7351c8d88e44ea890f46e9b43251af427a0a4a879',
|
||||
1015000: '57532f5a522295cc139f008bdcb7a1e6d02e6035d5221b2687c7c216f06297a2',
|
||||
1016000: 'ec46dba07addcb6e62f58456a53c513d876f1c49ae7d76d230adb8debd26027d',
|
||||
1017000: '33ea8d25f342a7465ed71e4bab2b91007991e0994c61d321e3625301a1390322',
|
||||
1018000: '4871c03cc95d4ce0a39bd2cebbb001b2ea1cce1b3561bb841d88f43bb9d12ffd',
|
||||
1019000: 'f5248257576eb2ff4139d6374cc7ce34121cc942598cf9e04d2bd572e09189bb',
|
||||
1020000: 'e7785286897c85cfb0276957bff216039eeb11bc1ebca89d0bb586022caa5750',
|
||||
1021000: 'a30220f17d060634c5f6a1ddc5ea34b01c18fb5eb7e0e8267b66bf5a49525627',
|
||||
1022000: '6083ea49e64ac0d4507c674237cf87d30b90b285ec63d082e626df0223eb7c9c',
|
||||
1023000: '1dc5596d716bc33ee0f56fc40c1f073155a58a7692935c9e5854ef3b65b76828',
|
||||
1024000: '065adfee40dc33abff07fb55339571712b959bc1830dc60b6691e36eab1508ae',
|
||||
1025000: 'bb6903752d31278570e774b80a80782179c78f099e58c3dc4cba7afea7a471c4',
|
||||
1026000: 'f3050f3c2f3a76f5084856b0f089383517caa3f51530fbc29335308f5f170625',
|
||||
1027000: '746ed3701510d07958d11a06f22dbb839d9858373dc5a33249dd69e91bab01fd',
|
||||
1028000: '43f7a96ea6a45b78c29ad4a2f8680ef184438c2bd3686172b0564e0ae6dd7ba1',
|
||||
1029000: 'cbb9916099c59e14fe61d284374f4feaa3d43afec59e4698ed92143576f24b34',
|
||||
1030000: '2e805fc2331e32e586ea692bc3d4e6b11e1ec3f1cab6e331b459f9f1ac9a1f1e',
|
||||
1031000: '04f324f8f6d4f9901cf65f78dc91d6010ea6cf125f5ac0253b57b5f1f79e81e0',
|
||||
1032000: '60ca62f52fdfd858b0ee0fdb380648bde85ca14e2a73565205ed4ee0bc861c77',
|
||||
1033000: 'eb60aac23d599d3099cf98ed8fc3213f1bc06bc1c677429b303e9c81f79f1340',
|
||||
1034000: 'f0328df2daf119ce673ddfa7a39a84576985f701f7a7dec3f56f58c2019ebd4d',
|
||||
1035000: 'f9d3cbce3854de168d8835c96917c01be6244c8f82641e8d9398dfffec4e7107',
|
||||
1036000: '7dca97e6e1d6ed70aa7805f74b768009a270e7ebe1dd951e8727d1d2f2d271f2',
|
||||
1037000: '5329504126b2845b3044f423b521e77ff58d7d242f24bf87c87f4d8d4e03a947',
|
||||
1038000: '5bad3ad55e3daa415f3182a1f2a099fe1767e8fae34e9bb95d47e242b8971434',
|
||||
1039000: 'c29729b8ba49ac0043fe4aa6fc971f8ac3eda68ff92970957ada39a2989b2491',
|
||||
1040000: 'f303aebfc9267600c081d0c021065743f93790df6f5c924a86b773788e0c45be',
|
||||
1041000: 'a1cbe5059fa2275707785b77970c36d79b12c1ba93121bc9064ab9b64abacf7b',
|
||||
1042000: '004b0dd4e438abc54ae832d733df32a6ba35b75e6d3e0c9c1dee5a7950507295',
|
||||
1043000: '31893a3fe7bb4f6dd546c7a8de4a65990e94046aab442d18c68b6bf6acd54518',
|
||||
1044000: '2c4dd479948acc42946f94050810000b0539864ad24a67a7251bff1c4971b035',
|
||||
1045000: '1cea782d60df35a88b30ae205ce37e30abc7cad2b22181722be150bd92c53814',
|
||||
1046000: 'ee808f0efb0f2ef93e8599d8b7f0e2e7c3cdc42353e4ea5165028b961f43d548',
|
||||
1047000: '75f057e2a8cb1d46e5c943d63cc56936a6bac8b1cb89300593845a20baf39765',
|
||||
1048000: '2abcd227f5314baed85e3c5b49d3888a60085c1845c955a8bf96aa3dd6394798',
|
||||
1049000: '5d0ec24b9acd5ab21b42f68e1f3142b7bf83433b98f2fa9794586c8eff45893e',
|
||||
1050000: '1d364b13a4c17bd67a6d1e5f77c26d02faa014d7cd152b4da70380f168b8e0ff',
|
||||
1051000: 'b9a20cec21de84433be9b85817dd4803e875d9275dbc02907b29888431859bae',
|
||||
1052000: '424cb56b00407d73b309b2081dd0bf89213cf024e3aafb3090506aa0ba10f835',
|
||||
1053000: '6df3041a32fafd6a4e08778546d077cf591e1a2a16e77fe7a610efc2b542a9ff',
|
||||
1054000: '78f8dee794f3d4366019339d7ba74ad2b543ecd25dc575620f66e1d535411971',
|
||||
1055000: '43b8e9dae5addd58a7cccf62ba57ab46ffdaa2dcd113cc8ca537e9101b54c096',
|
||||
1056000: '86b7f3741343f85d93410b78cc3fbf03d49b60a664e908703016aa56a206ae7e',
|
||||
1057000: 'b033cf6ec622be6a99dff536a2cf73b36d3c3f8c3835ee17e0dd357403e85c41',
|
||||
1058000: 'a65a6db692a8358e399a5ac3c818902fdb60595262ae05531084848febead249',
|
||||
1059000: 'f6d781d2e2fdb4b7b074d1d8123875d899cdbd6be375cb4288e86f1d14a929f6',
|
||||
1060000: 'cd9019bb1de4926cca16a7bef1a46786f10a3260d467cda0775f73361795abc9',
|
||||
1061000: 'ed4f5dc6f475f95b40595632fafd9e7e5eef388b6cc15772204c0b0e9ee4e542',
|
||||
1062000: 'c44d02a890aa66979b10d1cfa597c877f498841b4e12dd9a7bdf8d4a5fccab80',
|
||||
1063000: '1c093734f5f241b36c1b9971e2759983f88f4033405a2588b4ebfd6998ac7465',
|
||||
1064000: '9e354a83b71bbb9704053bfeea038a9c3d5daad080c6406c698b047c634706a6',
|
||||
1065000: '563188accc4a6e311bd5046516a92a233f11f891b2304d37f151c5a6002b6958',
|
||||
1066000: '333f1b4e996fac87e32dec667533715b31f1736b4342806a81d568b5c5238456',
|
||||
1067000: 'df59a0b7319d5269bdf55043d91ec62bbb30829bb7054da623717a394b6ed678',
|
||||
1068000: '06d8b674a205393edaf20c1d837baadc9caf0b0a675645246263cc163302241d',
|
||||
1069000: 'ac065c48fad1383039d39e23c8367bad7cf9a37e07a5294cd7b04af5827b9961',
|
||||
1070000: '90cd8b50f94208bc459081356474a961f6b764a1217f8fd291f5e4828081b730',
|
||||
1071000: '3c0aa207ba9eea45458ab4fa26d6a027862592adb9bcce30915816e777dc6cfc',
|
||||
1072000: '3d556c08f2300b67b704d3cbf46e22866e3ac164472b5930e2ada23b08475a0f',
|
||||
1073000: 'a39b5c54c24efe3066aa203358b96baea405cd59aac6b0b48930e77799b4dd7d',
|
||||
1074000: 'e8c8273d5a50a60e8744716c9f31496fb29eca87b4d68643f4ecd7ec4e400e23',
|
||||
1075000: 'b8043ae41a1d0d7d4310c85764fcba1424733df347ffc2e8cbda1fe6ccbb5153',
|
||||
1076000: '58468db1f91805e767d334824d6bffe54e0f900d1fb2a89b105086a493053b3d',
|
||||
1077000: '04a78749b58465efa3a56d1735cd082c1f0f796e26486c7136950dbaf6effaa4',
|
||||
1078000: 'e1dd6b58c75b01a67d4a4594dc7b4b2ee9e7d7fa7b25fd6246ce0e86eff33c75',
|
||||
1079000: 'd239af017a6bb664485b14ad15e0eb703775e43018a045a8612b3697794460da',
|
||||
1080000: '29ae5503f8c1249fefeb63fd967a71a70588ee0db1c97497e16366163a684341',
|
||||
1081000: '05103ab27469e0859cbcd3daf42faa2bae798f522534697c7f2b34f7a050ee0f',
|
||||
1082000: '4553d2cb7e90b6db11d242e287fe96822e6cd60e6388b94bf9006411f202ba03',
|
||||
1083000: '97995acd178b2a142d571d5ae1c2a3deaf93a909fd91fb9c541d57f73e32dc99',
|
||||
1084000: '9e3f23376af14d76ab24cd54e321dec019af73ad61067d959ff90043acc5ffcc',
|
||||
1085000: '81c056b14f13cee0d6d6c8079fdd5a1a84c3a5c76cc9448612e8ef6d3531300e',
|
||||
1086000: '8a0004f6809bdd075915a804e43991dfe8f22e05679d2fdaf8e373f101bac5c2',
|
||||
1087000: '27c45a4c9ad24e038f2ebe40835a1c49ac7221d7185082866ee354351ba87c7a',
|
||||
1088000: 'fd27e21747117b00b4ada1cba161ac49edb57cca540f86ac5ba885050f08f824',
|
||||
1089000: 'bff867335767103bc3ed15ede5b9fde88016f8ede15dc5bf3e81ea40dcfc61ae',
|
||||
1090000: '608f75016d1db08888dd59640f63e838c19bdfa833c0cc177ad3d2b818b0db5b',
|
||||
1091000: '90750b452bd4dedaab6b57fecbfe88f71ce3d5437fad7f9ec0fdd270445c7526',
|
||||
1092000: '98287b39f9f1233017dc5d932e5c77f0521ca84587eb3f39f0e7b6c297c749af',
|
||||
1093000: '68a5846ed05c9bb142197849106838765f90f15c10b2cc938eef49b95eaa9d33',
|
||||
1094000: '5660a1aac2fc763a417fc656c8887fc8186bf613ae1ccbb1a664fb43ce1fa1d6',
|
||||
1095000: '62bad3db418b3f4cad3596881b645b72479c71deb0d39c7a4c8bd1577dc225fd',
|
||||
1096000: 'e0e4b2b183591f10dd5614c289412f2fb5e320b7d3278f7c028f42f591872666',
|
||||
1097000: 'a233a233fc2aa5dab9e75106d91388343ef969458ea974f1409a2ab5fc441911',
|
||||
1098000: '16dfa5fa6cbd1188e562697b5f00ac206960d0851ed84adf37ae975fd5ffdd6a',
|
||||
1099000: 'b8a870b7dc6d3263730c00f59d52aa6cce35dc59aa8fba715034cc2d14927260',
|
||||
1100000: 'a3cd7749743da22a3846dcc2edbf1df21b938e829419389e3bc09284797c5b43',
|
||||
1101000: '75b14c2a95e2a095949729b7c0b624bd725a2de98404a8e3247b60c977d0198e',
|
||||
1102000: '4d3af64d37064dd5f57e25d61f248a1e21c1b1cadd7bb1404e35c9fbe06f1fd4',
|
||||
1103000: 'd73c92bfed358dfcd7659228974ab75ea2fc86f2301ee47133adad8075203872',
|
||||
1104000: '30cd82354f37bc0b412123867c7e1835206022a7501853bf8c0d3df02f291645',
|
||||
1105000: '1d2ef984f26693dce77460cd2694e5da46e675077e91a1cea26051733b01a7ef',
|
||||
1106000: '51c076c304222fe3ca308ba6968c46fef448f85be13a095cecb75b90e7954698',
|
||||
1107000: '99e2221339e16acc34c9816f2ef7b866c2dd753aa3cbe484ae831959a23ece68',
|
||||
1108000: '0f1227c250296bfe88eb7eb41703f99f633cfe02870816111e0cadfe778ddb19',
|
||||
1109000: 'b35447f1ad76f95bc4f5886e4028d33acb3ad7b5000dd15516d3f11ce4baa990',
|
||||
1110000: 'ac7baff996062bfaaaddd7d496b17e3ec1c8d34b2143095645ff22fb3888ae00',
|
||||
1111000: '430bbbdcca36b2d69b6a2dd8b07c583a060a467e5f9acbc6de62462e1f7c7036',
|
||||
1112000: 'e5274dea029dc44baff55c05b0555f91b74d29ffd40e3a8c4e2c5b57f9d40bef',
|
||||
1113000: 'cf43863249fa42cfe108220dd40169dac702b0dd9cf5cb699cf2fc96feda8371',
|
||||
1114000: 'fa1c0e551784d21c451564124d2d730e616724f3e535de3c186bcdeb47e80a8f',
|
||||
1115000: '49fe6ecee35a397b83b5a704e950ad028cfb4b7e7a524021e789f4acc0fd6ffe',
|
||||
1116000: '74ecded36751aa8b7901b31f0d16d75d111fc3c40b567f649c04f74ed028aa5c',
|
||||
1117000: 'd9ca760a22190bdf545766b47d963c738a4edcc27f4d15ca801b35751577cfa7',
|
||||
1118000: 'c28d42f871682800ac4e867608227cfb6bc4c00b618e83a8556f201a1c28813c',
|
||||
1119000: 'c5fafc4e1785b0b9e84bb052e392154a5ba1aefe612998017e90772bcd554e08',
|
||||
1120000: 'aa054d428bc9ccee0761da92163817163413065fe1e67ef79a056c5233ea3476',
|
||||
1121000: '0df295bb944218503bd1bf66d2ece0c50fd22dae3391b80673a7ad1e4e5c3934',
|
||||
1122000: 'a13abb350a26673b3933b1de307a60a6845ca594d502599548c6253e21a6d8e8',
|
||||
1123000: 'a4bc6a3abf9ed1f4b14338ff0f03f83456312bc91a93fa89ae6db493050115e1',
|
||||
1124000: '65869938df99adf0dda76200291ce09a54c9bcc787e4bb62cd72c367db58f4f0',
|
||||
1125000: 'ea5e918233b14c3c73d488a906e3741c61bdcafe0393bd0404168fe80c950a46',
|
||||
1126000: 'ce88cd35104fcec51bcee77302e03162dc694802536f5b668786b2245e61bca5',
|
||||
1127000: 'ea19c0c8d205be4be87d02c5301c9ed331e7d75e25b93d1c2137c248882af515',
|
||||
1128000: '006f32d63c2a3adcf4fbad0b0629c97f1beab6446a9c27fbde9472f2d066219e',
|
||||
1129000: '218e5392e1ecf471c3bbc3d79c24dee30ac8db315dbeb61317318efb3f221163',
|
||||
1130000: '30b9da0bd8364e9cd5551b2529341a01a3b7257a238d15b2560e2c99fdb324e8',
|
||||
1131000: '8a7f382cfa023d2eba6639443e67206f8883b57d23ce7e1339234b8bb3098a82',
|
||||
1132000: 'bf9af68a6fe2112d8fe311dfd52334ae2e7b0bac6675c9ebfddb1f386c212668',
|
||||
1133000: '1a30951e2be633502a47c255a93ddbb9ed231d6bb4c55a807c0e910b437766b3',
|
||||
1134000: 'a9bcaf3300b7915e701a8e396eb13f0c7287576323420be7aab3c3ba48020f76',
|
||||
1135000: '337eed9ed072b5ad862af2d3d651f1b49fa852abc590b7e1c2dc381b496f438a',
|
||||
1136000: '208761dbc29ec58302d722a05e937a3cf9e78bfb6495be395dd7b54f02e169dc',
|
||||
1137000: '4e5b67ff3324b64e268049fdc3d82982b847ee359d409ade6368864c38a111e5',
|
||||
1138000: '55d1d0833021a664e85eec8cc90a0985e67cc80d28841aaa8c2231ec28087ebb',
|
||||
1139000: 'e750ada1ec9fa0f2f2461ed68958c7d116a699a82ec12911da5563139f8df19e',
|
||||
1140000: '9cf81407b6ccc8046f0233f97484166945758f7392bb54841c912fcb34cf205c',
|
||||
1141000: 'fccf32b2fae03e3b6b562483776625f9843cd68734c55659e2069cde7e383170',
|
||||
1142000: 'c3608c215dd6569da6c1871c4d72a09ab1caa9663647f2a9454b5693d5d72a65',
|
||||
1143000: 'bd39cb8c4e529d15bbea6baeec66afe52ca18afe32bd812f28fbb0676647cdff',
|
||||
1144000: '6e42d02538565ce7e2d9bf31a304f1fd0ac122d35d17a030160575815901b0b1',
|
||||
1145000: 'b9722e1de2904ce1219140fffb1f4f9f5a041f885faa634404238d103c738b4c',
|
||||
1146000: 'd4de4271459966cee774f538a243d7db0689b213b296463d42e45c93194d7861',
|
||||
1147000: '51fadf109f22bb85574d0fbcbd0b20992983e89aee3d415a7b1c37c44775d9a9',
|
||||
1148000: '137e1fe8da31680d21a42e7421eb608a883a497314e4404625ce44b0edadde6a',
|
||||
1149000: 'cb87867eb04203ce15e0763a2f4389376cea75e0a2877f55e2911c575bef07a8',
|
||||
1150000: '977528ca7953a2c9c19fefaa3aab7ebdec3ac324d74a07d83764ba25d9be0689',
|
||||
1151000: 'a09c51c832600ded63a19201df008075273ea248fd406886e93a2cbaa3bba46b',
|
||||
1152000: '0e5367cfa0f00dd932a5bcc00dcc807fa6825161806bed588e16a57947b4b32d',
|
||||
1153000: '55a9de3dcde2efb56a3c5fea7d22b98c1e180db9a4d4f4f6be7aae1f1cbd7608',
|
||||
1154000: 'abc58cf71c4691ebfaef920252730cf69abbe9de88b424c03051b9b03e85d45a',
|
||||
1155000: '4f074ce73c8a096620b8a32498362eb66a072eae95d561f2d53557cd513ae785',
|
||||
1156000: '540a838a0f0a8834466b17dd456d35b8acae2ec8419f8bd9a704d9ea439062ac',
|
||||
1157000: 'd5310ac671abdb658ea028db86c23fc729af965f91d67a37218c1412cf32a1f5',
|
||||
1158000: '162d906a07e6c35e7c3ebf7069a200521605a97920f5b589d31b19bfd7766ee2',
|
||||
1159000: '600bd8f5e1e62219e220f4dcb650db5812e79956f95ae8a50e83126932685ee0',
|
||||
1160000: '91319398d1a805fac8582c8485e6d84e7490d6cfa6e44e2c630665b6bce0e6b8',
|
||||
1161000: 'f7ad3cff6ee76e1e3df4abe70c600e4af66e1df55bf7b03aee12251d4455a1d4',
|
||||
1162000: '85b9fbba669c2a4d3f85cdb5123f9538c05bd66172b7236d756703f99258454d',
|
||||
1163000: '966085d767d1e5e2e8baf8eda8c11472ec5351181c418b503585284009aaea79',
|
||||
1164000: '1c94e1b531215c019b12caf407296d8868481f49524b7180c7161b0363c1f789',
|
||||
1165000: '803b6bf93735aeae2cf607824e2adf0d754b58da2516c2da1e485c697e472143',
|
||||
1166000: '872561a82f7991633d0927d25cb659d096bbe556fe6dac7a0b6a679820733069',
|
||||
1167000: '6bd7cdd605a3179b54c8af88d1638bf8133fab12cbf0a78d37cf21eddf4395a1',
|
||||
1168000: '79946f5758c1817239cc642d27298bd710983551a8236e49832c6d818b097337',
|
||||
1169000: 'b0994c60728e74de4aa361f37fa85e5296ce3188ae4e0b66d7b34fe86a239c9c',
|
||||
1170000: 'a54188a5a64e0cf8da2406d16a0ac3983b087fc7d6231b6f8abf92cf11dc78cd',
|
||||
1171000: 'ec2924d98e470cc6359821e6468df2c15d60301861d443188730342581230ef2',
|
||||
1172000: 'b4ac11116aa73ce19428009a80e583e19dc9bcd380f7f7ce272a92921d5868d2',
|
||||
1173000: '501d3551f762999dd5a799f3c5658fff2a7f3aff0511488272cd7693fefb8f9d',
|
||||
1174000: '4660074ea48a78ae453cb14b694b2844cc0fb63ed9352ed20d11158bbb5c1f28',
|
||||
1175000: '0727f6b1d9f8fe5677a9ffa0d475f53f5a419ef90b80896c22c2c95de22175de',
|
||||
1176000: '150633d6a35496c24a93c9e19817e90f649c56b7e2558f99e97325bfd5df8b17',
|
||||
1177000: '0849e19f22571b62dba8ff02f6b5a064a7ac36e7ed491321b3663567e8e17294',
|
||||
1178000: '770dd463e7bad80f689f12934e4ae06e24378d1545dcf211fd143beaef49464e',
|
||||
1179000: '059d383dcc60a49b658b674d92fc35cab07b06329c58d73818b6387cb0c06534',
|
||||
1180000: 'e547cb3c636243ca9ae4cfb92c30a0f583eda84e329a5c1e5f64a26fc6fc791e',
|
||||
1181000: '4521a4396ab02f73d45d7a3393ea1c602d255778d52c12079c88bfbad32aab43',
|
||||
1182000: '051cfe993e4b0b34233403a9e8c397dd50e8b78a30fb07e9c260604ee9e624a9',
|
||||
1183000: '44a69c99bb8b85e84ae279f2d8e5400d51cb3d5f0bcd178db49d55548cd66191',
|
||||
1184000: '2a1d23c9bb3c71a533e0c9d25b03bfa7e9db8e014645f3e7fbede6d99fff0191',
|
||||
1185000: 'bb90d6c6d77819163a9e909ee621d874707cdb21c91b1d9e861b204cf37d0ffa',
|
||||
1186000: '4a92051b738ea0e28c64c64f1eb6f0405bc7c3427bef91ff20f4c43cf084d750',
|
||||
1187000: 'f782ac330ca20fb5d8a094ee0f0f8c086a76e3f03ecc6a2c42f8fd07e52e0f41',
|
||||
1188000: '94cb7b653dd3d838c186420158cf0e73db73ec28deaf67d9a2ca902caba4141a',
|
||||
1189000: 'c8128e59b9ec948de890184578a113478ea63f7d57cb75c2c8d5c001a5a724c0',
|
||||
1190000: '4da643bd35e5b98932ae21515a6bffb9c72f2cd8d514cd2d7eac1922af785c3f',
|
||||
1191000: '0f922d86658ac3f53c5f9db360c68ab3f3253a925f23e1323820e3384214719a',
|
||||
1192000: '4c3ab631cf5ba0c236f7c64af6f790fc24448319de6f75dbd28df4e2648d0b7d',
|
||||
1193000: 'eda118d1fac3470a1f8f01f5c78108c8ecdcd6420be30f6d20f1d1831e7b6975',
|
||||
1194000: '5723fff88abd9bb5088476fa5f4221a61c6f8a718703a92f13248ad350abeea2',
|
||||
1195000: '1715846f82d011919e3446c6ce675a65fb80338bd791d4e735702c4767d9adc4',
|
||||
1196000: 'b497667996aee2db61e88f442e728be15ab0b2b64cfd43198691fcf6cdafacc8',
|
||||
1197000: '309a6170d837b8cb334fb888a64ed4e47e6592747e93c8e9d1bf7d608cfef87d',
|
||||
1198000: '3ea918ef64a67dec20051519e6aefaeb7aca2d8583baca9ad5c5bd07073e513a',
|
||||
1199000: '4ec7b7361b0243e5b2996a16e3b27acd662126b95fe542a487c7030e47ea3667',
|
||||
1200000: 'b829c742686fcd642d0f9443336d7e2c4eab81667c90ce553df1350ed10b4233',
|
||||
1201000: '44c022887f1e126fd281b1cae26b2017fa6415a64b105762c87643204ce165a5',
|
||||
1202000: 'b11cc739eb28a14f4e47be125aa7e62d6d6f90c8f8014ee70044ed506d53d938',
|
||||
1203000: '997a7c5fd7a98b39c9ca0790519924d73c3567656b605c97a6fdb7b406c3c64d',
|
||||
1204000: '7d25d872e17195ee277243f7a5a39aa64d8750cec62e4777146acf61a8e76b04',
|
||||
1205000: 'ce8486ae745a4645bee081ef3291d9505174bed05b0668d963b2998b7643dbb0',
|
||||
1206000: '46a0bcea3c411c600dffe3e06e3d1dfbf5879a7ec4dcf3848e794cefcbf2bc0b',
|
||||
1207000: '37e6297bf6e4e2bdd40401d4d7f95e3e3bdafd4a7f76b9c52865cefc6b82b20b',
|
||||
1208000: 'd09e3982a9827b8cf56a5a2f4031dc6b082926c1fd57b63beaaa6cfd534eb902',
|
||||
1209000: '54ae9010a9f146c83464e7ee60b30d9dbee36418561abc4e8d61bce9baa2d21d',
|
||||
1210000: '5dcfd33f8e5ac21c9ba8553758b8cd8afae7961cad428530b5109c2db2ebf39f',
|
||||
1211000: '91c952348bb2c3dfac0d6531a3dac770ea6dab571af257530e9c55493c96bdd9',
|
||||
1212000: 'e62cc3fe044a7f5de4c04a8aed5619548f9d5c6fad9f989d3382cb96de1d780d',
|
||||
1213000: '66b46ffdca8acf1dd04528dadb28b6ac4ce38807c1b84abd685d4ddb3dc59a34',
|
||||
1214000: '2ce4091756ad23746bab4906f46545953cadaf61deae0d78e8a10d4eb51866b1',
|
||||
1215000: '83ce3ca087799cdc4b4c5e7cfeb4a127708724a7ca76aa5f7f4ec1ed48b5fca6',
|
||||
1216000: '7d07b739b7991fbd74926281bf51bba9d5721afab39598720f9ff5f7410a6721',
|
||||
1217000: '76adf49491670d0e8379058eacf0228f330f3c18955dfea1ebe43bc11ee065f3',
|
||||
1218000: '77f422e7301a81692dec69e5c6d35fa988a00a4d820ad0ebb1d595add36558cc',
|
||||
1219000: '8ba9d944f8c468c81799294aeea8dc05ed1bb90bb26552fcd190bd88fedcddf2',
|
||||
1220000: '00330367c255e0fe51b374597995c53353bc5700ad7d603cbd4197141933fe9c',
|
||||
1221000: '3ba8b316b7964f31fdf628ed869a6fd023680cca6611257a31efe22e4d17e578',
|
||||
1222000: '016e58d3fb6a29a3f9281789359460e776e9feb2f0db500482b6e231e1272aef',
|
||||
1223000: 'fdfe767c29a3de7acd913b627d1e5fa887a1af9974f6a8a6474db822468c785c',
|
||||
1224000: '92239f6207bff3689c554e92b24fe2e7be4a2203104ad8ef08b2c6bedd9aeccf',
|
||||
1225000: '9a2f2dd9527b533d3d743efc55236e73e15192171bc8d0cd910918d1ab00aef7',
|
||||
1226000: 'eb8269c75b8c5f66e6ea88ad70883dddcf8a75a45198ca7a46eb0ec606a791bb',
|
||||
1227000: '5c82e624390cd57942dc9d64344eaa3d8991e0437e01802473053245b706290c',
|
||||
1228000: '51e9a7d727f07fc01be7c03e3dd854eb666697f05bf89259baac628520d4402c',
|
||||
1229000: 'c4bfdb651c9abdeda717fb9c8a4c8a6c9c0f78c13d3e6cae3f24f504d734c643',
|
||||
1230000: '9f1ce781d16f2334567cbfb22fff42c14d2b9290cc2883746f435a1fb127021d',
|
||||
1231000: '5c996634b377412ae0a3d8f541f3cc4a354aab72c198aa23a5cfc2678cbabf09',
|
||||
1232000: '86702316a2d1730fbae01a08f36fffe5bf6d3ebb7d76b35a1617713766698b46',
|
||||
1233000: 'fb16b63916c0287cb9b01d0c5aad626ced1b73c49a374c9009703aa90fd27a82',
|
||||
1234000: '7c6f7904602ccd86bfb05cb8d6b5547c989c57cb2e214e93f1220fa4fe29bcb0',
|
||||
1235000: '898b0f20811f52aa5a6bd0c35eff86fca3fbe3b066e423644fa77b2e269d9513',
|
||||
1236000: '39128910ef624b6a8bbd390a311b5587c0991cda834eed996d814fe410cac352',
|
||||
1237000: 'a0709afeedb64af4168ce8cf3dbda667a248df8e91da96acb2333686a2b89325',
|
||||
1238000: 'e00075e7ba8c18cc277bfc5115ae6ff6b9678e6e99efd6e45f549ef8a3981a3d',
|
||||
1239000: '3fba891600738f2d37e279209d52bbe6dc7ce005eeed62048247c96f370e7cd5',
|
||||
1240000: 'def9bf1bec9325db90bb070f532972cfdd74e814c2b5e74a4d5a7c09a963a5f1',
|
||||
1241000: '6a5d187e32bc189ac786959e1fe846031b97ae1ce202c22e1bdb1d2a963005fd',
|
||||
1242000: 'a74d7c0b104eaf76c53a3a31ce51b75bbd8e05b5e84c31f593f505a13d83634c',
|
||||
}
@@ -141,7 +141,7 @@ class CoinSelector:
_) -> List[OutputEffectiveAmountEstimator]:
""" Accumulate UTXOs at random until there is enough to cover the target. """
target = self.target + self.cost_of_change
self.random.shuffle(txos, self.random.random)
self.random.shuffle(txos, random=self.random.random)  # pylint: disable=deprecated-argument
selection = []
amount = 0
for coin in txos:
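Note on the shuffle change above: the optional random= argument of random.shuffle() was deprecated in Python 3.9 and removed in 3.11, which is why the call now names the argument and carries a pylint suppression. A minimal sketch of the deprecation-free alternative (the rng and utxos names are illustrative, not from the codebase):

import random

rng = random.Random(42)   # seeded instance keeps coin selection reproducible in tests
utxos = list(range(10))   # stand-in for the candidate UTXO estimators
rng.shuffle(utxos)        # same deterministic effect, no deprecated random= parameter
print(utxos)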
@@ -2,6 +2,7 @@ NULL_HASH32 = b'\x00'*32

CENT = 1000000
COIN = 100*CENT
DUST = 1000

TIMEOUT = 30.0

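For orientation, COIN here is the number of base units ("dewies" in the dewies helpers imported further down) per LBC. A rough conversion sketch under that assumption; lbc_to_dewies below is illustrative, not the SDK's own dewies module:

from decimal import Decimal

CENT = 1000000
COIN = 100 * CENT            # 100_000_000 base units per LBC, matching the constants above

def lbc_to_dewies(amount: str) -> int:
    # illustrative helper mirroring what dewies_to_lbc reverses
    return int(Decimal(amount) * COIN)

assert lbc_to_dewies("1.5") == 150_000_000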
@@ -9,10 +9,11 @@ from dataclasses import dataclass
from contextvars import ContextVar
from typing import Tuple, List, Union, Callable, Any, Awaitable, Iterable, Dict, Optional
from datetime import date

from prometheus_client import Gauge, Counter, Histogram
from lbry.utils import LockWithMetrics

from .bip32 import PubKey
from .bip32 import PublicKey
from .transaction import Transaction, Output, OutputScript, TXRefImmutable, Input
from .constants import TXO_TYPES, CLAIM_TYPES
from .util import date_to_julian_day
@@ -975,7 +976,9 @@ class Database(SQLiteMixin):
sql.append("LEFT JOIN txi ON (txi.position=0 AND txi.txid=txo.txid)")
return await self.db.execute_fetchall(*query(' '.join(sql), **constraints), read_only=read_only)

async def get_txos(self, wallet=None, no_tx=False, no_channel_info=False, read_only=False, **constraints):
async def get_txos(
self, wallet=None, no_tx=False, no_channel_info=False, read_only=False, **constraints
) -> List[Output]:
include_is_spent = constraints.get('include_is_spent', False)
include_is_my_input = constraints.get('include_is_my_input', False)
include_is_my_output = constraints.pop('include_is_my_output', False)
@@ -1201,13 +1204,14 @@ class Database(SQLiteMixin):
addresses = await self.select_addresses(', '.join(cols), read_only=read_only, **constraints)
if 'pubkey' in cols:
for address in addresses:
address['pubkey'] = PubKey(
address['pubkey'] = PublicKey(
self.ledger, address.pop('pubkey'), address.pop('chain_code'),
address.pop('n'), address.pop('depth')
)
return addresses

async def get_address_count(self, cols=None, read_only=False, **constraints):
self._clean_txo_constraints_for_aggregation(constraints)
count = await self.select_addresses('COUNT(*) as total', read_only=read_only, **constraints)
return count[0]['total'] or 0
@@ -1241,6 +1245,18 @@ class Database(SQLiteMixin):
async def set_address_history(self, address, history):
await self._set_address_history(address, history)

async def is_channel_key_used(self, account, key: PublicKey):
channels = await self.get_txos(
accounts=[account], txo_type=TXO_TYPES['channel'],
no_tx=True, no_channel_info=True
)
other_key_bytes = key.pubkey_bytes
for channel in channels:
claim = channel.can_decode_claim
if claim and claim.channel.public_key_bytes == other_key_bytes:
return True
return False

@staticmethod
def constrain_purchases(constraints):
accounts = constraints.pop('accounts', None)
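The new is_channel_key_used() lets the wallet confirm whether a deterministically derived channel public key is already committed to by one of the account's channel claims. A hedged usage sketch; db, account and derive_candidate_key are placeholders for the caller's own objects:

async def find_unused_channel_key(db, account, derive_candidate_key, max_gap=20):
    # probe derived keys in order until one is not referenced by an existing channel claim
    for index in range(max_gap):
        candidate = derive_candidate_key(account, index)   # hypothetical deriver
        if not await db.is_channel_key_used(account, candidate):
            return index, candidate
    raise RuntimeError("no unused channel key within the probed gap")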
@@ -16,18 +16,18 @@ from lbry.crypto.hash import hash160, double_sha256, sha256
from lbry.crypto.base58 import Base58
from lbry.utils import LRUCacheWithMetrics

from .tasks import TaskGroup
from .database import Database
from .stream import StreamController
from .dewies import dewies_to_lbc
from .account import Account, AddressManager, SingleKey
from .network import Network
from .transaction import Transaction, Output
from .header import Headers, UnvalidatedHeaders
from .checkpoints import HASHES
from .constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
from .bip32 import PubKey, PrivateKey
from .coinselection import CoinSelector
from lbry.wallet.tasks import TaskGroup
from lbry.wallet.database import Database
from lbry.wallet.stream import StreamController
from lbry.wallet.dewies import dewies_to_lbc
from lbry.wallet.account import Account, AddressManager, SingleKey
from lbry.wallet.network import Network
from lbry.wallet.transaction import Transaction, Output
from lbry.wallet.header import Headers, UnvalidatedHeaders
from lbry.wallet.checkpoints import HASHES
from lbry.wallet.constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
from lbry.wallet.bip32 import PublicKey, PrivateKey
from lbry.wallet.coinselection import CoinSelector

log = logging.getLogger(__name__)

@@ -106,7 +106,7 @@ class Ledger(metaclass=LedgerRegistry):
target_timespan = 150

default_fee_per_byte = 50
default_fee_per_name_char = 200000
default_fee_per_name_char = 0

checkpoints = HASHES

@@ -226,7 +226,7 @@ class Ledger(metaclass=LedgerRegistry):
return account.get_private_key(address_info['chain'], address_info['pubkey'].n)
return None

async def get_public_key_for_address(self, wallet, address) -> Optional[PubKey]:
async def get_public_key_for_address(self, wallet, address) -> Optional[PublicKey]:
match = await self._get_account_and_address_info_for_address(wallet, address)
if match:
_, address_info = match
@@ -329,10 +329,10 @@ class Ledger(metaclass=LedgerRegistry):
async def start(self):
if not os.path.exists(self.path):
os.mkdir(self.path)
await asyncio.wait([
await asyncio.wait(map(asyncio.create_task, [
self.db.open(),
self.headers.open()
])
]))
fully_synced = self.on_ready.first
asyncio.create_task(self.network.start())
await self.network.on_connected.first
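This asyncio.wait() change (repeated in later hunks) tracks a Python behavior change: passing bare coroutines to asyncio.wait() was deprecated in 3.8 and is rejected from 3.11, so the coroutines are wrapped in tasks first. A standalone sketch of the pattern:

import asyncio

async def open_db():
    await asyncio.sleep(0.1)       # stand-in for self.db.open()

async def open_headers():
    await asyncio.sleep(0.1)       # stand-in for self.headers.open()

async def main():
    # asyncio.wait() needs Task/Future objects on 3.11+, so create tasks explicitly
    done, pending = await asyncio.wait(map(asyncio.create_task, [open_db(), open_headers()]))
    assert not pending

asyncio.run(main())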
@@ -365,6 +365,10 @@ class Ledger(metaclass=LedgerRegistry):
await self.db.close()
await self.headers.close()

async def tasks_are_done(self):
await self._update_tasks.done.wait()
await self._other_tasks.done.wait()

@property
def local_height_including_downloaded_height(self):
return max(self.headers.height, self._download_height)
@@ -462,14 +466,15 @@ class Ledger(metaclass=LedgerRegistry):
async def subscribe_accounts(self):
if self.network.is_connected and self.accounts:
log.info("Subscribe to %i accounts", len(self.accounts))
await asyncio.wait([
await asyncio.wait(map(asyncio.create_task, [
self.subscribe_account(a) for a in self.accounts
])
]))

async def subscribe_account(self, account: Account):
for address_manager in account.address_managers.values():
await self.subscribe_addresses(address_manager, await address_manager.get_addresses())
await account.ensure_address_gap()
await account.deterministic_channel_keys.ensure_cache_primed()

async def unsubscribe_account(self, account: Account):
for address in await account.get_addresses():
@@ -550,13 +555,14 @@ class Ledger(metaclass=LedgerRegistry):
)
remote_history_txids = {txid for txid, _ in remote_history}
async for tx in self.request_synced_transactions(to_request, remote_history_txids, address):
self.maybe_has_channel_key(tx)
pending_synced_history[tx_indexes[tx.id]] = f"{tx.id}:{tx.height}:"
if len(pending_synced_history) % 100 == 0:
log.info("Syncing address %s: %d/%d", address, len(pending_synced_history), len(to_request))
log.info("Sync finished for address %s: %d/%d", address, len(pending_synced_history), len(to_request))

assert len(pending_synced_history) == len(remote_history), \
f"{len(pending_synced_history)} vs {len(remote_history)}"
f"{len(pending_synced_history)} vs {len(remote_history)} for {address}"
synced_history = ""
for remote_i, i in zip(range(len(remote_history)), sorted(pending_synced_history.keys())):
assert i == remote_i, f"{i} vs {remote_i}"
@@ -617,6 +623,12 @@ class Ledger(metaclass=LedgerRegistry):
tx.is_verified = merkle_root == header['merkle_root']
return tx

def maybe_has_channel_key(self, tx):
for txo in tx._outputs:
if txo.can_decode_claim and txo.claim.is_channel:
for account in self.accounts:
account.deterministic_channel_keys.maybe_generate_deterministic_key_for_channel(txo)

async def request_transactions(self, to_request: Tuple[Tuple[str, int], ...], cached=False):
batches = [[]]
remote_heights = {}
@@ -710,6 +722,15 @@ class Ledger(metaclass=LedgerRegistry):
return account.address_managers[details['chain']]
return None

async def broadcast_or_release(self, tx, blocking=False):
try:
await self.broadcast(tx)
except:
await self.release_tx(tx)
raise
if blocking:
await self.wait(tx, timeout=None)

def broadcast(self, tx):
# broadcast can't be a retriable call yet
return self.network.broadcast(hexlify(tx.raw).decode())
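broadcast_or_release() now lives on the Ledger (the WalletManager version further down becomes a thin delegate). It releases the transaction's reserved outputs and re-raises on any broadcast failure, so callers need no cleanup of their own. A hedged usage sketch; ledger and tx are assumed to be an existing Ledger and an already signed Transaction:

async def send(ledger, tx):
    # on failure the reserved outputs are already released before the exception propagates;
    # blocking=True additionally waits until the transaction shows up in local history
    await ledger.broadcast_or_release(tx, blocking=True)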
@@ -731,7 +752,7 @@ class Ledger(metaclass=LedgerRegistry):
while timeout and (int(time.perf_counter()) - start) <= timeout:
if await self._wait_round(tx, height, addresses):
return
raise asyncio.TimeoutError('Timed out waiting for transaction.')
raise asyncio.TimeoutError(f'Timed out waiting for transaction. {tx.id}')

async def _wait_round(self, tx: Transaction, height: int, addresses: Iterable[str]):
records = await self.db.get_addresses(address__in=addresses)
@@ -768,13 +789,9 @@ class Ledger(metaclass=LedgerRegistry):
include_is_my_output=False,
include_sent_supports=False,
include_sent_tips=False,
include_received_tips=False,
hub_server=False) -> Tuple[List[Output], dict, int, int]:
include_received_tips=False) -> Tuple[List[Output], dict, int, int]:
encoded_outputs = await query
if hub_server:
outputs = Outputs.from_grpc(encoded_outputs)
else:
outputs = Outputs.from_base64(encoded_outputs or b'')  # TODO: why is the server returning None?
outputs = Outputs.from_base64(encoded_outputs or '')  # TODO: why is the server returning None?
txs: List[Transaction] = []
if len(outputs.txs) > 0:
async for tx in self.request_transactions(tuple(outputs.txs), cached=True):
@@ -850,13 +867,10 @@ class Ledger(metaclass=LedgerRegistry):
txo.received_tips = tips
return txos, blocked, outputs.offset, outputs.total

async def resolve(self, accounts, urls, new_sdk_server=None, **kwargs):
async def resolve(self, accounts, urls, **kwargs):
txos = []
urls_copy = list(urls)
if new_sdk_server:
resolve = partial(self.network.new_resolve, new_sdk_server)
else:
resolve = partial(self.network.retriable_call, self.network.resolve)
resolve = partial(self.network.retriable_call, self.network.resolve)
while urls_copy:
batch, urls_copy = urls_copy[:100], urls_copy[100:]
txos.extend(
@@ -881,22 +895,31 @@ class Ledger(metaclass=LedgerRegistry):
return await self.network.sum_supports(new_sdk_server, **kwargs)

async def claim_search(
self, accounts, include_purchase_receipt=False, include_is_my_output=False,
new_sdk_server=None, **kwargs) -> Tuple[List[Output], dict, int, int]:
if new_sdk_server:
claim_search = partial(self.network.new_claim_search, new_sdk_server)
else:
claim_search = self.network.claim_search
self, accounts,
include_purchase_receipt=False,
include_is_my_output=False,
**kwargs) -> Tuple[List[Output], dict, int, int]:
return await self._inflate_outputs(
claim_search(**kwargs), accounts,
self.network.claim_search(**kwargs), accounts,
include_purchase_receipt=include_purchase_receipt,
include_is_my_output=include_is_my_output,
hub_server=new_sdk_server is not None
include_is_my_output=include_is_my_output
)

async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
for claim in (await self.claim_search(accounts, claim_id=claim_id, **kwargs))[0]:
return claim
# async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
# return await self.network.get_claim_by_id(claim_id)

async def get_claim_by_claim_id(self, claim_id, accounts=None, include_purchase_receipt=False,
include_is_my_output=False):
accounts = accounts or []
# return await self.network.get_claim_by_id(claim_id)
inflated = await self._inflate_outputs(
self.network.get_claim_by_id(claim_id), accounts,
include_purchase_receipt=include_purchase_receipt,
include_is_my_output=include_is_my_output,
)
txos = inflated[0]
if txos:
return txos[0]

async def _report_state(self):
try:
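With the gRPC/new_sdk_server path removed, claim_search() and the rewritten get_claim_by_claim_id() always go through the wallet server session. A hedged usage sketch; the keyword argument and the placeholder claim id are illustrative:

async def lookup(ledger, accounts):
    txos, blocked, offset, total = await ledger.claim_search(accounts, name='lbry')
    claim = await ledger.get_claim_by_claim_id('0' * 40, accounts=accounts)  # placeholder id
    return txos, total, claim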
@@ -915,9 +938,7 @@ class Ledger(metaclass=LedgerRegistry):
"%d change addresses (gap: %d), %d channels, %d certificates and %d claims. ",
account.id, balance, total_receiving, account.receiving.gap, total_change,
account.change.gap, channel_count, len(account.channel_keys), claim_count)
except Exception as err:
if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
raise
except Exception:
log.exception(
'Failed to display wallet state, please file issue '
'for this bug along with the traceback you see below:')
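The dropped isinstance(err, asyncio.CancelledError) guards are obsolete because, since Python 3.8, CancelledError derives from BaseException and is therefore never caught by an `except Exception` clause. A small standalone demonstration:

import asyncio

async def cancellable():
    try:
        await asyncio.sleep(10)
    except Exception:
        # never reached on cancellation: CancelledError is a BaseException since 3.8
        print("caught Exception")

async def main():
    task = asyncio.create_task(cancellable())
    await asyncio.sleep(0)
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        print("task was cancelled")

asyncio.run(main())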
@@ -940,9 +961,7 @@ class Ledger(metaclass=LedgerRegistry):
claim_ids = [p.purchased_claim_id for p in purchases]
try:
resolved, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
except Exception as err:
if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
raise
except Exception:
log.exception("Resolve failed while looking up purchased claim ids:")
resolved = []
lookup = {claim.claim_id: claim for claim in resolved}
@@ -1022,9 +1041,7 @@ class Ledger(metaclass=LedgerRegistry):
claim_ids = collection.claim.collection.claims.ids[offset:page_size + offset]
try:
resolve_results, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
except Exception as err:
if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
raise
except Exception:
log.exception("Resolve failed while looking up collection claim ids:")
return []
claims = []
@@ -3,7 +3,6 @@ import json
import typing
import logging
import asyncio
from distutils.util import strtobool

from binascii import unhexlify
from decimal import Decimal
@ -12,13 +11,13 @@ from typing import List, Type, MutableSequence, MutableMapping, Optional
|
|||
from lbry.error import KeyFeeAboveMaxAllowedError, WalletNotLoadedError
|
||||
from lbry.conf import Config, NOT_SET
|
||||
|
||||
from .dewies import dewies_to_lbc
|
||||
from .account import Account
|
||||
from .ledger import Ledger, LedgerRegistry
|
||||
from .transaction import Transaction, Output
|
||||
from .database import Database
|
||||
from .wallet import Wallet, WalletStorage, ENCRYPT_ON_DISK
|
||||
from .rpc.jsonrpc import CodeMessageError
|
||||
from lbry.wallet.dewies import dewies_to_lbc
|
||||
from lbry.wallet.account import Account
|
||||
from lbry.wallet.ledger import Ledger, LedgerRegistry
|
||||
from lbry.wallet.transaction import Transaction, Output
|
||||
from lbry.wallet.database import Database
|
||||
from lbry.wallet.wallet import Wallet, WalletStorage, ENCRYPT_ON_DISK
|
||||
from lbry.wallet.rpc.jsonrpc import CodeMessageError
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
|
||||
|
@ -183,7 +182,6 @@ class WalletManager:
|
|||
}[config.blockchain_name]
|
||||
|
||||
ledger_config = {
|
||||
'use_go_hub': not strtobool(os.environ.get('ENABLE_LEGACY_SEARCH') or 'yes'),
|
||||
'auto_connect': True,
|
||||
'explicit_servers': [],
|
||||
'hub_timeout': config.hub_timeout,
|
||||
|
@@ -194,6 +192,8 @@ class WalletManager:
'data_path': config.wallet_dir,
'tx_cache_size': config.transaction_cache_size
}
if 'LBRY_FEE_PER_NAME_CHAR' in os.environ:
ledger_config['fee_per_name_char'] = int(os.environ.get('LBRY_FEE_PER_NAME_CHAR'))

wallets_directory = os.path.join(config.wallet_dir, 'wallets')
if not os.path.exists(wallets_directory):
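Since the ledger's default_fee_per_name_char is now 0 (see the earlier ledger hunk), the LBRY_FEE_PER_NAME_CHAR environment variable is the remaining way to charge per claim-name character. A sketch of how an operator might restore the old default before the wallet manager is created; the value uses the same units as the old constant:

import os

os.environ['LBRY_FEE_PER_NAME_CHAR'] = '200000'   # old default, applied only when set

fee_per_char = int(os.environ.get('LBRY_FEE_PER_NAME_CHAR', '0'))
print(fee_per_char)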
@ -236,7 +236,6 @@ class WalletManager:
|
|||
|
||||
async def reset(self):
|
||||
self.ledger.config = {
|
||||
'use_go_hub': not strtobool(os.environ.get('ENABLE_LEGACY_SEARCH') or 'yes'),
|
||||
'auto_connect': True,
|
||||
'explicit_servers': [],
|
||||
'default_servers': Config.lbryum_servers.default,
|
||||
|
@@ -318,10 +317,4 @@ class WalletManager:
)

async def broadcast_or_release(self, tx, blocking=False):
try:
await self.ledger.broadcast(tx)
except:
await self.ledger.release_tx(tx)
raise
if blocking:
await self.ledger.wait(tx, timeout=None)
await self.ledger.broadcast_or_release(tx, blocking=blocking)
@@ -7,16 +7,13 @@ from time import perf_counter
from collections import defaultdict
from typing import Dict, Optional, Tuple
import aiohttp
import grpc
from lbry.schema.types.v2 import hub_pb2_grpc
from lbry.schema.types.v2.hub_pb2 import SearchRequest

from lbry import __version__
from lbry.utils import resolve_host
from lbry.error import IncompatibleWalletServerError
from lbry.wallet.rpc import RPCSession as BaseClientSession, Connector, RPCError, ProtocolError
from lbry.wallet.stream import StreamController
from lbry.wallet.server.udp import SPVStatusClientProtocol, SPVPong
from lbry.wallet.udp import SPVStatusClientProtocol, SPVPong
from lbry.conf import KnownHubsList

log = logging.getLogger(__name__)
@ -120,9 +117,9 @@ class ClientSession(BaseClientSession):
|
|||
)
|
||||
else:
|
||||
await asyncio.sleep(max(0, max_idle - (now - self.last_send)))
|
||||
except Exception as err:
|
||||
except (Exception, asyncio.CancelledError) as err:
|
||||
if isinstance(err, asyncio.CancelledError):
|
||||
log.warning("closing connection to %s:%i", *self.server)
|
||||
log.info("closing connection to %s:%i", *self.server)
|
||||
else:
|
||||
log.exception("lost connection to spv")
|
||||
finally:
|
||||
|
@ -140,7 +137,7 @@ class ClientSession(BaseClientSession):
|
|||
controller.add(request.args)
|
||||
|
||||
def connection_lost(self, exc):
|
||||
log.warning("Connection lost: %s:%d", *self.server)
|
||||
log.debug("Connection lost: %s:%d", *self.server)
|
||||
super().connection_lost(exc)
|
||||
self.response_time = None
|
||||
self.connection_latency = None
|
||||
|
@ -217,7 +214,7 @@ class Network:
|
|||
def loop_task_done_callback(f):
|
||||
try:
|
||||
f.result()
|
||||
except Exception:
|
||||
except (Exception, asyncio.CancelledError):
|
||||
if self.running:
|
||||
log.exception("wallet server connection loop crashed")
|
||||
|
||||
|
@ -238,7 +235,7 @@ class Network:
|
|||
log.exception("error looking up dns for spv server %s:%i", server, port)
|
||||
|
||||
# accumulate the dns results
|
||||
if self.config['explicit_servers']:
|
||||
if self.config.get('explicit_servers', []):
|
||||
hubs = self.config['explicit_servers']
|
||||
elif self.known_hubs:
|
||||
hubs = self.known_hubs
|
||||
|
@ -254,7 +251,7 @@ class Network:
|
|||
sent_ping_timestamps = {}
|
||||
_, ip_to_hostnames = await self.resolve_spv_dns()
|
||||
n = len(ip_to_hostnames)
|
||||
log.info("%i possible spv servers to try (%i urls in config)", n, len(self.config['explicit_servers']))
|
||||
log.info("%i possible spv servers to try (%i urls in config)", n, len(self.config.get('explicit_servers', [])))
|
||||
pongs = {}
|
||||
known_hubs = self.known_hubs
|
||||
try:
|
||||
|
@ -299,11 +296,11 @@ class Network:
|
|||
if (pong is not None and self.jurisdiction is not None) and \
|
||||
(pong.country_name != self.jurisdiction):
|
||||
continue
|
||||
client = ClientSession(network=self, server=(host, port), timeout=self.config['hub_timeout'],
|
||||
concurrency=self.config['concurrent_hub_requests'])
|
||||
client = ClientSession(network=self, server=(host, port), timeout=self.config.get('hub_timeout', 30),
|
||||
concurrency=self.config.get('concurrent_hub_requests', 30))
|
||||
try:
|
||||
await client.create_connection()
|
||||
log.warning("Connected to spv server %s:%i", host, port)
|
||||
log.info("Connected to spv server %s:%i", host, port)
|
||||
await client.ensure_server_version()
|
||||
return client
|
||||
except (asyncio.TimeoutError, ConnectionError, OSError, IncompatibleWalletServerError, RPCError):
|
||||
|
@ -315,7 +312,8 @@ class Network:
|
|||
sleep_delay = 30
|
||||
while self.running:
|
||||
await asyncio.wait(
|
||||
[asyncio.sleep(30), self._urgent_need_reconnect.wait()], return_when=asyncio.FIRST_COMPLETED
|
||||
map(asyncio.create_task, [asyncio.sleep(30), self._urgent_need_reconnect.wait()]),
|
||||
return_when=asyncio.FIRST_COMPLETED
|
||||
)
|
||||
if self._urgent_need_reconnect.is_set():
|
||||
sleep_delay = 30
|
||||
|
@ -341,14 +339,13 @@ class Network:
|
|||
try:
|
||||
if not self._urgent_need_reconnect.is_set():
|
||||
await asyncio.wait(
|
||||
[self._keepalive_task, self._urgent_need_reconnect.wait()],
|
||||
[self._keepalive_task, asyncio.create_task(self._urgent_need_reconnect.wait())],
|
||||
return_when=asyncio.FIRST_COMPLETED
|
||||
)
|
||||
else:
|
||||
await self._keepalive_task
|
||||
if self._urgent_need_reconnect.is_set():
|
||||
log.warning("urgent reconnect needed")
|
||||
self._urgent_need_reconnect.clear()
|
||||
if self._keepalive_task and not self._keepalive_task.done():
|
||||
self._keepalive_task.cancel()
|
||||
except asyncio.CancelledError:
|
||||
|
@ -357,7 +354,7 @@ class Network:
|
|||
self._keepalive_task = None
|
||||
self.client = None
|
||||
self.server_features = None
|
||||
log.warning("connection lost to %s", server_str)
|
||||
log.info("connection lost to %s", server_str)
|
||||
log.info("network loop finished")
|
||||
|
||||
async def stop(self):
|
||||
|
@ -394,7 +391,6 @@ class Network:
|
|||
log.warning("Wallet server call timed out, retrying.")
|
||||
except ConnectionError:
|
||||
log.warning("connection error")
|
||||
|
||||
raise asyncio.CancelledError() # if we got here, we are shutting down
|
||||
|
||||
def _update_remote_height(self, header_args):
|
||||
|
@ -465,27 +461,18 @@ class Network:
|
|||
def get_server_features(self):
|
||||
return self.rpc('server.features', (), restricted=True)
|
||||
|
||||
# def get_claims_by_ids(self, claim_ids):
|
||||
# return self.rpc('blockchain.claimtrie.getclaimsbyids', claim_ids)
|
||||
|
||||
def get_claim_by_id(self, claim_id):
|
||||
return self.rpc('blockchain.claimtrie.getclaimbyid', [claim_id])
|
||||
|
||||
def resolve(self, urls, session_override=None):
|
||||
return self.rpc('blockchain.claimtrie.resolve', urls, False, session_override)
|
||||
|
||||
def claim_search(self, session_override=None, **kwargs):
|
||||
return self.rpc('blockchain.claimtrie.search', kwargs, False, session_override)
|
||||
|
||||
async def new_resolve(self, server, urls):
|
||||
message = {"method": "resolve", "params": {"urls": urls, "protobuf": True}}
|
||||
async with self.aiohttp_session.post(server, json=message) as r:
|
||||
result = await r.json()
|
||||
return result['result']
|
||||
|
||||
async def new_claim_search(self, server, **kwargs):
|
||||
async with grpc.aio.insecure_channel(server) as channel:
|
||||
stub = hub_pb2_grpc.HubStub(channel)
|
||||
try:
|
||||
response = await stub.Search(SearchRequest(**kwargs))
|
||||
except grpc.aio.AioRpcError as error:
|
||||
raise RPCError(error.code(), error.details())
|
||||
return response
|
||||
|
||||
async def sum_supports(self, server, **kwargs):
|
||||
message = {"method": "support_sum", "params": kwargs}
|
||||
async with self.aiohttp_session.post(server, json=message) as r:
|
||||
|
|
|
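With new_resolve()/new_claim_search() gone, claim lookups use the electrum-style session RPCs shown above, including the re-enabled blockchain.claimtrie.getclaimbyid. A hedged sketch of the client-side calls; network is assumed to be a connected Network and the keyword argument is illustrative:

async def fetch(network, claim_id, urls):
    claim = await network.get_claim_by_id(claim_id)        # blockchain.claimtrie.getclaimbyid
    resolved = await network.resolve(urls)                 # blockchain.claimtrie.resolve
    results = await network.claim_search(name='lbry')      # blockchain.claimtrie.search
    return claim, resolved, results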
@@ -1,5 +1,2 @@
__hub_url__ = (
"https://github.com/lbryio/hub/releases/download/v0.2021.08.24-beta/hub"
)
from .node import Conductor
from .service import ConductorService
from lbry.wallet.orchstr8.node import Conductor
from lbry.wallet.orchstr8.service import ConductorService
@ -5,7 +5,9 @@ import aiohttp
|
|||
|
||||
from lbry import wallet
|
||||
from lbry.wallet.orchstr8.node import (
|
||||
Conductor, get_blockchain_node_from_ledger
|
||||
Conductor,
|
||||
get_lbcd_node_from_ledger,
|
||||
get_lbcwallet_node_from_ledger
|
||||
)
|
||||
from lbry.wallet.orchstr8.service import ConductorService
|
||||
|
||||
|
@@ -16,10 +18,11 @@ def get_argument_parser():
)
subparsers = parser.add_subparsers(dest='command', help='sub-command help')

subparsers.add_parser("download", help="Download blockchain node binary.")
subparsers.add_parser("download", help="Download lbcd and lbcwallet node binaries.")

start = subparsers.add_parser("start", help="Start orchstr8 service.")
start.add_argument("--blockchain", help="Hostname to start blockchain node.")
start.add_argument("--lbcd", help="Hostname to start lbcd node.")
start.add_argument("--lbcwallet", help="Hostname to start lbcwallet node.")
start.add_argument("--spv", help="Hostname to start SPV server.")
start.add_argument("--wallet", help="Hostname to start wallet daemon.")

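The orchstr8 harness now downloads and starts lbcd and lbcwallet instead of a single blockchain node. A hedged sketch of driving the CLI programmatically; the module path lbry.wallet.orchstr8.cli is assumed:

import subprocess
import sys

subprocess.run([sys.executable, "-m", "lbry.wallet.orchstr8.cli", "download"], check=True)
subprocess.run([sys.executable, "-m", "lbry.wallet.orchstr8.cli", "start",
                "--lbcd", "localhost", "--lbcwallet", "localhost",
                "--spv", "localhost", "--wallet", "localhost"], check=True)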
@ -47,7 +50,8 @@ def main():
|
|||
|
||||
if command == 'download':
|
||||
logging.getLogger('blockchain').setLevel(logging.INFO)
|
||||
get_blockchain_node_from_ledger(wallet).ensure()
|
||||
get_lbcd_node_from_ledger(wallet).ensure()
|
||||
get_lbcwallet_node_from_ledger(wallet).ensure()
|
||||
|
||||
elif command == 'generate':
|
||||
loop.run_until_complete(run_remote_command(
|
||||
|
@ -57,9 +61,12 @@ def main():
|
|||
elif command == 'start':
|
||||
|
||||
conductor = Conductor()
|
||||
if getattr(args, 'blockchain', False):
|
||||
conductor.blockchain_node.hostname = args.blockchain
|
||||
loop.run_until_complete(conductor.start_blockchain())
|
||||
if getattr(args, 'lbcd', False):
|
||||
conductor.lbcd_node.hostname = args.lbcd
|
||||
loop.run_until_complete(conductor.start_lbcd())
|
||||
if getattr(args, 'lbcwallet', False):
|
||||
conductor.lbcwallet_node.hostname = args.lbcwallet
|
||||
loop.run_until_complete(conductor.start_lbcwallet())
|
||||
if getattr(args, 'spv', False):
|
||||
conductor.spv_node.hostname = args.spv
|
||||
loop.run_until_complete(conductor.start_spv())
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
# pylint: disable=import-error
|
||||
import os
|
||||
import json
|
||||
import shutil
|
||||
|
@ -7,35 +8,44 @@ import tarfile
|
|||
import logging
|
||||
import tempfile
|
||||
import subprocess
|
||||
import importlib
|
||||
from distutils.util import strtobool
|
||||
import platform
|
||||
|
||||
from binascii import hexlify
|
||||
from typing import Type, Optional
|
||||
import urllib.request
|
||||
from uuid import uuid4
|
||||
|
||||
|
||||
import lbry
|
||||
from lbry.wallet.server.server import Server
|
||||
from lbry.wallet.server.env import Env
|
||||
from lbry.wallet import Wallet, Ledger, RegTestLedger, WalletManager, Account, BlockHeightEvent
|
||||
from lbry.conf import KnownHubsList, Config
|
||||
from lbry.wallet.orchstr8 import __hub_url__
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_spvserver_from_ledger(ledger_module):
|
||||
spvserver_path, regtest_class_name = ledger_module.__spvserver__.rsplit('.', 1)
|
||||
spvserver_module = importlib.import_module(spvserver_path)
|
||||
return getattr(spvserver_module, regtest_class_name)
|
||||
try:
|
||||
from hub.herald.env import ServerEnv
|
||||
from hub.scribe.env import BlockchainEnv
|
||||
from hub.elastic_sync.env import ElasticEnv
|
||||
from hub.herald.service import HubServerService
|
||||
from hub.elastic_sync.service import ElasticSyncService
|
||||
from hub.scribe.service import BlockchainProcessorService
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
def get_blockchain_node_from_ledger(ledger_module):
|
||||
return BlockchainNode(
|
||||
ledger_module.__node_url__,
|
||||
os.path.join(ledger_module.__node_bin__, ledger_module.__node_daemon__),
|
||||
os.path.join(ledger_module.__node_bin__, ledger_module.__node_cli__)
|
||||
def get_lbcd_node_from_ledger(ledger_module):
|
||||
return LBCDNode(
|
||||
ledger_module.__lbcd_url__,
|
||||
ledger_module.__lbcd__,
|
||||
ledger_module.__lbcctl__
|
||||
)
|
||||
|
||||
|
||||
def get_lbcwallet_node_from_ledger(ledger_module):
|
||||
return LBCWalletNode(
|
||||
ledger_module.__lbcwallet_url__,
|
||||
ledger_module.__lbcwallet__,
|
||||
ledger_module.__lbcctl__
|
||||
)
|
||||
|
||||
|
||||
|
@ -43,53 +53,37 @@ class Conductor:
|
|||
|
||||
def __init__(self, seed=None):
|
||||
self.manager_module = WalletManager
|
||||
self.spv_module = get_spvserver_from_ledger(lbry.wallet)
|
||||
|
||||
self.blockchain_node = get_blockchain_node_from_ledger(lbry.wallet)
|
||||
self.spv_node = SPVNode(self.spv_module)
|
||||
self.lbcd_node = get_lbcd_node_from_ledger(lbry.wallet)
|
||||
self.lbcwallet_node = get_lbcwallet_node_from_ledger(lbry.wallet)
|
||||
self.spv_node = SPVNode()
|
||||
self.wallet_node = WalletNode(
|
||||
self.manager_module, RegTestLedger, default_seed=seed
|
||||
)
|
||||
self.hub_node = HubNode(__hub_url__, "hub", self.spv_node)
|
||||
|
||||
self.blockchain_started = False
|
||||
self.lbcd_started = False
|
||||
self.lbcwallet_started = False
|
||||
self.spv_started = False
|
||||
self.wallet_started = False
|
||||
self.hub_started = False
|
||||
|
||||
self.log = log.getChild('conductor')
|
||||
|
||||
async def start_blockchain(self):
|
||||
if not self.blockchain_started:
|
||||
asyncio.create_task(self.blockchain_node.start())
|
||||
await self.blockchain_node.running.wait()
|
||||
await self.blockchain_node.generate(200)
|
||||
self.blockchain_started = True
|
||||
async def start_lbcd(self):
|
||||
if not self.lbcd_started:
|
||||
await self.lbcd_node.start()
|
||||
self.lbcd_started = True
|
||||
|
||||
async def stop_blockchain(self):
|
||||
if self.blockchain_started:
|
||||
await self.blockchain_node.stop(cleanup=True)
|
||||
self.blockchain_started = False
|
||||
|
||||
async def start_hub(self):
|
||||
if not self.hub_started:
|
||||
asyncio.create_task(self.hub_node.start())
|
||||
await self.blockchain_node.running.wait()
|
||||
self.hub_started = True
|
||||
|
||||
async def stop_hub(self):
|
||||
if self.hub_started:
|
||||
await self.hub_node.stop(cleanup=True)
|
||||
self.hub_started = False
|
||||
async def stop_lbcd(self, cleanup=True):
|
||||
if self.lbcd_started:
|
||||
await self.lbcd_node.stop(cleanup)
|
||||
self.lbcd_started = False
|
||||
|
||||
async def start_spv(self):
|
||||
if not self.spv_started:
|
||||
await self.spv_node.start(self.blockchain_node)
|
||||
await self.spv_node.start(self.lbcwallet_node)
|
||||
self.spv_started = True
|
||||
|
||||
async def stop_spv(self):
|
||||
async def stop_spv(self, cleanup=True):
|
||||
if self.spv_started:
|
||||
await self.spv_node.stop(cleanup=True)
|
||||
await self.spv_node.stop(cleanup)
|
||||
self.spv_started = False
|
||||
|
||||
async def start_wallet(self):
|
||||
|
@ -97,13 +91,30 @@ class Conductor:
|
|||
await self.wallet_node.start(self.spv_node)
|
||||
self.wallet_started = True
|
||||
|
||||
async def stop_wallet(self):
|
||||
async def stop_wallet(self, cleanup=True):
|
||||
if self.wallet_started:
|
||||
await self.wallet_node.stop(cleanup=True)
|
||||
await self.wallet_node.stop(cleanup)
|
||||
self.wallet_started = False
|
||||
|
||||
async def start_lbcwallet(self, clean=True):
|
||||
if not self.lbcwallet_started:
|
||||
await self.lbcwallet_node.start()
|
||||
if clean:
|
||||
mining_addr = await self.lbcwallet_node.get_new_address()
|
||||
self.lbcwallet_node.mining_addr = mining_addr
|
||||
await self.lbcwallet_node.generate(200)
|
||||
# unlock the wallet for the next 1 hour
|
||||
await self.lbcwallet_node.wallet_passphrase("password", 3600)
|
||||
self.lbcwallet_started = True
|
||||
|
||||
async def stop_lbcwallet(self, cleanup=True):
|
||||
if self.lbcwallet_started:
|
||||
await self.lbcwallet_node.stop(cleanup)
|
||||
self.lbcwallet_started = False
|
||||
|
||||
async def start(self):
|
||||
await self.start_blockchain()
|
||||
await self.start_lbcd()
|
||||
await self.start_lbcwallet()
|
||||
await self.start_spv()
|
||||
await self.start_wallet()
|
||||
|
||||
|
@ -111,7 +122,8 @@ class Conductor:
|
|||
all_the_stops = [
|
||||
self.stop_wallet,
|
||||
self.stop_spv,
|
||||
self.stop_blockchain
|
||||
self.stop_lbcwallet,
|
||||
self.stop_lbcd
|
||||
]
|
||||
for stop in all_the_stops:
|
||||
try:
|
||||
|
@ -119,6 +131,12 @@ class Conductor:
|
|||
except Exception as e:
|
||||
log.exception('Exception raised while stopping services:', exc_info=e)
|
||||
|
||||
async def clear_mempool(self):
|
||||
await self.stop_lbcwallet(cleanup=False)
|
||||
await self.stop_lbcd(cleanup=False)
|
||||
await self.start_lbcd()
|
||||
await self.start_lbcwallet(clean=False)
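Taken together, the Conductor now sequences lbcd, lbcwallet, the SPV hub and the wallet node, and clear_mempool() bounces the two chain processes without wiping their data directories. A hedged end-to-end sketch using only the methods shown in this diff:

import asyncio
from lbry.wallet.orchstr8.node import Conductor

async def run_harness():
    conductor = Conductor()
    await conductor.start()                 # lbcd -> lbcwallet -> spv hub -> wallet node
    try:
        await conductor.clear_mempool()     # restart lbcd/lbcwallet, keep their data dirs
    finally:
        await conductor.stop_wallet()
        await conductor.stop_spv()
        await conductor.stop_lbcwallet()
        await conductor.stop_lbcd()

asyncio.run(run_harness())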
|
||||
|
||||
|
||||
class WalletNode:
|
||||
|
||||
|
@ -139,14 +157,14 @@ class WalletNode:
|
|||
|
||||
async def start(self, spv_node: 'SPVNode', seed=None, connect=True, config=None):
|
||||
wallets_dir = os.path.join(self.data_path, 'wallets')
|
||||
os.mkdir(wallets_dir)
|
||||
wallet_file_name = os.path.join(wallets_dir, 'my_wallet.json')
|
||||
with open(wallet_file_name, 'w') as wallet_file:
|
||||
wallet_file.write('{"version": 1, "accounts": []}\n')
|
||||
if not os.path.isdir(wallets_dir):
|
||||
os.mkdir(wallets_dir)
|
||||
with open(wallet_file_name, 'w') as wallet_file:
|
||||
wallet_file.write('{"version": 1, "accounts": []}\n')
|
||||
self.manager = self.manager_class.from_config({
|
||||
'ledgers': {
|
||||
self.ledger_class.get_id(): {
|
||||
'use_go_hub': not strtobool(os.environ.get('ENABLE_LEGACY_SEARCH') or 'yes'),
|
||||
'api_port': self.port,
|
||||
'explicit_servers': [(spv_node.hostname, spv_node.port)],
|
||||
'default_servers': Config.lbryum_servers.default,
|
||||
|
@ -154,6 +172,7 @@ class WalletNode:
|
|||
'known_hubs': config.known_hubs if config else KnownHubsList(),
|
||||
'hub_timeout': 30,
|
||||
'concurrent_hub_requests': 32,
|
||||
'fee_per_name_char': 200000
|
||||
}
|
||||
},
|
||||
'wallets': [wallet_file_name]
|
||||
|
@ -184,56 +203,83 @@ class WalletNode:
|
|||
|
||||
|
||||
class SPVNode:
|
||||
|
||||
def __init__(self, coin_class, node_number=1):
|
||||
self.coin_class = coin_class
|
||||
def __init__(self, node_number=1):
|
||||
self.node_number = node_number
|
||||
self.controller = None
|
||||
self.data_path = None
|
||||
self.server = None
|
||||
self.server: Optional[HubServerService] = None
|
||||
self.writer: Optional[BlockchainProcessorService] = None
|
||||
self.es_writer: Optional[ElasticSyncService] = None
|
||||
self.hostname = 'localhost'
|
||||
self.port = 50001 + node_number # avoid conflict with default daemon
|
||||
self.udp_port = self.port
|
||||
self.elastic_notifier_port = 19080 + node_number
|
||||
self.elastic_services = f'localhost:9200/localhost:{self.elastic_notifier_port}'
|
||||
self.session_timeout = 600
|
||||
self.rpc_port = '0' # disabled by default
|
||||
self.stopped = False
|
||||
self.index_name = None
|
||||
|
||||
async def start(self, blockchain_node: 'BlockchainNode', extraconf=None):
|
||||
self.data_path = tempfile.mkdtemp()
|
||||
self.stopped = True
|
||||
self.index_name = uuid4().hex
|
||||
conf = {
|
||||
'DESCRIPTION': '',
|
||||
'PAYMENT_ADDRESS': '',
|
||||
'DAILY_FEE': '0',
|
||||
'DB_DIRECTORY': self.data_path,
|
||||
'DAEMON_URL': blockchain_node.rpc_url,
|
||||
'REORG_LIMIT': '100',
|
||||
'HOST': self.hostname,
|
||||
'TCP_PORT': str(self.port),
|
||||
'UDP_PORT': str(self.udp_port),
|
||||
'SESSION_TIMEOUT': str(self.session_timeout),
|
||||
'MAX_QUERY_WORKERS': '0',
|
||||
'INDIVIDUAL_TAG_INDEXES': '',
|
||||
'RPC_PORT': self.rpc_port,
|
||||
'ES_INDEX_PREFIX': self.index_name,
|
||||
'ES_MODE': 'writer',
|
||||
}
|
||||
if extraconf:
|
||||
conf.update(extraconf)
|
||||
# TODO: don't use os.environ
|
||||
os.environ.update(conf)
|
||||
self.server = Server(Env(self.coin_class))
|
||||
self.server.mempool.refresh_secs = self.server.bp.prefetcher.polling_delay = 0.5
|
||||
await self.server.start()
|
||||
|
||||
async def start(self, lbcwallet_node: 'LBCWalletNode', extraconf=None):
|
||||
if not self.stopped:
|
||||
log.warning("spv node is already running")
|
||||
return
|
||||
self.stopped = False
|
||||
try:
|
||||
self.data_path = tempfile.mkdtemp()
|
||||
conf = {
|
||||
'description': '',
|
||||
'payment_address': '',
|
||||
'daily_fee': '0',
|
||||
'db_dir': self.data_path,
|
||||
'daemon_url': lbcwallet_node.rpc_url,
|
||||
'reorg_limit': 100,
|
||||
'host': self.hostname,
|
||||
'tcp_port': self.port,
|
||||
'udp_port': self.udp_port,
|
||||
'elastic_services': self.elastic_services,
|
||||
'session_timeout': self.session_timeout,
|
||||
'max_query_workers': 0,
|
||||
'es_index_prefix': self.index_name,
|
||||
'chain': 'regtest',
|
||||
'index_address_status': False
|
||||
}
|
||||
if extraconf:
|
||||
conf.update(extraconf)
|
||||
self.writer = BlockchainProcessorService(
|
||||
BlockchainEnv(db_dir=self.data_path, daemon_url=lbcwallet_node.rpc_url,
|
||||
reorg_limit=100, max_query_workers=0, chain='regtest', index_address_status=False)
|
||||
)
|
||||
self.server = HubServerService(ServerEnv(**conf))
|
||||
self.es_writer = ElasticSyncService(
|
||||
ElasticEnv(
|
||||
db_dir=self.data_path, reorg_limit=100, max_query_workers=0, chain='regtest',
|
||||
elastic_notifier_port=self.elastic_notifier_port,
|
||||
es_index_prefix=self.index_name,
|
||||
filtering_channel_ids=(extraconf or {}).get('filtering_channel_ids'),
|
||||
blocking_channel_ids=(extraconf or {}).get('blocking_channel_ids')
|
||||
)
|
||||
)
|
||||
await self.writer.start()
|
||||
await self.es_writer.start()
|
||||
await self.server.start()
|
||||
except Exception as e:
|
||||
self.stopped = True
|
||||
log.exception("failed to start spv node")
|
||||
raise e
|
||||
|
||||
async def stop(self, cleanup=True):
|
||||
if self.stopped:
|
||||
log.warning("spv node is already stopped")
|
||||
return
|
||||
try:
|
||||
await self.server.db.search_index.delete_index()
|
||||
await self.server.db.search_index.stop()
|
||||
await self.server.stop()
|
||||
await self.es_writer.delete_index()
|
||||
await self.es_writer.stop()
|
||||
await self.writer.stop()
|
||||
self.stopped = True
|
||||
except Exception as e:
|
||||
log.exception("failed to stop spv node")
|
||||
raise e
|
||||
finally:
|
||||
cleanup and self.cleanup()
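SPVNode now wires up the split hub services itself: scribe (BlockchainProcessorService), the elastic sync writer and the herald server. A hedged fixture-style sketch of that lifecycle; lbcwallet_node is assumed to be a started LBCWalletNode:

async def spv_fixture(lbcwallet_node):
    node = SPVNode()                       # hub services are created internally now
    await node.start(lbcwallet_node)       # writer, then es_writer, then the herald server
    try:
        yield node
    finally:
        await node.stop(cleanup=True)      # deletes the ES index and the temp db dir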
|
||||
|
||||
|
@ -241,18 +287,19 @@ class SPVNode:
|
|||
shutil.rmtree(self.data_path, ignore_errors=True)
|
||||
|
||||
|
||||
class BlockchainProcess(asyncio.SubprocessProtocol):
|
||||
class LBCDProcess(asyncio.SubprocessProtocol):
|
||||
|
||||
IGNORE_OUTPUT = [
|
||||
b'keypool keep',
|
||||
b'keypool reserve',
|
||||
b'keypool return',
|
||||
b'Block submitted',
|
||||
]
|
||||
|
||||
def __init__(self):
|
||||
self.ready = asyncio.Event()
|
||||
self.stopped = asyncio.Event()
|
||||
self.log = log.getChild('blockchain')
|
||||
self.log = log.getChild('lbcd')
|
||||
|
||||
def pipe_data_received(self, fd, data):
|
||||
if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):
|
||||
|
@ -263,7 +310,7 @@ class BlockchainProcess(asyncio.SubprocessProtocol):
|
|||
if b'Error:' in data:
|
||||
self.ready.set()
|
||||
raise SystemError(data.decode())
|
||||
if b'Done loading' in data:
|
||||
if b'RPCS: RPC server listening on' in data:
|
||||
self.ready.set()
|
||||
|
||||
def process_exited(self):
|
||||
|
@ -271,39 +318,57 @@ class BlockchainProcess(asyncio.SubprocessProtocol):
|
|||
self.ready.set()
|
||||
|
||||
|
||||
class BlockchainNode:
|
||||
class WalletProcess(asyncio.SubprocessProtocol):
|
||||
|
||||
P2SH_SEGWIT_ADDRESS = "p2sh-segwit"
|
||||
BECH32_ADDRESS = "bech32"
|
||||
IGNORE_OUTPUT = [
|
||||
]
|
||||
|
||||
def __init__(self):
|
||||
self.ready = asyncio.Event()
|
||||
self.stopped = asyncio.Event()
|
||||
self.log = log.getChild('lbcwallet')
|
||||
self.transport: Optional[asyncio.transports.SubprocessTransport] = None
|
||||
|
||||
def pipe_data_received(self, fd, data):
|
||||
if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):
|
||||
if b'Error:' in data:
|
||||
self.log.error(data.decode())
|
||||
else:
|
||||
self.log.info(data.decode())
|
||||
if b'Error:' in data:
|
||||
self.ready.set()
|
||||
raise SystemError(data.decode())
|
||||
if b'WLLT: Finished rescan' in data:
|
||||
self.ready.set()
|
||||
|
||||
def process_exited(self):
|
||||
self.stopped.set()
|
||||
self.ready.set()
|
||||
|
||||
|
||||
class LBCDNode:
|
||||
def __init__(self, url, daemon, cli):
|
||||
self.latest_release_url = url
|
||||
self.project_dir = os.path.dirname(os.path.dirname(__file__))
|
||||
self.bin_dir = os.path.join(self.project_dir, 'bin')
|
||||
self.daemon_bin = os.path.join(self.bin_dir, daemon)
|
||||
self.cli_bin = os.path.join(self.bin_dir, cli)
|
||||
self.log = log.getChild('blockchain')
|
||||
self.data_path = None
|
||||
self.log = log.getChild('lbcd')
|
||||
self.data_path = tempfile.mkdtemp()
|
||||
self.protocol = None
|
||||
self.transport = None
|
||||
self.block_expected = 0
|
||||
self.hostname = 'localhost'
|
||||
self.peerport = 9246 + 2 # avoid conflict with default peer port
|
||||
self.rpcport = 9245 + 2 # avoid conflict with default rpc port
|
||||
self.peerport = 29246
|
||||
self.rpcport = 29245
|
||||
self.rpcuser = 'rpcuser'
|
||||
self.rpcpassword = 'rpcpassword'
|
||||
self.stopped = False
|
||||
self.restart_ready = asyncio.Event()
|
||||
self.restart_ready.set()
|
||||
self.stopped = True
|
||||
self.running = asyncio.Event()
|
||||
|
||||
@property
|
||||
def rpc_url(self):
|
||||
return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.rpcport}/'
|
||||
|
||||
def is_expected_block(self, e: BlockHeightEvent):
|
||||
return self.block_expected == e.height
|
||||
|
||||
@property
|
||||
def exists(self):
|
||||
return (
|
||||
|
@ -312,6 +377,12 @@ class BlockchainNode:
|
|||
)
|
||||
|
||||
def download(self):
|
||||
uname = platform.uname()
|
||||
target_os = str.lower(uname.system)
|
||||
target_arch = str.replace(uname.machine, 'x86_64', 'amd64')
|
||||
target_platform = target_os + '_' + target_arch
|
||||
self.latest_release_url = str.replace(self.latest_release_url, 'TARGET_PLATFORM', target_platform)
|
||||
|
||||
downloaded_file = os.path.join(
|
||||
self.bin_dir,
|
||||
self.latest_release_url[self.latest_release_url.rfind('/')+1:]
|
||||
|
@ -345,72 +416,206 @@ class BlockchainNode:
|
|||
return self.exists or self.download()
|
||||
|
||||
async def start(self):
|
||||
assert self.ensure()
|
||||
self.data_path = tempfile.mkdtemp()
|
||||
loop = asyncio.get_event_loop()
|
||||
asyncio.get_child_watcher().attach_loop(loop)
|
||||
command = [
|
||||
self.daemon_bin,
|
||||
f'-datadir={self.data_path}', '-printtoconsole', '-regtest', '-server', '-txindex',
|
||||
f'-rpcuser={self.rpcuser}', f'-rpcpassword={self.rpcpassword}', f'-rpcport={self.rpcport}',
|
||||
f'-port={self.peerport}'
|
||||
]
|
||||
self.log.info(' '.join(command))
|
||||
while not self.stopped:
|
||||
if self.running.is_set():
|
||||
await asyncio.sleep(1)
|
||||
continue
|
||||
await self.restart_ready.wait()
|
||||
try:
|
||||
self.transport, self.protocol = await loop.subprocess_exec(
|
||||
BlockchainProcess, *command
|
||||
)
|
||||
await self.protocol.ready.wait()
|
||||
assert not self.protocol.stopped.is_set()
|
||||
self.running.set()
|
||||
except asyncio.CancelledError:
|
||||
self.running.clear()
|
||||
raise
|
||||
except Exception as e:
|
||||
self.running.clear()
|
||||
log.exception('failed to start lbrycrdd', exc_info=e)
|
||||
if not self.stopped:
|
||||
return
|
||||
self.stopped = False
|
||||
try:
|
||||
assert self.ensure()
|
||||
loop = asyncio.get_event_loop()
|
||||
asyncio.get_child_watcher().attach_loop(loop)
|
||||
command = [
|
||||
self.daemon_bin,
|
||||
'--notls',
|
||||
f'--datadir={self.data_path}',
|
||||
'--regtest', f'--listen=127.0.0.1:{self.peerport}', f'--rpclisten=127.0.0.1:{self.rpcport}',
|
||||
'--txindex', f'--rpcuser={self.rpcuser}', f'--rpcpass={self.rpcpassword}'
|
||||
]
|
||||
self.log.info(' '.join(command))
|
||||
self.transport, self.protocol = await loop.subprocess_exec(
|
||||
LBCDProcess, *command
|
||||
)
|
||||
await self.protocol.ready.wait()
|
||||
assert not self.protocol.stopped.is_set()
|
||||
self.running.set()
|
||||
except asyncio.CancelledError:
|
||||
self.running.clear()
|
||||
self.stopped = True
|
||||
raise
|
||||
except Exception as e:
|
||||
self.running.clear()
|
||||
self.stopped = True
|
||||
log.exception('failed to start lbcd', exc_info=e)
|
||||
raise
|
||||
|
||||
async def stop(self, cleanup=True):
|
||||
if self.stopped:
|
||||
return
|
||||
try:
|
||||
if self.transport:
|
||||
self.transport.terminate()
|
||||
await self.protocol.stopped.wait()
|
||||
self.transport.close()
|
||||
except Exception as e:
|
||||
log.exception('failed to stop lbcd', exc_info=e)
|
||||
raise
|
||||
finally:
|
||||
self.log.info("Done shutting down " + self.daemon_bin)
|
||||
self.stopped = True
|
||||
if cleanup:
|
||||
self.cleanup()
|
||||
self.running.clear()
|
||||
|
||||
def cleanup(self):
|
||||
assert self.stopped
|
||||
shutil.rmtree(self.data_path, ignore_errors=True)
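
The start()/stop() flow above is the standard asyncio SubprocessProtocol pattern: the protocol sets `ready` once the daemon's output shows it is serving, sets `stopped` when the process exits, and the node wrapper simply awaits those events around terminate()/close(). A minimal self-contained sketch of that pattern, with a throwaway `python -c` child standing in for lbcd (an assumption for illustration only):

import asyncio
import sys


class ReadyProtocol(asyncio.SubprocessProtocol):
    """Minimal protocol: set `ready` on first output, `stopped` on exit."""

    def __init__(self):
        self.ready = asyncio.Event()
        self.stopped = asyncio.Event()

    def pipe_data_received(self, fd, data):
        # A real harness scans for a marker line in the daemon log instead.
        self.ready.set()

    def process_exited(self):
        self.stopped.set()
        self.ready.set()  # never leave start() hanging if the child dies early


async def run_once():
    loop = asyncio.get_running_loop()
    transport, protocol = await loop.subprocess_exec(
        ReadyProtocol, sys.executable, '-u', '-c', "print('listening'); input()"
    )
    await protocol.ready.wait()   # mirrors `await self.protocol.ready.wait()`
    transport.terminate()         # mirrors the stop() path above
    await protocol.stopped.wait()
    transport.close()


asyncio.run(run_once())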
class LBCWalletNode:
|
||||
P2SH_SEGWIT_ADDRESS = "p2sh-segwit"
|
||||
BECH32_ADDRESS = "bech32"
|
||||
|
||||
def __init__(self, url, lbcwallet, cli):
|
||||
self.latest_release_url = url
|
||||
self.project_dir = os.path.dirname(os.path.dirname(__file__))
|
||||
self.bin_dir = os.path.join(self.project_dir, 'bin')
|
||||
self.lbcwallet_bin = os.path.join(self.bin_dir, lbcwallet)
|
||||
self.cli_bin = os.path.join(self.bin_dir, cli)
|
||||
self.log = log.getChild('lbcwallet')
|
||||
self.protocol = None
|
||||
self.transport = None
|
||||
self.hostname = 'localhost'
|
||||
self.lbcd_rpcport = 29245
|
||||
self.lbcwallet_rpcport = 29244
|
||||
self.rpcuser = 'rpcuser'
|
||||
self.rpcpassword = 'rpcpassword'
|
||||
self.data_path = tempfile.mkdtemp()
|
||||
self.stopped = True
|
||||
self.running = asyncio.Event()
|
||||
self.block_expected = 0
|
||||
self.mining_addr = ''
|
||||
|
||||
@property
|
||||
def rpc_url(self):
|
||||
# FIXME: somehow the hub/sdk doesn't learn about new blocks through the wallet RPC port, why?
|
||||
# return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.lbcwallet_rpcport}/'
|
||||
return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.lbcd_rpcport}/'
|
||||
|
||||
def is_expected_block(self, e: BlockHeightEvent):
|
||||
return self.block_expected == e.height
|
||||
|
||||
@property
|
||||
def exists(self):
|
||||
return (
|
||||
os.path.exists(self.lbcwallet_bin)
|
||||
)
|
||||
|
||||
def download(self):
|
||||
uname = platform.uname()
|
||||
target_os = str.lower(uname.system)
|
||||
target_arch = str.replace(uname.machine, 'x86_64', 'amd64')
|
||||
target_platform = target_os + '_' + target_arch
|
||||
self.latest_release_url = str.replace(self.latest_release_url, 'TARGET_PLATFORM', target_platform)
|
||||
|
||||
downloaded_file = os.path.join(
|
||||
self.bin_dir,
|
||||
self.latest_release_url[self.latest_release_url.rfind('/')+1:]
|
||||
)
|
||||
|
||||
if not os.path.exists(self.bin_dir):
|
||||
os.mkdir(self.bin_dir)
|
||||
|
||||
if not os.path.exists(downloaded_file):
|
||||
self.log.info('Downloading: %s', self.latest_release_url)
|
||||
with urllib.request.urlopen(self.latest_release_url) as response:
|
||||
with open(downloaded_file, 'wb') as out_file:
|
||||
shutil.copyfileobj(response, out_file)
|
||||
|
||||
self.log.info('Extracting: %s', downloaded_file)
|
||||
|
||||
if downloaded_file.endswith('.zip'):
|
||||
with zipfile.ZipFile(downloaded_file) as dotzip:
|
||||
dotzip.extractall(self.bin_dir)
|
||||
# zipfile bug https://bugs.python.org/issue15795
|
||||
os.chmod(self.lbcwallet_bin, 0o755)
|
||||
|
||||
elif downloaded_file.endswith('.tar.gz'):
|
||||
with tarfile.open(downloaded_file) as tar:
|
||||
tar.extractall(self.bin_dir)
|
||||
|
||||
return self.exists
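
For reference, the platform string built by download() above is just the lowercased `uname.system` plus the Go-style architecture name, substituted into the `TARGET_PLATFORM` placeholder of the release URL; a quick standalone illustration (the URL below is a made-up placeholder, not the real release location):

import platform

url_template = 'https://example.invalid/lbcwallet_TARGET_PLATFORM.tar.gz'  # placeholder URL

uname = platform.uname()
target_os = uname.system.lower()                        # e.g. 'linux', 'darwin', 'windows'
target_arch = uname.machine.replace('x86_64', 'amd64')  # Go-style arch naming
target_platform = target_os + '_' + target_arch         # e.g. 'linux_amd64'

print(url_template.replace('TARGET_PLATFORM', target_platform))
# -> https://example.invalid/lbcwallet_linux_amd64.tar.gz on a typical x86_64 Linux box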
def ensure(self):
|
||||
return self.exists or self.download()
|
||||
|
||||
async def start(self):
|
||||
assert self.ensure()
|
||||
loop = asyncio.get_event_loop()
|
||||
asyncio.get_child_watcher().attach_loop(loop)
|
||||
|
||||
command = [
|
||||
self.lbcwallet_bin,
|
||||
'--noservertls', '--noclienttls',
|
||||
'--regtest',
|
||||
f'--rpcconnect=127.0.0.1:{self.lbcd_rpcport}', f'--rpclisten=127.0.0.1:{self.lbcwallet_rpcport}',
|
||||
'--createtemp', f'--appdata={self.data_path}',
|
||||
f'--username={self.rpcuser}', f'--password={self.rpcpassword}'
|
||||
]
|
||||
self.log.info(' '.join(command))
|
||||
try:
|
||||
self.transport, self.protocol = await loop.subprocess_exec(
|
||||
WalletProcess, *command
|
||||
)
|
||||
self.protocol.transport = self.transport
|
||||
await self.protocol.ready.wait()
|
||||
assert not self.protocol.stopped.is_set()
|
||||
self.running.set()
|
||||
self.stopped = False
|
||||
except asyncio.CancelledError:
|
||||
self.running.clear()
|
||||
raise
|
||||
except Exception as e:
|
||||
self.running.clear()
|
||||
log.exception('failed to start lbcwallet', exc_info=e)
|
||||
|
||||
def cleanup(self):
|
||||
assert self.stopped
|
||||
shutil.rmtree(self.data_path, ignore_errors=True)
|
||||
|
||||
async def stop(self, cleanup=True):
|
||||
if self.stopped:
|
||||
return
|
||||
try:
|
||||
self.transport.terminate()
|
||||
await self.protocol.stopped.wait()
|
||||
self.transport.close()
|
||||
except Exception as e:
|
||||
log.exception('failed to stop lbcwallet', exc_info=e)
|
||||
raise
|
||||
finally:
|
||||
self.log.info("Done shutting down " + self.lbcwallet_bin)
|
||||
self.stopped = True
|
||||
if cleanup:
|
||||
self.cleanup()
|
||||
|
||||
async def clear_mempool(self):
|
||||
self.restart_ready.clear()
|
||||
self.transport.terminate()
|
||||
await self.protocol.stopped.wait()
|
||||
self.transport.close()
|
||||
self.running.clear()
|
||||
os.remove(os.path.join(self.data_path, 'regtest', 'mempool.dat'))
|
||||
self.restart_ready.set()
|
||||
await self.running.wait()
|
||||
|
||||
def cleanup(self):
|
||||
shutil.rmtree(self.data_path, ignore_errors=True)
|
||||
self.running.clear()
|
||||
|
||||
async def _cli_cmnd(self, *args):
|
||||
cmnd_args = [
|
||||
self.cli_bin, f'-datadir={self.data_path}', '-regtest',
|
||||
f'-rpcuser={self.rpcuser}', f'-rpcpassword={self.rpcpassword}', f'-rpcport={self.rpcport}'
|
||||
self.cli_bin,
|
||||
f'--rpcuser={self.rpcuser}', f'--rpcpass={self.rpcpassword}', '--notls', '--regtest', '--wallet'
|
||||
] + list(args)
|
||||
self.log.info(' '.join(cmnd_args))
|
||||
loop = asyncio.get_event_loop()
|
||||
asyncio.get_child_watcher().attach_loop(loop)
|
||||
process = await asyncio.create_subprocess_exec(
|
||||
*cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
|
||||
*cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
|
||||
)
|
||||
out, _ = await process.communicate()
|
||||
out, err = await process.communicate()
|
||||
result = out.decode().strip()
|
||||
err = err.decode().strip()
|
||||
if len(result) <= 0 and err.startswith('-'):
|
||||
raise Exception(err)
|
||||
if err and 'creating a default config file' not in err:
|
||||
log.warning(err)
|
||||
self.log.info(result)
|
||||
if result.startswith('error code'):
|
||||
raise Exception(result)
|
||||
|
@ -418,7 +623,14 @@ class BlockchainNode:
|
|||
|
||||
def generate(self, blocks):
|
||||
self.block_expected += blocks
|
||||
return self._cli_cmnd('generate', str(blocks))
|
||||
return self._cli_cmnd('generatetoaddress', str(blocks), self.mining_addr)
|
||||
|
||||
def generate_to_address(self, blocks, addr):
|
||||
self.block_expected += blocks
|
||||
return self._cli_cmnd('generatetoaddress', str(blocks), addr)
|
||||
|
||||
def wallet_passphrase(self, passphrase, timeout):
|
||||
return self._cli_cmnd('walletpassphrase', passphrase, str(timeout))
|
||||
|
||||
def invalidate_block(self, blockhash):
|
||||
return self._cli_cmnd('invalidateblock', blockhash)
|
||||
|
@ -435,7 +647,7 @@ class BlockchainNode:
|
|||
def get_raw_change_address(self):
|
||||
return self._cli_cmnd('getrawchangeaddress')
|
||||
|
||||
def get_new_address(self, address_type):
|
||||
def get_new_address(self, address_type='legacy'):
|
||||
return self._cli_cmnd('getnewaddress', "", address_type)
|
||||
|
||||
async def get_balance(self):
|
||||
|
@ -451,136 +663,13 @@ class BlockchainNode:
|
|||
return self._cli_cmnd('createrawtransaction', json.dumps(inputs), json.dumps(outputs))
|
||||
|
||||
async def sign_raw_transaction_with_wallet(self, tx):
|
||||
return json.loads(await self._cli_cmnd('signrawtransactionwithwallet', tx))['hex'].encode()
|
||||
# the "withwallet" portion should only come into play if we are doing segwit.
|
||||
# and "withwallet" doesn't exist on lbcd yet.
|
||||
result = await self._cli_cmnd('signrawtransaction', tx)
|
||||
return json.loads(result)['hex'].encode()
|
||||
|
||||
def decode_raw_transaction(self, tx):
|
||||
return self._cli_cmnd('decoderawtransaction', hexlify(tx.raw).decode())
|
||||
|
||||
def get_raw_transaction(self, txid):
|
||||
return self._cli_cmnd('getrawtransaction', txid, '1')
class HubProcess(asyncio.SubprocessProtocol):
|
||||
def __init__(self):
|
||||
self.ready = asyncio.Event()
|
||||
self.stopped = asyncio.Event()
|
||||
self.log = log.getChild('hub')
|
||||
|
||||
def pipe_data_received(self, fd, data):
|
||||
if self.log:
|
||||
self.log.info(data.decode())
|
||||
if b'error' in data.lower():
|
||||
self.ready.set()
|
||||
raise SystemError(data.decode())
|
||||
if b'listening on' in data:
|
||||
self.ready.set()
|
||||
|
||||
def process_exited(self):
|
||||
self.stopped.set()
|
||||
self.ready.set()
class HubNode:
|
||||
|
||||
def __init__(self, url, daemon, spv_node):
|
||||
self.spv_node = spv_node
|
||||
self.debug = False
|
||||
|
||||
self.latest_release_url = url
|
||||
self.project_dir = os.path.dirname(os.path.dirname(__file__))
|
||||
self.bin_dir = os.path.join(self.project_dir, 'bin')
|
||||
self.daemon_bin = os.path.join(self.bin_dir, daemon)
|
||||
self.cli_bin = os.path.join(self.bin_dir, daemon)
|
||||
self.log = log.getChild('hub')
|
||||
self.transport = None
|
||||
self.protocol = None
|
||||
self.hostname = 'localhost'
|
||||
self.rpcport = 50051 # avoid conflict with default rpc port
|
||||
self.stopped = False
|
||||
self.restart_ready = asyncio.Event()
|
||||
self.restart_ready.set()
|
||||
self.running = asyncio.Event()
|
||||
|
||||
@property
|
||||
def exists(self):
|
||||
return (
|
||||
os.path.exists(self.cli_bin) and
|
||||
os.path.exists(self.daemon_bin)
|
||||
)
|
||||
|
||||
def download(self):
|
||||
downloaded_file = os.path.join(
|
||||
self.bin_dir,
|
||||
self.latest_release_url[self.latest_release_url.rfind('/')+1:]
|
||||
)
|
||||
|
||||
if not os.path.exists(self.bin_dir):
|
||||
os.mkdir(self.bin_dir)
|
||||
|
||||
if not os.path.exists(downloaded_file):
|
||||
self.log.info('Downloading: %s', self.latest_release_url)
|
||||
with urllib.request.urlopen(self.latest_release_url) as response:
|
||||
with open(downloaded_file, 'wb') as out_file:
|
||||
shutil.copyfileobj(response, out_file)
|
||||
|
||||
self.log.info('Extracting: %s', downloaded_file)
|
||||
|
||||
if downloaded_file.endswith('.zip'):
|
||||
with zipfile.ZipFile(downloaded_file) as dotzip:
|
||||
dotzip.extractall(self.bin_dir)
|
||||
# zipfile bug https://bugs.python.org/issue15795
|
||||
os.chmod(self.cli_bin, 0o755)
|
||||
os.chmod(self.daemon_bin, 0o755)
|
||||
|
||||
elif downloaded_file.endswith('.tar.gz'):
|
||||
with tarfile.open(downloaded_file) as tar:
|
||||
tar.extractall(self.bin_dir)
|
||||
|
||||
os.chmod(self.daemon_bin, 0o755)
|
||||
|
||||
return self.exists
|
||||
|
||||
def ensure(self):
|
||||
return self.exists or self.download()
|
||||
|
||||
async def start(self):
|
||||
assert self.ensure()
|
||||
loop = asyncio.get_event_loop()
|
||||
asyncio.get_child_watcher().attach_loop(loop)
|
||||
command = [
|
||||
self.daemon_bin, 'serve', '--esindex', self.spv_node.index_name + 'claims', '--debug'
|
||||
]
|
||||
self.log.info(' '.join(command))
|
||||
while not self.stopped:
|
||||
if self.running.is_set():
|
||||
await asyncio.sleep(1)
|
||||
continue
|
||||
await self.restart_ready.wait()
|
||||
try:
|
||||
if not self.debug:
|
||||
self.transport, self.protocol = await loop.subprocess_exec(
|
||||
HubProcess, *command
|
||||
)
|
||||
await self.protocol.ready.wait()
|
||||
assert not self.protocol.stopped.is_set()
|
||||
self.running.set()
|
||||
except asyncio.CancelledError:
|
||||
self.running.clear()
|
||||
raise
|
||||
except Exception as e:
|
||||
self.running.clear()
|
||||
log.exception('failed to start hub', exc_info=e)
|
||||
|
||||
async def stop(self, cleanup=True):
|
||||
self.stopped = True
|
||||
try:
|
||||
if not self.debug:
|
||||
self.transport.terminate()
|
||||
await self.protocol.stopped.wait()
|
||||
self.transport.close()
|
||||
finally:
|
||||
if cleanup:
|
||||
self.cleanup()
|
||||
|
||||
def cleanup(self):
|
||||
pass
|
||||
|
|
|
@ -61,8 +61,10 @@ class ConductorService:
|
|||
#set_logging(
|
||||
# self.stack.ledger_module, logging.DEBUG, WebSocketLogHandler(self.send_message)
|
||||
#)
|
||||
self.stack.blockchain_started or await self.stack.start_blockchain()
|
||||
self.send_message({'type': 'service', 'name': 'blockchain', 'port': self.stack.blockchain_node.port})
|
||||
self.stack.lbcd_started or await self.stack.start_lbcd()
|
||||
self.send_message({'type': 'service', 'name': 'lbcd', 'port': self.stack.lbcd_node.port})
|
||||
self.stack.lbcwallet_started or await self.stack.start_lbcwallet()
|
||||
self.send_message({'type': 'service', 'name': 'lbcwallet', 'port': self.stack.lbcwallet_node.port})
|
||||
self.stack.spv_started or await self.stack.start_spv()
|
||||
self.send_message({'type': 'service', 'name': 'spv', 'port': self.stack.spv_node.port})
|
||||
self.stack.wallet_started or await self.stack.start_wallet()
|
||||
|
@ -74,7 +76,7 @@ class ConductorService:
|
|||
async def generate(self, request):
|
||||
data = await request.post()
|
||||
blocks = data.get('blocks', 1)
|
||||
await self.stack.blockchain_node.generate(int(blocks))
|
||||
await self.stack.lbcwallet_node.generate(int(blocks))
|
||||
return json_response({'blocks': blocks})
|
||||
|
||||
async def transfer(self, request):
|
||||
|
@ -85,11 +87,14 @@ class ConductorService:
|
|||
if not address:
|
||||
raise ValueError("No address was provided.")
|
||||
amount = data.get('amount', 1)
|
||||
txid = await self.stack.blockchain_node.send_to_address(address, amount)
|
||||
if self.stack.wallet_started:
|
||||
await self.stack.wallet_node.ledger.on_transaction.where(
|
||||
lambda e: e.tx.id == txid and e.address == address
|
||||
watcher = self.stack.wallet_node.ledger.on_transaction.where(
|
||||
lambda e: e.address == address # and e.tx.id == txid -- might stall; see send_to_address_and_wait
|
||||
)
|
||||
txid = await self.stack.lbcwallet_node.send_to_address(address, amount)
|
||||
await watcher
|
||||
else:
|
||||
txid = await self.stack.lbcwallet_node.send_to_address(address, amount)
|
||||
return json_response({
|
||||
'address': address,
|
||||
'amount': amount,
|
||||
|
@ -98,7 +103,7 @@ class ConductorService:
|
|||
|
||||
async def balance(self, _):
|
||||
return json_response({
|
||||
'balance': await self.stack.blockchain_node.get_balance()
|
||||
'balance': await self.stack.lbcwallet_node.get_balance()
|
||||
})
|
||||
|
||||
async def log(self, request):
|
||||
|
@ -129,7 +134,7 @@ class ConductorService:
|
|||
'type': 'status',
|
||||
'height': self.stack.wallet_node.ledger.headers.height,
|
||||
'balance': satoshis_to_coins(await self.stack.wallet_node.account.get_balance()),
|
||||
'miner': await self.stack.blockchain_node.get_balance()
|
||||
'miner': await self.stack.lbcwallet_node.get_balance()
|
||||
})
|
||||
|
||||
def send_message(self, msg):
|
||||
|
|
|
@ -395,8 +395,8 @@ class RPCSession(SessionBase):
|
|||
namespace=NAMESPACE, labelnames=("version",)
|
||||
)
|
||||
|
||||
def __init__(self, *, framer=None, loop=None, connection=None):
|
||||
super().__init__(framer=framer, loop=loop)
|
||||
def __init__(self, *, framer=None, connection=None):
|
||||
super().__init__(framer=framer)
|
||||
self.connection = connection or self.default_connection()
|
||||
self.client_version = 'unknown'
|
||||
|
||||
|
@ -496,6 +496,17 @@ class RPCSession(SessionBase):
|
|||
self.abort()
|
||||
return False
|
||||
|
||||
async def send_notifications(self, notifications) -> bool:
|
||||
"""Send an RPC notification over the network."""
|
||||
message, _ = self.connection.send_batch(notifications)
|
||||
try:
|
||||
await self._send_message(message)
|
||||
return True
|
||||
except asyncio.TimeoutError:
|
||||
self.logger.info("timeout sending address notification to %s", self.peer_address_str(for_log=True))
|
||||
self.abort()
|
||||
return False
|
||||
|
||||
def send_batch(self, raise_errors=False):
|
||||
"""Return a BatchRequest. Intended to be used like so:
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@ OP_HASH160 = 0xa9
|
|||
OP_EQUALVERIFY = 0x88
|
||||
OP_CHECKSIG = 0xac
|
||||
OP_CHECKMULTISIG = 0xae
|
||||
OP_CHECKLOCKTIMEVERIFY = 0xb1
|
||||
OP_EQUAL = 0x87
|
||||
OP_PUSHDATA1 = 0x4c
|
||||
OP_PUSHDATA2 = 0x4d
|
||||
|
@ -276,7 +277,7 @@ class Template:
|
|||
elif isinstance(opcode, PUSH_INTEGER):
|
||||
data = values[opcode.name]
|
||||
source.write_many(push_data(
|
||||
data.to_bytes((data.bit_length() + 7) // 8, byteorder='little')
|
||||
data.to_bytes((data.bit_length() + 8) // 8, byteorder='little', signed=True)
|
||||
))
|
||||
elif isinstance(opcode, PUSH_SUBSCRIPT):
|
||||
data = values[opcode.name]
|
|
|||
REDEEM_PUBKEY_HASH = Template('pubkey_hash', (
|
||||
PUSH_SINGLE('signature'), PUSH_SINGLE('pubkey')
|
||||
))
|
||||
REDEEM_SCRIPT = Template('script', (
|
||||
MULTI_SIG_SCRIPT = Template('multi_sig', (
|
||||
SMALL_INTEGER('signatures_count'), PUSH_MANY('pubkeys'), SMALL_INTEGER('pubkeys_count'),
|
||||
OP_CHECKMULTISIG
|
||||
))
|
||||
REDEEM_SCRIPT_HASH = Template('script_hash', (
|
||||
OP_0, PUSH_MANY('signatures'), PUSH_SUBSCRIPT('script', REDEEM_SCRIPT)
|
||||
REDEEM_SCRIPT_HASH_MULTI_SIG = Template('script_hash+multi_sig', (
|
||||
OP_0, PUSH_MANY('signatures'), PUSH_SUBSCRIPT('script', MULTI_SIG_SCRIPT)
|
||||
))
|
||||
TIME_LOCK_SCRIPT = Template('timelock', (
|
||||
PUSH_INTEGER('height'), OP_CHECKLOCKTIMEVERIFY, OP_DROP,
|
||||
# rest is identical to OutputScript.PAY_PUBKEY_HASH:
|
||||
OP_DUP, OP_HASH160, PUSH_SINGLE('pubkey_hash'), OP_EQUALVERIFY, OP_CHECKSIG
|
||||
))
|
||||
REDEEM_SCRIPT_HASH_TIME_LOCK = Template('script_hash+timelock', (
|
||||
PUSH_SINGLE('signature'), PUSH_SINGLE('pubkey'), PUSH_SUBSCRIPT('script', TIME_LOCK_SCRIPT)
|
||||
))
|
||||
|
||||
templates = [
|
||||
REDEEM_PUBKEY,
|
||||
REDEEM_PUBKEY_HASH,
|
||||
REDEEM_SCRIPT_HASH,
|
||||
REDEEM_SCRIPT
|
||||
REDEEM_SCRIPT_HASH_TIME_LOCK,
|
||||
REDEEM_SCRIPT_HASH_MULTI_SIG,
|
||||
]
|
||||
|
||||
@classmethod
|
||||
|
@ -380,20 +389,38 @@ class InputScript(Script):
|
|||
})
|
||||
|
||||
@classmethod
|
||||
def redeem_script_hash(cls, signatures, pubkeys):
|
||||
return cls(template=cls.REDEEM_SCRIPT_HASH, values={
|
||||
def redeem_multi_sig_script_hash(cls, signatures, pubkeys):
|
||||
return cls(template=cls.REDEEM_SCRIPT_HASH_MULTI_SIG, values={
|
||||
'signatures': signatures,
|
||||
'script': cls.redeem_script(signatures, pubkeys)
|
||||
'script': cls(template=cls.MULTI_SIG_SCRIPT, values={
|
||||
'signatures_count': len(signatures),
|
||||
'pubkeys': pubkeys,
|
||||
'pubkeys_count': len(pubkeys)
|
||||
})
|
||||
})
|
||||
|
||||
@classmethod
|
||||
def redeem_script(cls, signatures, pubkeys):
|
||||
return cls(template=cls.REDEEM_SCRIPT, values={
|
||||
'signatures_count': len(signatures),
|
||||
'pubkeys': pubkeys,
|
||||
'pubkeys_count': len(pubkeys)
|
||||
def redeem_time_lock_script_hash(cls, signature, pubkey, height=None, pubkey_hash=None, script_source=None):
|
||||
if height and pubkey_hash:
|
||||
script = cls(template=cls.TIME_LOCK_SCRIPT, values={
|
||||
'height': height,
|
||||
'pubkey_hash': pubkey_hash
|
||||
})
|
||||
elif script_source:
|
||||
script = cls(source=script_source, template=cls.TIME_LOCK_SCRIPT)
|
||||
script.parse(script.template)
|
||||
else:
|
||||
raise ValueError("script_source or both height and pubkey_hash are required.")
|
||||
return cls(template=cls.REDEEM_SCRIPT_HASH_TIME_LOCK, values={
|
||||
'signature': signature,
|
||||
'pubkey': pubkey,
|
||||
'script': script
|
||||
})
|
||||
|
||||
@property
|
||||
def is_script_hash(self):
|
||||
return self.template.name.startswith('script_hash+')
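
A hedged usage sketch for the time-lock redeem helper added above; it assumes the lbry SDK from this branch is importable, and the signature, pubkey, height and pubkey_hash values are dummy placeholders chosen only to show the call shape:

from lbry.wallet.script import InputScript

fake_signature = b'\x30' * 71            # placeholder DER-ish signature bytes
fake_pubkey = b'\x02' + b'\x11' * 32     # placeholder 33-byte compressed pubkey
fake_pubkey_hash = b'\x22' * 20          # placeholder hash160

redeem = InputScript.redeem_time_lock_script_hash(
    fake_signature, fake_pubkey,
    height=600_000,                      # spendable only after this height (OP_CHECKLOCKTIMEVERIFY)
    pubkey_hash=fake_pubkey_hash,
)
assert redeem.is_script_hash             # template name starts with 'script_hash+'
print(redeem.template.name)              # 'script_hash+timelock'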
class OutputScript(Script):
|
||||
|
||||
|
@ -460,21 +487,6 @@ class OutputScript(Script):
|
|||
UPDATE_CLAIM_OPCODES + PAY_SCRIPT_HASH.opcodes
|
||||
))
|
||||
|
||||
SELL_SCRIPT = Template('sell_script', (
|
||||
OP_VERIFY, OP_DROP, OP_DROP, OP_DROP, PUSH_INTEGER('price'), OP_PRICECHECK
|
||||
))
|
||||
SELL_CLAIM = Template('sell_claim+pay_script_hash', (
|
||||
OP_SELL_CLAIM, PUSH_SINGLE('claim_id'), PUSH_SUBSCRIPT('sell_script', SELL_SCRIPT),
|
||||
PUSH_SUBSCRIPT('receive_script', InputScript.REDEEM_SCRIPT), OP_2DROP, OP_2DROP
|
||||
) + PAY_SCRIPT_HASH.opcodes)
|
||||
|
||||
BUY_CLAIM = Template('buy_claim+pay_script_hash', (
|
||||
OP_BUY_CLAIM, PUSH_SINGLE('sell_id'),
|
||||
PUSH_SINGLE('claim_id'), PUSH_SINGLE('claim_version'),
|
||||
PUSH_SINGLE('owner_pubkey_hash'), PUSH_SINGLE('negotiation_signature'),
|
||||
OP_2DROP, OP_2DROP, OP_2DROP,
|
||||
) + PAY_SCRIPT_HASH.opcodes)
|
||||
|
||||
templates = [
|
||||
PAY_PUBKEY_FULL,
|
||||
PAY_PUBKEY_HASH,
|
||||
|
@ -489,8 +501,6 @@ class OutputScript(Script):
|
|||
SUPPORT_CLAIM_DATA_SCRIPT,
|
||||
UPDATE_CLAIM_PUBKEY,
|
||||
UPDATE_CLAIM_SCRIPT,
|
||||
SELL_CLAIM, SELL_SCRIPT,
|
||||
BUY_CLAIM,
|
||||
]
|
||||
|
||||
@classmethod
|
||||
|
@ -550,30 +560,6 @@ class OutputScript(Script):
|
|||
'pubkey_hash': pubkey_hash
|
||||
})
|
||||
|
||||
@classmethod
|
||||
def sell_script(cls, price):
|
||||
return cls(template=cls.SELL_SCRIPT, values={
|
||||
'price': price,
|
||||
})
|
||||
|
||||
@classmethod
|
||||
def sell_claim(cls, claim_id, price, signatures, pubkeys):
|
||||
return cls(template=cls.SELL_CLAIM, values={
|
||||
'claim_id': claim_id,
|
||||
'sell_script': OutputScript.sell_script(price),
|
||||
'receive_script': InputScript.redeem_script(signatures, pubkeys)
|
||||
})
|
||||
|
||||
@classmethod
|
||||
def buy_claim(cls, sell_id, claim_id, claim_version, owner_pubkey_hash, negotiation_signature):
|
||||
return cls(template=cls.BUY_CLAIM, values={
|
||||
'sell_id': sell_id,
|
||||
'claim_id': claim_id,
|
||||
'claim_version': claim_version,
|
||||
'owner_pubkey_hash': owner_pubkey_hash,
|
||||
'negotiation_signature': negotiation_signature,
|
||||
})
|
||||
|
||||
@property
|
||||
def is_pay_pubkey_hash(self):
|
||||
return self.template.name.endswith('pay_pubkey_hash')
|
||||
|
@ -602,17 +588,6 @@ class OutputScript(Script):
|
|||
def is_support_claim_data(self):
|
||||
return self.template.name.startswith('support_claim+data+')
|
||||
|
||||
@property
|
||||
def is_sell_claim(self):
|
||||
return self.template.name.startswith('sell_claim+')
|
||||
|
||||
@property
|
||||
def is_buy_claim(self):
|
||||
return self.template.name.startswith('buy_claim+')
|
||||
|
||||
@property
|
||||
def is_claim_involved(self):
|
||||
return any((
|
||||
self.is_claim_name, self.is_support_claim, self.is_update_claim,
|
||||
self.is_sell_claim, self.is_buy_claim
|
||||
))
|
||||
return any((self.is_claim_name, self.is_support_claim, self.is_update_claim))
|
||||
|
|
|
@ -1,809 +0,0 @@
|
|||
import time
|
||||
import asyncio
|
||||
from struct import pack, unpack
|
||||
from concurrent.futures.thread import ThreadPoolExecutor
|
||||
from typing import Optional
|
||||
from prometheus_client import Gauge, Histogram
|
||||
import lbry
|
||||
from lbry.wallet.server.db.writer import SQLDB
|
||||
from lbry.wallet.server.daemon import DaemonError
|
||||
from lbry.wallet.server.hash import hash_to_hex_str, HASHX_LEN
|
||||
from lbry.wallet.server.util import chunks, class_logger
|
||||
from lbry.wallet.server.leveldb import FlushData
|
||||
from lbry.wallet.server.udp import StatusServer
class Prefetcher:
|
||||
"""Prefetches blocks (in the forward direction only)."""
|
||||
|
||||
def __init__(self, daemon, coin, blocks_event):
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
self.daemon = daemon
|
||||
self.coin = coin
|
||||
self.blocks_event = blocks_event
|
||||
self.blocks = []
|
||||
self.caught_up = False
|
||||
# Access to fetched_height should be protected by the semaphore
|
||||
self.fetched_height = None
|
||||
self.semaphore = asyncio.Semaphore()
|
||||
self.refill_event = asyncio.Event()
|
||||
# The prefetched block cache size. The min cache size has
|
||||
# little effect on sync time.
|
||||
self.cache_size = 0
|
||||
self.min_cache_size = 10 * 1024 * 1024
|
||||
# This makes the first fetch be 10 blocks
|
||||
self.ave_size = self.min_cache_size // 10
|
||||
self.polling_delay = 5
|
||||
|
||||
async def main_loop(self, bp_height):
|
||||
"""Loop forever polling for more blocks."""
|
||||
await self.reset_height(bp_height)
|
||||
while True:
|
||||
try:
|
||||
# Sleep a while if there is nothing to prefetch
|
||||
await self.refill_event.wait()
|
||||
if not await self._prefetch_blocks():
|
||||
await asyncio.sleep(self.polling_delay)
|
||||
except DaemonError as e:
|
||||
self.logger.info(f'ignoring daemon error: {e}')
|
||||
|
||||
def get_prefetched_blocks(self):
|
||||
"""Called by block processor when it is processing queued blocks."""
|
||||
blocks = self.blocks
|
||||
self.blocks = []
|
||||
self.cache_size = 0
|
||||
self.refill_event.set()
|
||||
return blocks
|
||||
|
||||
async def reset_height(self, height):
|
||||
"""Reset to prefetch blocks from the block processor's height.
|
||||
|
||||
Used in blockchain reorganisations. This coroutine can be
|
||||
called asynchronously to the _prefetch_blocks coroutine so we
|
||||
must synchronize with a semaphore.
|
||||
"""
|
||||
async with self.semaphore:
|
||||
self.blocks.clear()
|
||||
self.cache_size = 0
|
||||
self.fetched_height = height
|
||||
self.refill_event.set()
|
||||
|
||||
daemon_height = await self.daemon.height()
|
||||
behind = daemon_height - height
|
||||
if behind > 0:
|
||||
self.logger.info(f'catching up to daemon height {daemon_height:,d} '
|
||||
f'({behind:,d} blocks behind)')
|
||||
else:
|
||||
self.logger.info(f'caught up to daemon height {daemon_height:,d}')
|
||||
|
||||
async def _prefetch_blocks(self):
|
||||
"""Prefetch some blocks and put them on the queue.
|
||||
|
||||
Repeats until the queue is full or caught up.
|
||||
"""
|
||||
daemon = self.daemon
|
||||
daemon_height = await daemon.height()
|
||||
async with self.semaphore:
|
||||
while self.cache_size < self.min_cache_size:
|
||||
# Try and catch up all blocks but limit to room in cache.
|
||||
# Constrain fetch count to between 0 and 500 regardless;
|
||||
# testnet can be lumpy.
|
||||
cache_room = self.min_cache_size // self.ave_size
|
||||
count = min(daemon_height - self.fetched_height, cache_room)
|
||||
count = min(500, max(count, 0))
|
||||
if not count:
|
||||
self.caught_up = True
|
||||
return False
|
||||
|
||||
first = self.fetched_height + 1
|
||||
hex_hashes = await daemon.block_hex_hashes(first, count)
|
||||
if self.caught_up:
|
||||
self.logger.info('new block height {:,d} hash {}'
|
||||
.format(first + count-1, hex_hashes[-1]))
|
||||
blocks = await daemon.raw_blocks(hex_hashes)
|
||||
|
||||
assert count == len(blocks)
|
||||
|
||||
# Special handling for genesis block
|
||||
if first == 0:
|
||||
blocks[0] = self.coin.genesis_block(blocks[0])
|
||||
self.logger.info(f'verified genesis block with hash {hex_hashes[0]}')
|
||||
|
||||
# Update our recent average block size estimate
|
||||
size = sum(len(block) for block in blocks)
|
||||
if count >= 10:
|
||||
self.ave_size = size // count
|
||||
else:
|
||||
self.ave_size = (size + (10 - count) * self.ave_size) // 10
|
||||
|
||||
self.blocks.extend(blocks)
|
||||
self.cache_size += size
|
||||
self.fetched_height += count
|
||||
self.blocks_event.set()
|
||||
|
||||
self.refill_event.clear()
|
||||
return True
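
The fetch sizing above reduces to a small clamp: grab as many blocks as fit in the remaining cache room, never more than 500 and never a negative count. A standalone sketch of just that calculation, using the 10 MB minimum cache and first-fetch-of-10 defaults from __init__:

def prefetch_count(daemon_height: int, fetched_height: int,
                   min_cache_size: int = 10 * 1024 * 1024,
                   ave_size: int = (10 * 1024 * 1024) // 10) -> int:
    """Mirror of the count clamp in Prefetcher._prefetch_blocks."""
    cache_room = min_cache_size // ave_size              # how many average-sized blocks fit
    count = min(daemon_height - fetched_height, cache_room)
    return min(500, max(count, 0))                       # clamp: testnet can be lumpy

assert prefetch_count(daemon_height=1000, fetched_height=0) == 10    # first fetch is 10 blocks
assert prefetch_count(daemon_height=1000, fetched_height=999) == 1
assert prefetch_count(daemon_height=1000, fetched_height=1000) == 0  # caught up, nothing to do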
class ChainError(Exception):
|
||||
"""Raised on error processing blocks."""
|
||||
|
||||
|
||||
NAMESPACE = "wallet_server"
|
||||
HISTOGRAM_BUCKETS = (
|
||||
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf')
|
||||
)
|
||||
|
||||
|
||||
class BlockProcessor:
|
||||
"""Process blocks and update the DB state to match.
|
||||
|
||||
Employ a prefetcher to prefetch blocks in batches for processing.
|
||||
Coordinate backing up in case of chain reorganisations.
|
||||
"""
|
||||
|
||||
block_count_metric = Gauge(
|
||||
"block_count", "Number of processed blocks", namespace=NAMESPACE
|
||||
)
|
||||
block_update_time_metric = Histogram(
|
||||
"block_time", "Block update times", namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS
|
||||
)
|
||||
reorg_count_metric = Gauge(
|
||||
"reorg_count", "Number of reorgs", namespace=NAMESPACE
|
||||
)
|
||||
|
||||
def __init__(self, env, db, daemon, notifications):
|
||||
self.env = env
|
||||
self.db = db
|
||||
self.daemon = daemon
|
||||
self.notifications = notifications
|
||||
|
||||
self.coin = env.coin
|
||||
self.blocks_event = asyncio.Event()
|
||||
self.prefetcher = Prefetcher(daemon, env.coin, self.blocks_event)
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
self.executor = ThreadPoolExecutor(1)
|
||||
|
||||
# Meta
|
||||
self.next_cache_check = 0
|
||||
self.touched = set()
|
||||
self.reorg_count = 0
|
||||
|
||||
# Caches of unflushed items.
|
||||
self.headers = []
|
||||
self.block_hashes = []
|
||||
self.block_txs = []
|
||||
self.undo_infos = []
|
||||
|
||||
# UTXO cache
|
||||
self.utxo_cache = {}
|
||||
self.db_deletes = []
|
||||
|
||||
# If the lock is successfully acquired, in-memory chain state
|
||||
# is consistent with self.height
|
||||
self.state_lock = asyncio.Lock()
|
||||
|
||||
self.search_cache = {}
|
||||
self.history_cache = {}
|
||||
self.status_server = StatusServer()
|
||||
|
||||
async def run_in_thread_with_lock(self, func, *args):
|
||||
# Run in a thread to prevent blocking. Shielded so that
|
||||
# cancellations from shutdown don't lose work - when the task
|
||||
# completes the data will be flushed and then we shut down.
|
||||
# Take the state lock to be certain in-memory state is
|
||||
# consistent and not being updated elsewhere.
|
||||
async def run_in_thread_locked():
|
||||
async with self.state_lock:
|
||||
return await asyncio.get_event_loop().run_in_executor(self.executor, func, *args)
|
||||
return await asyncio.shield(run_in_thread_locked())
|
async def check_and_advance_blocks(self, raw_blocks):
|
||||
"""Process the list of raw blocks passed. Detects and handles
|
||||
reorgs.
|
||||
"""
|
||||
if not raw_blocks:
|
||||
return
|
||||
first = self.height + 1
|
||||
blocks = [self.coin.block(raw_block, first + n)
|
||||
for n, raw_block in enumerate(raw_blocks)]
|
||||
headers = [block.header for block in blocks]
|
||||
hprevs = [self.coin.header_prevhash(h) for h in headers]
|
||||
chain = [self.tip] + [self.coin.header_hash(h) for h in headers[:-1]]
|
||||
|
||||
if hprevs == chain:
|
||||
start = time.perf_counter()
|
||||
await self.run_in_thread_with_lock(self.advance_blocks, blocks)
|
||||
if self.sql:
|
||||
await self.db.search_index.claim_consumer(self.sql.claim_producer())
|
||||
for cache in self.search_cache.values():
|
||||
cache.clear()
|
||||
self.history_cache.clear()
|
||||
self.notifications.notified_mempool_txs.clear()
|
||||
await self._maybe_flush()
|
||||
processed_time = time.perf_counter() - start
|
||||
self.block_count_metric.set(self.height)
|
||||
self.block_update_time_metric.observe(processed_time)
|
||||
self.status_server.set_height(self.db.fs_height, self.db.db_tip)
|
||||
if not self.db.first_sync:
|
||||
s = '' if len(blocks) == 1 else 's'
|
||||
self.logger.info('processed {:,d} block{} in {:.1f}s'.format(len(blocks), s, processed_time))
|
||||
if self._caught_up_event.is_set():
|
||||
if self.sql:
|
||||
await self.db.search_index.apply_filters(self.sql.blocked_streams, self.sql.blocked_channels,
|
||||
self.sql.filtered_streams, self.sql.filtered_channels)
|
||||
await self.notifications.on_block(self.touched, self.height)
|
||||
self.touched = set()
|
||||
elif hprevs[0] != chain[0]:
|
||||
await self.reorg_chain()
|
||||
else:
|
||||
# It is probably possible but extremely rare that what
|
||||
# bitcoind returns doesn't form a chain because it
|
||||
# reorg-ed the chain as it was processing the batched
|
||||
# block hash requests. Should this happen it's simplest
|
||||
# just to reset the prefetcher and try again.
|
||||
self.logger.warning('daemon blocks do not form a chain; '
|
||||
'resetting the prefetcher')
|
||||
await self.prefetcher.reset_height(self.height)
|
async def reorg_chain(self, count: Optional[int] = None):
|
||||
"""Handle a chain reorganisation.
|
||||
|
||||
Count is the number of blocks to simulate a reorg, or None for
|
||||
a real reorg."""
|
||||
if count is None:
|
||||
self.logger.info('chain reorg detected')
|
||||
else:
|
||||
self.logger.info(f'faking a reorg of {count:,d} blocks')
|
||||
|
||||
|
||||
async def get_raw_blocks(last_height, hex_hashes):
|
||||
heights = range(last_height, last_height - len(hex_hashes), -1)
|
||||
try:
|
||||
blocks = [await self.db.read_raw_block(height) for height in heights]
|
||||
self.logger.info(f'read {len(blocks)} blocks from disk')
|
||||
return blocks
|
||||
except FileNotFoundError:
|
||||
return await self.daemon.raw_blocks(hex_hashes)
|
||||
|
||||
def flush_backup():
|
||||
# self.touched can include other addresses which is
|
||||
# harmless, but remove None.
|
||||
self.touched.discard(None)
|
||||
self.db.flush_backup(self.flush_data(), self.touched)
|
||||
|
||||
try:
|
||||
await self.flush(True)
|
||||
|
||||
start, last, hashes = await self.reorg_hashes(count)
|
||||
# Reverse and convert to hex strings.
|
||||
hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)]
|
||||
self.logger.info("reorg %i block hashes", len(hashes))
|
||||
for hex_hashes in chunks(hashes, 50):
|
||||
raw_blocks = await get_raw_blocks(last, hex_hashes)
|
||||
self.logger.info("got %i raw blocks", len(raw_blocks))
|
||||
await self.run_in_thread_with_lock(self.backup_blocks, raw_blocks)
|
||||
await self.run_in_thread_with_lock(flush_backup)
|
||||
last -= len(raw_blocks)
|
||||
|
||||
await self.prefetcher.reset_height(self.height)
|
||||
self.reorg_count_metric.inc()
|
||||
except:
|
||||
self.logger.exception("boom")
|
||||
raise
|
||||
finally:
|
||||
self.logger.info("done with reorg")
|
||||
|
||||
async def reorg_hashes(self, count):
|
||||
"""Return a pair (start, last, hashes) of blocks to back up during a
|
||||
reorg.
|
||||
|
||||
The hashes are returned in order of increasing height. Start
|
||||
is the height of the first hash, last of the last.
|
||||
"""
|
||||
start, count = await self.calc_reorg_range(count)
|
||||
last = start + count - 1
|
||||
s = '' if count == 1 else 's'
|
||||
self.logger.info(f'chain was reorganised replacing {count:,d} '
|
||||
f'block{s} at heights {start:,d}-{last:,d}')
|
||||
|
||||
return start, last, await self.db.fs_block_hashes(start, count)
|
||||
|
||||
async def calc_reorg_range(self, count: Optional[int]):
|
||||
"""Calculate the reorg range"""
|
||||
|
||||
def diff_pos(hashes1, hashes2):
|
||||
"""Returns the index of the first difference in the hash lists.
|
||||
If both lists match returns their length."""
|
||||
for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)):
|
||||
if hash1 != hash2:
|
||||
return n
|
||||
return len(hashes)
|
||||
|
||||
if count is None:
|
||||
# A real reorg
|
||||
start = self.height - 1
|
||||
count = 1
|
||||
while start > 0:
|
||||
hashes = await self.db.fs_block_hashes(start, count)
|
||||
hex_hashes = [hash_to_hex_str(hash) for hash in hashes]
|
||||
d_hex_hashes = await self.daemon.block_hex_hashes(start, count)
|
||||
n = diff_pos(hex_hashes, d_hex_hashes)
|
||||
if n > 0:
|
||||
start += n
|
||||
break
|
||||
count = min(count * 2, start)
|
||||
start -= count
|
||||
|
||||
count = (self.height - start) + 1
|
||||
else:
|
||||
start = (self.height - count) + 1
|
||||
|
||||
return start, count
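
For a real reorg the code above walks backwards with a doubling window, comparing its own block hashes against the daemon's until diff_pos finds the height where they agree again. A compact standalone sketch of that search over two in-memory hash lists (letters stand in for hashes; list index plays the role of height):

def diff_pos(hashes1, hashes2):
    """Index of the first difference; the full length if the lists match (as above)."""
    for n, (h1, h2) in enumerate(zip(hashes1, hashes2)):
        if h1 != h2:
            return n
    return len(hashes1)

def find_reorg_range(ours, theirs):
    """Return (start, count): the span of our blocks that has to be backed up."""
    height = len(ours) - 1
    start, count = height - 1, 1
    while start > 0:
        n = diff_pos(ours[start:start + count], theirs[start:start + count])
        if n > 0:                        # the first n blocks of this window still match
            start += n
            break
        count = min(count * 2, start)    # widen the window and step further back
        start -= count
    return start, (height - start) + 1

# Chains agree up to height 5; our last four blocks are on a stale branch.
ours   = list('ABCDEF') + list('wxyz')
theirs = list('ABCDEF') + list('WXYZ')
assert find_reorg_range(ours, theirs) == (6, 4)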
def estimate_txs_remaining(self):
|
||||
# Try to estimate how many txs there are to go
|
||||
daemon_height = self.daemon.cached_height()
|
||||
coin = self.coin
|
||||
tail_count = daemon_height - max(self.height, coin.TX_COUNT_HEIGHT)
|
||||
# Damp the initial enthusiasm
|
||||
realism = max(2.0 - 0.9 * self.height / coin.TX_COUNT_HEIGHT, 1.0)
|
||||
return (tail_count * coin.TX_PER_BLOCK +
|
||||
max(coin.TX_COUNT - self.tx_count, 0)) * realism
|
||||
|
||||
# - Flushing
|
||||
def flush_data(self):
|
||||
"""The data for a flush. The lock must be taken."""
|
||||
assert self.state_lock.locked()
|
||||
return FlushData(self.height, self.tx_count, self.headers, self.block_hashes,
|
||||
self.block_txs, self.undo_infos, self.utxo_cache,
|
||||
self.db_deletes, self.tip)
|
||||
|
||||
async def flush(self, flush_utxos):
|
||||
def flush():
|
||||
self.db.flush_dbs(self.flush_data(), flush_utxos,
|
||||
self.estimate_txs_remaining)
|
||||
await self.run_in_thread_with_lock(flush)
|
||||
|
||||
async def _maybe_flush(self):
|
||||
# If caught up, flush everything as client queries are
|
||||
# performed on the DB.
|
||||
if self._caught_up_event.is_set():
|
||||
await self.flush(True)
|
||||
elif time.perf_counter() > self.next_cache_check:
|
||||
await self.flush(True)
|
||||
self.next_cache_check = time.perf_counter() + 30
|
||||
|
||||
def check_cache_size(self):
|
||||
"""Flush a cache if it gets too big."""
|
||||
# Good average estimates based on traversal of subobjects and
|
||||
# requesting size from Python (see deep_getsizeof).
|
||||
one_MB = 1000*1000
|
||||
utxo_cache_size = len(self.utxo_cache) * 205
|
||||
db_deletes_size = len(self.db_deletes) * 57
|
||||
hist_cache_size = self.db.history.unflushed_memsize()
|
||||
# Roughly ntxs * 32 + nblocks * 42
|
||||
tx_hash_size = ((self.tx_count - self.db.fs_tx_count) * 32
|
||||
+ (self.height - self.db.fs_height) * 42)
|
||||
utxo_MB = (db_deletes_size + utxo_cache_size) // one_MB
|
||||
hist_MB = (hist_cache_size + tx_hash_size) // one_MB
|
||||
|
||||
self.logger.info('our height: {:,d} daemon: {:,d} '
|
||||
'UTXOs {:,d}MB hist {:,d}MB'
|
||||
.format(self.height, self.daemon.cached_height(),
|
||||
utxo_MB, hist_MB))
|
||||
|
||||
# Flush history if it takes up over 20% of cache memory.
|
||||
# Flush UTXOs once they take up 80% of cache memory.
|
||||
cache_MB = self.env.cache_MB
|
||||
if utxo_MB + hist_MB >= cache_MB or hist_MB >= cache_MB // 5:
|
||||
return utxo_MB >= cache_MB * 4 // 5
|
||||
return None
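
The thresholds above are easier to read with concrete numbers. Using the per-entry estimates from the method (205 bytes per cached UTXO, 57 per pending delete, 32 per unflushed tx hash plus 42 per unflushed header) and an illustrative 1000 MB cache budget, a non-None result means a flush is due and the boolean says whether the UTXO cache has crossed 80% of the budget. A hedged back-of-the-envelope sketch:

ONE_MB = 1000 * 1000
CACHE_MB = 1000                          # illustrative env.cache_MB budget

def flush_decision(n_utxos, n_deletes, hist_bytes, n_txs, n_blocks):
    """Mirror of the arithmetic in check_cache_size()."""
    utxo_mb = (n_utxos * 205 + n_deletes * 57) // ONE_MB
    hist_mb = (hist_bytes + n_txs * 32 + n_blocks * 42) // ONE_MB
    if utxo_mb + hist_mb >= CACHE_MB or hist_mb >= CACHE_MB // 5:
        return utxo_mb >= CACHE_MB * 4 // 5
    return None

assert flush_decision(4_900_000, 0, 0, 0, 0) is True            # ~1004 MB of UTXOs alone
assert flush_decision(100_000, 0, 210 * ONE_MB, 0, 0) is False  # history-heavy: flush, UTXOs still small
assert flush_decision(100_000, 0, 10 * ONE_MB, 100_000, 1_000) is None  # well under both limits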
def advance_blocks(self, blocks):
|
||||
"""Synchronously advance the blocks.
|
||||
|
||||
It is already verified they correctly connect onto our tip.
|
||||
"""
|
||||
min_height = self.db.min_undo_height(self.daemon.cached_height())
|
||||
height = self.height
|
||||
|
||||
for block in blocks:
|
||||
height += 1
|
||||
undo_info = self.advance_txs(
|
||||
height, block.transactions, self.coin.electrum_header(block.header, height),
|
||||
self.coin.header_hash(block.header)
|
||||
)
|
||||
if height >= min_height:
|
||||
self.undo_infos.append((undo_info, height))
|
||||
self.db.write_raw_block(block.raw, height)
|
||||
|
||||
headers = [block.header for block in blocks]
|
||||
self.height = height
|
||||
self.headers.extend(headers)
|
||||
self.tip = self.coin.header_hash(headers[-1])
|
||||
|
||||
def advance_txs(self, height, txs, header, block_hash):
|
||||
self.block_hashes.append(block_hash)
|
||||
self.block_txs.append((b''.join(tx_hash for tx, tx_hash in txs), [tx.raw for tx, _ in txs]))
|
||||
|
||||
undo_info = []
|
||||
tx_num = self.tx_count
|
||||
hashXs_by_tx = []
|
||||
|
||||
# Use local vars for speed in the loops
|
||||
put_utxo = self.utxo_cache.__setitem__
|
||||
spend_utxo = self.spend_utxo
|
||||
undo_info_append = undo_info.append
|
||||
update_touched = self.touched.update
|
||||
append_hashX_by_tx = hashXs_by_tx.append
|
||||
hashX_from_script = self.coin.hashX_from_script
|
||||
|
||||
for tx, tx_hash in txs:
|
||||
hashXs = []
|
||||
append_hashX = hashXs.append
|
||||
tx_numb = pack('<I', tx_num)
|
||||
|
||||
# Spend the inputs
|
||||
for txin in tx.inputs:
|
||||
if txin.is_generation():
|
||||
continue
|
||||
cache_value = spend_utxo(txin.prev_hash, txin.prev_idx)
|
||||
undo_info_append(cache_value)
|
||||
append_hashX(cache_value[:-12])
|
||||
|
||||
# Add the new UTXOs
|
||||
for idx, txout in enumerate(tx.outputs):
|
||||
# Get the hashX. Ignore unspendable outputs
|
||||
hashX = hashX_from_script(txout.pk_script)
|
||||
if hashX:
|
||||
append_hashX(hashX)
|
||||
put_utxo(tx_hash + pack('<H', idx), hashX + tx_numb + pack('<Q', txout.value))
|
||||
|
||||
append_hashX_by_tx(hashXs)
|
||||
update_touched(hashXs)
|
||||
self.db.total_transactions.append(tx_hash)
|
||||
tx_num += 1
|
||||
|
||||
self.db.history.add_unflushed(hashXs_by_tx, self.tx_count)
|
||||
self.tx_count = tx_num
|
||||
self.db.tx_counts.append(tx_num)
|
||||
|
||||
return undo_info
|
||||
|
||||
def backup_blocks(self, raw_blocks):
|
||||
"""Backup the raw blocks and flush.
|
||||
|
||||
The blocks should be in order of decreasing height, starting at
self.height. A flush is performed once the blocks are backed up.
|
||||
"""
|
||||
self.db.assert_flushed(self.flush_data())
|
||||
assert self.height >= len(raw_blocks)
|
||||
|
||||
coin = self.coin
|
||||
for raw_block in raw_blocks:
|
||||
self.logger.info("backup block %i", self.height)
|
||||
# Check and update self.tip
|
||||
block = coin.block(raw_block, self.height)
|
||||
header_hash = coin.header_hash(block.header)
|
||||
if header_hash != self.tip:
|
||||
raise ChainError('backup block {} not tip {} at height {:,d}'
|
||||
.format(hash_to_hex_str(header_hash),
|
||||
hash_to_hex_str(self.tip),
|
||||
self.height))
|
||||
self.tip = coin.header_prevhash(block.header)
|
||||
self.backup_txs(block.transactions)
|
||||
self.height -= 1
|
||||
self.db.tx_counts.pop()
|
||||
|
||||
self.logger.info(f'backed up to height {self.height:,d}')
|
||||
|
||||
def backup_txs(self, txs):
|
||||
# Prevout values, in order down the block (coinbase first if present)
|
||||
# undo_info is in reverse block order
|
||||
undo_info = self.db.read_undo_info(self.height)
|
||||
if undo_info is None:
|
||||
raise ChainError(f'no undo information found for height {self.height:,d}')
|
||||
n = len(undo_info)
|
||||
|
||||
# Use local vars for speed in the loops
|
||||
s_pack = pack
|
||||
undo_entry_len = 12 + HASHX_LEN
|
||||
|
||||
for tx, tx_hash in reversed(txs):
|
||||
for idx, txout in enumerate(tx.outputs):
|
||||
# Spend the TX outputs. Be careful with unspendable
|
||||
# outputs - we didn't save those in the first place.
|
||||
hashX = self.coin.hashX_from_script(txout.pk_script)
|
||||
if hashX:
|
||||
cache_value = self.spend_utxo(tx_hash, idx)
|
||||
self.touched.add(cache_value[:-12])
|
||||
|
||||
# Restore the inputs
|
||||
for txin in reversed(tx.inputs):
|
||||
if txin.is_generation():
|
||||
continue
|
||||
n -= undo_entry_len
|
||||
undo_item = undo_info[n:n + undo_entry_len]
|
||||
self.utxo_cache[txin.prev_hash + s_pack('<H', txin.prev_idx)] = undo_item
|
||||
self.touched.add(undo_item[:-12])
|
||||
|
||||
self.db.total_transactions.pop()
|
||||
|
||||
assert n == 0
|
||||
self.tx_count -= len(txs)
|
||||
|
||||
"""An in-memory UTXO cache, representing all changes to UTXO state
|
||||
since the last DB flush.
|
||||
|
||||
We want to store millions of these in memory for optimal
|
||||
performance during initial sync, because then it is possible to
|
||||
spend UTXOs without ever going to the database (other than as an
|
||||
entry in the address history, and there is only one such entry per
|
||||
TX not per UTXO). So store them in a Python dictionary with
|
||||
binary keys and values.
|
||||
|
||||
Key: TX_HASH + TX_IDX (32 + 2 = 34 bytes)
|
||||
Value: HASHX + TX_NUM + VALUE (11 + 4 + 8 = 23 bytes)
|
||||
|
||||
That's 57 bytes of raw data in-memory. Python dictionary overhead
|
||||
means each entry actually uses about 205 bytes of memory. So
|
||||
almost 5 million UTXOs can fit in 1GB of RAM. There are
|
||||
approximately 42 million UTXOs on bitcoin mainnet at height
|
||||
433,000.
|
||||
|
||||
Semantics:
|
||||
|
||||
add: Add it to the cache dictionary.
|
||||
|
||||
spend: Remove it if in the cache dictionary. Otherwise it's
|
||||
been flushed to the DB. Each UTXO is responsible for two
|
||||
entries in the DB. Mark them for deletion in the next
|
||||
cache flush.
|
||||
|
||||
The UTXO database format has to be able to do two things efficiently:
|
||||
|
||||
1. Given an address be able to list its UTXOs and their values
|
||||
so its balance can be efficiently computed.
|
||||
|
||||
2. When processing transactions, for each prevout spent - a (tx_hash,
|
||||
idx) pair - we have to be able to remove it from the DB. To send
|
||||
notifications to clients we also need to know any address it paid
|
||||
to.
|
||||
|
||||
To this end we maintain two "tables", one for each point above:
|
||||
|
||||
1. Key: b'u' + address_hashX + tx_idx + tx_num
|
||||
Value: the UTXO value as a 64-bit unsigned integer
|
||||
|
||||
2. Key: b'h' + compressed_tx_hash + tx_idx + tx_num
|
||||
Value: hashX
|
||||
|
||||
The compressed tx hash is just the first few bytes of the hash of
|
||||
the tx in which the UTXO was created. As this is not unique there
|
||||
will be potential collisions so tx_num is also in the key. When
|
||||
looking up a UTXO the prefix space of the compressed hash needs to
|
||||
be searched and resolved if necessary with the tx_num. The
|
||||
collision rate is low (<0.1%).
|
||||
"""
def spend_utxo(self, tx_hash, tx_idx):
|
||||
"""Spend a UTXO and return the 33-byte value.
|
||||
|
||||
If the UTXO is not in the cache it must be on disk. We store
|
||||
all UTXOs so not finding one indicates a logic error or DB
|
||||
corruption.
|
||||
"""
|
||||
# Fast track is it being in the cache
|
||||
idx_packed = pack('<H', tx_idx)
|
||||
cache_value = self.utxo_cache.pop(tx_hash + idx_packed, None)
|
||||
if cache_value:
|
||||
return cache_value
|
||||
|
||||
# Spend it from the DB.
|
||||
|
||||
# Key: b'h' + compressed_tx_hash + tx_idx + tx_num
|
||||
# Value: hashX
|
||||
prefix = b'h' + tx_hash[:4] + idx_packed
|
||||
candidates = dict(self.db.utxo_db.iterator(prefix=prefix))
|
||||
for hdb_key, hashX in candidates.items():
|
||||
tx_num_packed = hdb_key[-4:]
|
||||
if len(candidates) > 1:
|
||||
|
||||
tx_num, = unpack('<I', tx_num_packed)
|
||||
try:
|
||||
hash, height = self.db.fs_tx_hash(tx_num)
|
||||
except IndexError:
|
||||
self.logger.error("data integrity error for hashx history: %s missing tx #%s (%s:%s)",
|
||||
hashX.hex(), tx_num, hash_to_hex_str(tx_hash), tx_idx)
|
||||
continue
|
||||
if hash != tx_hash:
|
||||
assert hash is not None # Should always be found
|
||||
continue
|
||||
|
||||
# Key: b'u' + address_hashX + tx_idx + tx_num
|
||||
# Value: the UTXO value as a 64-bit unsigned integer
|
||||
udb_key = b'u' + hashX + hdb_key[-6:]
|
||||
utxo_value_packed = self.db.utxo_db.get(udb_key)
|
||||
if utxo_value_packed is None:
|
||||
self.logger.warning(
|
||||
"%s:%s is not found in UTXO db for %s", hash_to_hex_str(tx_hash), tx_idx, hash_to_hex_str(hashX)
|
||||
)
|
||||
raise ChainError(f"{hash_to_hex_str(tx_hash)}:{tx_idx} is not found in UTXO db for {hash_to_hex_str(hashX)}")
|
||||
# Remove both entries for this UTXO
|
||||
self.db_deletes.append(hdb_key)
|
||||
self.db_deletes.append(udb_key)
|
||||
return hashX + tx_num_packed + utxo_value_packed
|
||||
|
||||
self.logger.error(f'UTXO {hash_to_hex_str(tx_hash)} / {tx_idx} not found in "h" table')
|
||||
raise ChainError('UTXO {} / {:,d} not found in "h" table'
|
||||
.format(hash_to_hex_str(tx_hash), tx_idx))
|
||||
|
||||
async def _process_prefetched_blocks(self):
|
||||
"""Loop forever processing blocks as they arrive."""
|
||||
while True:
|
||||
if self.height == self.daemon.cached_height():
|
||||
if not self._caught_up_event.is_set():
|
||||
await self._first_caught_up()
|
||||
self._caught_up_event.set()
|
||||
await self.blocks_event.wait()
|
||||
self.blocks_event.clear()
|
||||
if self.reorg_count: # this could only happen by calling the reorg rpc
|
||||
await self.reorg_chain(self.reorg_count)
|
||||
self.reorg_count = 0
|
||||
else:
|
||||
blocks = self.prefetcher.get_prefetched_blocks()
|
||||
try:
|
||||
await self.check_and_advance_blocks(blocks)
|
||||
except Exception:
|
||||
self.logger.exception("error while processing txs")
|
||||
raise
|
||||
|
||||
async def _first_caught_up(self):
|
||||
self.logger.info(f'caught up to height {self.height}')
|
||||
# Flush everything but with first_sync->False state.
|
||||
first_sync = self.db.first_sync
|
||||
self.db.first_sync = False
|
||||
await self.flush(True)
|
||||
if first_sync:
|
||||
self.logger.info(f'{lbry.__version__} synced to '
|
||||
f'height {self.height:,d}')
|
||||
# Reopen for serving
|
||||
await self.db.open_for_serving()
|
||||
|
||||
async def _first_open_dbs(self):
|
||||
await self.db.open_for_sync()
|
||||
self.height = self.db.db_height
|
||||
self.tip = self.db.db_tip
|
||||
self.tx_count = self.db.db_tx_count
|
||||
|
||||
# --- External API
|
||||
|
||||
async def fetch_and_process_blocks(self, caught_up_event):
|
||||
"""Fetch, process and index blocks from the daemon.
|
||||
|
||||
Sets caught_up_event when first caught up. Flushes to disk
|
||||
and shuts down cleanly if cancelled.
|
||||
|
||||
This is mainly because if, during initial sync ElectrumX is
|
||||
asked to shut down when a large number of blocks have been
|
||||
processed but not written to disk, it should write those to
|
||||
disk before exiting, as otherwise a significant amount of work
|
||||
could be lost.
|
||||
"""
|
||||
|
||||
self._caught_up_event = caught_up_event
|
||||
try:
|
||||
await self._first_open_dbs()
|
||||
self.status_server.set_height(self.db.fs_height, self.db.db_tip)
|
||||
await asyncio.wait([
|
||||
self.prefetcher.main_loop(self.height),
|
||||
self._process_prefetched_blocks()
|
||||
])
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
except:
|
||||
self.logger.exception("Block processing failed!")
|
||||
raise
|
||||
finally:
|
||||
self.status_server.stop()
|
||||
# Shut down block processing
|
||||
self.logger.info('flushing to DB for a clean shutdown...')
|
||||
await self.flush(True)
|
||||
self.db.close()
|
||||
self.executor.shutdown(wait=True)
|
||||
|
||||
def force_chain_reorg(self, count):
|
||||
"""Force a reorg of the given number of blocks.
|
||||
|
||||
Returns True if a reorg is queued, false if not caught up.
|
||||
"""
|
||||
if self._caught_up_event.is_set():
|
||||
self.reorg_count = count
|
||||
self.blocks_event.set()
|
||||
return True
|
||||
return False
class Timer:
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
self.total = 0
|
||||
self.count = 0
|
||||
self.sub_timers = {}
|
||||
self._last_start = None
|
||||
|
||||
def add_timer(self, name):
|
||||
if name not in self.sub_timers:
|
||||
self.sub_timers[name] = Timer(name)
|
||||
return self.sub_timers[name]
|
||||
|
||||
def run(self, func, *args, forward_timer=False, timer_name=None, **kwargs):
|
||||
t = self.add_timer(timer_name or func.__name__)
|
||||
t.start()
|
||||
try:
|
||||
if forward_timer:
|
||||
return func(*args, **kwargs, timer=t)
|
||||
else:
|
||||
return func(*args, **kwargs)
|
||||
finally:
|
||||
t.stop()
|
||||
|
||||
def start(self):
|
||||
self._last_start = time.time()
|
||||
return self
|
||||
|
||||
def stop(self):
|
||||
self.total += (time.time() - self._last_start)
|
||||
self.count += 1
|
||||
self._last_start = None
|
||||
return self
|
||||
|
||||
def show(self, depth=0, height=None):
|
||||
if depth == 0:
|
||||
print('='*100)
|
||||
if height is not None:
|
||||
print(f'STATISTICS AT HEIGHT {height}')
|
||||
print('='*100)
|
||||
else:
|
||||
print(
|
||||
f"{' '*depth} {self.total/60:4.2f}mins {self.name}"
|
||||
# f"{self.total/self.count:.5f}sec/call, "
|
||||
)
|
||||
for sub_timer in self.sub_timers.values():
|
||||
sub_timer.show(depth+1)
|
||||
if depth == 0:
|
||||
print('='*100)
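
The Timer above nests sub-timers by name and can either wrap a call through run() or be driven manually with start()/stop(); show() then prints the nested report. A small hedged usage sketch, assuming the Timer class as defined above is available in the same module:

import time

def slow_step():
    time.sleep(0.01)                      # stand-in for real per-block work

root = Timer('BlockProcessor')            # same root name LBRYBlockProcessor uses
root.run(slow_step)                       # recorded under sub-timer 'slow_step'
root.run(slow_step, timer_name='flush')   # explicit sub-timer name

manual = root.add_timer('manual').start() # manual start/stop also works
slow_step()
manual.stop()

root.show(height=12345)                   # prints the nested timing report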
class LBRYBlockProcessor(BlockProcessor):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
if self.env.coin.NET == "regtest":
|
||||
self.prefetcher.polling_delay = 0.5
|
||||
self.should_validate_signatures = self.env.boolean('VALIDATE_CLAIM_SIGNATURES', False)
|
||||
self.logger.info(f"LbryumX Block Processor - Validating signatures: {self.should_validate_signatures}")
|
||||
self.sql: SQLDB = self.db.sql
|
||||
self.timer = Timer('BlockProcessor')
|
||||
|
||||
def advance_blocks(self, blocks):
|
||||
if self.sql:
|
||||
self.sql.begin()
|
||||
try:
|
||||
self.timer.run(super().advance_blocks, blocks)
|
||||
except:
|
||||
self.logger.exception(f'Error while advancing transaction in new block.')
|
||||
raise
|
||||
finally:
|
||||
if self.sql:
|
||||
self.sql.commit()
|
||||
|
||||
def advance_txs(self, height, txs, header, block_hash):
|
||||
timer = self.timer.sub_timers['advance_blocks']
|
||||
undo = timer.run(super().advance_txs, height, txs, header, block_hash, timer_name='super().advance_txs')
|
||||
if self.sql:
|
||||
timer.run(self.sql.advance_txs, height, txs, header, self.daemon.cached_height(), forward_timer=True)
|
||||
if (height % 10000 == 0 or not self.db.first_sync) and self.logger.isEnabledFor(10):
|
||||
self.timer.show(height=height)
|
||||
return undo
|
|
@ -1,43 +0,0 @@
|
|||
import logging
|
||||
import traceback
|
||||
import argparse
|
||||
import importlib
|
||||
from lbry.wallet.server.env import Env
|
||||
from lbry.wallet.server.server import Server
|
||||
|
||||
|
||||
def get_argument_parser():
|
||||
parser = argparse.ArgumentParser(
|
||||
prog="lbry-hub"
|
||||
)
|
||||
parser.add_argument("spvserver", type=str, help="Python class path to SPV server implementation.",
|
||||
nargs="?", default="lbry.wallet.server.coin.LBC")
|
||||
return parser
|
||||
|
||||
|
||||
def get_coin_class(spvserver):
|
||||
spvserver_path, coin_class_name = spvserver.rsplit('.', 1)
|
||||
spvserver_module = importlib.import_module(spvserver_path)
|
||||
return getattr(spvserver_module, coin_class_name)
|
|
||||
parser = get_argument_parser()
|
||||
args = parser.parse_args()
|
||||
coin_class = get_coin_class(args.spvserver)
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
|
||||
logging.info('lbry.server starting')
|
||||
logging.getLogger('aiohttp').setLevel(logging.WARNING)
|
||||
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
|
||||
try:
|
||||
server = Server(Env(coin_class))
|
||||
server.run()
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
logging.critical('lbry.server terminated abnormally')
|
||||
else:
|
||||
logging.info('lbry.server terminated normally')
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
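get_coin_class above is a thin wrapper over importlib: it splits the dotted path on the final dot, imports the module, and returns the named attribute. The same lookup can be reproduced in isolation; with the lbry wallet server installed, the parser's default path resolves to the LBC coin class exactly as lbry-hub does:

import importlib

def resolve_dotted(path: str):
    # Split "package.module.ClassName" into module path and attribute name.
    module_path, attr = path.rsplit('.', 1)
    return getattr(importlib.import_module(module_path), attr)

LBC = resolve_dotted("lbry.wallet.server.coin.LBC")
print(LBC.NET)   # 'mainnet'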
@ -1,350 +0,0 @@
import re
import struct
from typing import List
from hashlib import sha256
from decimal import Decimal
from collections import namedtuple

import lbry.wallet.server.tx as lib_tx
from lbry.wallet.script import OutputScript, OP_CLAIM_NAME, OP_UPDATE_CLAIM, OP_SUPPORT_CLAIM
from lbry.wallet.server.tx import DeserializerSegWit
from lbry.wallet.server.util import cachedproperty, subclasses
from lbry.wallet.server.hash import Base58, hash160, double_sha256, hash_to_hex_str, HASHX_LEN
from lbry.wallet.server.daemon import Daemon, LBCDaemon
from lbry.wallet.server.script import ScriptPubKey, OpCodes
from lbry.wallet.server.leveldb import LevelDB
from lbry.wallet.server.session import LBRYElectrumX, LBRYSessionManager
from lbry.wallet.server.db.writer import LBRYLevelDB
from lbry.wallet.server.block_processor import LBRYBlockProcessor


Block = namedtuple("Block", "raw header transactions")
OP_RETURN = OpCodes.OP_RETURN


class CoinError(Exception):
    """Exception raised for coin-related errors."""


class Coin:
    """Base class of coin hierarchy."""

    REORG_LIMIT = 200
    # Not sure if these are coin-specific
    RPC_URL_REGEX = re.compile('.+@(\\[[0-9a-fA-F:]+\\]|[^:]+)(:[0-9]+)?')
    VALUE_PER_COIN = 100000000
    CHUNK_SIZE = 2016
    BASIC_HEADER_SIZE = 80
    STATIC_BLOCK_HEADERS = True
    SESSIONCLS = LBRYElectrumX
    DESERIALIZER = lib_tx.Deserializer
    DAEMON = Daemon
    BLOCK_PROCESSOR = LBRYBlockProcessor
    SESSION_MANAGER = LBRYSessionManager
    DB = LevelDB
    HEADER_VALUES = [
        'version', 'prev_block_hash', 'merkle_root', 'timestamp', 'bits', 'nonce'
    ]
    HEADER_UNPACK = struct.Struct('< I 32s 32s I I I').unpack_from
    MEMPOOL_HISTOGRAM_REFRESH_SECS = 500
    XPUB_VERBYTES = bytes('????', 'utf-8')
    XPRV_VERBYTES = bytes('????', 'utf-8')
    ENCODE_CHECK = Base58.encode_check
    DECODE_CHECK = Base58.decode_check
    # Peer discovery
    PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'}
    PEERS: List[str] = []

    @classmethod
    def lookup_coin_class(cls, name, net):
        """Return a coin class given name and network.

        Raise an exception if unrecognised."""
        req_attrs = ['TX_COUNT', 'TX_COUNT_HEIGHT', 'TX_PER_BLOCK']
        for coin in subclasses(Coin):
            if (coin.NAME.lower() == name.lower() and
                    coin.NET.lower() == net.lower()):
                coin_req_attrs = req_attrs.copy()
                missing = [attr for attr in coin_req_attrs
                           if not hasattr(coin, attr)]
                if missing:
                    raise CoinError(f'coin {name} missing {missing} attributes')
                return coin
        raise CoinError(f'unknown coin {name} and network {net} combination')
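lookup_coin_class walks every registered Coin subclass, matches NAME and NET case-insensitively, and refuses to return a class that is missing the chain-statistics attributes (TX_COUNT, TX_COUNT_HEIGHT, TX_PER_BLOCK). A short usage sketch, assuming the LBC subclass defined later in this file has been imported:

coin = Coin.lookup_coin_class('LBRY', 'mainnet')      # case-insensitive on both arguments
assert coin.SHORTNAME == 'LBC' and coin.RPC_PORT == 9245

try:
    Coin.lookup_coin_class('LBRY', 'no-such-net')     # unknown combinations raise, never return None
except CoinError as e:
    print(e)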
    @classmethod
    def sanitize_url(cls, url):
        # Remove surrounding ws and trailing /s
        url = url.strip().rstrip('/')
        match = cls.RPC_URL_REGEX.match(url)
        if not match:
            raise CoinError(f'invalid daemon URL: "{url}"')
        if match.groups()[1] is None:
            url += f':{cls.RPC_PORT:d}'
        if not url.startswith('http://') and not url.startswith('https://'):
            url = 'http://' + url
        return url + '/'

    @classmethod
    def genesis_block(cls, block):
        """Check the Genesis block is the right one for this coin.

        Return the block less its unspendable coinbase.
        """
        header = cls.block_header(block, 0)
        header_hex_hash = hash_to_hex_str(cls.header_hash(header))
        if header_hex_hash != cls.GENESIS_HASH:
            raise CoinError(f'genesis block has hash {header_hex_hash} expected {cls.GENESIS_HASH}')

        return header + bytes(1)

    @classmethod
    def hashX_from_script(cls, script):
        """Returns a hashX from a script, or None if the script is provably
        unspendable so the output can be dropped.
        """
        if script and script[0] == OP_RETURN:
            return None
        return sha256(script).digest()[:HASHX_LEN]

    @staticmethod
    def lookup_xverbytes(verbytes):
        """Return a (is_xpub, coin_class) pair given xpub/xprv verbytes."""
        # Order means BTC testnet will override NMC testnet
        for coin in subclasses(Coin):
            if verbytes == coin.XPUB_VERBYTES:
                return True, coin
            if verbytes == coin.XPRV_VERBYTES:
                return False, coin
        raise CoinError('version bytes unrecognised')

    @classmethod
    def address_to_hashX(cls, address):
        """Return a hashX given a coin address."""
        return cls.hashX_from_script(cls.pay_to_address_script(address))

    @classmethod
    def P2PKH_address_from_hash160(cls, hash160):
        """Return a P2PKH address given a public key."""
        assert len(hash160) == 20
        return cls.ENCODE_CHECK(cls.P2PKH_VERBYTE + hash160)

    @classmethod
    def P2PKH_address_from_pubkey(cls, pubkey):
        """Return a coin address given a public key."""
        return cls.P2PKH_address_from_hash160(hash160(pubkey))

    @classmethod
    def P2SH_address_from_hash160(cls, hash160):
        """Return a coin address given a hash160."""
        assert len(hash160) == 20
        return cls.ENCODE_CHECK(cls.P2SH_VERBYTES[0] + hash160)

    @classmethod
    def hash160_to_P2PKH_script(cls, hash160):
        return ScriptPubKey.P2PKH_script(hash160)

    @classmethod
    def hash160_to_P2PKH_hashX(cls, hash160):
        return cls.hashX_from_script(cls.hash160_to_P2PKH_script(hash160))

    @classmethod
    def pay_to_address_script(cls, address):
        """Return a pubkey script that pays to a pubkey hash.

        Pass the address (either P2PKH or P2SH) in base58 form.
        """
        raw = cls.DECODE_CHECK(address)

        # Require version byte(s) plus hash160.
        verbyte = -1
        verlen = len(raw) - 20
        if verlen > 0:
            verbyte, hash160 = raw[:verlen], raw[verlen:]

        if verbyte == cls.P2PKH_VERBYTE:
            return cls.hash160_to_P2PKH_script(hash160)
        if verbyte in cls.P2SH_VERBYTES:
            return ScriptPubKey.P2SH_script(hash160)

        raise CoinError(f'invalid address: {address}')
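pay_to_address_script splits the Base58Check payload into its version byte(s) and a 20-byte hash160, then dispatches on the version byte. The round trip can be sketched with a synthetic all-zero hash160 (a placeholder, not a real address), assuming the LBC subclass defined below and that Base58.encode_check and decode_check are inverses:

fake_hash160 = bytes(20)                                   # placeholder digest, not real chain data
address = LBC.P2PKH_address_from_hash160(fake_hash160)     # Base58Check of 0x55 + hash160
script = LBC.pay_to_address_script(address)                # decodes back to verbyte + hash160
assert script == LBC.hash160_to_P2PKH_script(fake_hash160)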
    @classmethod
    def privkey_WIF(cls, privkey_bytes, compressed):
        """Return the private key encoded in Wallet Import Format."""
        payload = bytearray(cls.WIF_BYTE) + privkey_bytes
        if compressed:
            payload.append(0x01)
        return cls.ENCODE_CHECK(payload)

    @classmethod
    def header_hash(cls, header):
        """Given a header return hash"""
        return double_sha256(header)

    @classmethod
    def header_prevhash(cls, header):
        """Given a header return previous hash"""
        return header[4:36]

    @classmethod
    def static_header_offset(cls, height):
        """Given a header height return its offset in the headers file.

        If header sizes change at some point, this is the only code
        that needs updating."""
        assert cls.STATIC_BLOCK_HEADERS
        return height * cls.BASIC_HEADER_SIZE

    @classmethod
    def static_header_len(cls, height):
        """Given a header height return its length."""
        return (cls.static_header_offset(height + 1)
                - cls.static_header_offset(height))
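Because STATIC_BLOCK_HEADERS is set, a header's offset in the flat headers file is simply height * BASIC_HEADER_SIZE, and its length is the difference of consecutive offsets. For LBC, whose 112-byte headers carry an extra 32-byte claim trie root, the arithmetic works out as follows (a sketch assuming the LBC subclass defined below):

assert Coin.static_header_offset(1000) == 1000 * 80        # generic 80-byte Bitcoin-style header
assert LBC.static_header_offset(1000) == 1000 * 112        # LBC headers are 112 bytes
assert LBC.static_header_len(1000) == 112                  # offset(1001) - offset(1000)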
    @classmethod
    def block_header(cls, block, height):
        """Returns the block header given a block and its height."""
        return block[:cls.static_header_len(height)]

    @classmethod
    def block(cls, raw_block, height):
        """Return a Block namedtuple given a raw block and its height."""
        header = cls.block_header(raw_block, height)
        txs = cls.DESERIALIZER(raw_block, start=len(header)).read_tx_block()
        return Block(raw_block, header, txs)

    @classmethod
    def decimal_value(cls, value):
        """Return the number of standard coin units as a Decimal given a
        quantity of smallest units.

        For example 1 BTC is returned for 100 million satoshis.
        """
        return Decimal(value) / cls.VALUE_PER_COIN

    @classmethod
    def electrum_header(cls, header, height):
        h = dict(zip(cls.HEADER_VALUES, cls.HEADER_UNPACK(header)))
        # Add the height that is not present in the header itself
        h['block_height'] = height
        # Convert bytes to str
        h['prev_block_hash'] = hash_to_hex_str(h['prev_block_hash'])
        h['merkle_root'] = hash_to_hex_str(h['merkle_root'])
        return h


class LBC(Coin):
    DAEMON = LBCDaemon
    SESSIONCLS = LBRYElectrumX
    BLOCK_PROCESSOR = LBRYBlockProcessor
    SESSION_MANAGER = LBRYSessionManager
    DESERIALIZER = DeserializerSegWit
    DB = LBRYLevelDB
    NAME = "LBRY"
    SHORTNAME = "LBC"
    NET = "mainnet"
    BASIC_HEADER_SIZE = 112
    CHUNK_SIZE = 96
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("55")
    P2SH_VERBYTES = bytes.fromhex("7A")
    WIF_BYTE = bytes.fromhex("1C")
    GENESIS_HASH = ('9c89283ba0f3227f6c03b70216b9f665'
                    'f0118d5e0fa729cedf4fb34d6a34f463')
    TX_COUNT = 2716936
    TX_COUNT_HEIGHT = 329554
    TX_PER_BLOCK = 1
    RPC_PORT = 9245
    REORG_LIMIT = 200
    PEERS = [
    ]

    @classmethod
    def genesis_block(cls, block):
        '''Check the Genesis block is the right one for this coin.

        Return the block less its unspendable coinbase.
        '''
        header = cls.block_header(block, 0)
        header_hex_hash = hash_to_hex_str(cls.header_hash(header))
        if header_hex_hash != cls.GENESIS_HASH:
            raise CoinError(f'genesis block has hash {header_hex_hash} expected {cls.GENESIS_HASH}')

        return block

    @classmethod
    def electrum_header(cls, header, height):
        version, = struct.unpack('<I', header[:4])
        timestamp, bits, nonce = struct.unpack('<III', header[100:112])
        return {
            'version': version,
            'prev_block_hash': hash_to_hex_str(header[4:36]),
            'merkle_root': hash_to_hex_str(header[36:68]),
            'claim_trie_root': hash_to_hex_str(header[68:100]),
            'timestamp': timestamp,
            'bits': bits,
            'nonce': nonce,
            'block_height': height,
        }

    @cachedproperty
    def address_handlers(self):
        return ScriptPubKey.PayToHandlers(
            address=self.P2PKH_address_from_hash160,
            script_hash=self.P2SH_address_from_hash160,
            pubkey=self.P2PKH_address_from_pubkey,
            unspendable=lambda: None,
            strange=self.claim_address_handler,
        )

    @classmethod
    def address_from_script(cls, script):
        '''Given a pk_script, return the address it pays to, or None.'''
        return ScriptPubKey.pay_to(cls.address_handlers, script)

    @classmethod
    def claim_address_handler(cls, script):
        '''Parse a claim script, returns the address
        '''
        output = OutputScript(script)
        if output.is_pay_pubkey_hash:
            return cls.P2PKH_address_from_hash160(output.values['pubkey_hash'])
        if output.is_pay_script_hash:
            return cls.P2SH_address_from_hash160(output.values['script_hash'])
        if output.is_pay_pubkey:
            return cls.P2PKH_address_from_pubkey(output.values['pubkey'])
        if output.is_return_data:
            return None
        return None

    @classmethod
    def hashX_from_script(cls, script):
        '''
        Overrides electrumx hashX from script by extracting addresses from claim scripts.
        '''
        if script and script[0] == OpCodes.OP_RETURN or not script:
            return None
        if script[0] in [
            OP_CLAIM_NAME,
            OP_UPDATE_CLAIM,
            OP_SUPPORT_CLAIM,
        ]:
            return cls.address_to_hashX(cls.claim_address_handler(script))
        else:
            return sha256(script).digest()[:HASHX_LEN]


class LBCRegTest(LBC):
    NET = "regtest"
    GENESIS_HASH = '6e3fcf1299d4ec5d79c3a4c91d624a4acf9e2e173d95a1a0504f677669687556'
    XPUB_VERBYTES = bytes.fromhex('043587cf')
    XPRV_VERBYTES = bytes.fromhex('04358394')
    P2PKH_VERBYTE = bytes.fromhex("6f")
    P2SH_VERBYTES = bytes.fromhex("c4")


class LBCTestNet(LBCRegTest):
    NET = "testnet"
    GENESIS_HASH = '9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463'
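LBC.electrum_header above pins down the 112-byte layout: version (4 bytes), previous block hash (32), merkle root (32), claim trie root (32), then timestamp, bits and nonce (4 each). A standalone sketch of that slicing, using a zero-filled placeholder header rather than real chain data:

import struct

raw = bytes(112)                                           # placeholder header, not real chain data
version, = struct.unpack('<I', raw[:4])
prev_hash, merkle_root, claim_trie_root = raw[4:36], raw[36:68], raw[68:100]
timestamp, bits, nonce = struct.unpack('<III', raw[100:112])

parsed = LBC.electrum_header(raw, 0)
assert parsed['block_height'] == 0
assert (parsed['timestamp'], parsed['bits'], parsed['nonce']) == (timestamp, bits, nonce)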
@ -1,370 +0,0 @@
|
|||
import asyncio
|
||||
import itertools
|
||||
import json
|
||||
import time
|
||||
from functools import wraps
|
||||
|
||||
import aiohttp
|
||||
from prometheus_client import Gauge, Histogram
|
||||
from lbry.utils import LRUCacheWithMetrics
|
||||
from lbry.wallet.rpc.jsonrpc import RPCError
|
||||
from lbry.wallet.server.util import hex_to_bytes, class_logger
|
||||
from lbry.wallet.rpc import JSONRPC
|
||||
|
||||
|
||||
class DaemonError(Exception):
|
||||
"""Raised when the daemon returns an error in its results."""
|
||||
|
||||
|
||||
class WarmingUpError(Exception):
|
||||
"""Internal - when the daemon is warming up."""
|
||||
|
||||
|
||||
class WorkQueueFullError(Exception):
|
||||
"""Internal - when the daemon's work queue is full."""
|
||||
|
||||
|
||||
NAMESPACE = "wallet_server"
|
||||
|
||||
|
||||
class Daemon:
|
||||
"""Handles connections to a daemon at the given URL."""
|
||||
|
||||
WARMING_UP = -28
|
||||
id_counter = itertools.count()
|
||||
|
||||
lbrycrd_request_time_metric = Histogram(
|
||||
"lbrycrd_request", "lbrycrd requests count", namespace=NAMESPACE, labelnames=("method",)
|
||||
)
|
||||
lbrycrd_pending_count_metric = Gauge(
|
||||
"lbrycrd_pending_count", "Number of lbrycrd rpcs that are in flight", namespace=NAMESPACE,
|
||||
labelnames=("method",)
|
||||
)
|
||||
|
||||
def __init__(self, coin, url, max_workqueue=10, init_retry=0.25,
|
||||
max_retry=4.0):
|
||||
self.coin = coin
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
self.set_url(url)
|
||||
# Limit concurrent RPC calls to this number.
|
||||
# See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
|
||||
self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue)
|
||||
self.init_retry = init_retry
|
||||
self.max_retry = max_retry
|
||||
self._height = None
|
||||
self.available_rpcs = {}
|
||||
self.connector = aiohttp.TCPConnector()
|
||||
self._block_hash_cache = LRUCacheWithMetrics(100000)
|
||||
self._block_cache = LRUCacheWithMetrics(2 ** 16, metric_name='block', namespace=NAMESPACE)
|
||||
|
||||
async def close(self):
|
||||
if self.connector:
|
||||
await self.connector.close()
|
||||
self.connector = None
|
||||
|
||||
def set_url(self, url):
|
||||
"""Set the URLS to the given list, and switch to the first one."""
|
||||
urls = url.split(',')
|
||||
urls = [self.coin.sanitize_url(url) for url in urls]
|
||||
for n, url in enumerate(urls):
|
||||
status = '' if n else ' (current)'
|
||||
logged_url = self.logged_url(url)
|
||||
self.logger.info(f'daemon #{n + 1} at {logged_url}{status}')
|
||||
self.url_index = 0
|
||||
self.urls = urls
|
||||
|
||||
def current_url(self):
|
||||
"""Returns the current daemon URL."""
|
||||
return self.urls[self.url_index]
|
||||
|
||||
def logged_url(self, url=None):
|
||||
"""The host and port part, for logging."""
|
||||
url = url or self.current_url()
|
||||
return url[url.rindex('@') + 1:]
|
||||
|
||||
def failover(self):
|
||||
"""Call to fail-over to the next daemon URL.
|
||||
|
||||
Returns False if there is only one, otherwise True.
|
||||
"""
|
||||
if len(self.urls) > 1:
|
||||
self.url_index = (self.url_index + 1) % len(self.urls)
|
||||
self.logger.info(f'failing over to {self.logged_url()}')
|
||||
return True
|
||||
return False
|
||||
|
||||
def client_session(self):
|
||||
"""An aiohttp client session."""
|
||||
return aiohttp.ClientSession(connector=self.connector, connector_owner=False)
|
||||
|
||||
async def _send_data(self, data):
|
||||
if not self.connector:
|
||||
raise asyncio.CancelledError('Tried to send request during shutdown.')
|
||||
async with self.workqueue_semaphore:
|
||||
async with self.client_session() as session:
|
||||
async with session.post(self.current_url(), data=data) as resp:
|
||||
kind = resp.headers.get('Content-Type', None)
|
||||
if kind == 'application/json':
|
||||
return await resp.json()
|
||||
# bitcoind's HTTP protocol "handling" is a bad joke
|
||||
text = await resp.text()
|
||||
if 'Work queue depth exceeded' in text:
|
||||
raise WorkQueueFullError
|
||||
text = text.strip() or resp.reason
|
||||
self.logger.error(text)
|
||||
raise DaemonError(text)
|
||||
|
||||
async def _send(self, payload, processor):
|
||||
"""Send a payload to be converted to JSON.
|
||||
|
||||
Handles temporary connection issues. Daemon response errors
|
||||
are raise through DaemonError.
|
||||
"""
|
||||
|
||||
def log_error(error):
|
||||
nonlocal last_error_log, retry
|
||||
now = time.time()
|
||||
if now - last_error_log > 60:
|
||||
last_error_log = now
|
||||
self.logger.error(f'{error} Retrying occasionally...')
|
||||
if retry == self.max_retry and self.failover():
|
||||
retry = 0
|
||||
|
||||
on_good_message = None
|
||||
last_error_log = 0
|
||||
data = json.dumps(payload)
|
||||
retry = self.init_retry
|
||||
methods = tuple(
|
||||
[payload['method']] if isinstance(payload, dict) else [request['method'] for request in payload]
|
||||
)
|
||||
while True:
|
||||
try:
|
||||
for method in methods:
|
||||
self.lbrycrd_pending_count_metric.labels(method=method).inc()
|
||||
result = await self._send_data(data)
|
||||
result = processor(result)
|
||||
if on_good_message:
|
||||
self.logger.info(on_good_message)
|
||||
return result
|
||||
except asyncio.TimeoutError:
|
||||
log_error('timeout error.')
|
||||
except aiohttp.ServerDisconnectedError:
|
||||
log_error('disconnected.')
|
||||
on_good_message = 'connection restored'
|
||||
except aiohttp.ClientConnectionError:
|
||||
log_error('connection problem - is your daemon running?')
|
||||
on_good_message = 'connection restored'
|
||||
except aiohttp.ClientError as e:
|
||||
log_error(f'daemon error: {e}')
|
||||
on_good_message = 'running normally'
|
||||
except WarmingUpError:
|
||||
log_error('starting up checking blocks.')
|
||||
on_good_message = 'running normally'
|
||||
except WorkQueueFullError:
|
||||
log_error('work queue full.')
|
||||
on_good_message = 'running normally'
|
||||
finally:
|
||||
for method in methods:
|
||||
self.lbrycrd_pending_count_metric.labels(method=method).dec()
|
||||
await asyncio.sleep(retry)
|
||||
retry = max(min(self.max_retry, retry * 2), self.init_retry)
|
||||
|
||||
async def _send_single(self, method, params=None):
|
||||
"""Send a single request to the daemon."""
|
||||
|
||||
start = time.perf_counter()
|
||||
|
||||
def processor(result):
|
||||
err = result['error']
|
||||
if not err:
|
||||
return result['result']
|
||||
if err.get('code') == self.WARMING_UP:
|
||||
raise WarmingUpError
|
||||
raise DaemonError(err)
|
||||
|
||||
payload = {'method': method, 'id': next(self.id_counter)}
|
||||
if params:
|
||||
payload['params'] = params
|
||||
result = await self._send(payload, processor)
|
||||
self.lbrycrd_request_time_metric.labels(method=method).observe(time.perf_counter() - start)
|
||||
return result
|
||||
|
||||
async def _send_vector(self, method, params_iterable, replace_errs=False):
|
||||
"""Send several requests of the same method.
|
||||
|
||||
The result will be an array of the same length as params_iterable.
|
||||
If replace_errs is true, any item with an error is returned as None,
|
||||
otherwise an exception is raised."""
|
||||
|
||||
start = time.perf_counter()
|
||||
|
||||
def processor(result):
|
||||
errs = [item['error'] for item in result if item['error']]
|
||||
if any(err.get('code') == self.WARMING_UP for err in errs):
|
||||
raise WarmingUpError
|
||||
if not errs or replace_errs:
|
||||
return [item['result'] for item in result]
|
||||
raise DaemonError(errs)
|
||||
|
||||
payload = [{'method': method, 'params': p, 'id': next(self.id_counter)}
|
||||
for p in params_iterable]
|
||||
result = []
|
||||
if payload:
|
||||
result = await self._send(payload, processor)
|
||||
self.lbrycrd_request_time_metric.labels(method=method).observe(time.perf_counter() - start)
|
||||
return result
|
||||
|
||||
async def _is_rpc_available(self, method):
|
||||
"""Return whether given RPC method is available in the daemon.
|
||||
|
||||
Results are cached and the daemon will generally not be queried with
|
||||
the same method more than once."""
|
||||
available = self.available_rpcs.get(method)
|
||||
if available is None:
|
||||
available = True
|
||||
try:
|
||||
await self._send_single(method)
|
||||
except DaemonError as e:
|
||||
err = e.args[0]
|
||||
error_code = err.get("code")
|
||||
available = error_code != JSONRPC.METHOD_NOT_FOUND
|
||||
self.available_rpcs[method] = available
|
||||
return available
|
||||
|
||||
async def block_hex_hashes(self, first, count):
|
||||
"""Return the hex hashes of count block starting at height first."""
|
||||
if first + count < (self.cached_height() or 0) - 200:
|
||||
return await self._cached_block_hex_hashes(first, count)
|
||||
params_iterable = ((h, ) for h in range(first, first + count))
|
||||
return await self._send_vector('getblockhash', params_iterable)
|
||||
|
||||
async def _cached_block_hex_hashes(self, first, count):
|
||||
"""Return the hex hashes of count block starting at height first."""
|
||||
cached = self._block_hash_cache.get((first, count))
|
||||
if cached:
|
||||
return cached
|
||||
params_iterable = ((h, ) for h in range(first, first + count))
|
||||
self._block_hash_cache[(first, count)] = await self._send_vector('getblockhash', params_iterable)
|
||||
return self._block_hash_cache[(first, count)]
|
||||
|
||||
async def deserialised_block(self, hex_hash):
|
||||
"""Return the deserialised block with the given hex hash."""
|
||||
if hex_hash not in self._block_cache:
|
||||
block = await self._send_single('getblock', (hex_hash, True))
|
||||
self._block_cache[hex_hash] = block
|
||||
return block
|
||||
return self._block_cache[hex_hash]
|
||||
|
||||
async def raw_blocks(self, hex_hashes):
|
||||
"""Return the raw binary blocks with the given hex hashes."""
|
||||
params_iterable = ((h, False) for h in hex_hashes)
|
||||
blocks = await self._send_vector('getblock', params_iterable)
|
||||
# Convert hex string to bytes
|
||||
return [hex_to_bytes(block) for block in blocks]
|
||||
|
||||
async def mempool_hashes(self):
|
||||
"""Update our record of the daemon's mempool hashes."""
|
||||
return await self._send_single('getrawmempool')
|
||||
|
||||
async def estimatefee(self, block_count):
|
||||
"""Return the fee estimate for the block count. Units are whole
|
||||
currency units per KB, e.g. 0.00000995, or -1 if no estimate
|
||||
is available.
|
||||
"""
|
||||
args = (block_count, )
|
||||
if await self._is_rpc_available('estimatesmartfee'):
|
||||
estimate = await self._send_single('estimatesmartfee', args)
|
||||
return estimate.get('feerate', -1)
|
||||
return await self._send_single('estimatefee', args)
|
||||
|
||||
async def getnetworkinfo(self):
|
||||
"""Return the result of the 'getnetworkinfo' RPC call."""
|
||||
return await self._send_single('getnetworkinfo')
|
||||
|
||||
async def relayfee(self):
|
||||
"""The minimum fee a low-priority tx must pay in order to be accepted
|
||||
to the daemon's memory pool."""
|
||||
network_info = await self.getnetworkinfo()
|
||||
return network_info['relayfee']
|
||||
|
||||
async def getrawtransaction(self, hex_hash, verbose=False):
|
||||
"""Return the serialized raw transaction with the given hash."""
|
||||
# Cast to int because some coin daemons are old and require it
|
||||
return await self._send_single('getrawtransaction',
|
||||
(hex_hash, int(verbose)))
|
||||
|
||||
async def getrawtransactions(self, hex_hashes, replace_errs=True):
|
||||
"""Return the serialized raw transactions with the given hashes.
|
||||
|
||||
Replaces errors with None by default."""
|
||||
params_iterable = ((hex_hash, 0) for hex_hash in hex_hashes)
|
||||
txs = await self._send_vector('getrawtransaction', params_iterable,
|
||||
replace_errs=replace_errs)
|
||||
# Convert hex strings to bytes
|
||||
return [hex_to_bytes(tx) if tx else None for tx in txs]
|
||||
|
||||
async def broadcast_transaction(self, raw_tx):
|
||||
"""Broadcast a transaction to the network."""
|
||||
return await self._send_single('sendrawtransaction', (raw_tx, ))
|
||||
|
||||
async def height(self):
|
||||
"""Query the daemon for its current height."""
|
||||
self._height = await self._send_single('getblockcount')
|
||||
return self._height
|
||||
|
||||
def cached_height(self):
|
||||
"""Return the cached daemon height.
|
||||
|
||||
If the daemon has not been queried yet this returns None."""
|
||||
return self._height
|
||||
|
||||
|
||||
def handles_errors(decorated_function):
|
||||
@wraps(decorated_function)
|
||||
async def wrapper(*args, **kwargs):
|
||||
try:
|
||||
return await decorated_function(*args, **kwargs)
|
||||
except DaemonError as daemon_error:
|
||||
raise RPCError(1, daemon_error.args[0])
|
||||
return wrapper
|
||||
|
||||
|
||||
class LBCDaemon(Daemon):
|
||||
@handles_errors
|
||||
async def getrawtransaction(self, hex_hash, verbose=False):
|
||||
return await super().getrawtransaction(hex_hash=hex_hash, verbose=verbose)
|
||||
|
||||
@handles_errors
|
||||
async def getclaimbyid(self, claim_id):
|
||||
'''Given a claim id, retrieves claim information.'''
|
||||
return await self._send_single('getclaimbyid', (claim_id,))
|
||||
|
||||
@handles_errors
|
||||
async def getclaimsbyids(self, claim_ids):
|
||||
'''Given a list of claim ids, batches calls to retrieve claim information.'''
|
||||
return await self._send_vector('getclaimbyid', ((claim_id,) for claim_id in claim_ids))
|
||||
|
||||
@handles_errors
|
||||
async def getclaimsforname(self, name):
|
||||
'''Given a name, retrieves all claims matching that name.'''
|
||||
return await self._send_single('getclaimsforname', (name,))
|
||||
|
||||
@handles_errors
|
||||
async def getclaimsfortx(self, txid):
|
||||
'''Given a txid, returns the claims it make.'''
|
||||
return await self._send_single('getclaimsfortx', (txid,)) or []
|
||||
|
||||
@handles_errors
|
||||
async def getnameproof(self, name, block_hash=None):
|
||||
'''Given a name and optional block_hash, returns a name proof and winner, if any.'''
|
||||
return await self._send_single('getnameproof', (name, block_hash,) if block_hash else (name,))
|
||||
|
||||
@handles_errors
|
||||
async def getvalueforname(self, name):
|
||||
'''Given a name, returns the winning claim value.'''
|
||||
return await self._send_single('getvalueforname', (name,))
|
||||
|
||||
@handles_errors
|
||||
async def claimname(self, name, hexvalue, amount):
|
||||
'''Claim a name, used for functional tests only.'''
|
||||
return await self._send_single('claimname', (name, hexvalue, float(amount)))
|
|
@ -1,22 +0,0 @@
class FindShortestID:
    __slots__ = 'short_id', 'new_id'

    def __init__(self):
        self.short_id = ''
        self.new_id = None

    def step(self, other_id, new_id):
        self.new_id = new_id
        for i in range(len(self.new_id)):
            if other_id[i] != self.new_id[i]:
                if i > len(self.short_id)-1:
                    self.short_id = self.new_id[:i+1]
                break

    def finalize(self):
        if self.short_id:
            return '#'+self.short_id


def register_canonical_functions(connection):
    connection.create_aggregate("shortest_id", 2, FindShortestID)
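register_canonical_functions installs FindShortestID as a SQLite aggregate named shortest_id: for a target claim id it scans the other ids in the group and keeps the shortest prefix that no other id shares, returned with a leading '#'. A self-contained sketch with an in-memory database and made-up claim ids:

import sqlite3

conn = sqlite3.connect(':memory:')
register_canonical_functions(conn)

conn.execute('CREATE TABLE claim (claim_id TEXT)')
conn.executemany('INSERT INTO claim VALUES (?)', [('abcdef',), ('abc123',), ('a9f000',)])

# Shortest prefix of 'abcdef' that distinguishes it from every other id above.
row = conn.execute("SELECT shortest_id(claim_id, 'abcdef') FROM claim").fetchone()
print(row[0])   # '#abcd' with these made-up ids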
@ -1,420 +0,0 @@
|
|||
CLAIM_TYPES = {
|
||||
'stream': 1,
|
||||
'channel': 2,
|
||||
'repost': 3,
|
||||
'collection': 4,
|
||||
}
|
||||
|
||||
STREAM_TYPES = {
|
||||
'video': 1,
|
||||
'audio': 2,
|
||||
'image': 3,
|
||||
'document': 4,
|
||||
'binary': 5,
|
||||
'model': 6,
|
||||
}
|
||||
|
||||
# 9/21/2020
|
||||
MOST_USED_TAGS = {
|
||||
"gaming",
|
||||
"people & blogs",
|
||||
"entertainment",
|
||||
"music",
|
||||
"pop culture",
|
||||
"education",
|
||||
"technology",
|
||||
"blockchain",
|
||||
"news",
|
||||
"funny",
|
||||
"science & technology",
|
||||
"learning",
|
||||
"gameplay",
|
||||
"news & politics",
|
||||
"comedy",
|
||||
"bitcoin",
|
||||
"beliefs",
|
||||
"nature",
|
||||
"art",
|
||||
"economics",
|
||||
"film & animation",
|
||||
"lets play",
|
||||
"games",
|
||||
"sports",
|
||||
"howto & style",
|
||||
"game",
|
||||
"cryptocurrency",
|
||||
"playstation 4",
|
||||
"automotive",
|
||||
"crypto",
|
||||
"mature",
|
||||
"sony interactive entertainment",
|
||||
"walkthrough",
|
||||
"tutorial",
|
||||
"video game",
|
||||
"weapons",
|
||||
"playthrough",
|
||||
"pc",
|
||||
"anime",
|
||||
"how to",
|
||||
"btc",
|
||||
"fun",
|
||||
"ethereum",
|
||||
"food",
|
||||
"travel & events",
|
||||
"minecraft",
|
||||
"science",
|
||||
"autos & vehicles",
|
||||
"play",
|
||||
"politics",
|
||||
"commentary",
|
||||
"twitch",
|
||||
"ps4live",
|
||||
"love",
|
||||
"ps4",
|
||||
"nonprofits & activism",
|
||||
"ps4share",
|
||||
"fortnite",
|
||||
"xbox",
|
||||
"porn",
|
||||
"video games",
|
||||
"trump",
|
||||
"español",
|
||||
"money",
|
||||
"music video",
|
||||
"nintendo",
|
||||
"movie",
|
||||
"coronavirus",
|
||||
"donald trump",
|
||||
"steam",
|
||||
"trailer",
|
||||
"android",
|
||||
"podcast",
|
||||
"xbox one",
|
||||
"survival",
|
||||
"audio",
|
||||
"linux",
|
||||
"travel",
|
||||
"funny moments",
|
||||
"litecoin",
|
||||
"animation",
|
||||
"gamer",
|
||||
"lets",
|
||||
"playstation",
|
||||
"bitcoin news",
|
||||
"history",
|
||||
"xxx",
|
||||
"fox news",
|
||||
"dance",
|
||||
"god",
|
||||
"adventure",
|
||||
"liberal",
|
||||
"2020",
|
||||
"horror",
|
||||
"government",
|
||||
"freedom",
|
||||
"reaction",
|
||||
"meme",
|
||||
"photography",
|
||||
"truth",
|
||||
"health",
|
||||
"lbry",
|
||||
"family",
|
||||
"online",
|
||||
"eth",
|
||||
"crypto news",
|
||||
"diy",
|
||||
"trading",
|
||||
"gold",
|
||||
"memes",
|
||||
"world",
|
||||
"space",
|
||||
"lol",
|
||||
"covid-19",
|
||||
"rpg",
|
||||
"humor",
|
||||
"democrat",
|
||||
"film",
|
||||
"call of duty",
|
||||
"tech",
|
||||
"religion",
|
||||
"conspiracy",
|
||||
"rap",
|
||||
"cnn",
|
||||
"hangoutsonair",
|
||||
"unboxing",
|
||||
"fiction",
|
||||
"conservative",
|
||||
"cars",
|
||||
"hoa",
|
||||
"epic",
|
||||
"programming",
|
||||
"progressive",
|
||||
"cryptocurrency news",
|
||||
"classical",
|
||||
"jesus",
|
||||
"movies",
|
||||
"book",
|
||||
"ps3",
|
||||
"republican",
|
||||
"fitness",
|
||||
"books",
|
||||
"multiplayer",
|
||||
"animals",
|
||||
"pokemon",
|
||||
"bitcoin price",
|
||||
"facebook",
|
||||
"sharefactory",
|
||||
"criptomonedas",
|
||||
"cod",
|
||||
"bible",
|
||||
"business",
|
||||
"stream",
|
||||
"comics",
|
||||
"how",
|
||||
"fail",
|
||||
"nsfw",
|
||||
"new music",
|
||||
"satire",
|
||||
"pets & animals",
|
||||
"computer",
|
||||
"classical music",
|
||||
"indie",
|
||||
"musica",
|
||||
"msnbc",
|
||||
"fps",
|
||||
"mod",
|
||||
"sport",
|
||||
"sony",
|
||||
"ripple",
|
||||
"auto",
|
||||
"rock",
|
||||
"marvel",
|
||||
"complete",
|
||||
"mining",
|
||||
"political",
|
||||
"mobile",
|
||||
"pubg",
|
||||
"hip hop",
|
||||
"flat earth",
|
||||
"xbox 360",
|
||||
"reviews",
|
||||
"vlogging",
|
||||
"latest news",
|
||||
"hack",
|
||||
"tarot",
|
||||
"iphone",
|
||||
"media",
|
||||
"cute",
|
||||
"christian",
|
||||
"free speech",
|
||||
"trap",
|
||||
"war",
|
||||
"remix",
|
||||
"ios",
|
||||
"xrp",
|
||||
"spirituality",
|
||||
"song",
|
||||
"league of legends",
|
||||
"cat"
|
||||
}
|
||||
|
||||
MATURE_TAGS = [
|
||||
'nsfw', 'porn', 'xxx', 'mature', 'adult', 'sex'
|
||||
]
|
||||
|
||||
|
||||
def normalize_tag(tag):
|
||||
return tag.replace(" ", "_").replace("&", "and").replace("-", "_")
|
||||
|
||||
|
||||
COMMON_TAGS = {
|
||||
tag: normalize_tag(tag) for tag in list(MOST_USED_TAGS)
|
||||
}
|
||||
|
||||
INDEXED_LANGUAGES = [
|
||||
'none',
|
||||
'en',
|
||||
'aa',
|
||||
'ab',
|
||||
'ae',
|
||||
'af',
|
||||
'ak',
|
||||
'am',
|
||||
'an',
|
||||
'ar',
|
||||
'as',
|
||||
'av',
|
||||
'ay',
|
||||
'az',
|
||||
'ba',
|
||||
'be',
|
||||
'bg',
|
||||
'bh',
|
||||
'bi',
|
||||
'bm',
|
||||
'bn',
|
||||
'bo',
|
||||
'br',
|
||||
'bs',
|
||||
'ca',
|
||||
'ce',
|
||||
'ch',
|
||||
'co',
|
||||
'cr',
|
||||
'cs',
|
||||
'cu',
|
||||
'cv',
|
||||
'cy',
|
||||
'da',
|
||||
'de',
|
||||
'dv',
|
||||
'dz',
|
||||
'ee',
|
||||
'el',
|
||||
'eo',
|
||||
'es',
|
||||
'et',
|
||||
'eu',
|
||||
'fa',
|
||||
'ff',
|
||||
'fi',
|
||||
'fj',
|
||||
'fo',
|
||||
'fr',
|
||||
'fy',
|
||||
'ga',
|
||||
'gd',
|
||||
'gl',
|
||||
'gn',
|
||||
'gu',
|
||||
'gv',
|
||||
'ha',
|
||||
'he',
|
||||
'hi',
|
||||
'ho',
|
||||
'hr',
|
||||
'ht',
|
||||
'hu',
|
||||
'hy',
|
||||
'hz',
|
||||
'ia',
|
||||
'id',
|
||||
'ie',
|
||||
'ig',
|
||||
'ii',
|
||||
'ik',
|
||||
'io',
|
||||
'is',
|
||||
'it',
|
||||
'iu',
|
||||
'ja',
|
||||
'jv',
|
||||
'ka',
|
||||
'kg',
|
||||
'ki',
|
||||
'kj',
|
||||
'kk',
|
||||
'kl',
|
||||
'km',
|
||||
'kn',
|
||||
'ko',
|
||||
'kr',
|
||||
'ks',
|
||||
'ku',
|
||||
'kv',
|
||||
'kw',
|
||||
'ky',
|
||||
'la',
|
||||
'lb',
|
||||
'lg',
|
||||
'li',
|
||||
'ln',
|
||||
'lo',
|
||||
'lt',
|
||||
'lu',
|
||||
'lv',
|
||||
'mg',
|
||||
'mh',
|
||||
'mi',
|
||||
'mk',
|
||||
'ml',
|
||||
'mn',
|
||||
'mr',
|
||||
'ms',
|
||||
'mt',
|
||||
'my',
|
||||
'na',
|
||||
'nb',
|
||||
'nd',
|
||||
'ne',
|
||||
'ng',
|
||||
'nl',
|
||||
'nn',
|
||||
'no',
|
||||
'nr',
|
||||
'nv',
|
||||
'ny',
|
||||
'oc',
|
||||
'oj',
|
||||
'om',
|
||||
'or',
|
||||
'os',
|
||||
'pa',
|
||||
'pi',
|
||||
'pl',
|
||||
'ps',
|
||||
'pt',
|
||||
'qu',
|
||||
'rm',
|
||||
'rn',
|
||||
'ro',
|
||||
'ru',
|
||||
'rw',
|
||||
'sa',
|
||||
'sc',
|
||||
'sd',
|
||||
'se',
|
||||
'sg',
|
||||
'si',
|
||||
'sk',
|
||||
'sl',
|
||||
'sm',
|
||||
'sn',
|
||||
'so',
|
||||
'sq',
|
||||
'sr',
|
||||
'ss',
|
||||
'st',
|
||||
'su',
|
||||
'sv',
|
||||
'sw',
|
||||
'ta',
|
||||
'te',
|
||||
'tg',
|
||||
'th',
|
||||
'ti',
|
||||
'tk',
|
||||
'tl',
|
||||
'tn',
|
||||
'to',
|
||||
'tr',
|
||||
'ts',
|
||||
'tt',
|
||||
'tw',
|
||||
'ty',
|
||||
'ug',
|
||||
'uk',
|
||||
'ur',
|
||||
'uz',
|
||||
've',
|
||||
'vi',
|
||||
'vo',
|
||||
'wa',
|
||||
'wo',
|
||||
'xh',
|
||||
'yi',
|
||||
'yo',
|
||||
'za',
|
||||
'zh',
|
||||
'zu'
|
||||
]
|
|
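normalize_tag above canonicalises tags before indexing: spaces become underscores, '&' becomes 'and', and hyphens become underscores; COMMON_TAGS precomputes that mapping for the popular tags listed in MOST_USED_TAGS. A couple of worked inputs (the function is repeated here so the snippet runs on its own):

def normalize_tag(tag):
    return tag.replace(" ", "_").replace("&", "and").replace("-", "_")

assert normalize_tag("science & technology") == "science_and_technology"
assert normalize_tag("news & politics") == "news_and_politics"
assert normalize_tag("covid-19") == "covid_19"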
@ -1 +0,0 @@
from .search import SearchIndex
@ -1,82 +0,0 @@
INDEX_DEFAULT_SETTINGS = {
    "settings": {
        "analysis": {
            "analyzer": {
                "default": {"tokenizer": "whitespace", "filter": ["lowercase", "porter_stem"]}
            }
        },
        "index": {
            "refresh_interval": -1,
            "number_of_shards": 1,
            "number_of_replicas": 0,
            "sort": {
                "field": ["trending_mixed", "release_time"],
                "order": ["desc", "desc"]
            }
        }
    },
    "mappings": {
        "properties": {
            "claim_id": {
                "fields": {
                    "keyword": {
                        "ignore_above": 256,
                        "type": "keyword"
                    }
                },
                "type": "text",
                "index_prefixes": {
                    "min_chars": 1,
                    "max_chars": 10
                }
            },
            "height": {"type": "integer"},
            "claim_type": {"type": "byte"},
            "censor_type": {"type": "byte"},
            "trending_mixed": {"type": "float"},
            "release_time": {"type": "long"},
        }
    }
}

FIELDS = {
    '_id',
    'claim_id', 'claim_type', 'claim_name', 'normalized_name',
    'tx_id', 'tx_nout', 'tx_position',
    'short_url', 'canonical_url',
    'is_controlling', 'last_take_over_height',
    'public_key_bytes', 'public_key_id', 'claims_in_channel', 'channel_join_height',
    'channel_id', 'signature', 'signature_digest', 'is_signature_valid',
    'amount', 'effective_amount', 'support_amount',
    'fee_amount', 'fee_currency',
    'height', 'creation_height', 'activation_height', 'expiration_height',
    'stream_type', 'media_type', 'censor_type',
    'title', 'author', 'description',
    'timestamp', 'creation_timestamp',
    'duration', 'release_time',
    'tags', 'languages', 'has_source', 'reposted_claim_type',
    'reposted_claim_id', 'repost_count',
    'trending_group', 'trending_mixed', 'trending_local', 'trending_global',
}

TEXT_FIELDS = {'author', 'canonical_url', 'channel_id', 'claim_name', 'description', 'claim_id', 'censoring_channel_id',
               'media_type', 'normalized_name', 'public_key_bytes', 'public_key_id', 'short_url', 'signature',
               'signature_digest', 'title', 'tx_id', 'fee_currency', 'reposted_claim_id', 'tags'}

RANGE_FIELDS = {
    'height', 'creation_height', 'activation_height', 'expiration_height',
    'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount',
    'tx_position', 'channel_join', 'repost_count', 'limit_claims_per_channel',
    'amount', 'effective_amount', 'support_amount',
    'trending_group', 'trending_mixed', 'censor_type',
    'trending_local', 'trending_global',
}

ALL_FIELDS = RANGE_FIELDS | TEXT_FIELDS | FIELDS

REPLACEMENTS = {
    'name': 'normalized_name',
    'txid': 'tx_id',
    'nout': 'tx_nout',
    'valid_channel_signature': 'is_signature_valid',
    'stream_types': 'stream_type',
    'media_types': 'media_type',
    'reposted': 'repost_count'
}
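REPLACEMENTS maps the public claim-search argument names onto the field names that are actually indexed, while TEXT_FIELDS and RANGE_FIELDS decide whether a value is matched as a keyword or as a range. A small sketch of rewriting a query dict with these tables (the rename_keys helper is illustrative, not part of this module):

def rename_keys(params: dict) -> dict:
    """Rewrite public API argument names to their indexed field names."""
    return {REPLACEMENTS.get(key, key): value for key, value in params.items()}

query = {'name': 'lbry', 'txid': 'deadbeef', 'height': '>500000'}
print(rename_keys(query))   # {'normalized_name': 'lbry', 'tx_id': 'deadbeef', 'height': '>500000'}

assert 'height' in RANGE_FIELDS and 'tx_id' in TEXT_FIELDS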
@ -1,645 +0,0 @@
|
|||
import asyncio
|
||||
import struct
|
||||
from binascii import unhexlify
|
||||
from collections import Counter, deque
|
||||
from decimal import Decimal
|
||||
from operator import itemgetter
|
||||
from typing import Optional, List, Iterable, Union
|
||||
|
||||
from elasticsearch import AsyncElasticsearch, NotFoundError, ConnectionError
|
||||
from elasticsearch.helpers import async_streaming_bulk
|
||||
|
||||
from lbry.crypto.base58 import Base58
|
||||
from lbry.error import ResolveCensoredError, TooManyClaimSearchParametersError
|
||||
from lbry.schema.result import Outputs, Censor
|
||||
from lbry.schema.tags import clean_tags
|
||||
from lbry.schema.url import URL, normalize_name
|
||||
from lbry.utils import LRUCache
|
||||
from lbry.wallet.server.db.common import CLAIM_TYPES, STREAM_TYPES
|
||||
from lbry.wallet.server.db.elasticsearch.constants import INDEX_DEFAULT_SETTINGS, REPLACEMENTS, FIELDS, TEXT_FIELDS, \
|
||||
RANGE_FIELDS, ALL_FIELDS
|
||||
from lbry.wallet.server.util import class_logger
|
||||
|
||||
|
||||
class ChannelResolution(str):
|
||||
@classmethod
|
||||
def lookup_error(cls, url):
|
||||
return LookupError(f'Could not find channel in "{url}".')
|
||||
|
||||
|
||||
class StreamResolution(str):
|
||||
@classmethod
|
||||
def lookup_error(cls, url):
|
||||
return LookupError(f'Could not find claim at "{url}".')
|
||||
|
||||
|
||||
class IndexVersionMismatch(Exception):
|
||||
def __init__(self, got_version, expected_version):
|
||||
self.got_version = got_version
|
||||
self.expected_version = expected_version
|
||||
|
||||
|
||||
class SearchIndex:
|
||||
VERSION = 1
|
||||
|
||||
def __init__(self, index_prefix: str, search_timeout=3.0, elastic_host='localhost', elastic_port=9200):
|
||||
self.search_timeout = search_timeout
|
||||
self.sync_timeout = 600 # wont hit that 99% of the time, but can hit on a fresh import
|
||||
self.search_client: Optional[AsyncElasticsearch] = None
|
||||
self.sync_client: Optional[AsyncElasticsearch] = None
|
||||
self.index = index_prefix + 'claims'
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
self.claim_cache = LRUCache(2 ** 15)
|
||||
self.short_id_cache = LRUCache(2 ** 17)
|
||||
self.search_cache = LRUCache(2 ** 17)
|
||||
self.resolution_cache = LRUCache(2 ** 17)
|
||||
self._elastic_host = elastic_host
|
||||
self._elastic_port = elastic_port
|
||||
|
||||
async def get_index_version(self) -> int:
|
||||
try:
|
||||
template = await self.sync_client.indices.get_template(self.index)
|
||||
return template[self.index]['version']
|
||||
except NotFoundError:
|
||||
return 0
|
||||
|
||||
async def set_index_version(self, version):
|
||||
await self.sync_client.indices.put_template(
|
||||
self.index, body={'version': version, 'index_patterns': ['ignored']}, ignore=400
|
||||
)
|
||||
|
||||
async def start(self) -> bool:
|
||||
if self.sync_client:
|
||||
return False
|
||||
hosts = [{'host': self._elastic_host, 'port': self._elastic_port}]
|
||||
self.sync_client = AsyncElasticsearch(hosts, timeout=self.sync_timeout)
|
||||
self.search_client = AsyncElasticsearch(hosts, timeout=self.search_timeout)
|
||||
while True:
|
||||
try:
|
||||
await self.sync_client.cluster.health(wait_for_status='yellow')
|
||||
break
|
||||
except ConnectionError:
|
||||
self.logger.warning("Failed to connect to Elasticsearch. Waiting for it!")
|
||||
await asyncio.sleep(1)
|
||||
|
||||
res = await self.sync_client.indices.create(self.index, INDEX_DEFAULT_SETTINGS, ignore=400)
|
||||
acked = res.get('acknowledged', False)
|
||||
if acked:
|
||||
await self.set_index_version(self.VERSION)
|
||||
return acked
|
||||
index_version = await self.get_index_version()
|
||||
if index_version != self.VERSION:
|
||||
self.logger.error("es search index has an incompatible version: %s vs %s", index_version, self.VERSION)
|
||||
raise IndexVersionMismatch(index_version, self.VERSION)
|
||||
return acked
|
||||
|
||||
def stop(self):
|
||||
clients = [self.sync_client, self.search_client]
|
||||
self.sync_client, self.search_client = None, None
|
||||
return asyncio.ensure_future(asyncio.gather(*(client.close() for client in clients)))
|
||||
|
||||
def delete_index(self):
|
||||
return self.sync_client.indices.delete(self.index, ignore_unavailable=True)
|
||||
|
||||
async def _consume_claim_producer(self, claim_producer):
|
||||
count = 0
|
||||
for op, doc in claim_producer:
|
||||
if op == 'delete':
|
||||
yield {'_index': self.index, '_op_type': 'delete', '_id': doc}
|
||||
else:
|
||||
yield extract_doc(doc, self.index)
|
||||
count += 1
|
||||
if count % 100 == 0:
|
||||
self.logger.info("Indexing in progress, %d claims.", count)
|
||||
self.logger.info("Indexing done for %d claims.", count)
|
||||
|
||||
async def claim_consumer(self, claim_producer):
|
||||
touched = set()
|
||||
async for ok, item in async_streaming_bulk(self.sync_client, self._consume_claim_producer(claim_producer),
|
||||
raise_on_error=False):
|
||||
if not ok:
|
||||
self.logger.warning("indexing failed for an item: %s", item)
|
||||
else:
|
||||
item = item.popitem()[1]
|
||||
touched.add(item['_id'])
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
self.logger.info("Indexing done.")
|
||||
|
||||
def update_filter_query(self, censor_type, blockdict, channels=False):
|
||||
blockdict = {key[::-1].hex(): value[::-1].hex() for key, value in blockdict.items()}
|
||||
if channels:
|
||||
update = expand_query(channel_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
|
||||
else:
|
||||
update = expand_query(claim_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
|
||||
key = 'channel_id' if channels else 'claim_id'
|
||||
update['script'] = {
|
||||
"source": f"ctx._source.censor_type={censor_type}; ctx._source.censoring_channel_id=params[ctx._source.{key}]",
|
||||
"lang": "painless",
|
||||
"params": blockdict
|
||||
}
|
||||
return update
|
||||
|
||||
async def apply_filters(self, blocked_streams, blocked_channels, filtered_streams, filtered_channels):
|
||||
if filtered_streams:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_streams), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if filtered_channels:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels, True), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if blocked_streams:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_streams), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if blocked_channels:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels, True), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
self.clear_caches()
|
||||
|
||||
def clear_caches(self):
|
||||
self.search_cache.clear()
|
||||
self.short_id_cache.clear()
|
||||
self.claim_cache.clear()
|
||||
self.resolution_cache.clear()
|
||||
|
||||
async def session_query(self, query_name, kwargs):
|
||||
offset, total = kwargs.get('offset', 0) if isinstance(kwargs, dict) else 0, 0
|
||||
total_referenced = []
|
||||
if query_name == 'resolve':
|
||||
total_referenced, response, censor = await self.resolve(*kwargs)
|
||||
else:
|
||||
cache_item = ResultCacheItem.from_cache(str(kwargs), self.search_cache)
|
||||
if cache_item.result is not None:
|
||||
return cache_item.result
|
||||
async with cache_item.lock:
|
||||
if cache_item.result:
|
||||
return cache_item.result
|
||||
censor = Censor(Censor.SEARCH)
|
||||
if kwargs.get('no_totals'):
|
||||
response, offset, total = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
|
||||
else:
|
||||
response, offset, total = await self.search(**kwargs)
|
||||
censor.apply(response)
|
||||
total_referenced.extend(response)
|
||||
if censor.censored:
|
||||
response, _, _ = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
|
||||
total_referenced.extend(response)
|
||||
result = Outputs.to_base64(
|
||||
response, await self._get_referenced_rows(total_referenced), offset, total, censor
|
||||
)
|
||||
cache_item.result = result
|
||||
return result
|
||||
return Outputs.to_base64(response, await self._get_referenced_rows(total_referenced), offset, total, censor)
|
||||
|
||||
async def resolve(self, *urls):
|
||||
censor = Censor(Censor.RESOLVE)
|
||||
results = [await self.resolve_url(url) for url in urls]
|
||||
# just heat the cache
|
||||
await self.populate_claim_cache(*filter(lambda x: isinstance(x, str), results))
|
||||
results = [self._get_from_cache_or_error(url, result) for url, result in zip(urls, results)]
|
||||
|
||||
censored = [
|
||||
result if not isinstance(result, dict) or not censor.censor(result)
|
||||
else ResolveCensoredError(url, result['censoring_channel_id'])
|
||||
for url, result in zip(urls, results)
|
||||
]
|
||||
return results, censored, censor
|
||||
|
||||
def _get_from_cache_or_error(self, url: str, resolution: Union[LookupError, StreamResolution, ChannelResolution]):
|
||||
cached = self.claim_cache.get(resolution)
|
||||
return cached or (resolution if isinstance(resolution, LookupError) else resolution.lookup_error(url))
|
||||
|
||||
async def get_many(self, *claim_ids):
|
||||
await self.populate_claim_cache(*claim_ids)
|
||||
return filter(None, map(self.claim_cache.get, claim_ids))
|
||||
|
||||
async def populate_claim_cache(self, *claim_ids):
|
||||
missing = [claim_id for claim_id in claim_ids if self.claim_cache.get(claim_id) is None]
|
||||
if missing:
|
||||
results = await self.search_client.mget(
|
||||
index=self.index, body={"ids": missing}
|
||||
)
|
||||
for result in expand_result(filter(lambda doc: doc['found'], results["docs"])):
|
||||
self.claim_cache.set(result['claim_id'], result)
|
||||
|
||||
async def full_id_from_short_id(self, name, short_id, channel_id=None):
|
||||
key = '#'.join((channel_id or '', name, short_id))
|
||||
if key not in self.short_id_cache:
|
||||
query = {'name': name, 'claim_id': short_id}
|
||||
if channel_id:
|
||||
query['channel_id'] = channel_id
|
||||
query['order_by'] = ['^channel_join']
|
||||
query['signature_valid'] = True
|
||||
else:
|
||||
query['order_by'] = '^creation_height'
|
||||
result, _, _ = await self.search(**query, limit=1)
|
||||
if len(result) == 1:
|
||||
result = result[0]['claim_id']
|
||||
self.short_id_cache[key] = result
|
||||
return self.short_id_cache.get(key, None)
|
||||
|
||||
async def search(self, **kwargs):
|
||||
if 'channel' in kwargs:
|
||||
kwargs['channel_id'] = await self.resolve_url(kwargs.pop('channel'))
|
||||
if not kwargs['channel_id'] or not isinstance(kwargs['channel_id'], str):
|
||||
return [], 0, 0
|
||||
try:
|
||||
return await self.search_ahead(**kwargs)
|
||||
except NotFoundError:
|
||||
return [], 0, 0
|
||||
return expand_result(result['hits']), 0, result.get('total', {}).get('value', 0)
|
||||
|
||||
async def search_ahead(self, **kwargs):
|
||||
# 'limit_claims_per_channel' case. Fetch 1000 results, reorder, slice, inflate and return
|
||||
per_channel_per_page = kwargs.pop('limit_claims_per_channel', 0) or 0
|
||||
remove_duplicates = kwargs.pop('remove_duplicates', False)
|
||||
page_size = kwargs.pop('limit', 10)
|
||||
offset = kwargs.pop('offset', 0)
|
||||
kwargs['limit'] = 1000
|
||||
cache_item = ResultCacheItem.from_cache(f"ahead{per_channel_per_page}{kwargs}", self.search_cache)
|
||||
if cache_item.result is not None:
|
||||
reordered_hits = cache_item.result
|
||||
else:
|
||||
async with cache_item.lock:
|
||||
if cache_item.result:
|
||||
reordered_hits = cache_item.result
|
||||
else:
|
||||
query = expand_query(**kwargs)
|
||||
search_hits = deque((await self.search_client.search(
|
||||
query, index=self.index, track_total_hits=False,
|
||||
_source_includes=['_id', 'channel_id', 'reposted_claim_id', 'creation_height']
|
||||
))['hits']['hits'])
|
||||
if remove_duplicates:
|
||||
search_hits = self.__remove_duplicates(search_hits)
|
||||
if per_channel_per_page > 0:
|
||||
reordered_hits = self.__search_ahead(search_hits, page_size, per_channel_per_page)
|
||||
else:
|
||||
reordered_hits = [(hit['_id'], hit['_source']['channel_id']) for hit in search_hits]
|
||||
cache_item.result = reordered_hits
|
||||
result = list(await self.get_many(*(claim_id for claim_id, _ in reordered_hits[offset:(offset + page_size)])))
|
||||
return result, 0, len(reordered_hits)
|
||||
|
||||
def __remove_duplicates(self, search_hits: deque) -> deque:
|
||||
known_ids = {} # claim_id -> (creation_height, hit_id), where hit_id is either reposted claim id or original
|
||||
dropped = set()
|
||||
for hit in search_hits:
|
||||
hit_height, hit_id = hit['_source']['creation_height'], hit['_source']['reposted_claim_id'] or hit['_id']
|
||||
if hit_id not in known_ids:
|
||||
known_ids[hit_id] = (hit_height, hit['_id'])
|
||||
else:
|
||||
previous_height, previous_id = known_ids[hit_id]
|
||||
if hit_height < previous_height:
|
||||
known_ids[hit_id] = (hit_height, hit['_id'])
|
||||
dropped.add(previous_id)
|
||||
else:
|
||||
dropped.add(hit['_id'])
|
||||
return deque(hit for hit in search_hits if hit['_id'] not in dropped)
|
||||
|
||||
def __search_ahead(self, search_hits: list, page_size: int, per_channel_per_page: int):
|
||||
reordered_hits = []
|
||||
channel_counters = Counter()
|
||||
next_page_hits_maybe_check_later = deque()
|
||||
while search_hits or next_page_hits_maybe_check_later:
|
||||
if reordered_hits and len(reordered_hits) % page_size == 0:
|
||||
channel_counters.clear()
|
||||
elif not reordered_hits:
|
||||
pass
|
||||
else:
|
||||
break # means last page was incomplete and we are left with bad replacements
|
||||
for _ in range(len(next_page_hits_maybe_check_later)):
|
||||
claim_id, channel_id = next_page_hits_maybe_check_later.popleft()
|
||||
if per_channel_per_page > 0 and channel_counters[channel_id] < per_channel_per_page:
|
||||
reordered_hits.append((claim_id, channel_id))
|
||||
channel_counters[channel_id] += 1
|
||||
else:
|
||||
next_page_hits_maybe_check_later.append((claim_id, channel_id))
|
||||
while search_hits:
|
||||
hit = search_hits.popleft()
|
||||
hit_id, hit_channel_id = hit['_id'], hit['_source']['channel_id']
|
||||
if hit_channel_id is None or per_channel_per_page <= 0:
|
||||
reordered_hits.append((hit_id, hit_channel_id))
|
||||
elif channel_counters[hit_channel_id] < per_channel_per_page:
|
||||
reordered_hits.append((hit_id, hit_channel_id))
|
||||
channel_counters[hit_channel_id] += 1
|
||||
if len(reordered_hits) % page_size == 0:
|
||||
break
|
||||
else:
|
||||
next_page_hits_maybe_check_later.append((hit_id, hit_channel_id))
|
||||
return reordered_hits
|
||||
|
||||
async def resolve_url(self, raw_url):
|
||||
if raw_url not in self.resolution_cache:
|
||||
self.resolution_cache[raw_url] = await self._resolve_url(raw_url)
|
||||
return self.resolution_cache[raw_url]
|
||||
|
||||
async def _resolve_url(self, raw_url):
|
||||
try:
|
||||
url = URL.parse(raw_url)
|
||||
except ValueError as e:
|
||||
return e
|
||||
|
||||
stream = LookupError(f'Could not find claim at "{raw_url}".')
|
||||
|
||||
channel_id = await self.resolve_channel_id(url)
|
||||
if isinstance(channel_id, LookupError):
|
||||
return channel_id
|
||||
stream = (await self.resolve_stream(url, channel_id if isinstance(channel_id, str) else None)) or stream
|
||||
if url.has_stream:
|
||||
return StreamResolution(stream)
|
||||
else:
|
||||
return ChannelResolution(channel_id)
|
||||
|
||||
async def resolve_channel_id(self, url: URL):
|
||||
if not url.has_channel:
|
||||
return
|
||||
if url.channel.is_fullid:
|
||||
return url.channel.claim_id
|
||||
if url.channel.is_shortid:
|
||||
channel_id = await self.full_id_from_short_id(url.channel.name, url.channel.claim_id)
|
||||
if not channel_id:
|
||||
return LookupError(f'Could not find channel in "{url}".')
|
||||
return channel_id
|
||||
|
||||
query = url.channel.to_dict()
|
||||
if set(query) == {'name'}:
|
||||
query['is_controlling'] = True
|
||||
else:
|
||||
query['order_by'] = ['^creation_height']
|
||||
matches, _, _ = await self.search(**query, limit=1)
|
||||
if matches:
|
||||
channel_id = matches[0]['claim_id']
|
||||
else:
|
||||
return LookupError(f'Could not find channel in "{url}".')
|
||||
return channel_id
|
||||
|
||||
async def resolve_stream(self, url: URL, channel_id: str = None):
|
||||
if not url.has_stream:
|
||||
return None
|
||||
if url.has_channel and channel_id is None:
|
||||
return None
|
||||
query = url.stream.to_dict()
|
||||
if url.stream.claim_id is not None:
|
||||
if url.stream.is_fullid:
|
||||
claim_id = url.stream.claim_id
|
||||
else:
|
||||
claim_id = await self.full_id_from_short_id(query['name'], query['claim_id'], channel_id)
|
||||
return claim_id
|
||||
|
||||
if channel_id is not None:
|
||||
if set(query) == {'name'}:
|
||||
# temporarily emulate is_controlling for claims in channel
|
||||
query['order_by'] = ['effective_amount', '^height']
|
||||
else:
|
||||
query['order_by'] = ['^channel_join']
|
||||
query['channel_id'] = channel_id
|
||||
query['signature_valid'] = True
|
||||
elif set(query) == {'name'}:
|
||||
query['is_controlling'] = True
|
||||
matches, _, _ = await self.search(**query, limit=1)
|
||||
if matches:
|
||||
return matches[0]['claim_id']
|
||||
|
||||
async def _get_referenced_rows(self, txo_rows: List[dict]):
|
||||
txo_rows = [row for row in txo_rows if isinstance(row, dict)]
|
||||
referenced_ids = set(filter(None, map(itemgetter('reposted_claim_id'), txo_rows)))
|
||||
referenced_ids |= set(filter(None, (row['channel_id'] for row in txo_rows)))
|
||||
referenced_ids |= set(filter(None, (row['censoring_channel_id'] for row in txo_rows)))
|
||||
|
||||
referenced_txos = []
|
||||
if referenced_ids:
|
||||
referenced_txos.extend(await self.get_many(*referenced_ids))
|
||||
referenced_ids = set(filter(None, (row['channel_id'] for row in referenced_txos)))
|
||||
|
||||
if referenced_ids:
|
||||
referenced_txos.extend(await self.get_many(*referenced_ids))
|
||||
|
||||
return referenced_txos
|
||||
|
||||
|
||||
def extract_doc(doc, index):
|
||||
doc['claim_id'] = doc.pop('claim_hash')[::-1].hex()
|
||||
if doc['reposted_claim_hash'] is not None:
|
||||
doc['reposted_claim_id'] = doc.pop('reposted_claim_hash')[::-1].hex()
|
||||
else:
|
||||
doc['reposted_claim_id'] = None
|
||||
channel_hash = doc.pop('channel_hash')
|
||||
doc['channel_id'] = channel_hash[::-1].hex() if channel_hash else channel_hash
|
||||
doc['censoring_channel_id'] = doc.get('censoring_channel_id')
|
||||
txo_hash = doc.pop('txo_hash')
|
||||
doc['tx_id'] = txo_hash[:32][::-1].hex()
|
||||
doc['tx_nout'] = struct.unpack('<I', txo_hash[32:])[0]
|
||||
doc['repost_count'] = doc.pop('reposted')
|
||||
doc['is_controlling'] = bool(doc['is_controlling'])
|
||||
doc['signature'] = (doc.pop('signature') or b'').hex() or None
|
||||
doc['signature_digest'] = (doc.pop('signature_digest') or b'').hex() or None
|
||||
doc['public_key_bytes'] = (doc.pop('public_key_bytes') or b'').hex() or None
|
||||
doc['public_key_id'] = (doc.pop('public_key_hash') or b'').hex() or None
|
||||
doc['is_signature_valid'] = bool(doc['signature_valid'])
|
||||
doc['claim_type'] = doc.get('claim_type', 0) or 0
|
||||
doc['stream_type'] = int(doc.get('stream_type', 0) or 0)
|
||||
doc['has_source'] = bool(doc['has_source'])
|
||||
doc['normalized_name'] = doc.pop('normalized')
|
||||
doc = {key: value for key, value in doc.items() if key in ALL_FIELDS}
|
||||
return {'doc': doc, '_id': doc['claim_id'], '_index': index, '_op_type': 'update', 'doc_as_upsert': True}
|
||||
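# Illustrative sketch (hypothetical helper, for exposition only): extract_doc above splits
# the 36-byte txo_hash into a reversed-hex tx_id and a little-endian tx_nout, and
# expand_result below reverses the transformation. This assumes `struct` and `unhexlify`
# are imported at the top of this module, as they are used elsewhere in it.
def _txo_hash_round_trip_sketch():
    tx_hash = bytes(range(32))                     # stand-in for a real 32-byte tx hash
    txo_hash = tx_hash + struct.pack('<I', 7)      # output index 7
    tx_id = txo_hash[:32][::-1].hex()              # same slicing as extract_doc
    tx_nout = struct.unpack('<I', txo_hash[32:])[0]
    # expand_result rebuilds the original bytes from these two fields:
    assert unhexlify(tx_id)[::-1] + struct.pack('<I', tx_nout) == txo_hash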
|
||||
|
||||
def expand_query(**kwargs):
|
||||
if "amount_order" in kwargs:
|
||||
kwargs["limit"] = 1
|
||||
kwargs["order_by"] = "effective_amount"
|
||||
kwargs["offset"] = int(kwargs["amount_order"]) - 1
|
||||
if 'name' in kwargs:
|
||||
kwargs['name'] = normalize_name(kwargs.pop('name'))
|
||||
if kwargs.get('is_controlling') is False:
|
||||
kwargs.pop('is_controlling')
|
||||
query = {'must': [], 'must_not': []}
|
||||
collapse = None
|
||||
for key, value in kwargs.items():
|
||||
key = key.replace('claim.', '')
|
||||
many = key.endswith('__in') or isinstance(value, list)
|
||||
if many and len(value) > 2048:
|
||||
raise TooManyClaimSearchParametersError(key, 2048)
|
||||
if many:
|
||||
key = key.replace('__in', '')
|
||||
value = list(filter(None, value))
|
||||
if value is None or isinstance(value, list) and len(value) == 0:
|
||||
continue
|
||||
key = REPLACEMENTS.get(key, key)
|
||||
if key in FIELDS:
|
||||
partial_id = False
|
||||
if key == 'claim_type':
|
||||
if isinstance(value, str):
|
||||
value = CLAIM_TYPES[value]
|
||||
else:
|
||||
value = [CLAIM_TYPES[claim_type] for claim_type in value]
|
||||
elif key == 'stream_type':
|
||||
value = STREAM_TYPES[value] if isinstance(value, str) else list(map(STREAM_TYPES.get, value))
|
||||
if key == '_id':
|
||||
if isinstance(value, Iterable):
|
||||
value = [item[::-1].hex() for item in value]
|
||||
else:
|
||||
value = value[::-1].hex()
|
||||
if not many and key in ('_id', 'claim_id') and len(value) < 20:
|
||||
partial_id = True
|
||||
if key == 'public_key_id':
|
||||
value = Base58.decode(value)[1:21].hex()
|
||||
if key in ('signature_valid', 'has_source'):
|
||||
continue # handled later
|
||||
if key in TEXT_FIELDS:
|
||||
key += '.keyword'
|
||||
ops = {'<=': 'lte', '>=': 'gte', '<': 'lt', '>': 'gt'}
|
||||
if partial_id:
|
||||
query['must'].append({"prefix": {"claim_id": value}})
|
||||
elif key in RANGE_FIELDS and isinstance(value, str) and value[0] in ops:
|
||||
operator_length = 2 if value[:2] in ops else 1
|
||||
operator, value = value[:operator_length], value[operator_length:]
|
||||
if key == 'fee_amount':
|
||||
value = str(Decimal(value)*1000)
|
||||
query['must'].append({"range": {key: {ops[operator]: value}}})
|
||||
elif many:
|
||||
query['must'].append({"terms": {key: value}})
|
||||
else:
|
||||
if key == 'fee_amount':
|
||||
value = str(Decimal(value)*1000)
|
||||
query['must'].append({"term": {key: {"value": value}}})
|
||||
elif key == 'not_channel_ids':
|
||||
for channel_id in value:
|
||||
query['must_not'].append({"term": {'channel_id.keyword': channel_id}})
|
||||
query['must_not'].append({"term": {'_id': channel_id}})
|
||||
elif key == 'channel_ids':
|
||||
query['must'].append({"terms": {'channel_id.keyword': value}})
|
||||
elif key == 'claim_ids':
|
||||
query['must'].append({"terms": {'claim_id.keyword': value}})
|
||||
elif key == 'media_types':
|
||||
query['must'].append({"terms": {'media_type.keyword': value}})
|
||||
elif key == 'any_languages':
query['must'].append({"terms": {'languages': clean_tags(value)}})
|
||||
elif key == 'all_languages':
|
||||
query['must'].extend([{"term": {'languages': tag}} for tag in value])
|
||||
elif key == 'any_tags':
|
||||
query['must'].append({"terms": {'tags.keyword': clean_tags(value)}})
|
||||
elif key == 'all_tags':
|
||||
query['must'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
|
||||
elif key == 'not_tags':
|
||||
query['must_not'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
|
||||
elif key == 'not_claim_id':
|
||||
query['must_not'].extend([{"term": {'claim_id.keyword': cid}} for cid in value])
|
||||
elif key == 'limit_claims_per_channel':
|
||||
collapse = ('channel_id.keyword', value)
|
||||
if kwargs.get('has_channel_signature'):
|
||||
query['must'].append({"exists": {"field": "signature_digest"}})
|
||||
if 'signature_valid' in kwargs:
|
||||
query['must'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
|
||||
elif 'signature_valid' in kwargs:
|
||||
query.setdefault('should', [])
|
||||
query["minimum_should_match"] = 1
|
||||
query['should'].append({"bool": {"must_not": {"exists": {"field": "signature_digest"}}}})
|
||||
query['should'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
|
||||
if 'has_source' in kwargs:
|
||||
query.setdefault('should', [])
|
||||
query["minimum_should_match"] = 1
|
||||
is_stream_or_repost = {"terms": {"claim_type": [CLAIM_TYPES['stream'], CLAIM_TYPES['repost']]}}
|
||||
query['should'].append(
|
||||
{"bool": {"must": [{"match": {"has_source": kwargs['has_source']}}, is_stream_or_repost]}})
|
||||
query['should'].append({"bool": {"must_not": [is_stream_or_repost]}})
|
||||
query['should'].append({"bool": {"must": [{"term": {"reposted_claim_type": CLAIM_TYPES['channel']}}]}})
|
||||
if kwargs.get('text'):
|
||||
query['must'].append(
|
||||
{"simple_query_string":
|
||||
{"query": kwargs["text"], "fields": [
|
||||
"claim_name^4", "channel_name^8", "title^1", "description^.5", "author^1", "tags^.5"
|
||||
]}})
|
||||
query = {
|
||||
"_source": {"excludes": ["description", "title"]},
|
||||
'query': {'bool': query},
|
||||
"sort": [],
|
||||
}
|
||||
if "limit" in kwargs:
|
||||
query["size"] = kwargs["limit"]
|
||||
if 'offset' in kwargs:
|
||||
query["from"] = kwargs["offset"]
|
||||
if 'order_by' in kwargs:
|
||||
if isinstance(kwargs["order_by"], str):
|
||||
kwargs["order_by"] = [kwargs["order_by"]]
|
||||
for value in kwargs['order_by']:
|
||||
if 'trending_group' in value:
|
||||
# fixme: trending_mixed is 0 for all records on variable decay, making sort slow.
|
||||
continue
|
||||
is_asc = value.startswith('^')
|
||||
value = value[1:] if is_asc else value
|
||||
value = REPLACEMENTS.get(value, value)
|
||||
if value in TEXT_FIELDS:
|
||||
value += '.keyword'
|
||||
query['sort'].append({value: "asc" if is_asc else "desc"})
|
||||
if collapse:
|
||||
query["collapse"] = {
|
||||
"field": collapse[0],
|
||||
"inner_hits": {
|
||||
"name": collapse[0],
|
||||
"size": collapse[1],
|
||||
"sort": query["sort"]
|
||||
}
|
||||
}
|
||||
return query
|
||||
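# Illustrative usage sketch (hypothetical helper, for exposition only): expand_query maps
# claim_search-style keyword arguments onto an Elasticsearch bool query; constants such as
# CLAIM_TYPES, FIELDS and REPLACEMENTS come from elsewhere in this package.
def _expand_query_sketch():
    body = expand_query(claim_type='stream', channel_ids=['0123abc'], order_by=['^height'], limit=10)
    # body is roughly:
    # {'_source': {'excludes': ['description', 'title']},
    #  'query': {'bool': {'must': [...claim_type and channel_id.keyword clauses...],
    #                     'must_not': []}},
    #  'sort': [{'height': 'asc'}],
    #  'size': 10}
    return body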
|
||||
|
||||
def expand_result(results):
|
||||
inner_hits = []
|
||||
expanded = []
|
||||
for result in results:
|
||||
if result.get("inner_hits"):
|
||||
for _, inner_hit in result["inner_hits"].items():
|
||||
inner_hits.extend(inner_hit["hits"]["hits"])
|
||||
continue
|
||||
result = result['_source']
|
||||
result['claim_hash'] = unhexlify(result['claim_id'])[::-1]
|
||||
if result['reposted_claim_id']:
|
||||
result['reposted_claim_hash'] = unhexlify(result['reposted_claim_id'])[::-1]
|
||||
else:
|
||||
result['reposted_claim_hash'] = None
|
||||
result['channel_hash'] = unhexlify(result['channel_id'])[::-1] if result['channel_id'] else None
|
||||
result['txo_hash'] = unhexlify(result['tx_id'])[::-1] + struct.pack('<I', result['tx_nout'])
|
||||
result['tx_hash'] = unhexlify(result['tx_id'])[::-1]
|
||||
result['reposted'] = result.pop('repost_count')
|
||||
result['signature_valid'] = result.pop('is_signature_valid')
|
||||
result['normalized'] = result.pop('normalized_name')
|
||||
expanded.append(result)
|
||||
if inner_hits:
|
||||
return expand_result(inner_hits)
|
||||
return expanded
|
||||
|
||||
|
||||
class ResultCacheItem:
|
||||
__slots__ = '_result', 'lock', 'has_result'
|
||||
|
||||
def __init__(self):
|
||||
self.has_result = asyncio.Event()
|
||||
self.lock = asyncio.Lock()
|
||||
self._result = None
|
||||
|
||||
@property
|
||||
def result(self) -> str:
|
||||
return self._result
|
||||
|
||||
@result.setter
|
||||
def result(self, result: str):
|
||||
self._result = result
|
||||
if result is not None:
|
||||
self.has_result.set()
|
||||
|
||||
@classmethod
|
||||
def from_cache(cls, cache_key, cache):
|
||||
cache_item = cache.get(cache_key)
|
||||
if cache_item is None:
|
||||
cache_item = cache[cache_key] = ResultCacheItem()
|
||||
return cache_item
|
|
@@ -1,126 +0,0 @@
|
|||
import argparse
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
from collections import namedtuple
|
||||
from multiprocessing import Process
|
||||
|
||||
import sqlite3
|
||||
from elasticsearch import AsyncElasticsearch
|
||||
from elasticsearch.helpers import async_bulk
|
||||
from lbry.wallet.server.env import Env
|
||||
from lbry.wallet.server.coin import LBC
|
||||
from lbry.wallet.server.db.elasticsearch.search import extract_doc, SearchIndex, IndexVersionMismatch
|
||||
|
||||
|
||||
async def get_all(db, shard_num, shards_total, limit=0, index_name='claims'):
|
||||
logging.info("shard %d starting", shard_num)
|
||||
|
||||
def namedtuple_factory(cursor, row):
|
||||
Row = namedtuple('Row', (d[0] for d in cursor.description))
|
||||
return Row(*row)
|
||||
db.row_factory = namedtuple_factory
|
||||
total = db.execute(f"select count(*) as total from claim where height % {shards_total} = {shard_num};").fetchone()[0]
|
||||
for num, claim in enumerate(db.execute(f"""
|
||||
SELECT claimtrie.claim_hash as is_controlling,
|
||||
claimtrie.last_take_over_height,
|
||||
(select group_concat(tag, ',,') from tag where tag.claim_hash in (claim.claim_hash, claim.reposted_claim_hash)) as tags,
|
||||
(select group_concat(language, ' ') from language where language.claim_hash in (claim.claim_hash, claim.reposted_claim_hash)) as languages,
|
||||
cr.has_source as reposted_has_source,
|
||||
cr.claim_type as reposted_claim_type,
|
||||
cr.stream_type as reposted_stream_type,
|
||||
cr.media_type as reposted_media_type,
|
||||
cr.duration as reposted_duration,
|
||||
cr.fee_amount as reposted_fee_amount,
|
||||
cr.fee_currency as reposted_fee_currency,
|
||||
claim.*
|
||||
FROM claim LEFT JOIN claimtrie USING (claim_hash) LEFT JOIN claim cr ON cr.claim_hash=claim.reposted_claim_hash
|
||||
WHERE claim.height % {shards_total} = {shard_num}
|
||||
ORDER BY claim.height desc
|
||||
""")):
|
||||
claim = dict(claim._asdict())
|
||||
claim['has_source'] = bool(claim.pop('reposted_has_source') or claim['has_source'])
|
||||
claim['stream_type'] = claim.pop('reposted_stream_type') or claim['stream_type']
|
||||
claim['media_type'] = claim.pop('reposted_media_type') or claim['media_type']
|
||||
claim['fee_amount'] = claim.pop('reposted_fee_amount') or claim['fee_amount']
|
||||
claim['fee_currency'] = claim.pop('reposted_fee_currency') or claim['fee_currency']
|
||||
claim['duration'] = claim.pop('reposted_duration') or claim['duration']
|
||||
claim['censor_type'] = 0
|
||||
claim['censoring_channel_id'] = None
|
||||
claim['tags'] = claim['tags'].split(',,') if claim['tags'] else []
|
||||
claim['languages'] = claim['languages'].split(' ') if claim['languages'] else []
|
||||
if num % 10_000 == 0:
|
||||
logging.info("%d/%d", num, total)
|
||||
yield extract_doc(claim, index_name)
|
||||
if 0 < limit <= num:
|
||||
break
|
||||
|
||||
|
||||
async def consume(producer, index_name):
|
||||
env = Env(LBC)
|
||||
logging.info("ES sync host: %s:%i", env.elastic_host, env.elastic_port)
|
||||
es = AsyncElasticsearch([{'host': env.elastic_host, 'port': env.elastic_port}])
|
||||
try:
|
||||
await async_bulk(es, producer, request_timeout=120)
|
||||
await es.indices.refresh(index=index_name)
|
||||
finally:
|
||||
await es.close()
|
||||
|
||||
|
||||
async def make_es_index(index=None):
|
||||
env = Env(LBC)
|
||||
if index is None:
|
||||
index = SearchIndex('', elastic_host=env.elastic_host, elastic_port=env.elastic_port)
|
||||
|
||||
try:
|
||||
return await index.start()
|
||||
except IndexVersionMismatch as err:
|
||||
logging.info(
|
||||
"dropping ES search index (version %s) for upgrade to version %s", err.got_version, err.expected_version
|
||||
)
|
||||
await index.delete_index()
|
||||
await index.stop()
|
||||
return await index.start()
|
||||
finally:
|
||||
await index.stop()
|
||||
|
||||
|
||||
async def run(db_path, clients, blocks, shard, index_name='claims'):
|
||||
db = sqlite3.connect(db_path, isolation_level=None, check_same_thread=False, uri=True)
|
||||
db.execute('pragma journal_mode=wal;')
|
||||
db.execute('pragma temp_store=memory;')
|
||||
producer = get_all(db, shard, clients, limit=blocks, index_name=index_name)
|
||||
await asyncio.gather(*(consume(producer, index_name=index_name) for _ in range(min(8, clients))))
|
||||
|
||||
|
||||
def __run(args, shard):
|
||||
asyncio.run(run(args.db_path, args.clients, args.blocks, shard))
|
||||
|
||||
|
||||
def run_elastic_sync():
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logging.getLogger('aiohttp').setLevel(logging.WARNING)
|
||||
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
|
||||
|
||||
logging.info('lbry.server starting')
|
||||
parser = argparse.ArgumentParser(prog="lbry-hub-elastic-sync")
|
||||
parser.add_argument("db_path", type=str)
|
||||
parser.add_argument("-c", "--clients", type=int, default=16)
|
||||
parser.add_argument("-b", "--blocks", type=int, default=0)
|
||||
parser.add_argument("-f", "--force", default=False, action='store_true')
|
||||
args = parser.parse_args()
|
||||
processes = []
|
||||
|
||||
if not args.force and not os.path.exists(args.db_path):
|
||||
logging.info("DB path doesnt exist")
|
||||
return
|
||||
|
||||
if not args.force and not asyncio.run(make_es_index()):
|
||||
logging.info("ES is already initialized")
|
||||
return
|
||||
for i in range(args.clients):
|
||||
processes.append(Process(target=__run, args=(args, i)))
|
||||
processes[-1].start()
|
||||
for process in processes:
|
||||
process.join()
|
||||
process.close()
|
|
@@ -1,9 +0,0 @@
|
|||
from . import zscore
|
||||
from . import ar
|
||||
from . import variable_decay
|
||||
|
||||
TRENDING_ALGORITHMS = {
|
||||
'zscore': zscore,
|
||||
'ar': ar,
|
||||
'variable_decay': variable_decay
|
||||
}
|
|
@@ -1,265 +0,0 @@
|
|||
import copy
|
||||
import math
|
||||
import time
|
||||
|
||||
# Half life in blocks
|
||||
HALF_LIFE = 134
|
||||
|
||||
# Decay coefficient per block
|
||||
DECAY = 0.5**(1.0/HALF_LIFE)
|
||||
|
||||
# How frequently to write trending values to the db
|
||||
SAVE_INTERVAL = 10
|
||||
|
||||
# Renormalisation interval
|
||||
RENORM_INTERVAL = 1000
|
||||
|
||||
# Assertion
|
||||
assert RENORM_INTERVAL % SAVE_INTERVAL == 0
|
||||
|
||||
# Decay coefficient per renormalisation interval
|
||||
DECAY_PER_RENORM = DECAY**(RENORM_INTERVAL)
|
||||
|
||||
# Log trending calculations?
|
||||
TRENDING_LOG = True
|
||||
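# A small worked check of the constants above (hypothetical helper, for exposition only):
# DECAY is chosen so a score halves every HALF_LIFE blocks, and DECAY_PER_RENORM is the
# decay accumulated between renormalisations; get_time_boost (defined below) undoes the
# per-block decay within a renormalisation window so freshly written scores stay comparable.
def _decay_constants_sketch():
    assert abs(DECAY ** HALF_LIFE - 0.5) < 1e-9                     # the half-life definition
    assert DECAY_PER_RENORM == DECAY ** RENORM_INTERVAL             # same expression as above
    assert abs(DECAY_PER_RENORM - 0.5 ** (RENORM_INTERVAL / HALF_LIFE)) < 1e-9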
|
||||
|
||||
def install(connection):
|
||||
"""
|
||||
Install the AR trending algorithm.
|
||||
"""
|
||||
check_trending_values(connection)
|
||||
|
||||
if TRENDING_LOG:
|
||||
f = open("trending_ar.log", "a")
|
||||
f.close()
|
||||
|
||||
# Stub
|
||||
CREATE_TREND_TABLE = ""
|
||||
|
||||
|
||||
def check_trending_values(connection):
|
||||
"""
|
||||
If the trending values appear to be based on the zscore algorithm,
|
||||
reset them. This will allow resyncing from a standard snapshot.
|
||||
"""
|
||||
c = connection.cursor()
|
||||
needs_reset = False
|
||||
for row in c.execute("SELECT COUNT(*) num FROM claim WHERE trending_global <> 0;"):
|
||||
if row[0] != 0:
|
||||
needs_reset = True
|
||||
break
|
||||
|
||||
if needs_reset:
|
||||
print("Resetting some columns. This might take a while...", flush=True, end="")
|
||||
c.execute(""" BEGIN;
|
||||
UPDATE claim SET trending_group = 0;
|
||||
UPDATE claim SET trending_mixed = 0;
|
||||
UPDATE claim SET trending_global = 0;
|
||||
UPDATE claim SET trending_local = 0;
|
||||
COMMIT;""")
|
||||
print("done.")
|
||||
|
||||
|
||||
def spike_height(trending_score, x, x_old, time_boost=1.0):
|
||||
"""
|
||||
Compute the size of a trending spike.
|
||||
"""
|
||||
|
||||
# Change in softened amount
|
||||
change_in_softened_amount = x**0.25 - x_old**0.25
|
||||
|
||||
# Softened change in amount
|
||||
delta = x - x_old
|
||||
softened_change_in_amount = abs(delta)**0.25
|
||||
|
||||
# Softened change in amount counts more for minnows
|
||||
if delta > 0.0:
|
||||
if trending_score >= 0.0:
|
||||
multiplier = 0.1/((trending_score/time_boost + softened_change_in_amount) + 1.0)
|
||||
softened_change_in_amount *= multiplier
|
||||
else:
|
||||
softened_change_in_amount *= -1.0
|
||||
|
||||
return time_boost*(softened_change_in_amount + change_in_softened_amount)
|
||||
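# A quick numeric sketch of spike_height above (hypothetical helper, for exposition only):
# a brand-new claim receiving 10 LBC gets a spike dominated by the change in the softened
# (fourth-root) amount, while the softened change term is damped by the minnow multiplier.
def _spike_height_sketch():
    spike = spike_height(trending_score=0.0, x=10.0, x_old=0.0, time_boost=1.0)
    # change_in_softened_amount = 10**0.25                          ~ 1.78
    # softened_change_in_amount = 10**0.25 * 0.1/(10**0.25 + 1.0)   ~ 0.06
    assert 1.8 < spike < 1.9
    return spike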
|
||||
|
||||
def get_time_boost(height):
|
||||
"""
|
||||
Return the time boost at a given height.
|
||||
"""
|
||||
return 1.0/DECAY**(height % RENORM_INTERVAL)
|
||||
|
||||
|
||||
def trending_log(s):
|
||||
"""
|
||||
Log a string.
|
||||
"""
|
||||
if TRENDING_LOG:
|
||||
fout = open("trending_ar.log", "a")
|
||||
fout.write(s)
|
||||
fout.flush()
|
||||
fout.close()
|
||||
|
||||
class TrendingData:
|
||||
"""
|
||||
An object of this class holds trending data
|
||||
"""
|
||||
def __init__(self):
|
||||
self.claims = {}
|
||||
|
||||
# Have all claims been read from db yet?
|
||||
self.initialised = False
|
||||
|
||||
def insert_claim_from_load(self, claim_hash, trending_score, total_amount):
|
||||
assert not self.initialised
|
||||
self.claims[claim_hash] = {"trending_score": trending_score,
|
||||
"total_amount": total_amount,
|
||||
"changed": False}
|
||||
|
||||
|
||||
def update_claim(self, claim_hash, total_amount, time_boost=1.0):
|
||||
"""
|
||||
Update trending data for a claim, given its new total amount.
|
||||
"""
|
||||
assert self.initialised
|
||||
|
||||
# Extract existing total amount and trending score
|
||||
# or use starting values if the claim is new
|
||||
if claim_hash in self.claims:
|
||||
old_state = copy.deepcopy(self.claims[claim_hash])
|
||||
else:
|
||||
old_state = {"trending_score": 0.0,
|
||||
"total_amount": 0.0,
|
||||
"changed": False}
|
||||
|
||||
# Calculate LBC change
|
||||
change = total_amount - old_state["total_amount"]
|
||||
|
||||
# Modify data if there was an LBC change
|
||||
if change != 0.0:
|
||||
spike = spike_height(old_state["trending_score"],
|
||||
total_amount,
|
||||
old_state["total_amount"],
|
||||
time_boost)
|
||||
trending_score = old_state["trending_score"] + spike
|
||||
self.claims[claim_hash] = {"total_amount": total_amount,
|
||||
"trending_score": trending_score,
|
||||
"changed": True}
|
||||
|
||||
|
||||
|
||||
def test_trending():
|
||||
"""
|
||||
Quick trending test for something receiving 10 LBC per block
|
||||
"""
|
||||
data = TrendingData()
|
||||
data.insert_claim_from_load("abc", 10.0, 1.0)
|
||||
data.initialised = True
|
||||
|
||||
for height in range(1, 5000):
|
||||
|
||||
if height % RENORM_INTERVAL == 0:
|
||||
data.claims["abc"]["trending_score"] *= DECAY_PER_RENORM
|
||||
|
||||
time_boost = get_time_boost(height)
|
||||
data.update_claim("abc", data.claims["abc"]["total_amount"] + 10.0,
|
||||
time_boost=time_boost)
|
||||
|
||||
|
||||
print(f'{height} {time_boost} {data.claims["abc"]["trending_score"]}')
|
||||
|
||||
|
||||
|
||||
# One global instance
|
||||
# pylint: disable=C0103
|
||||
trending_data = TrendingData()
|
||||
|
||||
def run(db, height, final_height, recalculate_claim_hashes):
|
||||
|
||||
if height < final_height - 5*HALF_LIFE:
|
||||
trending_log("Skipping AR trending at block {h}.\n".format(h=height))
|
||||
return
|
||||
|
||||
start = time.time()
|
||||
|
||||
trending_log("Calculating AR trending at block {h}.\n".format(h=height))
|
||||
trending_log(" Length of trending data = {l}.\n"\
|
||||
.format(l=len(trending_data.claims)))
|
||||
|
||||
# Renormalise trending scores and mark all as having changed
|
||||
if height % RENORM_INTERVAL == 0:
|
||||
trending_log(" Renormalising trending scores...")
|
||||
|
||||
keys = trending_data.claims.keys()
|
||||
for key in keys:
|
||||
if trending_data.claims[key]["trending_score"] != 0.0:
|
||||
trending_data.claims[key]["trending_score"] *= DECAY_PER_RENORM
|
||||
trending_data.claims[key]["changed"] = True
|
||||
|
||||
# Tiny becomes zero
|
||||
if abs(trending_data.claims[key]["trending_score"]) < 1E-9:
|
||||
trending_data.claims[key]["trending_score"] = 0.0
|
||||
|
||||
trending_log("done.\n")
|
||||
|
||||
|
||||
# Regular message.
|
||||
trending_log(" Reading total_amounts from db and updating"\
|
||||
+ " trending scores in RAM...")
|
||||
|
||||
# Get the value of the time boost
|
||||
time_boost = get_time_boost(height)
|
||||
|
||||
# Update claims from db
|
||||
if not trending_data.initialised:
|
||||
# On fresh launch
|
||||
for row in db.execute("""
|
||||
SELECT claim_hash, trending_mixed,
|
||||
(amount + support_amount)
|
||||
AS total_amount
|
||||
FROM claim;
|
||||
"""):
|
||||
trending_data.insert_claim_from_load(row[0], row[1], 1E-8*row[2])
|
||||
trending_data.initialised = True
|
||||
else:
|
||||
for row in db.execute(f"""
|
||||
SELECT claim_hash,
|
||||
(amount + support_amount)
|
||||
AS total_amount
|
||||
FROM claim
|
||||
WHERE claim_hash IN
|
||||
({','.join('?' for _ in recalculate_claim_hashes)});
|
||||
""", list(recalculate_claim_hashes)):
|
||||
trending_data.update_claim(row[0], 1E-8*row[1], time_boost)
|
||||
|
||||
trending_log("done.\n")
|
||||
|
||||
|
||||
# Write trending scores to DB
|
||||
if height % SAVE_INTERVAL == 0:
|
||||
|
||||
trending_log(" Writing trending scores to db...")
|
||||
|
||||
the_list = []
|
||||
keys = trending_data.claims.keys()
|
||||
for key in keys:
|
||||
if trending_data.claims[key]["changed"]:
|
||||
the_list.append((trending_data.claims[key]["trending_score"],
|
||||
key))
|
||||
trending_data.claims[key]["changed"] = False
|
||||
|
||||
trending_log("{n} scores to write...".format(n=len(the_list)))
|
||||
|
||||
db.executemany("UPDATE claim SET trending_mixed=? WHERE claim_hash=?;",
|
||||
the_list)
|
||||
|
||||
trending_log("done.\n")
|
||||
|
||||
trending_log("Trending operations took {time} seconds.\n\n"\
|
||||
.format(time=time.time() - start))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_trending()
|
|
@@ -1,485 +0,0 @@
|
|||
"""
|
||||
AR-like trending with a delayed effect and a faster
|
||||
decay rate for high valued claims.
|
||||
"""
|
||||
|
||||
import math
|
||||
import time
|
||||
import sqlite3
|
||||
|
||||
# Half life in blocks *for lower LBC claims* (it's shorter for whale claims)
|
||||
HALF_LIFE = 200
|
||||
|
||||
# Whale threshold, in LBC (higher -> less DB writing)
|
||||
WHALE_THRESHOLD = 10000.0
|
||||
|
||||
# Decay coefficient per block
|
||||
DECAY = 0.5**(1.0/HALF_LIFE)
|
||||
|
||||
# How frequently to write trending values to the db
|
||||
SAVE_INTERVAL = 10
|
||||
|
||||
# Renormalisation interval
|
||||
RENORM_INTERVAL = 1000
|
||||
|
||||
# Assertion
|
||||
assert RENORM_INTERVAL % SAVE_INTERVAL == 0
|
||||
|
||||
# Decay coefficient per renormalisation interval
|
||||
DECAY_PER_RENORM = DECAY**(RENORM_INTERVAL)
|
||||
|
||||
# Log trending calculations?
|
||||
TRENDING_LOG = True
|
||||
|
||||
|
||||
def install(connection):
|
||||
"""
|
||||
Install the trending algorithm.
|
||||
"""
|
||||
check_trending_values(connection)
|
||||
trending_data.initialise(connection.cursor())
|
||||
|
||||
if TRENDING_LOG:
|
||||
f = open("trending_variable_decay.log", "a")
|
||||
f.close()
|
||||
|
||||
# Stub
|
||||
CREATE_TREND_TABLE = ""
|
||||
|
||||
def check_trending_values(connection):
|
||||
"""
|
||||
If the trending values appear to be based on the zscore algorithm,
|
||||
reset them. This will allow resyncing from a standard snapshot.
|
||||
"""
|
||||
c = connection.cursor()
|
||||
needs_reset = False
|
||||
for row in c.execute("SELECT COUNT(*) num FROM claim WHERE trending_global <> 0;"):
|
||||
if row[0] != 0:
|
||||
needs_reset = True
|
||||
break
|
||||
|
||||
if needs_reset:
|
||||
print("Resetting some columns. This might take a while...", flush=True,
|
||||
end="")
|
||||
c.execute(""" BEGIN;
|
||||
UPDATE claim SET trending_group = 0;
|
||||
UPDATE claim SET trending_mixed = 0;
|
||||
COMMIT;""")
|
||||
print("done.")
|
||||
|
||||
|
||||
|
||||
|
||||
def trending_log(s):
|
||||
"""
|
||||
Log a string to the log file
|
||||
"""
|
||||
if TRENDING_LOG:
|
||||
fout = open("trending_variable_decay.log", "a")
|
||||
fout.write(s)
|
||||
fout.flush()
|
||||
fout.close()
|
||||
|
||||
|
||||
def trending_unit(height):
|
||||
"""
|
||||
Return the trending score unit at a given height.
|
||||
"""
|
||||
# The height rounded down to the start of a SAVE_INTERVAL batch is computed but not used;
# the unit simply undoes the per-block decay accumulated since the last renormalisation.
_height = height - (height % SAVE_INTERVAL)
return 1.0/DECAY**(height % RENORM_INTERVAL)
|
||||
|
||||
|
||||
class TrendingDB:
|
||||
"""
|
||||
An in-memory database of trending scores
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.conn = sqlite3.connect(":memory:", check_same_thread=False)
|
||||
self.cursor = self.conn.cursor()
|
||||
self.initialised = False
|
||||
self.write_needed = set()
|
||||
|
||||
def execute(self, query, *args, **kwargs):
|
||||
return self.conn.execute(query, *args, **kwargs)
|
||||
|
||||
def executemany(self, query, *args, **kwargs):
|
||||
return self.conn.executemany(query, *args, **kwargs)
|
||||
|
||||
def begin(self):
|
||||
self.execute("BEGIN;")
|
||||
|
||||
def commit(self):
|
||||
self.execute("COMMIT;")
|
||||
|
||||
def initialise(self, db):
|
||||
"""
|
||||
Pass in claims.db
|
||||
"""
|
||||
if self.initialised:
|
||||
return
|
||||
|
||||
trending_log("Initialising trending database...")
|
||||
|
||||
# The need for speed
|
||||
self.execute("PRAGMA JOURNAL_MODE=OFF;")
|
||||
self.execute("PRAGMA SYNCHRONOUS=0;")
|
||||
|
||||
self.begin()
|
||||
|
||||
# Create the tables
|
||||
self.execute("""
|
||||
CREATE TABLE IF NOT EXISTS claims
|
||||
(claim_hash BYTES PRIMARY KEY,
|
||||
lbc REAL NOT NULL DEFAULT 0.0,
|
||||
trending_score REAL NOT NULL DEFAULT 0.0)
|
||||
WITHOUT ROWID;""")
|
||||
|
||||
self.execute("""
|
||||
CREATE TABLE IF NOT EXISTS spikes
|
||||
(id INTEGER PRIMARY KEY,
|
||||
claim_hash BYTES NOT NULL,
|
||||
height INTEGER NOT NULL,
|
||||
mass REAL NOT NULL,
|
||||
FOREIGN KEY (claim_hash)
|
||||
REFERENCES claims (claim_hash));""")
|
||||
|
||||
# Clear out any existing data
|
||||
self.execute("DELETE FROM claims;")
|
||||
self.execute("DELETE FROM spikes;")
|
||||
|
||||
# Create indexes
|
||||
self.execute("CREATE INDEX idx1 ON spikes (claim_hash, height, mass);")
|
||||
self.execute("CREATE INDEX idx2 ON spikes (claim_hash, height, mass DESC);")
|
||||
self.execute("CREATE INDEX idx3 on claims (lbc DESC, claim_hash, trending_score);")
|
||||
|
||||
# Import data from claims.db
|
||||
for row in db.execute("""
|
||||
SELECT claim_hash,
|
||||
1E-8*(amount + support_amount) AS lbc,
|
||||
trending_mixed
|
||||
FROM claim;
|
||||
"""):
|
||||
self.execute("INSERT INTO claims VALUES (?, ?, ?);", row)
|
||||
self.commit()
|
||||
|
||||
self.initialised = True
|
||||
trending_log("done.\n")
|
||||
|
||||
def apply_spikes(self, height):
|
||||
"""
|
||||
Apply spikes that are due. This occurs inside a transaction.
|
||||
"""
|
||||
|
||||
spikes = []
|
||||
unit = trending_unit(height)
|
||||
for row in self.execute("""
|
||||
SELECT SUM(mass), claim_hash FROM spikes
|
||||
WHERE height = ?
|
||||
GROUP BY claim_hash;
|
||||
""", (height, )):
|
||||
spikes.append((row[0]*unit, row[1]))
|
||||
self.write_needed.add(row[1])
|
||||
|
||||
self.executemany("""
|
||||
UPDATE claims
|
||||
SET trending_score = (trending_score + ?)
|
||||
WHERE claim_hash = ?;
|
||||
""", spikes)
|
||||
self.execute("DELETE FROM spikes WHERE height = ?;", (height, ))
|
||||
|
||||
|
||||
def decay_whales(self, height):
|
||||
"""
|
||||
Occurs inside transaction.
|
||||
"""
|
||||
if height % SAVE_INTERVAL != 0:
|
||||
return
|
||||
|
||||
whales = self.execute("""
|
||||
SELECT trending_score, lbc, claim_hash
|
||||
FROM claims
|
||||
WHERE lbc >= ?;
|
||||
""", (WHALE_THRESHOLD, )).fetchall()
|
||||
whales2 = []
|
||||
for whale in whales:
|
||||
trending, lbc, claim_hash = whale
|
||||
|
||||
# Overall multiplication factor for decay rate
|
||||
# At WHALE_THRESHOLD, this is 1
|
||||
# At 10*WHALE_THRESHOLD, it is 3
|
||||
decay_rate_factor = 1.0 + 2.0*math.log10(lbc/WHALE_THRESHOLD)
|
||||
|
||||
# The -1 is because this is just the *extra* part being applied
|
||||
factor = (DECAY**SAVE_INTERVAL)**(decay_rate_factor - 1.0)
|
||||
|
||||
# Decay
|
||||
trending *= factor
|
||||
whales2.append((trending, claim_hash))
|
||||
self.write_needed.add(claim_hash)
|
||||
|
||||
self.executemany("UPDATE claims SET trending_score=? WHERE claim_hash=?;",
|
||||
whales2)
|
||||
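# Worked numbers for the whale decay above (for exposition only), using the constants at
# the top of this file:
#   lbc == WHALE_THRESHOLD        -> decay_rate_factor == 1.0 -> extra factor == 1.0 (no extra decay)
#   lbc == 10 * WHALE_THRESHOLD   -> decay_rate_factor == 3.0 -> extra factor == (DECAY**SAVE_INTERVAL)**2
# so a 100,000 LBC claim's trending score decays at three times the base rate between saves.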
|
||||
|
||||
def renorm(self, height):
|
||||
"""
|
||||
Renormalise trending scores. Occurs inside a transaction.
|
||||
"""
|
||||
|
||||
if height % RENORM_INTERVAL == 0:
|
||||
threshold = 1.0E-3/DECAY_PER_RENORM
|
||||
for row in self.execute("""SELECT claim_hash FROM claims
|
||||
WHERE ABS(trending_score) >= ?;""",
|
||||
(threshold, )):
|
||||
self.write_needed.add(row[0])
|
||||
|
||||
self.execute("""UPDATE claims SET trending_score = ?*trending_score
|
||||
WHERE ABS(trending_score) >= ?;""",
|
||||
(DECAY_PER_RENORM, threshold))
|
||||
|
||||
def write_to_claims_db(self, db, height):
|
||||
"""
|
||||
Write changed trending scores to claims.db.
|
||||
"""
|
||||
if height % SAVE_INTERVAL != 0:
|
||||
return
|
||||
|
||||
rows = self.execute(f"""
|
||||
SELECT trending_score, claim_hash
|
||||
FROM claims
|
||||
WHERE claim_hash IN
|
||||
({','.join('?' for _ in self.write_needed)});
|
||||
""", list(self.write_needed)).fetchall()
|
||||
|
||||
db.executemany("""UPDATE claim SET trending_mixed = ?
|
||||
WHERE claim_hash = ?;""", rows)
|
||||
|
||||
# Clear list of claims needing to be written to claims.db
|
||||
self.write_needed = set()
|
||||
|
||||
|
||||
def update(self, db, height, recalculate_claim_hashes):
|
||||
"""
|
||||
Update trending scores.
|
||||
Input is a cursor to claims.db, the block height, and the list of
|
||||
claims that changed.
|
||||
"""
|
||||
assert self.initialised
|
||||
|
||||
self.begin()
|
||||
self.renorm(height)
|
||||
|
||||
# Fetch changed/new claims from claims.db
|
||||
for row in db.execute(f"""
|
||||
SELECT claim_hash,
|
||||
1E-8*(amount + support_amount) AS lbc
|
||||
FROM claim
|
||||
WHERE claim_hash IN
|
||||
({','.join('?' for _ in recalculate_claim_hashes)});
|
||||
""", list(recalculate_claim_hashes)):
|
||||
claim_hash, lbc = row
|
||||
|
||||
# Insert into trending db if it does not exist
|
||||
self.execute("""
|
||||
INSERT INTO claims (claim_hash)
|
||||
VALUES (?)
|
||||
ON CONFLICT (claim_hash) DO NOTHING;""",
|
||||
(claim_hash, ))
|
||||
|
||||
# See if it was an LBC change
|
||||
old = self.execute("SELECT * FROM claims WHERE claim_hash=?;",
|
||||
(claim_hash, )).fetchone()
|
||||
lbc_old = old[1]
|
||||
|
||||
# Save new LBC value into trending db
|
||||
self.execute("UPDATE claims SET lbc = ? WHERE claim_hash = ?;",
|
||||
(lbc, claim_hash))
|
||||
|
||||
if lbc > lbc_old:
|
||||
|
||||
# Schedule a future spike
|
||||
delay = min(int((lbc + 1E-8)**0.4), HALF_LIFE)
|
||||
spike = (claim_hash, height + delay, spike_mass(lbc, lbc_old))
|
||||
self.execute("""INSERT INTO spikes
|
||||
(claim_hash, height, mass)
|
||||
VALUES (?, ?, ?);""", spike)
|
||||
|
||||
elif lbc < lbc_old:
|
||||
|
||||
# Subtract from future spikes
|
||||
penalty = spike_mass(lbc_old, lbc)
|
||||
spikes = self.execute("""
|
||||
SELECT * FROM spikes
|
||||
WHERE claim_hash = ?
|
||||
ORDER BY height ASC, mass DESC;
|
||||
""", (claim_hash, )).fetchall()
|
||||
for spike in spikes:
|
||||
spike_id, mass = spike[0], spike[3]
|
||||
|
||||
if mass > penalty:
|
||||
# The entire penalty merely reduces this spike
|
||||
self.execute("UPDATE spikes SET mass=? WHERE id=?;",
|
||||
(mass - penalty, spike_id))
|
||||
penalty = 0.0
|
||||
else:
|
||||
# Removing this spike entirely accounts for some (or
|
||||
# all) of the penalty, then move on to other spikes
|
||||
self.execute("DELETE FROM spikes WHERE id=?;",
|
||||
(spike_id, ))
|
||||
penalty -= mass
|
||||
|
||||
# If penalty remains, that's a negative spike to be applied
|
||||
# immediately.
|
||||
if penalty > 0.0:
|
||||
self.execute("""
|
||||
INSERT INTO spikes (claim_hash, height, mass)
|
||||
VALUES (?, ?, ?);""",
|
||||
(claim_hash, height, -penalty))
|
||||
|
||||
self.apply_spikes(height)
|
||||
self.decay_whales(height)
|
||||
self.commit()
|
||||
|
||||
self.write_to_claims_db(db, height)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# The "global" instance to work with
|
||||
# pylint: disable=C0103
|
||||
trending_data = TrendingDB()
|
||||
|
||||
def spike_mass(x, x_old):
|
||||
"""
|
||||
Compute the mass of a trending spike (normed - constant units).
|
||||
x_old = old LBC value
|
||||
x = new LBC value
|
||||
"""
|
||||
|
||||
# Sign of trending spike
|
||||
sign = 1.0
|
||||
if x < x_old:
|
||||
sign = -1.0
|
||||
|
||||
# Magnitude
|
||||
mag = abs(x**0.25 - x_old**0.25)
|
||||
|
||||
# Minnow boost
|
||||
mag *= 1.0 + 2E4/(x + 100.0)**2
|
||||
|
||||
return sign*mag
|
||||
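# A quick numeric sketch of spike_mass above (hypothetical helper, for exposition only):
# the fourth-root softening plus the minnow boost means a 10,000x larger support adds only
# a few times more trending mass.
def _spike_mass_sketch():
    small = spike_mass(1.0, 0.0)       # ~1.0  * (1 + 2E4/101**2)    ~ 2.96
    large = spike_mass(10000.0, 0.0)   # ~10.0 * (1 + 2E4/10100**2)  ~ 10.00
    assert 2.9 < small < 3.0
    assert 10.0 < large < 10.1
    return small, large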
|
||||
|
||||
def run(db, height, final_height, recalculate_claim_hashes):
|
||||
if height < final_height - 5*HALF_LIFE:
|
||||
trending_log(f"Skipping trending calculations at block {height}.\n")
|
||||
return
|
||||
|
||||
start = time.time()
|
||||
trending_log(f"Calculating variable_decay trending at block {height}.\n")
|
||||
trending_data.update(db, height, recalculate_claim_hashes)
|
||||
end = time.time()
|
||||
trending_log(f"Trending operations took {end - start} seconds.\n\n")
|
||||
|
||||
def test_trending():
|
||||
"""
|
||||
Quick trending test for claims with different support patterns.
|
||||
Actually use the run() function.
|
||||
"""
|
||||
|
||||
# Create a fake "claims.db" for testing
|
||||
# pylint: disable=I1101,C0415
import apsw  # imported locally, like matplotlib below; apsw cursors accept the multi-statement scripts used here
dbc = apsw.Connection(":memory:")
|
||||
db = dbc.cursor()
|
||||
|
||||
# Create table
|
||||
db.execute("""
|
||||
BEGIN;
|
||||
CREATE TABLE claim (claim_hash TEXT PRIMARY KEY,
|
||||
amount REAL NOT NULL DEFAULT 0.0,
|
||||
support_amount REAL NOT NULL DEFAULT 0.0,
|
||||
trending_mixed REAL NOT NULL DEFAULT 0.0);
|
||||
COMMIT;
|
||||
""")
|
||||
|
||||
# Initialise trending data before anything happens with the claims
|
||||
trending_data.initialise(db)
|
||||
|
||||
# Insert initial states of claims
|
||||
everything = {"huge_whale": 0.01, "medium_whale": 0.01, "small_whale": 0.01,
|
||||
"huge_whale_botted": 0.01, "minnow": 0.01}
|
||||
|
||||
def to_list_of_tuples(stuff):
|
||||
l = []
|
||||
for key in stuff:
|
||||
l.append((key, stuff[key]))
|
||||
return l
|
||||
|
||||
db.executemany("""
|
||||
INSERT INTO claim (claim_hash, amount) VALUES (?, 1E8*?);
|
||||
""", to_list_of_tuples(everything))
|
||||
|
||||
# Process block zero
|
||||
height = 0
|
||||
run(db, height, height, everything.keys())
|
||||
|
||||
# Save trajectories for plotting
|
||||
trajectories = {}
|
||||
for row in trending_data.execute("""
|
||||
SELECT claim_hash, trending_score
|
||||
FROM claims;
|
||||
"""):
|
||||
trajectories[row[0]] = [row[1]/trending_unit(height)]
|
||||
|
||||
# Main loop
|
||||
for height in range(1, 1000):
|
||||
|
||||
# One-off supports
|
||||
if height == 1:
|
||||
everything["huge_whale"] += 5E5
|
||||
everything["medium_whale"] += 5E4
|
||||
everything["small_whale"] += 5E3
|
||||
|
||||
# Every block
|
||||
if height < 500:
|
||||
everything["huge_whale_botted"] += 5E5/500
|
||||
everything["minnow"] += 1
|
||||
|
||||
# Remove supports
|
||||
if height == 500:
|
||||
for key in everything:
|
||||
everything[key] = 0.01
|
||||
|
||||
# Whack into the db
|
||||
db.executemany("""
|
||||
UPDATE claim SET amount = 1E8*? WHERE claim_hash = ?;
|
||||
""", [(y, x) for (x, y) in to_list_of_tuples(everything)])
|
||||
|
||||
# Call run()
|
||||
run(db, height, height, everything.keys())
|
||||
|
||||
# Append current trending scores to trajectories
|
||||
for row in db.execute("""
|
||||
SELECT claim_hash, trending_mixed
|
||||
FROM claim;
|
||||
"""):
|
||||
trajectories[row[0]].append(row[1]/trending_unit(height))
|
||||
|
||||
dbc.close()
|
||||
|
||||
# pylint: disable=C0415
|
||||
import matplotlib.pyplot as plt
|
||||
for key in trajectories:
|
||||
plt.plot(trajectories[key], label=key)
|
||||
plt.legend()
|
||||
plt.show()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_trending()
|
|
@@ -1,119 +0,0 @@
|
|||
from math import sqrt
|
||||
|
||||
# TRENDING_WINDOW is the number of blocks in ~6hr period (21600 seconds / 161 seconds per block)
|
||||
TRENDING_WINDOW = 134
|
||||
|
||||
# TRENDING_DATA_POINTS says how many samples to use for the trending algorithm
|
||||
# i.e. only consider claims from the most recent (TRENDING_WINDOW * TRENDING_DATA_POINTS) blocks
|
||||
TRENDING_DATA_POINTS = 28
|
||||
|
||||
CREATE_TREND_TABLE = """
|
||||
create table if not exists trend (
|
||||
claim_hash bytes not null,
|
||||
height integer not null,
|
||||
amount integer not null,
|
||||
primary key (claim_hash, height)
|
||||
) without rowid;
|
||||
"""
|
||||
|
||||
|
||||
class ZScore:
|
||||
__slots__ = 'count', 'total', 'power', 'last'
|
||||
|
||||
def __init__(self):
|
||||
self.count = 0
|
||||
self.total = 0
|
||||
self.power = 0
|
||||
self.last = None
|
||||
|
||||
def step(self, value):
|
||||
if self.last is not None:
|
||||
self.count += 1
|
||||
self.total += self.last
|
||||
self.power += self.last ** 2
|
||||
self.last = value
|
||||
|
||||
@property
|
||||
def mean(self):
|
||||
return self.total / self.count
|
||||
|
||||
@property
|
||||
def standard_deviation(self):
|
||||
value = (self.power / self.count) - self.mean ** 2
|
||||
return sqrt(value) if value > 0 else 0
|
||||
|
||||
def finalize(self):
|
||||
if self.count == 0:
|
||||
return self.last
|
||||
return (self.last - self.mean) / (self.standard_deviation or 1)
|
||||
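# A minimal usage sketch of ZScore above (hypothetical helper, for exposition only).
# install() below registers it as the SQLite aggregate zscore(amount); driven by hand,
# the final sample is scored against the mean and deviation of all earlier samples.
def _zscore_sketch():
    z = ZScore()
    for amount in (1, 1, 1, 2):
        z.step(amount)
    assert z.mean == 1 and z.standard_deviation == 0
    assert z.finalize() == 1.0   # (2 - 1) / (0 or 1)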
|
||||
|
||||
def install(connection):
|
||||
connection.create_aggregate("zscore", 1, ZScore)
|
||||
connection.executescript(CREATE_TREND_TABLE)
|
||||
|
||||
|
||||
def run(db, height, final_height, affected_claims):
|
||||
# don't start tracking until we're at the end of initial sync
|
||||
if height < (final_height - (TRENDING_WINDOW * TRENDING_DATA_POINTS)):
|
||||
return
|
||||
|
||||
if height % TRENDING_WINDOW != 0:
|
||||
return
|
||||
|
||||
db.execute(f"""
|
||||
DELETE FROM trend WHERE height < {height - (TRENDING_WINDOW * TRENDING_DATA_POINTS)}
|
||||
""")
|
||||
|
||||
start = (height - TRENDING_WINDOW) + 1
|
||||
db.execute(f"""
|
||||
INSERT OR IGNORE INTO trend (claim_hash, height, amount)
|
||||
SELECT claim_hash, {start}, COALESCE(
|
||||
(SELECT SUM(amount) FROM support WHERE claim_hash=claim.claim_hash
|
||||
AND height >= {start}), 0
|
||||
) AS support_sum
|
||||
FROM claim WHERE support_sum > 0
|
||||
""")
|
||||
|
||||
zscore = ZScore()
|
||||
for global_sum in db.execute("SELECT AVG(amount) AS avg_amount FROM trend GROUP BY height"):
|
||||
zscore.step(global_sum.avg_amount)
|
||||
global_mean, global_deviation = 0, 1
|
||||
if zscore.count > 0:
|
||||
global_mean = zscore.mean
|
||||
global_deviation = zscore.standard_deviation
|
||||
|
||||
db.execute(f"""
|
||||
UPDATE claim SET
|
||||
trending_local = COALESCE((
|
||||
SELECT zscore(amount) FROM trend
|
||||
WHERE claim_hash=claim.claim_hash ORDER BY height DESC
|
||||
), 0),
|
||||
trending_global = COALESCE((
|
||||
SELECT (amount - {global_mean}) / {global_deviation} FROM trend
|
||||
WHERE claim_hash=claim.claim_hash AND height = {start}
|
||||
), 0),
|
||||
trending_group = 0,
|
||||
trending_mixed = 0
|
||||
""")
|
||||
|
||||
# trending_group and trending_mixed determine how trending will show in query results
|
||||
# normally the SQL will be: "ORDER BY trending_group, trending_mixed"
|
||||
# changing the trending_group will have significant impact on trending results
|
||||
# changing the value used for trending_mixed will only impact trending within a trending_group
|
||||
db.execute(f"""
|
||||
UPDATE claim SET
|
||||
trending_group = CASE
|
||||
WHEN trending_local > 0 AND trending_global > 0 THEN 4
|
||||
WHEN trending_local <= 0 AND trending_global > 0 THEN 3
|
||||
WHEN trending_local > 0 AND trending_global <= 0 THEN 2
|
||||
WHEN trending_local <= 0 AND trending_global <= 0 THEN 1
|
||||
END,
|
||||
trending_mixed = CASE
|
||||
WHEN trending_local > 0 AND trending_global > 0 THEN trending_global
|
||||
WHEN trending_local <= 0 AND trending_global > 0 THEN trending_local
|
||||
WHEN trending_local > 0 AND trending_global <= 0 THEN trending_local
|
||||
WHEN trending_local <= 0 AND trending_global <= 0 THEN trending_global
|
||||
END
|
||||
WHERE trending_local <> 0 OR trending_global <> 0
|
||||
""")
|
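# For exposition only: a consumer of these columns orders results roughly as the comment
# above describes, e.g.
#   SELECT claim_id FROM claim
#   ORDER BY trending_group DESC, trending_mixed DESC
#   LIMIT 20;
# so group 4 (trending both locally and globally) sorts ahead of groups 3, 2 and 1, and
# trending_mixed only breaks ties within a group.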
|
@@ -1,994 +0,0 @@
|
|||
import os
|
||||
|
||||
import sqlite3
|
||||
from typing import Union, Tuple, Set, List
|
||||
from itertools import chain
|
||||
from decimal import Decimal
|
||||
from collections import namedtuple
|
||||
from binascii import unhexlify, hexlify
|
||||
from lbry.wallet.server.leveldb import LevelDB
|
||||
from lbry.wallet.server.util import class_logger
|
||||
from lbry.wallet.database import query, constraints_to_sql
|
||||
|
||||
from lbry.schema.tags import clean_tags
|
||||
from lbry.schema.mime_types import guess_stream_type
|
||||
from lbry.wallet import Ledger, RegTestLedger
|
||||
from lbry.wallet.transaction import Transaction, Output
|
||||
from lbry.wallet.server.db.canonical import register_canonical_functions
|
||||
from lbry.wallet.server.db.trending import TRENDING_ALGORITHMS
|
||||
|
||||
from .common import CLAIM_TYPES, STREAM_TYPES, COMMON_TAGS, INDEXED_LANGUAGES
|
||||
from lbry.wallet.server.db.elasticsearch import SearchIndex
|
||||
|
||||
ATTRIBUTE_ARRAY_MAX_LENGTH = 100
|
||||
sqlite3.enable_callback_tracebacks(True)
|
||||
|
||||
|
||||
class SQLDB:
|
||||
|
||||
PRAGMAS = """
|
||||
pragma journal_mode=WAL;
|
||||
"""
|
||||
|
||||
CREATE_CLAIM_TABLE = """
|
||||
create table if not exists claim (
|
||||
claim_hash bytes primary key,
|
||||
claim_id text not null,
|
||||
claim_name text not null,
|
||||
normalized text not null,
|
||||
txo_hash bytes not null,
|
||||
tx_position integer not null,
|
||||
amount integer not null,
|
||||
timestamp integer not null, -- last updated timestamp
|
||||
creation_timestamp integer not null,
|
||||
height integer not null, -- last updated height
|
||||
creation_height integer not null,
|
||||
activation_height integer,
|
||||
expiration_height integer not null,
|
||||
release_time integer not null,
|
||||
|
||||
short_url text not null, -- normalized#shortest-unique-claim_id
|
||||
canonical_url text, -- channel's-short_url/normalized#shortest-unique-claim_id-within-channel
|
||||
|
||||
title text,
|
||||
author text,
|
||||
description text,
|
||||
|
||||
claim_type integer,
|
||||
has_source bool,
|
||||
reposted integer default 0,
|
||||
|
||||
-- streams
|
||||
stream_type text,
|
||||
media_type text,
|
||||
fee_amount integer default 0,
|
||||
fee_currency text,
|
||||
duration integer,
|
||||
|
||||
-- reposts
|
||||
reposted_claim_hash bytes,
|
||||
|
||||
-- claims which are channels
|
||||
public_key_bytes bytes,
|
||||
public_key_hash bytes,
|
||||
claims_in_channel integer,
|
||||
|
||||
-- claims which are inside channels
|
||||
channel_hash bytes,
|
||||
channel_join integer, -- height at which claim got valid signature / joined channel
|
||||
signature bytes,
|
||||
signature_digest bytes,
|
||||
signature_valid bool,
|
||||
|
||||
effective_amount integer not null default 0,
|
||||
support_amount integer not null default 0,
|
||||
trending_group integer not null default 0,
|
||||
trending_mixed integer not null default 0,
|
||||
trending_local integer not null default 0,
|
||||
trending_global integer not null default 0
|
||||
);
|
||||
|
||||
create index if not exists claim_normalized_idx on claim (normalized, activation_height);
|
||||
create index if not exists claim_channel_hash_idx on claim (channel_hash, signature, claim_hash);
|
||||
create index if not exists claim_claims_in_channel_idx on claim (signature_valid, channel_hash, normalized);
|
||||
create index if not exists claim_txo_hash_idx on claim (txo_hash);
|
||||
create index if not exists claim_activation_height_idx on claim (activation_height, claim_hash);
|
||||
create index if not exists claim_expiration_height_idx on claim (expiration_height);
|
||||
create index if not exists claim_reposted_claim_hash_idx on claim (reposted_claim_hash);
|
||||
"""
|
||||
|
||||
CREATE_SUPPORT_TABLE = """
|
||||
create table if not exists support (
|
||||
txo_hash bytes primary key,
|
||||
tx_position integer not null,
|
||||
height integer not null,
|
||||
claim_hash bytes not null,
|
||||
amount integer not null
|
||||
);
|
||||
create index if not exists support_claim_hash_idx on support (claim_hash, height);
|
||||
"""
|
||||
|
||||
CREATE_TAG_TABLE = """
|
||||
create table if not exists tag (
|
||||
tag text not null,
|
||||
claim_hash bytes not null,
|
||||
height integer not null
|
||||
);
|
||||
create unique index if not exists tag_claim_hash_tag_idx on tag (claim_hash, tag);
|
||||
"""
|
||||
|
||||
CREATE_LANGUAGE_TABLE = """
|
||||
create table if not exists language (
|
||||
language text not null,
|
||||
claim_hash bytes not null,
|
||||
height integer not null
|
||||
);
|
||||
create unique index if not exists language_claim_hash_language_idx on language (claim_hash, language);
|
||||
"""
|
||||
|
||||
CREATE_CLAIMTRIE_TABLE = """
|
||||
create table if not exists claimtrie (
|
||||
normalized text primary key,
|
||||
claim_hash bytes not null,
|
||||
last_take_over_height integer not null
|
||||
);
|
||||
create index if not exists claimtrie_claim_hash_idx on claimtrie (claim_hash);
|
||||
"""
|
||||
|
||||
CREATE_CHANGELOG_TRIGGER = """
|
||||
create table if not exists changelog (
|
||||
claim_hash bytes primary key
|
||||
);
|
||||
create index if not exists claimtrie_claim_hash_idx on claimtrie (claim_hash);
|
||||
create trigger if not exists claim_changelog after update on claim
|
||||
begin
|
||||
insert or ignore into changelog (claim_hash) values (new.claim_hash);
|
||||
end;
|
||||
create trigger if not exists claimtrie_changelog after update on claimtrie
|
||||
begin
|
||||
insert or ignore into changelog (claim_hash) values (new.claim_hash);
|
||||
insert or ignore into changelog (claim_hash) values (old.claim_hash);
|
||||
end;
|
||||
"""
|
||||
|
||||
SEARCH_INDEXES = """
|
||||
-- used by any tag clouds
|
||||
create index if not exists tag_tag_idx on tag (tag, claim_hash);
|
||||
|
||||
-- naked order bys (no filters)
|
||||
create unique index if not exists claim_release_idx on claim (release_time, claim_hash);
|
||||
create unique index if not exists claim_trending_idx on claim (trending_group, trending_mixed, claim_hash);
|
||||
create unique index if not exists claim_effective_amount_idx on claim (effective_amount, claim_hash);
|
||||
|
||||
-- claim_type filter + order by
|
||||
create unique index if not exists claim_type_release_idx on claim (release_time, claim_type, claim_hash);
|
||||
create unique index if not exists claim_type_trending_idx on claim (trending_group, trending_mixed, claim_type, claim_hash);
|
||||
create unique index if not exists claim_type_effective_amount_idx on claim (effective_amount, claim_type, claim_hash);
|
||||
|
||||
-- stream_type filter + order by
|
||||
create unique index if not exists stream_type_release_idx on claim (stream_type, release_time, claim_hash);
|
||||
create unique index if not exists stream_type_trending_idx on claim (stream_type, trending_group, trending_mixed, claim_hash);
|
||||
create unique index if not exists stream_type_effective_amount_idx on claim (stream_type, effective_amount, claim_hash);
|
||||
|
||||
-- channel_hash filter + order by
|
||||
create unique index if not exists channel_hash_release_idx on claim (channel_hash, release_time, claim_hash);
|
||||
create unique index if not exists channel_hash_trending_idx on claim (channel_hash, trending_group, trending_mixed, claim_hash);
|
||||
create unique index if not exists channel_hash_effective_amount_idx on claim (channel_hash, effective_amount, claim_hash);
|
||||
|
||||
-- duration filter + order by
|
||||
create unique index if not exists duration_release_idx on claim (duration, release_time, claim_hash);
|
||||
create unique index if not exists duration_trending_idx on claim (duration, trending_group, trending_mixed, claim_hash);
|
||||
create unique index if not exists duration_effective_amount_idx on claim (duration, effective_amount, claim_hash);
|
||||
|
||||
-- fee_amount + order by
|
||||
create unique index if not exists fee_amount_release_idx on claim (fee_amount, release_time, claim_hash);
|
||||
create unique index if not exists fee_amount_trending_idx on claim (fee_amount, trending_group, trending_mixed, claim_hash);
|
||||
create unique index if not exists fee_amount_effective_amount_idx on claim (fee_amount, effective_amount, claim_hash);
|
||||
|
||||
-- TODO: verify that all indexes below are used
|
||||
create index if not exists claim_height_normalized_idx on claim (height, normalized asc);
|
||||
create index if not exists claim_resolve_idx on claim (normalized, claim_id);
|
||||
create index if not exists claim_id_idx on claim (claim_id, claim_hash);
|
||||
create index if not exists claim_timestamp_idx on claim (timestamp);
|
||||
create index if not exists claim_public_key_hash_idx on claim (public_key_hash);
|
||||
create index if not exists claim_signature_valid_idx on claim (signature_valid);
|
||||
"""
|
||||
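# For exposition only: each composite index block above mirrors one common claim_search
# shape; for example claim_type_effective_amount_idx lets a query like
#   SELECT claim_hash FROM claim WHERE claim_type = :claim_type
#   ORDER BY effective_amount DESC LIMIT 20;
# walk the index in effective_amount order and filter claim_type from the index itself,
# avoiding a separate sort over the claim table.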
|
||||
TAG_INDEXES = '\n'.join(
|
||||
f"create unique index if not exists tag_{tag_key}_idx on tag (tag, claim_hash) WHERE tag='{tag_value}';"
|
||||
for tag_value, tag_key in COMMON_TAGS.items()
|
||||
)
|
||||
|
||||
LANGUAGE_INDEXES = '\n'.join(
|
||||
f"create unique index if not exists language_{language}_idx on language (language, claim_hash) WHERE language='{language}';"
|
||||
for language in INDEXED_LANGUAGES
|
||||
)
|
||||
|
||||
CREATE_TABLES_QUERY = (
|
||||
CREATE_CLAIM_TABLE +
|
||||
CREATE_SUPPORT_TABLE +
|
||||
CREATE_CLAIMTRIE_TABLE +
|
||||
CREATE_TAG_TABLE +
|
||||
CREATE_CHANGELOG_TRIGGER +
|
||||
CREATE_LANGUAGE_TABLE
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self, main, path: str, blocking_channels: list, filtering_channels: list, trending: list):
|
||||
self.main = main
|
||||
self._db_path = path
|
||||
self.db = None
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
self.ledger = Ledger if main.coin.NET == 'mainnet' else RegTestLedger
|
||||
self.blocked_streams = None
|
||||
self.blocked_channels = None
|
||||
self.blocking_channel_hashes = {
|
||||
unhexlify(channel_id)[::-1] for channel_id in blocking_channels if channel_id
|
||||
}
|
||||
self.filtered_streams = None
|
||||
self.filtered_channels = None
|
||||
self.filtering_channel_hashes = {
|
||||
unhexlify(channel_id)[::-1] for channel_id in filtering_channels if channel_id
|
||||
}
|
||||
self.trending = trending
|
||||
self.pending_deletes = set()
|
||||
|
||||
def open(self):
|
||||
self.db = sqlite3.connect(self._db_path, isolation_level=None, check_same_thread=False, uri=True)
|
||||
|
||||
def namedtuple_factory(cursor, row):
|
||||
Row = namedtuple('Row', (d[0] for d in cursor.description))
|
||||
return Row(*row)
|
||||
self.db.row_factory = namedtuple_factory
|
||||
self.db.executescript(self.PRAGMAS)
|
||||
self.db.executescript(self.CREATE_TABLES_QUERY)
|
||||
register_canonical_functions(self.db)
|
||||
self.blocked_streams = {}
|
||||
self.blocked_channels = {}
|
||||
self.filtered_streams = {}
|
||||
self.filtered_channels = {}
|
||||
self.update_blocked_and_filtered_claims()
|
||||
for algorithm in self.trending:
|
||||
algorithm.install(self.db)
|
||||
|
||||
def close(self):
|
||||
if self.db is not None:
|
||||
self.db.close()
|
||||
|
||||
def update_blocked_and_filtered_claims(self):
|
||||
self.update_claims_from_channel_hashes(
|
||||
self.blocked_streams, self.blocked_channels, self.blocking_channel_hashes
|
||||
)
|
||||
self.update_claims_from_channel_hashes(
|
||||
self.filtered_streams, self.filtered_channels, self.filtering_channel_hashes
|
||||
)
|
||||
self.filtered_streams.update(self.blocked_streams)
|
||||
self.filtered_channels.update(self.blocked_channels)
|
||||
|
||||
def update_claims_from_channel_hashes(self, shared_streams, shared_channels, channel_hashes):
|
||||
streams, channels = {}, {}
|
||||
if channel_hashes:
|
||||
sql = query(
|
||||
"SELECT repost.channel_hash, repost.reposted_claim_hash, target.claim_type "
|
||||
"FROM claim as repost JOIN claim AS target ON (target.claim_hash=repost.reposted_claim_hash)", **{
|
||||
'repost.reposted_claim_hash__is_not_null': 1,
|
||||
'repost.channel_hash__in': channel_hashes
|
||||
}
|
||||
)
|
||||
for blocked_claim in self.execute(*sql):
|
||||
if blocked_claim.claim_type == CLAIM_TYPES['stream']:
|
||||
streams[blocked_claim.reposted_claim_hash] = blocked_claim.channel_hash
|
||||
elif blocked_claim.claim_type == CLAIM_TYPES['channel']:
|
||||
channels[blocked_claim.reposted_claim_hash] = blocked_claim.channel_hash
|
||||
shared_streams.clear()
|
||||
shared_streams.update(streams)
|
||||
shared_channels.clear()
|
||||
shared_channels.update(channels)
|
||||
|
||||
@staticmethod
|
||||
def _insert_sql(table: str, data: dict) -> Tuple[str, list]:
|
||||
columns, values = [], []
|
||||
for column, value in data.items():
|
||||
columns.append(column)
|
||||
values.append(value)
|
||||
sql = (
|
||||
f"INSERT INTO {table} ({', '.join(columns)}) "
|
||||
f"VALUES ({', '.join(['?'] * len(values))})"
|
||||
)
|
||||
return sql, values
|
||||
|
||||
@staticmethod
|
||||
def _update_sql(table: str, data: dict, where: str,
|
||||
constraints: Union[list, tuple]) -> Tuple[str, list]:
|
||||
columns, values = [], []
|
||||
for column, value in data.items():
|
||||
columns.append(f"{column} = ?")
|
||||
values.append(value)
|
||||
values.extend(constraints)
|
||||
return f"UPDATE {table} SET {', '.join(columns)} WHERE {where}", values
|
||||
|
||||
@staticmethod
|
||||
def _delete_sql(table: str, constraints: dict) -> Tuple[str, dict]:
|
||||
where, values = constraints_to_sql(constraints)
|
||||
return f"DELETE FROM {table} WHERE {where}", values
|
||||
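# For exposition only, the helpers above build parameterised SQL from plain dicts
# (b'..' stands in for a real claim hash), e.g.
#   SQLDB._insert_sql('tag', {'tag': 'music', 'claim_hash': b'..', 'height': 42})
#     -> ("INSERT INTO tag (tag, claim_hash, height) VALUES (?, ?, ?)", ['music', b'..', 42])
#   SQLDB._update_sql('claim', {'reposted': 3}, 'claim_hash = ?', [b'..'])
#     -> ("UPDATE claim SET reposted = ? WHERE claim_hash = ?", [3, b'..'])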
|
||||
def execute(self, *args):
|
||||
return self.db.execute(*args)
|
||||
|
||||
def executemany(self, *args):
|
||||
return self.db.executemany(*args)
|
||||
|
||||
def begin(self):
|
||||
self.execute('begin;')
|
||||
|
||||
def commit(self):
|
||||
self.execute('commit;')
|
||||
|
||||
def _upsertable_claims(self, txos: List[Output], header, clear_first=False):
|
||||
claim_hashes, claims, tags, languages = set(), [], {}, {}
|
||||
for txo in txos:
|
||||
tx = txo.tx_ref.tx
|
||||
|
||||
try:
|
||||
assert txo.claim_name
|
||||
assert txo.normalized_name
|
||||
except:
|
||||
#self.logger.exception(f"Could not decode claim name for {tx.id}:{txo.position}.")
|
||||
continue
|
||||
|
||||
language = 'none'
|
||||
try:
|
||||
if txo.claim.is_stream and txo.claim.stream.languages:
|
||||
language = txo.claim.stream.languages[0].language
|
||||
except:
|
||||
pass
|
||||
|
||||
claim_hash = txo.claim_hash
|
||||
claim_hashes.add(claim_hash)
|
||||
claim_record = {
|
||||
'claim_hash': claim_hash,
|
||||
'claim_id': txo.claim_id,
|
||||
'claim_name': txo.claim_name,
|
||||
'normalized': txo.normalized_name,
|
||||
'txo_hash': txo.ref.hash,
|
||||
'tx_position': tx.position,
|
||||
'amount': txo.amount,
|
||||
'timestamp': header['timestamp'],
|
||||
'height': tx.height,
|
||||
'title': None,
|
||||
'description': None,
|
||||
'author': None,
|
||||
'duration': None,
|
||||
'claim_type': None,
|
||||
'has_source': False,
|
||||
'stream_type': None,
|
||||
'media_type': None,
|
||||
'release_time': None,
|
||||
'fee_currency': None,
|
||||
'fee_amount': 0,
|
||||
'reposted_claim_hash': None
|
||||
}
|
||||
claims.append(claim_record)
|
||||
|
||||
try:
|
||||
claim = txo.claim
|
||||
except:
|
||||
#self.logger.exception(f"Could not parse claim protobuf for {tx.id}:{txo.position}.")
|
||||
continue
|
||||
|
||||
if claim.is_stream:
|
||||
claim_record['claim_type'] = CLAIM_TYPES['stream']
|
||||
claim_record['has_source'] = claim.stream.has_source
|
||||
claim_record['media_type'] = claim.stream.source.media_type
|
||||
claim_record['stream_type'] = STREAM_TYPES[guess_stream_type(claim_record['media_type'])]
|
||||
claim_record['title'] = claim.stream.title
|
||||
claim_record['description'] = claim.stream.description
|
||||
claim_record['author'] = claim.stream.author
|
||||
if claim.stream.video and claim.stream.video.duration:
|
||||
claim_record['duration'] = claim.stream.video.duration
|
||||
if claim.stream.audio and claim.stream.audio.duration:
|
||||
claim_record['duration'] = claim.stream.audio.duration
|
||||
if claim.stream.release_time:
|
||||
claim_record['release_time'] = claim.stream.release_time
|
||||
if claim.stream.has_fee:
|
||||
fee = claim.stream.fee
|
||||
if isinstance(fee.currency, str):
|
||||
claim_record['fee_currency'] = fee.currency.lower()
|
||||
if isinstance(fee.amount, Decimal):
|
||||
if fee.amount >= 0 and int(fee.amount*1000) < 9223372036854775807:
|
||||
claim_record['fee_amount'] = int(fee.amount*1000)
|
||||
elif claim.is_repost:
|
||||
claim_record['claim_type'] = CLAIM_TYPES['repost']
|
||||
claim_record['reposted_claim_hash'] = claim.repost.reference.claim_hash
|
||||
elif claim.is_channel:
|
||||
claim_record['claim_type'] = CLAIM_TYPES['channel']
|
||||
elif claim.is_collection:
|
||||
claim_record['claim_type'] = CLAIM_TYPES['collection']
|
||||
|
||||
languages[(language, claim_hash)] = (language, claim_hash, tx.height)
|
||||
|
||||
for tag in clean_tags(claim.message.tags):
|
||||
tags[(tag, claim_hash)] = (tag, claim_hash, tx.height)
|
||||
|
||||
if clear_first:
|
||||
self._clear_claim_metadata(claim_hashes)
|
||||
|
||||
if tags:
|
||||
self.executemany(
|
||||
"INSERT OR IGNORE INTO tag (tag, claim_hash, height) VALUES (?, ?, ?)", tags.values()
|
||||
)
|
||||
if languages:
|
||||
self.executemany(
|
||||
"INSERT OR IGNORE INTO language (language, claim_hash, height) VALUES (?, ?, ?)", languages.values()
|
||||
)
|
||||
|
||||
return claims
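
    # Note on units in the records built above: stream fees are stored as integer
    # thousandths (int(fee.amount*1000)) and only when they stay below the signed 64-bit
    # maximum, and a claim with no parsable language falls back to the 'none' language
    # row. Both details matter when querying the claim/language tables directly.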

    def insert_claims(self, txos: List[Output], header):
        claims = self._upsertable_claims(txos, header)
        if claims:
            self.executemany("""
                INSERT OR REPLACE INTO claim (
                    claim_hash, claim_id, claim_name, normalized, txo_hash, tx_position, amount,
                    claim_type, media_type, stream_type, timestamp, creation_timestamp, has_source,
                    fee_currency, fee_amount, title, description, author, duration, height, reposted_claim_hash,
                    creation_height, release_time, activation_height, expiration_height, short_url)
                VALUES (
                    :claim_hash, :claim_id, :claim_name, :normalized, :txo_hash, :tx_position, :amount,
                    :claim_type, :media_type, :stream_type, :timestamp, :timestamp, :has_source,
                    :fee_currency, :fee_amount, :title, :description, :author, :duration, :height, :reposted_claim_hash, :height,
                    CASE WHEN :release_time IS NOT NULL THEN :release_time ELSE :timestamp END,
                    CASE WHEN :normalized NOT IN (SELECT normalized FROM claimtrie) THEN :height END,
                    CASE WHEN :height >= 137181 THEN :height+2102400 ELSE :height+262974 END,
                    :claim_name||COALESCE(
                        (SELECT shortest_id(claim_id, :claim_id) FROM claim WHERE normalized = :normalized),
                        '#'||substr(:claim_id, 1, 1)
                    )
                )""", claims)

    def update_claims(self, txos: List[Output], header):
        claims = self._upsertable_claims(txos, header, clear_first=True)
        if claims:
            self.executemany("""
                UPDATE claim SET
                    txo_hash=:txo_hash, tx_position=:tx_position, amount=:amount, height=:height,
                    claim_type=:claim_type, media_type=:media_type, stream_type=:stream_type,
                    timestamp=:timestamp, fee_amount=:fee_amount, fee_currency=:fee_currency, has_source=:has_source,
                    title=:title, duration=:duration, description=:description, author=:author, reposted_claim_hash=:reposted_claim_hash,
                    release_time=CASE WHEN :release_time IS NOT NULL THEN :release_time ELSE release_time END
                WHERE claim_hash=:claim_hash;
                """, claims)

    def delete_claims(self, claim_hashes: Set[bytes]):
        """ Deletes claims and their supports, and removes them from the claimtrie, in case of an abandon. """
        if claim_hashes:
            affected_channels = self.execute(*query(
                "SELECT channel_hash FROM claim", channel_hash__is_not_null=1, claim_hash__in=claim_hashes
            )).fetchall()
            for table in ('claim', 'support', 'claimtrie'):
                self.execute(*self._delete_sql(table, {'claim_hash__in': claim_hashes}))
            self._clear_claim_metadata(claim_hashes)
            return {r.channel_hash for r in affected_channels}
        return set()

    def delete_claims_above_height(self, height: int):
        claim_hashes = [x[0] for x in self.execute(
            "SELECT claim_hash FROM claim WHERE height>?", (height, )
        ).fetchall()]
        while claim_hashes:
            batch = set(claim_hashes[:500])
            claim_hashes = claim_hashes[500:]
            self.delete_claims(batch)

    def _clear_claim_metadata(self, claim_hashes: Set[bytes]):
        if claim_hashes:
            for table in ('tag',):  # 'language', 'location', etc
                self.execute(*self._delete_sql(table, {'claim_hash__in': claim_hashes}))

    def split_inputs_into_claims_supports_and_other(self, txis):
        txo_hashes = {txi.txo_ref.hash for txi in txis}
        claims = self.execute(*query(
            "SELECT txo_hash, claim_hash, normalized FROM claim", txo_hash__in=txo_hashes
        )).fetchall()
        txo_hashes -= {r.txo_hash for r in claims}
        supports = {}
        if txo_hashes:
            supports = self.execute(*query(
                "SELECT txo_hash, claim_hash FROM support", txo_hash__in=txo_hashes
            )).fetchall()
            txo_hashes -= {r.txo_hash for r in supports}
        return claims, supports, txo_hashes

    def insert_supports(self, txos: List[Output]):
        supports = []
        for txo in txos:
            tx = txo.tx_ref.tx
            supports.append((
                txo.ref.hash, tx.position, tx.height,
                txo.claim_hash, txo.amount
            ))
        if supports:
            self.executemany(
                "INSERT OR IGNORE INTO support ("
                "   txo_hash, tx_position, height, claim_hash, amount"
                ") "
                "VALUES (?, ?, ?, ?, ?)", supports
            )

    def delete_supports(self, txo_hashes: Set[bytes]):
        if txo_hashes:
            self.execute(*self._delete_sql('support', {'txo_hash__in': txo_hashes}))

    def calculate_reposts(self, txos: List[Output]):
        targets = set()
        for txo in txos:
            try:
                claim = txo.claim
            except:
                continue
            if claim.is_repost:
                targets.add((claim.repost.reference.claim_hash,))
        if targets:
            self.executemany(
                """
                UPDATE claim SET reposted = (
                    SELECT count(*) FROM claim AS repost WHERE repost.reposted_claim_hash = claim.claim_hash
                )
                WHERE claim_hash = ?
                """, targets
            )
        return {target[0] for target in targets}
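
    # Note: calculate_reposts collects the reposted claim hashes as 1-tuples (the shape
    # executemany expects for positional parameters), refreshes the cached `reposted`
    # counter on each target claim, and returns the plain set of hashes so callers can
    # feed them into later recalculation steps.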

    def validate_channel_signatures(self, height, new_claims, updated_claims, spent_claims, affected_channels, timer):
        if not new_claims and not updated_claims and not spent_claims:
            return

        sub_timer = timer.add_timer('segregate channels and signables')
        sub_timer.start()
        channels, new_channel_keys, signables = {}, {}, {}
        for txo in chain(new_claims, updated_claims):
            try:
                claim = txo.claim
            except:
                continue
            if claim.is_channel:
                channels[txo.claim_hash] = txo
                new_channel_keys[txo.claim_hash] = claim.channel.public_key_bytes
            else:
                signables[txo.claim_hash] = txo
        sub_timer.stop()

        sub_timer = timer.add_timer('make list of channels we need to lookup')
        sub_timer.start()
        missing_channel_keys = set()
        for txo in signables.values():
            claim = txo.claim
            if claim.is_signed and claim.signing_channel_hash not in new_channel_keys:
                missing_channel_keys.add(claim.signing_channel_hash)
        sub_timer.stop()

        sub_timer = timer.add_timer('lookup missing channels')
        sub_timer.start()
        all_channel_keys = {}
        if new_channel_keys or missing_channel_keys or affected_channels:
            all_channel_keys = dict(self.execute(*query(
                "SELECT claim_hash, public_key_bytes FROM claim",
                claim_hash__in=set(new_channel_keys) | missing_channel_keys | affected_channels
            )))
        sub_timer.stop()

        sub_timer = timer.add_timer('prepare for updating claims')
        sub_timer.start()
        changed_channel_keys = {}
        for claim_hash, new_key in new_channel_keys.items():
            if claim_hash not in all_channel_keys or all_channel_keys[claim_hash] != new_key:
                all_channel_keys[claim_hash] = new_key
                changed_channel_keys[claim_hash] = new_key

        claim_updates = []

        for claim_hash, txo in signables.items():
            claim = txo.claim
            update = {
                'claim_hash': claim_hash,
                'channel_hash': None,
                'signature': None,
                'signature_digest': None,
                'signature_valid': None
            }
            if claim.is_signed:
                update.update({
                    'channel_hash': claim.signing_channel_hash,
                    'signature': txo.get_encoded_signature(),
                    'signature_digest': txo.get_signature_digest(self.ledger),
                    'signature_valid': 0
                })
            claim_updates.append(update)
        sub_timer.stop()

        sub_timer = timer.add_timer('find claims affected by a change in channel key')
        sub_timer.start()
        if changed_channel_keys:
            sql = f"""
                SELECT * FROM claim WHERE
                    channel_hash IN ({','.join('?' for _ in changed_channel_keys)}) AND
                    signature IS NOT NULL
            """
            for affected_claim in self.execute(sql, list(changed_channel_keys.keys())):
                if affected_claim.claim_hash not in signables:
                    claim_updates.append({
                        'claim_hash': affected_claim.claim_hash,
                        'channel_hash': affected_claim.channel_hash,
                        'signature': affected_claim.signature,
                        'signature_digest': affected_claim.signature_digest,
                        'signature_valid': 0
                    })
        sub_timer.stop()

        sub_timer = timer.add_timer('verify signatures')
        sub_timer.start()
        for update in claim_updates:
            channel_pub_key = all_channel_keys.get(update['channel_hash'])
            if channel_pub_key and update['signature']:
                update['signature_valid'] = Output.is_signature_valid(
                    bytes(update['signature']), bytes(update['signature_digest']), channel_pub_key
                )
        sub_timer.stop()

        sub_timer = timer.add_timer('update claims')
        sub_timer.start()
        if claim_updates:
            self.executemany(f"""
                UPDATE claim SET
                    channel_hash=:channel_hash, signature=:signature, signature_digest=:signature_digest,
                    signature_valid=:signature_valid,
                    channel_join=CASE
                        WHEN signature_valid=1 AND :signature_valid=1 AND channel_hash=:channel_hash THEN channel_join
                        WHEN :signature_valid=1 THEN {height}
                    END,
                    canonical_url=CASE
                        WHEN signature_valid=1 AND :signature_valid=1 AND channel_hash=:channel_hash THEN canonical_url
                        WHEN :signature_valid=1 THEN
                            (SELECT short_url FROM claim WHERE claim_hash=:channel_hash)||'/'||
                            claim_name||COALESCE(
                                (SELECT shortest_id(other_claim.claim_id, claim.claim_id) FROM claim AS other_claim
                                 WHERE other_claim.signature_valid = 1 AND
                                       other_claim.channel_hash = :channel_hash AND
                                       other_claim.normalized = claim.normalized),
                                '#'||substr(claim_id, 1, 1)
                            )
                    END
                WHERE claim_hash=:claim_hash;
                """, claim_updates)
        sub_timer.stop()

        sub_timer = timer.add_timer('update claims affected by spent channels')
        sub_timer.start()
        if spent_claims:
            self.execute(
                f"""
                UPDATE claim SET
                    signature_valid=CASE WHEN signature IS NOT NULL THEN 0 END,
                    channel_join=NULL, canonical_url=NULL
                WHERE channel_hash IN ({','.join('?' for _ in spent_claims)})
                """, list(spent_claims)
            )
        sub_timer.stop()

        sub_timer = timer.add_timer('update channels')
        sub_timer.start()
        if channels:
            self.executemany(
                """
                UPDATE claim SET
                    public_key_bytes=:public_key_bytes,
                    public_key_hash=:public_key_hash
                WHERE claim_hash=:claim_hash""", [{
                    'claim_hash': claim_hash,
                    'public_key_bytes': txo.claim.channel.public_key_bytes,
                    'public_key_hash': self.ledger.address_to_hash160(
                        self.ledger.public_key_to_address(txo.claim.channel.public_key_bytes)
                    )
                } for claim_hash, txo in channels.items()]
            )
        sub_timer.stop()

        sub_timer = timer.add_timer('update claims_in_channel counts')
        sub_timer.start()
        if all_channel_keys:
            self.executemany(f"""
                UPDATE claim SET
                    claims_in_channel=(
                        SELECT COUNT(*) FROM claim AS claim_in_channel
                        WHERE claim_in_channel.signature_valid=1 AND
                              claim_in_channel.channel_hash=claim.claim_hash
                    )
                WHERE claim_hash = ?
                """, [(channel_hash,) for channel_hash in all_channel_keys])
        sub_timer.stop()

        sub_timer = timer.add_timer('update blocked claims list')
        sub_timer.start()
        if (self.blocking_channel_hashes.intersection(all_channel_keys) or
                self.filtering_channel_hashes.intersection(all_channel_keys)):
            self.update_blocked_and_filtered_claims()
        sub_timer.stop()

    def _update_support_amount(self, claim_hashes):
        if claim_hashes:
            self.execute(f"""
                UPDATE claim SET
                    support_amount = COALESCE(
                        (SELECT SUM(amount) FROM support WHERE support.claim_hash=claim.claim_hash), 0
                    )
                WHERE claim_hash IN ({','.join('?' for _ in claim_hashes)})
                """, claim_hashes)

    def _update_effective_amount(self, height, claim_hashes=None):
        self.execute(
            f"UPDATE claim SET effective_amount = amount + support_amount "
            f"WHERE activation_height = {height}"
        )
        if claim_hashes:
            self.execute(
                f"UPDATE claim SET effective_amount = amount + support_amount "
                f"WHERE activation_height < {height} "
                f"  AND claim_hash IN ({','.join('?' for _ in claim_hashes)})",
                claim_hashes
            )

    def _calculate_activation_height(self, height):
        last_take_over_height = f"""COALESCE(
            (SELECT last_take_over_height FROM claimtrie
             WHERE claimtrie.normalized=claim.normalized),
            {height}
        )
        """
        self.execute(f"""
            UPDATE claim SET activation_height =
                {height} + min(4032, cast(({height} - {last_take_over_height}) / 32 AS INT))
            WHERE activation_height IS NULL
            """)

    def _perform_overtake(self, height, changed_claim_hashes, deleted_names):
        deleted_names_sql = claim_hashes_sql = ""
        if changed_claim_hashes:
            claim_hashes_sql = f"OR claim_hash IN ({','.join('?' for _ in changed_claim_hashes)})"
        if deleted_names:
            deleted_names_sql = f"OR normalized IN ({','.join('?' for _ in deleted_names)})"
        overtakes = self.execute(f"""
            SELECT winner.normalized, winner.claim_hash,
                   claimtrie.claim_hash AS current_winner,
                   MAX(winner.effective_amount) AS max_winner_effective_amount
            FROM (
                SELECT normalized, claim_hash, effective_amount FROM claim
                WHERE normalized IN (
                    SELECT normalized FROM claim WHERE activation_height={height} {claim_hashes_sql}
                ) {deleted_names_sql}
                ORDER BY effective_amount DESC, height ASC, tx_position ASC
            ) AS winner LEFT JOIN claimtrie USING (normalized)
            GROUP BY winner.normalized
            HAVING current_winner IS NULL OR current_winner <> winner.claim_hash
            """, list(changed_claim_hashes)+deleted_names)
        for overtake in overtakes:
            if overtake.current_winner:
                self.execute(
                    f"UPDATE claimtrie SET claim_hash = ?, last_take_over_height = {height} "
                    f"WHERE normalized = ?",
                    (overtake.claim_hash, overtake.normalized)
                )
            else:
                self.execute(
                    f"INSERT INTO claimtrie (claim_hash, normalized, last_take_over_height) "
                    f"VALUES (?, ?, {height})",
                    (overtake.claim_hash, overtake.normalized)
                )
            self.execute(
                f"UPDATE claim SET activation_height = {height} WHERE normalized = ? "
                f"AND (activation_height IS NULL OR activation_height > {height})",
                (overtake.normalized,)
            )

    def _copy(self, height):
        if height > 50:
            self.execute(f"DROP TABLE claimtrie{height-50}")
        self.execute(f"CREATE TABLE claimtrie{height} AS SELECT * FROM claimtrie")

    def update_claimtrie(self, height, changed_claim_hashes, deleted_names, timer):
        r = timer.run
        binary_claim_hashes = list(changed_claim_hashes)

        r(self._calculate_activation_height, height)
        r(self._update_support_amount, binary_claim_hashes)

        r(self._update_effective_amount, height, binary_claim_hashes)
        r(self._perform_overtake, height, binary_claim_hashes, list(deleted_names))

        r(self._update_effective_amount, height)
        r(self._perform_overtake, height, [], [])

    def get_expiring(self, height):
        return self.execute(
            f"SELECT claim_hash, normalized FROM claim WHERE expiration_height = {height}"
        )

    def enqueue_changes(self):
        query = """
        SELECT claimtrie.claim_hash as is_controlling,
               claimtrie.last_take_over_height,
               (select group_concat(tag, ',,') from tag where tag.claim_hash in (claim.claim_hash, claim.reposted_claim_hash)) as tags,
               (select group_concat(language, ' ') from language where language.claim_hash in (claim.claim_hash, claim.reposted_claim_hash)) as languages,
               cr.has_source as reposted_has_source,
               cr.claim_type as reposted_claim_type,
               cr.stream_type as reposted_stream_type,
               cr.media_type as reposted_media_type,
               cr.duration as reposted_duration,
               cr.fee_amount as reposted_fee_amount,
               cr.fee_currency as reposted_fee_currency,
               claim.*
        FROM claim LEFT JOIN claimtrie USING (claim_hash) LEFT JOIN claim cr ON cr.claim_hash=claim.reposted_claim_hash
        WHERE claim.claim_hash in (SELECT claim_hash FROM changelog)
        """
        for claim in self.execute(query):
            claim = claim._asdict()
            id_set = set(filter(None, (claim['claim_hash'], claim['channel_hash'], claim['reposted_claim_hash'])))
            claim['censor_type'] = 0
            censoring_channel_hash = None
            claim['has_source'] = bool(claim.pop('reposted_has_source') or claim['has_source'])
            claim['stream_type'] = claim.pop('reposted_stream_type') or claim['stream_type']
            claim['media_type'] = claim.pop('reposted_media_type') or claim['media_type']
            claim['fee_amount'] = claim.pop('reposted_fee_amount') or claim['fee_amount']
            claim['fee_currency'] = claim.pop('reposted_fee_currency') or claim['fee_currency']
            claim['duration'] = claim.pop('reposted_duration') or claim['duration']
            for reason_id in id_set:
                if reason_id in self.blocked_streams:
                    claim['censor_type'] = 2
                    censoring_channel_hash = self.blocked_streams.get(reason_id)
                elif reason_id in self.blocked_channels:
                    claim['censor_type'] = 2
                    censoring_channel_hash = self.blocked_channels.get(reason_id)
                elif reason_id in self.filtered_streams:
                    claim['censor_type'] = 1
                    censoring_channel_hash = self.filtered_streams.get(reason_id)
                elif reason_id in self.filtered_channels:
                    claim['censor_type'] = 1
                    censoring_channel_hash = self.filtered_channels.get(reason_id)
            claim['censoring_channel_id'] = censoring_channel_hash[::-1].hex() if censoring_channel_hash else None

            claim['tags'] = claim['tags'].split(',,') if claim['tags'] else []
            claim['languages'] = claim['languages'].split(' ') if claim['languages'] else []
            yield 'update', claim
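        # censor_type legend for the rows yielded above, as assigned in the loop:
        # 0 = not censored, 2 = the claim, its channel, or its repost source matched a
        # blocking channel's list, 1 = it matched a filtering channel's list;
        # censoring_channel_id is the reversed-byte hex id of the responsible channel.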

    def clear_changelog(self):
        self.execute("delete from changelog;")

    def claim_producer(self):
        while self.pending_deletes:
            claim_hash = self.pending_deletes.pop()
            yield 'delete', hexlify(claim_hash[::-1]).decode()
        for claim in self.enqueue_changes():
            yield claim
        self.clear_changelog()

    def advance_txs(self, height, all_txs, header, daemon_height, timer):
        insert_claims = []
        update_claims = []
        update_claim_hashes = set()
        delete_claim_hashes = self.pending_deletes
        insert_supports = []
        delete_support_txo_hashes = set()
        recalculate_claim_hashes = set()  # added/deleted supports, added/updated claim
        deleted_claim_names = set()
        delete_others = set()
        body_timer = timer.add_timer('body')
        for position, (etx, txid) in enumerate(all_txs):
            tx = timer.run(
                Transaction, etx.raw, height=height, position=position
            )
            # Inputs
            spent_claims, spent_supports, spent_others = timer.run(
                self.split_inputs_into_claims_supports_and_other, tx.inputs
            )
            body_timer.start()
            delete_claim_hashes.update({r.claim_hash for r in spent_claims})
            deleted_claim_names.update({r.normalized for r in spent_claims})
            delete_support_txo_hashes.update({r.txo_hash for r in spent_supports})
            recalculate_claim_hashes.update({r.claim_hash for r in spent_supports})
            delete_others.update(spent_others)
            # Outputs
            for output in tx.outputs:
                if output.is_support:
                    insert_supports.append(output)
                    recalculate_claim_hashes.add(output.claim_hash)
                elif output.script.is_claim_name:
                    insert_claims.append(output)
                    recalculate_claim_hashes.add(output.claim_hash)
                elif output.script.is_update_claim:
                    claim_hash = output.claim_hash
                    update_claims.append(output)
                    recalculate_claim_hashes.add(claim_hash)
            body_timer.stop()

        skip_update_claim_timer = timer.add_timer('skip update of abandoned claims')
        skip_update_claim_timer.start()
        for updated_claim in list(update_claims):
            if updated_claim.ref.hash in delete_others:
                update_claims.remove(updated_claim)
        for updated_claim in update_claims:
            claim_hash = updated_claim.claim_hash
            delete_claim_hashes.discard(claim_hash)
            update_claim_hashes.add(claim_hash)
        skip_update_claim_timer.stop()

        skip_insert_claim_timer = timer.add_timer('skip insertion of abandoned claims')
        skip_insert_claim_timer.start()
        for new_claim in list(insert_claims):
            if new_claim.ref.hash in delete_others:
                if new_claim.claim_hash not in update_claim_hashes:
                    insert_claims.remove(new_claim)
        skip_insert_claim_timer.stop()

        skip_insert_support_timer = timer.add_timer('skip insertion of abandoned supports')
        skip_insert_support_timer.start()
        for new_support in list(insert_supports):
            if new_support.ref.hash in delete_others:
                insert_supports.remove(new_support)
        skip_insert_support_timer.stop()

        expire_timer = timer.add_timer('recording expired claims')
        expire_timer.start()
        for expired in self.get_expiring(height):
            delete_claim_hashes.add(expired.claim_hash)
            deleted_claim_names.add(expired.normalized)
        expire_timer.stop()

        r = timer.run
        affected_channels = r(self.delete_claims, delete_claim_hashes)
        r(self.delete_supports, delete_support_txo_hashes)
        r(self.insert_claims, insert_claims, header)
        r(self.calculate_reposts, insert_claims)
        r(self.update_claims, update_claims, header)
        r(self.validate_channel_signatures, height, insert_claims,
          update_claims, delete_claim_hashes, affected_channels, forward_timer=True)
        r(self.insert_supports, insert_supports)
        r(self.update_claimtrie, height, recalculate_claim_hashes, deleted_claim_names, forward_timer=True)
        for algorithm in self.trending:
            r(algorithm.run, self.db.cursor(), height, daemon_height, recalculate_claim_hashes)
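        # Per-block write order, as wired above: abandoned/expired claims and spent
        # supports are deleted first, new claims are inserted and their repost counters
        # refreshed, updates are applied, channel signatures are (re)validated, new
        # supports are inserted, the claimtrie winners are recomputed, and finally each
        # configured trending algorithm runs against the claims that changed.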


class LBRYLevelDB(LevelDB):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        path = os.path.join(self.env.db_dir, 'claims.db')
        trending = []
        for algorithm_name in self.env.trending_algorithms:
            if algorithm_name in TRENDING_ALGORITHMS:
                trending.append(TRENDING_ALGORITHMS[algorithm_name])
        if self.env.es_mode == 'reader':
            self.logger.info('Index mode: reader')
            self.sql = None
        else:
            self.logger.info('Index mode: writer. Using SQLite db to sync ES')
            self.sql = SQLDB(
                self, path,
                self.env.default('BLOCKING_CHANNEL_IDS', '').split(' '),
                self.env.default('FILTERING_CHANNEL_IDS', '').split(' '),
                trending
            )

        # Search index
        self.search_index = SearchIndex(
            self.env.es_index_prefix, self.env.database_query_timeout, self.env.elastic_host, self.env.elastic_port
        )

    def close(self):
        super().close()
        if self.sql:
            self.sql.close()

    async def _open_dbs(self, *args, **kwargs):
        await self.search_index.start()
        await super()._open_dbs(*args, **kwargs)
        if self.sql:
            self.sql.open()

@@ -1,276 +0,0 @@
# Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.


import re
import resource
from os import environ
from collections import namedtuple
from ipaddress import ip_address

from lbry.wallet.server.util import class_logger
from lbry.wallet.server.coin import Coin
import lbry.wallet.server.util as lib_util


NetIdentity = namedtuple('NetIdentity', 'host tcp_port ssl_port nick_suffix')


class Env:

    # Peer discovery
    PD_OFF, PD_SELF, PD_ON = range(3)

    class Error(Exception):
        pass

    def __init__(self, coin=None):
        self.logger = class_logger(__name__, self.__class__.__name__)
        self.allow_root = self.boolean('ALLOW_ROOT', False)
        self.host = self.default('HOST', 'localhost')
        self.rpc_host = self.default('RPC_HOST', 'localhost')
        self.elastic_host = self.default('ELASTIC_HOST', 'localhost')
        self.elastic_port = self.integer('ELASTIC_PORT', 9200)
        self.loop_policy = self.set_event_loop_policy()
        self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK'])
        self.db_dir = self.required('DB_DIRECTORY')
        self.db_engine = self.default('DB_ENGINE', 'leveldb')
        self.trending_algorithms = [
            trending for trending in set(self.default('TRENDING_ALGORITHMS', 'zscore').split(' ')) if trending
        ]
        self.max_query_workers = self.integer('MAX_QUERY_WORKERS', None)
        self.individual_tag_indexes = self.boolean('INDIVIDUAL_TAG_INDEXES', True)
        self.track_metrics = self.boolean('TRACK_METRICS', False)
        self.websocket_host = self.default('WEBSOCKET_HOST', self.host)
        self.websocket_port = self.integer('WEBSOCKET_PORT', None)
        self.daemon_url = self.required('DAEMON_URL')
        if coin is not None:
            assert issubclass(coin, Coin)
            self.coin = coin
        else:
            coin_name = self.required('COIN').strip()
            network = self.default('NET', 'mainnet').strip()
            self.coin = Coin.lookup_coin_class(coin_name, network)
        self.es_index_prefix = self.default('ES_INDEX_PREFIX', '')
        self.es_mode = self.default('ES_MODE', 'writer')
        self.cache_MB = self.integer('CACHE_MB', 1200)
        self.reorg_limit = self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
        # Server stuff
        self.tcp_port = self.integer('TCP_PORT', None)
        self.udp_port = self.integer('UDP_PORT', self.tcp_port)
        self.ssl_port = self.integer('SSL_PORT', None)
        if self.ssl_port:
            self.ssl_certfile = self.required('SSL_CERTFILE')
            self.ssl_keyfile = self.required('SSL_KEYFILE')
        self.rpc_port = self.integer('RPC_PORT', 8000)
        self.prometheus_port = self.integer('PROMETHEUS_PORT', 0)
        self.max_subscriptions = self.integer('MAX_SUBSCRIPTIONS', 10000)
        self.banner_file = self.default('BANNER_FILE', None)
        self.tor_banner_file = self.default('TOR_BANNER_FILE', self.banner_file)
        self.anon_logs = self.boolean('ANON_LOGS', False)
        self.log_sessions = self.integer('LOG_SESSIONS', 3600)
        self.allow_lan_udp = self.boolean('ALLOW_LAN_UDP', False)
        self.country = self.default('COUNTRY', 'US')
        # Peer discovery
        self.peer_discovery = self.peer_discovery_enum()
        self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
        self.peer_hubs = self.extract_peer_hubs()
        self.force_proxy = self.boolean('FORCE_PROXY', False)
        self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
        self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
        # The electrum client takes the empty string as unspecified
        self.payment_address = self.default('PAYMENT_ADDRESS', '')
        self.donation_address = self.default('DONATION_ADDRESS', '')
        # Server limits to help prevent DoS
        self.max_send = self.integer('MAX_SEND', 1000000)
        self.max_receive = self.integer('MAX_RECEIVE', 1000000)
        self.max_subs = self.integer('MAX_SUBS', 250000)
        self.max_sessions = self.sane_max_sessions()
        self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
        self.session_timeout = self.integer('SESSION_TIMEOUT', 600)
        self.drop_client = self.custom("DROP_CLIENT", None, re.compile)
        self.description = self.default('DESCRIPTION', '')
        self.daily_fee = self.string_amount('DAILY_FEE', '0')

        # Identities
        clearnet_identity = self.clearnet_identity()
        tor_identity = self.tor_identity(clearnet_identity)
        self.identities = [identity
                           for identity in (clearnet_identity, tor_identity)
                           if identity is not None]
        self.database_query_timeout = float(self.integer('QUERY_TIMEOUT_MS', 3000)) / 1000.0

    @classmethod
    def default(cls, envvar, default):
        return environ.get(envvar, default)

    @classmethod
    def boolean(cls, envvar, default):
        default = 'Yes' if default else ''
        return bool(cls.default(envvar, default).strip())
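
    # Note: boolean() treats any non-empty, non-whitespace value as true, so (hypothetical
    # example values) ALLOW_LAN_UDP=0 or ALLOW_LAN_UDP=false still enable the flag; only
    # an unset variable or an empty/whitespace-only value is false.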

    @classmethod
    def required(cls, envvar):
        value = environ.get(envvar)
        if value is None:
            raise cls.Error(f'required envvar {envvar} not set')
        return value

    @classmethod
    def string_amount(cls, envvar, default):
        value = environ.get(envvar, default)
        amount_pattern = re.compile("[0-9]{0,10}(\.[0-9]{1,8})?")
        if len(value) > 0 and not amount_pattern.fullmatch(value):
            raise cls.Error(f'{value} is not a valid amount for {envvar}')
        return value

    @classmethod
    def integer(cls, envvar, default):
        value = environ.get(envvar)
        if value is None:
            return default
        try:
            return int(value)
        except Exception:
            raise cls.Error(f'cannot convert envvar {envvar} value {value} to an integer')

    @classmethod
    def custom(cls, envvar, default, parse):
        value = environ.get(envvar)
        if value is None:
            return default
        try:
            return parse(value)
        except Exception as e:
            raise cls.Error(f'cannot parse envvar {envvar} value {value}') from e

    @classmethod
    def obsolete(cls, envvars):
        bad = [envvar for envvar in envvars if environ.get(envvar)]
        if bad:
            raise cls.Error(f'remove obsolete environment variables {bad}')

    def set_event_loop_policy(self):
        policy_name = self.default('EVENT_LOOP_POLICY', None)
        if not policy_name:
            import asyncio
            return asyncio.get_event_loop_policy()
        elif policy_name == 'uvloop':
            import uvloop
            import asyncio
            loop_policy = uvloop.EventLoopPolicy()
            asyncio.set_event_loop_policy(loop_policy)
            return loop_policy
        raise self.Error(f'unknown event loop policy "{policy_name}"')

    def cs_host(self, *, for_rpc):
        """Returns the 'host' argument to pass to asyncio's create_server
        call. The result can be a single host name string, a list of
        host name strings, or an empty string to bind to all interfaces.

        If rpc is True the host to use for the RPC server is returned.
        Otherwise the host to use for SSL/TCP servers is returned.
        """
        host = self.rpc_host if for_rpc else self.host
        result = [part.strip() for part in host.split(',')]
        if len(result) == 1:
            result = result[0]
        # An empty result indicates all interfaces, which we do not
        # permit for an RPC server.
        if for_rpc and not result:
            result = 'localhost'
        if result == 'localhost':
            # 'localhost' resolves to ::1 (ipv6) on many systems, which fails on default setup of
            # docker, using 127.0.0.1 instead forces ipv4
            result = '127.0.0.1'
        return result

    def sane_max_sessions(self):
        """Return the maximum number of sessions to permit. Normally this
        is MAX_SESSIONS. However, to prevent open file exhaustion, adjust
        downwards if running with a small open file rlimit."""
        env_value = self.integer('MAX_SESSIONS', 1000)
        nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        # We give the DB 250 files; allow ElectrumX 100 for itself
        value = max(0, min(env_value, nofile_limit - 350))
        if value < env_value:
            self.logger.warning(f'lowered maximum sessions from {env_value:,d} to {value:,d} '
                                f'because your open file limit is {nofile_limit:,d}')
        return value
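        # Example of the adjustment above (hypothetical numbers): with the default
        # MAX_SESSIONS of 1000 and a soft RLIMIT_NOFILE of 1024, the budget left after
        # reserving 350 descriptors is 674, so the session cap is lowered to 674 and the
        # warning is logged.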

    def clearnet_identity(self):
        host = self.default('REPORT_HOST', None)
        if host is None:
            return None
        try:
            ip = ip_address(host)
        except ValueError:
            bad = (not lib_util.is_valid_hostname(host)
                   or host.lower() == 'localhost')
        else:
            bad = (ip.is_multicast or ip.is_unspecified
                   or (ip.is_private and self.peer_announce))
        if bad:
            raise self.Error(f'"{host}" is not a valid REPORT_HOST')
        tcp_port = self.integer('REPORT_TCP_PORT', self.tcp_port) or None
        ssl_port = self.integer('REPORT_SSL_PORT', self.ssl_port) or None
        if tcp_port == ssl_port:
            raise self.Error('REPORT_TCP_PORT and REPORT_SSL_PORT '
                             f'both resolve to {tcp_port}')
        return NetIdentity(
            host,
            tcp_port,
            ssl_port,
            ''
        )

    def tor_identity(self, clearnet):
        host = self.default('REPORT_HOST_TOR', None)
        if host is None:
            return None
        if not host.endswith('.onion'):
            raise self.Error(f'tor host "{host}" must end with ".onion"')

        def port(port_kind):
            """Returns the clearnet identity port, if any and not zero,
            otherwise the listening port."""
            result = 0
            if clearnet:
                result = getattr(clearnet, port_kind)
            return result or getattr(self, port_kind)

        tcp_port = self.integer('REPORT_TCP_PORT_TOR',
                                port('tcp_port')) or None
        ssl_port = self.integer('REPORT_SSL_PORT_TOR',
                                port('ssl_port')) or None
        if tcp_port == ssl_port:
            raise self.Error('REPORT_TCP_PORT_TOR and REPORT_SSL_PORT_TOR '
                             f'both resolve to {tcp_port}')

        return NetIdentity(
            host,
            tcp_port,
            ssl_port,
            '_tor',
        )

    def hosts_dict(self):
        return {identity.host: {'tcp_port': identity.tcp_port,
                                'ssl_port': identity.ssl_port}
                for identity in self.identities}

    def peer_discovery_enum(self):
        pd = self.default('PEER_DISCOVERY', 'on').strip().lower()
        if pd in ('off', ''):
            return self.PD_OFF
        elif pd == 'self':
            return self.PD_SELF
        else:
            return self.PD_ON

    def extract_peer_hubs(self):
        return [hub.strip() for hub in self.default('PEER_HUBS', '').split(',') if hub.strip()]
Some files were not shown because too many files have changed in this diff.