Compare commits
1492 commits
217 changed files with 12335 additions and 16796 deletions
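For anyone working from a local clone rather than this web view, the same comparison can be reproduced with plain git. This is a minimal sketch; `OLD_REF` and `NEW_REF` are hypothetical placeholders for the two endpoints of the compare, which are not named on this page.

```bash
# Hypothetical refs standing in for the two ends of this compare view.
OLD_REF=base-branch-or-tag
NEW_REF=feature-branch-or-tag

# Count the commits in the range (should match the "1492 commits" figure).
git rev-list --count "${OLD_REF}..${NEW_REF}"

# Summarize changed files, additions and deletions (the "217 changed files" line).
git diff --stat "${OLD_REF}..${NEW_REF}"

# Show the patch for a single file, e.g. the CI workflow.
git diff "${OLD_REF}..${NEW_REF}" -- .github/workflows/main.yml
```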
190  .github/workflows/main.yml  (vendored)

@@ -1,84 +1,206 @@
 name: ci
-on: pull_request
+on: ["push", "pull_request", "workflow_dispatch"]
 
 jobs:
 
   lint:
     name: lint
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v1
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
         with:
-          python-version: '3.7'
-      - run: make install tools
+          python-version: '3.9'
+      - name: extract pip cache
+        uses: actions/cache@v3
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
+          restore-keys: ${{ runner.os }}-pip-
+      - run: pip install --user --upgrade pip wheel
+      - run: pip install -e .[lint]
       - run: make lint
 
   tests-unit:
     name: "tests / unit"
-    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        os:
+          - ubuntu-20.04
+          - macos-latest
+          - windows-latest
+    runs-on: ${{ matrix.os }}
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v1
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
         with:
-          python-version: '3.7'
-      - run: make install tools
-      - working-directory: lbry
+          python-version: '3.9'
+      - name: set pip cache dir
+        shell: bash
+        run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
+      - name: extract pip cache
+        uses: actions/cache@v3
+        with:
+          path: ${{ env.PIP_CACHE_DIR }}
+          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
+          restore-keys: ${{ runner.os }}-pip-
+      - id: os-name
+        uses: ASzc/change-string-case-action@v5
+        with:
+          string: ${{ runner.os }}
+      - run: python -m pip install --user --upgrade pip wheel
+      - if: startsWith(runner.os, 'linux')
+        run: pip install -e .[test]
+      - if: startsWith(runner.os, 'linux')
         env:
           HOME: /tmp
-        run: coverage run -p --source=lbry -m unittest discover -vv tests.unit
+        run: make test-unit-coverage
+      - if: startsWith(runner.os, 'linux') != true
+        run: pip install -e .[test]
+      - if: startsWith(runner.os, 'linux') != true
+        env:
+          HOME: /tmp
+        run: coverage run --source=lbry -m unittest tests/unit/test_conf.py
+      - name: submit coverage report
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          COVERALLS_FLAG_NAME: tests-unit-${{ steps.os-name.outputs.lowercase }}
+          COVERALLS_PARALLEL: true
+        run: |
+          pip install coveralls
+          coveralls --service=github
 
   tests-integration:
     name: "tests / integration"
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     strategy:
       matrix:
         test:
           - datanetwork
           - blockchain
+          - claims
+          - takeovers
+          - transactions
           - other
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v1
+      - name: Configure sysctl limits
+        run: |
+          sudo swapoff -a
+          sudo sysctl -w vm.swappiness=1
+          sudo sysctl -w fs.file-max=262144
+          sudo sysctl -w vm.max_map_count=262144
+      - name: Runs Elasticsearch
+        uses: elastic/elastic-github-actions/elasticsearch@master
         with:
-          python-version: '3.7'
+          stack-version: 7.12.1
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.9'
       - if: matrix.test == 'other'
         run: |
           sudo apt-get update
           sudo apt-get install -y --no-install-recommends ffmpeg
-      - run: pip install tox-travis
+      - name: extract pip cache
+        uses: actions/cache@v3
+        with:
+          path: ./.tox
+          key: tox-integration-${{ matrix.test }}-${{ hashFiles('setup.py') }}
+          restore-keys: txo-integration-${{ matrix.test }}-
+      - run: pip install tox coverage coveralls
+      - if: matrix.test == 'claims'
+        run: rm -rf .tox
       - run: tox -e ${{ matrix.test }}
+      - name: submit coverage report
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          COVERALLS_FLAG_NAME: tests-integration-${{ matrix.test }}
+          COVERALLS_PARALLEL: true
+        run: |
+          coverage combine tests
+          coveralls --service=github
 
+  coverage:
+    needs: ["tests-unit", "tests-integration"]
+    runs-on: ubuntu-20.04
+    steps:
+      - name: finalize coverage report submission
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          pip install coveralls
+          coveralls --service=github --finish
 
   build:
     needs: ["lint", "tests-unit", "tests-integration"]
-    name: "build"
+    name: "build / binary"
     strategy:
       matrix:
         os:
-          - ubuntu-latest
+          - ubuntu-20.04
           - macos-latest
          - windows-latest
     runs-on: ${{ matrix.os }}
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v1
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
         with:
-          python-version: '3.7'
-      - name: Setup
-        run: |
-          pip install pyinstaller
-          pip install -e .
-          # https://stackoverflow.com/a/61693590
-          # https://github.com/pypa/setuptools/issues/1963
-          pip install --upgrade 'setuptools<45.0.0'
-      - if: startsWith(matrix.os, 'windows') == false
+          python-version: '3.9'
+      - id: os-name
+        uses: ASzc/change-string-case-action@v5
+        with:
+          string: ${{ runner.os }}
+      - name: set pip cache dir
+        shell: bash
+        run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
+      - name: extract pip cache
+        uses: actions/cache@v3
+        with:
+          path: ${{ env.PIP_CACHE_DIR }}
+          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
+          restore-keys: ${{ runner.os }}-pip-
+      - run: pip install pyinstaller==4.6
+      - run: pip install -e .
+      - if: startsWith(github.ref, 'refs/tags/v')
+        run: python docker/set_build.py
+      - if: startsWith(runner.os, 'linux') || startsWith(runner.os, 'mac')
         name: Build & Run (Unix)
         run: |
           pyinstaller --onefile --name lbrynet lbry/extras/cli.py
-          chmod +x dist/lbrynet
           dist/lbrynet --version
-      - if: startsWith(matrix.os, 'windows')
+      - if: startsWith(runner.os, 'windows')
         name: Build & Run (Windows)
         run: |
-          pip install pywin32
+          pip install pywin32==301
           pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py
           dist/lbrynet.exe --version
+      - uses: actions/upload-artifact@v3
+        with:
+          name: lbrynet-${{ steps.os-name.outputs.lowercase }}
+          path: dist/
 
+  release:
+    name: "release"
+    if: startsWith(github.ref, 'refs/tags/v')
+    needs: ["build"]
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v1
+      - uses: actions/download-artifact@v2
+      - name: upload binaries
+        env:
+          GITHUB_TOKEN: ${{ secrets.RELEASE_API_TOKEN }}
+        run: |
+          pip install githubrelease
+          chmod +x lbrynet-macos/lbrynet
+          chmod +x lbrynet-linux/lbrynet
+          zip --junk-paths lbrynet-mac.zip lbrynet-macos/lbrynet
+          zip --junk-paths lbrynet-linux.zip lbrynet-linux/lbrynet
+          zip --junk-paths lbrynet-windows.zip lbrynet-windows/lbrynet.exe
+          ls -lh
+          githubrelease release lbryio/lbry-sdk info ${GITHUB_REF#refs/tags/}
+          githubrelease asset lbryio/lbry-sdk upload ${GITHUB_REF#refs/tags/} \
+            lbrynet-mac.zip lbrynet-linux.zip lbrynet-windows.zip
+          githubrelease release lbryio/lbry-sdk publish ${GITHUB_REF#refs/tags/}
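The workflow above drives everything through `make`, `pip`, and `tox`, so most of the unit-test job can be approximated on a developer machine. A rough sketch only, assuming a Python virtual environment and the repository root as the working directory; the `.[test]` extra, the `HOME=/tmp` trick, the `test-unit-coverage` target, and the `claims` tox environment are the ones the new workflow itself references.

```bash
# Install the package with its test extra, as the Linux CI job does.
pip install --user --upgrade pip wheel
pip install -e .[test]

# Run the unit tests with coverage, mirroring the CI step.
HOME=/tmp make test-unit-coverage

# Run one integration-test environment the way the matrix job does.
pip install tox coverage
tox -e claims
```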
22  .github/workflows/release.yml  (vendored, new file)

@@ -0,0 +1,22 @@
+name: slack
+
+on:
+  release:
+    types: [published]
+
+jobs:
+  release:
+    name: "slack notification"
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: LoveToKnow/slackify-markdown-action@v1.0.0
+        id: markdown
+        with:
+          text: "There is a new SDK release: ${{github.event.release.html_url}}\n${{ github.event.release.body }}"
+      - uses: slackapi/slack-github-action@v1.14.0
+        env:
+          CHANGELOG: '<!channel> ${{ steps.markdown.outputs.text }}'
+          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_RELEASE_BOT_WEBHOOK }}
+        with:
+          payload: '{"type": "mrkdwn", "text": ${{ toJSON(env.CHANGELOG) }} }'
+
6  .gitignore  (vendored)

@@ -6,15 +6,17 @@
 /.coverage*
 /lbry-venv
 /venv
+/lbry/blockchain
 
 lbry.egg-info
 __pycache__
 _trial_temp/
+trending*.log
 
-/tests/integration/blockchain/files
+/tests/integration/claims/files
 /tests/.coverage.*
 
 /lbry/wallet/bin
 
 /.vscode
 /.gitignore
211  .gitlab-ci.yml  (deleted)

@@ -1,211 +0,0 @@
-default:
-  image: python:3.7
-
-#cache:
-# directories:
-# - $HOME/venv
-# - $HOME/.cache/pip
-# - $HOME/Library/Caches/pip
-# - $HOME/Library/Caches/Homebrew
-# - $TRAVIS_BUILD_DIR/.tox
-
-stages:
-  - test
-  - build
-  - assets
-  - release
-
-.tagged:
-  rules:
-    - if: '$CI_COMMIT_TAG =~ /^v[0-9\.]+$/'
-      when: on_success
-
-test:lint:
-  stage: test
-  script:
-    - make install tools
-    - make lint
-
-test:unit:
-  stage: test
-  script:
-    - make install tools
-    - HOME=/tmp coverage run -p --source=lbry -m unittest discover -vv tests.unit
-
-test:datanetwork-integration:
-  stage: test
-  script:
-    - pip install tox-travis
-    - tox -e datanetwork --recreate
-
-test:blockchain-integration:
-  stage: test
-  script:
-    - pip install tox-travis
-    - tox -e blockchain
-
-test:other-integration:
-  stage: test
-  script:
-    - apt-get update
-    - apt-get install -y --no-install-recommends ffmpeg
-    - pip install tox-travis
-    - tox -e other
-
-test:json-api:
-  stage: test
-  script:
-    - make install tools
-    - HOME=/tmp coverage run -p --source=lbry scripts/generate_json_api.py
-
-.build:
-  stage: build
-  artifacts:
-    expire_in: 1 day
-    paths:
-      - lbrynet-${OS}.zip
-  script:
-    - pip install --upgrade 'setuptools<45.0.0'
-    - pip install pyinstaller
-    - pip install -e .
-    - python3.7 docker/set_build.py # must come after lbry is installed because it imports lbry
-    - pyinstaller --onefile --name lbrynet lbry/extras/cli.py
-    - chmod +x dist/lbrynet
-    - zip --junk-paths ${CI_PROJECT_DIR}/lbrynet-${OS}.zip dist/lbrynet # gitlab expects artifacts to be in $CI_PROJECT_DIR
-    - openssl dgst -sha256 ${CI_PROJECT_DIR}/lbrynet-${OS}.zip | egrep -o [0-9a-f]+$ # get sha256 of asset. works on mac and ubuntu
-    - dist/lbrynet --version
-
-build:linux:
-  extends: .build
-  image: ubuntu:16.04
-  variables:
-    OS: linux
-  before_script:
-    - apt-get update
-    - apt-get install -y --no-install-recommends software-properties-common zip curl build-essential
-    - add-apt-repository -y ppa:deadsnakes/ppa
-    - apt-get update
-    - apt-get install -y --no-install-recommends python3.7-dev
-    - python3.7 <(curl -q https://bootstrap.pypa.io/get-pip.py) # make sure we get pip with python3.7
-    - pip install lbry-libtorrent
-
-build:mac:
-  extends: .build
-  tags: [macos] # makes gitlab use the mac runner
-  variables:
-    OS: mac
-    GIT_DEPTH: 5
-    VENV: /tmp/gitlab-lbry-sdk-venv
-  before_script:
-# - brew upgrade python || true
-    - python3 --version | grep -q '^Python 3\.7\.' # dont upgrade python on every run. just make sure we're on the right Python
-# - pip3 install --user --upgrade pip virtualenv
-    - pip3 --version | grep -q '\(python 3\.7\)'
-    - virtualenv --python=python3.7 "${VENV}"
-    - source "${VENV}/bin/activate"
-  after_script:
-    - rm -rf "${VENV}"
-
-build:windows:
-  extends: .build
-  tags: [windows] # makes gitlab use the windows runner
-  variables:
-    OS: windows
-    GIT_DEPTH: 5
-  before_script:
-    - ./docker/install_choco.ps1
-    - choco install -y --x86 python3 7zip checksum
-    - python --version # | findstr /B "Python 3\.7\." # dont upgrade python on every run. just make sure we're on the right Python
-    - pip --version # | findstr /E '\(python 3\.7\)'
-    - pip install virtualenv pywin32
-    - virtualenv venv
-    - venv/Scripts/activate.ps1
-    - pip install pip==19.3.1; $true # $true ignores errors. need this to get the correct coincurve wheel. see commit notes for details.
-  after_script:
-    - rmdir -Recurse venv
-  script:
-    - pip install --upgrade 'setuptools<45.0.0'
-    - pip install pyinstaller==3.5
-    - pip install -e .
-    - python docker/set_build.py # must come after lbry is installed because it imports lbry
-    - pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico -F -n lbrynet lbry/extras/cli.py
-    - 7z a -tzip $env:CI_PROJECT_DIR/lbrynet-${OS}.zip ./dist/lbrynet.exe
-    - checksum --type=sha256 --file=$env:CI_PROJECT_DIR/lbrynet-${OS}.zip
-    - dist/lbrynet.exe --version
-
-# s3 = upload asset to s3 (build.lbry.io)
-.s3:
-  stage: assets
-  variables:
-    GIT_STRATEGY: none
-  script:
-    - "[ -f lbrynet-${OS}.zip ]" # check that asset exists before trying to upload
-    - pip install awscli
-    - S3_PATH="daemon/gitlab-build-${CI_PIPELINE_ID}_commit-${CI_COMMIT_SHA:0:7}$( if [ ! -z ${CI_COMMIT_TAG} ]; then echo _tag-${CI_COMMIT_TAG}; else echo _branch-${CI_COMMIT_REF_NAME}; fi )"
-    - AWS_ACCESS_KEY_ID=${ARTIFACTS_KEY} AWS_SECRET_ACCESS_KEY=${ARTIFACTS_SECRET} AWS_REGION=${ARTIFACTS_REGION}
-      aws s3 cp lbrynet-${OS}.zip s3://${ARTIFACTS_BUCKET}/${S3_PATH}/lbrynet-${OS}.zip
-
-s3:linux:
-  extends: .s3
-  variables: {OS: linux}
-  needs: ["build:linux"]
-
-s3:mac:
-  extends: .s3
-  variables: {OS: mac}
-  needs: ["build:mac"]
-
-s3:windows:
-  extends: .s3
-  variables: {OS: windows}
-  needs: ["build:windows"]
-
-# github = upload assets to github when there's a tagged release
-.github:
-  extends: .tagged
-  stage: assets
-  variables:
-    GIT_STRATEGY: none
-  script:
-    - "[ -f lbrynet-${OS}.zip ]" # check that asset exists before trying to upload. githubrelease won't error if its missing
-    - pip install githubrelease
-    - githubrelease --no-progress --github-token ${GITHUB_CI_USER_ACCESS_TOKEN} asset lbryio/lbry-sdk upload ${CI_COMMIT_TAG} lbrynet-${OS}.zip
-
-github:linux:
-  extends: .github
-  variables: {OS: linux}
-  needs: ["build:linux"]
-
-github:mac:
-  extends: .github
-  variables: {OS: mac}
-  needs: ["build:mac"]
-
-github:windows:
-  extends: .github
-  variables: {OS: windows}
-  needs: ["build:windows"]
-
-publish:
-  extends: .tagged
-  stage: release
-  variables:
-    GIT_STRATEGY: none
-  script:
-    - pip install githubrelease
-    - githubrelease --no-progress --github-token ${GITHUB_CI_USER_ACCESS_TOKEN} release lbryio/lbry-sdk publish ${CI_COMMIT_TAG}
-    - >
-      curl -X POST -H 'Content-type: application/json' --data '{"text":"<!channel> There is a new SDK release: https://github.com/lbryio/lbry-sdk/releases/tag/'"${CI_COMMIT_TAG}"'\n'"$(curl -s "https://api.github.com/repos/lbryio/lbry-sdk/releases/tags/${CI_COMMIT_TAG}" | egrep '\w*\"body\":' | cut -d':' -f 2- | tail -c +3 | head -c -2)"'", "channel":"tech"}' "$(echo ${SLACK_WEBHOOK_URL_BASE64} | base64 -d)"
157  INSTALL.md

@@ -9,20 +9,29 @@ Here's a video walkthrough of this setup, which is itself hosted by the LBRY net
 
 ## Prerequisites
 
-Running `lbrynet` from source requires Python 3.7 or higher. Get the installer for your OS [here](https://www.python.org/downloads/release/python-370/).
+Running `lbrynet` from source requires Python 3.7. Get the installer for your OS [here](https://www.python.org/downloads/release/python-370/).
 
-After installing python 3, you'll need to install some additional libraries depending on your operating system.
+After installing Python 3.7, you'll need to install some additional libraries depending on your operating system.
+
+Because of [issue #2769](https://github.com/lbryio/lbry-sdk/issues/2769)
+at the moment the `lbrynet` daemon will only work correctly with Python 3.7.
+If Python 3.8+ is used, the daemon will start but the RPC server
+may not accept messages, returning the following:
+```
+Could not connect to daemon. Are you sure it's running?
+```
 
 ### macOS
 
 macOS users will need to install [xcode command line tools](https://developer.xamarin.com/guides/testcloud/calabash/configuring/osx/install-xcode-command-line-tools/) and [homebrew](http://brew.sh/).
 
 These environment variables also need to be set:
-1. PYTHONUNBUFFERED=1
-2. EVENT_NOKQUEUE=1
+```
+PYTHONUNBUFFERED=1
+EVENT_NOKQUEUE=1
+```
 
 Remaining dependencies can then be installed by running:
 
 ```
 brew install python protobuf
 ```

@@ -31,14 +40,17 @@ Assistance installing Python3: https://docs.python-guide.org/starting/install3/o
 
 ### Linux
 
-On Ubuntu (16.04 minimum, we recommend 18.04), install the following:
+On Ubuntu (we recommend 18.04 or 20.04), install the following:
 
 ```
 sudo add-apt-repository ppa:deadsnakes/ppa
 sudo apt-get update
 sudo apt-get install build-essential python3.7 python3.7-dev git python3.7-venv libssl-dev python-protobuf
 ```
 
+The [deadsnakes PPA](https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa) provides Python 3.7
+for those Ubuntu distributions that no longer have it in their
+official repositories.
 
 On Raspbian, you will also need to install `python-pyparsing`.
 
 If you're running another Linux distro, install the equivalent of the above packages for your system.

@@ -47,62 +59,119 @@ If you're running another Linux distro, install the equivalent of the above pack
 
 ### Linux/Mac
 
-To install on Linux/Mac:
-
-```
-Clone the repository:
-$ git clone https://github.com/lbryio/lbry-sdk.git
-$ cd lbry-sdk
-
-Create a Python virtual environment for lbry-sdk:
-$ python3.7 -m venv lbry-venv
-
-Activating lbry-sdk virtual environment:
-$ source lbry-venv/bin/activate
-
-Make sure you're on Python 3.7+ (as the default Python in virtual environment):
-$ python --version
-
-Install packages:
-$ make install
-
-If you are on Linux and using PyCharm, generates initial configs:
-$ make idea
-```
-
-To verify your installation, `which lbrynet` should return a path inside of the `lbry-venv` folder created by the `python3.7 -m venv lbry-venv` command.
+Clone the repository:
+```bash
+git clone https://github.com/lbryio/lbry-sdk.git
+cd lbry-sdk
+```
+
+Create a Python virtual environment for lbry-sdk:
+```bash
+python3.7 -m venv lbry-venv
+```
+
+Activate virtual environment:
+```bash
+source lbry-venv/bin/activate
+```
+
+Make sure you're on Python 3.7+ as default in the virtual environment:
+```bash
+python --version
+```
+
+Install packages:
+```bash
+make install
+```
+
+If you are on Linux and using PyCharm, generates initial configs:
+```bash
+make idea
+```
+
+To verify your installation, `which lbrynet` should return a path inside
+of the `lbry-venv` folder.
+```bash
+(lbry-venv) $ which lbrynet
+/opt/lbry-sdk/lbry-venv/bin/lbrynet
+```
+
+To exit the virtual environment simply use the command `deactivate`.
 
 ### Windows
 
-To install on Windows:
-
-```
-Clone the repository:
-> git clone https://github.com/lbryio/lbry-sdk.git
-> cd lbry-sdk
-
-Create a Python virtual environment for lbry-sdk:
-> python -m venv lbry-venv
-
-Activating lbry-sdk virtual environment:
-> lbry-venv\Scripts\activate
-
-Install packages:
-> pip install -e .
-```
+Clone the repository:
+```bash
+git clone https://github.com/lbryio/lbry-sdk.git
+cd lbry-sdk
+```
+
+Create a Python virtual environment for lbry-sdk:
+```bash
+python -m venv lbry-venv
+```
+
+Activate virtual environment:
+```bash
+lbry-venv\Scripts\activate
+```
+
+Install packages:
+```bash
+pip install -e .
+```
 
 ## Run the tests
+### Elasticsearch
 
-To run the unit tests from the repo directory:
-```
-python -m unittest discover tests.unit
-```
+For running integration tests, Elasticsearch is required to be available at localhost:9200/
+
+The easiest way to start it is using docker with:
+```bash
+make elastic-docker
+```
+
+Alternative installation methods are available [at Elasticsearch website](https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html).
+
+To run the unit and integration tests from the repo directory:
+```
+python -m unittest discover tests.unit
+python -m unittest discover tests.integration
+```
 
 ## Usage
 
 To start the API server:
-`lbrynet start`
+```
+lbrynet start
+```
+
+Whenever the code inside [lbry-sdk/lbry](./lbry)
+is modified we should run `make install` to recompile the `lbrynet`
+executable with the newest code.
+
+## Development
+
+When developing, remember to enter the environment,
+and if you wish start the server interactively.
+```bash
+$ source lbry-venv/bin/activate
+
+(lbry-venv) $ python lbry/extras/cli.py start
+```
+
+Parameters can be passed in the same way.
+```bash
+(lbry-venv) $ python lbry/extras/cli.py wallet balance
+```
+
+If a Python debugger (`pdb` or `ipdb`) is installed we can also start it
+in this way, set up break points, and step through the code.
+```bash
+(lbry-venv) $ pip install ipdb
+
+(lbry-venv) $ ipdb lbry/extras/cli.py
+```
 
 Happy hacking!
2  LICENSE

@@ -1,6 +1,6 @@
 The MIT License (MIT)
 
-Copyright (c) 2015-2020 LBRY Inc
+Copyright (c) 2015-2022 LBRY Inc
 
 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,
24  Makefile

@@ -1,24 +1,26 @@
-.PHONY: install tools lint test idea
+.PHONY: install tools lint test test-unit test-unit-coverage test-integration idea
 
 install:
-	pip install https://s3.amazonaws.com/files.lbry.io/python_libtorrent-1.2.4-py3-none-any.whl
-	CFLAGS="-DSQLITE_MAX_VARIABLE_NUMBER=2500000" pip install -U https://github.com/rogerbinns/apsw/releases/download/3.30.1-r1/apsw-3.30.1-r1.zip \
-		--global-option=fetch \
-		--global-option=--version --global-option=3.30.1 --global-option=--all \
-		--global-option=build --global-option=--enable --global-option=fts5
 	pip install -e .
 
-tools:
-	pip install mypy==0.701 pylint==2.4.4
-	pip install coverage astroid pylint
-
 lint:
 	pylint --rcfile=setup.cfg lbry
 	#mypy --ignore-missing-imports lbry
 
-test:
+test: test-unit test-integration
+
+test-unit:
+	python -m unittest discover tests.unit
+
+test-unit-coverage:
+	coverage run --source=lbry -m unittest discover -vv tests.unit
+
+test-integration:
 	tox
 
 idea:
 	mkdir -p .idea
 	cp -r scripts/idea/* .idea
+
+elastic-docker:
+	docker run -d -v lbryhub:/usr/share/elasticsearch/data -p 9200:9200 -p 9300:9300 -e"ES_JAVA_OPTS=-Xms512m -Xmx512m" -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.12.1
|

```diff
@@ -1,10 +1,10 @@
-# <img src="https://raw.githubusercontent.com/lbryio/lbry-sdk/master/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![Gitlab CI Badge](https://ci.lbry.tech/lbry/lbry-sdk/badges/master/pipeline.svg)](https://ci.lbry.tech/lbry/lbry-sdk)
+# <img src="https://raw.githubusercontent.com/lbryio/lbry-sdk/master/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![build](https://github.com/lbryio/lbry-sdk/actions/workflows/main.yml/badge.svg)](https://github.com/lbryio/lbry-sdk/actions/workflows/main.yml) [![coverage](https://coveralls.io/repos/github/lbryio/lbry-sdk/badge.svg)](https://coveralls.io/github/lbryio/lbry-sdk)
 
 LBRY is a decentralized peer-to-peer protocol for publishing and accessing digital content. It utilizes the [LBRY blockchain](https://github.com/lbryio/lbrycrd) as a global namespace and database of digital content. Blockchain entries contain searchable content metadata, identities, rights and access rules. LBRY also provides a data network that consists of peers (seeders) uploading and downloading data from other peers, possibly in exchange for payments, as well as a distributed hash table used by peers to discover other peers.
 
 LBRY SDK for Python is currently the most fully featured implementation of the LBRY Network protocols and includes many useful components and tools for building decentralized applications. Primary features and components include:
 
-* Built on Python 3.7+ and `asyncio`.
+* Built on Python 3.7 and `asyncio`.
 * Kademlia DHT (Distributed Hash Table) implementation for finding peers to download from and announcing to peers what we have to host ([lbry.dht](https://github.com/lbryio/lbry-sdk/tree/master/lbry/dht)).
 * Blob exchange protocol for transferring encrypted blobs of content and negotiating payments ([lbry.blob_exchange](https://github.com/lbryio/lbry-sdk/tree/master/lbry/blob_exchange)).
 * Protobuf schema for encoding and decoding metadata stored on the blockchain ([lbry.schema](https://github.com/lbryio/lbry-sdk/tree/master/lbry/schema)).
@@ -41,7 +41,7 @@ This project is MIT licensed. For the full license, see [LICENSE](LICENSE).
 
 ## Security
 
-We take security seriously. Please contact security@lbry.com regarding any security issues. [Our GPG key is here](https://lbry.com/faq/gpg-key) if you need it.
+We take security seriously. Please contact security@lbry.com regarding any security issues. [Our PGP key is here](https://lbry.com/faq/pgp-key) if you need it.
 
 ## Contact
 
```

**SECURITY.md** (new file, 9 lines)

```diff
@@ -0,0 +1,9 @@
+# Security Policy
+
+## Supported Versions
+
+While we are not at v1.0 yet, only the latest release will be supported.
+
+## Reporting a Vulnerability
+
+See https://lbry.com/faq/security
```

**docker/Dockerfile.dht_node** (new file, 43 lines)

```diff
@@ -0,0 +1,43 @@
+FROM debian:10-slim
+
+ARG user=lbry
+ARG projects_dir=/home/$user
+ARG db_dir=/database
+
+ARG DOCKER_TAG
+ARG DOCKER_COMMIT=docker
+ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
+
+RUN apt-get update && \
+    apt-get -y --no-install-recommends install \
+      wget \
+      automake libtool \
+      tar unzip \
+      build-essential \
+      pkg-config \
+      libleveldb-dev \
+      python3.7 \
+      python3-dev \
+      python3-pip \
+      python3-wheel \
+      python3-setuptools && \
+    update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
+    rm -rf /var/lib/apt/lists/*
+
+RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
+
+COPY . $projects_dir
+RUN chown -R $user:$user $projects_dir
+RUN mkdir -p $db_dir
+RUN chown -R $user:$user $db_dir
+
+USER $user
+WORKDIR $projects_dir
+
+RUN python3 -m pip install -U setuptools pip
+RUN make install
+RUN python3 docker/set_build.py
+RUN rm ~/.cache -rf
+VOLUME $db_dir
+ENTRYPOINT ["python3", "scripts/dht_node.py"]
```

```diff
@@ -1,4 +1,4 @@
-FROM ubuntu:19.10
+FROM debian:10-slim
 
 ARG user=lbry
 ARG db_dir=/database
@@ -13,10 +13,14 @@ RUN apt-get update && \
       wget \
       tar unzip \
       build-essential \
-      python3 \
+      automake libtool \
+      pkg-config \
+      libleveldb-dev \
+      python3.7 \
       python3-dev \
       python3-pip \
       python3-wheel \
+      python3-cffi \
       python3-setuptools && \
     update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
     rm -rf /var/lib/apt/lists/*
```

**docker/Dockerfile.web** (new file, 45 lines)

```diff
@@ -0,0 +1,45 @@
+FROM debian:10-slim
+
+ARG user=lbry
+ARG downloads_dir=/database
+ARG projects_dir=/home/$user
+
+ARG DOCKER_TAG
+ARG DOCKER_COMMIT=docker
+ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
+
+RUN apt-get update && \
+    apt-get -y --no-install-recommends install \
+      wget \
+      automake libtool \
+      tar unzip \
+      build-essential \
+      pkg-config \
+      libleveldb-dev \
+      python3.7 \
+      python3-dev \
+      python3-pip \
+      python3-wheel \
+      python3-setuptools && \
+    update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
+    rm -rf /var/lib/apt/lists/*
+
+RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
+RUN mkdir -p $downloads_dir
+RUN chown -R $user:$user $downloads_dir
+
+COPY . $projects_dir
+RUN chown -R $user:$user $projects_dir
+
+USER $user
+WORKDIR $projects_dir
+
+RUN pip install uvloop
+RUN make install
+RUN python3 docker/set_build.py
+RUN rm ~/.cache -rf
+
+# entry point
+VOLUME $downloads_dir
+COPY ./docker/webconf.yaml /webconf.yaml
+ENTRYPOINT ["/home/lbry/.local/bin/lbrynet", "start", "--config=/webconf.yaml"]
```

**docker/README.md** (new file, 9 lines)

```diff
@@ -0,0 +1,9 @@
+### How to run with docker-compose
+1. Edit config file and after that fix permissions with
+```
+sudo chown -R 999:999 webconf.yaml
+```
+2. Start SDK with
+```
+docker-compose up -d
+```
```

```diff
@@ -1,34 +1,49 @@
 version: "3"
 
 volumes:
-  lbrycrd:
   wallet_server:
+  es01:
 
 services:
-  lbrycrd:
-    image: lbry/lbrycrd:${LBRYCRD_TAG:-latest-release}
-    restart: always
-    ports: # accessible from host
-      - "9246:9246" # rpc port
-    expose: # internal to docker network. also this doesn't do anything. its for documentation only.
-      - "9245" # node-to-node comms port
-    volumes:
-      - "lbrycrd:/data/.lbrycrd"
-    environment:
-      - RUN_MODE=default
-      - SNAPSHOT_URL=${LBRYCRD_SNAPSHOT_URL-https://lbry.com/snapshot/blockchain}
-      - RPC_ALLOW_IP=0.0.0.0/0
   wallet_server:
+    depends_on:
+      - es01
     image: lbry/wallet-server:${WALLET_SERVER_TAG:-latest-release}
-    depends_on:
-      - lbrycrd
     restart: always
+    network_mode: host
     ports:
       - "50001:50001" # rpc port
-      - "50005:50005" # websocket port
-      #- "2112:2112" # uncomment to enable prometheus
+      - "2112:2112" # uncomment to enable prometheus
     volumes:
       - "wallet_server:/database"
     environment:
-      - SNAPSHOT_URL=${WALLET_SERVER_SNAPSHOT_URL-https://lbry.com/snapshot/wallet}
-      - DAEMON_URL=http://lbry:lbry@lbrycrd:9245
+      - DAEMON_URL=http://lbry:lbry@127.0.0.1:9245
+      - MAX_QUERY_WORKERS=4
+      - CACHE_MB=1024
+      - CACHE_ALL_TX_HASHES=
+      - CACHE_ALL_CLAIM_TXOS=
+      - MAX_SEND=1000000000000000000
+      - MAX_RECEIVE=1000000000000000000
+      - MAX_SESSIONS=100000
+      - HOST=0.0.0.0
+      - TCP_PORT=50001
+      - PROMETHEUS_PORT=2112
+      - FILTERING_CHANNEL_IDS=770bd7ecba84fd2f7607fb15aedd2b172c2e153f 95e5db68a3101df19763f3a5182e4b12ba393ee8
+      - BLOCKING_CHANNEL_IDS=dd687b357950f6f271999971f43c785e8067c3a9 06871aa438032244202840ec59a469b303257cad b4a2528f436eca1bf3bf3e10ff3f98c57bd6c4c6
+  es01:
+    image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0
+    container_name: es01
+    environment:
+      - node.name=es01
+      - discovery.type=single-node
+      - indices.query.bool.max_clause_count=8192
+      - bootstrap.memory_lock=true
+      - "ES_JAVA_OPTS=-Xms4g -Xmx4g" # no more than 32, remember to disable swap
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+    volumes:
+      - es01:/usr/share/elasticsearch/data
+    ports:
+      - 127.0.0.1:9200:9200
```

**docker/docker-compose.yml** (new file, 9 lines)

```diff
@@ -0,0 +1,9 @@
+version: '3'
+services:
+  websdk:
+    image: vshyba/websdk
+    ports:
+      - '5279:5279'
+      - '5280:5280'
+    volumes:
+      - ./webconf.yaml:/webconf.yaml
```

```diff
@@ -20,7 +20,7 @@ def _check_and_set(d: dict, key: str, value: str):
 def main():
     build_info = {item: build_info_mod.__dict__[item] for item in dir(build_info_mod) if not item.startswith("__")}
 
-    commit_hash = os.getenv('DOCKER_COMMIT', os.getenv('CI_COMMIT_SHA', os.getenv('TRAVIS_COMMIT')))
+    commit_hash = os.getenv('DOCKER_COMMIT', os.getenv('GITHUB_SHA'))
     if commit_hash is None:
         raise ValueError("Commit hash not found in env vars")
     _check_and_set(build_info, "COMMIT_HASH", commit_hash[:6])
@@ -30,8 +30,10 @@ def main():
         _check_and_set(build_info, "DOCKER_TAG", docker_tag)
         _check_and_set(build_info, "BUILD", "docker")
     else:
-        ci_tag = os.getenv('CI_COMMIT_TAG', os.getenv('TRAVIS_TAG'))
-        _check_and_set(build_info, "BUILD", "release" if re.match(r'v\d+\.\d+\.\d+$', str(ci_tag)) else "qa")
+        if re.match(r'refs/tags/v\d+\.\d+\.\d+$', str(os.getenv('GITHUB_REF'))):
+            _check_and_set(build_info, "BUILD", "release")
+        else:
+            _check_and_set(build_info, "BUILD", "qa")
 
     log.debug("build info: %s", ", ".join([f"{k}={v}" for k, v in build_info.items()]))
     with open(build_info_mod.__file__, 'w') as f:
```
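
With the CI migration, the build type is now derived from the `GITHUB_REF` environment variable rather than from GitLab or Travis tag variables. A minimal sketch of that classification rule, using made-up ref strings:

```python
import re

def classify(github_ref):
    # Mirrors the regex above: only refs that look like release tags
    # (for example refs/tags/v0.113.0) are treated as "release" builds;
    # everything else, including branches and pull requests, is "qa".
    return "release" if re.match(r'refs/tags/v\d+\.\d+\.\d+$', str(github_ref)) else "qa"

assert classify("refs/tags/v0.113.0") == "release"
assert classify("refs/heads/master") == "qa"
assert classify(None) == "qa"   # os.getenv() returns None when the variable is unset
```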

```diff
@@ -6,7 +6,7 @@ set -euo pipefail
 
 SNAPSHOT_URL="${SNAPSHOT_URL:-}" #off by default. latest snapshot at https://lbry.com/snapshot/wallet
 
-if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/claims.db ]]; then
+if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/lbry-leveldb ]]; then
   files="$(ls)"
   echo "Downloading wallet snapshot from $SNAPSHOT_URL"
   wget --no-verbose --trust-server-names --content-disposition "$SNAPSHOT_URL"
@@ -20,4 +20,6 @@ if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/claims.db ]]; then
   rm "$filename"
 fi
 
-/home/lbry/.local/bin/torba-server "$@"
+/home/lbry/.local/bin/lbry-hub-elastic-sync
+echo 'starting server'
+/home/lbry/.local/bin/lbry-hub "$@"
```

**docker/webconf.yaml** (new file, 9 lines)

```diff
@@ -0,0 +1,9 @@
+allowed_origin: "*"
+max_key_fee: "0.0 USD"
+save_files: false
+save_blobs: false
+streaming_server: "0.0.0.0:5280"
+api: "0.0.0.0:5279"
+data_dir: /tmp
+download_dir: /tmp
+wallet_dir: /tmp
```

**docs/api.json** (1028 changed lines; file diff suppressed because one or more lines are too long)

```diff
@@ -1,2 +1,2 @@
-__version__ = "0.79.0"
+__version__ = "0.113.0"
 version = tuple(map(int, __version__.split('.')))  # pylint: disable=invalid-name
```
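
For reference, the `version` tuple computed on the second line evaluates like this for the new release string:

```python
>>> __version__ = "0.113.0"
>>> tuple(map(int, __version__.split('.')))
(0, 113, 0)
```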

```diff
@@ -1,5 +1,6 @@
 import os
 import re
+import time
 import asyncio
 import binascii
 import logging
@@ -70,21 +71,27 @@ class AbstractBlob:
         'writers',
         'verified',
         'writing',
-        'readers'
+        'readers',
+        'added_on',
+        'is_mine',
     ]
 
-    def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
-                 blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
-                 blob_directory: typing.Optional[str] = None):
+    def __init__(
+            self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
+            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
+            blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False,
+    ):
         self.loop = loop
         self.blob_hash = blob_hash
         self.length = length
         self.blob_completed_callback = blob_completed_callback
         self.blob_directory = blob_directory
         self.writers: typing.Dict[typing.Tuple[typing.Optional[str], typing.Optional[int]], HashBlobWriter] = {}
-        self.verified: asyncio.Event = asyncio.Event(loop=self.loop)
-        self.writing: asyncio.Event = asyncio.Event(loop=self.loop)
+        self.verified: asyncio.Event = asyncio.Event()
+        self.writing: asyncio.Event = asyncio.Event()
         self.readers: typing.List[typing.BinaryIO] = []
+        self.added_on = added_on or time.time()
+        self.is_mine = is_mine
 
         if not is_valid_blobhash(blob_hash):
             raise InvalidBlobHashError(blob_hash)
@@ -180,20 +187,21 @@ class AbstractBlob:
 
     @classmethod
     async def create_from_unencrypted(
             cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
-            unencrypted: bytes, blob_num: int,
-            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None) -> BlobInfo:
+            unencrypted: bytes, blob_num: int, added_on: int, is_mine: bool,
+            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None,
+    ) -> BlobInfo:
         """
         Create an encrypted BlobFile from plaintext bytes
         """
 
         blob_bytes, blob_hash = encrypt_blob_bytes(key, iv, unencrypted)
         length = len(blob_bytes)
-        blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir)
+        blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir, added_on, is_mine)
         writer = blob.get_blob_writer()
         writer.write(blob_bytes)
         await blob.verified.wait()
-        return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), blob_hash)
+        return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), added_on, blob_hash, is_mine)
 
     def save_verified_blob(self, verified_bytes: bytes):
         if self.verified.is_set():
@@ -214,7 +222,7 @@ class AbstractBlob:
                         peer_port: typing.Optional[int] = None) -> HashBlobWriter:
         if (peer_address, peer_port) in self.writers and not self.writers[(peer_address, peer_port)].closed():
             raise OSError(f"attempted to download blob twice from {peer_address}:{peer_port}")
-        fut = asyncio.Future(loop=self.loop)
+        fut = asyncio.Future()
         writer = HashBlobWriter(self.blob_hash, self.get_length, fut)
         self.writers[(peer_address, peer_port)] = writer
 
@@ -248,11 +256,13 @@ class BlobBuffer(AbstractBlob):
     """
     An in-memory only blob
     """
-    def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
-                 blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
-                 blob_directory: typing.Optional[str] = None):
+    def __init__(
+            self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
+            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
+            blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
+    ):
         self._verified_bytes: typing.Optional[BytesIO] = None
-        super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory)
+        super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)
 
     @contextlib.contextmanager
     def _reader_context(self) -> typing.ContextManager[typing.BinaryIO]:
@@ -289,10 +299,12 @@ class BlobFile(AbstractBlob):
     """
     A blob existing on the local file system
     """
-    def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
-                 blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
-                 blob_directory: typing.Optional[str] = None):
-        super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory)
+    def __init__(
+            self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
+            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
+            blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
+    ):
+        super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)
         if not blob_directory or not os.path.isdir(blob_directory):
             raise OSError(f"invalid blob directory '{blob_directory}'")
         self.file_path = os.path.join(self.blob_directory, self.blob_hash)
@@ -337,18 +349,18 @@ class BlobFile(AbstractBlob):
         return self.loop.create_task(write_blob())
 
     def delete(self):
+        super().delete()
         if os.path.isfile(self.file_path):
             os.remove(self.file_path)
-        return super().delete()
 
     @classmethod
     async def create_from_unencrypted(
             cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
-            unencrypted: bytes, blob_num: int,
-            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'],
-                                                                     asyncio.Task]] = None) -> BlobInfo:
+            unencrypted: bytes, blob_num: int, added_on: float, is_mine: bool,
+            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None
+    ) -> BlobInfo:
         if not blob_dir or not os.path.isdir(blob_dir):
             raise OSError(f"cannot create blob in directory: '{blob_dir}'")
         return await super().create_from_unencrypted(
-            loop, blob_dir, key, iv, unencrypted, blob_num, blob_completed_callback
+            loop, blob_dir, key, iv, unencrypted, blob_num, added_on, is_mine, blob_completed_callback
         )
```
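
Besides threading the new `added_on` and `is_mine` fields through the blob classes, these hunks drop the explicit `loop=` argument from `asyncio.Event()`, `asyncio.Future()` and `asyncio.wait_for()`. Passing `loop` to these primitives was deprecated in Python 3.8 and removed in Python 3.10, so code that keeps a loop reference simply stops forwarding it. A small self-contained sketch of the new pattern (not the SDK's actual classes):

```python
import asyncio

class Example:
    def __init__(self):
        # Old style (rejected on Python 3.10+): asyncio.Event(loop=some_loop)
        # New style: the primitive binds to the running loop when first awaited.
        self.verified = asyncio.Event()

    async def wait_verified(self, timeout: float) -> None:
        # asyncio.wait_for() likewise no longer accepts a loop= keyword.
        await asyncio.wait_for(self.verified.wait(), timeout)

async def main():
    example = Example()
    example.verified.set()
    await example.wait_verified(timeout=1.0)

asyncio.run(main())
```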

```diff
@@ -7,13 +7,19 @@ class BlobInfo:
         'blob_num',
         'length',
         'iv',
+        'added_on',
+        'is_mine'
     ]
 
-    def __init__(self, blob_num: int, length: int, iv: str, blob_hash: typing.Optional[str] = None):
+    def __init__(
+            self, blob_num: int, length: int, iv: str, added_on,
+            blob_hash: typing.Optional[str] = None, is_mine=False):
         self.blob_hash = blob_hash
         self.blob_num = blob_num
         self.length = length
         self.iv = iv
+        self.added_on = added_on
+        self.is_mine = is_mine
 
     def as_dict(self) -> typing.Dict:
         d = {
```
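
With the two extra slots, a `BlobInfo` now records when the blob was added and whether this node created it. A sketch of constructing one under the new signature; the hash, IV and size below are placeholders, and the import path assumes the module shown in this diff is `lbry/blob/blob_info.py`:

```python
import time
from lbry.blob.blob_info import BlobInfo  # assumed module path

info = BlobInfo(
    blob_num=0,
    length=2097152,              # size of the encrypted blob in bytes (placeholder)
    iv="00" * 16,                # hex-encoded initialization vector (placeholder)
    added_on=int(time.time()),   # new field: when the blob was added locally
    blob_hash="ab" * 48,         # 96-character blob hash (placeholder)
    is_mine=True,                # new field: this node produced the blob
)
```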

```diff
@@ -2,7 +2,7 @@ import os
 import typing
 import asyncio
 import logging
-from lbry.utils import LRUCache
+from lbry.utils import LRUCacheWithMetrics
 from lbry.blob.blob_file import is_valid_blobhash, BlobFile, BlobBuffer, AbstractBlob
 from lbry.stream.descriptor import StreamDescriptor
 from lbry.connection_manager import ConnectionManager
@@ -32,34 +32,34 @@ class BlobManager:
             else self._node_data_store.completed_blobs
         self.blobs: typing.Dict[str, AbstractBlob] = {}
         self.config = config
-        self.decrypted_blob_lru_cache = None if not self.config.blob_lru_cache_size else LRUCache(
+        self.decrypted_blob_lru_cache = None if not self.config.blob_lru_cache_size else LRUCacheWithMetrics(
             self.config.blob_lru_cache_size)
         self.connection_manager = ConnectionManager(loop)
 
-    def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None):
+    def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None, is_mine: bool = False):
         if self.config.save_blobs or (
                 is_valid_blobhash(blob_hash) and os.path.isfile(os.path.join(self.blob_dir, blob_hash))):
             return BlobFile(
-                self.loop, blob_hash, length, self.blob_completed, self.blob_dir
+                self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
             )
         return BlobBuffer(
-            self.loop, blob_hash, length, self.blob_completed, self.blob_dir
+            self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
         )
 
-    def get_blob(self, blob_hash, length: typing.Optional[int] = None):
+    def get_blob(self, blob_hash, length: typing.Optional[int] = None, is_mine: bool = False):
         if blob_hash in self.blobs:
             if self.config.save_blobs and isinstance(self.blobs[blob_hash], BlobBuffer):
                 buffer = self.blobs.pop(blob_hash)
                 if blob_hash in self.completed_blob_hashes:
                     self.completed_blob_hashes.remove(blob_hash)
-                self.blobs[blob_hash] = self._get_blob(blob_hash, length)
+                self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
                 if buffer.is_readable():
                     with buffer.reader_context() as reader:
                         self.blobs[blob_hash].write_blob(reader.read())
             if length and self.blobs[blob_hash].length is None:
                 self.blobs[blob_hash].set_length(length)
         else:
-            self.blobs[blob_hash] = self._get_blob(blob_hash, length)
+            self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
         return self.blobs[blob_hash]
 
     def is_blob_verified(self, blob_hash: str, length: typing.Optional[int] = None) -> bool:
@@ -83,6 +83,8 @@ class BlobManager:
         to_add = await self.storage.sync_missing_blobs(in_blobfiles_dir)
         if to_add:
             self.completed_blob_hashes.update(to_add)
+        # check blobs that aren't set as finished but were seen on disk
+        await self.ensure_completed_blobs_status(in_blobfiles_dir - to_add)
         if self.config.track_bandwidth:
             self.connection_manager.start()
         return True
@@ -105,13 +107,26 @@ class BlobManager:
         if isinstance(blob, BlobFile):
             if blob.blob_hash not in self.completed_blob_hashes:
                 self.completed_blob_hashes.add(blob.blob_hash)
-            return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=True))
+            return self.loop.create_task(self.storage.add_blobs(
+                (blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=True)
+            )
         else:
-            return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=False))
+            return self.loop.create_task(self.storage.add_blobs(
+                (blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=False)
+            )
 
-    def check_completed_blobs(self, blob_hashes: typing.List[str]) -> typing.List[str]:
-        """Returns of the blobhashes_to_check, which are valid"""
-        return [blob_hash for blob_hash in blob_hashes if self.is_blob_verified(blob_hash)]
+    async def ensure_completed_blobs_status(self, blob_hashes: typing.Iterable[str]):
+        """Ensures that completed blobs from a given list of blob hashes are set as 'finished' in the database."""
+        to_add = []
+        for blob_hash in blob_hashes:
+            if not self.is_blob_verified(blob_hash):
+                continue
+            blob = self.get_blob(blob_hash)
+            to_add.append((blob.blob_hash, blob.length, blob.added_on, blob.is_mine))
+            if len(to_add) > 500:
+                await self.storage.add_blobs(*to_add, finished=True)
+                to_add.clear()
+        return await self.storage.add_blobs(*to_add, finished=True)
 
     def delete_blob(self, blob_hash: str):
         if not is_valid_blobhash(blob_hash):
```
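
The new `ensure_completed_blobs_status()` flushes its pending rows to the database every 500 entries instead of building one giant call. The batching pattern it uses, isolated from the SDK (the `flush` coroutine below is only a stand-in for the storage layer's `add_blobs()`):

```python
import asyncio

async def flush(rows):
    # Stand-in for writing a batch of blob rows to the database.
    print(f"writing {len(rows)} rows")

async def mark_finished(blob_rows, batch_size=500):
    to_add = []
    for row in blob_rows:
        to_add.append(row)
        if len(to_add) > batch_size:
            await flush(to_add)   # write a full batch...
            to_add.clear()        # ...then start accumulating the next one
    await flush(to_add)           # final partial batch (possibly empty)

# Example with made-up rows: (blob_hash, length, added_on, is_mine)
asyncio.run(mark_finished([(f"hash{i}", 2 ** 21, 0, True) for i in range(1200)]))
```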

**lbry/blob/disk_space_manager.py** (new file, 77 lines)

```diff
@@ -0,0 +1,77 @@
+import asyncio
+import logging
+
+log = logging.getLogger(__name__)
+
+
+class DiskSpaceManager:
+
+    def __init__(self, config, db, blob_manager, cleaning_interval=30 * 60, analytics=None):
+        self.config = config
+        self.db = db
+        self.blob_manager = blob_manager
+        self.cleaning_interval = cleaning_interval
+        self.running = False
+        self.task = None
+        self.analytics = analytics
+        self._used_space_bytes = None
+
+    async def get_free_space_mb(self, is_network_blob=False):
+        limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
+        space_used_mb = await self.get_space_used_mb()
+        space_used_mb = space_used_mb['network_storage'] if is_network_blob else space_used_mb['content_storage']
+        return max(0, limit_mb - space_used_mb)
+
+    async def get_space_used_bytes(self):
+        self._used_space_bytes = await self.db.get_stored_blob_disk_usage()
+        return self._used_space_bytes
+
+    async def get_space_used_mb(self, cached=True):
+        cached = cached and self._used_space_bytes is not None
+        space_used_bytes = self._used_space_bytes if cached else await self.get_space_used_bytes()
+        return {key: int(value/1024.0/1024.0) for key, value in space_used_bytes.items()}
+
+    async def clean(self):
+        await self._clean(False)
+        await self._clean(True)
+
+    async def _clean(self, is_network_blob=False):
+        space_used_mb = await self.get_space_used_mb(cached=False)
+        if is_network_blob:
+            space_used_mb = space_used_mb['network_storage']
+        else:
+            space_used_mb = space_used_mb['content_storage'] + space_used_mb['private_storage']
+        storage_limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
+        if self.analytics:
+            asyncio.create_task(
+                self.analytics.send_disk_space_used(space_used_mb, storage_limit_mb, is_network_blob)
+            )
+        delete = []
+        available = storage_limit_mb - space_used_mb
+        if storage_limit_mb == 0 if not is_network_blob else available >= 0:
+            return 0
+        for blob_hash, file_size, _ in await self.db.get_stored_blobs(is_mine=False, is_network_blob=is_network_blob):
+            delete.append(blob_hash)
+            available += int(file_size/1024.0/1024.0)
+            if available >= 0:
+                break
+        if delete:
+            await self.db.stop_all_files()
+            await self.blob_manager.delete_blobs(delete, delete_from_db=True)
+            self._used_space_bytes = None
+        return len(delete)
+
+    async def cleaning_loop(self):
+        while self.running:
+            await asyncio.sleep(self.cleaning_interval)
+            await self.clean()
+
+    async def start(self):
+        self.running = True
+        self.task = asyncio.create_task(self.cleaning_loop())
+        self.task.add_done_callback(lambda _: log.info("Stopping blob cleanup service."))
+
+    async def stop(self):
+        if self.running:
+            self.running = False
+            self.task.cancel()
```
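
One line in `_clean()` is easy to misread: `if storage_limit_mb == 0 if not is_network_blob else available >= 0:` is a conditional expression choosing which early-exit test applies, not a chained comparison. Spelled out as an equivalent function:

```python
def should_skip_cleanup(is_network_blob: bool, storage_limit_mb: int, available_mb: int) -> bool:
    # Equivalent to: storage_limit_mb == 0 if not is_network_blob else available_mb >= 0
    if not is_network_blob:
        # Content blobs: a limit of 0 reads as "no limit configured", so never clean.
        return storage_limit_mb == 0
    # Network blobs: nothing to delete while usage is still under the limit.
    return available_mb >= 0

assert should_skip_cleanup(False, 0, -10) is True      # no content limit set
assert should_skip_cleanup(False, 5000, -10) is False   # over the content limit
assert should_skip_cleanup(True, 5000, 100) is True     # network storage under limit
```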

```diff
@@ -32,7 +32,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         self.buf = b''
 
         # this is here to handle the race when the downloader is closed right as response_fut gets a result
-        self.closed = asyncio.Event(loop=self.loop)
+        self.closed = asyncio.Event()
 
     def data_received(self, data: bytes):
         if self.connection_manager:
@@ -111,7 +111,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
             self.transport.write(msg)
             if self.connection_manager:
                 self.connection_manager.sent_data(f"{self.peer_address}:{self.peer_port}", len(msg))
-            response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout, loop=self.loop)
+            response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout)
             availability_response = response.get_availability_response()
             price_response = response.get_price_response()
             blob_response = response.get_blob_response()
@@ -151,7 +151,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
                       f" timeout in {self.peer_timeout}"
                 log.debug(msg)
                 msg = f"downloaded {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}"
-                await asyncio.wait_for(self.writer.finished, self.peer_timeout, loop=self.loop)
+                await asyncio.wait_for(self.writer.finished, self.peer_timeout)
                 # wait for the io to finish
                 await self.blob.verified.wait()
                 log.info("%s at %fMB/s", msg,
@@ -187,7 +187,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         try:
             self._blob_bytes_received = 0
             self.blob, self.writer = blob, blob.get_blob_writer(self.peer_address, self.peer_port)
-            self._response_fut = asyncio.Future(loop=self.loop)
+            self._response_fut = asyncio.Future()
             return await self._download_blob()
         except OSError:
             # i'm not sure how to fix this race condition - jack
@@ -244,7 +244,7 @@ async def request_blob(loop: asyncio.AbstractEventLoop, blob: Optional['Abstract
     try:
         if not connected_protocol:
             await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port),
-                                   peer_connect_timeout, loop=loop)
+                                   peer_connect_timeout)
             connected_protocol = protocol
         if blob is None or blob.get_is_verified() or not blob.is_writeable():
             # blob is None happens when we are just opening a connection
```

```diff
@@ -3,6 +3,7 @@ import typing
 import logging
 from lbry.utils import cache_concurrent
 from lbry.blob_exchange.client import request_blob
+from lbry.dht.node import get_kademlia_peers_from_hosts
 if typing.TYPE_CHECKING:
     from lbry.conf import Config
     from lbry.dht.node import Node
@@ -29,7 +30,7 @@ class BlobDownloader:
         self.failures: typing.Dict['KademliaPeer', int] = {}
         self.connection_failures: typing.Set['KademliaPeer'] = set()
         self.connections: typing.Dict['KademliaPeer', 'BlobExchangeClientProtocol'] = {}
-        self.is_running = asyncio.Event(loop=self.loop)
+        self.is_running = asyncio.Event()
 
     def should_race_continue(self, blob: 'AbstractBlob'):
         max_probes = self.config.max_connections_per_download * (1 if self.connections else 10)
@@ -63,8 +64,8 @@ class BlobDownloader:
         self.scores[peer] = bytes_received / elapsed if bytes_received and elapsed else 1
 
     async def new_peer_or_finished(self):
-        active_tasks = list(self.active_connections.values()) + [asyncio.sleep(1)]
-        await asyncio.wait(active_tasks, loop=self.loop, return_when='FIRST_COMPLETED')
+        active_tasks = list(self.active_connections.values()) + [asyncio.create_task(asyncio.sleep(1))]
+        await asyncio.wait(active_tasks, return_when='FIRST_COMPLETED')
 
     def cleanup_active(self):
         if not self.active_connections and not self.connections:
@@ -87,7 +88,6 @@ class BlobDownloader:
         if blob.get_is_verified():
             return blob
         self.is_running.set()
-        tried_for_this_blob: typing.Set['KademliaPeer'] = set()
         try:
             while not blob.get_is_verified() and self.is_running.is_set():
                 batch: typing.Set['KademliaPeer'] = set(self.connections.keys())
@@ -97,24 +97,15 @@ class BlobDownloader:
                     "%s running, %d peers, %d ignored, %d active, %s connections", blob_hash[:6],
                     len(batch), len(self.ignored), len(self.active_connections), len(self.connections)
                 )
-                re_add: typing.Set['KademliaPeer'] = set()
                 for peer in sorted(batch, key=lambda peer: self.scores.get(peer, 0), reverse=True):
                     if peer in self.ignored:
                         continue
-                    if peer in tried_for_this_blob:
+                    if peer in self.active_connections or not self.should_race_continue(blob):
                         continue
-                    if peer in self.active_connections:
-                        if peer not in re_add:
-                            re_add.add(peer)
-                        continue
-                    if not self.should_race_continue(blob):
-                        break
                     log.debug("request %s from %s:%i", blob_hash[:8], peer.address, peer.tcp_port)
                     t = self.loop.create_task(self.request_blob_from_peer(blob, peer, connection_id))
                     self.active_connections[peer] = t
-                    tried_for_this_blob.add(peer)
-                if not re_add:
-                    self.peer_queue.put_nowait(list(batch))
+                self.peer_queue.put_nowait(list(batch))
                 await self.new_peer_or_finished()
                 self.cleanup_active()
             log.debug("downloaded %s", blob_hash[:8])
@@ -133,11 +124,14 @@ class BlobDownloader:
             protocol.close()
 
 
-async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', node: 'Node',
+async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node',
                         blob_hash: str) -> 'AbstractBlob':
-    search_queue = asyncio.Queue(loop=loop, maxsize=config.max_connections_per_download)
+    search_queue = asyncio.Queue(maxsize=config.max_connections_per_download)
     search_queue.put_nowait(blob_hash)
-    peer_queue, accumulate_task = node.accumulate_peers(search_queue)
+    peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue)
+    fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers)
+    if fixed_peers:
+        loop.call_later(config.fixed_peer_delay, peer_queue.put_nowait, fixed_peers)
     downloader = BlobDownloader(loop, config, blob_manager, peer_queue)
     try:
        return await downloader.download_blob(blob_hash)
```
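
`download_blob()` now also seeds the peer queue with any configured fixed peers, but only after `config.fixed_peer_delay`, so peers discovered through the DHT get a head start. The scheduling is just `loop.call_later()` feeding the queue; a standalone sketch with made-up host and delay values:

```python
import asyncio

async def main():
    loop = asyncio.get_running_loop()
    peer_queue: asyncio.Queue = asyncio.Queue()

    fixed_peers = [("cdn.reflector.lbry.com", 5567)]   # placeholder host and port
    fixed_peer_delay = 2.0                             # seconds to favor DHT-found peers

    # call_later schedules a plain callback; put_nowait never blocks here
    # because the queue is unbounded.
    loop.call_later(fixed_peer_delay, peer_queue.put_nowait, fixed_peers)

    first_batch = await peer_queue.get()               # arrives roughly 2 seconds later
    print(first_batch)

asyncio.run(main())
```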

```diff
@@ -1,6 +1,7 @@
 import asyncio
 import binascii
 import logging
+import socket
 import typing
 from json.decoder import JSONDecodeError
 from lbry.blob_exchange.serialization import BlobResponse, BlobRequest, blob_response_types
@@ -24,19 +25,19 @@ class BlobServerProtocol(asyncio.Protocol):
         self.idle_timeout = idle_timeout
         self.transfer_timeout = transfer_timeout
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event(loop=self.loop)
+        self.started_listening = asyncio.Event()
         self.buf = b''
         self.transport: typing.Optional[asyncio.Transport] = None
         self.lbrycrd_address = lbrycrd_address
         self.peer_address_and_port: typing.Optional[str] = None
-        self.started_transfer = asyncio.Event(loop=self.loop)
-        self.transfer_finished = asyncio.Event(loop=self.loop)
+        self.started_transfer = asyncio.Event()
+        self.transfer_finished = asyncio.Event()
         self.close_on_idle_task: typing.Optional[asyncio.Task] = None
 
     async def close_on_idle(self):
         while self.transport:
             try:
-                await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout, loop=self.loop)
+                await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout)
             except asyncio.TimeoutError:
                 log.debug("closing idle connection from %s", self.peer_address_and_port)
                 return self.close()
@@ -100,7 +101,7 @@ class BlobServerProtocol(asyncio.Protocol):
             log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
             self.started_transfer.set()
             try:
-                sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout, loop=self.loop)
+                sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout)
                 if sent and sent > 0:
                     self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent)
                     log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port)
@@ -137,7 +138,7 @@ class BlobServerProtocol(asyncio.Protocol):
             try:
                 request = BlobRequest.deserialize(self.buf + data)
                 self.buf = remainder
-            except JSONDecodeError:
+            except (UnicodeDecodeError, JSONDecodeError):
                 log.error("request from %s is not valid json (%i bytes): %s", self.peer_address_and_port,
                           len(self.buf + data), '' if not data else binascii.hexlify(self.buf + data).decode())
                 self.close()
@@ -156,7 +157,7 @@ class BlobServer:
         self.loop = loop
         self.blob_manager = blob_manager
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event(loop=self.loop)
+        self.started_listening = asyncio.Event()
         self.lbrycrd_address = lbrycrd_address
         self.idle_timeout = idle_timeout
         self.transfer_timeout = transfer_timeout
@@ -167,6 +168,13 @@ class BlobServer:
             raise Exception("already running")
 
         async def _start_server():
+            # checking if the port is in use
+            # thx https://stackoverflow.com/a/52872579
+            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+                if s.connect_ex(('localhost', port)) == 0:
+                    # the port is already in use!
+                    log.error("Failed to bind TCP %s:%d", interface, port)
+
             server = await self.loop.create_server(
                 lambda: self.server_protocol_class(self.loop, self.blob_manager, self.lbrycrd_address,
                                                    self.idle_timeout, self.transfer_timeout),
```
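
The new `_start_server()` preamble probes the TCP port with `connect_ex()` before binding, only so it can log a clearer error when the port is already taken. The probe in isolation (host and port below are example values):

```python
import socket

def port_in_use(port: int, host: str = "localhost") -> bool:
    # connect_ex() returns 0 when something is already accepting connections
    # on (host, port); any non-zero error code means the port looks free.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex((host, port)) == 0

if port_in_use(3333):
    print("Failed to bind TCP 0.0.0.0:3333, port already in use")
```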
154
lbry/conf.py
154
lbry/conf.py
|
@ -1,8 +1,8 @@
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import sys
|
import sys
|
||||||
import typing
|
|
||||||
import logging
|
import logging
|
||||||
|
from typing import List, Dict, Tuple, Union, TypeVar, Generic, Optional
|
||||||
from argparse import ArgumentParser
|
from argparse import ArgumentParser
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
from appdirs import user_data_dir, user_config_dir
|
from appdirs import user_data_dir, user_config_dir
|
||||||
|
@ -15,7 +15,7 @@ log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
NOT_SET = type('NOT_SET', (object,), {}) # pylint: disable=invalid-name
|
NOT_SET = type('NOT_SET', (object,), {}) # pylint: disable=invalid-name
|
||||||
T = typing.TypeVar('T')
|
T = TypeVar('T')
|
||||||
|
|
||||||
CURRENCIES = {
|
CURRENCIES = {
|
||||||
'BTC': {'type': 'crypto'},
|
'BTC': {'type': 'crypto'},
|
||||||
|
@ -24,11 +24,11 @@ CURRENCIES = {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class Setting(typing.Generic[T]):
|
class Setting(Generic[T]):
|
||||||
|
|
||||||
def __init__(self, doc: str, default: typing.Optional[T] = None,
|
def __init__(self, doc: str, default: Optional[T] = None,
|
||||||
previous_names: typing.Optional[typing.List[str]] = None,
|
previous_names: Optional[List[str]] = None,
|
||||||
metavar: typing.Optional[str] = None):
|
metavar: Optional[str] = None):
|
||||||
self.doc = doc
|
self.doc = doc
|
||||||
self.default = default
|
self.default = default
|
||||||
self.previous_names = previous_names or []
|
self.previous_names = previous_names or []
|
||||||
|
@ -45,7 +45,7 @@ class Setting(typing.Generic[T]):
|
||||||
def no_cli_name(self):
|
def no_cli_name(self):
|
||||||
return f"--no-{self.name.replace('_', '-')}"
|
return f"--no-{self.name.replace('_', '-')}"
|
||||||
|
|
||||||
def __get__(self, obj: typing.Optional['BaseConfig'], owner) -> T:
|
def __get__(self, obj: Optional['BaseConfig'], owner) -> T:
|
||||||
if obj is None:
|
if obj is None:
|
||||||
return self
|
return self
|
||||||
for location in obj.search_order:
|
for location in obj.search_order:
|
||||||
|
@ -53,7 +53,7 @@ class Setting(typing.Generic[T]):
|
||||||
return location[self.name]
|
return location[self.name]
|
||||||
return self.default
|
return self.default
|
||||||
|
|
||||||
def __set__(self, obj: 'BaseConfig', val: typing.Union[T, NOT_SET]):
|
def __set__(self, obj: 'BaseConfig', val: Union[T, NOT_SET]):
|
||||||
if val == NOT_SET:
|
if val == NOT_SET:
|
||||||
for location in obj.modify_order:
|
for location in obj.modify_order:
|
||||||
if self.name in location:
|
if self.name in location:
|
||||||
|
@ -63,6 +63,18 @@ class Setting(typing.Generic[T]):
|
||||||
for location in obj.modify_order:
|
for location in obj.modify_order:
|
||||||
location[self.name] = val
|
location[self.name] = val
|
||||||
|
|
||||||
|
def is_set(self, obj: 'BaseConfig') -> bool:
|
||||||
|
for location in obj.search_order:
|
||||||
|
if self.name in location:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
def is_set_to_default(self, obj: 'BaseConfig') -> bool:
|
||||||
|
for location in obj.search_order:
|
||||||
|
if self.name in location:
|
||||||
|
return location[self.name] == self.default
|
||||||
|
return False
|
||||||
|
|
||||||
def validate(self, value):
|
def validate(self, value):
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
@ -87,7 +99,7 @@ class String(Setting[str]):
|
||||||
f"Setting '{self.name}' must be a string."
|
f"Setting '{self.name}' must be a string."
|
||||||
|
|
||||||
# TODO: removes this after pylint starts to understand generics
|
# TODO: removes this after pylint starts to understand generics
|
||||||
def __get__(self, obj: typing.Optional['BaseConfig'], owner) -> str: # pylint: disable=useless-super-delegation
|
def __get__(self, obj: Optional['BaseConfig'], owner) -> str: # pylint: disable=useless-super-delegation
|
||||||
return super().__get__(obj, owner)
|
return super().__get__(obj, owner)
|
||||||
|
|
||||||
|
|
||||||
|
@ -191,7 +203,7 @@ class MaxKeyFee(Setting[dict]):
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
self.no_cli_name,
|
self.no_cli_name,
|
||||||
help=f"Disable maximum key fee check.",
|
help="Disable maximum key fee check.",
|
||||||
dest=self.name,
|
dest=self.name,
|
||||||
const=None,
|
const=None,
|
||||||
action="store_const",
|
action="store_const",
|
||||||
|
@ -200,7 +212,7 @@ class MaxKeyFee(Setting[dict]):
|
||||||
|
|
||||||
|
|
||||||
class StringChoice(String):
|
class StringChoice(String):
|
||||||
def __init__(self, doc: str, valid_values: typing.List[str], default: str, *args, **kwargs):
|
def __init__(self, doc: str, valid_values: List[str], default: str, *args, **kwargs):
|
||||||
super().__init__(doc, default, *args, **kwargs)
|
super().__init__(doc, default, *args, **kwargs)
|
||||||
if not valid_values:
|
if not valid_values:
|
||||||
raise ValueError("No valid values provided")
|
raise ValueError("No valid values provided")
|
||||||
|
@@ -273,6 +285,75 @@ class Strings(ListSetting):
                f"'{self.name}' must be a string."


+class KnownHubsList:
+
+    def __init__(self, config: 'Config' = None, file_name: str = 'known_hubs.yml'):
+        self.file_name = file_name
+        self.path = os.path.join(config.wallet_dir, self.file_name) if config else None
+        self.hubs: Dict[Tuple[str, int], Dict] = {}
+        if self.exists:
+            self.load()
+
+    @property
+    def exists(self):
+        return self.path and os.path.exists(self.path)
+
+    @property
+    def serialized(self) -> Dict[str, Dict]:
+        return {f"{host}:{port}": details for (host, port), details in self.hubs.items()}
+
+    def filter(self, match_none=False, **kwargs):
+        if not kwargs:
+            return self.hubs
+        result = {}
+        for hub, details in self.hubs.items():
+            for key, constraint in kwargs.items():
+                value = details.get(key)
+                if value == constraint or (match_none and value is None):
+                    result[hub] = details
+                    break
+        return result
+
+    def load(self):
+        if self.path:
+            with open(self.path, 'r') as known_hubs_file:
+                raw = known_hubs_file.read()
+                for hub, details in yaml.safe_load(raw).items():
+                    self.set(hub, details)
+
+    def save(self):
+        if self.path:
+            with open(self.path, 'w') as known_hubs_file:
+                known_hubs_file.write(yaml.safe_dump(self.serialized, default_flow_style=False))
+
+    def set(self, hub: str, details: Dict):
+        if hub and hub.count(':') == 1:
+            host, port = hub.split(':')
+            hub_parts = (host, int(port))
+            if hub_parts not in self.hubs:
+                self.hubs[hub_parts] = details
+            return hub
+
+    def add_hubs(self, hubs: List[str]):
+        added = False
+        for hub in hubs:
+            if self.set(hub, {}) is not None:
+                added = True
+        return added
+
+    def items(self):
+        return self.hubs.items()
+
+    def __bool__(self):
+        return len(self) > 0
+
+    def __len__(self):
+        return self.hubs.__len__()
+
+    def __iter__(self):
+        return iter(self.hubs)
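KnownHubsList keeps a {(host, port): details} map, optionally backed by known_hubs.yml in the wallet directory. A small usage sketch; constructing it without a config keeps everything in memory, and the hub strings are illustrative:

# Sketch: exercising KnownHubsList without a real Config (path stays None, nothing is written).
hubs = KnownHubsList()                               # no config -> in-memory only
hubs.add_hubs(["spv11.lbry.com:50001", "example.hub:50001"])
hubs.set("spv11.lbry.com:50001", {"region": "us"})   # already present, so details stay as first added
for (host, port), details in hubs.items():
    print(host, port, details)
print(hubs.serialized)                               # {"spv11.lbry.com:50001": {}, "example.hub:50001": {}}
print(hubs.filter(region="us"))                      # {} here, since the details were never updated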
class EnvironmentAccess:
    PREFIX = 'LBRY_'

@@ -377,7 +458,7 @@ class ConfigFileAccess:
        del self.data[key]


-TBC = typing.TypeVar('TBC', bound='BaseConfig')
+TBC = TypeVar('TBC', bound='BaseConfig')


class BaseConfig:
@@ -508,6 +589,9 @@ class CLIConfig(TranscodeConfig):


class Config(CLIConfig):

+    jurisdiction = String("Limit interactions to wallet server in this jurisdiction.")

    # directories
    data_dir = Path("Directory path to store blobs.", metavar='DIR')
    download_dir = Path(

@@ -529,7 +613,7 @@ class Config(CLIConfig):
        "ports or have firewall rules you likely want to disable this.", True
    )
    udp_port = Integer("UDP port for communicating on the LBRY DHT", 4444, previous_names=['dht_node_port'])
-    tcp_port = Integer("TCP port to listen for incoming blob requests", 3333, previous_names=['peer_port'])
+    tcp_port = Integer("TCP port to listen for incoming blob requests", 4444, previous_names=['peer_port'])
    prometheus_port = Integer("Port to expose prometheus metrics (off by default)", 0)
    network_interface = String("Interface to use for the DHT and blob exchange", '0.0.0.0')
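The port settings above can be overridden per instance rather than edited in source. A hedged sketch: that Config consumes settings as keyword arguments is an assumption based on the **kwargs constructor shown further down, and the LBRY_TCP_PORT variable name is only inferred from the LBRY_ environment prefix.

# Sketch: overriding defaults at construction time (values and import path are illustrative).
from lbry.conf import Config   # assumed import path

conf = Config(tcp_port=4445, udp_port=4445, data_dir="/tmp/lbry-data")  # hypothetical overrides
print(conf.tcp_port)   # 4445 instead of the new default of 4444

# An environment override would look like LBRY_TCP_PORT=4445 given PREFIX = 'LBRY_'
# (the exact variable name is an assumption).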
@@ -538,17 +622,24 @@ class Config(CLIConfig):
        "Routing table bucket index below which we always split the bucket if given a new key to add to it and "
        "the bucket is full. As this value is raised the depth of the routing table (and number of peers in it) "
        "will increase. This setting is used by seed nodes, you probably don't want to change it during normal "
-        "use.", 1
+        "use.", 2
+    )
+    is_bootstrap_node = Toggle(
+        "When running as a bootstrap node, disable all logic related to balancing the routing table, so we can "
+        "add as many peers as possible and better help first-runs.", False
    )

    # protocol timeouts
    download_timeout = Float("Cumulative timeout for a stream to begin downloading before giving up", 30.0)
    blob_download_timeout = Float("Timeout to download a blob from a peer", 30.0)
+    hub_timeout = Float("Timeout when making a hub request", 30.0)
    peer_connect_timeout = Float("Timeout to establish a TCP connection to a peer", 3.0)
    node_rpc_timeout = Float("Timeout when making a DHT request", constants.RPC_TIMEOUT)

    # blob announcement and download
    save_blobs = Toggle("Save encrypted blob files for hosting, otherwise download blobs to memory only.", True)
+    network_storage_limit = Integer("Disk space in MB to be allocated for helping the P2P network. 0 = disable", 0)
+    blob_storage_limit = Integer("Disk space in MB to be allocated for blob storage. 0 = no limit", 0)
    blob_lru_cache_size = Integer(
        "LRU cache size for decrypted downloaded blobs used to minimize re-downloading the same blobs when "
        "replying to a range request. Set to 0 to disable.", 32
@@ -565,6 +656,7 @@ class Config(CLIConfig):
        "Maximum number of peers to connect to while downloading a blob", 4,
        previous_names=['max_connections_per_stream']
    )
+    concurrent_hub_requests = Integer("Maximum number of concurrent hub requests", 32)
    fixed_peer_delay = Float(
        "Amount of seconds before adding the reflector servers as potential peers to download from in case dht"
        "peers are not found or are slow", 2.0

@@ -593,6 +685,14 @@ class Config(CLIConfig):
        ('cdn.reflector.lbry.com', 5567)
    ])

+    tracker_servers = Servers("BitTorrent-compatible (BEP15) UDP trackers for helping P2P discovery", [
+        ('tracker.lbry.com', 9252),
+        ('tracker.lbry.grin.io', 9252),
+        ('tracker.lbry.pigg.es', 9252),
+        ('tracker.lizard.technology', 9252),
+        ('s1.lbry.network', 9252),
+    ])
+
    lbryum_servers = Servers("SPV wallet servers", [
        ('spv11.lbry.com', 50001),
        ('spv12.lbry.com', 50001),
@@ -603,21 +703,27 @@ class Config(CLIConfig):
        ('spv17.lbry.com', 50001),
        ('spv18.lbry.com', 50001),
        ('spv19.lbry.com', 50001),
+        ('hub.lbry.grin.io', 50001),
+        ('hub.lizard.technology', 50001),
+        ('s1.lbry.network', 50001),
    ])
    known_dht_nodes = Servers("Known nodes for bootstrapping connection to the DHT", [
+        ('dht.lbry.grin.io', 4444),  # Grin
+        ('dht.lbry.madiator.com', 4444),  # Madiator
+        ('dht.lbry.pigg.es', 4444),  # Pigges
        ('lbrynet1.lbry.com', 4444),  # US EAST
        ('lbrynet2.lbry.com', 4444),  # US WEST
        ('lbrynet3.lbry.com', 4444),  # EU
-        ('lbrynet4.lbry.com', 4444)  # ASIA
+        ('lbrynet4.lbry.com', 4444),  # ASIA
+        ('dht.lizard.technology', 4444),  # Jack
+        ('s2.lbry.network', 4444),
    ])

-    comment_server = String("Comment server API URL", "https://comments.lbry.com/api")

    # blockchain
    blockchain_name = String("Blockchain name - lbrycrd_main, lbrycrd_regtest, or lbrycrd_testnet", 'lbrycrd_main')

    # daemon
-    save_files = Toggle("Save downloaded files when calling `get` by default", True)
+    save_files = Toggle("Save downloaded files when calling `get` by default", False)
    components_to_skip = Strings("components which will be skipped during start-up of daemon", [])
    share_usage_data = Toggle(
        "Whether to share usage stats and diagnostic info with LBRY.", False,
@@ -636,9 +742,10 @@ class Config(CLIConfig):

    coin_selection_strategy = StringChoice(
        "Strategy to use when selecting UTXOs for a transaction",
-        STRATEGIES, "standard")
+        STRATEGIES, "prefer_confirmed"
+    )

-    transaction_cache_size = Integer("Transaction cache size", 100_000)
+    transaction_cache_size = Integer("Transaction cache size", 2 ** 17)
    save_resolved_claims = Toggle(
        "Save content claims to the database when they are resolved to keep file_list up to date, "
        "only disable this if file_x commands are not needed", True

@@ -655,6 +762,7 @@ class Config(CLIConfig):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.set_default_paths()
+        self.known_hubs = KnownHubsList(self)

    def set_default_paths(self):
        if 'darwin' in sys.platform.lower():
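coin_selection_strategy is a StringChoice, so assignments outside STRATEGIES are rejected and the default moves from standard to prefer_confirmed. A minimal sketch of choice validation; the strategy names beyond the two shown in the diff are hypothetical, and this is not the library's exact validate implementation:

# Sketch of a StringChoice-style validator.
class MiniStringChoice:
    def __init__(self, valid_values, default):
        if not valid_values:
            raise ValueError("No valid values provided")
        self.valid_values, self.default = valid_values, default

    def validate(self, value):
        if value not in self.valid_values:
            raise ValueError(f"must be one of: {', '.join(self.valid_values)}")

strategy = MiniStringChoice(["standard", "prefer_confirmed"], "prefer_confirmed")
strategy.validate("prefer_confirmed")          # ok
try:
    strategy.validate("cheapest_first")        # hypothetical invalid value
except ValueError as e:
    print(e)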
@@ -676,7 +784,7 @@ class Config(CLIConfig):
        return os.path.join(self.data_dir, 'lbrynet.log')


-def get_windows_directories() -> typing.Tuple[str, str, str]:
+def get_windows_directories() -> Tuple[str, str, str]:
    from lbry.winpaths import get_path, FOLDERID, UserHandle, \
        PathNotFoundException  # pylint: disable=import-outside-toplevel

@@ -698,14 +806,14 @@ def get_windows_directories() -> typing.Tuple[str, str, str]:
    return data_dir, lbryum_dir, download_dir


-def get_darwin_directories() -> typing.Tuple[str, str, str]:
+def get_darwin_directories() -> Tuple[str, str, str]:
    data_dir = user_data_dir('LBRY')
    lbryum_dir = os.path.expanduser('~/.lbryum')
    download_dir = os.path.expanduser('~/Downloads')
    return data_dir, lbryum_dir, download_dir


-def get_linux_directories() -> typing.Tuple[str, str, str]:
+def get_linux_directories() -> Tuple[str, str, str]:
    try:
        with open(os.path.join(user_config_dir(), 'user-dirs.dirs'), 'r') as xdg:
            down_dir = re.search(r'XDG_DOWNLOAD_DIR=(.+)', xdg.read())
@@ -67,7 +67,7 @@ class ConnectionManager:

        while True:
            last = time.perf_counter()
-            await asyncio.sleep(0.1, loop=self.loop)
+            await asyncio.sleep(0.1)
            self._status['incoming_bps'].clear()
            self._status['outgoing_bps'].clear()
            now = time.perf_counter()
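Dropping loop=self.loop here (and from gather, Queue, Event and Future in the later hunks) follows the deprecation of the loop parameter in Python 3.8 and its removal in 3.10; coroutines now pick up the running loop implicitly. A small illustration:

import asyncio

async def tick():
    # Pre-3.8 style (invalid on 3.10+): await asyncio.sleep(0.1, loop=loop)
    await asyncio.sleep(0.1)              # the running loop is used implicitly
    done = asyncio.Event()                # likewise, no loop= argument
    asyncio.get_running_loop().call_later(0.1, done.set)
    await done.wait()

asyncio.run(tick())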
@@ -1,6 +1,9 @@
 import asyncio
 import typing
 import logging
+
+from prometheus_client import Counter, Gauge
+
 if typing.TYPE_CHECKING:
     from lbry.dht.node import Node
     from lbry.extras.daemon.storage import SQLiteStorage

@@ -9,45 +12,59 @@ log = logging.getLogger(__name__)


 class BlobAnnouncer:
+    announcements_sent_metric = Counter(
+        "announcements_sent", "Number of announcements sent and their respective status.", namespace="dht_node",
+        labelnames=("peers", "error"),
+    )
+    announcement_queue_size_metric = Gauge(
+        "announcement_queue_size", "Number of hashes waiting to be announced.", namespace="dht_node",
+        labelnames=("scope",)
+    )
+
     def __init__(self, loop: asyncio.AbstractEventLoop, node: 'Node', storage: 'SQLiteStorage'):
         self.loop = loop
         self.node = node
         self.storage = storage
         self.announce_task: asyncio.Task = None
         self.announce_queue: typing.List[str] = []
+        self._done = asyncio.Event()
+        self.announced = set()

-    async def _submit_announcement(self, blob_hash):
-        try:
-            peers = len(await self.node.announce_blob(blob_hash))
-            if peers > 4:
-                return blob_hash
-            else:
-                log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
-        except Exception as err:
-            if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
-                raise err
-            log.warning("error announcing %s: %s", blob_hash[:8], str(err))
+    async def _run_consumer(self):
+        while self.announce_queue:
+            try:
+                blob_hash = self.announce_queue.pop()
+                peers = len(await self.node.announce_blob(blob_hash))
+                self.announcements_sent_metric.labels(peers=peers, error=False).inc()
+                if peers > 4:
+                    self.announced.add(blob_hash)
+                else:
+                    log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
+            except Exception as err:
+                self.announcements_sent_metric.labels(peers=0, error=True).inc()
+                log.warning("error announcing %s: %s", blob_hash[:8], str(err))

     async def _announce(self, batch_size: typing.Optional[int] = 10):
         while batch_size:
             if not self.node.joined.is_set():
                 await self.node.joined.wait()
-            await asyncio.sleep(60, loop=self.loop)
+            await asyncio.sleep(60)
             if not self.node.protocol.routing_table.get_peers():
                 log.warning("No peers in DHT, announce round skipped")
                 continue
             self.announce_queue.extend(await self.storage.get_blobs_to_announce())
+            self.announcement_queue_size_metric.labels(scope="global").set(len(self.announce_queue))
             log.debug("announcer task wake up, %d blobs to announce", len(self.announce_queue))
             while len(self.announce_queue) > 0:
                 log.info("%i blobs to announce", len(self.announce_queue))
-                announced = await asyncio.gather(*[
-                    self._submit_announcement(
-                        self.announce_queue.pop()) for _ in range(batch_size) if self.announce_queue
-                ], loop=self.loop)
-                announced = list(filter(None, announced))
+                await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)])
+                announced = list(filter(None, self.announced))
                 if announced:
                     await self.storage.update_last_announced_blobs(announced)
                     log.info("announced %i blobs", len(announced))
+            self.announced.clear()
+            self._done.set()
+            self._done.clear()

     def start(self, batch_size: typing.Optional[int] = 10):
         assert not self.announce_task or self.announce_task.done(), "already running"

@@ -56,3 +73,6 @@ class BlobAnnouncer:
     def stop(self):
         if self.announce_task and not self.announce_task.done():
             self.announce_task.cancel()
+
+    def wait(self):
+        return self._done.wait()
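The announcer now drains one shared queue with batch_size concurrent consumers instead of slicing a fixed batch per gather call, and counts successes and failures in Prometheus metrics. A self-contained sketch of the same bounded-concurrency pattern, with a stand-in for node.announce_blob:

import asyncio, random

async def announce(blob_hash: str) -> int:
    await asyncio.sleep(0.01)                 # stand-in for node.announce_blob()
    return random.randint(0, 8)               # pretend peer count

async def run_consumer(queue: list, announced: set):
    # Each consumer pops until the shared queue is empty, mirroring _run_consumer().
    while queue:
        blob_hash = queue.pop()
        if await announce(blob_hash) > 4:
            announced.add(blob_hash)

async def main():
    queue = [f"blob{i:02d}" for i in range(20)]
    announced = set()
    await asyncio.gather(*[run_consumer(queue, announced) for _ in range(10)])
    print(f"announced {len(announced)} of 20")

asyncio.run(main())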
@@ -20,7 +20,6 @@ MAYBE_PING_DELAY = 300  # 5 minutes
 CHECK_REFRESH_INTERVAL = REFRESH_INTERVAL / 5
 RPC_ID_LENGTH = 20
 PROTOCOL_VERSION = 1
-BOTTOM_OUT_LIMIT = 3
 MSG_SIZE_LIMIT = 1400

lbry/dht/node.py (146 changed lines)
@@ -1,9 +1,11 @@
 import logging
 import asyncio
 import typing
-import binascii
 import socket
-from lbry.utils import resolve_host
+
+from prometheus_client import Gauge
+
+from lbry.utils import aclosing, resolve_host
 from lbry.dht import constants
 from lbry.dht.peer import make_kademlia_peer
 from lbry.dht.protocol.distance import Distance

@@ -18,20 +20,32 @@ log = logging.getLogger(__name__)


 class Node:
+    storing_peers_metric = Gauge(
+        "storing_peers", "Number of peers storing blobs announced to this node", namespace="dht_node",
+        labelnames=("scope",),
+    )
+    stored_blob_with_x_bytes_colliding = Gauge(
+        "stored_blobs_x_bytes_colliding", "Number of blobs with at least X bytes colliding with this node id prefix",
+        namespace="dht_node", labelnames=("amount",)
+    )
     def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, udp_port: int,
                  internal_udp_port: int, peer_port: int, external_ip: str, rpc_timeout: float = constants.RPC_TIMEOUT,
-                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX,
+                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False,
                  storage: typing.Optional['SQLiteStorage'] = None):
         self.loop = loop
         self.internal_udp_port = internal_udp_port
         self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout,
-                                         split_buckets_under_index)
+                                         split_buckets_under_index, is_bootstrap_node)
         self.listening_port: asyncio.DatagramTransport = None
-        self.joined = asyncio.Event(loop=self.loop)
+        self.joined = asyncio.Event()
         self._join_task: asyncio.Task = None
         self._refresh_task: asyncio.Task = None
         self._storage = storage

+    @property
+    def stored_blob_hashes(self):
+        return self.protocol.data_store.keys()
+
     async def refresh_node(self, force_once=False):
         while True:
             # remove peers with expired blob announcements from the datastore
@@ -41,17 +55,21 @@ class Node:
             # add all peers in the routing table
             total_peers.extend(self.protocol.routing_table.get_peers())
             # add all the peers who have announced blobs to us
-            total_peers.extend(self.protocol.data_store.get_storing_contacts())
+            storing_peers = self.protocol.data_store.get_storing_contacts()
+            self.storing_peers_metric.labels("global").set(len(storing_peers))
+            total_peers.extend(storing_peers)
+
+            counts = {0: 0, 1: 0, 2: 0}
+            node_id = self.protocol.node_id
+            for blob_hash in self.protocol.data_store.keys():
+                bytes_colliding = 0 if blob_hash[0] != node_id[0] else 2 if blob_hash[1] == node_id[1] else 1
+                counts[bytes_colliding] += 1
+            self.stored_blob_with_x_bytes_colliding.labels(amount=0).set(counts[0])
+            self.stored_blob_with_x_bytes_colliding.labels(amount=1).set(counts[1])
+            self.stored_blob_with_x_bytes_colliding.labels(amount=2).set(counts[2])

             # get ids falling in the midpoint of each bucket that hasn't been recently updated
             node_ids = self.protocol.routing_table.get_refresh_list(0, True)
-            # if we have 3 or fewer populated buckets get two random ids in the range of each to try and
-            # populate/split the buckets further
-            buckets_with_contacts = self.protocol.routing_table.buckets_with_contacts()
-            if buckets_with_contacts <= 3:
-                for i in range(buckets_with_contacts):
-                    node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
-                    node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))

             if self.protocol.routing_table.get_peers():
                 # if we have node ids to look up, perform the iterative search until we have k results
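The new gauge buckets every stored blob hash by how many of its leading bytes match this node's id (capped at two), a rough measure of how close the stored keys sit to the node in the keyspace. The conditional expression reads more easily unrolled; the ids below are fabricated:

# Equivalent, unrolled form of the bytes_colliding expression.
def leading_bytes_colliding(blob_hash: bytes, node_id: bytes) -> int:
    if blob_hash[0] != node_id[0]:
        return 0
    if blob_hash[1] == node_id[1]:
        return 2
    return 1

node_id = bytes.fromhex("ab12ff")
assert leading_bytes_colliding(bytes.fromhex("cd0000"), node_id) == 0   # first byte differs
assert leading_bytes_colliding(bytes.fromhex("ab34ff"), node_id) == 1   # only first byte matches
assert leading_bytes_colliding(bytes.fromhex("ab12aa"), node_id) == 2   # first two bytes match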
@@ -61,7 +79,7 @@ class Node:
             else:
                 if force_once:
                     break
-                fut = asyncio.Future(loop=self.loop)
+                fut = asyncio.Future()
                 self.loop.call_later(constants.REFRESH_INTERVAL // 4, fut.set_result, None)
                 await fut
                 continue

@@ -75,12 +93,12 @@ class Node:
             if force_once:
                 break

-            fut = asyncio.Future(loop=self.loop)
+            fut = asyncio.Future()
             self.loop.call_later(constants.REFRESH_INTERVAL, fut.set_result, None)
             await fut
     async def announce_blob(self, blob_hash: str) -> typing.List[bytes]:
-        hash_value = binascii.unhexlify(blob_hash.encode())
+        hash_value = bytes.fromhex(blob_hash)
         assert len(hash_value) == constants.HASH_LENGTH
         peers = await self.peer_search(hash_value)

@@ -90,12 +108,12 @@ class Node:
         for peer in peers:
             log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
         stored_to_tup = await asyncio.gather(
-            *(self.protocol.store_to_peer(hash_value, peer) for peer in peers), loop=self.loop
+            *(self.protocol.store_to_peer(hash_value, peer) for peer in peers)
         )
         stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
         if stored_to:
             log.debug(
-                "Stored %s to %i of %i attempted peers", binascii.hexlify(hash_value).decode()[:8],
+                "Stored %s to %i of %i attempted peers", hash_value.hex()[:8],
                 len(stored_to), len(peers)
             )
         else:
@@ -164,39 +182,36 @@ class Node:
                     for address, udp_port in known_node_urls or []
                 ]))
             except socket.gaierror:
-                await asyncio.sleep(30, loop=self.loop)
+                await asyncio.sleep(30)
                 continue

             self.protocol.peer_manager.reset()
             self.protocol.ping_queue.enqueue_maybe_ping(*seed_peers, delay=0.0)
             await self.peer_search(self.protocol.node_id, shortlist=seed_peers, count=32)

-            await asyncio.sleep(1, loop=self.loop)
+            await asyncio.sleep(1)

     def start(self, interface: str, known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
         self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls))

     def get_iterative_node_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
-                                  bottom_out_limit: int = constants.BOTTOM_OUT_LIMIT,
                                   max_results: int = constants.K) -> IterativeNodeFinder:
-        return IterativeNodeFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
-                                   key, bottom_out_limit, max_results, None, shortlist)
+        shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
+        return IterativeNodeFinder(self.loop, self.protocol, key, max_results, shortlist)

     def get_iterative_value_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
-                                   bottom_out_limit: int = 40,
                                    max_results: int = -1) -> IterativeValueFinder:
-        return IterativeValueFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
-                                    key, bottom_out_limit, max_results, None, shortlist)
+        shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
+        return IterativeValueFinder(self.loop, self.protocol, key, max_results, shortlist)

     async def peer_search(self, node_id: bytes, count=constants.K, max_results=constants.K * 2,
-                          bottom_out_limit=20, shortlist: typing.Optional[typing.List['KademliaPeer']] = None
+                          shortlist: typing.Optional[typing.List['KademliaPeer']] = None
                           ) -> typing.List['KademliaPeer']:
         peers = []
-        async for iteration_peers in self.get_iterative_node_finder(
-                node_id, shortlist=shortlist, bottom_out_limit=bottom_out_limit, max_results=max_results):
-            peers.extend(iteration_peers)
+        async with aclosing(self.get_iterative_node_finder(
+                node_id, shortlist=shortlist, max_results=max_results)) as node_finder:
+            async for iteration_peers in node_finder:
+                peers.extend(iteration_peers)
         distance = Distance(node_id)
         peers.sort(key=lambda peer: distance(peer.node_id))
         return peers[:count]
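peer_search now wraps the finder in aclosing(...) so the async generator is always closed, even when iteration stops early; lbry.utils.aclosing is presumably equivalent to contextlib.aclosing from Python 3.10. A small sketch using the stdlib version:

import asyncio
from contextlib import aclosing   # stdlib since Python 3.10; lbry.utils.aclosing is assumed equivalent

async def numbers():
    try:
        for i in range(100):
            yield i
    finally:
        print("generator closed cleanly")

async def main():
    async with aclosing(numbers()) as gen:
        async for n in gen:
            if n == 3:            # stop early; aclosing still triggers the finally block
                break

asyncio.run(main())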
@@ -222,39 +237,46 @@ class Node:

             # prioritize peers who reply to a dht ping first
             # this minimizes attempting to make tcp connections that won't work later to dead or unreachable peers
-        async for results in self.get_iterative_value_finder(binascii.unhexlify(blob_hash.encode())):
+        async with aclosing(self.get_iterative_value_finder(bytes.fromhex(blob_hash))) as value_finder:
+            async for results in value_finder:
                to_put = []
                for peer in results:
                    if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
                        continue
                    is_good = self.protocol.peer_manager.peer_is_good(peer)
                    if is_good:
                        # the peer has replied recently over UDP, it can probably be reached on the TCP port
                        to_put.append(peer)
                    elif is_good is None:
                        if not peer.udp_port:
                            # TODO: use the same port for TCP and UDP
                            # the udp port must be guessed
                            # default to the ports being the same. if the TCP port appears to be <=0.48.0 default,
                            # including on a network with several nodes, then assume the udp port is proportionately
                            # based on a starting port of 4444
                            udp_port_to_try = peer.tcp_port
                            if 3400 > peer.tcp_port > 3332:
                                udp_port_to_try = (peer.tcp_port - 3333) + 4444
                            self.loop.create_task(put_into_result_queue_after_pong(
                                make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)
                            ))
                        else:
                            self.loop.create_task(put_into_result_queue_after_pong(peer))
                    else:
                        # the peer is known to be bad/unreachable, skip trying to connect to it over TCP
                        log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
                if to_put:
                    result_queue.put_nowait(to_put)

     def accumulate_peers(self, search_queue: asyncio.Queue,
                          peer_queue: typing.Optional[asyncio.Queue] = None
                          ) -> typing.Tuple[asyncio.Queue, asyncio.Task]:
-        queue = peer_queue or asyncio.Queue(loop=self.loop)
+        queue = peer_queue or asyncio.Queue()
         return queue, self.loop.create_task(self._accumulate_peers_for_value(search_queue, queue))
+
+
+async def get_kademlia_peers_from_hosts(peer_list: typing.List[typing.Tuple[str, int]]) -> typing.List['KademliaPeer']:
+    peer_address_list = [(await resolve_host(url, port, proto='tcp'), port) for url, port in peer_list]
+    kademlia_peer_list = [make_kademlia_peer(None, address, None, tcp_port=port, allow_localhost=True)
+                          for address, port in peer_address_list]
+    return kademlia_peer_list
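The new module-level get_kademlia_peers_from_hosts turns configured (host, port) pairs, for example fixed peers or tracker hosts, into KademliaPeer objects after DNS resolution. A hedged usage sketch; the host below is one of the configured examples:

import asyncio
from lbry.dht.node import get_kademlia_peers_from_hosts   # added in this diff

async def main():
    peers = await get_kademlia_peers_from_hosts([("tracker.lbry.com", 9252)])
    for peer in peers:
        print(peer.address, peer.tcp_port)   # node_id and udp_port stay None for these fixed peers

asyncio.run(main())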
@@ -1,18 +1,21 @@
 import typing
 import asyncio
 import logging
-import ipaddress
-from binascii import hexlify
 from dataclasses import dataclass, field
 from functools import lru_cache
+
+from prometheus_client import Gauge
+
+from lbry.utils import is_valid_public_ipv4 as _is_valid_public_ipv4, LRUCache
 from lbry.dht import constants
 from lbry.dht.serialization.datagram import make_compact_address, make_compact_ip, decode_compact_address

+ALLOW_LOCALHOST = False
+CACHE_SIZE = 16384
 log = logging.getLogger(__name__)


-@lru_cache(1024)
+@lru_cache(CACHE_SIZE)
 def make_kademlia_peer(node_id: typing.Optional[bytes], address: typing.Optional[str],
                        udp_port: typing.Optional[int] = None,
                        tcp_port: typing.Optional[int] = None,

@@ -20,42 +23,32 @@ def make_kademlia_peer(node_id: typing.Optional[bytes], address: typing.Optional
     return KademliaPeer(address, node_id, udp_port, tcp_port=tcp_port, allow_localhost=allow_localhost)
-# the ipaddress module does not show these subnets as reserved
-CARRIER_GRADE_NAT_SUBNET = ipaddress.ip_network('100.64.0.0/10')
-IPV4_TO_6_RELAY_SUBNET = ipaddress.ip_network('192.88.99.0/24')
-
-ALLOW_LOCALHOST = False
-
-
 def is_valid_public_ipv4(address, allow_localhost: bool = False):
     allow_localhost = bool(allow_localhost or ALLOW_LOCALHOST)
-    try:
-        parsed_ip = ipaddress.ip_address(address)
-        if parsed_ip.is_loopback and allow_localhost:
-            return True
-        if any((parsed_ip.version != 4, parsed_ip.is_unspecified, parsed_ip.is_link_local, parsed_ip.is_loopback,
-                parsed_ip.is_multicast, parsed_ip.is_reserved, parsed_ip.is_private, parsed_ip.is_reserved)):
-            return False
-        else:
-            return not any((CARRIER_GRADE_NAT_SUBNET.supernet_of(ipaddress.ip_network(f"{address}/32")),
-                            IPV4_TO_6_RELAY_SUBNET.supernet_of(ipaddress.ip_network(f"{address}/32"))))
-    except (ipaddress.AddressValueError, ValueError):
-        return False
+    return _is_valid_public_ipv4(address, allow_localhost)


 class PeerManager:
+    peer_manager_keys_metric = Gauge(
+        "peer_manager_keys", "Number of keys tracked by PeerManager dicts (sum)", namespace="dht_node",
+        labelnames=("scope",)
+    )
     def __init__(self, loop: asyncio.AbstractEventLoop):
         self._loop = loop
         self._rpc_failures: typing.Dict[
             typing.Tuple[str, int], typing.Tuple[typing.Optional[float], typing.Optional[float]]
-        ] = {}
-        self._last_replied: typing.Dict[typing.Tuple[str, int], float] = {}
-        self._last_sent: typing.Dict[typing.Tuple[str, int], float] = {}
-        self._last_requested: typing.Dict[typing.Tuple[str, int], float] = {}
-        self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = {}
-        self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = {}
-        self._node_tokens: typing.Dict[bytes, (float, bytes)] = {}
+        ] = LRUCache(CACHE_SIZE)
+        self._last_replied: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
+        self._last_sent: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
+        self._last_requested: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
+        self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = LRUCache(CACHE_SIZE)
+        self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = LRUCache(CACHE_SIZE)
+        self._node_tokens: typing.Dict[bytes, (float, bytes)] = LRUCache(CACHE_SIZE)
+
+    def count_cache_keys(self):
+        return len(self._rpc_failures) + len(self._last_replied) + len(self._last_sent) + len(
+            self._last_requested) + len(self._node_id_mapping) + len(self._node_id_reverse_mapping) + len(
+            self._node_tokens)

     def reset(self):
         for statistic in (self._rpc_failures, self._last_replied, self._last_sent, self._last_requested):

@@ -105,6 +98,10 @@ class PeerManager:
         self._node_id_mapping.pop(self._node_id_reverse_mapping.pop(node_id))
         self._node_id_mapping[(address, udp_port)] = node_id
         self._node_id_reverse_mapping[node_id] = (address, udp_port)
+        self.peer_manager_keys_metric.labels("global").set(self.count_cache_keys())
+
+    def get_node_id_for_endpoint(self, address, port):
+        return self._node_id_mapping.get((address, port))

     def prune(self):  # TODO: periodically call this
         now = self._loop.time()
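Swapping the unbounded dicts for LRUCache(CACHE_SIZE) puts a ceiling on PeerManager memory on long-running nodes, and count_cache_keys feeds the new gauge. lbry.utils.LRUCache is not shown in this diff; the behaviour it needs is roughly a dict that evicts its oldest entry at capacity, as in this sketch:

from collections import OrderedDict

class MiniLRUCache(OrderedDict):
    """Dict-like cache that evicts the least recently used key at capacity (sketch only)."""
    def __init__(self, capacity: int):
        super().__init__()
        self.capacity = capacity

    def __setitem__(self, key, value):
        if key in self:
            self.move_to_end(key)
        super().__setitem__(key, value)
        if len(self) > self.capacity:
            self.popitem(last=False)      # drop the oldest entry

cache = MiniLRUCache(2)
cache['a'], cache['b'], cache['c'] = 1, 2, 3   # 'a' is evicted once 'c' arrives
assert list(cache) == ['b', 'c']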
@@ -156,9 +153,10 @@ class PeerManager:
     def peer_is_good(self, peer: 'KademliaPeer'):
         return self.contact_triple_is_good(peer.node_id, peer.address, peer.udp_port)

-    def decode_tcp_peer_from_compact_address(self, compact_address: bytes) -> 'KademliaPeer':  # pylint: disable=no-self-use
-        node_id, address, tcp_port = decode_compact_address(compact_address)
-        return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)
+
+def decode_tcp_peer_from_compact_address(compact_address: bytes) -> 'KademliaPeer':  # pylint: disable=no-self-use
+    node_id, address, tcp_port = decode_compact_address(compact_address)
+    return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)


 @dataclass(unsafe_hash=True)

@@ -173,11 +171,11 @@ class KademliaPeer:
     def __post_init__(self):
         if self._node_id is not None:
             if not len(self._node_id) == constants.HASH_LENGTH:
-                raise ValueError("invalid node_id: {}".format(hexlify(self._node_id).decode()))
+                raise ValueError("invalid node_id: {}".format(self._node_id.hex()))
-        if self.udp_port is not None and not 1 <= self.udp_port <= 65535:
-            raise ValueError("invalid udp port")
-        if self.tcp_port is not None and not 1 <= self.tcp_port <= 65535:
-            raise ValueError("invalid tcp port")
+        if self.udp_port is not None and not 1024 <= self.udp_port <= 65535:
+            raise ValueError(f"invalid udp port: {self.address}:{self.udp_port}")
+        if self.tcp_port is not None and not 1024 <= self.tcp_port <= 65535:
+            raise ValueError(f"invalid tcp port: {self.address}:{self.tcp_port}")
         if not is_valid_public_ipv4(self.address, self.allow_localhost):
             raise ValueError(f"invalid ip address: '{self.address}'")

@@ -196,3 +194,6 @@ class KademliaPeer:
     def compact_ip(self):
         return make_compact_ip(self.address)
+
+    def __str__(self):
+        return f"{self.__class__.__name__}({self.node_id.hex()[:8]}@{self.address}:{self.udp_port}-{self.tcp_port})"
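__post_init__ now rejects ports below 1024 and names the offending endpoint, and the new __str__ makes peers readable in logs. A quick sketch of the stricter validation; the address is an example and the 48-byte id assumes constants.HASH_LENGTH == 48:

from lbry.dht.peer import make_kademlia_peer

peer = make_kademlia_peer(bytes(48), "34.231.19.25", udp_port=4444)   # illustrative public address
print(str(peer))   # KademliaPeer(00000000@34.231.19.25:4444-None) via the new __str__

try:
    make_kademlia_peer(bytes(48), "34.231.19.25", udp_port=80)        # ports below 1024 are now rejected
except ValueError as e:
    print(e)       # invalid udp port: 34.231.19.25:80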
@@ -16,6 +16,12 @@ class DictDataStore:
         self._peer_manager = peer_manager
         self.completed_blobs: typing.Set[str] = set()

+    def keys(self):
+        return self._data_store.keys()
+
+    def __len__(self):
+        return self._data_store.__len__()
+
     def removed_expired_peers(self):
         now = self.loop.time()
         keys = list(self._data_store.keys())
@@ -1,18 +1,17 @@
 import asyncio
-from binascii import hexlify
 from itertools import chain
-from collections import defaultdict
+from collections import defaultdict, OrderedDict
+from collections.abc import AsyncIterator
 import typing
 import logging
 from typing import TYPE_CHECKING
 from lbry.dht import constants
 from lbry.dht.error import RemoteException, TransportNotConnected
 from lbry.dht.protocol.distance import Distance
-from lbry.dht.peer import make_kademlia_peer
+from lbry.dht.peer import make_kademlia_peer, decode_tcp_peer_from_compact_address
 from lbry.dht.serialization.datagram import PAGE_KEY

 if TYPE_CHECKING:
-    from lbry.dht.protocol.routing_table import TreeRoutingTable
     from lbry.dht.protocol.protocol import KademliaProtocol
     from lbry.dht.peer import PeerManager, KademliaPeer

@@ -27,6 +26,15 @@ class FindResponse:
     def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
         raise NotImplementedError()

+    def get_close_kademlia_peers(self, peer_info) -> typing.Generator[typing.Iterator['KademliaPeer'], None, None]:
+        for contact_triple in self.get_close_triples():
+            node_id, address, udp_port = contact_triple
+            try:
+                yield make_kademlia_peer(node_id, address, udp_port)
+            except ValueError:
+                log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer_info.address,
+                            peer_info.udp_port, address, udp_port)
+

 class FindNodeResponse(FindResponse):
     def __init__(self, key: bytes, close_triples: typing.List[typing.Tuple[bytes, str, int]]):
@ -57,57 +65,33 @@ class FindValueResponse(FindResponse):
|
||||||
return [(node_id, address.decode(), port) for node_id, address, port in self.close_triples]
|
return [(node_id, address.decode(), port) for node_id, address, port in self.close_triples]
|
||||||
|
|
||||||
|
|
||||||
def get_shortlist(routing_table: 'TreeRoutingTable', key: bytes,
|
class IterativeFinder(AsyncIterator):
|
||||||
shortlist: typing.Optional[typing.List['KademliaPeer']]) -> typing.List['KademliaPeer']:
|
def __init__(self, loop: asyncio.AbstractEventLoop,
|
||||||
"""
|
protocol: 'KademliaProtocol', key: bytes,
|
||||||
If not provided, initialize the shortlist of peers to probe to the (up to) k closest peers in the routing table
|
max_results: typing.Optional[int] = constants.K,
|
||||||
|
|
||||||
:param routing_table: a TreeRoutingTable
|
|
||||||
:param key: a 48 byte hash
|
|
||||||
:param shortlist: optional manually provided shortlist, this is done during bootstrapping when there are no
|
|
||||||
peers in the routing table. During bootstrap the shortlist is set to be the seed nodes.
|
|
||||||
"""
|
|
||||||
if len(key) != constants.HASH_LENGTH:
|
|
||||||
raise ValueError("invalid key length: %i" % len(key))
|
|
||||||
return shortlist or routing_table.find_close_peers(key)
|
|
||||||
|
|
||||||
|
|
||||||
class IterativeFinder:
|
|
||||||
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
|
|
||||||
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
|
|
||||||
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
|
|
||||||
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
|
|
||||||
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
|
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
|
||||||
if len(key) != constants.HASH_LENGTH:
|
if len(key) != constants.HASH_LENGTH:
|
||||||
raise ValueError("invalid key length: %i" % len(key))
|
raise ValueError("invalid key length: %i" % len(key))
|
||||||
self.loop = loop
|
self.loop = loop
|
||||||
self.peer_manager = peer_manager
|
self.peer_manager = protocol.peer_manager
|
||||||
self.routing_table = routing_table
|
|
||||||
self.protocol = protocol
|
self.protocol = protocol
|
||||||
|
|
||||||
self.key = key
|
self.key = key
|
||||||
self.bottom_out_limit = bottom_out_limit
|
self.max_results = max(constants.K, max_results)
|
||||||
self.max_results = max_results
|
|
||||||
self.exclude = exclude or []
|
|
||||||
|
|
||||||
self.active: typing.Set['KademliaPeer'] = set()
|
self.active: typing.Dict['KademliaPeer', int] = OrderedDict() # peer: distance, sorted
|
||||||
self.contacted: typing.Set['KademliaPeer'] = set()
|
self.contacted: typing.Set['KademliaPeer'] = set()
|
||||||
self.distance = Distance(key)
|
self.distance = Distance(key)
|
||||||
|
|
||||||
self.closest_peer: typing.Optional['KademliaPeer'] = None
|
self.iteration_queue = asyncio.Queue()
|
||||||
self.prev_closest_peer: typing.Optional['KademliaPeer'] = None
|
|
||||||
|
|
||||||
self.iteration_queue = asyncio.Queue(loop=self.loop)
|
self.running_probes: typing.Dict['KademliaPeer', asyncio.Task] = {}
|
||||||
|
|
||||||
self.running_probes: typing.Set[asyncio.Task] = set()
|
|
||||||
self.iteration_count = 0
|
self.iteration_count = 0
|
||||||
self.bottom_out_count = 0
|
|
||||||
self.running = False
|
self.running = False
|
||||||
self.tasks: typing.List[asyncio.Task] = []
|
self.tasks: typing.List[asyncio.Task] = []
|
||||||
self.delayed_calls: typing.List[asyncio.Handle] = []
|
for peer in shortlist:
|
||||||
for peer in get_shortlist(routing_table, key, shortlist):
|
|
||||||
if peer.node_id:
|
if peer.node_id:
|
||||||
self._add_active(peer)
|
self._add_active(peer, force=True)
|
||||||
else:
|
else:
|
||||||
# seed nodes
|
# seed nodes
|
||||||
self._schedule_probe(peer)
|
self._schedule_probe(peer)
|
||||||
|
@ -139,66 +123,79 @@ class IterativeFinder:
|
||||||
"""
|
"""
|
||||||
return []
|
return []
|
||||||
|
|
||||||
-    def _is_closer(self, peer: 'KademliaPeer') -> bool:
-        return not self.closest_peer or self.distance.is_closer(peer.node_id, self.closest_peer.node_id)
-
-    def _add_active(self, peer):
+    def _add_active(self, peer, force=False):
+        if not force and self.peer_manager.peer_is_good(peer) is False:
+            return
+        if peer in self.contacted:
+            return
         if peer not in self.active and peer.node_id and peer.node_id != self.protocol.node_id:
-            self.active.add(peer)
-            if self._is_closer(peer):
-                self.prev_closest_peer = self.closest_peer
-                self.closest_peer = peer
+            self.active[peer] = self.distance(peer.node_id)
+            self.active = OrderedDict(sorted(self.active.items(), key=lambda item: item[1]))
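active is now an OrderedDict keyed by peer and sorted by XOR distance to the search key, so the closest uncontacted peers are probed first. A toy sketch of that ordering with one-byte ids (real ids are 48 bytes):

from collections import OrderedDict

def xor_distance(a: bytes, b: bytes) -> int:
    return int.from_bytes(a, 'big') ^ int.from_bytes(b, 'big')

key = bytes.fromhex("0f")      # target key (toy one-byte id)
active = {bytes.fromhex("ff"): None, bytes.fromhex("0e"): None, bytes.fromhex("8f"): None}

distances = {peer: xor_distance(key, peer) for peer in active}
ordered = OrderedDict(sorted(distances.items(), key=lambda item: item[1]))
print([p.hex() for p in ordered])   # ['0e', '8f', 'ff'] -> closest first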
async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
|
async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
|
||||||
self._add_active(peer)
|
self._add_active(peer)
|
||||||
for contact_triple in response.get_close_triples():
|
for new_peer in response.get_close_kademlia_peers(peer):
|
||||||
node_id, address, udp_port = contact_triple
|
self._add_active(new_peer)
|
||||||
try:
|
|
||||||
self._add_active(make_kademlia_peer(node_id, address, udp_port))
|
|
||||||
except ValueError:
|
|
||||||
log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer.address,
|
|
||||||
peer.udp_port, address, udp_port)
|
|
||||||
self.check_result_ready(response)
|
self.check_result_ready(response)
|
||||||
|
self._log_state(reason="check result")
|
||||||
|
|
||||||
|
def _reset_closest(self, peer):
|
||||||
|
if peer in self.active:
|
||||||
|
del self.active[peer]
|
||||||
|
|
||||||
async def _send_probe(self, peer: 'KademliaPeer'):
|
async def _send_probe(self, peer: 'KademliaPeer'):
|
||||||
try:
|
try:
|
||||||
response = await self.send_probe(peer)
|
response = await self.send_probe(peer)
|
||||||
except asyncio.TimeoutError:
|
except asyncio.TimeoutError:
|
||||||
self.active.discard(peer)
|
self._reset_closest(peer)
|
||||||
return
|
return
|
||||||
|
except asyncio.CancelledError:
|
||||||
|
log.debug("%s[%x] cancelled probe",
|
||||||
|
type(self).__name__, id(self))
|
||||||
|
raise
|
||||||
except ValueError as err:
|
except ValueError as err:
|
||||||
log.warning(str(err))
|
log.warning(str(err))
|
||||||
self.active.discard(peer)
|
self._reset_closest(peer)
|
||||||
return
|
return
|
||||||
except TransportNotConnected:
|
except TransportNotConnected:
|
||||||
return self.aclose()
|
await self._aclose(reason="not connected")
|
||||||
|
return
|
||||||
except RemoteException:
|
except RemoteException:
|
||||||
|
self._reset_closest(peer)
|
||||||
return
|
return
|
||||||
return await self._handle_probe_result(peer, response)
|
return await self._handle_probe_result(peer, response)
|
||||||
|
|
||||||
-    async def _search_round(self):
+    def _search_round(self):
         """
         Send up to constants.alpha (5) probes to closest active peers
         """

         added = 0
-        to_probe = list(self.active - self.contacted)
-        to_probe.sort(key=lambda peer: self.distance(self.key))
-        for peer in to_probe:
-            if added >= constants.ALPHA:
+        for index, peer in enumerate(self.active.keys()):
+            if index == 0:
+                log.debug("%s[%x] closest to probe: %s",
+                          type(self).__name__, id(self),
+                          peer.node_id.hex()[:8])
+            if peer in self.contacted:
+                continue
+            if len(self.running_probes) >= constants.ALPHA:
+                break
+            if index > (constants.K + len(self.running_probes)):
                 break
             origin_address = (peer.address, peer.udp_port)
-            if origin_address in self.exclude:
-                continue
             if peer.node_id == self.protocol.node_id:
                 continue
             if origin_address == (self.protocol.external_ip, self.protocol.udp_port):
                 continue
             self._schedule_probe(peer)
             added += 1
-        log.debug("running %d probes", len(self.running_probes))
+            log.debug("%s[%x] running %d probes for key %s",
+                      type(self).__name__, id(self),
+                      len(self.running_probes), self.key.hex()[:8])
         if not added and not self.running_probes:
-            log.debug("search for %s exhausted", hexlify(self.key)[:8])
+            log.debug("%s[%x] search for %s exhausted",
+                      type(self).__name__, id(self),
+                      self.key.hex()[:8])
             self.search_exhausted()

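The reworked `_search_round` walks the distance-ordered `active` mapping and keeps at most `ALPHA` probes in flight, giving up once it has looked roughly `K` entries past the outstanding probes. Below is a minimal, stand-alone sketch of that selection rule only; `ALPHA`, `K`, and the "closest first" ordering of `active` are assumptions mirroring the diff, not imports from lbry.

```python
# Illustrative sketch of the probe-selection rule in _search_round.
ALPHA, K = 5, 8

def pick_peers_to_probe(active, contacted, running_probes):
    to_probe = []
    for index, peer in enumerate(active):          # active is closest-first
        if peer in contacted:
            continue                               # already asked this peer
        if len(running_probes) + len(to_probe) >= ALPHA:
            break                                  # keep at most ALPHA probes in flight
        if index > K + len(running_probes):
            break                                  # looked far enough down the list
        to_probe.append(peer)
    return to_probe

if __name__ == "__main__":
    active = [f"peer{i}" for i in range(20)]       # closest first
    print(pick_peers_to_probe(active, {"peer0"}, set()))
```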
     def _schedule_probe(self, peer: 'KademliaPeer'):
@@ -207,33 +204,24 @@ class IterativeFinder:
         t = self.loop.create_task(self._send_probe(peer))

         def callback(_):
-            self.running_probes.difference_update({
-                probe for probe in self.running_probes if probe.done() or probe == t
-            })
-            if not self.running_probes:
-                self.tasks.append(self.loop.create_task(self._search_task(0.0)))
+            self.running_probes.pop(peer, None)
+            if self.running:
+                self._search_round()

         t.add_done_callback(callback)
-        self.running_probes.add(t)
+        self.running_probes[peer] = t

-    async def _search_task(self, delay: typing.Optional[float] = constants.ITERATIVE_LOOKUP_DELAY):
-        try:
-            if self.running:
-                await self._search_round()
-            if self.running:
-                self.delayed_calls.append(self.loop.call_later(delay, self._search))
-        except (asyncio.CancelledError, StopAsyncIteration, TransportNotConnected):
-            if self.running:
-                self.loop.call_soon(self.aclose)
-
-    def _search(self):
-        self.tasks.append(self.loop.create_task(self._search_task()))
+    def _log_state(self, reason="?"):
+        log.debug("%s[%x] [%s] %s: %i active nodes %i contacted %i produced %i queued",
+                  type(self).__name__, id(self), self.key.hex()[:8],
+                  reason, len(self.active), len(self.contacted),
+                  self.iteration_count, self.iteration_queue.qsize())

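The delayed `_search_task`/`call_later` loop is replaced by a done-callback on each probe task that immediately triggers the next round. A toy sketch of that scheduling pattern, assuming plain asyncio and invented class/attribute names (not lbry code), is shown below.

```python
import asyncio

# Minimal sketch of callback-driven scheduling: each finished probe kicks off
# another search round instead of waiting for a periodic task.
class TinyFinder:
    def __init__(self):
        self.loop = asyncio.get_running_loop()
        self.running = True
        self.running_probes = {}      # peer -> task, mirroring the dict in the diff
        self.rounds = 0

    async def _probe(self, peer):
        await asyncio.sleep(0.01)     # stand-in for a network round trip

    def _schedule_probe(self, peer):
        task = self.loop.create_task(self._probe(peer))

        def callback(_):
            self.running_probes.pop(peer, None)
            if self.running and self.rounds < 3:
                self._search_round()

        task.add_done_callback(callback)
        self.running_probes[peer] = task

    def _search_round(self):
        self.rounds += 1
        self._schedule_probe(f"peer-{self.rounds}")

async def main():
    finder = TinyFinder()
    finder._search_round()
    await asyncio.sleep(0.1)
    print("rounds run:", finder.rounds)

asyncio.run(main())
```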
     def __aiter__(self):
         if self.running:
             raise Exception("already running")
         self.running = True
-        self._search()
+        self.loop.call_soon(self._search_round)
         return self

     async def __anext__(self) -> typing.List['KademliaPeer']:
@@ -246,47 +234,57 @@ class IterativeFinder:
                 raise StopAsyncIteration
             self.iteration_count += 1
             return result
-        except (asyncio.CancelledError, StopAsyncIteration):
-            self.loop.call_soon(self.aclose)
+        except asyncio.CancelledError:
+            await self._aclose(reason="cancelled")
+            raise
+        except StopAsyncIteration:
+            await self._aclose(reason="no more results")
             raise

-    def aclose(self):
+    async def _aclose(self, reason="?"):
+        log.debug("%s[%x] [%s] shutdown because %s: %i active nodes %i contacted %i produced %i queued",
+                  type(self).__name__, id(self), self.key.hex()[:8],
+                  reason, len(self.active), len(self.contacted),
+                  self.iteration_count, self.iteration_queue.qsize())
         self.running = False
         self.iteration_queue.put_nowait(None)
-        for task in chain(self.tasks, self.running_probes, self.delayed_calls):
+        for task in chain(self.tasks, self.running_probes.values()):
             task.cancel()
         self.tasks.clear()
         self.running_probes.clear()
-        self.delayed_calls.clear()
+
+    async def aclose(self):
+        if self.running:
+            await self._aclose(reason="aclose")
+        log.debug("%s[%x] [%s] async close completed",
+                  type(self).__name__, id(self), self.key.hex()[:8])

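With `aclose()` now a coroutine and cancellation/exhaustion funnelled through `_aclose`, callers consume the finder as an async iterator and await the close afterwards. The snippet below is a generic stand-in illustrating that consumption pattern with a toy class (the queue sentinel and class names are assumptions, not lbry's API).

```python
import asyncio

# Generic sketch of the pattern the finder follows: iterate until the queue
# yields the None sentinel, then await an async close.
class ToyFinder:
    def __init__(self, batches):
        self.queue = asyncio.Queue()
        for batch in batches:
            self.queue.put_nowait(batch)
        self.queue.put_nowait(None)    # sentinel, like iteration_queue
        self.running = False

    def __aiter__(self):
        self.running = True
        return self

    async def __anext__(self):
        result = await self.queue.get()
        if result is None:
            raise StopAsyncIteration
        return result

    async def aclose(self):
        self.running = False

async def main():
    finder = ToyFinder([["peer-a"], ["peer-b", "peer-c"]])
    try:
        async for peers in finder:
            print("got", peers)
    finally:
        await finder.aclose()          # awaited now that aclose is a coroutine

asyncio.run(main())
```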
 class IterativeNodeFinder(IterativeFinder):
-    def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
-                 routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
-                 bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
-                 exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
+    def __init__(self, loop: asyncio.AbstractEventLoop,
+                 protocol: 'KademliaProtocol', key: bytes,
+                 max_results: typing.Optional[int] = constants.K,
                  shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
-        super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
-                         shortlist)
+        super().__init__(loop, protocol, key, max_results, shortlist)
         self.yielded_peers: typing.Set['KademliaPeer'] = set()

     async def send_probe(self, peer: 'KademliaPeer') -> FindNodeResponse:
-        log.debug("probing %s:%d %s", peer.address, peer.udp_port, hexlify(peer.node_id)[:8] if peer.node_id else '')
+        log.debug("probe %s:%d (%s) for NODE %s",
+                  peer.address, peer.udp_port, peer.node_id.hex()[:8] if peer.node_id else '', self.key.hex()[:8])
         response = await self.protocol.get_rpc_peer(peer).find_node(self.key)
         return FindNodeResponse(self.key, response)

     def search_exhausted(self):
-        self.put_result(self.active, finish=True)
+        self.put_result(self.active.keys(), finish=True)

     def put_result(self, from_iter: typing.Iterable['KademliaPeer'], finish=False):
         not_yet_yielded = [
             peer for peer in from_iter
             if peer not in self.yielded_peers
             and peer.node_id != self.protocol.node_id
-            and self.peer_manager.peer_is_good(peer) is not False
+            and self.peer_manager.peer_is_good(peer) is True  # return only peers who answered
         ]
         not_yet_yielded.sort(key=lambda peer: self.distance(peer.node_id))
-        to_yield = not_yet_yielded[:min(constants.K, len(not_yet_yielded))]
+        to_yield = not_yet_yielded[:max(constants.K, self.max_results)]
         if to_yield:
             self.yielded_peers.update(to_yield)
             self.iteration_queue.put_nowait(to_yield)
@@ -298,27 +296,15 @@ class IterativeNodeFinder(IterativeFinder):

         if found:
             log.debug("found")
-            return self.put_result(self.active, finish=True)
-        if self.prev_closest_peer and self.closest_peer and not self._is_closer(self.prev_closest_peer):
-            # log.info("improving, %i %i %i %i %i", len(self.shortlist), len(self.active), len(self.contacted),
-            #          self.bottom_out_count, self.iteration_count)
-            self.bottom_out_count = 0
-        elif self.prev_closest_peer and self.closest_peer:
-            self.bottom_out_count += 1
-            log.info("bottom out %i %i %i", len(self.active), len(self.contacted), self.bottom_out_count)
-        if self.bottom_out_count >= self.bottom_out_limit or self.iteration_count >= self.bottom_out_limit:
-            log.info("limit hit")
-            self.put_result(self.active, True)
+            return self.put_result(self.active.keys(), finish=True)

 class IterativeValueFinder(IterativeFinder):
-    def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
-                 routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
-                 bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
-                 exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
+    def __init__(self, loop: asyncio.AbstractEventLoop,
+                 protocol: 'KademliaProtocol', key: bytes,
+                 max_results: typing.Optional[int] = constants.K,
                  shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
-        super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
-                         shortlist)
+        super().__init__(loop, protocol, key, max_results, shortlist)
         self.blob_peers: typing.Set['KademliaPeer'] = set()
         # this tracks the index of the most recent page we requested from each peer
         self.peer_pages: typing.DefaultDict['KademliaPeer', int] = defaultdict(int)
@@ -326,6 +312,8 @@ class IterativeValueFinder(IterativeFinder):
         self.discovered_peers: typing.Dict['KademliaPeer', typing.Set['KademliaPeer']] = defaultdict(set)

     async def send_probe(self, peer: 'KademliaPeer') -> FindValueResponse:
+        log.debug("probe %s:%d (%s) for VALUE %s",
+                  peer.address, peer.udp_port, peer.node_id.hex()[:8], self.key.hex()[:8])
         page = self.peer_pages[peer]
         response = await self.protocol.get_rpc_peer(peer).find_value(self.key, page=page)
         parsed = FindValueResponse(self.key, response)
@@ -335,7 +323,7 @@ class IterativeValueFinder(IterativeFinder):
             decoded_peers = set()
             for compact_addr in parsed.found_compact_addresses:
                 try:
-                    decoded_peers.add(self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr))
+                    decoded_peers.add(decode_tcp_peer_from_compact_address(compact_addr))
                 except ValueError:
                     log.warning("misbehaving peer %s:%i returned invalid peer for blob",
                                 peer.address, peer.udp_port)
@@ -347,7 +335,6 @@ class IterativeValueFinder(IterativeFinder):
                           already_known + len(parsed.found_compact_addresses))
             if len(self.discovered_peers[peer]) != already_known + len(parsed.found_compact_addresses):
                 log.warning("misbehaving peer %s:%i returned duplicate peers for blob", peer.address, peer.udp_port)
-                parsed.found_compact_addresses.clear()
             elif len(parsed.found_compact_addresses) >= constants.K and self.peer_pages[peer] < parsed.pages:
                 # the peer returned a full page and indicates it has more
                 self.peer_pages[peer] += 1
@@ -358,26 +345,15 @@ class IterativeValueFinder(IterativeFinder):

     def check_result_ready(self, response: FindValueResponse):
         if response.found:
-            blob_peers = [self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr)
+            blob_peers = [decode_tcp_peer_from_compact_address(compact_addr)
                           for compact_addr in response.found_compact_addresses]
             to_yield = []
-            self.bottom_out_count = 0
             for blob_peer in blob_peers:
                 if blob_peer not in self.blob_peers:
                     self.blob_peers.add(blob_peer)
                     to_yield.append(blob_peer)
             if to_yield:
-                # log.info("found %i new peers for blob", len(to_yield))
                 self.iteration_queue.put_nowait(to_yield)
-                # if self.max_results and len(self.blob_peers) >= self.max_results:
-                #     log.info("enough blob peers found")
-                #     if not self.finished.is_set():
-                #         self.finished.set()
-        elif self.prev_closest_peer and self.closest_peer:
-            self.bottom_out_count += 1
-            if self.bottom_out_count >= self.bottom_out_limit:
-                log.info("blob peer search bottomed out")
-                self.iteration_queue.put_nowait(None)

     def get_initial_result(self) -> typing.List['KademliaPeer']:
         if self.protocol.data_store.has_peers_for_blob(self.key):

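The value finder keeps requesting further pages from a peer while that peer returns a full page and advertises more. A small illustrative sketch of that paging loop is below; `fetch_page`, `K`, and the response shape are hypothetical stand-ins, not the lbry RPC API.

```python
# Sketch of the page-tracking idea behind peer_pages: keep asking a peer for
# the next page while it returns a full page and says more pages exist.
K = 8

def fetch_all_pages(fetch_page):
    found, page = [], 0
    while True:
        addresses, total_pages = fetch_page(page)
        found.extend(addresses)
        if len(addresses) >= K and page + 1 < total_pages:
            page += 1              # full page and the peer has more
        else:
            return found

def fake_peer(page):
    pages = [[f"addr{i}" for i in range(8)], [f"addr{8 + i}" for i in range(3)]]
    return pages[page], len(pages)

print(len(fetch_all_pages(fake_peer)))   # 11
```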
@@ -3,12 +3,14 @@ import socket
 import functools
 import hashlib
 import asyncio
+import time
 import typing
-import binascii
 import random
 from asyncio.protocols import DatagramProtocol
 from asyncio.transports import DatagramTransport

+from prometheus_client import Gauge, Counter, Histogram
+
 from lbry.dht import constants
 from lbry.dht.serialization.bencoding import DecodeError
 from lbry.dht.serialization.datagram import decode_datagram, ErrorDatagram, ResponseDatagram, RequestDatagram
@@ -31,6 +33,11 @@ OLD_PROTOCOL_ERRORS = {


 class KademliaRPC:
+    stored_blob_metric = Gauge(
+        "stored_blobs", "Number of blobs announced by other peers", namespace="dht_node",
+        labelnames=("scope",),
+    )
+
     def __init__(self, protocol: 'KademliaProtocol', loop: asyncio.AbstractEventLoop, peer_port: int = 3333):
         self.protocol = protocol
         self.loop = loop
@@ -62,6 +69,7 @@ class KademliaRPC:
             self.protocol.data_store.add_peer_to_blob(
                 rpc_contact, blob_hash
             )
+        self.stored_blob_metric.labels("global").set(len(self.protocol.data_store))
         return b'OK'

     def find_node(self, rpc_contact: 'KademliaPeer', key: bytes) -> typing.List[typing.Tuple[bytes, str, int]]:
@@ -97,7 +105,7 @@ class KademliaRPC:
             if not rpc_contact.tcp_port or peer.compact_address_tcp() != rpc_contact.compact_address_tcp()
         ]
         # if we don't have k storing peers to return and we have this hash locally, include our contact information
-        if len(peers) < constants.K and binascii.hexlify(key).decode() in self.protocol.data_store.completed_blobs:
+        if len(peers) < constants.K and key.hex() in self.protocol.data_store.completed_blobs:
             peers.append(self.compact_address())
         if not peers:
             response[PAGE_KEY] = 0
@@ -210,6 +218,10 @@ class PingQueue:
     def running(self):
         return self._running

+    @property
+    def busy(self):
+        return self._running and (any(self._running_pings) or any(self._pending_contacts))
+
     def enqueue_maybe_ping(self, *peers: 'KademliaPeer', delay: typing.Optional[float] = None):
         delay = delay if delay is not None else self._default_delay
         now = self._loop.time()
@@ -221,7 +233,7 @@ class PingQueue:
         async def ping_task():
             try:
                 if self._protocol.peer_manager.peer_is_good(peer):
-                    if peer not in self._protocol.routing_table.get_peers():
+                    if not self._protocol.routing_table.get_peer(peer.node_id):
                         self._protocol.add_peer(peer)
                     return
                 await self._protocol.get_rpc_peer(peer).ping()
@@ -241,7 +253,7 @@ class PingQueue:
                     del self._pending_contacts[peer]
                     self.maybe_ping(peer)
                 break
-            await asyncio.sleep(1, loop=self._loop)
+            await asyncio.sleep(1)

     def start(self):
         assert not self._running
@@ -260,9 +272,33 @@ class PingQueue:

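The patch threads prometheus_client instrumentation through the RPC layer (Gauges for table and store sizes, Counters and a Histogram for request traffic). For reference, a minimal stand-alone example of the same client API is shown below; the metric names here are examples, not the ones added by the patch.

```python
from prometheus_client import Counter, Gauge, generate_latest

# Stand-alone example of the prometheus_client calls the diff relies on.
requests_seen = Counter("example_requests", "Requests handled", labelnames=("method",))
table_size = Gauge("example_table_size", "Entries currently stored", labelnames=("scope",))

requests_seen.labels(method="findNode").inc()   # count one findNode request
table_size.labels("global").set(42)             # report the current store size

# Render the default registry in the text exposition format.
print(generate_latest().decode().splitlines()[0])
```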
 class KademliaProtocol(DatagramProtocol):
+    request_sent_metric = Counter(
+        "request_sent", "Number of requests send from DHT RPC protocol", namespace="dht_node",
+        labelnames=("method",),
+    )
+    request_success_metric = Counter(
+        "request_success", "Number of successful requests", namespace="dht_node",
+        labelnames=("method",),
+    )
+    request_error_metric = Counter(
+        "request_error", "Number of errors returned from request to other peers", namespace="dht_node",
+        labelnames=("method",),
+    )
+    HISTOGRAM_BUCKETS = (
+        .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 3.0, 3.5, 4.0, 4.50, 5.0, 5.50, 6.0, float('inf')
+    )
+    response_time_metric = Histogram(
+        "response_time", "Response times of DHT RPC requests", namespace="dht_node", buckets=HISTOGRAM_BUCKETS,
+        labelnames=("method",)
+    )
+    received_request_metric = Counter(
+        "received_request", "Number of received DHT RPC requests", namespace="dht_node",
+        labelnames=("method",),
+    )
+
     def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, external_ip: str,
                  udp_port: int, peer_port: int, rpc_timeout: float = constants.RPC_TIMEOUT,
-                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
+                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_boostrap_node: bool = False):
         self.peer_manager = peer_manager
         self.loop = loop
         self.node_id = node_id
@@ -277,15 +313,16 @@ class KademliaProtocol(DatagramProtocol):
         self.transport: DatagramTransport = None
         self.old_token_secret = constants.generate_id()
         self.token_secret = constants.generate_id()
-        self.routing_table = TreeRoutingTable(self.loop, self.peer_manager, self.node_id, split_buckets_under_index)
+        self.routing_table = TreeRoutingTable(
+            self.loop, self.peer_manager, self.node_id, split_buckets_under_index, is_bootstrap_node=is_boostrap_node)
         self.data_store = DictDataStore(self.loop, self.peer_manager)
         self.ping_queue = PingQueue(self.loop, self)
         self.node_rpc = KademliaRPC(self, self.loop, self.peer_port)
         self.rpc_timeout = rpc_timeout
-        self._split_lock = asyncio.Lock(loop=self.loop)
+        self._split_lock = asyncio.Lock()
         self._to_remove: typing.Set['KademliaPeer'] = set()
         self._to_add: typing.Set['KademliaPeer'] = set()
-        self._wakeup_routing_task = asyncio.Event(loop=self.loop)
+        self._wakeup_routing_task = asyncio.Event()
         self.maintaing_routing_task: typing.Optional[asyncio.Task] = None

     @functools.lru_cache(128)
@@ -324,72 +361,10 @@ class KademliaProtocol(DatagramProtocol):
         return args, {}

     async def _add_peer(self, peer: 'KademliaPeer'):
-        if not peer.node_id:
-            log.warning("Tried adding a peer with no node id!")
-            return False
-        for my_peer in self.routing_table.get_peers():
-            if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
-                self.routing_table.remove_peer(my_peer)
-                self.routing_table.join_buckets()
-        bucket_index = self.routing_table.kbucket_index(peer.node_id)
-        if self.routing_table.buckets[bucket_index].add_peer(peer):
-            return True
-
-        # The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
-        if self.routing_table.should_split(bucket_index, peer.node_id):
-            self.routing_table.split_bucket(bucket_index)
-            # Retry the insertion attempt
-            result = await self._add_peer(peer)
-            self.routing_table.join_buckets()
-            return result
-        else:
-            # We can't split the k-bucket
-            #
-            # The 13 page kademlia paper specifies that the least recently contacted node in the bucket
-            # shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
-            # the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
-            #
-            # A reasonable extension to this is BEP 0005, which extends the above:
-            #
-            #     Not all nodes that we learn about are equal. Some are "good" and some are not.
-            #     Many nodes using the DHT are able to send queries and receive responses,
-            #     but are not able to respond to queries from other nodes. It is important that
-            #     each node's routing table must contain only known good nodes. A good node is
-            #     a node has responded to one of our queries within the last 15 minutes. A node
-            #     is also good if it has ever responded to one of our queries and has sent us a
-            #     query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
-            #     questionable. Nodes become bad when they fail to respond to multiple queries
-            #     in a row. Nodes that we know are good are given priority over nodes with unknown status.
-            #
-            # When there are bad or questionable nodes in the bucket, the least recent is selected for
-            # potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
-            # contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
-            # is ignored if the pinged node replies.
-
-            not_good_contacts = self.routing_table.buckets[bucket_index].get_bad_or_unknown_peers()
-            not_recently_replied = []
-            for my_peer in not_good_contacts:
-                last_replied = self.peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
-                if not last_replied or last_replied + 60 < self.loop.time():
-                    not_recently_replied.append(my_peer)
-            if not_recently_replied:
-                to_replace = not_recently_replied[0]
-            else:
-                to_replace = self.routing_table.buckets[bucket_index].peers[0]
-            last_replied = self.peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
-            if last_replied and last_replied + 60 > self.loop.time():
-                return False
-            log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
-            try:
-                to_replace_rpc = self.get_rpc_peer(to_replace)
-                await to_replace_rpc.ping()
-                return False
-            except asyncio.TimeoutError:
-                log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
-                          to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
-                if to_replace in self.routing_table.buckets[bucket_index]:
-                    self.routing_table.buckets[bucket_index].remove_peer(to_replace)
-                return await self._add_peer(peer)
+        async def probe(some_peer: 'KademliaPeer'):
+            rpc_peer = self.get_rpc_peer(some_peer)
+            await rpc_peer.ping()
+        return await self.routing_table.add_peer(peer, probe)

     def add_peer(self, peer: 'KademliaPeer'):
         if peer.node_id == self.node_id:
@@ -407,16 +382,15 @@ class KademliaProtocol(DatagramProtocol):
             async with self._split_lock:
                 peer = self._to_remove.pop()
                 self.routing_table.remove_peer(peer)
-                self.routing_table.join_buckets()
             while self._to_add:
                 async with self._split_lock:
                     await self._add_peer(self._to_add.pop())
-            await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1, loop=self.loop), loop=self.loop)
+            await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1))
             self._wakeup_routing_task.clear()

     def _handle_rpc(self, sender_contact: 'KademliaPeer', message: RequestDatagram):
-        assert sender_contact.node_id != self.node_id, (binascii.hexlify(sender_contact.node_id)[:8].decode(),
-                                                        binascii.hexlify(self.node_id)[:8].decode())
+        assert sender_contact.node_id != self.node_id, (sender_contact.node_id.hex()[:8],
+                                                        self.node_id.hex()[:8])
         method = message.method
         if method not in [b'ping', b'store', b'findNode', b'findValue']:
             raise AttributeError('Invalid method: %s' % message.method.decode())
@@ -448,11 +422,15 @@ class KademliaProtocol(DatagramProtocol):

     def handle_request_datagram(self, address: typing.Tuple[str, int], request_datagram: RequestDatagram):
         # This is an RPC method request
+        self.received_request_metric.labels(method=request_datagram.method).inc()
         self.peer_manager.report_last_requested(address[0], address[1])
-        try:
-            peer = self.routing_table.get_peer(request_datagram.node_id)
-        except IndexError:
-            peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
+        peer = self.routing_table.get_peer(request_datagram.node_id)
+        if not peer:
+            try:
+                peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
+            except ValueError as err:
+                log.warning("error replying to %s: %s", address[0], str(err))
+                return
         try:
             self._handle_rpc(peer, request_datagram)
             # if the contact is not known to be bad (yet) and we haven't yet queried it, send it a ping so that it
@@ -552,12 +530,12 @@ class KademliaProtocol(DatagramProtocol):
                 address[0], address[1], OLD_PROTOCOL_ERRORS[error_datagram.response]
             )

-    def datagram_received(self, datagram: bytes, address: typing.Tuple[str, int]) -> None:  # pylint: disable=arguments-differ
+    def datagram_received(self, datagram: bytes, address: typing.Tuple[str, int]) -> None:  # pylint: disable=arguments-renamed
         try:
             message = decode_datagram(datagram)
         except (ValueError, TypeError, DecodeError):
             self.peer_manager.report_failure(address[0], address[1])
-            log.warning("Couldn't decode dht datagram from %s: %s", address, binascii.hexlify(datagram).decode())
+            log.warning("Couldn't decode dht datagram from %s: %s", address, datagram.hex())
             return

         if isinstance(message, RequestDatagram):
@@ -572,14 +550,19 @@ class KademliaProtocol(DatagramProtocol):
         self._send(peer, request)
         response_fut = self.sent_messages[request.rpc_id][1]
         try:
+            self.request_sent_metric.labels(method=request.method).inc()
+            start = time.perf_counter()
             response = await asyncio.wait_for(response_fut, self.rpc_timeout)
+            self.response_time_metric.labels(method=request.method).observe(time.perf_counter() - start)
             self.peer_manager.report_last_replied(peer.address, peer.udp_port)
+            self.request_success_metric.labels(method=request.method).inc()
             return response
         except asyncio.CancelledError:
             if not response_fut.done():
                 response_fut.cancel()
             raise
         except (asyncio.TimeoutError, RemoteException):
+            self.request_error_metric.labels(method=request.method).inc()
             self.peer_manager.report_failure(peer.address, peer.udp_port)
             if self.peer_manager.peer_is_good(peer) is False:
                 self.remove_peer(peer)
@@ -599,7 +582,7 @@ class KademliaProtocol(DatagramProtocol):
         if len(data) > constants.MSG_SIZE_LIMIT:
             log.warning("cannot send datagram larger than %i bytes (packet is %i bytes)",
                         constants.MSG_SIZE_LIMIT, len(data))
-            log.debug("Packet is too large to send: %s", binascii.hexlify(data[:3500]).decode())
+            log.debug("Packet is too large to send: %s", data[:3500].hex())
             raise ValueError(
                 f"cannot send datagram larger than {constants.MSG_SIZE_LIMIT} bytes (packet is {len(data)} bytes)"
             )
@@ -659,13 +642,13 @@ class KademliaProtocol(DatagramProtocol):
             res = await self.get_rpc_peer(peer).store(hash_value)
             if res != b"OK":
                 raise ValueError(res)
-            log.debug("Stored %s to %s", binascii.hexlify(hash_value).decode()[:8], peer)
+            log.debug("Stored %s to %s", hash_value.hex()[:8], peer)
             return peer.node_id, True

         try:
             return await __store()
         except asyncio.TimeoutError:
-            log.debug("Timeout while storing blob_hash %s at %s", binascii.hexlify(hash_value).decode()[:8], peer)
+            log.debug("Timeout while storing blob_hash %s at %s", hash_value.hex()[:8], peer)
             return peer.node_id, False
         except ValueError as err:
             log.error("Unexpected response: %s", err)

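The request path now times each RPC round trip with time.perf_counter and records the elapsed seconds into a Histogram with explicit buckets. A small self-contained example of that timing pattern follows; the metric name and bucket list are illustrative, not the ones registered by the patch.

```python
import time
from prometheus_client import Histogram

# Example of the timing pattern used around asyncio.wait_for in the diff.
RESPONSE_TIME = Histogram(
    "example_response_time", "Round-trip time of a request",
    buckets=(.005, .01, .05, .1, .5, 1.0, float("inf")), labelnames=("method",)
)

def timed_call(method, func):
    start = time.perf_counter()
    result = func()
    RESPONSE_TIME.labels(method=method).observe(time.perf_counter() - start)
    return result

print(timed_call("ping", lambda: "pong"))
```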
@@ -4,7 +4,11 @@ import logging
 import typing
 import itertools

+from prometheus_client import Gauge
+
+from lbry import utils
 from lbry.dht import constants
+from lbry.dht.error import RemoteException
 from lbry.dht.protocol.distance import Distance
 if typing.TYPE_CHECKING:
     from lbry.dht.peer import KademliaPeer, PeerManager
@@ -13,10 +17,20 @@ log = logging.getLogger(__name__)


 class KBucket:
-    """ Description - later
-    """
+    """
+    Kademlia K-bucket implementation.
+    """
+    peer_in_routing_table_metric = Gauge(
+        "peers_in_routing_table", "Number of peers on routing table", namespace="dht_node",
+        labelnames=("scope",)
+    )
+    peer_with_x_bit_colliding_metric = Gauge(
+        "peer_x_bit_colliding", "Number of peers with at least X bits colliding with this node id",
+        namespace="dht_node", labelnames=("amount",)
+    )

-    def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int, node_id: bytes):
+    def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int,
+                 node_id: bytes, capacity: int = constants.K):
         """
         @param range_min: The lower boundary for the range in the n-bit ID
                           space covered by this k-bucket
@@ -24,12 +38,12 @@ class KBucket:
                           covered by this k-bucket
         """
         self._peer_manager = peer_manager
-        self.last_accessed = 0
         self.range_min = range_min
         self.range_max = range_max
         self.peers: typing.List['KademliaPeer'] = []
         self._node_id = node_id
         self._distance_to_self = Distance(node_id)
+        self.capacity = capacity

     def add_peer(self, peer: 'KademliaPeer') -> bool:
         """ Add contact to _contact list in the right order. This will move the
@@ -50,24 +64,25 @@ class KBucket:
             self.peers.append(peer)
             return True
         else:
-            for i in range(len(self.peers)):
+            for i, _ in enumerate(self.peers):
                 local_peer = self.peers[i]
                 if local_peer.node_id == peer.node_id:
                     self.peers.remove(local_peer)
                     self.peers.append(peer)
                     return True
-            if len(self.peers) < constants.K:
+            if len(self.peers) < self.capacity:
                 self.peers.append(peer)
+                self.peer_in_routing_table_metric.labels("global").inc()
+                bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
+                self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).inc()
                 return True
             else:
                 return False
-            # raise BucketFull("No space in bucket to insert contact")

     def get_peer(self, node_id: bytes) -> 'KademliaPeer':
         for peer in self.peers:
             if peer.node_id == node_id:
                 return peer
-        raise IndexError(node_id)

     def get_peers(self, count=-1, exclude_contact=None, sort_distance_to=None) -> typing.List['KademliaPeer']:
         """ Returns a list containing up to the first count number of contacts
@@ -124,6 +139,9 @@ class KBucket:

     def remove_peer(self, peer: 'KademliaPeer') -> None:
         self.peers.remove(peer)
+        self.peer_in_routing_table_metric.labels("global").dec()
+        bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
+        self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).dec()

     def key_in_range(self, key: bytes) -> bool:
         """ Tests whether the specified key (i.e. node ID) is in the range
@@ -161,24 +179,36 @@ class TreeRoutingTable:
     version of the Kademlia paper, in section 2.4. It does, however, use the
     ping RPC-based k-bucket eviction algorithm described in section 2.2 of
     that paper.
+
+    BOOTSTRAP MODE: if set to True, we always add all peers. This is so a
+    bootstrap node does not get a bias towards its own node id and replies are
+    the best it can provide (joining peer knows its neighbors immediately).
+    Over time, this will need to be optimized so we use the disk as holding
+    everything in memory won't be feasible anymore.
+    See: https://github.com/bittorrent/bootstrap-dht
     """
+    bucket_in_routing_table_metric = Gauge(
+        "buckets_in_routing_table", "Number of buckets on routing table", namespace="dht_node",
+        labelnames=("scope",)
+    )

     def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', parent_node_id: bytes,
-                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
+                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False):
         self._loop = loop
         self._peer_manager = peer_manager
         self._parent_node_id = parent_node_id
         self._split_buckets_under_index = split_buckets_under_index
         self.buckets: typing.List[KBucket] = [
             KBucket(
-                self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id
+                self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id,
+                capacity=1 << 32 if is_bootstrap_node else constants.K
             )
         ]

     def get_peers(self) -> typing.List['KademliaPeer']:
         return list(itertools.chain.from_iterable(map(lambda bucket: bucket.peers, self.buckets)))

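In bootstrap mode the single starting bucket is created with an effectively unbounded capacity (1 << 32) so the node never evicts contacts or biases replies towards its own id. A tiny sketch of that constructor choice, with invented names standing in for the real classes, is below.

```python
# Sketch of the capacity rule introduced for bootstrap mode: a normal node
# keeps K peers per bucket, a bootstrap node effectively never fills up.
K = 8

class Bucket:
    def __init__(self, capacity):
        self.capacity = capacity
        self.peers = []

    def add(self, peer):
        if len(self.peers) < self.capacity:
            self.peers.append(peer)
            return True
        return False

def make_root_bucket(is_bootstrap_node: bool) -> Bucket:
    return Bucket(capacity=1 << 32 if is_bootstrap_node else K)

normal, bootstrap = make_root_bucket(False), make_root_bucket(True)
for n in range(20):
    normal.add(n)
    bootstrap.add(n)
print(len(normal.peers), len(bootstrap.peers))   # 8 20
```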
-    def should_split(self, bucket_index: int, to_add: bytes) -> bool:
+    def _should_split(self, bucket_index: int, to_add: bytes) -> bool:
         # https://stackoverflow.com/questions/32129978/highly-unbalanced-kademlia-routing-table/32187456#32187456
         if bucket_index < self._split_buckets_under_index:
             return True
@@ -203,39 +233,32 @@ class TreeRoutingTable:
         return []

     def get_peer(self, contact_id: bytes) -> 'KademliaPeer':
-        """
-        @raise IndexError: No contact with the specified contact ID is known
-                           by this node
-        """
-        return self.buckets[self.kbucket_index(contact_id)].get_peer(contact_id)
+        return self.buckets[self._kbucket_index(contact_id)].get_peer(contact_id)

     def get_refresh_list(self, start_index: int = 0, force: bool = False) -> typing.List[bytes]:
-        bucket_index = start_index
         refresh_ids = []
-        now = int(self._loop.time())
-        for bucket in self.buckets[start_index:]:
-            if force or now - bucket.last_accessed >= constants.REFRESH_INTERVAL:
-                to_search = self.midpoint_id_in_bucket_range(bucket_index)
-                refresh_ids.append(to_search)
-            bucket_index += 1
+        for offset, _ in enumerate(self.buckets[start_index:]):
+            refresh_ids.append(self._midpoint_id_in_bucket_range(start_index + offset))
+        # if we have 3 or fewer populated buckets get two random ids in the range of each to try and
+        # populate/split the buckets further
+        buckets_with_contacts = self.buckets_with_contacts()
+        if buckets_with_contacts <= 3:
+            for i in range(buckets_with_contacts):
+                refresh_ids.append(self._random_id_in_bucket_range(i))
+                refresh_ids.append(self._random_id_in_bucket_range(i))
         return refresh_ids

     def remove_peer(self, peer: 'KademliaPeer') -> None:
         if not peer.node_id:
             return
-        bucket_index = self.kbucket_index(peer.node_id)
+        bucket_index = self._kbucket_index(peer.node_id)
         try:
             self.buckets[bucket_index].remove_peer(peer)
+            self._join_buckets()
         except ValueError:
             return

-    def touch_kbucket(self, key: bytes) -> None:
-        self.touch_kbucket_by_index(self.kbucket_index(key))
-
-    def touch_kbucket_by_index(self, bucket_index: int):
-        self.buckets[bucket_index].last_accessed = int(self._loop.time())
-
-    def kbucket_index(self, key: bytes) -> int:
+    def _kbucket_index(self, key: bytes) -> int:
         i = 0
         for bucket in self.buckets:
             if bucket.key_in_range(key):
@@ -244,19 +267,19 @@ class TreeRoutingTable:
             i += 1
         return i

-    def random_id_in_bucket_range(self, bucket_index: int) -> bytes:
+    def _random_id_in_bucket_range(self, bucket_index: int) -> bytes:
         random_id = int(random.randrange(self.buckets[bucket_index].range_min, self.buckets[bucket_index].range_max))
         return Distance(
             self._parent_node_id
         )(random_id.to_bytes(constants.HASH_LENGTH, 'big')).to_bytes(constants.HASH_LENGTH, 'big')

-    def midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
+    def _midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
         half = int((self.buckets[bucket_index].range_max - self.buckets[bucket_index].range_min) // 2)
         return Distance(self._parent_node_id)(
             int(self.buckets[bucket_index].range_min + half).to_bytes(constants.HASH_LENGTH, 'big')
         ).to_bytes(constants.HASH_LENGTH, 'big')

-    def split_bucket(self, old_bucket_index: int) -> None:
+    def _split_bucket(self, old_bucket_index: int) -> None:
         """ Splits the specified k-bucket into two new buckets which together
             cover the same range in the key/ID space

@@ -279,8 +302,9 @@ class TreeRoutingTable:
         # ...and remove them from the old bucket
         for contact in new_bucket.peers:
             old_bucket.remove_peer(contact)
+        self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))

-    def join_buckets(self):
+    def _join_buckets(self):
         if len(self.buckets) == 1:
             return
         to_pop = [i for i, bucket in enumerate(self.buckets) if len(bucket) == 0]
@@ -302,14 +326,8 @@ class TreeRoutingTable:
         elif can_go_higher:
             self.buckets[bucket_index_to_pop + 1].range_min = bucket.range_min
         self.buckets.remove(bucket)
-        return self.join_buckets()
-
-    def contact_in_routing_table(self, address_tuple: typing.Tuple[str, int]) -> bool:
-        for bucket in self.buckets:
-            for contact in bucket.get_peers(sort_distance_to=False):
-                if address_tuple[0] == contact.address and address_tuple[1] == contact.udp_port:
-                    return True
-        return False
+        self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
+        return self._join_buckets()

     def buckets_with_contacts(self) -> int:
         count = 0
@@ -317,3 +335,70 @@ class TreeRoutingTable:
             if len(bucket) > 0:
                 count += 1
         return count

+    async def add_peer(self, peer: 'KademliaPeer', probe: typing.Callable[['KademliaPeer'], typing.Awaitable]):
+        if not peer.node_id:
+            log.warning("Tried adding a peer with no node id!")
+            return False
+        for my_peer in self.get_peers():
+            if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
+                self.remove_peer(my_peer)
+                self._join_buckets()
+        bucket_index = self._kbucket_index(peer.node_id)
+        if self.buckets[bucket_index].add_peer(peer):
+            return True
+
+        # The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
+        if self._should_split(bucket_index, peer.node_id):
+            self._split_bucket(bucket_index)
+            # Retry the insertion attempt
+            result = await self.add_peer(peer, probe)
+            self._join_buckets()
+            return result
+        else:
+            # We can't split the k-bucket
+            #
+            # The 13 page kademlia paper specifies that the least recently contacted node in the bucket
+            # shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
+            # the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
+            #
+            # A reasonable extension to this is BEP 0005, which extends the above:
+            #
+            #     Not all nodes that we learn about are equal. Some are "good" and some are not.
+            #     Many nodes using the DHT are able to send queries and receive responses,
+            #     but are not able to respond to queries from other nodes. It is important that
+            #     each node's routing table must contain only known good nodes. A good node is
+            #     a node has responded to one of our queries within the last 15 minutes. A node
+            #     is also good if it has ever responded to one of our queries and has sent us a
+            #     query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
+            #     questionable. Nodes become bad when they fail to respond to multiple queries
+            #     in a row. Nodes that we know are good are given priority over nodes with unknown status.
+            #
+            # When there are bad or questionable nodes in the bucket, the least recent is selected for
+            # potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
+            # contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
+            # is ignored if the pinged node replies.
+
+            not_good_contacts = self.buckets[bucket_index].get_bad_or_unknown_peers()
+            not_recently_replied = []
+            for my_peer in not_good_contacts:
+                last_replied = self._peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
+                if not last_replied or last_replied + 60 < self._loop.time():
+                    not_recently_replied.append(my_peer)
+            if not_recently_replied:
+                to_replace = not_recently_replied[0]
+            else:
+                to_replace = self.buckets[bucket_index].peers[0]
+            last_replied = self._peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
+            if last_replied and last_replied + 60 > self._loop.time():
+                return False
+            log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
+            try:
+                await probe(to_replace)
+                return False
+            except (asyncio.TimeoutError, RemoteException):
+                log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
+                          to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
+                if to_replace in self.buckets[bucket_index]:
+                    self.buckets[bucket_index].remove_peer(to_replace)
+                return await self.add_peer(peer, probe)

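The eviction logic now lives in the routing table and pings the stale contact through a caller-supplied `probe` coroutine, so the table itself never touches the network. Below is a hypothetical, simplified sketch of that dependency-injection pattern with invented names; it only illustrates the "ping the oldest contact, evict on failure" decision, not the full bucket logic.

```python
import asyncio

# Hypothetical sketch: the table receives a `probe` coroutine and decides
# eviction based on whether it raises (peer dead) or returns (peer alive).
async def add_with_probe(bucket, new_peer, oldest_peer, probe):
    try:
        await probe(oldest_peer)       # oldest contact still answers
        return False                   # keep it, ignore the newcomer
    except asyncio.TimeoutError:
        bucket.remove(oldest_peer)     # dead: replace with the new contact
        bucket.append(new_peer)
        return True

async def main():
    async def dead_probe(_peer):
        raise asyncio.TimeoutError()

    bucket = ["old-peer"]
    added = await add_with_probe(bucket, "new-peer", "old-peer", dead_probe)
    print(added, bucket)               # True ['new-peer']

asyncio.run(main())
```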
|
|
@@ -144,7 +144,7 @@ class ErrorDatagram(KademliaDatagramBase):
         self.response = response.decode()


-def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDatagram, ErrorDatagram]:
+def _decode_datagram(datagram: bytes):
     msg_types = {
         REQUEST_TYPE: RequestDatagram,
         RESPONSE_TYPE: ResponseDatagram,
@@ -152,26 +152,36 @@ def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDa
     }

     primitive: typing.Dict = bdecode(datagram)
-    if primitive[0] in [REQUEST_TYPE, ERROR_TYPE, RESPONSE_TYPE]:  # pylint: disable=unsubscriptable-object
-        datagram_type = primitive[0]  # pylint: disable=unsubscriptable-object
+
+    converted = {
+        str(k).encode() if not isinstance(k, bytes) else k: v for k, v in primitive.items()
+    }
+
+    if converted[b'0'] in [REQUEST_TYPE, ERROR_TYPE, RESPONSE_TYPE]:  # pylint: disable=unsubscriptable-object
+        datagram_type = converted[b'0']  # pylint: disable=unsubscriptable-object
     else:
         raise ValueError("invalid datagram type")
     datagram_class = msg_types[datagram_type]
     decoded = {
-        k: primitive[i]  # pylint: disable=unsubscriptable-object
+        k: converted[str(i).encode()]  # pylint: disable=unsubscriptable-object
         for i, k in enumerate(datagram_class.required_fields)
-        if i in primitive  # pylint: disable=unsupported-membership-test
+        if str(i).encode() in converted  # pylint: disable=unsupported-membership-test
     }
     for i, _ in enumerate(OPTIONAL_FIELDS):
-        if i + OPTIONAL_ARG_OFFSET in primitive:
-            decoded[i + OPTIONAL_ARG_OFFSET] = primitive[i + OPTIONAL_ARG_OFFSET]
+        if str(i + OPTIONAL_ARG_OFFSET).encode() in converted:
+            decoded[i + OPTIONAL_ARG_OFFSET] = converted[str(i + OPTIONAL_ARG_OFFSET).encode()]
+    return decoded, datagram_class
+
+
+def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDatagram, ErrorDatagram]:
+    decoded, datagram_class = _decode_datagram(datagram)
     return datagram_class(**decoded)


 def make_compact_ip(address: str) -> bytearray:
     compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), address.split('.'), bytearray())
     if len(compact_ip) != 4:
-        raise ValueError(f"invalid IPv4 length")
+        raise ValueError("invalid IPv4 length")
     return compact_ip

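The `converted` mapping above exists because `bdecode` can hand back dictionary keys as ints, strs or bytes depending on the sender; normalizing every key to the byte form of its string representation makes lookups such as `converted[b'0']` work for all of them. A minimal standalone illustration (the sample datagram is made up):

    primitive = {0: 0, '1': b'\x01' * 48, b'2': b'ping'}  # hypothetical decoded datagram
    converted = {
        str(k).encode() if not isinstance(k, bytes) else k: v for k, v in primitive.items()
    }
    assert set(converted) == {b'0', b'1', b'2'}
    assert converted[b'0'] == 0  # the message-type lookup now works for any key flavour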
@@ -180,7 +190,7 @@ def make_compact_address(node_id: bytes, address: str, port: int) -> bytearray:
     if not 0 < port < 65536:
         raise ValueError(f'Invalid port: {port}')
     if len(node_id) != constants.HASH_BITS // 8:
-        raise ValueError(f"invalid node node_id length")
+        raise ValueError("invalid node node_id length")
     return compact_ip + port.to_bytes(2, 'big') + node_id


@@ -191,5 +201,5 @@ def decode_compact_address(compact_address: bytes) -> typing.Tuple[bytes, str, i
     if not 0 < port < 65536:
         raise ValueError(f'Invalid port: {port}')
     if len(node_id) != constants.HASH_BITS // 8:
-        raise ValueError(f"invalid node node_id length")
+        raise ValueError("invalid node node_id length")
     return node_id, address, port

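Both helpers above operate on the DHT's compact peer encoding: four IPv4 octets, a two-byte big-endian port, then the raw node id. A standalone round-trip of that layout, assuming 48-byte node ids (`constants.HASH_BITS // 8` in this codebase):

    node_id = bytes(48)                 # assumption: 48-byte node ids, as used by this DHT
    address, port = '10.0.0.1', 4444

    compact = bytes(int(octet) for octet in address.split('.')) + port.to_bytes(2, 'big') + node_id
    assert len(compact) == 4 + 2 + 48

    decoded_address = '.'.join(str(b) for b in compact[:4])
    decoded_port = int.from_bytes(compact[4:6], 'big')
    decoded_node_id = compact[6:]
    assert (decoded_address, decoded_port, decoded_node_id) == (address, port, node_id)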
@@ -34,6 +34,11 @@ Code | Name | Message
 **11x** | InputValue(ValueError) | Invalid argument value provided to command.
 111 | GenericInputValue | The value '{value}' for argument '{argument}' is not valid.
 112 | InputValueIsNone | None or null is not valid value for argument '{argument}'.
+113 | ConflictingInputValue | Only '{first_argument}' or '{second_argument}' is allowed, not both.
+114 | InputStringIsBlank | {argument} cannot be blank.
+115 | EmptyPublishedFile | Cannot publish empty file: {file_path}
+116 | MissingPublishedFile | File does not exist: {file_path}
+117 | InvalidStreamURL | Invalid LBRY stream URL: '{url}' -- When an URL cannot be downloaded, such as '@Channel/' or a collection
 **2xx** | Configuration | Configuration errors.
 201 | ConfigWrite | Cannot write configuration file '{path}'. -- When writing the default config fails on startup, such as due to permission issues.
 202 | ConfigRead | Cannot find provided configuration file '{path}'. -- Can't open the config file user provided via command line args.
@@ -51,15 +56,22 @@ Code | Name | Message
 405 | ChannelKeyNotFound | Channel signing key not found.
 406 | ChannelKeyInvalid | Channel signing key is out of date. -- For example, channel was updated but you don't have the updated key.
 407 | DataDownload | Failed to download blob. *generic*
+408 | PrivateKeyNotFound | Couldn't find private key for {key} '{value}'.
 410 | Resolve | Failed to resolve '{url}'.
 411 | ResolveTimeout | Failed to resolve '{url}' within the timeout.
-411 | ResolveCensored | Resolve of '{url}' was censored by channel with claim id '{claim_id(censor_hash)}'.
+411 | ResolveCensored | Resolve of '{url}' was censored by channel with claim id '{censor_id}'.
 420 | KeyFeeAboveMaxAllowed | {message}
 421 | InvalidPassword | Password is invalid.
 422 | IncompatibleWalletServer | '{server}:{port}' has an incompatibly old version.
+423 | TooManyClaimSearchParameters | {key} cant have more than {limit} items.
+424 | AlreadyPurchased | You already have a purchase for claim_id '{claim_id_hex}'. Use --allow-duplicate-purchase flag to override.
 431 | ServerPaymentInvalidAddress | Invalid address from wallet server: '{address}' - skipping payment round.
 432 | ServerPaymentWalletLocked | Cannot spend funds with locked wallet, skipping payment round.
 433 | ServerPaymentFeeAboveMaxAllowed | Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.
+434 | WalletNotLoaded | Wallet {wallet_id} is not loaded.
+435 | WalletAlreadyLoaded | Wallet {wallet_path} is already loaded.
+436 | WalletNotFound | Wallet not found at {wallet_path}.
+437 | WalletAlreadyExists | Wallet {wallet_path} already exists, use `wallet_add` to load it.
 **5xx** | Blob | **Blobs**
 500 | BlobNotFound | Blob not found.
 501 | BlobPermissionDenied | Permission denied to read blob.

@@ -76,6 +76,45 @@ class InputValueIsNoneError(InputValueError):
         super().__init__(f"None or null is not valid value for argument '{argument}'.")


+class ConflictingInputValueError(InputValueError):
+
+    def __init__(self, first_argument, second_argument):
+        self.first_argument = first_argument
+        self.second_argument = second_argument
+        super().__init__(f"Only '{first_argument}' or '{second_argument}' is allowed, not both.")
+
+
+class InputStringIsBlankError(InputValueError):
+
+    def __init__(self, argument):
+        self.argument = argument
+        super().__init__(f"{argument} cannot be blank.")
+
+
+class EmptyPublishedFileError(InputValueError):
+
+    def __init__(self, file_path):
+        self.file_path = file_path
+        super().__init__(f"Cannot publish empty file: {file_path}")
+
+
+class MissingPublishedFileError(InputValueError):
+
+    def __init__(self, file_path):
+        self.file_path = file_path
+        super().__init__(f"File does not exist: {file_path}")
+
+
+class InvalidStreamURLError(InputValueError):
+    """
+    When an URL cannot be downloaded, such as '@Channel/' or a collection
+    """
+
+    def __init__(self, url):
+        self.url = url
+        super().__init__(f"Invalid LBRY stream URL: '{url}'")
+
+
 class ConfigurationError(BaseError):
     """
     Configuration errors.
@@ -199,6 +238,14 @@ class DataDownloadError(WalletError):
         super().__init__("Failed to download blob. *generic*")


+class PrivateKeyNotFoundError(WalletError):
+
+    def __init__(self, key, value):
+        self.key = key
+        self.value = value
+        super().__init__(f"Couldn't find private key for {key} '{value}'.")
+
+
 class ResolveError(WalletError):

     def __init__(self, url):
@@ -215,10 +262,11 @@ class ResolveTimeoutError(WalletError):


 class ResolveCensoredError(WalletError):

-    def __init__(self, url, censor_hash):
+    def __init__(self, url, censor_id, censor_row):
         self.url = url
-        self.censor_hash = censor_hash
-        super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{claim_id(censor_hash)}'.")
+        self.censor_id = censor_id
+        self.censor_row = censor_row
+        super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{censor_id}'.")


 class KeyFeeAboveMaxAllowedError(WalletError):
@@ -242,6 +290,24 @@ class IncompatibleWalletServerError(WalletError):
         super().__init__(f"'{server}:{port}' has an incompatibly old version.")


+class TooManyClaimSearchParametersError(WalletError):
+
+    def __init__(self, key, limit):
+        self.key = key
+        self.limit = limit
+        super().__init__(f"{key} cant have more than {limit} items.")
+
+
+class AlreadyPurchasedError(WalletError):
+    """
+    allow-duplicate-purchase flag to override.
+    """
+
+    def __init__(self, claim_id_hex):
+        self.claim_id_hex = claim_id_hex
+        super().__init__(f"You already have a purchase for claim_id '{claim_id_hex}'. Use")
+
+
 class ServerPaymentInvalidAddressError(WalletError):

     def __init__(self, address):
@@ -263,6 +329,34 @@ class ServerPaymentFeeAboveMaxAllowedError(WalletError):
         super().__init__(f"Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.")


+class WalletNotLoadedError(WalletError):
+
+    def __init__(self, wallet_id):
+        self.wallet_id = wallet_id
+        super().__init__(f"Wallet {wallet_id} is not loaded.")
+
+
+class WalletAlreadyLoadedError(WalletError):
+
+    def __init__(self, wallet_path):
+        self.wallet_path = wallet_path
+        super().__init__(f"Wallet {wallet_path} is already loaded.")
+
+
+class WalletNotFoundError(WalletError):
+
+    def __init__(self, wallet_path):
+        self.wallet_path = wallet_path
+        super().__init__(f"Wallet not found at {wallet_path}.")
+
+
+class WalletAlreadyExistsError(WalletError):
+
+    def __init__(self, wallet_path):
+        self.wallet_path = wallet_path
+        super().__init__(f"Wallet {wallet_path} already exists, use `wallet_add` to load it.")
+
+
 class BlobError(BaseError):
     """
     **Blobs**

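Each generated class stores its template fields as attributes before formatting the message, so callers can inspect them programmatically. A small usage sketch, assuming the classes above are importable from `lbry.error` once this change lands (the argument values are made up):

    from lbry.error import TooManyClaimSearchParametersError

    try:
        raise TooManyClaimSearchParametersError('channel_ids', 50)
    except TooManyClaimSearchParametersError as err:
        assert (err.key, err.limit) == ('channel_ids', 50)
        print(err)  # "channel_ids cant have more than 50 items."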
@@ -63,7 +63,7 @@ class ErrorClass:
     @staticmethod
     def get_fields(args):
         if len(args) > 1:
-            return f''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:])
+            return ''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:])
         return ''

     @staticmethod

@@ -101,7 +101,7 @@ class ArgumentParser(argparse.ArgumentParser):
         self._optionals.title = 'Options'
         if group_name is None:
             self.epilog = (
-                f"Run 'lbrynet COMMAND --help' for more information on a command or group."
+                "Run 'lbrynet COMMAND --help' for more information on a command or group."
             )
         else:
             self.epilog = (
@@ -226,6 +226,9 @@ def get_argument_parser():
 def ensure_directory_exists(path: str):
     if not os.path.isdir(path):
         pathlib.Path(path).mkdir(parents=True, exist_ok=True)
+    use_effective_ids = os.access in os.supports_effective_ids
+    if not os.access(path, os.W_OK, effective_ids=use_effective_ids):
+        raise PermissionError(f"The following directory is not writable: {path}")


 LOG_MODULES = 'lbry', 'aioupnp'

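The added writability check prefers effective UIDs where the platform supports them, because `os.access` only honours `effective_ids=True` on some systems (hence the `os.supports_effective_ids` feature test). A standalone equivalent:

    import os
    import tempfile

    def assert_writable(path: str):
        # effective_ids is only honoured where the platform advertises support for it
        use_effective_ids = os.access in os.supports_effective_ids
        if not os.access(path, os.W_OK, effective_ids=use_effective_ids):
            raise PermissionError(f"The following directory is not writable: {path}")

    assert_writable(tempfile.gettempdir())  # a normal temp dir should pass silently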
@@ -18,6 +18,7 @@ DOWNLOAD_STARTED = 'Download Started'
 DOWNLOAD_ERRORED = 'Download Errored'
 DOWNLOAD_FINISHED = 'Download Finished'
 HEARTBEAT = 'Heartbeat'
+DISK_SPACE = 'Disk Space'
 CLAIM_ACTION = 'Claim Action'  # publish/create/update/abandon
 NEW_CHANNEL = 'New Channel'
 CREDITS_SENT = 'Credits Sent'
@@ -132,7 +133,7 @@ class AnalyticsManager:
     async def run(self):
         while True:
             if self.enabled:
-                self.external_ip = await utils.get_external_ip()
+                self.external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
                 await self._send_heartbeat()
             await asyncio.sleep(1800)

@@ -169,6 +170,15 @@ class AnalyticsManager:
             })
         )

+    async def send_disk_space_used(self, storage_used, storage_limit, is_from_network_quota):
+        await self.track(
+            self._event(DISK_SPACE, {
+                'used': storage_used,
+                'limit': storage_limit,
+                'from_network_quota': is_from_network_quota
+            })
+        )
+
     async def send_server_startup(self):
         await self.track(self._event(SERVER_STARTUP))

@@ -1,5 +1,5 @@
-from lbry.conf import Config
 from lbry.extras.cli import execute_command
+from lbry.conf import Config


 def daemon_rpc(conf: Config, method: str, **kwargs):

@@ -1,66 +0,0 @@
-import logging
-import time
-import hashlib
-import binascii
-
-import ecdsa
-from lbry import utils
-from lbry.crypto.hash import sha256
-from lbry.wallet.transaction import Output
-
-log = logging.getLogger(__name__)
-
-
-def get_encoded_signature(signature):
-    signature = signature.encode() if isinstance(signature, str) else signature
-    r = int(signature[:int(len(signature) / 2)], 16)
-    s = int(signature[int(len(signature) / 2):], 16)
-    return ecdsa.util.sigencode_der(r, s, len(signature) * 4)
-
-
-def cid2hash(claim_id: str) -> bytes:
-    return binascii.unhexlify(claim_id.encode())[::-1]
-
-
-def is_comment_signed_by_channel(comment: dict, channel: Output, abandon=False):
-    if isinstance(channel, Output):
-        try:
-            signing_field = comment['comment_id'] if abandon else comment['comment']
-            pieces = [
-                comment['signing_ts'].encode(),
-                cid2hash(comment['channel_id']),
-                signing_field.encode()
-            ]
-            return Output.is_signature_valid(
-                get_encoded_signature(comment['signature']),
-                sha256(b''.join(pieces)),
-                channel.claim.channel.public_key_bytes
-            )
-        except KeyError:
-            pass
-    return False
-
-
-def sign_comment(comment: dict, channel: Output, abandon=False):
-    timestamp = str(int(time.time()))
-    signing_field = comment['comment_id'] if abandon else comment['comment']
-    pieces = [timestamp.encode(), channel.claim_hash, signing_field.encode()]
-    digest = sha256(b''.join(pieces))
-    signature = channel.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
-    comment.update({
-        'signature': binascii.hexlify(signature).decode(),
-        'signing_ts': timestamp
-    })
-
-
-async def jsonrpc_post(url: str, method: str, params: dict = None, **kwargs) -> any:
-    params = params or {}
-    params.update(kwargs)
-    json_body = {'jsonrpc': '2.0', 'id': None, 'method': method, 'params': params}
-    async with utils.aiohttp_request('POST', url, json=json_body) as response:
-        try:
-            result = await response.json()
-            return result['result'] if 'result' in result else result
-        except Exception as cte:
-            log.exception('Unable to decode response from server: %s', cte)
-            return await response.text()

@@ -37,7 +37,7 @@ class Component(metaclass=ComponentType):
     def running(self):
         return self._running

-    async def get_status(self):
+    async def get_status(self):  # pylint: disable=no-self-use
         return

     async def start(self):

@@ -42,7 +42,7 @@ class ComponentManager:
         self.analytics_manager = analytics_manager
         self.component_classes = {}
         self.components = set()
-        self.started = asyncio.Event(loop=self.loop)
+        self.started = asyncio.Event()
        self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop())

         for component_name, component_class in self.default_component_classes.items():
@@ -118,7 +118,7 @@ class ComponentManager:
             component._setup() for component in stage if not component.running
         ]
         if needing_start:
-            await asyncio.wait(needing_start)
+            await asyncio.wait(map(asyncio.create_task, needing_start))
         self.started.set()

     async def stop(self):
@@ -131,7 +131,7 @@ class ComponentManager:
             component._stop() for component in stage if component.running
         ]
         if needing_stop:
-            await asyncio.wait(needing_stop)
+            await asyncio.wait(map(asyncio.create_task, needing_stop))

     def all_components_running(self, *component_names):
         """

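The `map(asyncio.create_task, ...)` wrapping above matters because newer asyncio releases no longer accept bare coroutines in `asyncio.wait()`; they must be wrapped in tasks first. A minimal demonstration of the pattern:

    import asyncio

    async def setup(name):
        await asyncio.sleep(0)
        return name

    async def main():
        pending = [setup('database'), setup('wallet')]
        # Passing the coroutines directly would warn (and later fail); tasks are required.
        done, _ = await asyncio.wait(list(map(asyncio.create_task, pending)))
        print(sorted(task.result() for task in done))  # ['database', 'wallet']

    asyncio.run(main())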
@@ -4,6 +4,7 @@ import asyncio
 import logging
 import binascii
 import typing
+
 import base58

 from aioupnp import __version__ as aioupnp_version
@@ -15,7 +16,9 @@ from lbry.dht.node import Node
 from lbry.dht.peer import is_valid_public_ipv4
 from lbry.dht.blob_announcer import BlobAnnouncer
 from lbry.blob.blob_manager import BlobManager
+from lbry.blob.disk_space_manager import DiskSpaceManager
 from lbry.blob_exchange.server import BlobServer
+from lbry.stream.background_downloader import BackgroundDownloader
 from lbry.stream.stream_manager import StreamManager
 from lbry.file.file_manager import FileManager
 from lbry.extras.daemon.component import Component
@@ -24,10 +27,8 @@ from lbry.extras.daemon.storage import SQLiteStorage
 from lbry.torrent.torrent_manager import TorrentManager
 from lbry.wallet import WalletManager
 from lbry.wallet.usage_payment import WalletServerPayer
-try:
+from lbry.torrent.tracker import TrackerClient
 from lbry.torrent.session import TorrentSession
-except ImportError:
-    TorrentSession = None

 log = logging.getLogger(__name__)

@@ -40,9 +41,12 @@ WALLET_SERVER_PAYMENTS_COMPONENT = "wallet_server_payments"
 DHT_COMPONENT = "dht"
 HASH_ANNOUNCER_COMPONENT = "hash_announcer"
 FILE_MANAGER_COMPONENT = "file_manager"
+DISK_SPACE_COMPONENT = "disk_space"
+BACKGROUND_DOWNLOADER_COMPONENT = "background_downloader"
 PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
 UPNP_COMPONENT = "upnp"
 EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
+TRACKER_ANNOUNCER_COMPONENT = "tracker_announcer_component"
 LIBTORRENT_COMPONENT = "libtorrent_component"


@@ -59,7 +63,7 @@ class DatabaseComponent(Component):

     @staticmethod
     def get_current_db_revision():
-        return 14
+        return 15

     @property
     def revision_filename(self):

@@ -119,13 +123,14 @@ class WalletComponent(Component):
     async def get_status(self):
         if self.wallet_manager is None:
             return
-        session_pool = self.wallet_manager.ledger.network.session_pool
-        sessions = session_pool.sessions
+        is_connected = self.wallet_manager.ledger.network.is_connected
+        sessions = []
         connected = None
-        if self.wallet_manager.ledger.network.client:
-            addr_and_port = self.wallet_manager.ledger.network.client.server_address_and_port
-            if addr_and_port:
-                connected = f"{addr_and_port[0]}:{addr_and_port[1]}"
+        if is_connected:
+            addr, port = self.wallet_manager.ledger.network.client.server
+            connected = f"{addr}:{port}"
+            sessions.append(self.wallet_manager.ledger.network.client)

         result = {
             'connected': connected,
             'connected_features': self.wallet_manager.ledger.network.server_features,
@@ -137,8 +142,8 @@ class WalletComponent(Component):
                     'availability': session.available,
                 } for session in sessions
             ],
-            'known_servers': len(sessions),
-            'available_servers': len(list(session_pool.available_sessions))
+            'known_servers': len(self.wallet_manager.ledger.network.known_hubs),
+            'available_servers': 1 if is_connected else 0
         }

         if self.wallet_manager.ledger.network.remote_height:

@@ -274,7 +279,7 @@ class DHTComponent(Component):
         external_ip = upnp_component.external_ip
         storage = self.component_manager.get_component(DATABASE_COMPONENT)
         if not external_ip:
-            external_ip = await utils.get_external_ip()
+            external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
             if not external_ip:
                 log.warning("failed to get external ip")

@@ -288,6 +293,7 @@ class DHTComponent(Component):
             peer_port=self.external_peer_port,
             rpc_timeout=self.conf.node_rpc_timeout,
             split_buckets_under_index=self.conf.split_buckets_under_index,
+            is_bootstrap_node=self.conf.is_bootstrap_node,
             storage=storage
         )
         self.dht_node.start(self.conf.network_interface, self.conf.known_dht_nodes)
@@ -328,7 +334,7 @@ class HashAnnouncerComponent(Component):

 class FileManagerComponent(Component):
     component_name = FILE_MANAGER_COMPONENT
-    depends_on = [BLOB_COMPONENT, DATABASE_COMPONENT, WALLET_COMPONENT, LIBTORRENT_COMPONENT]
+    depends_on = [BLOB_COMPONENT, DATABASE_COMPONENT, WALLET_COMPONENT]

     def __init__(self, component_manager):
         super().__init__(component_manager)

@@ -351,7 +357,6 @@ class FileManagerComponent(Component):
         wallet = self.component_manager.get_component(WALLET_COMPONENT)
         node = self.component_manager.get_component(DHT_COMPONENT) \
             if self.component_manager.has_component(DHT_COMPONENT) else None
-        torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT) if TorrentSession else None
         log.info('Starting the file manager')
         loop = asyncio.get_event_loop()
         self.file_manager = FileManager(
@@ -360,7 +365,8 @@ class FileManagerComponent(Component):
         self.file_manager.source_managers['stream'] = StreamManager(
             loop, self.conf, blob_manager, wallet, storage, node,
         )
-        if TorrentSession:
+        if self.component_manager.has_component(LIBTORRENT_COMPONENT):
+            torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT)
             self.file_manager.source_managers['torrent'] = TorrentManager(
                 loop, self.conf, torrent, storage, self.component_manager.analytics_manager
             )

@@ -368,7 +374,106 @@ class FileManagerComponent(Component):
         log.info('Done setting up file manager')

     async def stop(self):
-        self.file_manager.stop()
+        await self.file_manager.stop()
+
+
+class BackgroundDownloaderComponent(Component):
+    MIN_PREFIX_COLLIDING_BITS = 8
+    component_name = BACKGROUND_DOWNLOADER_COMPONENT
+    depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT, DISK_SPACE_COMPONENT]
+
+    def __init__(self, component_manager):
+        super().__init__(component_manager)
+        self.background_task: typing.Optional[asyncio.Task] = None
+        self.download_loop_delay_seconds = 60
+        self.ongoing_download: typing.Optional[asyncio.Task] = None
+        self.space_manager: typing.Optional[DiskSpaceManager] = None
+        self.blob_manager: typing.Optional[BlobManager] = None
+        self.background_downloader: typing.Optional[BackgroundDownloader] = None
+        self.dht_node: typing.Optional[Node] = None
+        self.space_available: typing.Optional[int] = None
+
+    @property
+    def is_busy(self):
+        return bool(self.ongoing_download and not self.ongoing_download.done())
+
+    @property
+    def component(self) -> 'BackgroundDownloaderComponent':
+        return self
+
+    async def get_status(self):
+        return {'running': self.background_task is not None and not self.background_task.done(),
+                'available_free_space_mb': self.space_available,
+                'ongoing_download': self.is_busy}
+
+    async def download_blobs_in_background(self):
+        while True:
+            self.space_available = await self.space_manager.get_free_space_mb(True)
+            if not self.is_busy and self.space_available > 10:
+                self._download_next_close_blob_hash()
+            await asyncio.sleep(self.download_loop_delay_seconds)
+
+    def _download_next_close_blob_hash(self):
+        node_id = self.dht_node.protocol.node_id
+        for blob_hash in self.dht_node.stored_blob_hashes:
+            if blob_hash.hex() in self.blob_manager.completed_blob_hashes:
+                continue
+            if utils.get_colliding_prefix_bits(node_id, blob_hash) >= self.MIN_PREFIX_COLLIDING_BITS:
+                self.ongoing_download = asyncio.create_task(self.background_downloader.download_blobs(blob_hash.hex()))
+                return
+
+    async def start(self):
+        self.space_manager: DiskSpaceManager = self.component_manager.get_component(DISK_SPACE_COMPONENT)
+        if not self.component_manager.has_component(DHT_COMPONENT):
+            return
+        self.dht_node = self.component_manager.get_component(DHT_COMPONENT)
+        self.blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
+        storage = self.component_manager.get_component(DATABASE_COMPONENT)
+        self.background_downloader = BackgroundDownloader(self.conf, storage, self.blob_manager, self.dht_node)
+        self.background_task = asyncio.create_task(self.download_blobs_in_background())
+
+    async def stop(self):
+        if self.ongoing_download and not self.ongoing_download.done():
+            self.ongoing_download.cancel()
+        if self.background_task:
+            self.background_task.cancel()
+
+
+class DiskSpaceComponent(Component):
+    component_name = DISK_SPACE_COMPONENT
+    depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT]
+
+    def __init__(self, component_manager):
+        super().__init__(component_manager)
+        self.disk_space_manager: typing.Optional[DiskSpaceManager] = None
+
+    @property
+    def component(self) -> typing.Optional[DiskSpaceManager]:
+        return self.disk_space_manager
+
+    async def get_status(self):
+        if self.disk_space_manager:
+            space_used = await self.disk_space_manager.get_space_used_mb(cached=True)
+            return {
+                'total_used_mb': space_used['total'],
+                'published_blobs_storage_used_mb': space_used['private_storage'],
+                'content_blobs_storage_used_mb': space_used['content_storage'],
+                'seed_blobs_storage_used_mb': space_used['network_storage'],
+                'running': self.disk_space_manager.running,
+            }
+        return {'space_used': '0', 'network_seeding_space_used': '0', 'running': False}
+
+    async def start(self):
+        db = self.component_manager.get_component(DATABASE_COMPONENT)
+        blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
+        self.disk_space_manager = DiskSpaceManager(
+            self.conf, db, blob_manager,
+            analytics=self.component_manager.analytics_manager
+        )
+        await self.disk_space_manager.start()
+
+    async def stop(self):
+        await self.disk_space_manager.stop()
+
+
 class TorrentComponent(Component):

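`BackgroundDownloaderComponent` only volunteers to fetch blobs whose hashes share at least `MIN_PREFIX_COLLIDING_BITS` leading bits with the local node id. `utils.get_colliding_prefix_bits` is not part of this diff, so the following is an assumed but conventional equivalent of that prefix test, shown only to illustrate the selection rule:

    def colliding_prefix_bits(a: bytes, b: bytes) -> int:
        # Count how many leading bits the two byte strings share: XOR them, then
        # measure how far the first set bit sits from the top.
        length = min(len(a), len(b))
        xored = int.from_bytes(a[:length], 'big') ^ int.from_bytes(b[:length], 'big')
        if xored == 0:
            return length * 8
        return length * 8 - xored.bit_length()

    node_id = bytes([0b10110000]) + bytes(47)
    blob_hash = bytes([0b10111111]) + bytes(47)
    assert colliding_prefix_bits(node_id, blob_hash) == 4  # shares only the top four bits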
@@ -390,9 +495,8 @@ class TorrentComponent(Component):
             }

     async def start(self):
-        if TorrentSession:
-            self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
-            await self.torrent_session.bind()  # TODO: specify host/port
+        self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
+        await self.torrent_session.bind()  # TODO: specify host/port

     async def stop(self):
         if self.torrent_session:
@@ -447,7 +551,7 @@ class UPnPComponent(Component):
         while True:
             if now:
                 await self._maintain_redirects()
-            await asyncio.sleep(360, loop=self.component_manager.loop)
+            await asyncio.sleep(360)

     async def _maintain_redirects(self):
         # setup the gateway if necessary
@@ -456,8 +560,6 @@ class UPnPComponent(Component):
             self.upnp = await UPnP.discover(loop=self.component_manager.loop)
             log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
         except Exception as err:
-            if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
-                raise
             log.warning("upnp discovery failed: %s", err)
             self.upnp = None

@@ -472,11 +574,15 @@ class UPnPComponent(Component):
             pass
         if external_ip and not is_valid_public_ipv4(external_ip):
             log.warning("UPnP returned a private/reserved ip - %s, checking lbry.com fallback", external_ip)
-            external_ip = await utils.get_external_ip()
+            external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
         if self.external_ip and self.external_ip != external_ip:
             log.info("external ip changed from %s to %s", self.external_ip, external_ip)
         if external_ip:
             self.external_ip = external_ip
+            dht_component = self.component_manager.get_component(DHT_COMPONENT)
+            if dht_component:
+                dht_node = dht_component.component
+                dht_node.protocol.external_ip = external_ip
         # assert self.external_ip is not None  # TODO: handle going/starting offline

         if not self.upnp_redirects and self.upnp:  # setup missing redirects
@@ -530,13 +636,15 @@ class UPnPComponent(Component):
     async def start(self):
         log.info("detecting external ip")
         if not self.use_upnp:
-            self.external_ip = await utils.get_external_ip()
+            self.external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
             return
         success = False
         await self._maintain_redirects()
         if self.upnp:
-            if not self.upnp_redirects and not all([x in self.component_manager.skip_components for x in
-                                                    (DHT_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT)]):
+            if not self.upnp_redirects and not all(
+                x in self.component_manager.skip_components
+                for x in (DHT_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT)
+            ):
                 log.error("failed to setup upnp")
             else:
                 success = True

@@ -545,9 +653,9 @@ class UPnPComponent(Component):
         else:
             log.error("failed to setup upnp")
         if not self.external_ip:
-            self.external_ip = await utils.get_external_ip()
+            self.external_ip, probed_url = await utils.get_external_ip(self.conf.lbryum_servers)
             if self.external_ip:
-                log.info("detected external ip using lbry.com fallback")
+                log.info("detected external ip using %s fallback", probed_url)
         if self.component_manager.analytics_manager:
             self.component_manager.loop.create_task(
                 self.component_manager.analytics_manager.send_upnp_setup_success_fail(
@@ -563,7 +671,7 @@ class UPnPComponent(Component):
             log.info("Removing upnp redirects: %s", self.upnp_redirects)
             await asyncio.wait([
                 self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
-            ], loop=self.component_manager.loop)
+            ])
         if self._maintain_redirects_task and not self._maintain_redirects_task.done():
             self._maintain_redirects_task.cancel()

@@ -594,3 +702,49 @@ class ExchangeRateManagerComponent(Component):

     async def stop(self):
         self.exchange_rate_manager.stop()
+
+
+class TrackerAnnouncerComponent(Component):
+    component_name = TRACKER_ANNOUNCER_COMPONENT
+    depends_on = [FILE_MANAGER_COMPONENT]
+
+    def __init__(self, component_manager):
+        super().__init__(component_manager)
+        self.file_manager = None
+        self.announce_task = None
+        self.tracker_client: typing.Optional[TrackerClient] = None
+
+    @property
+    def component(self):
+        return self.tracker_client
+
+    @property
+    def running(self):
+        return self._running and self.announce_task and not self.announce_task.done()
+
+    async def announce_forever(self):
+        while True:
+            sleep_seconds = 60.0
+            announce_sd_hashes = []
+            for file in self.file_manager.get_filtered():
+                if not file.downloader:
+                    continue
+                announce_sd_hashes.append(bytes.fromhex(file.sd_hash))
+            await self.tracker_client.announce_many(*announce_sd_hashes)
+            await asyncio.sleep(sleep_seconds)
+
+    async def start(self):
+        node = self.component_manager.get_component(DHT_COMPONENT) \
+            if self.component_manager.has_component(DHT_COMPONENT) else None
+        node_id = node.protocol.node_id if node else None
+        self.tracker_client = TrackerClient(node_id, self.conf.tcp_port, lambda: self.conf.tracker_servers)
+        await self.tracker_client.start()
+        self.file_manager = self.component_manager.get_component(FILE_MANAGER_COMPONENT)
+        self.announce_task = asyncio.create_task(self.announce_forever())
+
+    async def stop(self):
+        self.file_manager = None
+        if self.announce_task and not self.announce_task.done():
+            self.announce_task.cancel()
+        self.announce_task = None
+        self.tracker_client.stop()

File diff suppressed because it is too large

@@ -2,9 +2,10 @@ import json
 import time
 import asyncio
 import logging
+from statistics import median
 from decimal import Decimal
 from typing import Optional, Iterable, Type
-from aiohttp.client_exceptions import ContentTypeError
+from aiohttp.client_exceptions import ContentTypeError, ClientConnectionError
 from lbry.error import InvalidExchangeRateResponseError, CurrencyConversionError
 from lbry.utils import aiohttp_request
 from lbry.wallet.dewies import lbc_to_dewies
@@ -58,9 +59,12 @@ class MarketFeed:
         raise NotImplementedError()

     async def get_response(self):
-        async with aiohttp_request('get', self.url, params=self.params, timeout=self.request_timeout) as response:
+        async with aiohttp_request(
+                'get', self.url, params=self.params,
+                timeout=self.request_timeout, headers={"User-Agent": "lbrynet"}
+        ) as response:
             try:
-                self._last_response = await response.json()
+                self._last_response = await response.json(content_type=None)
             except ContentTypeError as e:
                 self._last_response = {}
                 log.warning("Could not parse exchange rate response from %s: %s", self.name, e.message)
@@ -75,18 +79,21 @@ class MarketFeed:
             log.debug("Saving rate update %f for %s from %s", rate, self.market, self.name)
             self.rate = ExchangeRate(self.market, rate, int(time.time()))
             self.last_check = time.time()
-            self.event.set()
             return self.rate
-        except asyncio.CancelledError:
-            raise
         except asyncio.TimeoutError:
             log.warning("Timed out fetching exchange rate from %s.", self.name)
         except json.JSONDecodeError as e:
-            log.warning("Could not parse exchange rate response from %s: %s", self.name, e.doc)
+            msg = e.doc if '<html>' not in e.doc else 'unexpected content type.'
+            log.warning("Could not parse exchange rate response from %s: %s", self.name, msg)
+            log.debug(e.doc)
         except InvalidExchangeRateResponseError as e:
             log.warning(str(e))
+        except ClientConnectionError as e:
+            log.warning("Error trying to connect to exchange rate %s: %s", self.name, str(e))
         except Exception as e:
             log.exception("Exchange rate error (%s from %s):", self.market, self.name)
+        finally:
+            self.event.set()

     async def keep_updated(self):
         while True:

@@ -104,70 +111,92 @@ class MarketFeed:
             self.event.clear()


-class BittrexFeed(MarketFeed):
+class BaseBittrexFeed(MarketFeed):
     name = "Bittrex"
-    market = "BTCLBC"
-    url = "https://bittrex.com/api/v1.1/public/getmarkethistory"
-    params = {'market': 'BTC-LBC', 'count': 50}
+    market = None
+    url = None
     fee = 0.0025

+    def get_rate_from_response(self, json_response):
+        if 'lastTradeRate' not in json_response:
+            raise InvalidExchangeRateResponseError(self.name, 'result not found')
+        return 1.0 / float(json_response['lastTradeRate'])
+
+
+class BittrexBTCFeed(BaseBittrexFeed):
+    market = "BTCLBC"
+    url = "https://api.bittrex.com/v3/markets/LBC-BTC/ticker"
+
+
+class BittrexUSDFeed(BaseBittrexFeed):
+    market = "USDLBC"
+    url = "https://api.bittrex.com/v3/markets/LBC-USD/ticker"
+
+
+class BaseCoinExFeed(MarketFeed):
+    name = "CoinEx"
+    market = None
+    url = None
+
+    def get_rate_from_response(self, json_response):
+        if 'data' not in json_response or \
+                'ticker' not in json_response['data'] or \
+                'last' not in json_response['data']['ticker']:
+            raise InvalidExchangeRateResponseError(self.name, 'result not found')
+        return 1.0 / float(json_response['data']['ticker']['last'])
+
+
+class CoinExBTCFeed(BaseCoinExFeed):
+    market = "BTCLBC"
+    url = "https://api.coinex.com/v1/market/ticker?market=LBCBTC"
+
+
+class CoinExUSDFeed(BaseCoinExFeed):
+    market = "USDLBC"
+    url = "https://api.coinex.com/v1/market/ticker?market=LBCUSDT"
+
+
+class BaseHotbitFeed(MarketFeed):
+    name = "hotbit"
+    market = None
+    url = "https://api.hotbit.io/api/v1/market.last"
+
     def get_rate_from_response(self, json_response):
         if 'result' not in json_response:
             raise InvalidExchangeRateResponseError(self.name, 'result not found')
-        trades = json_response['result']
-        if len(trades) == 0:
-            raise InvalidExchangeRateResponseError(self.name, 'trades not found')
-        totals = sum([i['Total'] for i in trades])
-        qtys = sum([i['Quantity'] for i in trades])
-        if totals <= 0 or qtys <= 0:
-            raise InvalidExchangeRateResponseError(self.name, 'quantities were not positive')
-        vwap = totals / qtys
-        return float(1.0 / vwap)
+        return 1.0 / float(json_response['result'])


-class LBRYFeed(MarketFeed):
-    name = "lbry.com"
+class HotbitBTCFeed(BaseHotbitFeed):
     market = "BTCLBC"
-    url = "https://api.lbry.com/lbc/exchange_rate"
-
-    def get_rate_from_response(self, json_response):
-        if 'data' not in json_response:
-            raise InvalidExchangeRateResponseError(self.name, 'result not found')
-        return 1.0 / json_response['data']['lbc_btc']
+    params = {"market": "LBC/BTC"}


-class LBRYBTCFeed(LBRYFeed):
-    market = "USDBTC"
-
-    def get_rate_from_response(self, json_response):
-        if 'data' not in json_response:
-            raise InvalidExchangeRateResponseError(self.name, 'result not found')
-        return 1.0 / json_response['data']['btc_usd']
+class HotbitUSDFeed(BaseHotbitFeed):
+    market = "USDLBC"
+    params = {"market": "LBC/USDT"}


-class CryptonatorFeed(MarketFeed):
-    name = "cryptonator.com"
+class UPbitBTCFeed(MarketFeed):
+    name = "UPbit"
     market = "BTCLBC"
-    url = "https://api.cryptonator.com/api/ticker/btc-lbc"
+    url = "https://api.upbit.com/v1/ticker"
+    params = {"markets": "BTC-LBC"}

     def get_rate_from_response(self, json_response):
-        if 'ticker' not in json_response or len(json_response['ticker']) == 0 or \
-                'success' not in json_response or json_response['success'] is not True:
+        if "error" in json_response or len(json_response) != 1 or 'trade_price' not in json_response[0]:
             raise InvalidExchangeRateResponseError(self.name, 'result not found')
-        return float(json_response['ticker']['price'])
-
-
-class CryptonatorBTCFeed(CryptonatorFeed):
-    market = "USDBTC"
-    url = "https://api.cryptonator.com/api/ticker/usd-btc"
+        return 1.0 / float(json_response[0]['trade_price'])


 FEEDS: Iterable[Type[MarketFeed]] = (
-    LBRYFeed,
-    LBRYBTCFeed,
-    BittrexFeed,
-    # CryptonatorFeed,
-    # CryptonatorBTCFeed,
+    BittrexBTCFeed,
+    BittrexUSDFeed,
+    CoinExBTCFeed,
+    CoinExUSDFeed,
+    # HotbitBTCFeed,
+    # HotbitUSDFeed,
+    # UPbitBTCFeed,
 )

@@ -191,20 +220,23 @@ class ExchangeRateManager:
             source.stop()

     def convert_currency(self, from_currency, to_currency, amount):
-        rates = [market.rate for market in self.market_feeds]
-        log.debug("Converting %f %s to %s, rates: %s", amount, from_currency, to_currency, rates)
+        log.debug(
+            "Converting %f %s to %s, rates: %s",
+            amount, from_currency, to_currency,
+            [market.rate for market in self.market_feeds]
+        )
         if from_currency == to_currency:
             return round(amount, 8)

+        rates = []
         for market in self.market_feeds:
             if (market.has_rate and market.is_online and
                     market.rate.currency_pair == (from_currency, to_currency)):
-                return round(amount * Decimal(market.rate.spot), 8)
-        for market in self.market_feeds:
-            if (market.has_rate and market.is_online and
-                    market.rate.currency_pair[0] == from_currency):
-                return round(self.convert_currency(
-                    market.rate.currency_pair[1], to_currency, amount * Decimal(market.rate.spot)), 8)
+                rates.append(market.rate.spot)
+
+        if rates:
+            return round(amount * Decimal(median(rates)), 8)
+
         raise CurrencyConversionError(
             f'Unable to convert {amount} from {from_currency} to {to_currency}')

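`convert_currency` now collects the spot rate from every online feed that quotes the requested pair and converts with the median, instead of trusting whichever feed matched first. A standalone illustration of the aggregation (the rates are sample values):

    from decimal import Decimal
    from statistics import median

    rates = [Decimal('0.000012'), Decimal('0.000014'), Decimal('0.000013')]  # sample spot rates
    amount = Decimal('100')
    print(round(amount * Decimal(median(rates)), 8))  # 0.00130000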
@@ -10,7 +10,7 @@ from lbry.schema.claim import Claim
 from lbry.schema.support import Support
 from lbry.torrent.torrent_manager import TorrentSource
 from lbry.wallet import Wallet, Ledger, Account, Transaction, Output
-from lbry.wallet.bip32 import PubKey
+from lbry.wallet.bip32 import PublicKey
 from lbry.wallet.dewies import dewies_to_lbc
 from lbry.stream.managed_stream import ManagedStream
@@ -123,7 +123,7 @@ class JSONResponseEncoder(JSONEncoder):
         self.ledger = ledger
         self.include_protobuf = include_protobuf

-    def default(self, obj):  # pylint: disable=method-hidden,arguments-differ,too-many-return-statements
+    def default(self, obj):  # pylint: disable=method-hidden,arguments-renamed,too-many-return-statements
         if isinstance(obj, Account):
             return self.encode_account(obj)
         if isinstance(obj, Wallet):
@@ -138,7 +138,7 @@ class JSONResponseEncoder(JSONEncoder):
             return self.encode_claim(obj)
         if isinstance(obj, Support):
             return obj.to_dict()
-        if isinstance(obj, PubKey):
+        if isinstance(obj, PublicKey):
             return obj.extended_key_string()
         if isinstance(obj, datetime):
             return obj.strftime("%Y%m%dT%H:%M:%S")
@@ -234,8 +234,6 @@ class JSONResponseEncoder(JSONEncoder):
             output['value_type'] = txo.claim.claim_type
             if txo.claim.is_channel:
                 output['has_signing_key'] = txo.has_private_key
-        elif txo.script.is_support_claim_data:
-            output['value_type'] = 'emoji'
         if check_signature and txo.signable.is_signed:
             if txo.channel is not None:
                 output['signing_channel'] = self.encode_output(txo.channel)
@@ -330,8 +328,8 @@ class JSONResponseEncoder(JSONEncoder):
             result.update({
                 'streaming_url': managed_stream.stream_url,
                 'stream_hash': managed_stream.stream_hash,
-                'stream_name': managed_stream.descriptor.stream_name,
-                'suggested_file_name': managed_stream.descriptor.suggested_file_name,
+                'stream_name': managed_stream.stream_name,
+                'suggested_file_name': managed_stream.suggested_file_name,
                 'sd_hash': managed_stream.descriptor.sd_hash,
                 'mime_type': managed_stream.mime_type,
                 'key': managed_stream.descriptor.key,

@@ -35,6 +35,10 @@ def migrate_db(conf, start, end):
         from .migrate12to13 import do_migration
     elif current == 13:
         from .migrate13to14 import do_migration
+    elif current == 14:
+        from .migrate14to15 import do_migration
+    elif current == 15:
+        from .migrate15to16 import do_migration
     else:
         raise Exception(f"DB migration of version {current} to {current+1} is not available")
     try:
lbry/extras/daemon/migrator/migrate14to15.py (new file, 16 lines)
@@ -0,0 +1,16 @@
+import os
+import sqlite3
+
+
+def do_migration(conf):
+    db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
+    connection = sqlite3.connect(db_path)
+    cursor = connection.cursor()
+
+    cursor.executescript("""
+        alter table blob add column added_on integer not null default 0;
+        alter table blob add column is_mine integer not null default 1;
+    """)
+
+    connection.commit()
+    connection.close()
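Because both alter table statements above carry defaults, every pre-existing blob row is backfilled with added_on=0 and is_mine=1 when this migration runs. A self-contained sketch of that backfill behaviour, using a simplified stand-in table rather than the real schema:

```python
import sqlite3

# Simplified stand-in for the blob table, just to show the default backfill.
db = sqlite3.connect(":memory:")
db.execute("create table blob (blob_hash text primary key, blob_length integer)")
db.execute("insert into blob values ('aa', 123)")
db.executescript("""
    alter table blob add column added_on integer not null default 0;
    alter table blob add column is_mine integer not null default 1;
""")
print(db.execute("select added_on, is_mine from blob").fetchone())  # (0, 1)
```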
lbry/extras/daemon/migrator/migrate15to16.py (new file, 17 lines)
@@ -0,0 +1,17 @@
+import os
+import sqlite3
+
+
+def do_migration(conf):
+    db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
+    connection = sqlite3.connect(db_path)
+    cursor = connection.cursor()
+
+    cursor.executescript("""
+        update blob set should_announce=0
+        where should_announce=1 and
+            blob.blob_hash in (select stream_blob.blob_hash from stream_blob where position=0);
+    """)
+
+    connection.commit()
+    connection.close()
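The 15→16 script clears should_announce only for blobs that sit at position 0 of a stream, leaving sd blobs and later content blobs untouched. A self-contained sketch of the same rule against toy tables:

```python
import sqlite3

# Toy tables: 'head' sits at position 0 of a stream, so it loses the flag;
# the sd blob and the middle blob keep it.
db = sqlite3.connect(":memory:")
db.executescript("""
    create table blob (blob_hash text primary key, should_announce integer);
    create table stream_blob (stream_hash text, blob_hash text, position integer);
    insert into blob values ('sd', 1), ('head', 1), ('middle', 1);
    insert into stream_blob values ('s1', 'head', 0), ('s1', 'middle', 1);

    update blob set should_announce=0
    where should_announce=1 and
        blob.blob_hash in (select stream_blob.blob_hash from stream_blob where position=0);
""")
print(db.execute("select blob_hash, should_announce from blob order by blob_hash").fetchall())
# [('head', 0), ('middle', 1), ('sd', 1)]
```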
@@ -20,7 +20,7 @@ def do_migration(conf):
         "left outer join blob b ON b.blob_hash=s.blob_hash order by s.position").fetchall()
     blobs_by_stream = {}
     for stream_hash, position, iv, blob_hash, blob_length in blobs:
-        blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, blob_hash))
+        blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, 0, blob_hash))

     for stream_name, stream_key, suggested_filename, sd_hash, stream_hash in streams:
         sd = StreamDescriptor(None, blob_dir, stream_name, stream_key, suggested_filename,
@@ -170,8 +170,8 @@ def get_all_lbry_files(transaction: sqlite3.Connection) -> typing.List[typing.Di
 def store_stream(transaction: sqlite3.Connection, sd_blob: 'BlobFile', descriptor: 'StreamDescriptor'):
     # add all blobs, except the last one, which is empty
     transaction.executemany(
-        "insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)",
-        ((blob.blob_hash, blob.length, 0, 0, "pending", 0, 0)
+        "insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
+        ((blob.blob_hash, blob.length, 0, 0, "pending", 0, 0, blob.added_on, blob.is_mine)
          for blob in (descriptor.blobs[:-1] if len(descriptor.blobs) > 1 else descriptor.blobs) + [sd_blob])
     ).fetchall()
     # associate the blobs to the stream
@@ -187,8 +187,8 @@ def store_stream(transaction: sqlite3.Connection, sd_blob: 'BlobFile', descripto
     ).fetchall()
     # ensure should_announce is set regardless if insert was ignored
     transaction.execute(
-        "update blob set should_announce=1 where blob_hash in (?, ?)",
-        (sd_blob.blob_hash, descriptor.blobs[0].blob_hash,)
+        "update blob set should_announce=1 where blob_hash in (?)",
+        (sd_blob.blob_hash,)
     ).fetchall()

@@ -242,7 +242,9 @@ class SQLiteStorage(SQLiteMixin):
             should_announce integer not null default 0,
             status text not null,
             last_announced_time integer,
-            single_announce integer
+            single_announce integer,
+            added_on integer not null,
+            is_mine integer not null default 0
         );

         create table if not exists stream (
@@ -335,6 +337,7 @@ class SQLiteStorage(SQLiteMixin):
             tcp_port integer,
             unique (address, udp_port)
         );
+        create index if not exists blob_data on blob(blob_hash, blob_length, is_mine);
     """

     def __init__(self, conf: Config, path, loop=None, time_getter: typing.Optional[typing.Callable[[], float]] = None):
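With added_on and is_mine appended, the blob table is nine columns wide, which is why the inserts elsewhere in this diff grow to nine placeholders. A hedged sketch of the widened table and a matching insert; the first three column names are assumptions made only so the row width lines up, the rest are taken from the hunk above:

```python
import sqlite3
import time

db = sqlite3.connect(":memory:")
db.executescript("""
    create table blob (
        blob_hash text primary key,      -- assumed column name
        blob_length integer,             -- assumed column name
        next_announce_time integer,      -- assumed column name
        should_announce integer not null default 0,
        status text not null,
        last_announced_time integer,
        single_announce integer,
        added_on integer not null,
        is_mine integer not null default 0
    );
    create index if not exists blob_data on blob(blob_hash, blob_length, is_mine);
""")
# Nine columns now means nine placeholders per inserted row.
db.execute(
    "insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
    ("deadbeef", 2097152, 0, 0, "pending", 0, 0, int(time.time()), 1),
)
print(db.execute("select added_on, is_mine from blob").fetchone())
```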
@@ -356,19 +359,19 @@ class SQLiteStorage(SQLiteMixin):

     # # # # # # # # # blob functions # # # # # # # # #

-    async def add_blobs(self, *blob_hashes_and_lengths: typing.Tuple[str, int], finished=False):
+    async def add_blobs(self, *blob_hashes_and_lengths: typing.Tuple[str, int, int, int], finished=False):
         def _add_blobs(transaction: sqlite3.Connection):
             transaction.executemany(
-                "insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)",
+                "insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
                 (
-                    (blob_hash, length, 0, 0, "pending" if not finished else "finished", 0, 0)
-                    for blob_hash, length in blob_hashes_and_lengths
+                    (blob_hash, length, 0, 0, "pending" if not finished else "finished", 0, 0, added_on, is_mine)
+                    for blob_hash, length, added_on, is_mine in blob_hashes_and_lengths
                 )
             ).fetchall()
             if finished:
                 transaction.executemany(
                     "update blob set status='finished' where blob.blob_hash=?", (
-                        (blob_hash, ) for blob_hash, _ in blob_hashes_and_lengths
+                        (blob_hash, ) for blob_hash, _, _, _ in blob_hashes_and_lengths
                     )
                 ).fetchall()
         return await self.db.run(_add_blobs)
@@ -378,6 +381,11 @@ class SQLiteStorage(SQLiteMixin):
             "select status from blob where blob_hash=?", blob_hash
         )

+    def set_announce(self, *blob_hashes):
+        return self.db.execute_fetchall(
+            "update blob set should_announce=1 where blob_hash in (?, ?)", blob_hashes
+        )
+
     def update_last_announced_blobs(self, blob_hashes: typing.List[str]):
         def _update_last_announced_blobs(transaction: sqlite3.Connection):
             last_announced = self.time_getter()
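Call sites of add_blobs now have to supply four-element tuples. The snippet below mirrors the row construction from the hunk without the database plumbing, just to show the expected tuple shape:

```python
import time

# (blob_hash, length, added_on, is_mine) tuples now flow through add_blobs.
blob_hashes_and_lengths = [
    ("aa" * 48, 2097152, int(time.time()), 1),
    ("bb" * 48, 1048576, int(time.time()), 0),
]
finished = False
rows = [
    (blob_hash, length, 0, 0, "pending" if not finished else "finished", 0, 0, added_on, is_mine)
    for blob_hash, length, added_on, is_mine in blob_hashes_and_lengths
]
assert all(len(row) == 9 for row in rows)  # matches the nine-column blob table
```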
@@ -435,6 +443,62 @@ class SQLiteStorage(SQLiteMixin):
     def get_all_blob_hashes(self):
         return self.run_and_return_list("select blob_hash from blob")

+    async def get_stored_blobs(self, is_mine: bool, is_network_blob=False):
+        is_mine = 1 if is_mine else 0
+        if is_network_blob:
+            return await self.db.execute_fetchall(
+                "select blob.blob_hash, blob.blob_length, blob.added_on "
+                "from blob left join stream_blob using (blob_hash) "
+                "where stream_blob.stream_hash is null and blob.is_mine=? and blob.status='finished'"
+                "order by blob.blob_length desc, blob.added_on asc",
+                (is_mine,)
+            )
+
+        sd_blobs = await self.db.execute_fetchall(
+            "select blob.blob_hash, blob.blob_length, blob.added_on "
+            "from blob join stream on blob.blob_hash=stream.sd_hash join file using (stream_hash) "
+            "where blob.is_mine=? order by blob.added_on asc",
+            (is_mine,)
+        )
+        content_blobs = await self.db.execute_fetchall(
+            "select blob.blob_hash, blob.blob_length, blob.added_on "
+            "from blob join stream_blob using (blob_hash) cross join stream using (stream_hash)"
+            "cross join file using (stream_hash)"
+            "where blob.is_mine=? and blob.status='finished' order by blob.added_on asc, blob.blob_length asc",
+            (is_mine,)
+        )
+        return content_blobs + sd_blobs
+
+    async def get_stored_blob_disk_usage(self):
+        total, network_size, content_size, private_size = await self.db.execute_fetchone("""
+            select coalesce(sum(blob_length), 0) as total,
+                   coalesce(sum(case when
+                       stream_blob.stream_hash is null
+                   then blob_length else 0 end), 0) as network_storage,
+                   coalesce(sum(case when
+                       stream_blob.blob_hash is not null and is_mine=0
+                   then blob_length else 0 end), 0) as content_storage,
+                   coalesce(sum(case when
+                       is_mine=1
+                   then blob_length else 0 end), 0) as private_storage
+            from blob left join stream_blob using (blob_hash)
+            where blob_hash not in (select sd_hash from stream) and blob.status="finished"
+        """)
+        return {
+            'network_storage': network_size,
+            'content_storage': content_size,
+            'private_storage': private_size,
+            'total': total
+        }
+
+    async def update_blob_ownership(self, sd_hash, is_mine: bool):
+        is_mine = 1 if is_mine else 0
+        await self.db.execute_fetchall(
+            "update blob set is_mine = ? where blob_hash in ("
+            " select blob_hash from blob natural join stream_blob natural join stream where sd_hash = ?"
+            ") OR blob_hash = ?", (is_mine, sd_hash, sd_hash)
+        )
+
     def sync_missing_blobs(self, blob_files: typing.Set[str]) -> typing.Awaitable[typing.Set[str]]:
         def _sync_blobs(transaction: sqlite3.Connection) -> typing.Set[str]:
             finished_blob_hashes = tuple(
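The disk-usage query buckets finished blobs by joining against stream_blob: rows with no stream association count as network storage, stream blobs that are not ours as content storage, and anything flagged is_mine as private storage. A self-contained sketch of the same bucketing on plain Python rows (shapes are assumed and simplified, not the storage API):

```python
# Each row: (blob_length, has_stream, is_mine) -- assumed, simplified shapes.
blobs = [
    (1000, False, 0),  # orphan network blob hosted for the DHT
    (2000, True, 0),   # downloaded content blob
    (3000, True, 1),   # blob from a file published by this node
]

usage = {"network_storage": 0, "content_storage": 0, "private_storage": 0, "total": 0}
for blob_length, has_stream, is_mine in blobs:
    usage["total"] += blob_length
    if not has_stream:
        usage["network_storage"] += blob_length
    elif not is_mine:
        usage["content_storage"] += blob_length
    if is_mine:
        usage["private_storage"] += blob_length

print(usage)  # {'network_storage': 1000, 'content_storage': 2000, 'private_storage': 3000, 'total': 6000}
```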
@@ -470,7 +534,8 @@ class SQLiteStorage(SQLiteMixin):
         def _get_blobs_for_stream(transaction):
             crypt_blob_infos = []
             stream_blobs = transaction.execute(
-                "select blob_hash, position, iv from stream_blob where stream_hash=? "
+                "select s.blob_hash, s.position, s.iv, b.added_on "
+                "from stream_blob s left outer join blob b on b.blob_hash=s.blob_hash where stream_hash=? "
                 "order by position asc", (stream_hash, )
             ).fetchall()
             if only_completed:
@@ -490,9 +555,10 @@ class SQLiteStorage(SQLiteMixin):
                 for blob_hash, length in lengths:
                     blob_length_dict[blob_hash] = length

-            for blob_hash, position, iv in stream_blobs:
+            current_time = time.time()
+            for blob_hash, position, iv, added_on in stream_blobs:
                 blob_length = blob_length_dict.get(blob_hash, 0)
-                crypt_blob_infos.append(BlobInfo(position, blob_length, iv, blob_hash))
+                crypt_blob_infos.append(BlobInfo(position, blob_length, iv, added_on or current_time, blob_hash))
                 if not blob_hash:
                     break
             return crypt_blob_infos
@@ -570,6 +636,10 @@ class SQLiteStorage(SQLiteMixin):
         log.debug("update file status %s -> %s", stream_hash, new_status)
         return self.db.execute_fetchall("update file set status=? where stream_hash=?", (new_status, stream_hash))

+    def stop_all_files(self):
+        log.debug("stopping all files")
+        return self.db.execute_fetchall("update file set status=?", ("stopped",))
+
     async def change_file_download_dir_and_file_name(self, stream_hash: str, download_dir: typing.Optional[str],
                                                      file_name: typing.Optional[str]):
         if not file_name or not download_dir:
@@ -617,7 +687,7 @@ class SQLiteStorage(SQLiteMixin):
             ).fetchall()
             download_dir = binascii.hexlify(self.conf.download_dir.encode()).decode()
             transaction.executemany(
-                f"update file set download_directory=? where stream_hash=?",
+                "update file set download_directory=? where stream_hash=?",
                 ((download_dir, stream_hash) for stream_hash in stream_hashes)
             ).fetchall()
         await self.db.run_with_foreign_keys_disabled(_recover)
@@ -723,7 +793,7 @@ class SQLiteStorage(SQLiteMixin):

         await self.db.run(_save_claims)
         if update_file_callbacks:
-            await asyncio.wait(update_file_callbacks)
+            await asyncio.wait(map(asyncio.create_task, update_file_callbacks))
         if claim_id_to_supports:
             await self.save_supports(claim_id_to_supports)

@@ -861,6 +931,6 @@ class SQLiteStorage(SQLiteMixin):
             transaction.execute('delete from peer').fetchall()
             transaction.executemany(
                 'insert into peer(node_id, address, udp_port, tcp_port) values (?, ?, ?, ?)',
-                tuple([(binascii.hexlify(p.node_id), p.address, p.udp_port, p.tcp_port) for p in peers])
+                ((binascii.hexlify(p.node_id), p.address, p.udp_port, p.tcp_port) for p in peers)
             ).fetchall()
         return await self.db.run(_save_kademlia_peers)
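Wrapping the callbacks in tasks is about newer Python: asyncio.wait() has deprecated (and, in 3.11, removed) support for bare coroutines, so they are turned into tasks first. A minimal sketch of the pattern, independent of the storage code:

```python
import asyncio

async def notify(name):
    await asyncio.sleep(0)
    print("updated", name)

async def main():
    update_file_callbacks = [notify("a"), notify("b")]
    # asyncio.wait() wants tasks/futures on modern Python; bare coroutines are
    # deprecated since 3.8 and rejected in 3.11, hence the create_task wrapping.
    await asyncio.wait(map(asyncio.create_task, update_file_callbacks))

asyncio.run(main())
```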
@@ -5,6 +5,7 @@ from typing import Optional
 from aiohttp.web import Request
 from lbry.error import ResolveError, DownloadSDTimeoutError, InsufficientFundsError
 from lbry.error import ResolveTimeoutError, DownloadDataTimeoutError, KeyFeeAboveMaxAllowedError
+from lbry.error import InvalidStreamURLError
 from lbry.stream.managed_stream import ManagedStream
 from lbry.torrent.torrent_manager import TorrentSource
 from lbry.utils import cache_concurrent
@@ -12,11 +13,12 @@ from lbry.schema.url import URL
 from lbry.wallet.dewies import dewies_to_lbc
 from lbry.file.source_manager import SourceManager
 from lbry.file.source import ManagedDownloadSource
+from lbry.extras.daemon.storage import StoredContentClaim
 if typing.TYPE_CHECKING:
     from lbry.conf import Config
     from lbry.extras.daemon.analytics import AnalyticsManager
     from lbry.extras.daemon.storage import SQLiteStorage
-    from lbry.wallet import WalletManager, Output
+    from lbry.wallet import WalletManager
     from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager

 log = logging.getLogger(__name__)
@@ -48,10 +50,10 @@ class FileManager:
             await manager.started.wait()
         self.started.set()

-    def stop(self):
+    async def stop(self):
         for manager in self.source_managers.values():
             # fixme: pop or not?
-            manager.stop()
+            await manager.stop()
         self.started.clear()

     @cache_concurrent
@@ -81,18 +83,22 @@ class FileManager:
         payment = None
         try:
             # resolve the claim
-            if not URL.parse(uri).has_stream:
-                raise ResolveError("cannot download a channel claim, specify a /path")
+            try:
+                if not URL.parse(uri).has_stream:
+                    raise InvalidStreamURLError(uri)
+            except ValueError:
+                raise InvalidStreamURLError(uri)
             try:
                 resolved_result = await asyncio.wait_for(
-                    self.wallet_manager.ledger.resolve(wallet.accounts, [uri], include_purchase_receipt=True),
-                    resolve_timeout
+                    self.wallet_manager.ledger.resolve(
+                        wallet.accounts, [uri],
+                        include_purchase_receipt=True,
+                        include_is_my_output=True
+                    ), resolve_timeout
                 )
             except asyncio.TimeoutError:
                 raise ResolveTimeoutError(uri)
             except Exception as err:
-                if isinstance(err, asyncio.CancelledError):
-                    raise
                 log.exception("Unexpected error resolving stream:")
                 raise ResolveError(f"Unexpected error resolving stream: {str(err)}")
             if 'error' in resolved_result:
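The reworked guard treats a syntactically invalid URL (the parser raising ValueError) and a valid channel-only URL the same way, surfacing both as InvalidStreamURLError. A stand-in sketch of that control flow; the parser and error class here are toy substitutes, not the lbry imports:

```python
class InvalidStreamURLError(Exception):
    pass

def parse(uri: str):
    # toy parser: "channel/stream" has a stream part, a bare "channel" does not
    if not uri or " " in uri:
        raise ValueError(uri)
    return uri.split("/", 1)

def ensure_stream_url(uri: str) -> None:
    try:
        if len(parse(uri)) != 2:            # no stream part
            raise InvalidStreamURLError(uri)
    except ValueError:
        raise InvalidStreamURLError(uri)

ensure_stream_url("@channel/some-stream")   # ok
for bad in ("@channel-only", "not a url"):
    try:
        ensure_stream_url(bad)
    except InvalidStreamURLError as e:
        print("rejected:", e)
```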
@@ -114,9 +120,11 @@ class FileManager:
             if claim.stream.source.bt_infohash:
                 source_manager = self.source_managers['torrent']
                 existing = source_manager.get_filtered(bt_infohash=claim.stream.source.bt_infohash)
-            else:
+            elif claim.stream.source.sd_hash:
                 source_manager = self.source_managers['stream']
                 existing = source_manager.get_filtered(sd_hash=claim.stream.source.sd_hash)
+            else:
+                raise ResolveError(f"There is nothing to download at {uri} - Source is unknown or unset")

             # resume or update an existing stream, if the stream changed: download it and delete the old one after
             to_replace, updated_stream = None, None
@@ -170,7 +178,14 @@ class FileManager:
             # pay fee
             ####################

-            if not to_replace and txo.has_price and not txo.purchase_receipt:
+            needs_purchasing = (
+                not to_replace and
+                not txo.is_my_output and
+                txo.has_price and
+                not txo.purchase_receipt
+            )
+
+            if needs_purchasing:
                 payment = await self.wallet_manager.create_purchase_transaction(
                     wallet.accounts, txo, exchange_rate_manager
                 )
@@ -178,21 +193,24 @@ class FileManager:
             ####################
             # make downloader and wait for start
             ####################
+            # temporary with fields we know so downloader can start. Missing fields are populated later.
+            stored_claim = StoredContentClaim(outpoint=outpoint, claim_id=txo.claim_id, name=txo.claim_name,
+                                              amount=txo.amount, height=txo.tx_ref.height,
+                                              serialized=claim.to_bytes().hex())
+
             if not claim.stream.source.bt_infohash:
                 # fixme: this shouldnt be here
                 stream = ManagedStream(
                     self.loop, self.config, source_manager.blob_manager, claim.stream.source.sd_hash,
                     download_directory, file_name, ManagedStream.STATUS_RUNNING, content_fee=payment,
-                    analytics_manager=self.analytics_manager
+                    analytics_manager=self.analytics_manager, claim=stored_claim
                 )
                 stream.downloader.node = source_manager.node
             else:
                 stream = TorrentSource(
                     self.loop, self.config, self.storage, identifier=claim.stream.source.bt_infohash,
                     file_name=file_name, download_directory=download_directory or self.config.download_dir,
-                    status=ManagedStream.STATUS_RUNNING,
-                    analytics_manager=self.analytics_manager,
+                    status=ManagedStream.STATUS_RUNNING, claim=stored_claim, analytics_manager=self.analytics_manager,
                     torrent_session=source_manager.torrent_session
                 )
             log.info("starting download for %s", uri)
@@ -224,15 +242,14 @@ class FileManager:
                 claim_info = await self.storage.get_content_claim_for_torrent(stream.identifier)
                 stream.set_claim(claim_info, claim)
             if save_file:
-                await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download),
-                                       loop=self.loop)
+                await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download))
             return stream
         except asyncio.TimeoutError:
             error = DownloadDataTimeoutError(stream.sd_hash)
             raise error
-        except Exception as err:  # forgive data timeout, don't delete stream
+        except (Exception, asyncio.CancelledError) as err:  # forgive data timeout, don't delete stream
             expected = (DownloadSDTimeoutError, DownloadDataTimeoutError, InsufficientFundsError,
-                        KeyFeeAboveMaxAllowedError)
+                        KeyFeeAboveMaxAllowedError, ResolveError, InvalidStreamURLError)
             if isinstance(err, expected):
                 log.warning("Failed to download %s: %s", uri, str(err))
             elif isinstance(err, asyncio.CancelledError):
@@ -45,11 +45,12 @@ class ManagedDownloadSource:
         self.purchase_receipt = None
         self._added_on = added_on
         self.analytics_manager = analytics_manager
+        self.downloader = None

-        self.saving = asyncio.Event(loop=self.loop)
-        self.finished_writing = asyncio.Event(loop=self.loop)
-        self.started_writing = asyncio.Event(loop=self.loop)
-        self.finished_write_attempt = asyncio.Event(loop=self.loop)
+        self.saving = asyncio.Event()
+        self.finished_writing = asyncio.Event()
+        self.started_writing = asyncio.Event()
+        self.finished_write_attempt = asyncio.Event()

     # @classmethod
     # async def create(cls, loop: asyncio.AbstractEventLoop, config: 'Config', file_path: str,
@@ -66,7 +67,7 @@ class ManagedDownloadSource:
     async def save_file(self, file_name: Optional[str] = None, download_directory: Optional[str] = None):
         raise NotImplementedError()

-    def stop_tasks(self):
+    async def stop_tasks(self):
         raise NotImplementedError()

     def set_claim(self, claim_info: typing.Dict, claim: 'Claim'):
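Dropping loop= from asyncio.Event matters because the argument was deprecated in Python 3.8 and removed in 3.10; events now bind to the running loop when they are first used. A minimal illustration:

```python
import asyncio

async def main():
    event = asyncio.Event()  # no loop= argument: deprecated in 3.8, removed in 3.10
    asyncio.get_running_loop().call_later(0.01, event.set)
    await event.wait()
    print("event fired")

asyncio.run(main())
```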
@@ -54,16 +54,16 @@ class SourceManager:
         self.storage = storage
         self.analytics_manager = analytics_manager
         self._sources: typing.Dict[str, ManagedDownloadSource] = {}
-        self.started = asyncio.Event(loop=self.loop)
+        self.started = asyncio.Event()

     def add(self, source: ManagedDownloadSource):
         self._sources[source.identifier] = source

-    def remove(self, source: ManagedDownloadSource):
+    async def remove(self, source: ManagedDownloadSource):
         if source.identifier not in self._sources:
             return
         self._sources.pop(source.identifier)
-        source.stop_tasks()
+        await source.stop_tasks()

     async def initialize_from_database(self):
         raise NotImplementedError()
@@ -72,10 +72,10 @@ class SourceManager:
         await self.initialize_from_database()
         self.started.set()

-    def stop(self):
+    async def stop(self):
         while self._sources:
             _, source = self._sources.popitem()
-            source.stop_tasks()
+            await source.stop_tasks()
         self.started.clear()

     async def create(self, file_path: str, key: Optional[bytes] = None,
@@ -83,7 +83,7 @@ class SourceManager:
         raise NotImplementedError()

     async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):
-        self.remove(source)
+        await self.remove(source)
         if delete_file and source.output_file_exists:
             os.remove(source.full_path)

@@ -132,7 +132,7 @@ class SourceManager:
         else:
             streams = list(self._sources.values())
         if sort_by:
-            streams.sort(key=lambda s: getattr(s, sort_by))
+            streams.sort(key=lambda s: getattr(s, sort_by) or "")
         if reverse:
             streams.reverse()
         return streams
@@ -69,8 +69,8 @@ class VideoFileAnalyzer:
             version = str(e)
         if code != 0 or not version.startswith("ffmpeg"):
             log.warning("Unable to run ffmpeg, but it was requested. Code: %d; Message: %s", code, version)
-            raise FileNotFoundError(f"Unable to locate or run ffmpeg or ffprobe. Please install FFmpeg "
-                                    f"and ensure that it is callable via PATH or conf.ffmpeg_path")
+            raise FileNotFoundError("Unable to locate or run ffmpeg or ffprobe. Please install FFmpeg "
+                                    "and ensure that it is callable via PATH or conf.ffmpeg_path")
         log.debug("Using %s at %s", version.splitlines()[0].split(" Copyright")[0], self._which_ffmpeg)
         return version
@@ -1,14 +1,44 @@
+import time
 import logging
+import asyncio
+import asyncio.tasks
 from aiohttp import web
 from prometheus_client import generate_latest as prom_generate_latest
+from prometheus_client import Counter, Histogram, Gauge
+
+
+PROBES_IN_FLIGHT = Counter("probes_in_flight", "Number of loop probes in flight", namespace='asyncio')
+PROBES_FINISHED = Counter("probes_finished", "Number of finished loop probes", namespace='asyncio')
+PROBE_TIMES = Histogram("probe_times", "Loop probe times", namespace='asyncio')
+TASK_COUNT = Gauge("running_tasks", "Number of running tasks", namespace='asyncio')
+
+
+def get_loop_metrics(delay=1):
+    loop = asyncio.get_event_loop()
+
+    def callback(started):
+        PROBE_TIMES.observe(time.perf_counter() - started - delay)
+        PROBES_FINISHED.inc()
+
+    async def monitor_loop_responsiveness():
+        while True:
+            now = time.perf_counter()
+            loop.call_later(delay, callback, now)
+            PROBES_IN_FLIGHT.inc()
+            TASK_COUNT.set(len(asyncio.tasks._all_tasks))
+            await asyncio.sleep(delay)
+
+    return loop.create_task(monitor_loop_responsiveness())
+
+
 class PrometheusServer:
     def __init__(self, logger=None):
         self.runner = None
         self.logger = logger or logging.getLogger(__name__)
+        self._monitor_loop_task = None

     async def start(self, interface: str, port: int):
+        self.logger.info("start prometheus metrics")
         prom_app = web.Application()
         prom_app.router.add_get('/metrics', self.handle_metrics_get_request)
         self.runner = web.AppRunner(prom_app)
@@ -16,7 +46,10 @@ class PrometheusServer:

         metrics_site = web.TCPSite(self.runner, interface, port, shutdown_timeout=.5)
         await metrics_site.start()
-        self.logger.info('metrics server listening on %s:%i', *metrics_site._server.sockets[0].getsockname()[:2])
+        self.logger.info(
+            'prometheus metrics server listening on %s:%i', *metrics_site._server.sockets[0].getsockname()[:2]
+        )
+        self._monitor_loop_task = get_loop_metrics()

     async def handle_metrics_get_request(self, request: web.Request):
         try:
@@ -29,4 +62,7 @@ class PrometheusServer:
             raise

     async def stop(self):
+        if self._monitor_loop_task and not self._monitor_loop_task.done():
+            self._monitor_loop_task.cancel()
+            self._monitor_loop_task = None
         await self.runner.cleanup()
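The loop probe works by scheduling a callback delay seconds ahead and measuring how much later it actually fires; that overshoot is the event-loop lag recorded in the histogram. A trimmed-down, self-contained version of the same idea that just prints the measured lag instead of updating Prometheus metrics:

```python
import asyncio
import time

async def monitor_loop_responsiveness(delay=0.1, probes=3):
    loop = asyncio.get_running_loop()

    def callback(started):
        # How much later than `delay` the callback ran == current event-loop lag.
        print(f"loop lag: {time.perf_counter() - started - delay:.6f}s")

    for _ in range(probes):
        loop.call_later(delay, callback, time.perf_counter())
        await asyncio.sleep(delay * 2)  # sleep past the probe so the callback always fires

asyncio.run(monitor_loop_responsiveness())
```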
@@ -2,4 +2,5 @@ build:
 	rm types/v2/* -rf
 	touch types/v2/__init__.py
 	cd types/v2/ && protoc --python_out=. -I ../../../../../types/v2/proto/ ../../../../../types/v2/proto/*.proto
+	cd types/v2/ && cp ../../../../../types/jsonschema/* ./
 	sed -e 's/^import\ \(.*\)_pb2\ /from . import\ \1_pb2\ /g' -i types/v2/*.py
lbry/schema/README.md (new file, 24 lines)
@@ -0,0 +1,24 @@
+Schema
+=====
+
+Those files are generated from the [types repo](https://github.com/lbryio/types). If you are modifying/adding a new type, make sure it is cloned in the same root folder as the SDK repo, like:
+
+```
+repos/
+  - lbry-sdk/
+  - types/
+```
+
+Then, [download protoc 3.2.0](https://github.com/protocolbuffers/protobuf/releases/tag/v3.2.0), add it to your PATH. On linux it is:
+
+```bash
+cd ~/.local/bin
+wget https://github.com/protocolbuffers/protobuf/releases/download/v3.2.0/protoc-3.2.0-linux-x86_64.zip
+unzip protoc-3.2.0-linux-x86_64.zip bin/protoc -d..
+```
+
+Finally, `make` should update everything in place.
+
+
+### Why protoc 3.2.0?
+Different/newer versions will generate larger diffs and we need to make sure they are good. In theory, we can just update to latest and it will all work, but it is a good practice to check blockchain data and retro compatibility before bumping versions (if you do, please update this section!).
@@ -10,6 +10,7 @@ from google.protobuf.json_format import MessageToDict

 from lbry.crypto.base58 import Base58
 from lbry.constants import COIN
+from lbry.error import MissingPublishedFileError, EmptyPublishedFileError

 from lbry.schema.mime_types import guess_media_type
 from lbry.schema.base import Metadata, BaseMessageList
@@ -32,6 +33,17 @@ def calculate_sha384_file_hash(file_path):
     return sha384.digest()


+def country_int_to_str(country: int) -> str:
+    r = LocationMessage.Country.Name(country)
+    return r[1:] if r.startswith('R') else r
+
+
+def country_str_to_int(country: str) -> int:
+    if len(country) == 3:
+        country = 'R' + country
+    return LocationMessage.Country.Value(country)
+
+
 class Dimmensional(Metadata):

     __slots__ = ()
@@ -128,10 +140,10 @@ class Source(Metadata):
         self.name = os.path.basename(file_path)
         self.media_type, stream_type = guess_media_type(file_path)
         if not os.path.isfile(file_path):
-            raise Exception(f"File does not exist: {file_path}")
+            raise MissingPublishedFileError(file_path)
         self.size = os.path.getsize(file_path)
         if self.size == 0:
-            raise Exception(f"Cannot publish empty file: {file_path}")
+            raise EmptyPublishedFileError(file_path)
         self.file_hash_bytes = calculate_sha384_file_hash(file_path)
         return stream_type

@@ -423,14 +435,11 @@ class Language(Metadata):
     @property
     def region(self) -> str:
         if self.message.region:
-            r = LocationMessage.Country.Name(self.message.region)
-            return r[1:] if r.startswith('R') else r
+            return country_int_to_str(self.message.region)

     @region.setter
     def region(self, region: str):
-        if len(region) == 3:
-            region = 'R'+region
-        self.message.region = LocationMessage.Country.Value(region)
+        self.message.region = country_str_to_int(region)


 class LanguageList(BaseMessageList[Language]):
@@ -2,6 +2,9 @@ import logging
 from typing import List
 from binascii import hexlify, unhexlify

+from asn1crypto.keys import PublicKeyInfo
+from coincurve import PublicKey as cPublicKey
+
 from google.protobuf.json_format import MessageToDict
 from google.protobuf.message import DecodeError
 from hachoir.core.log import log as hachoir_log
@@ -303,6 +306,10 @@ class Stream(BaseClaim):
     def has_fee(self) -> bool:
         return self.message.HasField('fee')

+    @property
+    def has_source(self) -> bool:
+        return self.message.HasField('source')
+
     @property
     def source(self) -> Source:
         return Source(self.message.source)
@@ -342,7 +349,7 @@ class Channel(BaseClaim):

     @property
     def public_key(self) -> str:
-        return hexlify(self.message.public_key).decode()
+        return hexlify(self.public_key_bytes).decode()

     @public_key.setter
     def public_key(self, sd_public_key: str):
@@ -350,7 +357,11 @@ class Channel(BaseClaim):

     @property
     def public_key_bytes(self) -> bytes:
-        return self.message.public_key
+        if len(self.message.public_key) == 33:
+            return self.message.public_key
+        public_key_info = PublicKeyInfo.load(self.message.public_key)
+        public_key = cPublicKey(public_key_info.native['public_key'])
+        return public_key.format(compressed=True)

     @public_key_bytes.setter
     def public_key_bytes(self, public_key: bytes):
@@ -387,6 +398,12 @@ class Repost(BaseClaim):

     claim_type = Claim.REPOST

+    def to_dict(self):
+        claim = super().to_dict()
+        if claim.pop('claim_hash', None):
+            claim['claim_id'] = self.reference.claim_id
+        return claim
+
     @property
     def reference(self) -> ClaimReference:
         return ClaimReference(self.message)
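public_key_bytes now normalizes channel keys: a 33-byte value is already a compressed secp256k1 point, anything else is treated as DER-encoded SubjectPublicKeyInfo and re-encoded in compressed form via asn1crypto and coincurve. A hedged sketch of that conversion outside the Channel class (the key generation at the bottom exists only to exercise the pass-through branch):

```python
from asn1crypto.keys import PublicKeyInfo
from coincurve import PrivateKey, PublicKey as cPublicKey

def compress_channel_key(raw: bytes) -> bytes:
    # Mirrors the property in the hunk above: pass 33-byte compressed keys through,
    # otherwise load DER SubjectPublicKeyInfo and re-encode as a compressed point.
    if len(raw) == 33:
        return raw
    public_key_info = PublicKeyInfo.load(raw)
    return cPublicKey(public_key_info.native['public_key']).format(compressed=True)

# Only the pass-through branch is exercised here; a DER-encoded key would take
# the asn1crypto/coincurve path instead.
compressed = PrivateKey().public_key.format(compressed=True)
assert compress_channel_key(compressed) == compressed
```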
@@ -1,4 +1,6 @@
 import os
+import filetype
+import logging

 types_map = {
     # http://www.iana.org/assignments/media-types
@@ -46,8 +48,8 @@ types_map = {
     '.ksh': ('text/plain', 'document'),
     '.latex': ('application/x-latex', 'binary'),
     '.m1v': ('video/mpeg', 'video'),
-    '.m3u': ('application/vnd.apple.mpegurl', 'audio'),
-    '.m3u8': ('application/vnd.apple.mpegurl', 'video'),
+    '.m3u': ('application/x-mpegurl', 'audio'),
+    '.m3u8': ('application/x-mpegurl', 'video'),
     '.man': ('application/x-troff-man', 'document'),
     '.markdown': ('text/markdown', 'document'),
     '.md': ('text/markdown', 'document'),
@@ -66,6 +68,7 @@ types_map = {
     '.mpeg': ('video/mpeg', 'video'),
     '.mpg': ('video/mpeg', 'video'),
     '.ms': ('application/x-troff-ms', 'binary'),
+    '.m4s': ('video/iso.segment', 'binary'),
     '.nc': ('application/x-netcdf', 'binary'),
     '.nws': ('message/rfc822', 'document'),
     '.o': ('application/octet-stream', 'binary'),
@@ -119,10 +122,12 @@ types_map = {
     '.tif': ('image/tiff', 'image'),
     '.tiff': ('image/tiff', 'image'),
     '.tr': ('application/x-troff', 'binary'),
+    '.ts': ('video/mp2t', 'video'),
     '.tsv': ('text/tab-separated-values', 'document'),
     '.txt': ('text/plain', 'document'),
     '.ustar': ('application/x-ustar', 'binary'),
     '.vcf': ('text/x-vcard', 'document'),
+    '.vtt': ('text/vtt', 'document'),
     '.wav': ('audio/x-wav', 'audio'),
     '.webm': ('video/webm', 'video'),
     '.wiz': ('application/msword', 'document'),
@@ -163,10 +168,38 @@ types_map = {
     '.wmv': ('video/x-ms-wmv', 'video')
 }

+# maps detected extensions to the possible analogs
+# i.e. .cbz file is actually a .zip
+synonyms_map = {
+    '.zip': ['.cbz'],
+    '.rar': ['.cbr'],
+    '.ar': ['.a']
+}
+
+log = logging.getLogger(__name__)
+
+
 def guess_media_type(path):
     _, ext = os.path.splitext(path)
     extension = ext.strip().lower()
+
+    try:
+        kind = filetype.guess(path)
+        if kind:
+            real_extension = f".{kind.extension}"
+
+            if extension != real_extension:
+                if extension:
+                    log.warning(f"file extension does not match it's contents: {path}, identified as {real_extension}")
+                else:
+                    log.debug(f"file {path} does not have extension, identified by it's contents as {real_extension}")
+
+                if extension not in synonyms_map.get(real_extension, []):
+                    extension = real_extension
+
+    except OSError as error:
+        pass
+
     if extension[1:]:
         if extension in types_map:
             return types_map[extension]
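guess_media_type now cross-checks the claimed extension against content sniffing from the filetype package and lets synonyms_map forgive aliases such as .cbz for .zip. A small stand-alone sketch of the same decision, assuming the filetype package is installed; the helper name is made up for illustration:

```python
import filetype

synonyms_map = {'.zip': ['.cbz'], '.rar': ['.cbr'], '.ar': ['.a']}

def effective_extension(path: str, claimed: str) -> str:
    # Hypothetical helper: prefer the sniffed extension unless the claimed one
    # is listed as a synonym of it (e.g. ".cbz" really is a zip archive).
    kind = filetype.guess(path)
    if kind is None:
        return claimed
    real = f".{kind.extension}"
    if claimed != real and claimed not in synonyms_map.get(real, []):
        return real
    return claimed

# effective_extension("book.cbz", ".cbz") keeps ".cbz" even though the sniffer
# reports ".zip"; a mislabelled ".png" that is really a jpeg flips to ".jpg".
```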
@@ -1,6 +1,5 @@
 import base64
-import struct
-from typing import List
+from typing import List, Union, Optional, NamedTuple
 from binascii import hexlify
 from itertools import chain

@@ -16,47 +15,70 @@ BLOCKED = ErrorMessage.Code.Name(ErrorMessage.BLOCKED)
 def set_reference(reference, claim_hash, rows):
     if claim_hash:
         for txo in rows:
-            if claim_hash == txo['claim_hash']:
-                reference.tx_hash = txo['txo_hash'][:32]
-                reference.nout = struct.unpack('<I', txo['txo_hash'][32:])[0]
-                reference.height = txo['height']
+            if claim_hash == txo.claim_hash:
+                reference.tx_hash = txo.tx_hash
+                reference.nout = txo.position
+                reference.height = txo.height
                 return


+class ResolveResult(NamedTuple):
+    name: str
+    normalized_name: str
+    claim_hash: bytes
+    tx_num: int
+    position: int
+    tx_hash: bytes
+    height: int
+    amount: int
+    short_url: str
+    is_controlling: bool
+    canonical_url: str
+    creation_height: int
+    activation_height: int
+    expiration_height: int
+    effective_amount: int
+    support_amount: int
+    reposted: int
+    last_takeover_height: Optional[int]
+    claims_in_channel: Optional[int]
+    channel_hash: Optional[bytes]
+    reposted_claim_hash: Optional[bytes]
+    signature_valid: Optional[bool]
+
+
 class Censor:

-    __slots__ = 'streams', 'channels', 'censored', 'total'
-
-    def __init__(self, streams: dict = None, channels: dict = None):
-        self.streams = streams or {}
-        self.channels = channels or {}
-        self.censored = {}
-        self.total = 0
-
-    def censor(self, row) -> bool:
-        was_censored = False
-        for claim_hash, lookup in (
-                (row['claim_hash'], self.streams),
-                (row['claim_hash'], self.channels),
-                (row['channel_hash'], self.channels),
-                (row['reposted_claim_hash'], self.streams),
-                (row['reposted_claim_hash'], self.channels)):
-            censoring_channel_hash = lookup.get(claim_hash)
-            if censoring_channel_hash:
-                was_censored = True
-                self.censored.setdefault(censoring_channel_hash, 0)
-                self.censored[censoring_channel_hash] += 1
-                break
-        if was_censored:
-            self.total += 1
-        return was_censored
-
-    def to_message(self, outputs: OutputsMessage, extra_txo_rows):
-        outputs.blocked_total = self.total
+    NOT_CENSORED = 0
+    SEARCH = 1
+    RESOLVE = 2
+
+    __slots__ = 'censor_type', 'censored'
+
+    def __init__(self, censor_type):
+        self.censor_type = censor_type
+        self.censored = {}
+
+    def is_censored(self, row):
+        return (row.get('censor_type') or self.NOT_CENSORED) >= self.censor_type
+
+    def apply(self, rows):
+        return [row for row in rows if not self.censor(row)]
+
+    def censor(self, row) -> Optional[bytes]:
+        if self.is_censored(row):
+            censoring_channel_hash = bytes.fromhex(row['censoring_channel_id'])[::-1]
+            self.censored.setdefault(censoring_channel_hash, set())
+            self.censored[censoring_channel_hash].add(row['tx_hash'])
+            return censoring_channel_hash
+        return None
+
+    def to_message(self, outputs: OutputsMessage, extra_txo_rows: dict):
         for censoring_channel_hash, count in self.censored.items():
             blocked = outputs.blocked.add()
-            blocked.count = count
+            blocked.count = len(count)
             set_reference(blocked.channel, censoring_channel_hash, extra_txo_rows)
+            outputs.blocked_total += len(count)


 class Outputs:
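The reworked Censor no longer carries block lists itself: rows arrive pre-tagged with a censor_type, the instance compares that tag against its own level (a SEARCH censor filters everything a RESOLVE censor would, plus search-only blocks), and it tallies censored tx hashes per censoring channel. A standalone sketch of that flow with made-up rows:

```python
from typing import Optional

class Censor:
    NOT_CENSORED = 0
    SEARCH = 1
    RESOLVE = 2

    __slots__ = 'censor_type', 'censored'

    def __init__(self, censor_type):
        self.censor_type = censor_type
        self.censored = {}

    def is_censored(self, row):
        return (row.get('censor_type') or self.NOT_CENSORED) >= self.censor_type

    def censor(self, row) -> Optional[bytes]:
        if self.is_censored(row):
            censoring_channel_hash = bytes.fromhex(row['censoring_channel_id'])[::-1]
            self.censored.setdefault(censoring_channel_hash, set())
            self.censored[censoring_channel_hash].add(row['tx_hash'])
            return censoring_channel_hash
        return None

# Made-up rows: one tagged as blocked at SEARCH level, one untagged.
rows = [
    {'censor_type': 1, 'censoring_channel_id': 'ab' * 20, 'tx_hash': b'\x01' * 32},
    {'censor_type': 0, 'censoring_channel_id': '', 'tx_hash': b'\x02' * 32},
]
search_censor = Censor(Censor.SEARCH)
visible = [row for row in rows if not search_censor.censor(row)]
print(len(visible))                                                   # 1 row survives
print({k.hex(): len(v) for k, v in search_censor.censored.items()})   # one channel blocked one tx
```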
@@ -120,10 +142,10 @@ class Outputs:
             'expiration_height': claim.expiration_height,
             'effective_amount': claim.effective_amount,
             'support_amount': claim.support_amount,
-            'trending_group': claim.trending_group,
-            'trending_mixed': claim.trending_mixed,
-            'trending_local': claim.trending_local,
-            'trending_global': claim.trending_global,
+            # 'trending_group': claim.trending_group,
+            # 'trending_mixed': claim.trending_mixed,
+            # 'trending_local': claim.trending_local,
+            # 'trending_global': claim.trending_global,
         }
         if claim.HasField('channel'):
             txo.channel = tx_map[claim.channel.tx_hash].outputs[claim.channel.nout]
@ -167,44 +189,54 @@ class Outputs:
|
||||||
page.total = total
|
page.total = total
|
||||||
if blocked is not None:
|
if blocked is not None:
|
||||||
blocked.to_message(page, extra_txo_rows)
|
blocked.to_message(page, extra_txo_rows)
|
||||||
for row in txo_rows:
|
|
||||||
cls.row_to_message(row, page.txos.add(), extra_txo_rows)
|
|
||||||
for row in extra_txo_rows:
|
for row in extra_txo_rows:
|
||||||
cls.row_to_message(row, page.extra_txos.add(), extra_txo_rows)
|
txo_message: 'OutputsMessage' = page.extra_txos.add()
|
||||||
|
if not isinstance(row, Exception):
|
||||||
|
if row.channel_hash:
|
||||||
|
set_reference(txo_message.claim.channel, row.channel_hash, extra_txo_rows)
|
||||||
|
if row.reposted_claim_hash:
|
||||||
|
set_reference(txo_message.claim.repost, row.reposted_claim_hash, extra_txo_rows)
|
||||||
|
+            cls.encode_txo(txo_message, row)
         for row in txo_rows:
+            # cls.row_to_message(row, page.txos.add(), extra_txo_rows)
+            txo_message: 'OutputsMessage' = page.txos.add()
+            cls.encode_txo(txo_message, row)
+            if not isinstance(row, Exception):
+                if row.channel_hash:
+                    set_reference(txo_message.claim.channel, row.channel_hash, extra_txo_rows)
+                if row.reposted_claim_hash:
+                    set_reference(txo_message.claim.repost, row.reposted_claim_hash, extra_txo_rows)
+            elif isinstance(row, ResolveCensoredError):
+                set_reference(txo_message.error.blocked.channel, row.censor_id, extra_txo_rows)
         return page.SerializeToString()

     @classmethod
-    def row_to_message(cls, txo, txo_message, extra_txo_rows):
-        if isinstance(txo, Exception):
-            txo_message.error.text = txo.args[0]
-            if isinstance(txo, ValueError):
+    def encode_txo(cls, txo_message, resolve_result: Union['ResolveResult', Exception]):
+        if isinstance(resolve_result, Exception):
+            txo_message.error.text = resolve_result.args[0]
+            if isinstance(resolve_result, ValueError):
                 txo_message.error.code = ErrorMessage.INVALID
-            elif isinstance(txo, LookupError):
+            elif isinstance(resolve_result, LookupError):
                 txo_message.error.code = ErrorMessage.NOT_FOUND
-            elif isinstance(txo, ResolveCensoredError):
+            elif isinstance(resolve_result, ResolveCensoredError):
                 txo_message.error.code = ErrorMessage.BLOCKED
-                set_reference(txo_message.error.blocked.channel, txo.censor_hash, extra_txo_rows)
             return
-        txo_message.tx_hash = txo['txo_hash'][:32]
-        txo_message.nout, = struct.unpack('<I', txo['txo_hash'][32:])
-        txo_message.height = txo['height']
-        txo_message.claim.short_url = txo['short_url']
-        txo_message.claim.reposted = txo['reposted']
-        if txo['canonical_url'] is not None:
-            txo_message.claim.canonical_url = txo['canonical_url']
-        txo_message.claim.is_controlling = bool(txo['is_controlling'])
-        if txo['last_take_over_height'] is not None:
-            txo_message.claim.take_over_height = txo['last_take_over_height']
-        txo_message.claim.creation_height = txo['creation_height']
-        txo_message.claim.activation_height = txo['activation_height']
-        txo_message.claim.expiration_height = txo['expiration_height']
-        if txo['claims_in_channel'] is not None:
-            txo_message.claim.claims_in_channel = txo['claims_in_channel']
-        txo_message.claim.effective_amount = txo['effective_amount']
-        txo_message.claim.support_amount = txo['support_amount']
-        txo_message.claim.trending_group = txo['trending_group']
-        txo_message.claim.trending_mixed = txo['trending_mixed']
-        txo_message.claim.trending_local = txo['trending_local']
-        txo_message.claim.trending_global = txo['trending_global']
-        set_reference(txo_message.claim.channel, txo['channel_hash'], extra_txo_rows)
-        set_reference(txo_message.claim.repost, txo['reposted_claim_hash'], extra_txo_rows)
+        txo_message.tx_hash = resolve_result.tx_hash
+        txo_message.nout = resolve_result.position
+        txo_message.height = resolve_result.height
+        txo_message.claim.short_url = resolve_result.short_url
+        txo_message.claim.reposted = resolve_result.reposted
+        txo_message.claim.is_controlling = resolve_result.is_controlling
+        txo_message.claim.creation_height = resolve_result.creation_height
+        txo_message.claim.activation_height = resolve_result.activation_height
+        txo_message.claim.expiration_height = resolve_result.expiration_height
+        txo_message.claim.effective_amount = resolve_result.effective_amount
+        txo_message.claim.support_amount = resolve_result.support_amount
+        if resolve_result.canonical_url is not None:
+            txo_message.claim.canonical_url = resolve_result.canonical_url
+        if resolve_result.last_takeover_height is not None:
+            txo_message.claim.take_over_height = resolve_result.last_takeover_height
+        if resolve_result.claims_in_channel is not None:
+            txo_message.claim.claims_in_channel = resolve_result.claims_in_channel
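For orientation, a minimal sketch of how the reworked encode_txo classmethod can be exercised; the import paths are assumed from this diff and the error value is purely illustrative:

# hypothetical usage, not part of the change itself
from lbry.schema.result import Outputs
from lbry.schema.types.v2.result_pb2 import Outputs as OutputsMessage

page = OutputsMessage()
txo_message = page.txos.add()
# errors travel through the same encoding path as successful resolves
Outputs.encode_txo(txo_message, LookupError('claim not found'))
assert txo_message.error.text == 'claim not found'
assert txo_message.error.code == 1  # NOT_FOUND in the Error.Code enum below
wire_bytes = page.SerializeToString()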
@@ -13,3 +13,11 @@ class Support(Signable):
     @emoji.setter
     def emoji(self, emoji: str):
         self.message.emoji = emoji
+
+    @property
+    def comment(self) -> str:
+        return self.message.comment
+
+    @comment.setter
+    def comment(self, comment: str):
+        self.message.comment = comment
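The Support message now carries a free-form comment alongside the existing emoji; a rough usage sketch (constructor details of the Signable wrapper are assumed):

from lbry.schema.support import Support  # assumed import path

support = Support()
support.emoji = '🚀'
support.comment = 'great video!'
assert support.comment == 'great video!'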
@@ -1,13 +1,11 @@
+# -*- coding: utf-8 -*-
 # Generated by the protocol buffer compiler. DO NOT EDIT!
 # source: result.proto
+"""Generated protocol buffer code."""
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
 from google.protobuf import descriptor as _descriptor
 from google.protobuf import message as _message
 from google.protobuf import reflection as _reflection
 from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
 # @@protoc_insertion_point(imports)

 _sym_db = _symbol_database.Default()
@@ -19,9 +17,10 @@ DESCRIPTOR = _descriptor.FileDescriptor(
   name='result.proto',
   package='pb',
   syntax='proto3',
serialized_pb=_b('\n\x0cresult.proto\x12\x02pb\"\x97\x01\n\x07Outputs\x12\x18\n\x04txos\x18\x01 \x03(\x0b\x32\n.pb.Output\x12\x1e\n\nextra_txos\x18\x02 \x03(\x0b\x32\n.pb.Output\x12\r\n\x05total\x18\x03 \x01(\r\x12\x0e\n\x06offset\x18\x04 \x01(\r\x12\x1c\n\x07\x62locked\x18\x05 \x03(\x0b\x32\x0b.pb.Blocked\x12\x15\n\rblocked_total\x18\x06 \x01(\r\"{\n\x06Output\x12\x0f\n\x07tx_hash\x18\x01 \x01(\x0c\x12\x0c\n\x04nout\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x1e\n\x05\x63laim\x18\x07 \x01(\x0b\x32\r.pb.ClaimMetaH\x00\x12\x1a\n\x05\x65rror\x18\x0f \x01(\x0b\x32\t.pb.ErrorH\x00\x42\x06\n\x04meta\"\xaf\x03\n\tClaimMeta\x12\x1b\n\x07\x63hannel\x18\x01 \x01(\x0b\x32\n.pb.Output\x12\x1a\n\x06repost\x18\x02 \x01(\x0b\x32\n.pb.Output\x12\x11\n\tshort_url\x18\x03 \x01(\t\x12\x15\n\rcanonical_url\x18\x04 \x01(\t\x12\x16\n\x0eis_controlling\x18\x05 \x01(\x08\x12\x18\n\x10take_over_height\x18\x06 \x01(\r\x12\x17\n\x0f\x63reation_height\x18\x07 \x01(\r\x12\x19\n\x11\x61\x63tivation_height\x18\x08 \x01(\r\x12\x19\n\x11\x65xpiration_height\x18\t \x01(\r\x12\x19\n\x11\x63laims_in_channel\x18\n \x01(\r\x12\x10\n\x08reposted\x18\x0b \x01(\r\x12\x18\n\x10\x65\x66\x66\x65\x63tive_amount\x18\x14 \x01(\x04\x12\x16\n\x0esupport_amount\x18\x15 \x01(\x04\x12\x16\n\x0etrending_group\x18\x16 \x01(\r\x12\x16\n\x0etrending_mixed\x18\x17 \x01(\x02\x12\x16\n\x0etrending_local\x18\x18 \x01(\x02\x12\x17\n\x0ftrending_global\x18\x19 \x01(\x02\"\x94\x01\n\x05\x45rror\x12\x1c\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x0e.pb.Error.Code\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x1c\n\x07\x62locked\x18\x03 \x01(\x0b\x32\x0b.pb.Blocked\"A\n\x04\x43ode\x12\x10\n\x0cUNKNOWN_CODE\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07\x42LOCKED\x10\x03\"5\n\x07\x42locked\x12\r\n\x05\x63ount\x18\x01 \x01(\r\x12\x1b\n\x07\x63hannel\x18\x02 \x01(\x0b\x32\n.pb.Outputb\x06proto3')
+  serialized_options=b'Z$github.com/lbryio/hub/protobuf/go/pb',
+  create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0cresult.proto\x12\x02pb\"\x97\x01\n\x07Outputs\x12\x18\n\x04txos\x18\x01 \x03(\x0b\x32\n.pb.Output\x12\x1e\n\nextra_txos\x18\x02 \x03(\x0b\x32\n.pb.Output\x12\r\n\x05total\x18\x03 \x01(\r\x12\x0e\n\x06offset\x18\x04 \x01(\r\x12\x1c\n\x07\x62locked\x18\x05 \x03(\x0b\x32\x0b.pb.Blocked\x12\x15\n\rblocked_total\x18\x06 \x01(\r\"{\n\x06Output\x12\x0f\n\x07tx_hash\x18\x01 \x01(\x0c\x12\x0c\n\x04nout\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x1e\n\x05\x63laim\x18\x07 \x01(\x0b\x32\r.pb.ClaimMetaH\x00\x12\x1a\n\x05\x65rror\x18\x0f \x01(\x0b\x32\t.pb.ErrorH\x00\x42\x06\n\x04meta\"\xe6\x02\n\tClaimMeta\x12\x1b\n\x07\x63hannel\x18\x01 \x01(\x0b\x32\n.pb.Output\x12\x1a\n\x06repost\x18\x02 \x01(\x0b\x32\n.pb.Output\x12\x11\n\tshort_url\x18\x03 \x01(\t\x12\x15\n\rcanonical_url\x18\x04 \x01(\t\x12\x16\n\x0eis_controlling\x18\x05 \x01(\x08\x12\x18\n\x10take_over_height\x18\x06 \x01(\r\x12\x17\n\x0f\x63reation_height\x18\x07 \x01(\r\x12\x19\n\x11\x61\x63tivation_height\x18\x08 \x01(\r\x12\x19\n\x11\x65xpiration_height\x18\t \x01(\r\x12\x19\n\x11\x63laims_in_channel\x18\n \x01(\r\x12\x10\n\x08reposted\x18\x0b \x01(\r\x12\x18\n\x10\x65\x66\x66\x65\x63tive_amount\x18\x14 \x01(\x04\x12\x16\n\x0esupport_amount\x18\x15 \x01(\x04\x12\x16\n\x0etrending_score\x18\x16 \x01(\x01\"\x94\x01\n\x05\x45rror\x12\x1c\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x0e.pb.Error.Code\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x1c\n\x07\x62locked\x18\x03 \x01(\x0b\x32\x0b.pb.Blocked\"A\n\x04\x43ode\x12\x10\n\x0cUNKNOWN_CODE\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07\x42LOCKED\x10\x03\"5\n\x07\x42locked\x12\r\n\x05\x63ount\x18\x01 \x01(\r\x12\x1b\n\x07\x63hannel\x18\x02 \x01(\x0b\x32\n.pb.OutputB&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3'
 )
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)

@@ -30,28 +29,33 @@ _ERROR_CODE = _descriptor.EnumDescriptor(
   full_name='pb.Error.Code',
   filename=None,
   file=DESCRIPTOR,
+  create_key=_descriptor._internal_create_key,
   values=[
     _descriptor.EnumValueDescriptor(
       name='UNKNOWN_CODE', index=0, number=0,
-      options=None,
-      type=None),
+      serialized_options=None,
+      type=None,
+      create_key=_descriptor._internal_create_key),
     _descriptor.EnumValueDescriptor(
       name='NOT_FOUND', index=1, number=1,
-      options=None,
-      type=None),
+      serialized_options=None,
+      type=None,
+      create_key=_descriptor._internal_create_key),
     _descriptor.EnumValueDescriptor(
       name='INVALID', index=2, number=2,
-      options=None,
-      type=None),
+      serialized_options=None,
+      type=None,
+      create_key=_descriptor._internal_create_key),
     _descriptor.EnumValueDescriptor(
       name='BLOCKED', index=3, number=3,
-      options=None,
-      type=None),
+      serialized_options=None,
+      type=None,
+      create_key=_descriptor._internal_create_key),
   ],
   containing_type=None,
-  options=None,
-  serialized_start=817,
-  serialized_end=882,
+  serialized_options=None,
+  serialized_start=744,
+  serialized_end=809,
 )
 _sym_db.RegisterEnumDescriptor(_ERROR_CODE)
@@ -62,6 +66,7 @@ _OUTPUTS = _descriptor.Descriptor(
   filename=None,
   file=DESCRIPTOR,
   containing_type=None,
+  create_key=_descriptor._internal_create_key,
   fields=[
     _descriptor.FieldDescriptor(
       name='txos', full_name='pb.Outputs.txos', index=0,
@@ -69,49 +74,49 @@ _OUTPUTS = _descriptor.Descriptor(
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='extra_txos', full_name='pb.Outputs.extra_txos', index=1,
       number=2, type=11, cpp_type=10, label=3,
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='total', full_name='pb.Outputs.total', index=2,
       number=3, type=13, cpp_type=3, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='offset', full_name='pb.Outputs.offset', index=3,
       number=4, type=13, cpp_type=3, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='blocked', full_name='pb.Outputs.blocked', index=4,
       number=5, type=11, cpp_type=10, label=3,
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='blocked_total', full_name='pb.Outputs.blocked_total', index=5,
       number=6, type=13, cpp_type=3, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
   ],
   extensions=[
   ],
   nested_types=[],
   enum_types=[
   ],
-  options=None,
+  serialized_options=None,
   is_extendable=False,
   syntax='proto3',
   extension_ranges=[],
@@ -128,56 +133,59 @@ _OUTPUT = _descriptor.Descriptor(
   filename=None,
   file=DESCRIPTOR,
   containing_type=None,
+  create_key=_descriptor._internal_create_key,
   fields=[
     _descriptor.FieldDescriptor(
       name='tx_hash', full_name='pb.Output.tx_hash', index=0,
       number=1, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b(""),
+      has_default_value=False, default_value=b"",
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='nout', full_name='pb.Output.nout', index=1,
       number=2, type=13, cpp_type=3, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='height', full_name='pb.Output.height', index=2,
       number=3, type=13, cpp_type=3, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='claim', full_name='pb.Output.claim', index=3,
       number=7, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='error', full_name='pb.Output.error', index=4,
       number=15, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
   ],
   extensions=[
   ],
   nested_types=[],
   enum_types=[
   ],
-  options=None,
+  serialized_options=None,
   is_extendable=False,
   syntax='proto3',
   extension_ranges=[],
   oneofs=[
     _descriptor.OneofDescriptor(
       name='meta', full_name='pb.Output.meta',
-      index=0, containing_type=None, fields=[]),
+      index=0, containing_type=None,
+      create_key=_descriptor._internal_create_key,
+      fields=[]),
   ],
   serialized_start=174,
   serialized_end=297,
@@ -190,6 +198,7 @@ _CLAIMMETA = _descriptor.Descriptor(
   filename=None,
   file=DESCRIPTOR,
   containing_type=None,
+  create_key=_descriptor._internal_create_key,
   fields=[
     _descriptor.FieldDescriptor(
       name='channel', full_name='pb.ClaimMeta.channel', index=0,
@@ -197,133 +206,112 @@ _CLAIMMETA = _descriptor.Descriptor(
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='repost', full_name='pb.ClaimMeta.repost', index=1,
       number=2, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='short_url', full_name='pb.ClaimMeta.short_url', index=2,
       number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='canonical_url', full_name='pb.ClaimMeta.canonical_url', index=3,
       number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='is_controlling', full_name='pb.ClaimMeta.is_controlling', index=4,
       number=5, type=8, cpp_type=7, label=1,
       has_default_value=False, default_value=False,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='take_over_height', full_name='pb.ClaimMeta.take_over_height', index=5,
       number=6, type=13, cpp_type=3, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='creation_height', full_name='pb.ClaimMeta.creation_height', index=6,
       number=7, type=13, cpp_type=3, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='activation_height', full_name='pb.ClaimMeta.activation_height', index=7,
       number=8, type=13, cpp_type=3, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='expiration_height', full_name='pb.ClaimMeta.expiration_height', index=8,
       number=9, type=13, cpp_type=3, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='claims_in_channel', full_name='pb.ClaimMeta.claims_in_channel', index=9,
       number=10, type=13, cpp_type=3, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='reposted', full_name='pb.ClaimMeta.reposted', index=10,
       number=11, type=13, cpp_type=3, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='effective_amount', full_name='pb.ClaimMeta.effective_amount', index=11,
       number=20, type=4, cpp_type=4, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='support_amount', full_name='pb.ClaimMeta.support_amount', index=12,
       number=21, type=4, cpp_type=4, label=1,
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
-      name='trending_group', full_name='pb.ClaimMeta.trending_group', index=13,
-      number=22, type=13, cpp_type=3, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-    _descriptor.FieldDescriptor(
-      name='trending_mixed', full_name='pb.ClaimMeta.trending_mixed', index=14,
-      number=23, type=2, cpp_type=6, label=1,
+      name='trending_score', full_name='pb.ClaimMeta.trending_score', index=13,
+      number=22, type=1, cpp_type=5, label=1,
       has_default_value=False, default_value=float(0),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
-    _descriptor.FieldDescriptor(
-      name='trending_local', full_name='pb.ClaimMeta.trending_local', index=15,
-      number=24, type=2, cpp_type=6, label=1,
-      has_default_value=False, default_value=float(0),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
-    _descriptor.FieldDescriptor(
-      name='trending_global', full_name='pb.ClaimMeta.trending_global', index=16,
-      number=25, type=2, cpp_type=6, label=1,
-      has_default_value=False, default_value=float(0),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
   ],
   extensions=[
   ],
   nested_types=[],
   enum_types=[
   ],
-  options=None,
+  serialized_options=None,
   is_extendable=False,
   syntax='proto3',
   extension_ranges=[],
   oneofs=[
   ],
   serialized_start=300,
-  serialized_end=731,
+  serialized_end=658,
 )
@@ -333,6 +321,7 @@ _ERROR = _descriptor.Descriptor(
   filename=None,
   file=DESCRIPTOR,
   containing_type=None,
+  create_key=_descriptor._internal_create_key,
   fields=[
     _descriptor.FieldDescriptor(
       name='code', full_name='pb.Error.code', index=0,
@@ -340,21 +329,21 @@ _ERROR = _descriptor.Descriptor(
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='text', full_name='pb.Error.text', index=1,
       number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
+      has_default_value=False, default_value=b"".decode('utf-8'),
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='blocked', full_name='pb.Error.blocked', index=2,
       number=3, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
   ],
   extensions=[
   ],
@@ -362,14 +351,14 @@ _ERROR = _descriptor.Descriptor(
   enum_types=[
     _ERROR_CODE,
   ],
-  options=None,
+  serialized_options=None,
   is_extendable=False,
   syntax='proto3',
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=734,
-  serialized_end=882,
+  serialized_start=661,
+  serialized_end=809,
 )
@@ -379,6 +368,7 @@ _BLOCKED = _descriptor.Descriptor(
   filename=None,
   file=DESCRIPTOR,
   containing_type=None,
+  create_key=_descriptor._internal_create_key,
   fields=[
     _descriptor.FieldDescriptor(
       name='count', full_name='pb.Blocked.count', index=0,
@@ -386,28 +376,28 @@ _BLOCKED = _descriptor.Descriptor(
       has_default_value=False, default_value=0,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
     _descriptor.FieldDescriptor(
       name='channel', full_name='pb.Blocked.channel', index=1,
       number=2, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      options=None),
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
   ],
   extensions=[
   ],
   nested_types=[],
   enum_types=[
   ],
-  options=None,
+  serialized_options=None,
   is_extendable=False,
   syntax='proto3',
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=884,
-  serialized_end=937,
+  serialized_start=811,
+  serialized_end=864,
 )

 _OUTPUTS.fields_by_name['txos'].message_type = _OUTPUT
@@ -432,41 +422,43 @@ DESCRIPTOR.message_types_by_name['Output'] = _OUTPUT
 DESCRIPTOR.message_types_by_name['ClaimMeta'] = _CLAIMMETA
 DESCRIPTOR.message_types_by_name['Error'] = _ERROR
 DESCRIPTOR.message_types_by_name['Blocked'] = _BLOCKED
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)

-Outputs = _reflection.GeneratedProtocolMessageType('Outputs', (_message.Message,), dict(
-  DESCRIPTOR = _OUTPUTS,
-  __module__ = 'result_pb2'
+Outputs = _reflection.GeneratedProtocolMessageType('Outputs', (_message.Message,), {
+  'DESCRIPTOR' : _OUTPUTS,
+  '__module__' : 'result_pb2'
   # @@protoc_insertion_point(class_scope:pb.Outputs)
-  ))
+  })
 _sym_db.RegisterMessage(Outputs)

-Output = _reflection.GeneratedProtocolMessageType('Output', (_message.Message,), dict(
-  DESCRIPTOR = _OUTPUT,
-  __module__ = 'result_pb2'
+Output = _reflection.GeneratedProtocolMessageType('Output', (_message.Message,), {
+  'DESCRIPTOR' : _OUTPUT,
+  '__module__' : 'result_pb2'
   # @@protoc_insertion_point(class_scope:pb.Output)
-  ))
+  })
 _sym_db.RegisterMessage(Output)

-ClaimMeta = _reflection.GeneratedProtocolMessageType('ClaimMeta', (_message.Message,), dict(
-  DESCRIPTOR = _CLAIMMETA,
-  __module__ = 'result_pb2'
+ClaimMeta = _reflection.GeneratedProtocolMessageType('ClaimMeta', (_message.Message,), {
+  'DESCRIPTOR' : _CLAIMMETA,
+  '__module__' : 'result_pb2'
   # @@protoc_insertion_point(class_scope:pb.ClaimMeta)
-  ))
+  })
 _sym_db.RegisterMessage(ClaimMeta)

-Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), dict(
-  DESCRIPTOR = _ERROR,
-  __module__ = 'result_pb2'
+Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), {
+  'DESCRIPTOR' : _ERROR,
+  '__module__' : 'result_pb2'
   # @@protoc_insertion_point(class_scope:pb.Error)
-  ))
+  })
 _sym_db.RegisterMessage(Error)

-Blocked = _reflection.GeneratedProtocolMessageType('Blocked', (_message.Message,), dict(
-  DESCRIPTOR = _BLOCKED,
-  __module__ = 'result_pb2'
+Blocked = _reflection.GeneratedProtocolMessageType('Blocked', (_message.Message,), {
+  'DESCRIPTOR' : _BLOCKED,
+  '__module__' : 'result_pb2'
   # @@protoc_insertion_point(class_scope:pb.Blocked)
-  ))
+  })
 _sym_db.RegisterMessage(Blocked)


+DESCRIPTOR._options = None
 # @@protoc_insertion_point(module_scope)
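A quick sanity-check sketch for the regenerated result.proto bindings: the four old trending_* fields collapse into a single trending_score double (field number 22). The import path is assumed from the repository layout:

from lbry.schema.types.v2.result_pb2 import Outputs

page = Outputs()
txo = page.txos.add()
txo.claim.trending_score = 1.5   # double field, number 22
decoded = Outputs.FromString(page.SerializeToString())
assert decoded.txos[0].claim.trending_score == 1.5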
@@ -19,7 +19,7 @@ DESCRIPTOR = _descriptor.FileDescriptor(
   name='support.proto',
   package='pb',
   syntax='proto3',
-  serialized_pb=_b('\n\rsupport.proto\x12\x02pb\"\x18\n\x07Support\x12\r\n\x05\x65moji\x18\x01 \x01(\tb\x06proto3')
+  serialized_pb=_b('\n\rsupport.proto\x12\x02pb\")\n\x07Support\x12\r\n\x05\x65moji\x18\x01 \x01(\t\x12\x0f\n\x07\x63omment\x18\x02 \x01(\tb\x06proto3')
 )
 _sym_db.RegisterFileDescriptor(DESCRIPTOR)

@@ -40,6 +40,13 @@ _SUPPORT = _descriptor.Descriptor(
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       options=None),
+    _descriptor.FieldDescriptor(
+      name='comment', full_name='pb.Support.comment', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
   ],
   extensions=[
   ],

@@ -53,7 +60,7 @@ _SUPPORT = _descriptor.Descriptor(
   oneofs=[
   ],
   serialized_start=21,
-  serialized_end=45,
+  serialized_end=62,
 )

 DESCRIPTOR.message_types_by_name['Support'] = _SUPPORT
lbry/schema/types/v2/wallet.json (new file, 139 lines)
@@ -0,0 +1,139 @@
{
  "title": "Wallet",
  "description": "An LBC wallet",
  "type": "object",
  "required": ["name", "version", "accounts", "preferences"],
  "additionalProperties": false,
  "properties": {
    "name": {
      "description": "Human readable name for this wallet",
      "type": "string"
    },
    "version": {
      "description": "Wallet spec version",
      "type": "integer",
      "$comment": "Should this be a string? We may need some sort of decimal type if we want exact decimal versions."
    },
    "accounts": {
      "description": "Accounts associated with this wallet",
      "type": "array",
      "items": {
        "type": "object",
        "required": ["address_generator", "certificates", "encrypted", "ledger", "modified_on", "name", "private_key", "public_key", "seed"],
        "additionalProperties": false,
        "properties": {
          "address_generator": {
            "description": "Higher level manager of either singular or deterministically generated addresses",
            "type": "object",
            "oneOf": [
              {
                "required": ["name", "change", "receiving"],
                "additionalProperties": false,
                "properties": {
                  "name": {
                    "description": "type of address generator: a deterministic chain of addresses",
                    "enum": ["deterministic-chain"],
                    "type": "string"
                  },
                  "change": {
                    "$ref": "#/$defs/address_manager",
                    "description": "Manager for deterministically generated change address (not used for single address)"
                  },
                  "receiving": {
                    "$ref": "#/$defs/address_manager",
                    "description": "Manager for deterministically generated receiving address (not used for single address)"
                  }
                }
              }, {
                "required": ["name"],
                "additionalProperties": false,
                "properties": {
                  "name": {
                    "description": "type of address generator: a single address",
                    "enum": ["single-address"],
                    "type": "string"
                  }
                }
              }
            ]
          },
          "certificates": {
            "type": "object",
            "description": "Channel keys. Mapping from public key address to pem-formatted private key.",
            "additionalProperties": {"type": "string"}
          },
          "encrypted": {
            "type": "boolean",
            "description": "Whether private key and seed are encrypted with a password"
          },
          "ledger": {
            "description": "Which network to use",
            "type": "string",
            "examples": [
              "lbc_mainnet",
              "lbc_testnet"
            ]
          },
          "modified_on": {
            "description": "last modified time in Unix Time",
            "type": "integer"
          },
          "name": {
            "description": "Name for account, possibly human readable",
            "type": "string"
          },
          "private_key": {
            "description": "Private key for address if `address_generator` is a single address. Root of chain of private keys for addresses if `address_generator` is a deterministic chain of addresses. Encrypted if `encrypted` is true.",
            "type": "string"
          },
          "public_key": {
            "description": "Public key for address if `address_generator` is a single address. Root of chain of public keys for addresses if `address_generator` is a deterministic chain of addresses.",
            "type": "string"
          },
          "seed": {
            "description": "Human readable representation of `private_key`. encrypted if `encrypted` is set to `true`",
            "type": "string"
          }
        }
      }
    },
    "preferences": {
      "description": "Timestamped application-level preferences. Values can be objects or of a primitive type.",
      "$comment": "enable-sync is seen in example wallet. encrypt-on-disk is seen in example wallet. they both have a boolean `value` field. Do we want them explicitly defined here? local and shared seem to have at least a similar structure (type, value [yes, again], version), value being the free-form part. Should we define those here? Or can there be any key under preferences, and `value` be literally be anything in any form?",
      "type": "object",
      "additionalProperties": {
        "type": "object",
        "required": ["ts", "value"],
        "additionalProperties": false,
        "properties": {
          "ts": {
            "type": "number",
            "description": "When the item was set, in Unix time format.",
            "$comment": "Do we want a string (decimal)?"
          },
          "value": {
            "$comment": "Sometimes this has been an object, sometimes just a boolean. I don't want to prescribe anything."
          }
        }
      }
    }
  },
  "$defs": {
    "address_manager": {
      "description": "Manager for deterministically generated addresses",
      "type": "object",
      "required": ["gap", "maximum_uses_per_address"],
      "additionalProperties": false,
      "properties": {
        "gap": {
          "description": "Maximum allowed consecutive generated addresses with no transactions",
          "type": "integer"
        },
        "maximum_uses_per_address": {
          "description": "Maximum number of uses for each generated address",
          "type": "integer"
        }
      }
    }
  }
}
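A hedged validation sketch for the schema above, using the third-party jsonschema package; the wallet dict and key strings are illustrative only:

import json
import jsonschema

with open('lbry/schema/types/v2/wallet.json') as fd:
    schema = json.load(fd)

wallet = {
    "name": "Main Wallet",
    "version": 1,
    "preferences": {},
    "accounts": [{
        "name": "default",
        "ledger": "lbc_mainnet",
        "encrypted": False,
        "seed": "carbon smart garage balance margin twelve",
        "private_key": "placeholder-xprv-string",
        "public_key": "placeholder-xpub-string",
        "modified_on": 1600000000,
        "certificates": {},
        "address_generator": {"name": "single-address"}
    }]
}
jsonschema.validate(wallet, schema)  # raises ValidationError if the shape drifts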
@@ -22,8 +22,7 @@ def _create_url_regex():
         return _group(
             _named(name+"_name", prefix + invalid_names_regex) +
             _oneof(
-                _group('#' + _named(name+"_claim_id", "[0-9a-f]{1,40}")),
-                _group(':' + _named(name+"_sequence", '[1-9][0-9]*')),
+                _group('[:#]' + _named(name+"_claim_id", "[0-9a-f]{1,40}")),
                 _group(r'\$' + _named(name+"_amount_order", '[1-9][0-9]*'))
             ) + '?'
         )

@@ -50,28 +49,31 @@ def normalize_name(name):
 class PathSegment(NamedTuple):
     name: str
     claim_id: str = None
-    sequence: int = None
     amount_order: int = None

     @property
     def normalized(self):
         return normalize_name(self.name)

+    @property
+    def is_shortid(self):
+        return self.claim_id is not None and len(self.claim_id) < 40
+
+    @property
+    def is_fullid(self):
+        return self.claim_id is not None and len(self.claim_id) == 40
+
     def to_dict(self):
         q = {'name': self.name}
         if self.claim_id is not None:
             q['claim_id'] = self.claim_id
-        if self.sequence is not None:
-            q['sequence'] = self.sequence
         if self.amount_order is not None:
             q['amount_order'] = self.amount_order
         return q

     def __str__(self):
         if self.claim_id is not None:
-            return f"{self.name}#{self.claim_id}"
-        elif self.sequence is not None:
-            return f"{self.name}:{self.sequence}"
+            return f"{self.name}:{self.claim_id}"
         elif self.amount_order is not None:
             return f"{self.name}${self.amount_order}"
         return self.name

@@ -118,7 +120,6 @@ class URL(NamedTuple):
             segments[segment] = PathSegment(
                 parts[f'{segment}_name'],
                 parts[f'{segment}_claim_id'],
-                parts[f'{segment}_sequence'],
                 parts[f'{segment}_amount_order']
             )
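The URL grammar now accepts a claim id after either ':' or '#' and drops the old claim-sequence form entirely; a small sketch (URL.parse and the segment attribute names are assumed from lbry.schema.url):

from lbry.schema.url import URL

url = URL.parse('lbry://@channel:1f/video#ab12')
assert url.channel.claim_id == '1f' and url.channel.is_shortid
assert url.stream.claim_id == 'ab12'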
lbry/stream/background_downloader.py (new file, 31 lines)
@@ -0,0 +1,31 @@
import asyncio
import logging

from lbry.stream.downloader import StreamDownloader


log = logging.getLogger(__name__)


class BackgroundDownloader:
    def __init__(self, conf, storage, blob_manager, dht_node=None):
        self.storage = storage
        self.blob_manager = blob_manager
        self.node = dht_node
        self.conf = conf

    async def download_blobs(self, sd_hash):
        downloader = StreamDownloader(asyncio.get_running_loop(), self.conf, self.blob_manager, sd_hash)
        try:
            await downloader.start(self.node, save_stream=False)
            for blob_info in downloader.descriptor.blobs[:-1]:
                await downloader.download_stream_blob(blob_info)
        except ValueError:
            return
        except asyncio.CancelledError:
            log.debug("Cancelled background downloader")
            raise
        except Exception:
            log.error("Unexpected download error on background downloader")
        finally:
            downloader.stop()
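An illustrative (not canonical) way to drive the new BackgroundDownloader from a running daemon; conf, storage, blob_manager and node come from the existing components, and sd_hash is whatever stream should be prefetched:

from lbry.stream.background_downloader import BackgroundDownloader

async def prefetch_stream(conf, storage, blob_manager, node, sd_hash):
    downloader = BackgroundDownloader(conf, storage, blob_manager, dht_node=node)
    # downloads every data blob of the stream without writing a file to disk
    await downloader.download_blobs(sd_hash)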
|
@ -4,6 +4,7 @@ import binascii
|
||||||
import logging
|
import logging
|
||||||
import typing
|
import typing
|
||||||
import asyncio
|
import asyncio
|
||||||
|
import time
|
||||||
import re
|
import re
|
||||||
from collections import OrderedDict
|
from collections import OrderedDict
|
||||||
from cryptography.hazmat.primitives.ciphers.algorithms import AES
|
from cryptography.hazmat.primitives.ciphers.algorithms import AES
|
||||||
|
@ -152,15 +153,19 @@ class StreamDescriptor:
|
||||||
h.update(self.old_sort_json())
|
h.update(self.old_sort_json())
|
||||||
return h.hexdigest()
|
return h.hexdigest()
|
||||||
|
|
||||||
async def make_sd_blob(self, blob_file_obj: typing.Optional[AbstractBlob] = None,
|
async def make_sd_blob(
|
||||||
old_sort: typing.Optional[bool] = False,
|
self, blob_file_obj: typing.Optional[AbstractBlob] = None, old_sort: typing.Optional[bool] = False,
|
||||||
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None):
|
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None,
|
||||||
|
added_on: float = None, is_mine: bool = False
|
||||||
|
):
|
||||||
sd_hash = self.calculate_sd_hash() if not old_sort else self.calculate_old_sort_sd_hash()
|
sd_hash = self.calculate_sd_hash() if not old_sort else self.calculate_old_sort_sd_hash()
|
||||||
if not old_sort:
|
if not old_sort:
|
||||||
sd_data = self.as_json()
|
sd_data = self.as_json()
|
||||||
else:
|
else:
|
||||||
sd_data = self.old_sort_json()
|
sd_data = self.old_sort_json()
|
||||||
sd_blob = blob_file_obj or BlobFile(self.loop, sd_hash, len(sd_data), blob_completed_callback, self.blob_dir)
|
sd_blob = blob_file_obj or BlobFile(
|
||||||
|
self.loop, sd_hash, len(sd_data), blob_completed_callback, self.blob_dir, added_on, is_mine
|
||||||
|
)
|
||||||
if blob_file_obj:
|
if blob_file_obj:
|
||||||
blob_file_obj.set_length(len(sd_data))
|
blob_file_obj.set_length(len(sd_data))
|
||||||
if not sd_blob.get_is_verified():
|
if not sd_blob.get_is_verified():
|
||||||
|
@ -183,18 +188,19 @@ class StreamDescriptor:
|
||||||
raise InvalidStreamDescriptorError("Does not decode as valid JSON")
|
raise InvalidStreamDescriptorError("Does not decode as valid JSON")
|
||||||
if decoded['blobs'][-1]['length'] != 0:
|
if decoded['blobs'][-1]['length'] != 0:
|
||||||
raise InvalidStreamDescriptorError("Does not end with a zero-length blob.")
|
raise InvalidStreamDescriptorError("Does not end with a zero-length blob.")
|
||||||
if any([blob_info['length'] == 0 for blob_info in decoded['blobs'][:-1]]):
|
if any(blob_info['length'] == 0 for blob_info in decoded['blobs'][:-1]):
|
||||||
raise InvalidStreamDescriptorError("Contains zero-length data blob")
|
raise InvalidStreamDescriptorError("Contains zero-length data blob")
|
||||||
if 'blob_hash' in decoded['blobs'][-1]:
|
if 'blob_hash' in decoded['blobs'][-1]:
|
||||||
raise InvalidStreamDescriptorError("Stream terminator blob should not have a hash")
|
raise InvalidStreamDescriptorError("Stream terminator blob should not have a hash")
|
||||||
if any([i != blob_info['blob_num'] for i, blob_info in enumerate(decoded['blobs'])]):
|
if any(i != blob_info['blob_num'] for i, blob_info in enumerate(decoded['blobs'])):
|
||||||
raise InvalidStreamDescriptorError("Stream contains out of order or skipped blobs")
|
raise InvalidStreamDescriptorError("Stream contains out of order or skipped blobs")
|
||||||
|
added_on = time.time()
|
||||||
descriptor = cls(
|
descriptor = cls(
|
||||||
loop, blob_dir,
|
loop, blob_dir,
|
||||||
binascii.unhexlify(decoded['stream_name']).decode(),
|
binascii.unhexlify(decoded['stream_name']).decode(),
|
||||||
decoded['key'],
|
decoded['key'],
|
||||||
binascii.unhexlify(decoded['suggested_file_name']).decode(),
|
binascii.unhexlify(decoded['suggested_file_name']).decode(),
|
||||||
[BlobInfo(info['blob_num'], info['length'], info['iv'], info.get('blob_hash'))
|
[BlobInfo(info['blob_num'], info['length'], info['iv'], added_on, info.get('blob_hash'))
|
||||||
for info in decoded['blobs']],
|
for info in decoded['blobs']],
|
||||||
decoded['stream_hash'],
|
decoded['stream_hash'],
|
||||||
blob.blob_hash
|
blob.blob_hash
|
||||||
@@ -252,20 +258,25 @@ class StreamDescriptor:
         iv_generator = iv_generator or random_iv_generator()
         key = key or os.urandom(AES.block_size // 8)
         blob_num = -1
+        added_on = time.time()
         async for blob_bytes in file_reader(file_path):
             blob_num += 1
             blob_info = await BlobFile.create_from_unencrypted(
-                loop, blob_dir, key, next(iv_generator), blob_bytes, blob_num, blob_completed_callback
+                loop, blob_dir, key, next(iv_generator), blob_bytes, blob_num, added_on, True, blob_completed_callback
             )
             blobs.append(blob_info)
         blobs.append(
-            BlobInfo(len(blobs), 0, binascii.hexlify(next(iv_generator)).decode()))  # add the stream terminator
+            # add the stream terminator
+            BlobInfo(len(blobs), 0, binascii.hexlify(next(iv_generator)).decode(), added_on, None, True)
+        )
         file_name = os.path.basename(file_path)
         suggested_file_name = sanitize_file_name(file_name)
         descriptor = cls(
             loop, blob_dir, file_name, binascii.hexlify(key).decode(), suggested_file_name, blobs
         )
-        sd_blob = await descriptor.make_sd_blob(old_sort=old_sort, blob_completed_callback=blob_completed_callback)
+        sd_blob = await descriptor.make_sd_blob(
+            old_sort=old_sort, blob_completed_callback=blob_completed_callback, added_on=added_on, is_mine=True
+        )
         descriptor.sd_hash = sd_blob.blob_hash
         return descriptor

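Both hunks thread a single `added_on` timestamp through every `BlobInfo` the descriptor creates, and blobs produced locally are now flagged as owned (`is_mine`). A rough sketch of what such a blob-info record could look like (hypothetical stand-in; the real class lives in lbry.blob.blob_info and may differ):

    # Hypothetical stand-in for illustration; field order follows the calls in the diff:
    # BlobInfo(blob_num, length, iv, added_on, blob_hash=None, is_mine=False)
    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class BlobInfoSketch:
        blob_num: int
        length: int
        iv: str
        added_on: float                    # unix timestamp, stamped once per stream
        blob_hash: Optional[str] = None    # None for the zero-length terminator
        is_mine: bool = False              # True for blobs created locally

    terminator = BlobInfoSketch(blob_num=3, length=0, iv="00" * 16, added_on=1_650_000_000.0, is_mine=True)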
@@ -3,11 +3,13 @@ import typing
 import logging
 import binascii

-from lbry.dht.peer import make_kademlia_peer
+from lbry.dht.node import get_kademlia_peers_from_hosts
 from lbry.error import DownloadSDTimeoutError
-from lbry.utils import resolve_host, lru_cache_concurrent
+from lbry.utils import lru_cache_concurrent
 from lbry.stream.descriptor import StreamDescriptor
 from lbry.blob_exchange.downloader import BlobDownloader
+from lbry.torrent.tracker import enqueue_tracker_search

 if typing.TYPE_CHECKING:
     from lbry.conf import Config
     from lbry.dht.node import Node
@@ -25,8 +27,8 @@ class StreamDownloader:
         self.config = config
         self.blob_manager = blob_manager
         self.sd_hash = sd_hash
-        self.search_queue = asyncio.Queue(loop=loop)  # blob hashes to feed into the iterative finder
-        self.peer_queue = asyncio.Queue(loop=loop)  # new peers to try
+        self.search_queue = asyncio.Queue()  # blob hashes to feed into the iterative finder
+        self.peer_queue = asyncio.Queue()  # new peers to try
         self.blob_downloader = BlobDownloader(self.loop, self.config, self.blob_manager, self.peer_queue)
         self.descriptor: typing.Optional[StreamDescriptor] = descriptor
         self.node: typing.Optional['Node'] = None
@@ -40,7 +42,7 @@ class StreamDownloader:
         async def cached_read_blob(blob_info: 'BlobInfo') -> bytes:
             return await self.read_blob(blob_info, 2)

-        if self.blob_manager.decrypted_blob_lru_cache:
+        if self.blob_manager.decrypted_blob_lru_cache is not None:
             cached_read_blob = lru_cache_concurrent(override_lru_cache=self.blob_manager.decrypted_blob_lru_cache)(
                 cached_read_blob
             )
@@ -48,26 +50,19 @@ class StreamDownloader:
         self.cached_read_blob = cached_read_blob

     async def add_fixed_peers(self):
-        def _delayed_add_fixed_peers():
+        def _add_fixed_peers(fixed_peers):
+            self.peer_queue.put_nowait(fixed_peers)
             self.added_fixed_peers = True
-            self.peer_queue.put_nowait([
-                make_kademlia_peer(None, address, None, tcp_port=port, allow_localhost=True)
-                for address, port in addresses
-            ])

         if not self.config.fixed_peers:
             return
-        addresses = [
-            (await resolve_host(url, port, proto='tcp'), port)
-            for url, port in self.config.fixed_peers
-        ]
         if 'dht' in self.config.components_to_skip or not self.node or not \
                 len(self.node.protocol.routing_table.get_peers()) > 0:
             self.fixed_peers_delay = 0.0
         else:
             self.fixed_peers_delay = self.config.fixed_peer_delay
-        self.fixed_peers_handle = self.loop.call_later(self.fixed_peers_delay, _delayed_add_fixed_peers)
+        fixed_peers = await get_kademlia_peers_from_hosts(self.config.fixed_peers)
+        self.fixed_peers_handle = self.loop.call_later(self.fixed_peers_delay, _add_fixed_peers, fixed_peers)

     async def load_descriptor(self, connection_id: int = 0):
         # download or get the sd blob
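
The reworked `add_fixed_peers` resolves the fixed peers up front, while still inside the coroutine, and then hands the result to `loop.call_later`, which forwards extra positional arguments to its (synchronous) callback. A small self-contained sketch of that scheduling pattern:

    import asyncio

    def _add_fixed_peers(peer_queue: asyncio.Queue, fixed_peers: list) -> None:
        # plain (non-async) callback: call_later cannot await anything
        peer_queue.put_nowait(fixed_peers)

    async def main() -> None:
        loop = asyncio.get_running_loop()
        peer_queue: asyncio.Queue = asyncio.Queue()
        fixed_peers = ["peer-a", "peer-b"]   # stands in for the resolved kademlia peers
        # extra positional args after the callback are passed through by call_later
        loop.call_later(0.1, _add_fixed_peers, peer_queue, fixed_peers)
        print(await peer_queue.get())        # ['peer-a', 'peer-b'] after ~0.1s

    asyncio.run(main())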
@@ -77,7 +72,7 @@ class StreamDownloader:
         now = self.loop.time()
         sd_blob = await asyncio.wait_for(
             self.blob_downloader.download_blob(self.sd_hash, connection_id),
-            self.config.blob_download_timeout, loop=self.loop
+            self.config.blob_download_timeout
         )
         log.info("downloaded sd blob %s", self.sd_hash)
         self.time_to_descriptor = self.loop.time() - now
@@ -90,7 +85,7 @@ class StreamDownloader:
         )
         log.info("loaded stream manifest %s", self.sd_hash)

-    async def start(self, node: typing.Optional['Node'] = None, connection_id: int = 0):
+    async def start(self, node: typing.Optional['Node'] = None, connection_id: int = 0, save_stream=True):
         # set up peer accumulation
         self.node = node or self.node  # fixme: this shouldnt be set here!
         if self.node:
@@ -98,6 +93,7 @@ class StreamDownloader:
                 self.accumulate_task.cancel()
             _, self.accumulate_task = self.node.accumulate_peers(self.search_queue, self.peer_queue)
         await self.add_fixed_peers()
+        enqueue_tracker_search(bytes.fromhex(self.sd_hash), self.peer_queue)
         # start searching for peers for the sd hash
         self.search_queue.put_nowait(self.sd_hash)
         log.info("searching for peers for stream %s", self.sd_hash)
@@ -105,11 +101,7 @@ class StreamDownloader:
         if not self.descriptor:
             await self.load_descriptor(connection_id)

-        # add the head blob to the peer search
-        self.search_queue.put_nowait(self.descriptor.blobs[0].blob_hash)
-        log.info("added head blob to peer search for stream %s", self.sd_hash)
-
-        if not await self.blob_manager.storage.stream_exists(self.sd_hash):
+        if not await self.blob_manager.storage.stream_exists(self.sd_hash) and save_stream:
             await self.blob_manager.storage.store_stream(
                 self.blob_manager.get_blob(self.sd_hash, length=self.descriptor.length), self.descriptor
             )
@@ -119,7 +111,7 @@ class StreamDownloader:
             raise ValueError(f"blob {blob_info.blob_hash} is not part of stream with sd hash {self.sd_hash}")
         blob = await asyncio.wait_for(
             self.blob_downloader.download_blob(blob_info.blob_hash, blob_info.length, connection_id),
-            self.config.blob_download_timeout * 10, loop=self.loop
+            self.config.blob_download_timeout * 10
         )
         return blob

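Many hunks in this file and below simply drop the explicit `loop=` argument from `asyncio.wait_for`, `asyncio.sleep`, `asyncio.gather`, `asyncio.Queue` and `asyncio.Event`. The `loop` parameter of these APIs was deprecated in Python 3.8 and removed in 3.10, so the calls now rely on the currently running loop. A minimal sketch of the updated usage:

    import asyncio

    async def fetch_with_timeout(coro_factory, timeout: float):
        # Python 3.10+: wait_for() no longer accepts a loop= argument;
        # it always uses the loop that is running this coroutine.
        return await asyncio.wait_for(coro_factory(), timeout)

    async def main():
        async def work():
            await asyncio.sleep(0.01)   # sleep() likewise lost its loop= parameter
            return "done"
        print(await fetch_with_timeout(work, timeout=1.0))

    asyncio.run(main())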
@@ -16,10 +16,8 @@ from lbry.file.source import ManagedDownloadSource

 if typing.TYPE_CHECKING:
     from lbry.conf import Config
-    from lbry.schema.claim import Claim
     from lbry.blob.blob_manager import BlobManager
     from lbry.blob.blob_info import BlobInfo
-    from lbry.dht.node import Node
     from lbry.extras.daemon.analytics import AnalyticsManager
     from lbry.wallet.transaction import Transaction

@@ -62,9 +60,9 @@ class ManagedStream(ManagedDownloadSource):
         self.file_output_task: typing.Optional[asyncio.Task] = None
         self.delayed_stop_task: typing.Optional[asyncio.Task] = None
         self.streaming_responses: typing.List[typing.Tuple[Request, StreamResponse]] = []
-        self.fully_reflected = asyncio.Event(loop=self.loop)
-        self.streaming = asyncio.Event(loop=self.loop)
-        self._running = asyncio.Event(loop=self.loop)
+        self.fully_reflected = asyncio.Event()
+        self.streaming = asyncio.Event()
+        self._running = asyncio.Event()

     @property
     def sd_hash(self) -> str:
@@ -84,7 +82,19 @@ class ManagedStream(ManagedDownloadSource):

     @property
     def file_name(self) -> Optional[str]:
-        return self._file_name or (self.descriptor.suggested_file_name if self.descriptor else None)
+        return self._file_name or self.suggested_file_name
+
+    @property
+    def suggested_file_name(self) -> Optional[str]:
+        first_option = ((self.descriptor and self.descriptor.suggested_file_name) or '').strip()
+        return sanitize_file_name(first_option or (self.stream_claim_info and self.stream_claim_info.claim and
+                                                   self.stream_claim_info.claim.stream.source.name))
+
+    @property
+    def stream_name(self) -> Optional[str]:
+        first_option = ((self.descriptor and self.descriptor.stream_name) or '').strip()
+        return first_option or (self.stream_claim_info and self.stream_claim_info.claim and
+                                self.stream_claim_info.claim.stream.source.name)

     @property
     def written_bytes(self) -> int:
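
The new `suggested_file_name` and `stream_name` properties fall back from the descriptor to the name published in the claim whenever the descriptor carries an empty or whitespace-only name. A compact sketch of that fallback chain with plain values (names here are illustrative, not the library API):

    from typing import Optional

    def pick_name(descriptor_name: Optional[str], claim_source_name: Optional[str]) -> Optional[str]:
        # first non-blank option wins: descriptor name, then the claim's source name
        first_option = (descriptor_name or '').strip()
        return first_option or claim_source_name

    print(pick_name("  ", "movie.mp4"))   # 'movie.mp4' - blank descriptor name falls through
    print(pick_name("video.mkv", None))   # 'video.mkv'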
@@ -118,7 +128,7 @@ class ManagedStream(ManagedDownloadSource):

     @property
     def mime_type(self):
-        return guess_media_type(os.path.basename(self.descriptor.suggested_file_name))[0]
+        return guess_media_type(os.path.basename(self.suggested_file_name))[0]

     @property
     def download_path(self):
@@ -151,7 +161,7 @@ class ManagedStream(ManagedDownloadSource):
         log.info("start downloader for stream (sd hash: %s)", self.sd_hash)
         self._running.set()
         try:
-            await asyncio.wait_for(self.downloader.start(), timeout, loop=self.loop)
+            await asyncio.wait_for(self.downloader.start(), timeout)
         except asyncio.TimeoutError:
             self._running.clear()
             raise DownloadSDTimeoutError(self.sd_hash)
@@ -164,7 +174,7 @@ class ManagedStream(ManagedDownloadSource):
         if not self._file_name:
             self._file_name = await get_next_available_file_name(
                 self.loop, self.download_directory,
-                self._file_name or sanitize_file_name(self.descriptor.suggested_file_name)
+                self._file_name or sanitize_file_name(self.suggested_file_name)
             )
             file_name, download_dir = self._file_name, self.download_directory
         else:
@@ -181,7 +191,7 @@ class ManagedStream(ManagedDownloadSource):
        Stop any running save/stream tasks as well as the downloader and update the status in the database
        """

-        self.stop_tasks()
+        await self.stop_tasks()
         if (finished and self.status != self.STATUS_FINISHED) or self.status == self.STATUS_RUNNING:
             await self.update_status(self.STATUS_FINISHED if finished else self.STATUS_STOPPED)

@@ -254,7 +264,7 @@ class ManagedStream(ManagedDownloadSource):
         self.finished_writing.clear()
         self.started_writing.clear()
         try:
-            open(output_path, 'wb').close()
+            open(output_path, 'wb').close()  # pylint: disable=consider-using-with
             async for blob_info, decrypted in self._aiter_read_stream(connection_id=self.SAVING_ID):
                 log.info("write blob %i/%i", blob_info.blob_num + 1, len(self.descriptor.blobs) - 1)
                 await self.loop.run_in_executor(None, self._write_decrypted_blob, output_path, decrypted)
@@ -269,7 +279,7 @@ class ManagedStream(ManagedDownloadSource):
             log.info("finished saving file for lbry://%s#%s (sd hash %s...) -> %s", self.claim_name, self.claim_id,
                      self.sd_hash[:6], self.full_path)
             await self.blob_manager.storage.set_saved_file(self.stream_hash)
-        except Exception as err:
+        except (Exception, asyncio.CancelledError) as err:
             if os.path.isfile(output_path):
                 log.warning("removing incomplete download %s for %s", output_path, self.sd_hash)
                 os.remove(output_path)
@@ -296,14 +306,14 @@ class ManagedStream(ManagedDownloadSource):
         self.download_directory = download_directory or self.download_directory or self.config.download_dir
         if not self.download_directory:
             raise ValueError("no directory to download to")
-        if not (file_name or self._file_name or self.descriptor.suggested_file_name):
+        if not (file_name or self._file_name or self.suggested_file_name):
             raise ValueError("no file name to download to")
         if not os.path.isdir(self.download_directory):
             log.warning("download directory '%s' does not exist, attempting to make it", self.download_directory)
             os.mkdir(self.download_directory)
         self._file_name = await get_next_available_file_name(
             self.loop, self.download_directory,
-            file_name or self._file_name or sanitize_file_name(self.descriptor.suggested_file_name)
+            file_name or self._file_name or sanitize_file_name(self.suggested_file_name)
         )
         await self.blob_manager.storage.change_file_download_dir_and_file_name(
             self.stream_hash, self.download_directory, self.file_name
@@ -311,15 +321,16 @@ class ManagedStream(ManagedDownloadSource):
         await self.update_status(ManagedStream.STATUS_RUNNING)
         self.file_output_task = self.loop.create_task(self._save_file(self.full_path))
         try:
-            await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout, loop=self.loop)
+            await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout)
         except asyncio.TimeoutError:
             log.warning("timeout starting to write data for lbry://%s#%s", self.claim_name, self.claim_id)
-            self.stop_tasks()
+            await self.stop_tasks()
             await self.update_status(ManagedStream.STATUS_STOPPED)

-    def stop_tasks(self):
+    async def stop_tasks(self):
         if self.file_output_task and not self.file_output_task.done():
             self.file_output_task.cancel()
+            await asyncio.gather(self.file_output_task, return_exceptions=True)
         self.file_output_task = None
         while self.streaming_responses:
             req, response = self.streaming_responses.pop()
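
Making `stop_tasks` a coroutine lets it actually wait for the cancelled file-output task to unwind before the reference is cleared. Cancelling a task only requests cancellation; gathering it with `return_exceptions=True` absorbs the resulting `CancelledError` instead of re-raising it. A self-contained sketch of the pattern:

    import asyncio

    async def long_write():
        try:
            await asyncio.sleep(3600)     # pretend to stream a file to disk
        finally:
            print("cleanup ran before the task was torn down")

    async def stop_tasks(task: asyncio.Task) -> None:
        if task and not task.done():
            task.cancel()
            # wait for the task to finish unwinding; swallow the CancelledError it raises
            await asyncio.gather(task, return_exceptions=True)

    async def main():
        task = asyncio.create_task(long_write())
        await asyncio.sleep(0.1)
        await stop_tasks(task)
        print("task done:", task.done())

    asyncio.run(main())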
@@ -354,18 +365,24 @@ class ManagedStream(ManagedDownloadSource):
                 self.reflector_progress = int((i + 1) / len(we_have) * 100)
         except (asyncio.TimeoutError, ValueError):
             return sent
-        except ConnectionRefusedError:
+        except ConnectionError:
             return sent
-        except OSError:
-            # raised if a blob is deleted while it's being sent
+        except (OSError, Exception, asyncio.CancelledError) as err:
+            if isinstance(err, asyncio.CancelledError):
+                log.warning("stopped uploading %s#%s to reflector", self.claim_name, self.claim_id)
+            elif isinstance(err, OSError):
+                log.warning(
+                    "stopped uploading %s#%s to reflector because blobs were deleted or moved", self.claim_name,
+                    self.claim_id
+                )
+            else:
+                log.exception("unexpected error reflecting %s#%s", self.claim_name, self.claim_id)
             return sent
         finally:
             if protocol.transport:
                 protocol.transport.close()
             self.uploading_to_reflector = False
-            if not self.fully_reflected.is_set():
-                self.fully_reflected.set()
-                await self.blob_manager.storage.update_reflected_stream(self.sd_hash, f"{host}:{port}")
         return sent

     async def update_content_claim(self, claim_info: Optional[typing.Dict] = None):
@@ -385,7 +402,7 @@ class ManagedStream(ManagedDownloadSource):
                          self.sd_hash[:6])
                 await self.stop()
                 return
-            await asyncio.sleep(1, loop=self.loop)
+            await asyncio.sleep(1)

     def _prepare_range_response_headers(self, get_range: str) -> typing.Tuple[typing.Dict[str, str], int, int, int]:
         if '=' in get_range:

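Listing `asyncio.CancelledError` explicitly matters because, since Python 3.8, it derives from `BaseException` rather than `Exception`, so a bare `except Exception` no longer catches it. The new handler funnels every outcome through one clause and dispatches on the concrete type. A condensed illustration of that dispatch:

    import asyncio

    async def upload(fail_with=None) -> int:
        sent = 0
        try:
            if fail_with:
                raise fail_with
            sent += 1
        except (OSError, Exception, asyncio.CancelledError) as err:
            if isinstance(err, asyncio.CancelledError):
                print("upload cancelled")            # CancelledError is a BaseException in 3.8+
            elif isinstance(err, OSError):
                print("blob deleted or moved while sending")
            else:
                print("unexpected error:", err)
        return sent

    asyncio.run(upload(OSError("blob file gone")))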
@@ -59,17 +59,17 @@ class StreamReflectorClient(asyncio.Protocol):
             return

     async def send_request(self, request_dict: typing.Dict, timeout: int = 180):
-        msg = json.dumps(request_dict)
+        msg = json.dumps(request_dict, sort_keys=True)
         try:
             self.transport.write(msg.encode())
             self.pending_request = self.loop.create_task(asyncio.wait_for(self.response_queue.get(), timeout))
             return await self.pending_request
-        except (AttributeError, asyncio.CancelledError):
+        except (AttributeError, asyncio.CancelledError) as err:
             # attribute error happens when we transport.write after disconnect
             # cancelled error happens when the pending_request task is cancelled by a disconnect
             if self.transport:
                 self.transport.close()
-            raise asyncio.TimeoutError()
+            raise err if isinstance(err, asyncio.CancelledError) else asyncio.CancelledError()
         finally:
             self.pending_request = None

@@ -94,8 +94,16 @@ class StreamReflectorClient(asyncio.Protocol):
         needed = response.get('needed_blobs', [])
         sent_sd = False
         if response['send_sd_blob']:
-            await sd_blob.sendfile(self)
-            received = await asyncio.wait_for(self.response_queue.get(), 30)
+            try:
+                sent = await sd_blob.sendfile(self)
+                if sent == -1:
+                    log.warning("failed to send sd blob")
+                    raise asyncio.CancelledError()
+                received = await asyncio.wait_for(self.response_queue.get(), 30)
+            except asyncio.CancelledError as err:
+                if self.transport:
+                    self.transport.close()
+                raise err
         if received.get('received_sd_blob'):
             sent_sd = True
             if not needed:
@@ -118,8 +126,16 @@ class StreamReflectorClient(asyncio.Protocol):
         if 'send_blob' not in response:
             raise ValueError("I don't know whether to send the blob or not!")
         if response['send_blob']:
-            await blob.sendfile(self)
-            received = await asyncio.wait_for(self.response_queue.get(), 30)
+            try:
+                sent = await blob.sendfile(self)
+                if sent == -1:
+                    log.warning("failed to send blob")
+                    raise asyncio.CancelledError()
+                received = await asyncio.wait_for(self.response_queue.get(), 30)
+            except asyncio.CancelledError as err:
+                if self.transport:
+                    self.transport.close()
+                raise err
         if received.get('received_blob'):
             self.reflected_blobs.append(blob.blob_hash)
             log.info("Sent reflector blob %s", blob.blob_hash[:8])

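Adding `sort_keys=True` makes the serialized request bytes deterministic regardless of dict insertion order, which keeps the reflector wire messages (and any tests that assert on raw payloads) stable. For example:

    import json

    request = {"send_blob": True, "blob_hash": "deadbeef", "blob_size": 1024}
    print(json.dumps(request, sort_keys=True))
    # -> {"blob_hash": "deadbeef", "blob_size": 1024, "send_blob": true}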
@@ -17,11 +17,11 @@ log = logging.getLogger(__name__)
 class ReflectorServerProtocol(asyncio.Protocol):
     def __init__(self, blob_manager: 'BlobManager', response_chunk_size: int = 10000,
                  stop_event: asyncio.Event = None, incoming_event: asyncio.Event = None,
-                 not_incoming_event: asyncio.Event = None):
+                 not_incoming_event: asyncio.Event = None, partial_event: asyncio.Event = None):
         self.loop = asyncio.get_event_loop()
         self.blob_manager = blob_manager
         self.server_task: asyncio.Task = None
-        self.started_listening = asyncio.Event(loop=self.loop)
+        self.started_listening = asyncio.Event()
         self.buf = b''
         self.transport: asyncio.StreamWriter = None
         self.writer: typing.Optional['HashBlobWriter'] = None
@@ -29,11 +29,12 @@ class ReflectorServerProtocol(asyncio.Protocol):
         self.descriptor: typing.Optional['StreamDescriptor'] = None
         self.sd_blob: typing.Optional['BlobFile'] = None
         self.received = []
-        self.incoming = incoming_event or asyncio.Event(loop=self.loop)
-        self.not_incoming = not_incoming_event or asyncio.Event(loop=self.loop)
-        self.stop_event = stop_event or asyncio.Event(loop=self.loop)
+        self.incoming = incoming_event or asyncio.Event()
+        self.not_incoming = not_incoming_event or asyncio.Event()
+        self.stop_event = stop_event or asyncio.Event()
         self.chunk_size = response_chunk_size
         self.wait_for_stop_task: typing.Optional[asyncio.Task] = None
+        self.partial_event = partial_event

     async def wait_for_stop(self):
         await self.stop_event.wait()
@@ -93,7 +94,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
             self.incoming.set()
             self.send_response({"send_sd_blob": True})
             try:
-                await asyncio.wait_for(self.sd_blob.verified.wait(), 30, loop=self.loop)
+                await asyncio.wait_for(self.sd_blob.verified.wait(), 30)
                 self.descriptor = await StreamDescriptor.from_stream_descriptor_blob(
                     self.loop, self.blob_manager.blob_dir, self.sd_blob
                 )
@@ -115,10 +116,14 @@ class ReflectorServerProtocol(asyncio.Protocol):
                 if self.writer:
                     self.writer.close_handle()
                     self.writer = None
-                self.send_response({"send_sd_blob": False, 'needed': [
-                    blob.blob_hash for blob in self.descriptor.blobs[:-1]
-                    if not self.blob_manager.get_blob(blob.blob_hash).get_is_verified()
-                ]})
+                needs = [blob.blob_hash
+                         for blob in self.descriptor.blobs[:-1]
+                         if not self.blob_manager.get_blob(blob.blob_hash).get_is_verified()]
+                if needs and not self.partial_event.is_set():
+                    needs = needs[:3]
+                    self.partial_event.set()
+                self.send_response({"send_sd_blob": False, 'needed_blobs': needs})
                 return
             return
         elif self.descriptor:
@@ -135,7 +140,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
             self.incoming.set()
             self.send_response({"send_blob": True})
             try:
-                await asyncio.wait_for(blob.verified.wait(), 30, loop=self.loop)
+                await asyncio.wait_for(blob.verified.wait(), 30)
                 self.send_response({"received_blob": True})
             except asyncio.TimeoutError:
                 self.send_response({"received_blob": False})
@@ -153,29 +158,29 @@ class ReflectorServerProtocol(asyncio.Protocol):
 class ReflectorServer:
     def __init__(self, blob_manager: 'BlobManager', response_chunk_size: int = 10000,
                  stop_event: asyncio.Event = None, incoming_event: asyncio.Event = None,
-                 not_incoming_event: asyncio.Event = None):
+                 not_incoming_event: asyncio.Event = None, partial_needs=False):
         self.loop = asyncio.get_event_loop()
         self.blob_manager = blob_manager
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event(loop=self.loop)
-        self.stopped_listening = asyncio.Event(loop=self.loop)
-        self.incoming_event = incoming_event or asyncio.Event(loop=self.loop)
-        self.not_incoming_event = not_incoming_event or asyncio.Event(loop=self.loop)
+        self.started_listening = asyncio.Event()
+        self.stopped_listening = asyncio.Event()
+        self.incoming_event = incoming_event or asyncio.Event()
+        self.not_incoming_event = not_incoming_event or asyncio.Event()
         self.response_chunk_size = response_chunk_size
         self.stop_event = stop_event
+        self.partial_needs = partial_needs  # for testing cases where it doesn't know what it wants

     def start_server(self, port: int, interface: typing.Optional[str] = '0.0.0.0'):
         if self.server_task is not None:
             raise Exception("already running")

         async def _start_server():
-            server = await self.loop.create_server(
-                lambda: ReflectorServerProtocol(
-                    self.blob_manager, self.response_chunk_size, self.stop_event, self.incoming_event,
-                    self.not_incoming_event
-                ),
-                interface, port
-            )
+            partial_event = asyncio.Event()
+            if not self.partial_needs:
+                partial_event.set()
+            server = await self.loop.create_server(lambda: ReflectorServerProtocol(
+                self.blob_manager, self.response_chunk_size, self.stop_event, self.incoming_event,
+                self.not_incoming_event, partial_event), interface, port)
             self.started_listening.set()
             self.stopped_listening.clear()
             log.info("Reflector server listening on TCP %s:%i", interface, port)

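`start_server` now builds one `partial_event` and closes over it in the protocol factory, so every connection accepted by that server shares the same flag; judging by the inline comment, this appears to exist so tests can make the reflector report only a partial "needed blobs" list once. A minimal sketch of sharing state through a `create_server` factory (names and the port are placeholders, not the library API):

    import asyncio

    class EchoOnceProtocol(asyncio.Protocol):
        def __init__(self, shared_flag: asyncio.Event):
            self.shared_flag = shared_flag        # same Event instance for every connection

        def connection_made(self, transport):
            self.transport = transport

        def data_received(self, data):
            if not self.shared_flag.is_set():     # only the first connection gets an echo
                self.shared_flag.set()
                self.transport.write(data)
            self.transport.close()

    async def main():
        loop = asyncio.get_running_loop()
        shared_flag = asyncio.Event()
        server = await loop.create_server(lambda: EchoOnceProtocol(shared_flag), "127.0.0.1", 8777)
        async with server:
            await asyncio.sleep(0.1)              # normally: await server.serve_forever()

    asyncio.run(main())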
@@ -54,7 +54,7 @@ class StreamManager(SourceManager):
         self.re_reflect_task: Optional[asyncio.Task] = None
         self.update_stream_finished_futs: typing.List[asyncio.Future] = []
         self.running_reflector_uploads: typing.Dict[str, asyncio.Task] = {}
-        self.started = asyncio.Event(loop=self.loop)
+        self.started = asyncio.Event()

     @property
     def streams(self):
@@ -70,6 +70,7 @@ class StreamManager(SourceManager):

     async def recover_streams(self, file_infos: typing.List[typing.Dict]):
         to_restore = []
+        to_check = []

         async def recover_stream(sd_hash: str, stream_hash: str, stream_name: str,
                                  suggested_file_name: str, key: str,
@@ -82,6 +83,7 @@ class StreamManager(SourceManager):
             if not descriptor:
                 return
             to_restore.append((descriptor, sd_blob, content_fee))
+            to_check.extend([sd_blob.blob_hash] + [blob.blob_hash for blob in descriptor.blobs[:-1]])

         await asyncio.gather(*[
             recover_stream(
@@ -93,6 +95,8 @@ class StreamManager(SourceManager):

         if to_restore:
             await self.storage.recover_streams(to_restore, self.config.download_dir)
+        if to_check:
+            await self.blob_manager.ensure_completed_blobs_status(to_check)

         # if self.blob_manager._save_blobs:
         #     log.info("Recovered %i/%i attempted streams", len(to_restore), len(file_infos))
@@ -146,7 +150,7 @@ class StreamManager(SourceManager):
                     file_info['added_on'], file_info['fully_reflected']
                 )))
         if add_stream_tasks:
-            await asyncio.gather(*add_stream_tasks, loop=self.loop)
+            await asyncio.gather(*add_stream_tasks)
         log.info("Started stream manager with %i files", len(self._sources))
         if not self.node:
             log.info("no DHT node given, resuming downloads trusting that we can contact reflector")
@@ -155,18 +159,15 @@ class StreamManager(SourceManager):
             self.resume_saving_task = asyncio.ensure_future(asyncio.gather(
                 *(self._sources[sd_hash].save_file(file_name, download_directory)
                   for (file_name, download_directory, sd_hash) in to_resume_saving),
-                loop=self.loop
             ))

     async def reflect_streams(self):
         try:
-            return await self._reflact_streams()
-        except asyncio.CancelledError:
-            raise
+            return await self._reflect_streams()
         except Exception:
             log.exception("reflector task encountered an unexpected error!")

-    async def _reflact_streams(self):
+    async def _reflect_streams(self):
         # todo: those debug statements are temporary for #2987 - remove them if its closed
         while True:
             if self.config.reflect_streams and self.config.reflector_servers:
@@ -182,21 +183,21 @@ class StreamManager(SourceManager):
                     batch.append(self.reflect_stream(stream))
                     if len(batch) >= self.config.concurrent_reflector_uploads:
                         log.debug("waiting for batch of %s reflecting streams", len(batch))
-                        await asyncio.gather(*batch, loop=self.loop)
+                        await asyncio.gather(*batch)
                         log.debug("done processing %s streams", len(batch))
                         batch = []
                 if batch:
                     log.debug("waiting for batch of %s reflecting streams", len(batch))
-                    await asyncio.gather(*batch, loop=self.loop)
+                    await asyncio.gather(*batch)
                     log.debug("done processing %s streams", len(batch))
-            await asyncio.sleep(300, loop=self.loop)
+            await asyncio.sleep(300)

     async def start(self):
         await super().start()
         self.re_reflect_task = self.loop.create_task(self.reflect_streams())

-    def stop(self):
-        super().stop()
+    async def stop(self):
+        await super().stop()
         if self.resume_saving_task and not self.resume_saving_task.done():
             self.resume_saving_task.cancel()
         if self.re_reflect_task and not self.re_reflect_task.done():
@@ -215,7 +216,7 @@ class StreamManager(SourceManager):
         server, port = random.choice(self.config.reflector_servers)
         if stream.sd_hash in self.running_reflector_uploads:
             return self.running_reflector_uploads[stream.sd_hash]
-        task = self.loop.create_task(stream.upload_to_reflector(server, port))
+        task = self.loop.create_task(self._retriable_reflect_stream(stream, server, port))
         self.running_reflector_uploads[stream.sd_hash] = task
         task.add_done_callback(
             lambda _: None if stream.sd_hash not in self.running_reflector_uploads else
@@ -223,6 +224,14 @@ class StreamManager(SourceManager):
         )
         return task

+    @staticmethod
+    async def _retriable_reflect_stream(stream, host, port):
+        sent = await stream.upload_to_reflector(host, port)
+        while not stream.is_fully_reflected and stream.reflector_progress > 0 and len(sent) > 0:
+            stream.reflector_progress = 0
+            sent = await stream.upload_to_reflector(host, port)
+        return sent
+
     async def create(self, file_path: str, key: Optional[bytes] = None,
                      iv_generator: Optional[typing.Generator[bytes, None, None]] = None) -> ManagedStream:
         descriptor = await StreamDescriptor.create_stream(
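
`_retriable_reflect_stream` keeps re-running the upload as long as the previous attempt actually moved data (progress was made and at least one blob was sent) but the stream is not yet fully reflected. A generic sketch of that retry-while-progress loop (names and the simulated uploader are illustrative):

    import asyncio
    import random

    async def upload_once(remaining: list) -> list:
        # pretend to upload: each attempt succeeds for a random prefix of the remaining blobs
        sent = remaining[:random.randint(0, len(remaining))]
        for blob in sent:
            remaining.remove(blob)
        await asyncio.sleep(0)
        return sent

    async def retriable_upload(blobs: list) -> None:
        remaining = list(blobs)
        sent = await upload_once(remaining)
        # retry only while the last attempt made progress and work is still left
        while remaining and len(sent) > 0:
            sent = await upload_once(remaining)
        print("fully uploaded" if not remaining else f"gave up with {len(remaining)} left")

    asyncio.run(retriable_upload(["blob1", "blob2", "blob3"]))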
@@ -230,7 +239,7 @@ class StreamManager(SourceManager):
             blob_completed_callback=self.blob_manager.blob_completed
         )
         await self.storage.store_stream(
-            self.blob_manager.get_blob(descriptor.sd_hash), descriptor
+            self.blob_manager.get_blob(descriptor.sd_hash, is_mine=True), descriptor
         )
         row_id = await self.storage.save_published_file(
             descriptor.stream_hash, os.path.basename(file_path), os.path.dirname(file_path), 0
@@ -251,7 +260,7 @@ class StreamManager(SourceManager):
             return
         if source.identifier in self.running_reflector_uploads:
             self.running_reflector_uploads[source.identifier].cancel()
-        source.stop_tasks()
+        await source.stop_tasks()
         if source.identifier in self.streams:
             del self.streams[source.identifier]
         blob_hashes = [source.identifier] + [b.blob_hash for b in source.descriptor.blobs[:-1]]

lbry/testcase.py (199 changed lines)
@@ -17,18 +17,21 @@ from functools import partial
 from lbry.wallet import WalletManager, Wallet, Ledger, Account, Transaction
 from lbry.conf import Config
 from lbry.wallet.util import satoshis_to_coins
+from lbry.wallet.dewies import lbc_to_dewies
 from lbry.wallet.orchstr8 import Conductor
-from lbry.wallet.orchstr8.node import BlockchainNode, WalletNode
+from lbry.wallet.orchstr8.node import LBCWalletNode, WalletNode
+from lbry.schema.claim import Claim

 from lbry.extras.daemon.daemon import Daemon, jsonrpc_dumps_pretty
 from lbry.extras.daemon.components import Component, WalletComponent
 from lbry.extras.daemon.components import (
-    DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT,
-    UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT
+    DHT_COMPONENT,
+    HASH_ANNOUNCER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT,
+    UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, LIBTORRENT_COMPONENT
 )
 from lbry.extras.daemon.componentmanager import ComponentManager
 from lbry.extras.daemon.exchange_rate_manager import (
-    ExchangeRateManager, ExchangeRate, LBRYFeed, LBRYBTCFeed
+    ExchangeRateManager, ExchangeRate, BittrexBTCFeed, BittrexUSDFeed
 )
 from lbry.extras.daemon.storage import SQLiteStorage
 from lbry.blob.blob_manager import BlobManager
@@ -83,6 +86,7 @@ class AsyncioTestCase(unittest.TestCase):
     # https://bugs.python.org/issue32972

     LOOP_SLOW_CALLBACK_DURATION = 0.2
+    TIMEOUT = 120.0

     maxDiff = None

@@ -130,15 +134,18 @@ class AsyncioTestCase(unittest.TestCase):

             with outcome.testPartExecutor(self):
                 self.setUp()
+                self.add_timeout()
                 self.loop.run_until_complete(self.asyncSetUp())
             if outcome.success:
                 outcome.expecting_failure = expecting_failure
                 with outcome.testPartExecutor(self, isTest=True):
                     maybe_coroutine = testMethod()
                     if asyncio.iscoroutine(maybe_coroutine):
+                        self.add_timeout()
                         self.loop.run_until_complete(maybe_coroutine)
                 outcome.expecting_failure = False
                 with outcome.testPartExecutor(self):
+                    self.add_timeout()
                     self.loop.run_until_complete(self.asyncTearDown())
                     self.tearDown()

@@ -186,8 +193,25 @@ class AsyncioTestCase(unittest.TestCase):
             with outcome.testPartExecutor(self):
                 maybe_coroutine = function(*args, **kwargs)
                 if asyncio.iscoroutine(maybe_coroutine):
+                    self.add_timeout()
                     self.loop.run_until_complete(maybe_coroutine)

+    def cancel(self):
+        for task in asyncio.all_tasks(self.loop):
+            if not task.done():
+                task.print_stack()
+                task.cancel()
+
+    def add_timeout(self):
+        if self.TIMEOUT:
+            self.loop.call_later(self.TIMEOUT, self.check_timeout, time())
+
+    def check_timeout(self, started):
+        if time() - started >= self.TIMEOUT:
+            self.cancel()
+        else:
+            self.loop.call_later(self.TIMEOUT, self.check_timeout, started)
+

 class AdvanceTimeTestCase(AsyncioTestCase):

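The new `TIMEOUT` / `add_timeout` / `check_timeout` / `cancel` machinery is a test watchdog: before each `run_until_complete` the test case schedules a callback that, once the deadline passes, prints the stack of every still-pending task and cancels it, so a hung integration test fails instead of blocking forever. A stripped-down standalone version of the same idea:

    import asyncio
    from time import time

    TIMEOUT = 2.0

    def cancel_pending(loop: asyncio.AbstractEventLoop) -> None:
        for task in asyncio.all_tasks(loop):
            if not task.done():
                task.print_stack()   # show where the test is stuck before killing it
                task.cancel()

    def check_timeout(loop: asyncio.AbstractEventLoop, started: float) -> None:
        if time() - started >= TIMEOUT:
            cancel_pending(loop)
        else:
            loop.call_later(TIMEOUT, check_timeout, loop, started)

    async def hanging_test():
        await asyncio.sleep(3600)    # never finishes on its own

    loop = asyncio.new_event_loop()
    loop.call_later(TIMEOUT, check_timeout, loop, time())
    try:
        loop.run_until_complete(hanging_test())
    except asyncio.CancelledError:
        print("watchdog cancelled the hung test")
    finally:
        loop.close()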
@@ -212,7 +236,7 @@ class IntegrationTestCase(AsyncioTestCase):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.conductor: Optional[Conductor] = None
-        self.blockchain: Optional[BlockchainNode] = None
+        self.blockchain: Optional[LBCWalletNode] = None
         self.wallet_node: Optional[WalletNode] = None
         self.manager: Optional[WalletManager] = None
         self.ledger: Optional[Ledger] = None
@@ -221,13 +245,15 @@ class IntegrationTestCase(AsyncioTestCase):

     async def asyncSetUp(self):
         self.conductor = Conductor(seed=self.SEED)
-        await self.conductor.start_blockchain()
-        self.addCleanup(self.conductor.stop_blockchain)
+        await self.conductor.start_lbcd()
+        self.addCleanup(self.conductor.stop_lbcd)
+        await self.conductor.start_lbcwallet()
+        self.addCleanup(self.conductor.stop_lbcwallet)
         await self.conductor.start_spv()
         self.addCleanup(self.conductor.stop_spv)
         await self.conductor.start_wallet()
         self.addCleanup(self.conductor.stop_wallet)
-        self.blockchain = self.conductor.blockchain_node
+        self.blockchain = self.conductor.lbcwallet_node
         self.wallet_node = self.conductor.wallet_node
         self.manager = self.wallet_node.manager
         self.ledger = self.wallet_node.ledger
@@ -241,6 +267,13 @@ class IntegrationTestCase(AsyncioTestCase):
     def broadcast(self, tx):
         return self.ledger.broadcast(tx)

+    async def broadcast_and_confirm(self, tx, ledger=None):
+        ledger = ledger or self.ledger
+        notifications = asyncio.create_task(ledger.wait(tx))
+        await ledger.broadcast(tx)
+        await notifications
+        await self.generate_and_wait(1, [tx.id], ledger)
+
     async def on_header(self, height):
         if self.ledger.headers.height < height:
             await self.ledger.on_header.where(
@@ -248,11 +281,29 @@ class IntegrationTestCase(AsyncioTestCase):
             )
         return True

-    def on_transaction_id(self, txid, ledger=None):
-        return (ledger or self.ledger).on_transaction.where(
-            lambda e: e.tx.id == txid
+    async def send_to_address_and_wait(self, address, amount, blocks_to_generate=0, ledger=None):
+        tx_watch = []
+        txid = None
+        done = False
+        watcher = (ledger or self.ledger).on_transaction.where(
+            lambda e: e.tx.id == txid or done or tx_watch.append(e.tx.id)
         )

+        txid = await self.blockchain.send_to_address(address, amount)
+        done = txid in tx_watch
+        await watcher
+
+        await self.generate_and_wait(blocks_to_generate, [txid], ledger)
+        return txid
+
+    async def generate_and_wait(self, blocks_to_generate, txids, ledger=None):
+        if blocks_to_generate > 0:
+            watcher = (ledger or self.ledger).on_transaction.where(
+                lambda e: ((e.tx.id in txids and txids.remove(e.tx.id)), len(txids) <= 0)[-1]  # multi-statement lambda
+            )
+            await self.generate(blocks_to_generate)
+            await watcher
+
     def on_address_update(self, address):
         return self.ledger.on_transaction.where(
             lambda e: e.address == address
||||||
lambda e: e.tx.id == tx.id and e.address == address
|
lambda e: e.tx.id == tx.id and e.address == address
|
||||||
)
|
)
|
||||||
|
|
||||||
|
async def generate(self, blocks):
|
||||||
|
""" Ask lbrycrd to generate some blocks and wait until ledger has them. """
|
||||||
|
prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
|
||||||
|
self.conductor.spv_node.server.synchronized.clear()
|
||||||
|
await self.blockchain.generate(blocks)
|
||||||
|
height = self.blockchain.block_expected
|
||||||
|
await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate
|
||||||
|
while True:
|
||||||
|
await self.conductor.spv_node.server.synchronized.wait()
|
||||||
|
self.conductor.spv_node.server.synchronized.clear()
|
||||||
|
if self.conductor.spv_node.server.db.db_height < height:
|
||||||
|
continue
|
||||||
|
if self.conductor.spv_node.server._es_height < height:
|
||||||
|
continue
|
||||||
|
break
|
||||||
|
|
||||||
|
|
||||||
class FakeExchangeRateManager(ExchangeRateManager):
|
class FakeExchangeRateManager(ExchangeRateManager):
|
||||||
|
|
||||||
|
@ -281,8 +348,8 @@ class FakeExchangeRateManager(ExchangeRateManager):
|
||||||
|
|
||||||
def get_fake_exchange_rate_manager(rates=None):
|
def get_fake_exchange_rate_manager(rates=None):
|
||||||
return FakeExchangeRateManager(
|
return FakeExchangeRateManager(
|
||||||
[LBRYFeed(), LBRYBTCFeed()],
|
[BittrexBTCFeed(), BittrexUSDFeed()],
|
||||||
rates or {'BTCLBC': 3.0, 'USDBTC': 2.0}
|
rates or {'BTCLBC': 3.0, 'USDLBC': 2.0}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -320,26 +387,32 @@ class CommandTestCase(IntegrationTestCase):
|
||||||
self.server_blob_manager = None
|
self.server_blob_manager = None
|
||||||
self.server = None
|
self.server = None
|
||||||
self.reflector = None
|
self.reflector = None
|
||||||
|
self.skip_libtorrent = True
|
||||||
|
|
||||||
async def asyncSetUp(self):
|
async def asyncSetUp(self):
|
||||||
await super().asyncSetUp()
|
|
||||||
|
|
||||||
logging.getLogger('lbry.blob_exchange').setLevel(self.VERBOSITY)
|
logging.getLogger('lbry.blob_exchange').setLevel(self.VERBOSITY)
|
||||||
logging.getLogger('lbry.daemon').setLevel(self.VERBOSITY)
|
logging.getLogger('lbry.daemon').setLevel(self.VERBOSITY)
|
||||||
logging.getLogger('lbry.stream').setLevel(self.VERBOSITY)
|
logging.getLogger('lbry.stream').setLevel(self.VERBOSITY)
|
||||||
logging.getLogger('lbry.wallet').setLevel(self.VERBOSITY)
|
logging.getLogger('lbry.wallet').setLevel(self.VERBOSITY)
|
||||||
|
|
||||||
|
await super().asyncSetUp()
|
||||||
|
|
||||||
self.daemon = await self.add_daemon(self.wallet_node)
|
self.daemon = await self.add_daemon(self.wallet_node)
|
||||||
|
|
||||||
await self.account.ensure_address_gap()
|
await self.account.ensure_address_gap()
|
||||||
address = (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0]
|
address = (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0]
|
||||||
sendtxid = await self.blockchain.send_to_address(address, 10)
|
await self.send_to_address_and_wait(address, 10, 6)
|
||||||
await self.confirm_tx(sendtxid)
|
|
||||||
await self.generate(5)
|
|
||||||
|
|
||||||
server_tmp_dir = tempfile.mkdtemp()
|
server_tmp_dir = tempfile.mkdtemp()
|
||||||
self.addCleanup(shutil.rmtree, server_tmp_dir)
|
self.addCleanup(shutil.rmtree, server_tmp_dir)
|
||||||
self.server_config = Config()
|
self.server_config = Config(
|
||||||
|
data_dir=server_tmp_dir,
|
||||||
|
wallet_dir=server_tmp_dir,
|
||||||
|
save_files=True,
|
||||||
|
download_dir=server_tmp_dir
|
||||||
|
)
|
||||||
|
self.server_config.transaction_cache_size = 10000
|
||||||
self.server_storage = SQLiteStorage(self.server_config, ':memory:')
|
self.server_storage = SQLiteStorage(self.server_config, ':memory:')
|
||||||
await self.server_storage.open()
|
await self.server_storage.open()
|
||||||
|
|
||||||
|
@@ -362,6 +435,7 @@ class CommandTestCase(IntegrationTestCase):
         await daemon.stop()

     async def add_daemon(self, wallet_node=None, seed=None):
+        start_wallet_node = False
         if wallet_node is None:
             wallet_node = WalletNode(
                 self.wallet_node.manager_class,
@@ -369,31 +443,42 @@ class CommandTestCase(IntegrationTestCase):
                 port=self.extra_wallet_node_port
             )
             self.extra_wallet_node_port += 1
-            await wallet_node.start(self.conductor.spv_node, seed=seed)
-            self.extra_wallet_nodes.append(wallet_node)
+            start_wallet_node = True

         upload_dir = os.path.join(wallet_node.data_path, 'uploads')
         os.mkdir(upload_dir)

-        conf = Config()
-        conf.data_dir = wallet_node.data_path
-        conf.wallet_dir = wallet_node.data_path
-        conf.download_dir = wallet_node.data_path
+        conf = Config(
+            # needed during instantiation to access known_hubs path
+            data_dir=wallet_node.data_path,
+            wallet_dir=wallet_node.data_path,
+            save_files=True,
+            download_dir=wallet_node.data_path
+        )
         conf.upload_dir = upload_dir  # not a real conf setting
         conf.share_usage_data = False
         conf.use_upnp = False
         conf.reflect_streams = True
         conf.blockchain_name = 'lbrycrd_regtest'
-        conf.lbryum_servers = [('127.0.0.1', 50001)]
+        conf.lbryum_servers = [(self.conductor.spv_node.hostname, self.conductor.spv_node.port)]
         conf.reflector_servers = [('127.0.0.1', 5566)]
         conf.fixed_peers = [('127.0.0.1', 5567)]
         conf.known_dht_nodes = []
         conf.blob_lru_cache_size = self.blob_lru_cache_size
+        conf.transaction_cache_size = 10000
         conf.components_to_skip = [
             DHT_COMPONENT, UPNP_COMPONENT, HASH_ANNOUNCER_COMPONENT,
             PEER_PROTOCOL_SERVER_COMPONENT
         ]
-        wallet_node.manager.config = conf
+        if self.skip_libtorrent:
+            conf.components_to_skip.append(LIBTORRENT_COMPONENT)
+
+        if start_wallet_node:
+            await wallet_node.start(self.conductor.spv_node, seed=seed, config=conf)
+            self.extra_wallet_nodes.append(wallet_node)
+        else:
+            wallet_node.manager.config = conf
+            wallet_node.manager.ledger.config['known_hubs'] = conf.known_hubs

         def wallet_maker(component_manager):
             wallet_component = WalletComponent(component_manager)
@@ -404,7 +489,7 @@ class CommandTestCase(IntegrationTestCase):
         daemon = Daemon(conf, ComponentManager(
             conf, skip_components=conf.components_to_skip, wallet=wallet_maker,
             exchange_rate_manager=partial(ExchangeRateManagerComponent, rates={
-                'BTCLBC': 1.0, 'USDBTC': 2.0
+                'BTCLBC': 1.0, 'USDLBC': 2.0
             })
         ))
         await daemon.initialize()
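For reference, a brief usage sketch of add_daemon() (the test body below is illustrative, not part of this change): calling it without a wallet_node builds a fresh WalletNode, starts it against the shared SPV node, and returns a fully initialized Daemon.

    async def test_with_second_daemon(self):
        # illustrative only: the second daemon gets its own data/wallet directories and config
        daemon2 = await self.add_daemon()
        self.assertIsNot(daemon2, self.daemon)
        await daemon2.stop()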
@@ -414,9 +499,14 @@ class CommandTestCase(IntegrationTestCase):

     async def confirm_tx(self, txid, ledger=None):
         """ Wait for tx to be in mempool, then generate a block, wait for tx to be in a block. """
-        await self.on_transaction_id(txid, ledger)
-        await self.generate(1)
-        await self.on_transaction_id(txid, ledger)
+        # await (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid)
+        on_tx = (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid)
+        await asyncio.wait([self.generate(1), on_tx], timeout=5)
+
+        # # actually, if it's in the mempool or in the block we're fine
+        # await self.generate_and_wait(1, [txid], ledger=ledger)
+        # return txid
         return txid

     async def on_transaction_dict(self, tx):
@@ -431,11 +521,6 @@ class CommandTestCase(IntegrationTestCase):
             addresses.add(txo['address'])
         return list(addresses)

-    async def generate(self, blocks):
-        """ Ask lbrycrd to generate some blocks and wait until ledger has them. """
-        await self.blockchain.generate(blocks)
-        await self.ledger.on_header.where(self.blockchain.is_expected_block)
-
     async def blockchain_claim_name(self, name: str, value: str, amount: str, confirm=True):
         txid = await self.blockchain._cli_cmnd('claimname', name, value, amount)
         if confirm:
@@ -456,12 +541,27 @@ class CommandTestCase(IntegrationTestCase):
         """ Synchronous version of `out` method. """
         return json.loads(jsonrpc_dumps_pretty(value, ledger=self.ledger))['result']

-    async def confirm_and_render(self, awaitable, confirm) -> Transaction:
+    async def confirm_and_render(self, awaitable, confirm, return_tx=False) -> Transaction:
         tx = await awaitable
         if confirm:
             await self.ledger.wait(tx)
             await self.generate(1)
             await self.ledger.wait(tx, self.blockchain.block_expected)
+        if not return_tx:
+            return self.sout(tx)
+        return tx
+
+    async def create_nondeterministic_channel(self, name, price, pubkey_bytes, daemon=None, blocking=False):
+        account = (daemon or self.daemon).wallet_manager.default_account
+        claim_address = await account.receiving.get_or_create_usable_address()
+        claim = Claim()
+        claim.channel.public_key_bytes = pubkey_bytes
+        tx = await Transaction.claim_create(
+            name, claim, lbc_to_dewies(price),
+            claim_address, [self.account], self.account
+        )
+        await tx.sign([self.account])
+        await (daemon or self.daemon).broadcast_or_release(tx, blocking)
         return self.sout(tx)

     def create_upload_file(self, data, prefix=None, suffix=None):
@@ -473,19 +573,19 @@ class CommandTestCase(IntegrationTestCase):

     async def stream_create(
             self, name='hovercraft', bid='1.0', file_path=None,
-            data=b'hi!', confirm=True, prefix=None, suffix=None, **kwargs):
-        if file_path is None:
+            data=b'hi!', confirm=True, prefix=None, suffix=None, return_tx=False, **kwargs):
+        if file_path is None and data is not None:
             file_path = self.create_upload_file(data=data, prefix=prefix, suffix=suffix)
         return await self.confirm_and_render(
-            self.daemon.jsonrpc_stream_create(name, bid, file_path=file_path, **kwargs), confirm
+            self.daemon.jsonrpc_stream_create(name, bid, file_path=file_path, **kwargs), confirm, return_tx
         )

     async def stream_update(
-            self, claim_id, data=None, prefix=None, suffix=None, confirm=True, **kwargs):
+            self, claim_id, data=None, prefix=None, suffix=None, confirm=True, return_tx=False, **kwargs):
         if data is not None:
             file_path = self.create_upload_file(data=data, prefix=prefix, suffix=suffix)
             return await self.confirm_and_render(
-                self.daemon.jsonrpc_stream_update(claim_id, file_path=file_path, **kwargs), confirm
+                self.daemon.jsonrpc_stream_update(claim_id, file_path=file_path, **kwargs), confirm, return_tx
             )
         return await self.confirm_and_render(
             self.daemon.jsonrpc_stream_update(claim_id, **kwargs), confirm
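A short usage sketch of the new return_tx flag (test name and assertions here are illustrative): by default the helpers return the sout()-rendered dict, while return_tx=True hands back the Transaction object itself.

    async def test_stream_create_return_forms(self):
        # default: JSON-style dict, so get_claim_id() can index into tx['outputs']
        tx_dict = await self.stream_create('hovercraft', '1.0', data=b'hi!')
        self.assertEqual(self.get_claim_id(tx_dict), tx_dict['outputs'][0]['claim_id'])
        # return_tx=True: the raw Transaction, useful when a test needs the object itself
        tx_obj = await self.stream_create('hovercraft2', '1.0', data=b'hi!', return_tx=True)
        self.assertTrue(hasattr(tx_obj, 'outputs'))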
@@ -561,6 +661,11 @@ class CommandTestCase(IntegrationTestCase):
             self.daemon.jsonrpc_support_abandon(*args, **kwargs), confirm
         )

+    async def account_send(self, *args, confirm=True, **kwargs):
+        return await self.confirm_and_render(
+            self.daemon.jsonrpc_account_send(*args, **kwargs), confirm
+        )
+
     async def wallet_send(self, *args, confirm=True, **kwargs):
         return await self.confirm_and_render(
             self.daemon.jsonrpc_wallet_send(*args, **kwargs), confirm
@@ -574,12 +679,21 @@ class CommandTestCase(IntegrationTestCase):
         await asyncio.wait([self.ledger.wait(tx, self.blockchain.block_expected) for tx in txs])
         return self.sout(txs)

+    async def blob_clean(self):
+        return await self.out(self.daemon.jsonrpc_blob_clean())
+
+    async def status(self):
+        return await self.out(self.daemon.jsonrpc_status())
+
     async def resolve(self, uri, **kwargs):
         return (await self.out(self.daemon.jsonrpc_resolve(uri, **kwargs)))[uri]

     async def claim_search(self, **kwargs):
         return (await self.out(self.daemon.jsonrpc_claim_search(**kwargs)))['items']

+    async def get_claim_by_claim_id(self, claim_id):
+        return await self.out(self.ledger.get_claim_by_claim_id(claim_id))
+
     async def file_list(self, *args, **kwargs):
         return (await self.out(self.daemon.jsonrpc_file_list(*args, **kwargs)))['items']

@@ -604,6 +718,9 @@ class CommandTestCase(IntegrationTestCase):
     async def transaction_list(self, *args, **kwargs):
         return (await self.out(self.daemon.jsonrpc_transaction_list(*args, **kwargs)))['items']

+    async def blob_list(self, *args, **kwargs):
+        return (await self.out(self.daemon.jsonrpc_blob_list(*args, **kwargs)))['items']
+
     @staticmethod
     def get_claim_id(tx):
         return tx['outputs'][0]['claim_id']
@@ -10,47 +10,13 @@ from typing import Optional
 import libtorrent


-NOTIFICATION_MASKS = [
-    "error",
-    "peer",
-    "port_mapping",
-    "storage",
-    "tracker",
-    "debug",
-    "status",
-    "progress",
-    "ip_block",
-    "dht",
-    "stats",
-    "session_log",
-    "torrent_log",
-    "peer_log",
-    "incoming_request",
-    "dht_log",
-    "dht_operation",
-    "port_mapping_log",
-    "picker_log",
-    "file_progress",
-    "piece_progress",
-    "upload",
-    "block_progress"
-]
 log = logging.getLogger(__name__)


 DEFAULT_FLAGS = (  # fixme: somehow the logic here is inverted?
     libtorrent.add_torrent_params_flags_t.flag_auto_managed
     | libtorrent.add_torrent_params_flags_t.flag_update_subscribe
 )


-def get_notification_type(notification) -> str:
-    for i, notification_type in enumerate(NOTIFICATION_MASKS):
-        if (1 << i) & notification:
-            return notification_type
-    raise ValueError("unrecognized notification type")
-
-
 class TorrentHandle:
     def __init__(self, loop, executor, handle):
         self._loop = loop
@@ -121,7 +87,7 @@ class TorrentHandle:
             self._show_status()
             if self.finished.is_set():
                 break
-            await asyncio.sleep(0.1, loop=self._loop)
+            await asyncio.sleep(0.1)

     async def pause(self):
         await self._loop.run_in_executor(
@@ -156,10 +122,8 @@ class TorrentSession:
     async def bind(self, interface: str = '0.0.0.0', port: int = 10889):
         settings = {
             'listen_interfaces': f"{interface}:{port}",
-            'enable_outgoing_utp': True,
-            'enable_incoming_utp': True,
-            'enable_outgoing_tcp': False,
-            'enable_incoming_tcp': False
+            'enable_natpmp': False,
+            'enable_upnp': False
         }
         self._session = await self._loop.run_in_executor(
             self._executor, libtorrent.session, settings  # pylint: disable=c-extension-no-member
@@ -186,7 +150,7 @@ class TorrentSession:
             await self._loop.run_in_executor(
                 self._executor, self._pop_alerts
             )
-            await asyncio.sleep(1, loop=self._loop)
+            await asyncio.sleep(1)

     async def pause(self):
         await self._loop.run_in_executor(

@@ -36,7 +36,7 @@ class Torrent:
     def __init__(self, loop, handle):
         self._loop = loop
         self._handle = handle
-        self.finished = asyncio.Event(loop=loop)
+        self.finished = asyncio.Event()

     def _threaded_update_status(self):
         status = self._handle.status()
@@ -58,7 +58,7 @@ class Torrent:
                 log.info("finished downloading torrent!")
                 await self.pause()
                 break
-            await asyncio.sleep(1, loop=self._loop)
+            await asyncio.sleep(1)

     async def pause(self):
         log.info("pause torrent")
@@ -74,7 +74,7 @@ class TorrentSource(ManagedDownloadSource):
     def bt_infohash(self):
         return self.identifier

-    def stop_tasks(self):
+    async def stop_tasks(self):
         pass

     @property
@@ -118,8 +118,8 @@ class TorrentManager(SourceManager):
     async def start(self):
         await super().start()

-    def stop(self):
-        super().stop()
+    async def stop(self):
+        await super().stop()
         log.info("finished stopping the torrent manager")

     async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):
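The asyncio.sleep(..., loop=...) and asyncio.Event(loop=...) changes above track the removal of the deprecated loop argument, which was dropped in Python 3.10; coroutines already run on the loop that scheduled them, so the explicit parameter is redundant. In short:

    import asyncio

    async def poll(handle):
        # old style, rejected on Python 3.10+: await asyncio.sleep(1, loop=asyncio.get_event_loop())
        # new style: the sleep implicitly uses the running loop
        while not handle.finished.is_set():
            await asyncio.sleep(1)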
285	lbry/torrent/tracker.py	Normal file
@@ -0,0 +1,285 @@
import random
import socket
import string
import struct
import asyncio
import logging
import time
import ipaddress
from collections import namedtuple
from functools import reduce
from typing import Optional

from lbry.dht.node import get_kademlia_peers_from_hosts
from lbry.utils import resolve_host, async_timed_cache, cache_concurrent
from lbry.wallet.stream import StreamController
from lbry import version

log = logging.getLogger(__name__)
CONNECTION_EXPIRES_AFTER_SECONDS = 50
PREFIX = 'LB'  # todo: PR BEP20 to add ourselves
DEFAULT_TIMEOUT_SECONDS = 10.0
DEFAULT_CONCURRENCY_LIMIT = 100
# see: http://bittorrent.org/beps/bep_0015.html and http://xbtt.sourceforge.net/udp_tracker_protocol.html
ConnectRequest = namedtuple("ConnectRequest", ["connection_id", "action", "transaction_id"])
ConnectResponse = namedtuple("ConnectResponse", ["action", "transaction_id", "connection_id"])
AnnounceRequest = namedtuple("AnnounceRequest",
                             ["connection_id", "action", "transaction_id", "info_hash", "peer_id", "downloaded", "left",
                              "uploaded", "event", "ip_addr", "key", "num_want", "port"])
AnnounceResponse = namedtuple("AnnounceResponse",
                              ["action", "transaction_id", "interval", "leechers", "seeders", "peers"])
CompactIPv4Peer = namedtuple("CompactPeer", ["address", "port"])
ScrapeRequest = namedtuple("ScrapeRequest", ["connection_id", "action", "transaction_id", "infohashes"])
ScrapeResponse = namedtuple("ScrapeResponse", ["action", "transaction_id", "items"])
ScrapeResponseItem = namedtuple("ScrapeResponseItem", ["seeders", "completed", "leechers"])
ErrorResponse = namedtuple("ErrorResponse", ["action", "transaction_id", "message"])
structs = {
    ConnectRequest: struct.Struct(">QII"),
    ConnectResponse: struct.Struct(">IIQ"),
    AnnounceRequest: struct.Struct(">QII20s20sQQQIIIiH"),
    AnnounceResponse: struct.Struct(">IIIII"),
    CompactIPv4Peer: struct.Struct(">IH"),
    ScrapeRequest: struct.Struct(">QII"),
    ScrapeResponse: struct.Struct(">II"),
    ScrapeResponseItem: struct.Struct(">III"),
    ErrorResponse: struct.Struct(">II")
}


def decode(cls, data, offset=0):
    decoder = structs[cls]
    if cls is AnnounceResponse:
        return AnnounceResponse(*decoder.unpack_from(data, offset),
                                peers=[decode(CompactIPv4Peer, data, index) for index in range(20, len(data), 6)])
    elif cls is ScrapeResponse:
        return ScrapeResponse(*decoder.unpack_from(data, offset),
                              items=[decode(ScrapeResponseItem, data, index) for index in range(8, len(data), 12)])
    elif cls is ErrorResponse:
        return ErrorResponse(*decoder.unpack_from(data, offset), data[decoder.size:])
    return cls(*decoder.unpack_from(data, offset))


def encode(obj):
    if isinstance(obj, ScrapeRequest):
        return structs[ScrapeRequest].pack(*obj[:-1]) + b''.join(obj.infohashes)
    elif isinstance(obj, ErrorResponse):
        return structs[ErrorResponse].pack(*obj[:-1]) + obj.message
    elif isinstance(obj, AnnounceResponse):
        return structs[AnnounceResponse].pack(*obj[:-1]) + b''.join([encode(peer) for peer in obj.peers])
    return structs[type(obj)].pack(*obj)


def make_peer_id(random_part: Optional[str] = None) -> bytes:
    # see https://wiki.theory.org/BitTorrentSpecification#peer_id and https://www.bittorrent.org/beps/bep_0020.html
    # not to confuse with node id; peer id identifies uniquely the software, version and instance
    random_part = random_part or ''.join(random.choice(string.ascii_letters) for _ in range(20))
    return f"{PREFIX}-{'-'.join(map(str, version))}-{random_part}"[:20].encode()


class UDPTrackerClientProtocol(asyncio.DatagramProtocol):
    def __init__(self, timeout: float = DEFAULT_TIMEOUT_SECONDS):
        self.transport = None
        self.data_queue = {}
        self.timeout = timeout
        self.semaphore = asyncio.Semaphore(DEFAULT_CONCURRENCY_LIMIT)

    def connection_made(self, transport: asyncio.DatagramTransport) -> None:
        self.transport = transport

    async def request(self, obj, tracker_ip, tracker_port):
        self.data_queue[obj.transaction_id] = asyncio.get_running_loop().create_future()
        try:
            async with self.semaphore:
                self.transport.sendto(encode(obj), (tracker_ip, tracker_port))
                return await asyncio.wait_for(self.data_queue[obj.transaction_id], self.timeout)
        finally:
            self.data_queue.pop(obj.transaction_id, None)

    async def connect(self, tracker_ip, tracker_port):
        transaction_id = random.getrandbits(32)
        return decode(ConnectResponse,
                      await self.request(ConnectRequest(0x41727101980, 0, transaction_id), tracker_ip, tracker_port))

    @cache_concurrent
    @async_timed_cache(CONNECTION_EXPIRES_AFTER_SECONDS)
    async def ensure_connection_id(self, peer_id, tracker_ip, tracker_port):
        # peer_id is just to ensure cache coherency
        return (await self.connect(tracker_ip, tracker_port)).connection_id

    async def announce(self, info_hash, peer_id, port, tracker_ip, tracker_port, stopped=False):
        connection_id = await self.ensure_connection_id(peer_id, tracker_ip, tracker_port)
        # this should make the key deterministic but unique per info hash + peer id
        key = int.from_bytes(info_hash[:4], "big") ^ int.from_bytes(peer_id[:4], "big") ^ port
        transaction_id = random.getrandbits(32)
        req = AnnounceRequest(
            connection_id, 1, transaction_id, info_hash, peer_id, 0, 0, 0, 3 if stopped else 1, 0, key, -1, port)
        return decode(AnnounceResponse, await self.request(req, tracker_ip, tracker_port))

    async def scrape(self, infohashes, tracker_ip, tracker_port, connection_id=None):
        connection_id = await self.ensure_connection_id(None, tracker_ip, tracker_port)
        transaction_id = random.getrandbits(32)
        reply = await self.request(
            ScrapeRequest(connection_id, 2, transaction_id, infohashes), tracker_ip, tracker_port)
        return decode(ScrapeResponse, reply), connection_id

    def datagram_received(self, data: bytes, addr: (str, int)) -> None:
        if len(data) < 8:
            return
        transaction_id = int.from_bytes(data[4:8], byteorder="big", signed=False)
        if transaction_id in self.data_queue:
            if not self.data_queue[transaction_id].done():
                if data[3] == 3:
                    return self.data_queue[transaction_id].set_exception(Exception(decode(ErrorResponse, data).message))
                return self.data_queue[transaction_id].set_result(data)
        log.debug("unexpected packet (can be a response for a previously timed out request): %s", data.hex())

    def connection_lost(self, exc: Exception = None) -> None:
        self.transport = None


class TrackerClient:
    event_controller = StreamController()

    def __init__(self, node_id, announce_port, get_servers, timeout=10.0):
        self.client = UDPTrackerClientProtocol(timeout=timeout)
        self.transport = None
        self.peer_id = make_peer_id(node_id.hex() if node_id else None)
        self.announce_port = announce_port
        self._get_servers = get_servers
        self.results = {}  # we can't probe the server before the interval, so we keep the result here until it expires
        self.tasks = {}

    async def start(self):
        self.transport, _ = await asyncio.get_running_loop().create_datagram_endpoint(
            lambda: self.client, local_addr=("0.0.0.0", 0))
        self.event_controller.stream.listen(
            lambda request: self.on_hash(request[1], request[2]) if request[0] == 'search' else None)

    def stop(self):
        while self.tasks:
            self.tasks.popitem()[1].cancel()
        if self.transport is not None:
            self.transport.close()
        self.client = None
        self.transport = None
        self.event_controller.close()

    def on_hash(self, info_hash, on_announcement=None):
        if info_hash not in self.tasks:
            task = asyncio.create_task(self.get_peer_list(info_hash, on_announcement=on_announcement))
            task.add_done_callback(lambda *_: self.tasks.pop(info_hash, None))
            self.tasks[info_hash] = task

    async def announce_many(self, *info_hashes, stopped=False):
        await asyncio.gather(
            *[self._announce_many(server, info_hashes, stopped=stopped) for server in self._get_servers()],
            return_exceptions=True)

    async def _announce_many(self, server, info_hashes, stopped=False):
        tracker_ip = await resolve_host(*server, 'udp')
        still_good_info_hashes = {
            info_hash for (info_hash, (next_announcement, _)) in self.results.get(tracker_ip, {}).items()
            if time.time() < next_announcement
        }
        results = await asyncio.gather(
            *[self._probe_server(info_hash, tracker_ip, server[1], stopped=stopped)
              for info_hash in info_hashes if info_hash not in still_good_info_hashes],
            return_exceptions=True)
        if results:
            errors = sum([1 for result in results if result is None or isinstance(result, Exception)])
            log.info("Tracker: finished announcing %d files to %s:%d, %d errors", len(results), *server, errors)

    async def get_peer_list(self, info_hash, stopped=False, on_announcement=None, no_port=False):
        found = []
        probes = [self._probe_server(info_hash, *server, stopped, no_port) for server in self._get_servers()]
        for done in asyncio.as_completed(probes):
            result = await done
            if result is not None:
                await asyncio.gather(*filter(asyncio.iscoroutine, [on_announcement(result)] if on_announcement else []))
                found.append(result)
        return found

    async def get_kademlia_peer_list(self, info_hash):
        responses = await self.get_peer_list(info_hash, no_port=True)
        return await announcement_to_kademlia_peers(*responses)

    async def _probe_server(self, info_hash, tracker_host, tracker_port, stopped=False, no_port=False):
        result = None
        try:
            tracker_host = await resolve_host(tracker_host, tracker_port, 'udp')
        except socket.error:
            log.warning("DNS failure while resolving tracker host: %s, skipping.", tracker_host)
            return
        self.results.setdefault(tracker_host, {})
        if info_hash in self.results[tracker_host]:
            next_announcement, result = self.results[tracker_host][info_hash]
            if time.time() < next_announcement:
                return result
        try:
            result = await self.client.announce(
                info_hash, self.peer_id, 0 if no_port else self.announce_port, tracker_host, tracker_port, stopped)
            self.results[tracker_host][info_hash] = (time.time() + result.interval, result)
        except asyncio.TimeoutError:  # todo: this is UDP, timeout is common, we need a better metric for failures
            self.results[tracker_host][info_hash] = (time.time() + 60.0, result)
            log.debug("Tracker timed out: %s:%d", tracker_host, tracker_port)
            return None
        log.debug("Announced: %s found %d peers for %s", tracker_host, len(result.peers), info_hash.hex()[:8])
        return result


def enqueue_tracker_search(info_hash: bytes, peer_q: asyncio.Queue):
    async def on_announcement(announcement: AnnounceResponse):
        peers = await announcement_to_kademlia_peers(announcement)
        log.info("Found %d peers from tracker for %s", len(peers), info_hash.hex()[:8])
        peer_q.put_nowait(peers)
    TrackerClient.event_controller.add(('search', info_hash, on_announcement))


def announcement_to_kademlia_peers(*announcements: AnnounceResponse):
    peers = [
        (str(ipaddress.ip_address(peer.address)), peer.port)
        for announcement in announcements for peer in announcement.peers if peer.port > 1024  # no privileged or 0
    ]
    return get_kademlia_peers_from_hosts(peers)


class UDPTrackerServerProtocol(asyncio.DatagramProtocol):  # for testing. Not suitable for production
    def __init__(self):
        self.transport = None
        self.known_conns = set()
        self.peers = {}

    def connection_made(self, transport: asyncio.DatagramTransport) -> None:
        self.transport = transport

    def add_peer(self, info_hash, ip_address: str, port: int):
        self.peers.setdefault(info_hash, [])
        self.peers[info_hash].append(encode_peer(ip_address, port))

    def datagram_received(self, data: bytes, addr: (str, int)) -> None:
        if len(data) < 16:
            return
        action = int.from_bytes(data[8:12], "big", signed=False)
        if action == 0:
            req = decode(ConnectRequest, data)
            connection_id = random.getrandbits(32)
            self.known_conns.add(connection_id)
            return self.transport.sendto(encode(ConnectResponse(0, req.transaction_id, connection_id)), addr)
        elif action == 1:
            req = decode(AnnounceRequest, data)
            if req.connection_id not in self.known_conns:
                resp = encode(ErrorResponse(3, req.transaction_id, b'Connection ID missmatch.\x00'))
            else:
                compact_address = encode_peer(addr[0], req.port)
                if req.event != 3:
                    self.add_peer(req.info_hash, addr[0], req.port)
                elif compact_address in self.peers.get(req.info_hash, []):
                    self.peers[req.info_hash].remove(compact_address)
                peers = [decode(CompactIPv4Peer, peer) for peer in self.peers[req.info_hash]]
                resp = encode(AnnounceResponse(1, req.transaction_id, 1700, 0, len(peers), peers))
            return self.transport.sendto(resp, addr)


def encode_peer(ip_address: str, port: int):
    compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), ip_address.split('.'), bytearray())
    return compact_ip + port.to_bytes(2, "big", signed=False)
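As a quick illustration of the wire format above: a connect packet is 16 bytes (">QII": 64-bit protocol magic, 32-bit action, 32-bit transaction id) and each compact peer entry is 6 bytes (">IH": IPv4 address plus port). A small round-trip sketch using the helpers defined in this file (assumes lbry.torrent.tracker is importable as added here):

    from lbry.torrent.tracker import (
        ConnectRequest, ConnectResponse, CompactIPv4Peer, decode, encode, encode_peer)

    payload = encode(ConnectRequest(connection_id=0x41727101980, action=0, transaction_id=12345))
    assert len(payload) == 16  # ">QII" packs to 8 + 4 + 4 bytes

    resp = decode(ConnectResponse, encode(ConnectResponse(0, 12345, 99)))
    assert resp.connection_id == 99

    peer = decode(CompactIPv4Peer, encode_peer('127.0.0.1', 5567))  # 4-byte address + 2-byte port
    assert peer.port == 5567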
249	lbry/utils.py
@@ -20,8 +20,10 @@ import pkg_resources

 import certifi
 import aiohttp
+from prometheus_client import Counter
 from lbry.schema.claim import Claim


 log = logging.getLogger(__name__)


@@ -102,10 +104,6 @@ def check_connection(server="lbry.com", port=80, timeout=5) -> bool:
         return False


-async def async_check_connection(server="lbry.com", port=80, timeout=1) -> bool:
-    return await asyncio.get_event_loop().run_in_executor(None, check_connection, server, port, timeout)
-
-
 def random_string(length=10, chars=string.ascii_lowercase):
     return ''.join([random.choice(chars) for _ in range(length)])


@@ -132,21 +130,16 @@ def get_sd_hash(stream_info):
 def json_dumps_pretty(obj, **kwargs):
     return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '), **kwargs)


-def cancel_task(task: typing.Optional[asyncio.Task]):
-    if task and not task.done():
-        task.cancel()
-
-
-def cancel_tasks(tasks: typing.List[typing.Optional[asyncio.Task]]):
-    for task in tasks:
-        cancel_task(task)
-
-
-def drain_tasks(tasks: typing.List[typing.Optional[asyncio.Task]]):
-    while tasks:
-        cancel_task(tasks.pop())
+try:
+    # the standard contextlib.aclosing() is available in 3.10+
+    from contextlib import aclosing  # pylint: disable=unused-import
+except ImportError:
+    @contextlib.asynccontextmanager
+    async def aclosing(thing):
+        try:
+            yield thing
+        finally:
+            await thing.aclose()


 def async_timed_cache(duration: int):
     def wrapper(func):
@@ -157,7 +150,7 @@ def async_timed_cache(duration: int):
         async def _inner(*args, **kwargs) -> typing.Any:
             loop = asyncio.get_running_loop()
             time_now = loop.time()
-            key = tuple([args, tuple([tuple([k, kwargs[k]]) for k in kwargs])])
+            key = (args, tuple(kwargs.items()))
             if key in cache and (time_now - cache[key][1] < duration):
                 return cache[key][0]
             to_cache = await func(*args, **kwargs)
@@ -175,7 +168,7 @@ def cache_concurrent(async_fn):

     @functools.wraps(async_fn)
     async def wrapper(*args, **kwargs):
-        key = tuple([args, tuple([tuple([k, kwargs[k]]) for k in kwargs])])
+        key = (args, tuple(kwargs.items()))
         cache[key] = cache.get(key) or asyncio.create_task(async_fn(*args, **kwargs))
         try:
             return await cache[key]
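The key construction change above keeps the cache key hashable while avoiding the nested tuple-of-tuples dance: (args, tuple(kwargs.items())) yields the same lookups with less work. A small, illustrative use of the decorator (function names here are made up):

    import asyncio
    from lbry.utils import async_timed_cache

    @async_timed_cache(10)
    async def slow_lookup(host, port=80):
        await asyncio.sleep(0.1)  # stand-in for real work
        return f"{host}:{port}"

    async def main():
        # both calls build the key (('example.com',), (('port', 80),)); the second hit is served from cache
        assert await slow_lookup('example.com', port=80) == await slow_lookup('example.com', port=80)

    asyncio.run(main())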
@@ -189,6 +182,8 @@ def cache_concurrent(async_fn):
 async def resolve_host(url: str, port: int, proto: str) -> str:
     if proto not in ['udp', 'tcp']:
         raise Exception("invalid protocol")
+    if url.lower() == 'localhost':
+        return '127.0.0.1'
     try:
         if ipaddress.ip_address(url):
             return url
@@ -203,18 +198,44 @@ async def resolve_host(url: str, port: int, proto: str) -> str:
     ))[0][4][0]

class LRUCache:
|
class LRUCacheWithMetrics:
|
||||||
__slots__ = [
|
__slots__ = [
|
||||||
'capacity',
|
'capacity',
|
||||||
'cache'
|
'cache',
|
||||||
|
'_track_metrics',
|
||||||
|
'hits',
|
||||||
|
'misses'
|
||||||
]
|
]
|
||||||
|
|
||||||
def __init__(self, capacity):
|
def __init__(self, capacity: int, metric_name: typing.Optional[str] = None, namespace: str = "daemon_cache"):
|
||||||
self.capacity = capacity
|
self.capacity = capacity
|
||||||
self.cache = collections.OrderedDict()
|
self.cache = collections.OrderedDict()
|
||||||
|
if metric_name is None:
|
||||||
|
self._track_metrics = False
|
||||||
|
self.hits = self.misses = None
|
||||||
|
else:
|
||||||
|
self._track_metrics = True
|
||||||
|
try:
|
||||||
|
self.hits = Counter(
|
||||||
|
f"{metric_name}_cache_hit_count", "Number of cache hits", namespace=namespace
|
||||||
|
)
|
||||||
|
self.misses = Counter(
|
||||||
|
f"{metric_name}_cache_miss_count", "Number of cache misses", namespace=namespace
|
||||||
|
)
|
||||||
|
except ValueError as err:
|
||||||
|
log.debug("failed to set up prometheus %s_cache_miss_count metric: %s", metric_name, err)
|
||||||
|
self._track_metrics = False
|
||||||
|
self.hits = self.misses = None
|
||||||
|
|
||||||
def get(self, key):
|
def get(self, key, default=None):
|
||||||
value = self.cache.pop(key)
|
try:
|
||||||
|
value = self.cache.pop(key)
|
||||||
|
if self._track_metrics:
|
||||||
|
self.hits.inc()
|
||||||
|
except KeyError:
|
||||||
|
if self._track_metrics:
|
||||||
|
self.misses.inc()
|
||||||
|
return default
|
||||||
self.cache[key] = value
|
self.cache[key] = value
|
||||||
return value
|
return value
|
||||||
|
|
||||||
|
@ -226,22 +247,97 @@ class LRUCache:
|
||||||
self.cache.popitem(last=False)
|
self.cache.popitem(last=False)
|
||||||
self.cache[key] = value
|
self.cache[key] = value
|
||||||
|
|
||||||
|
def clear(self):
|
||||||
|
self.cache.clear()
|
||||||
|
|
||||||
|
def pop(self, key):
|
||||||
|
return self.cache.pop(key)
|
||||||
|
|
||||||
|
def __setitem__(self, key, value):
|
||||||
|
return self.set(key, value)
|
||||||
|
|
||||||
|
def __getitem__(self, item):
|
||||||
|
return self.get(item)
|
||||||
|
|
||||||
def __contains__(self, item) -> bool:
|
def __contains__(self, item) -> bool:
|
||||||
return item in self.cache
|
return item in self.cache
|
||||||
|
|
||||||
|
def __len__(self):
|
||||||
|
return len(self.cache)
|
||||||
|
|
||||||
|
def __delitem__(self, key):
|
||||||
|
self.cache.pop(key)
|
||||||
|
|
||||||
|
def __del__(self):
|
||||||
|
self.clear()
|
||||||
|
|
||||||
|
|
||||||
|
class LRUCache:
|
||||||
|
__slots__ = [
|
||||||
|
'capacity',
|
||||||
|
'cache'
|
||||||
|
]
|
||||||
|
|
||||||
|
def __init__(self, capacity: int):
|
||||||
|
self.capacity = capacity
|
||||||
|
self.cache = collections.OrderedDict()
|
||||||
|
|
||||||
|
def get(self, key, default=None):
|
||||||
|
try:
|
||||||
|
value = self.cache.pop(key)
|
||||||
|
except KeyError:
|
||||||
|
return default
|
||||||
|
self.cache[key] = value
|
||||||
|
return value
|
||||||
|
|
||||||
|
def set(self, key, value):
|
||||||
|
try:
|
||||||
|
self.cache.pop(key)
|
||||||
|
except KeyError:
|
||||||
|
if len(self.cache) >= self.capacity:
|
||||||
|
self.cache.popitem(last=False)
|
||||||
|
self.cache[key] = value
|
||||||
|
|
||||||
|
def items(self):
|
||||||
|
return self.cache.items()
|
||||||
|
|
||||||
|
def clear(self):
|
||||||
|
self.cache.clear()
|
||||||
|
|
||||||
|
def pop(self, key, default=None):
|
||||||
|
return self.cache.pop(key, default)
|
||||||
|
|
||||||
|
def __setitem__(self, key, value):
|
||||||
|
return self.set(key, value)
|
||||||
|
|
||||||
|
def __getitem__(self, item):
|
||||||
|
return self.get(item)
|
||||||
|
|
||||||
|
def __contains__(self, item) -> bool:
|
||||||
|
return item in self.cache
|
||||||
|
|
||||||
|
def __len__(self):
|
||||||
|
return len(self.cache)
|
||||||
|
|
||||||
|
def __delitem__(self, key):
|
||||||
|
self.cache.pop(key)
|
||||||
|
|
||||||
|
def __del__(self):
|
||||||
|
self.clear()
|
||||||
|
|
||||||
|
|
||||||
def lru_cache_concurrent(cache_size: typing.Optional[int] = None,
|
def lru_cache_concurrent(cache_size: typing.Optional[int] = None,
|
||||||
override_lru_cache: typing.Optional[LRUCache] = None):
|
override_lru_cache: typing.Optional[LRUCacheWithMetrics] = None):
|
||||||
if not cache_size and override_lru_cache is None:
|
if not cache_size and override_lru_cache is None:
|
||||||
raise ValueError("invalid cache size")
|
raise ValueError("invalid cache size")
|
||||||
concurrent_cache = {}
|
concurrent_cache = {}
|
||||||
lru_cache = override_lru_cache or LRUCache(cache_size)
|
lru_cache = override_lru_cache if override_lru_cache is not None else LRUCacheWithMetrics(cache_size)
|
||||||
|
|
||||||
def wrapper(async_fn):
|
def wrapper(async_fn):
|
||||||
|
|
||||||
@functools.wraps(async_fn)
|
@functools.wraps(async_fn)
|
||||||
async def _inner(*args, **kwargs):
|
async def _inner(*args, **kwargs):
|
||||||
key = tuple([args, tuple([tuple([k, kwargs[k]]) for k in kwargs])])
|
key = (args, tuple(kwargs.items()))
|
||||||
if key in lru_cache:
|
if key in lru_cache:
|
||||||
return lru_cache.get(key)
|
return lru_cache.get(key)
|
||||||
|
|
||||||
|
@ -266,18 +362,86 @@ def get_ssl_context() -> ssl.SSLContext:
|
||||||
@contextlib.asynccontextmanager
|
@contextlib.asynccontextmanager
|
||||||
async def aiohttp_request(method, url, **kwargs) -> typing.AsyncContextManager[aiohttp.ClientResponse]:
|
async def aiohttp_request(method, url, **kwargs) -> typing.AsyncContextManager[aiohttp.ClientResponse]:
|
||||||
async with aiohttp.ClientSession() as session:
|
async with aiohttp.ClientSession() as session:
|
||||||
async with session.request(method, url, ssl=get_ssl_context(), **kwargs) as response:
|
async with session.request(method, url, **kwargs) as response:
|
||||||
yield response
|
yield response
|
||||||
|
|
||||||
|
|
||||||
async def get_external_ip() -> typing.Optional[str]: # used if upnp is disabled or non-functioning
|
# the ipaddress module does not show these subnets as reserved
|
||||||
|
CARRIER_GRADE_NAT_SUBNET = ipaddress.ip_network('100.64.0.0/10')
|
||||||
|
IPV4_TO_6_RELAY_SUBNET = ipaddress.ip_network('192.88.99.0/24')
|
||||||
|
|
||||||
|
|
||||||
|
def is_valid_public_ipv4(address, allow_localhost: bool = False, allow_lan: bool = False):
|
||||||
|
try:
|
||||||
|
parsed_ip = ipaddress.ip_address(address)
|
||||||
|
if parsed_ip.is_loopback and allow_localhost:
|
||||||
|
return True
|
||||||
|
if allow_lan and parsed_ip.is_private:
|
||||||
|
return True
|
||||||
|
if any((parsed_ip.version != 4, parsed_ip.is_unspecified, parsed_ip.is_link_local, parsed_ip.is_loopback,
|
||||||
|
parsed_ip.is_multicast, parsed_ip.is_reserved, parsed_ip.is_private)):
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
return not any((CARRIER_GRADE_NAT_SUBNET.supernet_of(ipaddress.ip_network(f"{address}/32")),
|
||||||
|
IPV4_TO_6_RELAY_SUBNET.supernet_of(ipaddress.ip_network(f"{address}/32"))))
|
||||||
|
except (ipaddress.AddressValueError, ValueError):
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
async def fallback_get_external_ip(): # used if spv servers can't be used for ip detection
|
||||||
try:
|
try:
|
||||||
async with aiohttp_request("get", "https://api.lbry.com/ip") as resp:
|
async with aiohttp_request("get", "https://api.lbry.com/ip") as resp:
|
||||||
response = await resp.json()
|
response = await resp.json()
|
||||||
if response['success']:
|
if response['success']:
|
||||||
return response['data']['ip']
|
return response['data']['ip'], None
|
||||||
except Exception:
|
except Exception:
|
||||||
return
|
return None, None
|
||||||
|
|
||||||
|
|
||||||
|
async def _get_external_ip(default_servers) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]:
|
||||||
|
# used if upnp is disabled or non-functioning
|
||||||
|
from lbry.wallet.udp import SPVStatusClientProtocol # pylint: disable=C0415
|
||||||
|
|
||||||
|
hostname_to_ip = {}
|
||||||
|
ip_to_hostnames = collections.defaultdict(list)
|
||||||
|
|
||||||
|
async def resolve_spv(server, port):
|
||||||
|
try:
|
||||||
|
server_addr = await resolve_host(server, port, 'udp')
|
||||||
|
hostname_to_ip[server] = (server_addr, port)
|
||||||
|
ip_to_hostnames[(server_addr, port)].append(server)
|
||||||
|
except Exception:
|
||||||
|
log.exception("error looking up dns for spv servers")
|
||||||
|
|
||||||
|
# accumulate the dns results
|
||||||
|
await asyncio.gather(*(resolve_spv(server, port) for (server, port) in default_servers))
|
||||||
|
|
||||||
|
loop = asyncio.get_event_loop()
|
||||||
|
pong_responses = asyncio.Queue()
|
||||||
|
connection = SPVStatusClientProtocol(pong_responses)
|
||||||
|
try:
|
||||||
|
await loop.create_datagram_endpoint(lambda: connection, ('0.0.0.0', 0))
|
||||||
|
# could raise OSError if it cant bind
|
||||||
|
randomized_servers = list(ip_to_hostnames.keys())
|
||||||
|
random.shuffle(randomized_servers)
|
||||||
|
for server in randomized_servers:
|
||||||
|
connection.ping(server)
|
||||||
|
try:
|
||||||
|
_, pong = await asyncio.wait_for(pong_responses.get(), 1)
|
||||||
|
if is_valid_public_ipv4(pong.ip_address):
|
||||||
|
return pong.ip_address, ip_to_hostnames[server][0]
|
||||||
|
except asyncio.TimeoutError:
|
||||||
|
pass
|
||||||
|
return None, None
|
||||||
|
finally:
|
||||||
|
connection.close()
|
||||||
|
|
||||||
|
|
||||||
|
async def get_external_ip(default_servers) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]:
|
||||||
|
ip_from_spv_servers = await _get_external_ip(default_servers)
|
||||||
|
if not ip_from_spv_servers[1]:
|
||||||
|
return await fallback_get_external_ip()
|
||||||
|
return ip_from_spv_servers
|
||||||
|
|
||||||
|
|
||||||
@@ -286,8 +450,8 @@ def is_running_from_bundle():


 class LockWithMetrics(asyncio.Lock):
-    def __init__(self, acquire_metric, held_time_metric, loop=None):
-        super().__init__(loop=loop)
+    def __init__(self, acquire_metric, held_time_metric):
+        super().__init__()
         self._acquire_metric = acquire_metric
         self._lock_held_time_metric = held_time_metric
         self._lock_acquired_time = None
@@ -305,3 +469,18 @@ class LockWithMetrics(asyncio.Lock):
             return super().release()
         finally:
             self._lock_held_time_metric.observe(time.perf_counter() - self._lock_acquired_time)
+
+
+def get_colliding_prefix_bits(first_value: bytes, second_value: bytes):
+    """
+    Calculates the amount of colliding prefix bits between <first_value> and <second_value>.
+    This is given by the amount of bits that are the same until the first different one (via XOR),
+    starting from the most significant bit to the least significant bit.
+    :param first_value: first value to compare, bigger than size.
+    :param second_value: second value to compare, bigger than size.
+    :return: amount of prefix colliding bits.
+    """
+    assert len(first_value) == len(second_value), "length should be the same"
+    size = len(first_value) * 8
+    first_value, second_value = int.from_bytes(first_value, "big"), int.from_bytes(second_value, "big")
+    return size - (first_value ^ second_value).bit_length()
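A worked example of the helper above: 0xF0 is 1111 0000 and 0xFF is 1111 1111, their XOR is 0x0F with bit length 4, so the shared prefix is 8 - 4 = 4 bits.

    assert get_colliding_prefix_bits(b'\xf0', b'\xff') == 4     # 1111|0000 vs 1111|1111
    assert get_colliding_prefix_bits(b'\x00\x00', b'\x00\x01') == 15
    assert get_colliding_prefix_bits(b'\xaa', b'\xaa') == 8     # identical bytes share every bit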
@@ -1,17 +1,23 @@
-__node_daemon__ = 'lbrycrdd'
-__node_cli__ = 'lbrycrd-cli'
-__node_bin__ = ''
-__node_url__ = (
-    'https://github.com/lbryio/lbrycrd/releases/download/v0.17.4.5/lbrycrd-linux-1745.zip'
+__lbcd__ = 'lbcd'
+__lbcctl__ = 'lbcctl'
+__lbcwallet__ = 'lbcwallet'
+__lbcd_url__ = (
+    'https://github.com/lbryio/lbcd/releases/download/' +
+    'v0.22.100-rc.0/lbcd_0.22.100-rc.0_TARGET_PLATFORM.tar.gz'
+)
+__lbcwallet_url__ = (
+    'https://github.com/lbryio/lbcwallet/releases/download/' +
+    'v0.13.100-alpha.0/lbcwallet_0.13.100-alpha.0_TARGET_PLATFORM.tar.gz'
 )
 __spvserver__ = 'lbry.wallet.server.coin.LBCRegTest'

-from .wallet import Wallet, WalletStorage, TimestampedPreferences, ENCRYPT_ON_DISK
-from .manager import WalletManager
-from .network import Network
-from .ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
-from .account import Account, AddressManager, SingleKey, HierarchicalDeterministic
-from .transaction import Transaction, Output, Input
-from .script import OutputScript, InputScript
-from .database import SQLiteMixin, Database
-from .header import Headers
+from lbry.wallet.wallet import Wallet, WalletStorage, TimestampedPreferences, ENCRYPT_ON_DISK
+from lbry.wallet.manager import WalletManager
+from lbry.wallet.network import Network
+from lbry.wallet.ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
+from lbry.wallet.account import Account, AddressManager, SingleKey, HierarchicalDeterministic, \
+    DeterministicChannelKeyManager
+from lbry.wallet.transaction import Transaction, Output, Input
+from lbry.wallet.script import OutputScript, InputScript
+from lbry.wallet.database import SQLiteMixin, Database
+from lbry.wallet.header import Headers
@@ -5,18 +5,16 @@ import logging
 import typing
 import asyncio
 import random
-from functools import partial
 from hashlib import sha256
 from string import hexdigits
 from typing import Type, Dict, Tuple, Optional, Any, List

-import ecdsa
 from lbry.error import InvalidPasswordError
 from lbry.crypto.crypt import aes_encrypt, aes_decrypt

-from .bip32 import PrivateKey, PubKey, from_extended_key_string
+from .bip32 import PrivateKey, PublicKey, KeyPath, from_extended_key_string
 from .mnemonic import Mnemonic
-from .constants import COIN, CLAIM_TYPES, TXO_TYPES
+from .constants import COIN, TXO_TYPES
 from .transaction import Transaction, Input, Output

 if typing.TYPE_CHECKING:
@@ -35,6 +33,49 @@ def validate_claim_id(claim_id):
         raise Exception("Claim id is not hex encoded")


+class DeterministicChannelKeyManager:
+
+    def __init__(self, account: 'Account'):
+        self.account = account
+        self.last_known = 0
+        self.cache = {}
+        self._private_key: Optional[PrivateKey] = None
+
+    @property
+    def private_key(self):
+        if self._private_key is None:
+            if self.account.private_key is not None:
+                self._private_key = self.account.private_key.child(KeyPath.CHANNEL)
+        return self._private_key
+
+    def maybe_generate_deterministic_key_for_channel(self, txo):
+        if self.private_key is None:
+            return
+        next_private_key = self.private_key.child(self.last_known)
+        public_key = next_private_key.public_key
+        public_key_bytes = public_key.pubkey_bytes
+        if txo.claim.channel.public_key_bytes == public_key_bytes:
+            self.cache[public_key.address] = next_private_key
+            self.last_known += 1
+
+    async def ensure_cache_primed(self):
+        if self.private_key is not None:
+            await self.generate_next_key()
+
+    async def generate_next_key(self) -> PrivateKey:
+        db = self.account.ledger.db
+        while True:
+            next_private_key = self.private_key.child(self.last_known)
+            public_key = next_private_key.public_key
+            self.cache[public_key.address] = next_private_key
+            if not await db.is_channel_key_used(self.account, public_key):
+                return next_private_key
+            self.last_known += 1
+
+    def get_private_key_from_pubkey_hash(self, pubkey_hash) -> PrivateKey:
+        return self.cache.get(pubkey_hash)
+
+
 class AddressManager:

     name: str
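In effect, the new manager derives channel keys on a dedicated branch of the account key: private_key.child(KeyPath.CHANNEL).child(n), advancing n until the database reports the key unused. A hedged sketch of how a caller could obtain the next channel key (the surrounding account and ledger setup is assumed, not shown in this diff):

    async def next_channel_key(account):
        # assumes `account` is an Account whose private key is loaded (not encrypted)
        manager = account.deterministic_channel_keys  # DeterministicChannelKeyManager(account)
        private_key = await manager.generate_next_key()
        # the derived key is cached by its public key address, so it can be looked up again later
        assert manager.get_private_key_from_pubkey_hash(private_key.public_key.address) is private_key
        return private_key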
@ -80,7 +121,7 @@ class AddressManager:
|
||||||
def get_private_key(self, index: int) -> PrivateKey:
|
def get_private_key(self, index: int) -> PrivateKey:
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
def get_public_key(self, index: int) -> PubKey:
|
def get_public_key(self, index: int) -> PublicKey:
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
async def get_max_gap(self):
|
async def get_max_gap(self):
|
||||||
|
@ -97,7 +138,8 @@ class AddressManager:
|
||||||
return [r['address'] for r in records]
|
return [r['address'] for r in records]
|
||||||
|
|
||||||
async def get_or_create_usable_address(self) -> str:
|
async def get_or_create_usable_address(self) -> str:
|
||||||
addresses = await self.get_addresses(only_usable=True, limit=10)
|
async with self.address_generator_lock:
|
||||||
|
addresses = await self.get_addresses(only_usable=True, limit=10)
|
||||||
if addresses:
|
if addresses:
|
||||||
return random.choice(addresses)
|
return random.choice(addresses)
|
||||||
addresses = await self.ensure_address_gap()
|
addresses = await self.ensure_address_gap()
|
||||||
|
@ -119,8 +161,8 @@ class HierarchicalDeterministic(AddressManager):
|
||||||
@classmethod
|
@classmethod
|
||||||
def from_dict(cls, account: 'Account', d: dict) -> Tuple[AddressManager, AddressManager]:
|
def from_dict(cls, account: 'Account', d: dict) -> Tuple[AddressManager, AddressManager]:
|
||||||
return (
|
return (
|
||||||
cls(account, 0, **d.get('receiving', {'gap': 20, 'maximum_uses_per_address': 1})),
|
cls(account, KeyPath.RECEIVE, **d.get('receiving', {'gap': 20, 'maximum_uses_per_address': 1})),
|
||||||
cls(account, 1, **d.get('change', {'gap': 6, 'maximum_uses_per_address': 1}))
|
cls(account, KeyPath.CHANGE, **d.get('change', {'gap': 6, 'maximum_uses_per_address': 1}))
|
||||||
)
|
)
|
||||||
|
|
||||||
def merge(self, d: dict):
|
def merge(self, d: dict):
|
||||||
|
@ -133,7 +175,7 @@ class HierarchicalDeterministic(AddressManager):
|
||||||
def get_private_key(self, index: int) -> PrivateKey:
|
def get_private_key(self, index: int) -> PrivateKey:
|
||||||
return self.account.private_key.child(self.chain_number).child(index)
|
return self.account.private_key.child(self.chain_number).child(index)
|
||||||
|
|
||||||
def get_public_key(self, index: int) -> PubKey:
|
def get_public_key(self, index: int) -> PublicKey:
|
||||||
return self.account.public_key.child(self.chain_number).child(index)
|
return self.account.public_key.child(self.chain_number).child(index)
|
||||||
|
|
||||||
async def get_max_gap(self) -> int:
|
async def get_max_gap(self) -> int:
|
||||||
|
@ -193,7 +235,7 @@ class SingleKey(AddressManager):
|
||||||
@classmethod
|
@classmethod
|
||||||
def from_dict(cls, account: 'Account', d: dict) \
|
def from_dict(cls, account: 'Account', d: dict) \
|
||||||
-> Tuple[AddressManager, AddressManager]:
|
-> Tuple[AddressManager, AddressManager]:
|
||||||
same_address_manager = cls(account, account.public_key, 0)
|
same_address_manager = cls(account, account.public_key, KeyPath.RECEIVE)
|
||||||
return same_address_manager, same_address_manager
|
return same_address_manager, same_address_manager
|
||||||
|
|
||||||
def to_dict_instance(self):
|
def to_dict_instance(self):
|
||||||
|
@@ -202,7 +244,7 @@ class SingleKey(AddressManager):
     def get_private_key(self, index: int) -> PrivateKey:
         return self.account.private_key

-    def get_public_key(self, index: int) -> PubKey:
+    def get_public_key(self, index: int) -> PublicKey:
         return self.account.public_key

     async def get_max_gap(self) -> int:
@@ -224,9 +266,6 @@ class SingleKey(AddressManager):

 class Account:

-    mnemonic_class = Mnemonic
-    private_key_class = PrivateKey
-    public_key_class = PubKey
     address_generators: Dict[str, Type[AddressManager]] = {
         SingleKey.name: SingleKey,
         HierarchicalDeterministic.name: HierarchicalDeterministic,
@@ -234,7 +273,7 @@ class Account:

     def __init__(self, ledger: 'Ledger', wallet: 'Wallet', name: str,
                  seed: str, private_key_string: str, encrypted: bool,
-                 private_key: Optional[PrivateKey], public_key: PubKey,
+                 private_key: Optional[PrivateKey], public_key: PublicKey,
                  address_generator: dict, modified_on: float, channel_keys: dict) -> None:
         self.ledger = ledger
         self.wallet = wallet
@@ -245,13 +284,14 @@
         self.private_key_string = private_key_string
         self.init_vectors: Dict[str, bytes] = {}
         self.encrypted = encrypted
-        self.private_key = private_key
-        self.public_key = public_key
+        self.private_key: Optional[PrivateKey] = private_key
+        self.public_key: PublicKey = public_key
         generator_name = address_generator.get('name', HierarchicalDeterministic.name)
         self.address_generator = self.address_generators[generator_name]
         self.receiving, self.change = self.address_generator.from_dict(self, address_generator)
-        self.address_managers = {am.chain_number: am for am in {self.receiving, self.change}}
+        self.address_managers = {am.chain_number: am for am in (self.receiving, self.change)}
         self.channel_keys = channel_keys
+        self.deterministic_channel_keys = DeterministicChannelKeyManager(self)
         ledger.add_account(self)
         wallet.add_account(self)

@@ -266,19 +306,19 @@ class Account:
                  name: str = None, address_generator: dict = None):
         return cls.from_dict(ledger, wallet, {
             'name': name,
-            'seed': cls.mnemonic_class().make_seed(),
+            'seed': Mnemonic().make_seed(),
             'address_generator': address_generator or {}
         })

     @classmethod
     def get_private_key_from_seed(cls, ledger: 'Ledger', seed: str, password: str):
-        return cls.private_key_class.from_seed(
-            ledger, cls.mnemonic_class.mnemonic_to_seed(seed, password or 'lbryum')
+        return PrivateKey.from_seed(
+            ledger, Mnemonic.mnemonic_to_seed(seed, password or 'lbryum')
         )

     @classmethod
     def keys_from_dict(cls, ledger: 'Ledger', d: dict) \
-            -> Tuple[str, Optional[PrivateKey], PubKey]:
+            -> Tuple[str, Optional[PrivateKey], PublicKey]:
         seed = d.get('seed', '')
         private_key_string = d.get('private_key', '')
         private_key = None
@@ -311,7 +351,7 @@ class Account:
             private_key=private_key,
             public_key=public_key,
             address_generator=d.get('address_generator', {}),
-            modified_on=d.get('modified_on', int(time.time())),
+            modified_on=int(d.get('modified_on', time.time())),
             channel_keys=d.get('certificates', {})
         )

@@ -343,7 +383,7 @@ class Account:
     def merge(self, d: dict):
         if d.get('modified_on', 0) > self.modified_on:
             self.name = d['name']
-            self.modified_on = d.get('modified_on', int(time.time()))
+            self.modified_on = int(d.get('modified_on', time.time()))
             assert self.address_generator.name == d['address_generator']['name']
             for chain_name in ('change', 'receiving'):
                 if chain_name in d['address_generator']:
@@ -449,7 +489,7 @@ class Account:
         assert not self.encrypted, "Cannot get private key on encrypted wallet account."
         return self.address_managers[chain].get_private_key(index)

-    def get_public_key(self, chain: int, index: int) -> PubKey:
+    def get_public_key(self, chain: int, index: int) -> PublicKey:
         return self.address_managers[chain].get_public_key(index)

     def get_balance(self, confirmations=0, include_claims=False, read_only=False, **constraints):
@@ -520,33 +560,30 @@ class Account:

         return tx

-    def add_channel_private_key(self, private_key):
-        public_key_bytes = private_key.get_verifying_key().to_der()
-        channel_pubkey_hash = self.ledger.public_key_to_address(public_key_bytes)
-        self.channel_keys[channel_pubkey_hash] = private_key.to_pem().decode()
+    async def generate_channel_private_key(self):
+        return await self.deterministic_channel_keys.generate_next_key()

-    async def get_channel_private_key(self, public_key_bytes):
+    def add_channel_private_key(self, private_key: PrivateKey):
+        self.channel_keys[private_key.address] = private_key.to_pem().decode()
+
+    async def get_channel_private_key(self, public_key_bytes) -> PrivateKey:
         channel_pubkey_hash = self.ledger.public_key_to_address(public_key_bytes)
         private_key_pem = self.channel_keys.get(channel_pubkey_hash)
         if private_key_pem:
-            return await asyncio.get_event_loop().run_in_executor(
-                None, ecdsa.SigningKey.from_pem, private_key_pem, sha256
-            )
+            return PrivateKey.from_pem(self.ledger, private_key_pem)
+        return self.deterministic_channel_keys.get_private_key_from_pubkey_hash(channel_pubkey_hash)

     async def maybe_migrate_certificates(self):
-        def to_der(private_key_pem):
-            return ecdsa.SigningKey.from_pem(private_key_pem, hashfunc=sha256).get_verifying_key().to_der()
-
         if not self.channel_keys:
             return
         channel_keys = {}
         for private_key_pem in self.channel_keys.values():
             if not isinstance(private_key_pem, str):
                 continue
-            if "-----BEGIN EC PRIVATE KEY-----" not in private_key_pem:
+            if not private_key_pem.startswith("-----BEGIN"):
                 continue
-            public_key_der = await asyncio.get_event_loop().run_in_executor(None, to_der, private_key_pem)
-            channel_keys[self.ledger.public_key_to_address(public_key_der)] = private_key_pem
+            private_key = PrivateKey.from_pem(self.ledger, private_key_pem)
+            channel_keys[private_key.address] = private_key_pem
         if self.channel_keys != channel_keys:
             self.channel_keys = channel_keys
             self.wallet.save()
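Channel keys now flow through the wallet's own PrivateKey wrapper, so persistence reduces to a PEM round trip keyed by the key's address. A sketch of just the PEM round trip using the coincurve primitives the new bip32 module wraps (no ledger required for this part; the exact coincurve calls are an assumption, not taken from the diff):

    from coincurve import PrivateKey as cPrivateKey

    key = cPrivateKey()                       # fresh secp256k1 channel key
    pem = key.to_pem().decode()               # the string Account.channel_keys stores
    assert pem.startswith('-----BEGIN')       # the prefix check used during migration
    restored = cPrivateKey.from_pem(pem.encode())
    assert restored.secret == key.secret      # same key after the round trip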
@@ -566,35 +603,14 @@ class Account:
         if gap_changed:
             self.wallet.save()

-    async def get_detailed_balance(self, confirmations=0, reserved_subtotals=False, read_only=False):
-        tips_balance, supports_balance, claims_balance = 0, 0, 0
-        get_total_balance = partial(self.get_balance, read_only=read_only, confirmations=confirmations,
-                                    include_claims=True)
-        total = await get_total_balance()
-        if reserved_subtotals:
-            claims_balance = await get_total_balance(txo_type__in=CLAIM_TYPES)
-            for txo in await self.get_support_summary():
-                if confirmations > 0 and not 0 < txo.tx_ref.height <= self.ledger.headers.height - (confirmations - 1):
-                    continue
-                if txo.is_my_input:
-                    supports_balance += txo.amount
-                else:
-                    tips_balance += txo.amount
-            reserved = claims_balance + supports_balance + tips_balance
-        else:
-            reserved = await self.get_balance(
-                confirmations=confirmations, include_claims=True, txo_type__gt=0
-            )
-        return {
-            'total': total,
-            'available': total - reserved,
-            'reserved': reserved,
-            'reserved_subtotals': {
-                'claims': claims_balance,
-                'supports': supports_balance,
-                'tips': tips_balance
-            } if reserved_subtotals else None
-        }
+    async def get_detailed_balance(self, confirmations=0, read_only=False):
+        constraints = {}
+        if confirmations > 0:
+            height = self.ledger.headers.height - (confirmations-1)
+            constraints.update({'height__lte': height, 'height__gt': 0})
+        return await self.ledger.db.get_detailed_balance(
+            accounts=[self], read_only=read_only, **constraints
+        )

     def get_transaction_history(self, read_only=False, **constraints):
         return self.ledger.get_transaction_history(
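The rewritten get_detailed_balance pushes the subtotal math into the database layer and only computes a height cutoff here; the arithmetic is that a transaction has the required confirmations when its height is at most tip - (confirmations - 1). A small standalone sketch of just that constraint-building step:

    def build_balance_constraints(tip_height: int, confirmations: int = 0) -> dict:
        # Mirrors the new get_detailed_balance: only confirmed, on-chain outputs
        # (height > 0) at or below the cutoff are counted.
        constraints = {}
        if confirmations > 0:
            cutoff = tip_height - (confirmations - 1)
            constraints.update({'height__lte': cutoff, 'height__gt': 0})
        return constraints

    # e.g. with the tip at height 1_124_000 and 6 confirmations required:
    print(build_balance_constraints(1_124_000, 6))  # {'height__lte': 1123995, 'height__gt': 0}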
@@ -1,10 +1,21 @@
-from coincurve import PublicKey, PrivateKey as _PrivateKey
+from asn1crypto.keys import PrivateKeyInfo, ECPrivateKey
+from coincurve import PublicKey as cPublicKey, PrivateKey as cPrivateKey
+from coincurve.utils import (
+    pem_to_der, lib as libsecp256k1, ffi as libsecp256k1_ffi
+)
+from coincurve.ecdsa import CDATA_SIG_LENGTH

 from lbry.crypto.hash import hmac_sha512, hash160, double_sha256
 from lbry.crypto.base58 import Base58
 from .util import cachedproperty


+class KeyPath:
+    RECEIVE = 0
+    CHANGE = 1
+    CHANNEL = 2
+
+
 class DerivationError(Exception):
     """ Raised when an invalid derivation occurs. """

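KeyPath names the BIP32 chain indexes that account.py previously passed as bare 0/1 literals (`cls(account, 0, ...)` becoming `cls(account, KeyPath.RECEIVE, ...)`). A short sketch of how the constants drive derivation, following the `child(chain).child(index)` pattern used by HierarchicalDeterministic; the module path is assumed from the lbry-sdk layout and the seed bytes are a throwaway example, not a real wallet seed:

    from lbry.wallet.bip32 import PrivateKey, KeyPath  # path assumed from lbry-sdk layout

    root = PrivateKey.from_seed(None, b'example seed bytes, not a real wallet seed')
    receive0 = root.child(KeyPath.RECEIVE).child(0)  # chain 0: receiving addresses
    change0 = root.child(KeyPath.CHANGE).child(0)    # chain 1: change addresses
    print(receive0.public_key.pubkey_bytes.hex())
    print(change0.public_key.pubkey_bytes.hex())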
@@ -46,9 +57,11 @@ class _KeyBase:
         if len(raw_serkey) != 33:
             raise ValueError('raw_serkey must have length 33')

-        return (ver_bytes + bytes((self.depth,))
-                + self.parent_fingerprint() + self.n.to_bytes(4, 'big')
-                + self.chain_code + raw_serkey)
+        return (
+            ver_bytes + bytes((self.depth,))
+            + self.parent_fingerprint() + self.n.to_bytes(4, 'big')
+            + self.chain_code + raw_serkey
+        )

     def identifier(self):
         raise NotImplementedError
@@ -69,26 +82,30 @@ class _KeyBase:
         return Base58.encode_check(self.extended_key())


-class PubKey(_KeyBase):
+class PublicKey(_KeyBase):
     """ A BIP32 public key. """

     def __init__(self, ledger, pubkey, chain_code, n, depth, parent=None):
         super().__init__(ledger, chain_code, n, depth, parent)
-        if isinstance(pubkey, PublicKey):
+        if isinstance(pubkey, cPublicKey):
             self.verifying_key = pubkey
         else:
             self.verifying_key = self._verifying_key_from_pubkey(pubkey)

+    @classmethod
+    def from_compressed(cls, public_key_bytes, ledger=None) -> 'PublicKey':
+        return cls(ledger, public_key_bytes, bytes((0,)*32), 0, 0)
+
     @classmethod
     def _verifying_key_from_pubkey(cls, pubkey):
-        """ Converts a 33-byte compressed pubkey into an PublicKey object. """
+        """ Converts a 33-byte compressed pubkey into an coincurve.PublicKey object. """
         if not isinstance(pubkey, (bytes, bytearray)):
             raise TypeError('pubkey must be raw bytes')
         if len(pubkey) != 33:
             raise ValueError('pubkey must be 33 bytes')
         if pubkey[0] not in (2, 3):
             raise ValueError('invalid pubkey prefix byte')
-        return PublicKey(pubkey)
+        return cPublicKey(pubkey)

     @cachedproperty
     def pubkey_bytes(self):
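from_compressed builds a standalone PublicKey directly from 33-byte compressed key material, with a zero chain code and no ledger. A sketch of wrapping a raw compressed pubkey that way, producing the raw bytes with coincurve (which the module already depends on); module path and the format() call are assumptions:

    from coincurve import PrivateKey as cPrivateKey
    from lbry.wallet.bip32 import PublicKey  # path assumed from lbry-sdk layout

    raw = cPrivateKey().public_key.format(compressed=True)  # 33 bytes, prefix 0x02/0x03
    pubkey = PublicKey.from_compressed(raw)
    print(pubkey.pubkey_bytes == raw)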
@@ -103,7 +120,7 @@ class PubKey(_KeyBase):
     def ec_point(self):
         return self.verifying_key.point()

-    def child(self, n: int):
+    def child(self, n: int) -> 'PublicKey':
         """ Return the derived child extended pubkey at index N. """
         if not 0 <= n < (1 << 31):
             raise ValueError('invalid BIP32 public key child number')
@@ -111,7 +128,7 @@ class PubKey(_KeyBase):
         msg = self.pubkey_bytes + n.to_bytes(4, 'big')
         L_b, R_b = self._hmac_sha512(msg)  # pylint: disable=invalid-name
         derived_key = self.verifying_key.add(L_b)
-        return PubKey(self.ledger, derived_key, R_b, n, self.depth + 1, self)
+        return PublicKey(self.ledger, derived_key, R_b, n, self.depth + 1, self)

     def identifier(self):
         """ Return the key's identifier as 20 bytes. """
@@ -124,6 +141,36 @@ class PubKey(_KeyBase):
             self.pubkey_bytes
         )

+    def verify(self, signature, digest) -> bool:
+        """ Verify that a signature is valid for a 32 byte digest. """
+
+        if len(signature) != 64:
+            raise ValueError('Signature must be 64 bytes long.')
+
+        if len(digest) != 32:
+            raise ValueError('Digest must be 32 bytes long.')
+
+        key = self.verifying_key
+
+        raw_signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')
+
+        parsed = libsecp256k1.secp256k1_ecdsa_signature_parse_compact(
+            key.context.ctx, raw_signature, signature
+        )
+        assert parsed == 1
+
+        normalized_signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')
+
+        libsecp256k1.secp256k1_ecdsa_signature_normalize(
+            key.context.ctx, normalized_signature, raw_signature
+        )
+
+        verified = libsecp256k1.secp256k1_ecdsa_verify(
+            key.context.ctx, normalized_signature, digest, key.public_key
+        )
+
+        return bool(verified)
+

 class PrivateKey(_KeyBase):
     """A BIP32 private key."""
@@ -132,7 +179,7 @@ class PrivateKey(_KeyBase):

     def __init__(self, ledger, privkey, chain_code, n, depth, parent=None):
         super().__init__(ledger, chain_code, n, depth, parent)
-        if isinstance(privkey, _PrivateKey):
+        if isinstance(privkey, cPrivateKey):
             self.signing_key = privkey
         else:
             self.signing_key = self._signing_key_from_privkey(privkey)
@@ -140,7 +187,7 @@ class PrivateKey(_KeyBase):
     @classmethod
     def _signing_key_from_privkey(cls, private_key):
         """ Converts a 32-byte private key into an coincurve.PrivateKey object. """
-        return _PrivateKey.from_int(PrivateKey._private_key_secret_exponent(private_key))
+        return cPrivateKey.from_int(PrivateKey._private_key_secret_exponent(private_key))

     @classmethod
     def _private_key_secret_exponent(cls, private_key):
@@ -152,24 +199,40 @@ class PrivateKey(_KeyBase):
         return int.from_bytes(private_key, 'big')

     @classmethod
-    def from_seed(cls, ledger, seed):
+    def from_seed(cls, ledger, seed) -> 'PrivateKey':
         # This hard-coded message string seems to be coin-independent...
         hmac = hmac_sha512(b'Bitcoin seed', seed)
         privkey, chain_code = hmac[:32], hmac[32:]
         return cls(ledger, privkey, chain_code, 0, 0)

+    @classmethod
+    def from_pem(cls, ledger, pem) -> 'PrivateKey':
+        der = pem_to_der(pem.encode())
+        try:
+            key_int = ECPrivateKey.load(der).native['private_key']
+        except ValueError:
+            key_int = PrivateKeyInfo.load(der).native['private_key']['private_key']
+        private_key = cPrivateKey.from_int(key_int)
+        return cls(ledger, private_key, bytes((0,)*32), 0, 0)
+
+    @classmethod
+    def from_bytes(cls, ledger, key_bytes) -> 'PrivateKey':
+        return cls(ledger, cPrivateKey(key_bytes), bytes((0,)*32), 0, 0)
+
     @cachedproperty
     def private_key_bytes(self):
         """ Return the serialized private key (no leading zero byte). """
         return self.signing_key.secret

     @cachedproperty
-    def public_key(self):
+    def public_key(self) -> PublicKey:
         """ Return the corresponding extended public key. """
         verifying_key = self.signing_key.public_key
         parent_pubkey = self.parent.public_key if self.parent else None
-        return PubKey(self.ledger, verifying_key, self.chain_code, self.n, self.depth,
-                      parent_pubkey)
+        return PublicKey(
+            self.ledger, verifying_key, self.chain_code,
+            self.n, self.depth, parent_pubkey
+        )

     def ec_point(self):
         return self.public_key.ec_point()
@@ -182,11 +245,12 @@ class PrivateKey(_KeyBase):
         """ Return the private key encoded in Wallet Import Format. """
         return self.ledger.private_key_to_wif(self.private_key_bytes)

+    @property
     def address(self):
         """ The public key as a P2PKH address. """
         return self.public_key.address

-    def child(self, n):
+    def child(self, n) -> 'PrivateKey':
         """ Return the derived child extended private key at index N."""
         if not 0 <= n < (1 << 32):
             raise ValueError('invalid BIP32 private key child number')
@@ -205,6 +269,28 @@ class PrivateKey(_KeyBase):
         """ Produce a signature for piece of data by double hashing it and signing the hash. """
         return self.signing_key.sign(data, hasher=double_sha256)

+    def sign_compact(self, digest):
+        """ Produce a compact signature. """
+        key = self.signing_key
+
+        signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')
+        signed = libsecp256k1.secp256k1_ecdsa_sign(
+            key.context.ctx, signature, digest, key.secret,
+            libsecp256k1_ffi.NULL, libsecp256k1_ffi.NULL
+        )
+
+        if not signed:
+            raise ValueError('The private key was invalid.')
+
+        serialized = libsecp256k1_ffi.new('unsigned char[%d]' % CDATA_SIG_LENGTH)
+        compacted = libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(
+            key.context.ctx, serialized, signature
+        )
+        if compacted != 1:
+            raise ValueError('The signature could not be compacted.')
+
+        return bytes(libsecp256k1_ffi.buffer(serialized, CDATA_SIG_LENGTH))
+
     def identifier(self):
         """Return the key's identifier as 20 bytes."""
         return self.public_key.identifier()
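sign_compact and PublicKey.verify are the two halves of the same 64-byte compact-signature format, so a round trip is a useful smoke test. A sketch assuming the lbry.wallet.bip32 module path from the lbry-sdk layout, with double_sha256 supplying the 32-byte digest that verify() requires:

    from lbry.crypto.hash import double_sha256
    from lbry.wallet.bip32 import PrivateKey  # path assumed from lbry-sdk layout

    key = PrivateKey.from_seed(None, b'demo seed, not a real wallet seed')
    digest = double_sha256(b'payload to sign')   # exactly 32 bytes
    signature = key.sign_compact(digest)         # 64-byte compact signature
    assert len(signature) == 64
    assert key.public_key.verify(signature, digest)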
@@ -216,9 +302,12 @@ class PrivateKey(_KeyBase):
             b'\0' + self.private_key_bytes
         )

+    def to_pem(self):
+        return self.signing_key.to_pem()
+

 def _from_extended_key(ledger, ekey):
-    """Return a PubKey or PrivateKey from an extended key raw bytes."""
+    """Return a PublicKey or PrivateKey from an extended key raw bytes."""
     if not isinstance(ekey, (bytes, bytearray)):
         raise TypeError('extended key must be raw bytes')
     if len(ekey) != 78:
@@ -230,7 +319,7 @@ def _from_extended_key(ledger, ekey):

     if ekey[:4] == ledger.extended_public_key_prefix:
         pubkey = ekey[45:]
-        key = PubKey(ledger, pubkey, chain_code, n, depth)
+        key = PublicKey(ledger, pubkey, chain_code, n, depth)
     elif ekey[:4] == ledger.extended_private_key_prefix:
         if ekey[45] != 0:
             raise ValueError('invalid extended private key prefix byte')
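The magic offsets in _from_extended_key follow the standard BIP32 serialization, which is why ekey[45:] is the key material and ekey[45] must be zero for a private key. A reference sketch of the 78-byte layout being sliced (standard BIP32, not specific to this diff):

    # [0:4]   version prefix (xprv/xpub per ledger)
    # [4:5]   depth
    # [5:9]   parent fingerprint
    # [9:13]  child number (n)
    # [13:45] chain code
    # [45:78] key material (33 bytes: compressed pubkey, or 0x00 + 32-byte privkey)
    def split_extended_key(ekey: bytes) -> dict:
        assert len(ekey) == 78
        return {
            'prefix': ekey[:4], 'depth': ekey[4], 'fingerprint': ekey[5:9],
            'n': int.from_bytes(ekey[9:13], 'big'),
            'chain_code': ekey[13:45], 'key_material': ekey[45:],
        }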
@@ -248,6 +337,6 @@ def from_extended_key_string(ledger, ekey_str):
         xpub6BsnM1W2Y7qLMiuhi7f7dbAwQZ5Cz5gYJCRzTNainXzQXYjFwtuQXHd
         3qfi3t3KJtHxshXezfjft93w4UE7BGMtKwhqEHae3ZA7d823DVrL

-    return a PubKey or PrivateKey.
+    return a PublicKey or PrivateKey.
     """
     return _from_extended_key(ledger, Base58.decode_check(ekey_str))
@@ -740,4 +740,506 @@ HASHES = {
     738000: 'aebdf15b23eb7a37600f67d45bf6586b1d5bff3d5f3459adc2f6211ab3dd0bcb',
     739000: '3f5a894ac42f95f7d54ce25c42ea0baf1a05b2da0e9406978de0dc53484d8b04',
     740000: '55debc22f995d844eafa0a90296c9f4f433e2b7f38456fff45dd3c66cef04e37',
+    741000: '927b47fc909b4b55c067bbd75d8638af1400fac076cb642e9500a747d849e458',
+    742000: '97fa3d83eb94114496e418c118f549ebfb8f6d123d0b40a12ecb093239557646',
+    743000: '482b66d8d5084703079c28e3ae69e5dee735f762d6fcf9743e75f04e139fd181',
+    744000: 'f406890d5c70808a58fb14429bad812a3185bdb9dace1aa57de76663f92b5013',
+    745000: '2bd0802cbb8aa4441a159104d39515a4ff6fc8dfe616bc83e88197847c78bcff',
+    746000: '24d090a7b6359db3d5d714a69ddc9a6f2e8ff8f044b723220a8ba32df785fd54',
+    747000: '07c4ce9ce5310ee472cf753ddb03c39c5fee6c910d491daffd38615205411633',
+    748000: 'ea913798c0f09d0a27eae7c852954c2c88b8c3b7f23f8fba26b68a3952d0ffde',
+    749000: '23f256adebfe35d49ba84ad49f3f71fc67f7745091c91f22e65f1cc2e23b8f2c',
+    750000: '96db12ee3a295f3d5c56d244e6e7493f58c08d3427e379940e5d4f891a41ec26',
+    751000: 'cedaf12415dac1314942e58ced80830b92fbfabc41f42a0b0f054f0672ef9822',
+    752000: '293606bcd9fbbee5584724301b2cf86bb69204820023e1fb46c238ddfbc660ab',
+    753000: 'f4d43cbb38b7d97919dedc0f5a6dc8007896c4f443b76f3e5693e25bc46760cf',
+    754000: 'fcaad22fd815311280fe451086516375d1d9d92b2990c7c351407df5aa19011e',
+    755000: 'b9276f10d1844cb5b0308766c8db960490ac34a73c4653d0a91202789a6ccb9b',
+    756000: '2fe5581f1110c1c8dcea46cad647551bd6bd640cb37738d863e189bd8f368347',
+    757000: 'b9d915f366f0b010429a52245b0fb02774157eb9fd8f66bce32dcd3acc71c2a1',
+    758000: '62d1854fc15db56b5d0e05ceeb54c1297966bf9dc7f7a0a14b42c059fc485d1b',
+    759000: 'f4ca9f69d16d092f4a0ea5102e6343b21204c4ea9cd9b22cddd77dbb5d68ade3',
+    760000: 'df3bb86641330d8cc7f55a2fd0da28251219e95babe960a308b18e08a7d88fc8',
+    761000: 'a93029475de4bc7569b6ae802d658cd91c84cc253772712a279f140a6c3b91b1',
+    762000: '307e289dc6ec8bcd62ca8831e4159d5edd780f2fae55ba55dd446225450f46f8',
+    763000: '293f73514abca24f374473bd0394179812952a04ea13dc60ef5ada5331fa274f',
+    764000: 'dd8b082db9281e3d9bacf15d6b352fda186d2d2923c7731844d0d4764dd71db8',
+    765000: '201239e562d2571bf47347b3522fff89632aecea3b2d8cef05151f88b2b0bcdb',
+    766000: '4a55a538b51b5650979e64521998cd5c5ad055ba9f3ac0e3e2a28febc6cc2798',
+    767000: '3916666f2adbb05ea98ec1961f9546b9afa0f6910ec95e42ce37267f2ae4f79c',
+    768000: 'dc0ad881eedcb5fd4954238f462080d6e7636b058d481698ed1c077e0ce2207e',
+    769000: 'eaf10a1e1ec6e129289b8479a05df03e0808f1f0946f1995de6524e9ebe7a461',
+    770000: '7200c64f22e32de7f999583361c933680fc9a2ffcb9a5ab73d3076fd49ec7537',
+    771000: 'd883111a2eeacff80ce31df35ab6c943805b9e48877b413fccf371e5dbfa7fb2',
+    772000: '3977d3c60edb9c80c97bb2b759b1659cbb650ad2d3a6f61d2caec83f1b2ae84c',
+    773000: '9c7175fb8646a1a82383b4c534fd01bcf92d65c43d87ae854d51a784b04dc77e',
+    774000: 'e0e92485f86e5fffa87b3497424e43b02a37710517d9d3f272392e8cdc56e5e9',
+    775000: '6395229113d3aa2105afbaeb8b59621a536fc61fe272314b2fc3bdda98dd66cc',
+    776000: 'b4b00207328b5f032bd4f0b634f91323ff520ada8c8bfec241b23c8e4bfd5a4e',
+    777000: '14cdc6f5f7b4bd5bad745dfe6fcd114e9194026412a2e1b3f345be2eef433d16',
+    778000: 'd3cd7b68be504c32117b670d38d59d44b02dcf3d65811efc2ca5531d902623cc',
+    779000: 'afcd220e4040cb5f92d4b38fc204e59822df2218f767f2c4b33597b238a35f77',
+    780000: '78252a9cfc289a70192ed8dd3dddeb1b9a4f9b8eff9a5d0ac259b3254472cf68',
+    781000: '02ebc3f17d947481a311b4771c254f1e002b6a9198d4a5258ce6c13165aadddc',
+    782000: '8dd9f1f372ee6d688a0bcdc3b342c77804ba5a646a218be4bc2aa02d846206c0',
+    783000: 'e46b0d02ec2ef488fae455665e107520e1bd2b4f35ca52af7ad8addd2f72fa73',
+    784000: '9ee8a8de94231e3ae3a610b82fdbca48dc14d9b80791d20af6c365a31822df6f',
+    785000: '21e1cc12def8173a50158b2833bd91a62140c61646f5e08aecaee3e6da20735e',
+    786000: 'b3e659f84d73de42888cc0f2b69bae71dd5fa6756a437a4b21958b182faa316e',
+    787000: 'a9be7ba00ea6a9ea6bd03d8412ec014ca7e8cda6bdc33382f165e702811b8836',
+    788000: 'a4c14729f8a68c03f5a0ccd890ac6a92b39c143f1f752fe81ad051eb52d8dce0',
+    789000: '5cf66d224e5645097efc9c3c0392b51c8ca8ea1295151921a7912a2f04ee1274',
+    790000: '676769ade71c33bc102bce416e66eb2c6794b03d7b8f5a590c87c380da463775',
+    791000: '0228e074451797bf6bfbc941bcafcbadc972d32e4e1e0c5da015513f65714217',
+    792000: '0fa3d00a1f19c5ac060e10a410cf7cea18eac5f89018d79ce51ac3fc66bbb365',
+    793000: '5f68d0868b424e32f5ce3d8e7d9f18979da7b831b8ef4e3974d62fb20ff53a97',
+    794000: '34508c56423739c00a837801b654b07decb274d02b383eff396d23c4d64bc0e9',
+    795000: '7f70910c855d1fd88cd7f9be8a3b94314ee408a31a2da6301404bf8deb07c12c',
+    796000: 'b74ab8813b1d2a0967fea0e66597572e5f0b5a285e21f5150fcc9d5f757de130',
+    797000: 'bba27b1491d907ab1baa456cb651dc5b071231b1b6ad27b62d351ca12c25dbfd',
+    798000: 'e75dcb15b2fc91f02e75e600dde9f6f46c09672533bc82a5d6916c4a2cd8613a',
+    799000: 'adf62c826a3e0b33af439a7881918ae4ce19c5fb2ca37d21243415f7d716aa65',
+    800000: 'd8f0ca13a8c8a19c254a3a6ba15150a34711dca96f2d877162cc44aa2acfb268',
+    801000: '2a8c7104c4040a2bc31913ae25e9361df5bac9477368c708f86c1ca640480887',
+    802000: '1f3b09d3561c4a8a056b263289bd492dc6c0d604c3fa195935e735d1c0ddc40e',
+    803000: '037769628c40a701fdb4b16d79084b8fbb319fde79770a7ac842f3cdc813099e',
+    804000: 'a0c6a089e5fa1e3589ca282085fe7201a5705776d81b257ffd252b2947fa6428',
+    805000: 'b2ac99bfc4a488e7b7624b31ee061991a6dd0881bb005cd13f3dd2e66a08fe19',
+    806000: 'ffe63cb999a278280b80a667d2dcb60c40e43a53f733914d8bec808b694ebf83',
+    807000: 'eddb09fc6c4869a59b520d0befb1fb6ac952333f3cc5de086539c85ea8558778',
+    808000: '0f4fb3f9172e52897ea992d9f3a2024126c4d2e63e9888739f11fb1f5e4c1f46',
+    809000: '9641dd720d23ced2f1cb6e5cf46ac4e547afb9f56263c4cf58e3b19d407cf401',
+    810000: 'de6dc953acd7e5ef213b3aaf1c4a9ee1d5b756bfce5525ee105214647e243a85',
+    811000: 'c52c83712ca12b24b2db1b4a575e7f352b1d560cbf702e121a03bdca9e8be23d',
+    812000: '83143734bb965318a53a38a7e403dcdb3e3fadedb01ab12c370417fc2a0655c0',
+    813000: 'e480deff10c5a84fc957e3aed936690e24b74dd08fa8858a8a953c2f7383b914',
+    814000: '810d33afcee07b9abe16c6cdc3a041038daa131c476b0daf48a080007f08b490',
+    815000: 'b4aeb9e16fddd27844b2d56bc2b221134039bb5642c9e9ba88372afbdeac3972',
+    816000: '86e73b67aae3d248011b8f66ed414cb8a9ba4b2a3cf7e32773cfbff055d719b7',
+    817000: '3ebb8b83752b48242016cb682f0f6bd14e15371bf1163a5933193eaa0edeb351',
+    818000: '4d925e17f642f220bbf317d3d5355d2f41fbce325f190f8c3b32dc0b337d24d6',
+    819000: 'b9cc126d620f6b99d90a00d35957b0e428aaaa7c986bc9e816a60e4334572961',
+    820000: '9c2f8c142bed1f94dca29276f7c83958be8cfe11773bb9b56c808fbcf7d3b1f8',
+    821000: 'e5509eb98895cfa12a8da5d54c1df3f52472ffcbdf707adbf84a4a9c5d356203',
+    822000: '764aada4802ebfe4ef935ab50af06a4f83aa556c49fdde3d9e12e1abd230c16b',
+    823000: '1dbd745c2e96a365d865f990d109137d32d42977f503af55d8c00b109d31d3c3',
+    824000: '954304a0b0c8f549c3bffd5ff46b5b8f05b0f0fde2a36f24fd5af9d774fb3079',
+    825000: '17808b14f2056c1a5d46cb7617e9de9be6a1a6084edbc1bdb778586467a72297',
+    826000: '3ca1167d4cac8b187829b23001b438617c43704b42462c4eb001b0d434cb9651',
+    827000: '246d1607245e4a202f420393ac2e30e9cbf5eb5570dc997073b897f6d8643023',
+    828000: '1764730a8dc3e89d02d168ff6bb54e8c903820b74711af6ff27bd0c8545577e7',
+    829000: 'd9f3ab0cd823c6305bd8b95a96188bb4f2ca90b4d66c5d12293e8b6192bac0f2',
+    830000: 'd4ff51f0092b04aedf8d39937680d8e8309b1be21d36e7833ed36f8e30aad6ea',
+    831000: '3e92e76721b962396dce52993fa7606552f0907b38f7b2bd7b21ada98c145f47',
+    832000: 'df12fcdb4cbe53ba627ace6de898298de175f8671d3d90170732d110fcdc34b8',
+    833000: '25167ff38ae4a5964b618cabe0a12d4de62ac7a4c47448cdb4499e09e108d5b9',
+    834000: 'd31f5309ea179a1e386e835fc372e47dcda6871a3a239abfba50c4f368994f13',
+    835000: 'aff7e8dd3e55ea807fcbe284014075f420b3a23f1b0eb47bacdc1c91d2899813',
+    836000: '3b5ac6d64c470739bb17d1544a285affb40f2d33e92687e5ba7c5ac602e0d72a',
+    837000: 'd5619cbfe4f27c55f2bf9351b4891636cf64fef88212a5eeeae7bd3de47fe0bd',
+    838000: '1f9102a49c6ac470cb5d0050e5300b1443840d6d65719b835e3bea484aafb2ec',
+    839000: '3f63e391f0fbc5787fbe4ace3bada3816261294ea1c6ee435001801023682f90',
+    840000: '777894fd12bd0d6dee7bcde2995c68e55e7094e3122da38571e4b6c4304b75e0',
+    841000: 'ceb0c598c788e25e43e25aa4beff5c7377035824844cf1675eaea537074df028',
+    842000: '8661cf2065dc713d2ba043f0b81f0effcc940eeb3e91906a21ff22c210561dcd',
+    843000: '0dc2766f90415009d0c86bedffee6ebcf58042eb08262c0c67c4e9ed86b2aec8',
+    844000: '26d072da864cab268a12794977b04ec44fb69ef3978e2342e82225974dac54dd',
+    845000: '95e93bb60be8d5f07a1f4d26290c914957a82fc9d26ae8a3f20082eda27406ff',
+    846000: 'f1bdc39af7705e58ab8b6c31dc70dce1e115db1cfd8cc9b037949dfbec82a59a',
+    847000: 'f5f10f06396ecf2765d8a081141d489737c1d8d57c281f28f57c4cb2f90db883',
+    848000: '331b8ef08605bae8d749893af9ed54f0df4f07a5a002108a2a0aea82d0360979',
+    849000: '75b5f6233ab9a1bbc3c8b2893e5b22a0aa98e7ea635261255dc3c281f67d2260',
+    850000: '5d7e6fe83e0ea1910a54a00090704737671d6f44df4228e21440ad1fc15e595f',
+    851000: '7822db25d3ff0f6695ee38bad91edf317b5c6611673d28f1d22053110bb558be',
+    852000: '2f0effad83a3561fc1a2806a562786a641d9ddb18d16bb9308006e7d324a21e9',
+    853000: 'f603b2eaff11d5296377d990651317d40a1b2599ad2c5250eab131090f4b9458',
+    854000: '34d59b26a50f18a9f250736d0f2e69d28b7e196fbef9b8a26c6b0b75c16aa194',
+    855000: '76dd1ffff3946c0878969886fcf177ce5ab5560df19ddf006f9bcb02ae3e4e4f',
+    856000: '74ff0b6f64e9dd5802fec2aac1d3ae194d28b9264114adaf0a882b46c8c918fe',
+    857000: '7b5badfa2e4f40aa597a504d7ebe83c3705a2c6169a8c168ce293db223bc2d32',
+    858000: '2bb0767a0f72b20d45ecfc3e34517dbda16d85758e040cf0e147f4cbd0cc57ac',
+    859000: '3d741b9c365a91ed76f85824b94d19ec19b608d232660840ba59c7aa4b2cb67f',
+    860000: 'd481a5a117878c0e3acd1f5844e150fb30e617577947d9846b1d214d703b71b0',
+    861000: '54033424e488a3f1ad6946d4a6d9acb48465d6b1dbe8e1c2504a54cc84d7cad4',
+    862000: '464bc3820a8cc8844dc9e26c388009e9982c656d46ef4b4fd0a2cb0e4eea0aaa',
+    863000: 'd1aa94be2174f66780c4f226b9da3f6712b0f37af8dec33360bea83ca261b342',
+    864000: '8c16008f11de5bc395d88cd802514ff647450f1bc136724b9aaf2ccce10a494f',
+    865000: '3dae86012e97a201e2e1a47c899001ac00f78dc108026ed7c4194858c6c6dd5a',
+    866000: 'afe5b0ccab995e1a1fa25fbc24c1d4b1a92c43042d03395f8743dcd806e72fd8',
+    867000: 'c83716ac171aa9ab0d414833db340fa30e82bfda6cc616d3038529caab9b5600',
+    868000: '8c409fe03cd35ef2d8e366818788b40eaeb4c8f6ae91450d75f4a66ca5f69cad',
+    869000: '1d47909ceba790b8e1ce2e9902ee2775ea99e58efdb95668f9803a8ccf95f286',
+    870000: '9adf5da1476388f053aa42de636da169d1cf1c9652cdf7cd9ad4fb18a0eb3388',
+    871000: '8ad57fb1e74bcba0b5614fbac003be2bb32275dd85b38f2d28a0585005a99cfc',
+    872000: '84a32e92012a356106e9657da8dab1a5491ea588fc29d411c69b20680c666420',
+    873000: 'adf5921bbbfaa43929f67e6a070975313b77b456e262c700a27be611fceb17ae',
+    874000: '09eaa7c4b18c79a46a2895190333f72336826223d5c986849a06f5153f49f2a5',
+    875000: '235d7e4f31966507312149ea4c5e294aa84c695cf840117f0ef5963be7a0bda1',
+    876000: '9aa9cb806ccbec0475ac330b496c5b2edeba38ba3f1e13ddd54a01457634a288',
+    877000: 'c1e7f9b2b20bb1c4c0deadbc786d31fdf36f262325342aa23d1a66e2846b22bc',
+    878000: 'ee0d2b20ac28ce23ab38698a57c6beff14f12b7af9d027c05cc92f652695f46b',
+    879000: '0eb0810f4b81d1845b0a88f05449408df2e45715c9210a656f45278c5fdf7956',
+    880000: 'e7d613027e3b4ca38d09bbef07998b57db237c6d67f1e8ea50024d2e0d9a1a72',
+    881000: '21af4d355d8756b8bf0369b2d79b5c824148ae069026ba5c14f9dd6b7555e1db',
+    882000: 'bc26f028e547ec44fc3864925bd1493211773b5cb9a9583ba4c1909b89fe0d33',
+    883000: '170a624f4be04cd2fd435cfb6ba1f31b9ef5d7b084a25dfa23cd118c2752029e',
+    884000: '46cccb7a12b4d01d07c211b7b8db41321cd73f30069df27bcdb3bb600c0272b0',
+    885000: '7c27f79d5a99baf0f81f2b09eb5c1bf905976a0f872e02bd4ca9e82f0ed50cb0',
+    886000: '256e3e00cecc72dbbfef5cea627ecf1d43b56edd5fd1642a2bc4e97c17056f34',
+    887000: '658ebac7dfa62bc7a22b1a9ba4e5b425a866f7550a6b40fd07de47119fd1f7e8',
+    888000: '497a9d02868605b9ff6e7f15948a83a7e07606829107e63c2e091c90c7a7b4d4',
+    889000: '561daaa7ebc87e586d37a96ecfbc72484d7eb602824f38f484ed333e78208e9e',
+    890000: 'ab5a8cb625b28343f8fac858eab6576c856dab88bde8cda02b80b3edfd307d71',
+    891000: '2e81d9fc885ddc09222b298ac9efbb73638a5721802b9256de6505ecf122dbaa',
+    892000: '73be08881b8832e986c0bb9a06c70fff346edb2afaf69630e47e4a4a90c5fece',
+    893000: 'd39079dcaa4d8af1c26f0edf7e16df43cd857a31e0aa4c4123226793f1ab497f',
+    894000: '0a3b677d72c590d4b1ff7a9b4098d6b52d0dc10d64c30c2766d18e6eb02872cd',
+    895000: 'a3bbba831f48c5b68e494ee63015b487782c64c5c24bb29436283360c28fd1e0',
+    896000: '20af178a192ca43975ab6c838fe97ca42ba6c682682eddbc6481efd153ecb0a2',
+    897000: '8d0ee14b9fdb853a09ab2951d26b8f7cb8bc8038b09513bd330ee4b0bdcc4780',
+    898000: 'c97fbb70f804408b131a98f9fb4c04cdf2df1655d3e8ff2e0d58ed8537349f4e',
+    899000: 'eba2be80478e8dec2d66ca40b853580c5dad040351c64c177e3d8c25aff6c1b6',
+    900000: 'c4dc344a993558418b93b3f60aaef0030e2a4116086577fbf1e2f544bdbddae1',
+    901000: '36d84229afa63045875fc8fea0c55de8eb90694b3a37cceb825c87abf1fea998',
+    902000: '8ca4890ecfc5e3f9d767e4fcdf318a1e3e3597675bbcfe534d64e76bc4e8fbf4',
+    903000: '8b9f6a7514033c57668ca94fb3758cc6d1ef37ac982c2ff5a9f0f206fcd8d0a8',
+    904000: 'e9ae813991f35ca89af2fe1f1b6adf9e93c6b1dd6a74f003ebbe699a30b252ea',
+    905000: 'd426489d01d4f4c829f2eb68a67721d2c0e1c71e8c33ef9253593447e8603462',
+    906000: '63000bbed97451e68d64485c02c1c3d90b4156237dac315f4e012ffb538e375b',
+    907000: '96759653a4e514541effa7ef86d9f22a272ddde7b069149d17e9d9203a1edafb',
+    908000: 'eec6477d2f3b71bde76dc2380d6e06aa8aa306ca56ba1dd15a31c22ae0db501b',
+    909000: 'd5c2984cf130335aa29296ba5b17672d00360fe0ec73977326180014908c0b55',
+    910000: '7b99cb1c94144f606937903e173bd9ef63bfffd3db8110693fa4c2caa0abc21f',
+    911000: '95eed0d9dd9869ac6f83fa67863e77f24df69bcb90fef70918f30b2400e24ea8',
+    912000: '34c3c8780c54ecced50f0a6b394309d09ee6ce37cd98794699c63771d1d91144',
+    913000: '536052ddcd445702160288ef3f669ce56868c085315556c9f5ca081ef0c0b9e1',
+    914000: '1bcd1fe9632f93a0a1fe7d8a1891a4fc6ef1be40ccf887524a9095ed7aa9fa44',
+    915000: '139bad9fa12ec72a37b62ad8511300ebfda89330fa5d5a83861f864b6adeae67',
+    916000: '81d15282214ff83e2a034212eb58abeafcb5664d3734bff13b22b4c093b20fea',
+    917000: 'f31081031cebe450e4450ef397d91790fc0068e98e6746cd0aab86d17e4448f5',
+    918000: '4af8eb28616ef0e859b5471650c7f8e910cd692a6b4ff3a7171a709db2f18e4e',
+    919000: '78a197b5f9733e9e4dc9820e1c79bd335beb19f6b87056e48e8e21fbe27d83d6',
+    920000: '33d20f86d1367f07d6731e1e2cc9305252b281b1b092403133924cc1052f501d',
+    921000: '6926f1e31e7fe9b8f7a81efa73d5635f8f28c1db1708e4d57f6e7ead951a4beb',
+    922000: '811e2335798eb54696a4b11ca3a44b9d79486262119383d542491afa9ae80204',
+    923000: '8f47ac365bc380885db809f2818ffc7dd2076aaa0f9bf6c180df1b4358dc842e',
+    924000: '535e79802c10630c17fb8fddec3ba2bf85eedbc0c076f3575f8189fe887ba993',
+    925000: 'ca43bd24d17d75d55e72e45549384b395c62e1daf0d3f58f296e18168b918fbf',
+    926000: '9a03be89e0725877d42296e6c995d9c48bb5f4bbd971f5a9add191af2d1c144b',
+    927000: 'a14e0ef6bd1bc221dbba99031c16ddbbd76394186677c29bdf07b89fa2a6efac',
+    928000: 'b16931bd7392e9db26be975b072024210fb5fe6ee22fc0809d51980aa8068a98',
+    929000: '4da56a2e66fcd98a70039d9061ea5eb0fb6d9460b437d2191e47441182419a04',
+    930000: '87e820e2237a54c4ea100bdd0145598f05add92185cd3d0929aa2d5099f4d5e0',
+    931000: '515b22c91172157c443a47cf213014aff144181a77e276e291535ab3762bb1ae',
+    932000: 'e130c6a9eb416f96256d1f90256a148957daa32f56af228d2d9ce6ff27ce2011',
+    933000: '30c992ec7a9a320fb4db260373121efc7b5e7fc744f4b31defbe6a7608e0749e',
+    934000: 'ec490fa0de6b1d78a4121a5044f501bbb3bd9e448c18121cea87eb8e3cadba41',
+    935000: '603e4ae6a6d936c79b3f1c9f9e88305930953b9b390dac442976a6e8395fc520',
+    936000: '2b756fe2de4328e598ed511b8828e5c2c6b5cdda1b5e7c1c26f8e0424c81afa9',
+    937000: '1ae0f15f14a0d4819e34a6c18de9428a9e43e17d75383bffa9ffb18358e93b63',
+    938000: 'cbd7001825ec87b8c6917d6e9e7dc5c8d7767788b6ffd61a61d0c612dbe5de66',
+    939000: 'd770d0395aa79076044783fb37a1bb173cb95c93ff1ba82c34a72c4d8e425a03',
+    940000: '3341d0a0349d091d88d233cd6ea6e0ad553d52039b4d47af51b8a8e7573a7916',
+    941000: '16123b8758e99344ebe6670cd95826881b274c31d4da2a051052955a32bade3a',
+    942000: 'ac7430961e77f902918fe79a52cbf6b523e3f2804ec83d0b17908e131ea9ea68',
+    943000: '2ad08a6877e4687dcb7a623adeddc88403e8082efd6de28328b351282dc141e2',
+    944000: '81382e8c1f47fa7c03fa1726f9b09ed1cd38140fe50683896eaa1b403d7e5fe3',
+    945000: '152bfbb166da04dab16030af28ae65b3275819eed1d0bbfc11eba65616ebefd6',
+    946000: '25b3da0962f87a0d3e4aec8b16483efbcab9514893a42fd31f4cb544ddc45a1f',
+    947000: '2cb738ba342436628ff292797e3d36c4752d71bdc1af87fe758d469d06e36e0e',
+    948000: 'b3683e18570fcc8b986720514539181ec43fb5dbc20fe314c56ab6bd31ab766a',
+    949000: '94ced5bfba55ccffc909bf098d537e047d8d4cbb79f5e2a74146073f39804865',
+    950000: 'b11543cd2aedae27f6ddc3d2b431c897fdcfe59ed3c926b0777bc1e99de4d12a',
+    951000: '21508881a7f80fcd0b9b27bbcfba634b39c6525f5313968c4605cd55b4fec446',
+    952000: 'f9b3ed919c9ca20cd2927d899ee7a86c93c2dd919dafb6fdb792f2d9f1895cb0',
+    953000: 'cf578d8e80eec4102dc1b5321f10b36020b3b32f4b5d4664c90c412ca2ef6b42',
+    954000: 'ed17c919ae5c4be835966b47f667d6082c75917b95584b2d2aff0e32f5c8aa98',
+    955000: '948ea467fa01a20122e2146669214fdd3bb025038554609f7299ece5bca63e39',
+    956000: 'b50ff4c02957ed8764215d25f206f6f1fe6d0eb712a378b937ff952dd479afd2',
+    957000: '169922a3e51517ba6104a883d29aac03a9d20b4d448bd2773137b0d790e3db6b',
+    958000: '92258ac2e8b53167dc30436d93f385d432bd549711ab9790ba4e8263c5c54382',
+    959000: '7ca824697459eb302bcd7fba9d255fb269555abe7cf9d2dd5e54e196d751e682',
+    960000: '89f9ec925d23698076d84f9e852ab04fc956ac4465827303de0c3bb0b685eb32',
+    961000: '41cf75cd71bc12b93674c416e8b01b7410eb9e09eb8727ad93ff0b833c9966c9',
+    962000: '7db1f1dbff3e389713067879bfedf9513ec74bb1e128b13fc2fe23ad55fd0306',
+    963000: 'a35e71c611b2227adeac824d151d2f09bdbecd5765a4e62c6e74a3e4290abc66',
+    964000: 'dc1811130e249d2208d6f85838512b4e5482efb0bd2f619164a68a0c60d7f248',
+    965000: '92f5e25dd1c03102720dd0c3136b1a0769901bf89fcc0262a5e24405f349ca07',
+    966000: '08243d780d8ba96a940f409b87d9c6b8a95c92804173b9156ada0dad35b628dc',
+    967000: 'cb769a8935bb6faeb981da74f4079babbbb89476f825cc897f43e79790295260',
+    968000: 'ff3fc27d2998f4dc4ac1ff378afe14c7d0f43cc328deb9c978ec0e067d1dfaf9',
+    969000: 'e41a3452f45d5f025627d08c9c41017679e9c4804371dd1cc02f3ed49f85dbb2',
+    970000: 'f5eaaf7ba6b47245a4a8096a7785c7b25dc6db342ac2ccbba0c321e97ab58284',
+    971000: '75414062f1d4ed675dadc8f04ba10147a484aaca1ae316dc0b896a92809b3db6',
+    972000: '5bcf2ee00133774c7d060a1a1863dfccc20d5127ecb542470f607dec2504fe6f',
+    973000: '07d15b9656ecde2cd86a9d22c3de8b6505d6bab2aa5a94560b0db9119f1f6f6c',
+    974000: '2059e7924d7a210a88f5a65abc61152506a82edccd27416e796c81b9b8003f13',
+    975000: '7fcf5d8b2c0e51cfbdaa2502a9da0bdb323646899dad37dacc39af9f9e16fc5c',
+    976000: '02acb8cf87a0900436eccfca50371948531041d7b8b410a902205f84dd7fb88e',
+    977000: '2636dfd5a47016c893265473e78ecbf2000769d886f0d01ee7a91e9397210d15',
+    978000: 'ce92f52a35096b94bea73a7d4e113bc4564a4a589b66f1ab86f61c822cf9ee76',
+    979000: '21b8102f5b76be0c8e20d537ebc78ebe46bfcea6b6d2dda950ce5b48e85f72d7',
+    980000: 'f4df0bd63b36105705de62266d654612d9804bad7069d41344de269657e6f084',
+    981000: 'f006cd2718d98d774a5cd18394db7744c812fa149c8a63e76bab934aee89f571',
+    982000: 'da5d6609265d9153022d823b0260aa07e7511ceff7a3fd2ca7ce83cb3900a661',
+    983000: '3a26f3f02aa145fa8c5268fbe10dd9c3546d7dda57489ca5d4b161beb0d5a6e2',
+    984000: '968e8cd37a1137797d40f39f106cae62d1e252b46c7473b9434ad5f870ee88fb',
+    985000: '3129c3bf20deace1a9c92646a9d769da7a07f18dcd5b7a7b1e8cf5fd5390f8e1',
+    986000: '6ce830ca5da322ddbb97fc572ea03218913d070e5910516b33c6113b02b23c21',
+    987000: '7fb1a8635623847132ab766a99b792953379f782d1115b9649f5f9c5a742ca04',
+    988000: '5e8e6c6da7f271129c20c4dd891dcb1df4f9d690ee7cf391c6b7fbd028a0da4c',
+    989000: '12919e34bb9a9ac1d2a01e221eb8c511117fc4e1b3ae15355d95caf4673bdb08',
+    990000: '016f8b18227a0c09da55594a98638ad5b0fbb4896e2ab6163ac40b6015b2811e',
+    991000: 'ddf8cd6e2f4ee07530ae7567cef4fa2c2fd4a655cb20e20422e66fd49bde6489',
+    992000: 'dca77707c0caa3a9605f3dadf593402339c29448869907fb31f6c624e942dcbd',
+    993000: 'de9acc4c7c482ecac741fd6acbbc3a333afab52f3fe5eea4130c0770299a56dd',
+    994000: '54420631f8a801a1b8f391088f599ee22cedc06f24bf67f18272feb8fe70c682',
+    995000: '4b44b26e3e2495716dfd86fc42594cd4b1e4b70bdab4f0905cce4cb9556e008a',
+    996000: 'd6e41fd301fc5f519c343ceb39c9ff845656a4482e4e182abdcd3963fd5fde1c',
+    997000: 'd68b6a509d742b182ffb5a98b0e585a2320a5d3fe6977ad3e6cd06835ef2ea55',
+    998000: '1efcdcbadbec54ce3a93a1857253614536c34f05a0b1924f24bff194dc3392e1',
+    999000: '10a7713e46f47527f3819b4a9257a03f3e207d18e4917d6bcb43fdea3ba82b9a',
+    1000000: '1b4ddb1436df05f07807d6337b93ee1aa8b600fd6a910a8fd5313a39e0440eec',
+    1001000: 'cde0df1abdae26d2c2bdc111be15fb33231c5e167bb8b8f8eec667d71379fee4',
+    1002000: 'd7ce7a96a3ca73a4dfd6a1780e23f834f339142519ea7f45d256c113e27e4857',
+    1003000: 'b1a9b1c562ec62b9dd746d336b4211afc37482d0274ff692a44fa17ac9fe9a28',
+    1004000: '7afd6d0fb0014fbe16a31c84d3f1731736eaeef35e40bb1a1f232fb00345deae',
+    1005000: '4af61ce4cda5de58277f7a67cadea5d3f6ce56e54785b188e32306e00b0414df',
+    1006000: '08e1fb7295efd4a48cb999d899a3d481b682ddbce738fecd88a6d32cbe8234f0',
+    1007000: '14a367a41603dd690541daee8aa4a2882260059e3f85bd8978b7431e8f7db844',
+    1008000: 'e673230e62aaefad0678611f94ff35ee8a6e18eb96438bdfb4b614f54f54dba7',
+    1009000: 'e191af8fb71d0d91419abd19443af3d3f23ee4fe359bb8c390429cc838132bde',
+    1010000: 'ffdba58f184cf60838b75b7899b6633e7cfd34cf36eded572c0133d07387bc49',
+    1011000: '40801af3a5546cb9d53e05e21b74be09de9a421b762ca1d52d2266f5c2055ce8',
+    1012000: '552519acebed0e38102f5270dc60b1da7a123600b6b94169ae74462ae454693f',
+    1013000: '1eee96f48418929927eaa9642777bc806d326cfffaf077bc8695a7ecd438d631',
+    1014000: 'a471093e1de2a8db586412d7351c8d88e44ea890f46e9b43251af427a0a4a879',
+    1015000: '57532f5a522295cc139f008bdcb7a1e6d02e6035d5221b2687c7c216f06297a2',
+    1016000: 'ec46dba07addcb6e62f58456a53c513d876f1c49ae7d76d230adb8debd26027d',
+    1017000: '33ea8d25f342a7465ed71e4bab2b91007991e0994c61d321e3625301a1390322',
+    1018000: '4871c03cc95d4ce0a39bd2cebbb001b2ea1cce1b3561bb841d88f43bb9d12ffd',
+    1019000: 'f5248257576eb2ff4139d6374cc7ce34121cc942598cf9e04d2bd572e09189bb',
+    1020000: 'e7785286897c85cfb0276957bff216039eeb11bc1ebca89d0bb586022caa5750',
+    1021000: 'a30220f17d060634c5f6a1ddc5ea34b01c18fb5eb7e0e8267b66bf5a49525627',
+    1022000: '6083ea49e64ac0d4507c674237cf87d30b90b285ec63d082e626df0223eb7c9c',
+    1023000: '1dc5596d716bc33ee0f56fc40c1f073155a58a7692935c9e5854ef3b65b76828',
+    1024000: '065adfee40dc33abff07fb55339571712b959bc1830dc60b6691e36eab1508ae',
+    1025000: 'bb6903752d31278570e774b80a80782179c78f099e58c3dc4cba7afea7a471c4',
+    1026000: 'f3050f3c2f3a76f5084856b0f089383517caa3f51530fbc29335308f5f170625',
+    1027000: '746ed3701510d07958d11a06f22dbb839d9858373dc5a33249dd69e91bab01fd',
+    1028000: '43f7a96ea6a45b78c29ad4a2f8680ef184438c2bd3686172b0564e0ae6dd7ba1',
+    1029000: 'cbb9916099c59e14fe61d284374f4feaa3d43afec59e4698ed92143576f24b34',
+    1030000: '2e805fc2331e32e586ea692bc3d4e6b11e1ec3f1cab6e331b459f9f1ac9a1f1e',
+    1031000: '04f324f8f6d4f9901cf65f78dc91d6010ea6cf125f5ac0253b57b5f1f79e81e0',
+    1032000: '60ca62f52fdfd858b0ee0fdb380648bde85ca14e2a73565205ed4ee0bc861c77',
+    1033000: 'eb60aac23d599d3099cf98ed8fc3213f1bc06bc1c677429b303e9c81f79f1340',
+    1034000: 'f0328df2daf119ce673ddfa7a39a84576985f701f7a7dec3f56f58c2019ebd4d',
+    1035000: 'f9d3cbce3854de168d8835c96917c01be6244c8f82641e8d9398dfffec4e7107',
+    1036000: '7dca97e6e1d6ed70aa7805f74b768009a270e7ebe1dd951e8727d1d2f2d271f2',
+    1037000: '5329504126b2845b3044f423b521e77ff58d7d242f24bf87c87f4d8d4e03a947',
+    1038000: '5bad3ad55e3daa415f3182a1f2a099fe1767e8fae34e9bb95d47e242b8971434',
+    1039000: 'c29729b8ba49ac0043fe4aa6fc971f8ac3eda68ff92970957ada39a2989b2491',
+    1040000: 'f303aebfc9267600c081d0c021065743f93790df6f5c924a86b773788e0c45be',
+    1041000: 'a1cbe5059fa2275707785b77970c36d79b12c1ba93121bc9064ab9b64abacf7b',
+    1042000: '004b0dd4e438abc54ae832d733df32a6ba35b75e6d3e0c9c1dee5a7950507295',
+    1043000: '31893a3fe7bb4f6dd546c7a8de4a65990e94046aab442d18c68b6bf6acd54518',
+    1044000: '2c4dd479948acc42946f94050810000b0539864ad24a67a7251bff1c4971b035',
+    1045000: '1cea782d60df35a88b30ae205ce37e30abc7cad2b22181722be150bd92c53814',
+    1046000: 'ee808f0efb0f2ef93e8599d8b7f0e2e7c3cdc42353e4ea5165028b961f43d548',
+    1047000: '75f057e2a8cb1d46e5c943d63cc56936a6bac8b1cb89300593845a20baf39765',
+    1048000: '2abcd227f5314baed85e3c5b49d3888a60085c1845c955a8bf96aa3dd6394798',
+    1049000: '5d0ec24b9acd5ab21b42f68e1f3142b7bf83433b98f2fa9794586c8eff45893e',
+    1050000: '1d364b13a4c17bd67a6d1e5f77c26d02faa014d7cd152b4da70380f168b8e0ff',
+    1051000: 'b9a20cec21de84433be9b85817dd4803e875d9275dbc02907b29888431859bae',
+    1052000: '424cb56b00407d73b309b2081dd0bf89213cf024e3aafb3090506aa0ba10f835',
+    1053000: '6df3041a32fafd6a4e08778546d077cf591e1a2a16e77fe7a610efc2b542a9ff',
+    1054000: '78f8dee794f3d4366019339d7ba74ad2b543ecd25dc575620f66e1d535411971',
+    1055000: '43b8e9dae5addd58a7cccf62ba57ab46ffdaa2dcd113cc8ca537e9101b54c096',
+    1056000: '86b7f3741343f85d93410b78cc3fbf03d49b60a664e908703016aa56a206ae7e',
+    1057000: 'b033cf6ec622be6a99dff536a2cf73b36d3c3f8c3835ee17e0dd357403e85c41',
+    1058000: 'a65a6db692a8358e399a5ac3c818902fdb60595262ae05531084848febead249',
+    1059000: 'f6d781d2e2fdb4b7b074d1d8123875d899cdbd6be375cb4288e86f1d14a929f6',
+    1060000: 'cd9019bb1de4926cca16a7bef1a46786f10a3260d467cda0775f73361795abc9',
+    1061000: 'ed4f5dc6f475f95b40595632fafd9e7e5eef388b6cc15772204c0b0e9ee4e542',
+    1062000: 'c44d02a890aa66979b10d1cfa597c877f498841b4e12dd9a7bdf8d4a5fccab80',
+    1063000: '1c093734f5f241b36c1b9971e2759983f88f4033405a2588b4ebfd6998ac7465',
+    1064000: '9e354a83b71bbb9704053bfeea038a9c3d5daad080c6406c698b047c634706a6',
+    1065000: '563188accc4a6e311bd5046516a92a233f11f891b2304d37f151c5a6002b6958',
+    1066000: '333f1b4e996fac87e32dec667533715b31f1736b4342806a81d568b5c5238456',
+    1067000: 'df59a0b7319d5269bdf55043d91ec62bbb30829bb7054da623717a394b6ed678',
+    1068000: '06d8b674a205393edaf20c1d837baadc9caf0b0a675645246263cc163302241d',
+    1069000: 'ac065c48fad1383039d39e23c8367bad7cf9a37e07a5294cd7b04af5827b9961',
+    1070000: '90cd8b50f94208bc459081356474a961f6b764a1217f8fd291f5e4828081b730',
+    1071000: '3c0aa207ba9eea45458ab4fa26d6a027862592adb9bcce30915816e777dc6cfc',
+    1072000: '3d556c08f2300b67b704d3cbf46e22866e3ac164472b5930e2ada23b08475a0f',
+    1073000: 'a39b5c54c24efe3066aa203358b96baea405cd59aac6b0b48930e77799b4dd7d',
+    1074000: 'e8c8273d5a50a60e8744716c9f31496fb29eca87b4d68643f4ecd7ec4e400e23',
+    1075000: 'b8043ae41a1d0d7d4310c85764fcba1424733df347ffc2e8cbda1fe6ccbb5153',
+    1076000: '58468db1f91805e767d334824d6bffe54e0f900d1fb2a89b105086a493053b3d',
+    1077000: '04a78749b58465efa3a56d1735cd082c1f0f796e26486c7136950dbaf6effaa4',
+    1078000: 'e1dd6b58c75b01a67d4a4594dc7b4b2ee9e7d7fa7b25fd6246ce0e86eff33c75',
+    1079000: 'd239af017a6bb664485b14ad15e0eb703775e43018a045a8612b3697794460da',
+    1080000: '29ae5503f8c1249fefeb63fd967a71a70588ee0db1c97497e16366163a684341',
+    1081000: '05103ab27469e0859cbcd3daf42faa2bae798f522534697c7f2b34f7a050ee0f',
+    1082000: '4553d2cb7e90b6db11d242e287fe96822e6cd60e6388b94bf9006411f202ba03',
+    1083000: '97995acd178b2a142d571d5ae1c2a3deaf93a909fd91fb9c541d57f73e32dc99',
+    1084000: '9e3f23376af14d76ab24cd54e321dec019af73ad61067d959ff90043acc5ffcc',
+    1085000: '81c056b14f13cee0d6d6c8079fdd5a1a84c3a5c76cc9448612e8ef6d3531300e',
+    1086000: '8a0004f6809bdd075915a804e43991dfe8f22e05679d2fdaf8e373f101bac5c2',
+    1087000: '27c45a4c9ad24e038f2ebe40835a1c49ac7221d7185082866ee354351ba87c7a',
+    1088000: 'fd27e21747117b00b4ada1cba161ac49edb57cca540f86ac5ba885050f08f824',
+    1089000: 'bff867335767103bc3ed15ede5b9fde88016f8ede15dc5bf3e81ea40dcfc61ae',
+    1090000: '608f75016d1db08888dd59640f63e838c19bdfa833c0cc177ad3d2b818b0db5b',
+    1091000: '90750b452bd4dedaab6b57fecbfe88f71ce3d5437fad7f9ec0fdd270445c7526',
+    1092000: '98287b39f9f1233017dc5d932e5c77f0521ca84587eb3f39f0e7b6c297c749af',
+    1093000: '68a5846ed05c9bb142197849106838765f90f15c10b2cc938eef49b95eaa9d33',
+    1094000: '5660a1aac2fc763a417fc656c8887fc8186bf613ae1ccbb1a664fb43ce1fa1d6',
+    1095000: '62bad3db418b3f4cad3596881b645b72479c71deb0d39c7a4c8bd1577dc225fd',
+    1096000: 'e0e4b2b183591f10dd5614c289412f2fb5e320b7d3278f7c028f42f591872666',
+    1097000: 'a233a233fc2aa5dab9e75106d91388343ef969458ea974f1409a2ab5fc441911',
+    1098000: '16dfa5fa6cbd1188e562697b5f00ac206960d0851ed84adf37ae975fd5ffdd6a',
+    1099000: 'b8a870b7dc6d3263730c00f59d52aa6cce35dc59aa8fba715034cc2d14927260',
+    1100000: 'a3cd7749743da22a3846dcc2edbf1df21b938e829419389e3bc09284797c5b43',
1101000: '75b14c2a95e2a095949729b7c0b624bd725a2de98404a8e3247b60c977d0198e',
|
||||||
|
1102000: '4d3af64d37064dd5f57e25d61f248a1e21c1b1cadd7bb1404e35c9fbe06f1fd4',
|
||||||
|
1103000: 'd73c92bfed358dfcd7659228974ab75ea2fc86f2301ee47133adad8075203872',
|
||||||
|
1104000: '30cd82354f37bc0b412123867c7e1835206022a7501853bf8c0d3df02f291645',
|
||||||
|
1105000: '1d2ef984f26693dce77460cd2694e5da46e675077e91a1cea26051733b01a7ef',
|
||||||
|
1106000: '51c076c304222fe3ca308ba6968c46fef448f85be13a095cecb75b90e7954698',
|
||||||
|
1107000: '99e2221339e16acc34c9816f2ef7b866c2dd753aa3cbe484ae831959a23ece68',
|
||||||
|
1108000: '0f1227c250296bfe88eb7eb41703f99f633cfe02870816111e0cadfe778ddb19',
|
||||||
|
1109000: 'b35447f1ad76f95bc4f5886e4028d33acb3ad7b5000dd15516d3f11ce4baa990',
|
||||||
|
1110000: 'ac7baff996062bfaaaddd7d496b17e3ec1c8d34b2143095645ff22fb3888ae00',
|
||||||
|
1111000: '430bbbdcca36b2d69b6a2dd8b07c583a060a467e5f9acbc6de62462e1f7c7036',
|
||||||
|
1112000: 'e5274dea029dc44baff55c05b0555f91b74d29ffd40e3a8c4e2c5b57f9d40bef',
|
||||||
|
1113000: 'cf43863249fa42cfe108220dd40169dac702b0dd9cf5cb699cf2fc96feda8371',
|
||||||
|
1114000: 'fa1c0e551784d21c451564124d2d730e616724f3e535de3c186bcdeb47e80a8f',
|
||||||
|
1115000: '49fe6ecee35a397b83b5a704e950ad028cfb4b7e7a524021e789f4acc0fd6ffe',
|
||||||
|
1116000: '74ecded36751aa8b7901b31f0d16d75d111fc3c40b567f649c04f74ed028aa5c',
|
||||||
|
1117000: 'd9ca760a22190bdf545766b47d963c738a4edcc27f4d15ca801b35751577cfa7',
|
||||||
|
1118000: 'c28d42f871682800ac4e867608227cfb6bc4c00b618e83a8556f201a1c28813c',
|
||||||
|
1119000: 'c5fafc4e1785b0b9e84bb052e392154a5ba1aefe612998017e90772bcd554e08',
|
||||||
|
1120000: 'aa054d428bc9ccee0761da92163817163413065fe1e67ef79a056c5233ea3476',
|
||||||
|
1121000: '0df295bb944218503bd1bf66d2ece0c50fd22dae3391b80673a7ad1e4e5c3934',
|
||||||
|
1122000: 'a13abb350a26673b3933b1de307a60a6845ca594d502599548c6253e21a6d8e8',
|
||||||
|
1123000: 'a4bc6a3abf9ed1f4b14338ff0f03f83456312bc91a93fa89ae6db493050115e1',
|
||||||
|
1124000: '65869938df99adf0dda76200291ce09a54c9bcc787e4bb62cd72c367db58f4f0',
|
||||||
|
1125000: 'ea5e918233b14c3c73d488a906e3741c61bdcafe0393bd0404168fe80c950a46',
|
||||||
|
1126000: 'ce88cd35104fcec51bcee77302e03162dc694802536f5b668786b2245e61bca5',
|
||||||
|
1127000: 'ea19c0c8d205be4be87d02c5301c9ed331e7d75e25b93d1c2137c248882af515',
|
||||||
|
1128000: '006f32d63c2a3adcf4fbad0b0629c97f1beab6446a9c27fbde9472f2d066219e',
|
||||||
|
1129000: '218e5392e1ecf471c3bbc3d79c24dee30ac8db315dbeb61317318efb3f221163',
|
||||||
|
1130000: '30b9da0bd8364e9cd5551b2529341a01a3b7257a238d15b2560e2c99fdb324e8',
|
||||||
|
1131000: '8a7f382cfa023d2eba6639443e67206f8883b57d23ce7e1339234b8bb3098a82',
|
||||||
|
1132000: 'bf9af68a6fe2112d8fe311dfd52334ae2e7b0bac6675c9ebfddb1f386c212668',
|
||||||
|
1133000: '1a30951e2be633502a47c255a93ddbb9ed231d6bb4c55a807c0e910b437766b3',
|
||||||
|
1134000: 'a9bcaf3300b7915e701a8e396eb13f0c7287576323420be7aab3c3ba48020f76',
|
||||||
|
1135000: '337eed9ed072b5ad862af2d3d651f1b49fa852abc590b7e1c2dc381b496f438a',
|
||||||
|
1136000: '208761dbc29ec58302d722a05e937a3cf9e78bfb6495be395dd7b54f02e169dc',
|
||||||
|
1137000: '4e5b67ff3324b64e268049fdc3d82982b847ee359d409ade6368864c38a111e5',
|
||||||
|
1138000: '55d1d0833021a664e85eec8cc90a0985e67cc80d28841aaa8c2231ec28087ebb',
|
||||||
|
1139000: 'e750ada1ec9fa0f2f2461ed68958c7d116a699a82ec12911da5563139f8df19e',
|
||||||
|
1140000: '9cf81407b6ccc8046f0233f97484166945758f7392bb54841c912fcb34cf205c',
|
||||||
|
1141000: 'fccf32b2fae03e3b6b562483776625f9843cd68734c55659e2069cde7e383170',
|
||||||
|
1142000: 'c3608c215dd6569da6c1871c4d72a09ab1caa9663647f2a9454b5693d5d72a65',
|
||||||
|
1143000: 'bd39cb8c4e529d15bbea6baeec66afe52ca18afe32bd812f28fbb0676647cdff',
|
||||||
|
1144000: '6e42d02538565ce7e2d9bf31a304f1fd0ac122d35d17a030160575815901b0b1',
|
||||||
|
1145000: 'b9722e1de2904ce1219140fffb1f4f9f5a041f885faa634404238d103c738b4c',
|
||||||
|
1146000: 'd4de4271459966cee774f538a243d7db0689b213b296463d42e45c93194d7861',
|
||||||
|
1147000: '51fadf109f22bb85574d0fbcbd0b20992983e89aee3d415a7b1c37c44775d9a9',
|
||||||
|
1148000: '137e1fe8da31680d21a42e7421eb608a883a497314e4404625ce44b0edadde6a',
|
||||||
|
1149000: 'cb87867eb04203ce15e0763a2f4389376cea75e0a2877f55e2911c575bef07a8',
|
||||||
|
1150000: '977528ca7953a2c9c19fefaa3aab7ebdec3ac324d74a07d83764ba25d9be0689',
|
||||||
|
1151000: 'a09c51c832600ded63a19201df008075273ea248fd406886e93a2cbaa3bba46b',
|
||||||
|
1152000: '0e5367cfa0f00dd932a5bcc00dcc807fa6825161806bed588e16a57947b4b32d',
|
||||||
|
1153000: '55a9de3dcde2efb56a3c5fea7d22b98c1e180db9a4d4f4f6be7aae1f1cbd7608',
|
||||||
|
1154000: 'abc58cf71c4691ebfaef920252730cf69abbe9de88b424c03051b9b03e85d45a',
|
||||||
|
1155000: '4f074ce73c8a096620b8a32498362eb66a072eae95d561f2d53557cd513ae785',
|
||||||
|
1156000: '540a838a0f0a8834466b17dd456d35b8acae2ec8419f8bd9a704d9ea439062ac',
|
||||||
|
1157000: 'd5310ac671abdb658ea028db86c23fc729af965f91d67a37218c1412cf32a1f5',
|
||||||
|
1158000: '162d906a07e6c35e7c3ebf7069a200521605a97920f5b589d31b19bfd7766ee2',
|
||||||
|
1159000: '600bd8f5e1e62219e220f4dcb650db5812e79956f95ae8a50e83126932685ee0',
|
||||||
|
1160000: '91319398d1a805fac8582c8485e6d84e7490d6cfa6e44e2c630665b6bce0e6b8',
|
||||||
|
1161000: 'f7ad3cff6ee76e1e3df4abe70c600e4af66e1df55bf7b03aee12251d4455a1d4',
|
||||||
|
1162000: '85b9fbba669c2a4d3f85cdb5123f9538c05bd66172b7236d756703f99258454d',
|
||||||
|
1163000: '966085d767d1e5e2e8baf8eda8c11472ec5351181c418b503585284009aaea79',
|
||||||
|
1164000: '1c94e1b531215c019b12caf407296d8868481f49524b7180c7161b0363c1f789',
|
||||||
|
1165000: '803b6bf93735aeae2cf607824e2adf0d754b58da2516c2da1e485c697e472143',
|
||||||
|
1166000: '872561a82f7991633d0927d25cb659d096bbe556fe6dac7a0b6a679820733069',
|
||||||
|
1167000: '6bd7cdd605a3179b54c8af88d1638bf8133fab12cbf0a78d37cf21eddf4395a1',
|
||||||
|
1168000: '79946f5758c1817239cc642d27298bd710983551a8236e49832c6d818b097337',
|
||||||
|
1169000: 'b0994c60728e74de4aa361f37fa85e5296ce3188ae4e0b66d7b34fe86a239c9c',
|
||||||
|
1170000: 'a54188a5a64e0cf8da2406d16a0ac3983b087fc7d6231b6f8abf92cf11dc78cd',
|
||||||
|
1171000: 'ec2924d98e470cc6359821e6468df2c15d60301861d443188730342581230ef2',
|
||||||
|
1172000: 'b4ac11116aa73ce19428009a80e583e19dc9bcd380f7f7ce272a92921d5868d2',
|
||||||
|
1173000: '501d3551f762999dd5a799f3c5658fff2a7f3aff0511488272cd7693fefb8f9d',
|
||||||
|
1174000: '4660074ea48a78ae453cb14b694b2844cc0fb63ed9352ed20d11158bbb5c1f28',
|
||||||
|
1175000: '0727f6b1d9f8fe5677a9ffa0d475f53f5a419ef90b80896c22c2c95de22175de',
|
||||||
|
1176000: '150633d6a35496c24a93c9e19817e90f649c56b7e2558f99e97325bfd5df8b17',
|
||||||
|
1177000: '0849e19f22571b62dba8ff02f6b5a064a7ac36e7ed491321b3663567e8e17294',
|
||||||
|
1178000: '770dd463e7bad80f689f12934e4ae06e24378d1545dcf211fd143beaef49464e',
|
||||||
|
1179000: '059d383dcc60a49b658b674d92fc35cab07b06329c58d73818b6387cb0c06534',
|
||||||
|
1180000: 'e547cb3c636243ca9ae4cfb92c30a0f583eda84e329a5c1e5f64a26fc6fc791e',
|
||||||
|
1181000: '4521a4396ab02f73d45d7a3393ea1c602d255778d52c12079c88bfbad32aab43',
|
||||||
|
1182000: '051cfe993e4b0b34233403a9e8c397dd50e8b78a30fb07e9c260604ee9e624a9',
|
||||||
|
1183000: '44a69c99bb8b85e84ae279f2d8e5400d51cb3d5f0bcd178db49d55548cd66191',
|
||||||
|
1184000: '2a1d23c9bb3c71a533e0c9d25b03bfa7e9db8e014645f3e7fbede6d99fff0191',
|
||||||
|
1185000: 'bb90d6c6d77819163a9e909ee621d874707cdb21c91b1d9e861b204cf37d0ffa',
|
||||||
|
1186000: '4a92051b738ea0e28c64c64f1eb6f0405bc7c3427bef91ff20f4c43cf084d750',
|
||||||
|
1187000: 'f782ac330ca20fb5d8a094ee0f0f8c086a76e3f03ecc6a2c42f8fd07e52e0f41',
|
||||||
|
1188000: '94cb7b653dd3d838c186420158cf0e73db73ec28deaf67d9a2ca902caba4141a',
|
||||||
|
1189000: 'c8128e59b9ec948de890184578a113478ea63f7d57cb75c2c8d5c001a5a724c0',
|
||||||
|
1190000: '4da643bd35e5b98932ae21515a6bffb9c72f2cd8d514cd2d7eac1922af785c3f',
|
||||||
|
1191000: '0f922d86658ac3f53c5f9db360c68ab3f3253a925f23e1323820e3384214719a',
|
||||||
|
1192000: '4c3ab631cf5ba0c236f7c64af6f790fc24448319de6f75dbd28df4e2648d0b7d',
|
||||||
|
1193000: 'eda118d1fac3470a1f8f01f5c78108c8ecdcd6420be30f6d20f1d1831e7b6975',
|
||||||
|
1194000: '5723fff88abd9bb5088476fa5f4221a61c6f8a718703a92f13248ad350abeea2',
|
||||||
|
1195000: '1715846f82d011919e3446c6ce675a65fb80338bd791d4e735702c4767d9adc4',
|
||||||
|
1196000: 'b497667996aee2db61e88f442e728be15ab0b2b64cfd43198691fcf6cdafacc8',
|
||||||
|
1197000: '309a6170d837b8cb334fb888a64ed4e47e6592747e93c8e9d1bf7d608cfef87d',
|
||||||
|
1198000: '3ea918ef64a67dec20051519e6aefaeb7aca2d8583baca9ad5c5bd07073e513a',
|
||||||
|
1199000: '4ec7b7361b0243e5b2996a16e3b27acd662126b95fe542a487c7030e47ea3667',
|
||||||
|
1200000: 'b829c742686fcd642d0f9443336d7e2c4eab81667c90ce553df1350ed10b4233',
|
||||||
|
1201000: '44c022887f1e126fd281b1cae26b2017fa6415a64b105762c87643204ce165a5',
|
||||||
|
1202000: 'b11cc739eb28a14f4e47be125aa7e62d6d6f90c8f8014ee70044ed506d53d938',
|
||||||
|
1203000: '997a7c5fd7a98b39c9ca0790519924d73c3567656b605c97a6fdb7b406c3c64d',
|
||||||
|
1204000: '7d25d872e17195ee277243f7a5a39aa64d8750cec62e4777146acf61a8e76b04',
|
||||||
|
1205000: 'ce8486ae745a4645bee081ef3291d9505174bed05b0668d963b2998b7643dbb0',
|
||||||
|
1206000: '46a0bcea3c411c600dffe3e06e3d1dfbf5879a7ec4dcf3848e794cefcbf2bc0b',
|
||||||
|
1207000: '37e6297bf6e4e2bdd40401d4d7f95e3e3bdafd4a7f76b9c52865cefc6b82b20b',
|
||||||
|
1208000: 'd09e3982a9827b8cf56a5a2f4031dc6b082926c1fd57b63beaaa6cfd534eb902',
|
||||||
|
1209000: '54ae9010a9f146c83464e7ee60b30d9dbee36418561abc4e8d61bce9baa2d21d',
|
||||||
|
1210000: '5dcfd33f8e5ac21c9ba8553758b8cd8afae7961cad428530b5109c2db2ebf39f',
|
||||||
|
1211000: '91c952348bb2c3dfac0d6531a3dac770ea6dab571af257530e9c55493c96bdd9',
|
||||||
|
1212000: 'e62cc3fe044a7f5de4c04a8aed5619548f9d5c6fad9f989d3382cb96de1d780d',
|
||||||
|
1213000: '66b46ffdca8acf1dd04528dadb28b6ac4ce38807c1b84abd685d4ddb3dc59a34',
|
||||||
|
1214000: '2ce4091756ad23746bab4906f46545953cadaf61deae0d78e8a10d4eb51866b1',
|
||||||
|
1215000: '83ce3ca087799cdc4b4c5e7cfeb4a127708724a7ca76aa5f7f4ec1ed48b5fca6',
|
||||||
|
1216000: '7d07b739b7991fbd74926281bf51bba9d5721afab39598720f9ff5f7410a6721',
|
||||||
|
1217000: '76adf49491670d0e8379058eacf0228f330f3c18955dfea1ebe43bc11ee065f3',
|
||||||
|
1218000: '77f422e7301a81692dec69e5c6d35fa988a00a4d820ad0ebb1d595add36558cc',
|
||||||
|
1219000: '8ba9d944f8c468c81799294aeea8dc05ed1bb90bb26552fcd190bd88fedcddf2',
|
||||||
|
1220000: '00330367c255e0fe51b374597995c53353bc5700ad7d603cbd4197141933fe9c',
|
||||||
|
1221000: '3ba8b316b7964f31fdf628ed869a6fd023680cca6611257a31efe22e4d17e578',
|
||||||
|
1222000: '016e58d3fb6a29a3f9281789359460e776e9feb2f0db500482b6e231e1272aef',
|
||||||
|
1223000: 'fdfe767c29a3de7acd913b627d1e5fa887a1af9974f6a8a6474db822468c785c',
|
||||||
|
1224000: '92239f6207bff3689c554e92b24fe2e7be4a2203104ad8ef08b2c6bedd9aeccf',
|
||||||
|
1225000: '9a2f2dd9527b533d3d743efc55236e73e15192171bc8d0cd910918d1ab00aef7',
|
||||||
|
1226000: 'eb8269c75b8c5f66e6ea88ad70883dddcf8a75a45198ca7a46eb0ec606a791bb',
|
||||||
|
1227000: '5c82e624390cd57942dc9d64344eaa3d8991e0437e01802473053245b706290c',
|
||||||
|
1228000: '51e9a7d727f07fc01be7c03e3dd854eb666697f05bf89259baac628520d4402c',
|
||||||
|
1229000: 'c4bfdb651c9abdeda717fb9c8a4c8a6c9c0f78c13d3e6cae3f24f504d734c643',
|
||||||
|
1230000: '9f1ce781d16f2334567cbfb22fff42c14d2b9290cc2883746f435a1fb127021d',
|
||||||
|
1231000: '5c996634b377412ae0a3d8f541f3cc4a354aab72c198aa23a5cfc2678cbabf09',
|
||||||
|
1232000: '86702316a2d1730fbae01a08f36fffe5bf6d3ebb7d76b35a1617713766698b46',
|
||||||
|
1233000: 'fb16b63916c0287cb9b01d0c5aad626ced1b73c49a374c9009703aa90fd27a82',
|
||||||
|
1234000: '7c6f7904602ccd86bfb05cb8d6b5547c989c57cb2e214e93f1220fa4fe29bcb0',
|
||||||
|
1235000: '898b0f20811f52aa5a6bd0c35eff86fca3fbe3b066e423644fa77b2e269d9513',
|
||||||
|
1236000: '39128910ef624b6a8bbd390a311b5587c0991cda834eed996d814fe410cac352',
|
||||||
|
1237000: 'a0709afeedb64af4168ce8cf3dbda667a248df8e91da96acb2333686a2b89325',
|
||||||
|
1238000: 'e00075e7ba8c18cc277bfc5115ae6ff6b9678e6e99efd6e45f549ef8a3981a3d',
|
||||||
|
1239000: '3fba891600738f2d37e279209d52bbe6dc7ce005eeed62048247c96f370e7cd5',
|
||||||
|
1240000: 'def9bf1bec9325db90bb070f532972cfdd74e814c2b5e74a4d5a7c09a963a5f1',
|
||||||
|
1241000: '6a5d187e32bc189ac786959e1fe846031b97ae1ce202c22e1bdb1d2a963005fd',
|
||||||
|
1242000: 'a74d7c0b104eaf76c53a3a31ce51b75bbd8e05b5e84c31f593f505a13d83634c',
}
@@ -141,7 +141,7 @@ class CoinSelector:
             _) -> List[OutputEffectiveAmountEstimator]:
         """ Accumulate UTXOs at random until there is enough to cover the target. """
         target = self.target + self.cost_of_change
-        self.random.shuffle(txos, self.random.random)
+        self.random.shuffle(txos, random=self.random.random)  # pylint: disable=deprecated-argument
         selection = []
         amount = 0
         for coin in txos:
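The optional `random` argument to `random.shuffle()` is deprecated (it was removed in Python 3.11), which is why the call above names it explicitly and carries a pylint suppression. A minimal sketch of the underlying idea, deterministic coin shuffling from a seeded `random.Random` instance instead of the real estimator objects:

```python
import random

def shuffle_coins(amounts, seed=1):
    # A dedicated Random instance keeps selection reproducible for a given seed
    # without disturbing the module-level PRNG state.
    rng = random.Random(seed)
    coins = list(amounts)
    rng.shuffle(coins)  # no second argument needed; the instance is the randomness source
    return coins

print(shuffle_coins([5, 1, 3, 2], seed=42))
```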
@@ -2,6 +2,7 @@ NULL_HASH32 = b'\x00'*32
 
 CENT = 1000000
 COIN = 100*CENT
+DUST = 1000
 
 TIMEOUT = 30.0
 
@@ -7,18 +7,23 @@ from binascii import hexlify
 from collections import defaultdict
 from dataclasses import dataclass
 from contextvars import ContextVar
-from concurrent.futures.thread import ThreadPoolExecutor
-from concurrent.futures.process import ProcessPoolExecutor
 from typing import Tuple, List, Union, Callable, Any, Awaitable, Iterable, Dict, Optional
 from datetime import date
 
 from prometheus_client import Gauge, Counter, Histogram
 from lbry.utils import LockWithMetrics
 
-from .bip32 import PubKey
+from .bip32 import PublicKey
 from .transaction import Transaction, Output, OutputScript, TXRefImmutable, Input
 from .constants import TXO_TYPES, CLAIM_TYPES
 from .util import date_to_julian_day
 
+from concurrent.futures.thread import ThreadPoolExecutor  # pylint: disable=wrong-import-order
+if platform.system() == 'Windows' or ({'ANDROID_ARGUMENT', 'KIVY_BUILD'} & os.environ.keys()):
+    from concurrent.futures.thread import ThreadPoolExecutor as ReaderExecutorClass  # pylint: disable=reimported
+else:
+    from concurrent.futures.process import ProcessPoolExecutor as ReaderExecutorClass
+
 
 log = logging.getLogger(__name__)
 sqlite3.enable_callback_tracebacks(True)
@@ -62,12 +67,6 @@ def run_read_only_fetchone(sql, params):
         raise
 
 
-if platform.system() == 'Windows' or 'ANDROID_ARGUMENT' in os.environ:
-    ReaderExecutorClass = ThreadPoolExecutor
-else:
-    ReaderExecutorClass = ProcessPoolExecutor
-
-
 class AIOSQLite:
     reader_executor: ReaderExecutorClass
 
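Together, the two hunks above move the thread-vs-process decision for the read pool up next to the imports: worker processes give real parallelism for read-only SQLite queries, but Windows and Android/Kivy builds cannot spawn them reliably, so threads are used there. A rough sketch of how such a pool is typically driven from asyncio; `read_only_fetchall` here is a hypothetical helper, not the module's real function:

```python
import asyncio
import sqlite3
from concurrent.futures.thread import ThreadPoolExecutor

def read_only_fetchall(path, sql, params=()):
    # Runs inside a worker: open a throwaway connection, query, close.
    conn = sqlite3.connect(path)
    try:
        return conn.execute(sql, params).fetchall()
    finally:
        conn.close()

async def main():
    pool = ThreadPoolExecutor(max_workers=4)  # ProcessPoolExecutor where supported
    loop = asyncio.get_running_loop()
    rows = await loop.run_in_executor(pool, read_only_fetchall, ':memory:', 'SELECT 1')
    print(rows)
    pool.shutdown(wait=True)

asyncio.run(main())
```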
@@ -84,10 +83,10 @@ class AIOSQLite:
         "read_count", "Number of database reads", namespace="daemon_database"
     )
     acquire_write_lock_metric = Histogram(
-        f'write_lock_acquired', 'Time to acquire the write lock', namespace="daemon_database", buckets=HISTOGRAM_BUCKETS
+        'write_lock_acquired', 'Time to acquire the write lock', namespace="daemon_database", buckets=HISTOGRAM_BUCKETS
     )
     held_write_lock_metric = Histogram(
-        f'write_lock_held', 'Length of time the write lock is held for', namespace="daemon_database",
+        'write_lock_held', 'Length of time the write lock is held for', namespace="daemon_database",
         buckets=HISTOGRAM_BUCKETS
     )
 
@@ -123,7 +122,13 @@ class AIOSQLite:
         if self._closing:
             return
         self._closing = True
-        await asyncio.get_event_loop().run_in_executor(self.writer_executor, self.writer_connection.close)
+
+        def __checkpoint_and_close(conn: sqlite3.Connection):
+            conn.execute("PRAGMA WAL_CHECKPOINT(FULL);")
+            log.info("DB checkpoint finished.")
+            conn.close()
+        await asyncio.get_event_loop().run_in_executor(
+            self.writer_executor, __checkpoint_and_close, self.writer_connection)
         self.writer_executor.shutdown(wait=True)
         self.reader_executor.shutdown(wait=True)
         self.read_ready.clear()
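Running `PRAGMA WAL_CHECKPOINT(FULL)` before closing folds the write-ahead log back into the main database file, so no stale `-wal`/`-shm` files are left behind for the next start. A standalone sketch of the same shutdown step with plain `sqlite3`; the file name is illustrative:

```python
import sqlite3

def checkpoint_and_close(path):
    conn = sqlite3.connect(path)
    conn.execute("pragma journal_mode=WAL;")
    conn.execute("CREATE TABLE IF NOT EXISTS kv (k TEXT PRIMARY KEY, v TEXT)")
    conn.execute("INSERT OR REPLACE INTO kv VALUES ('height', '1242000')")
    conn.commit()
    # Fold the WAL back into the main file before closing so later readers
    # do not have to replay a leftover write-ahead log.
    conn.execute("PRAGMA wal_checkpoint(FULL);")
    conn.close()

checkpoint_and_close("example.db")
```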
@@ -147,7 +152,7 @@ class AIOSQLite:
         self.waiting_reads_metric.inc()
         self.read_count_metric.inc()
         try:
-            while self.writers:  # more writes can come in while we are waiting for the first
+            while self.writers and not self._closing:  # more writes can come in while we are waiting for the first
                 if not urgent_read and still_waiting and self.urgent_read_done.is_set():
                     # throttle the writes if they pile up
                     self.urgent_read_done.clear()
@@ -155,6 +160,8 @@ class AIOSQLite:
                     # wait until the running writes have finished
                     await self.read_ready.wait()
                     still_waiting = True
+            if self._closing:
+                raise asyncio.CancelledError()
             return await asyncio.get_event_loop().run_in_executor(
                 self.reader_executor, read_only_fn, sql, parameters
             )
@@ -197,6 +204,8 @@ class AIOSQLite:
         self.read_ready.clear()
         try:
             async with self.write_lock:
+                if self._closing:
+                    raise asyncio.CancelledError()
                 return await asyncio.get_event_loop().run_in_executor(
                     self.writer_executor, lambda: self.__run_transaction(fun, *args, **kwargs)
                 )
@@ -232,6 +241,8 @@ class AIOSQLite:
         self.read_ready.clear()
         try:
             async with self.write_lock:
+                if self._closing:
+                    raise asyncio.CancelledError()
                 return await asyncio.get_event_loop().run_in_executor(
                     self.writer_executor, self.__run_transaction_with_foreign_keys_disabled, fun, args, kwargs
                 )
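The read loop and the three write paths above all gain the same guard: once `close()` has flipped `_closing`, queued readers and writers abort with `CancelledError` instead of waiting on events that will never be set again. A toy sketch of that gate, assuming a single closing flag and an `asyncio.Event` that writers set when they finish:

```python
import asyncio

class ClosableGate:
    def __init__(self):
        self.read_ready = asyncio.Event()
        self._closing = False

    def writers_done(self):
        self.read_ready.set()

    def close(self):
        self._closing = True
        self.read_ready.set()  # wake waiting readers so they can abort promptly

    async def acquire_read(self):
        await self.read_ready.wait()
        if self._closing:
            # mirror the AIOSQLite behaviour: refuse new work during shutdown
            raise asyncio.CancelledError()

async def main():
    gate = ClosableGate()
    asyncio.get_running_loop().call_later(0.05, gate.writers_done)
    await gate.acquire_read()
    print("read proceeds")

asyncio.run(main())
```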
@@ -377,14 +388,31 @@ def interpolate(sql, values):
     return sql
 
 
-def constrain_single_or_list(constraints, column, value, convert=lambda x: x):
+def constrain_single_or_list(constraints, column, value, convert=lambda x: x, negate=False):
     if value is not None:
         if isinstance(value, list):
            value = [convert(v) for v in value]
            if len(value) == 1:
-                constraints[column] = value[0]
+                if negate:
+                    constraints[f"{column}__or"] = {
+                        f"{column}__is_null": True,
+                        f"{column}__not": value[0]
+                    }
+                else:
+                    constraints[column] = value[0]
            elif len(value) > 1:
-                constraints[f"{column}__in"] = value
+                if negate:
+                    constraints[f"{column}__or"] = {
+                        f"{column}__is_null": True,
+                        f"{column}__not_in": value
+                    }
+                else:
+                    constraints[f"{column}__in"] = value
+        elif negate:
+            constraints[f"{column}__or"] = {
+                f"{column}__is_null": True,
+                f"{column}__not": convert(value)
+            }
         else:
             constraints[column] = convert(value)
     return constraints
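With `negate=True` the helper now emits an `__or` group that matches rows where the column is NULL or differs from the given value(s), rather than an equality/`IN` constraint. Judging from the `__is_null` / `__not_in` keys, that group is meant to compile to SQL of the following shape; a small runnable illustration with a throwaway table:

```python
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE txo (claim_name TEXT, channel_id TEXT)")
conn.executemany("INSERT INTO txo VALUES (?, ?)", [
    ('one', 'abc'), ('two', 'def'), ('three', None),
])
# negate=True expresses "not signed by the excluded channel", which must also
# keep unsigned rows whose channel_id is NULL.
rows = conn.execute(
    "SELECT claim_name FROM txo WHERE channel_id IS NULL OR channel_id NOT IN (?)",
    ('abc',)
).fetchall()
print(rows)  # [('two',), ('three',)]
```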
@@ -419,9 +447,13 @@ class SQLiteMixin:
         version = await self.db.execute_fetchone("SELECT version FROM version LIMIT 1;")
         if version == (self.SCHEMA_VERSION,):
             return
+        if version == ("1.5",) and self.SCHEMA_VERSION == "1.6":
+            await self.db.execute("ALTER TABLE txo ADD COLUMN has_source bool DEFAULT 1;")
+            await self.db.execute("UPDATE version SET version = ?", (self.SCHEMA_VERSION,))
+            return
         await self.db.executescript('\n'.join(
             f"DROP TABLE {table};" for table in tables
-        ))
+        ) + '\n' + 'PRAGMA WAL_CHECKPOINT(FULL);' + '\n' + 'VACUUM;')
         await self.db.execute(self.CREATE_VERSION_TABLE)
         await self.db.execute("INSERT INTO version VALUES (?)", (self.SCHEMA_VERSION,))
         await self.db.executescript(self.CREATE_TABLES_QUERY)
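The migration path above upgrades an existing 1.5 database in place with a single `ALTER TABLE`, while any other version mismatch still falls back to dropping and recreating the tables. A generic sketch of the same pattern on a toy schema and version table (the names here are illustrative, not the wallet's real schema):

```python
import sqlite3

def ensure_schema(conn, schema_version='1.6'):
    conn.execute("CREATE TABLE IF NOT EXISTS version (version TEXT)")
    row = conn.execute("SELECT version FROM version LIMIT 1").fetchone()
    if row == (schema_version,):
        return
    if row == ('1.5',) and schema_version == '1.6':
        # cheap in-place upgrade: add the new column with a backfill default
        conn.execute("ALTER TABLE txo ADD COLUMN has_source bool DEFAULT 1")
        conn.execute("UPDATE version SET version = ?", (schema_version,))
        return
    # anything else: rebuild from scratch
    conn.execute("DROP TABLE IF EXISTS txo")
    conn.execute("CREATE TABLE txo (txoid TEXT PRIMARY KEY, has_source bool)")
    conn.execute("DELETE FROM version")
    conn.execute("INSERT INTO version VALUES (?)", (schema_version,))

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE txo (txoid TEXT PRIMARY KEY)")
conn.execute("CREATE TABLE version (version TEXT)")
conn.execute("INSERT INTO version VALUES ('1.5')")
ensure_schema(conn)
print(conn.execute("PRAGMA table_info(txo)").fetchall())
```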
@@ -475,7 +507,7 @@ def _get_spendable_utxos(transaction: sqlite3.Connection, accounts: List, decode
                          amount_to_reserve: int, reserved_amount: int, floor: int, ceiling: int,
                          fee_per_byte: int) -> int:
     accounts_fmt = ",".join(["?"] * len(accounts))
-    txo_query = f"""
+    txo_query = """
         SELECT tx.txid, txo.txoid, tx.raw, tx.height, txo.position as nout, tx.is_verified, txo.amount FROM txo
         INNER JOIN account_address USING (address)
         LEFT JOIN txi USING (txoid)
@@ -565,7 +597,7 @@ def get_and_reserve_spendable_utxos(transaction: sqlite3.Connection, accounts: L
 
 
 class Database(SQLiteMixin):
 
-    SCHEMA_VERSION = "1.3"
+    SCHEMA_VERSION = "1.6"
 
     PRAGMAS = """
         pragma journal_mode=WAL;
@@ -619,6 +651,7 @@ class Database(SQLiteMixin):
             txo_type integer not null default 0,
             claim_id text,
             claim_name text,
+            has_source bool,
 
             channel_id text,
             reposted_claim_id text
@@ -663,7 +696,8 @@ class Database(SQLiteMixin):
             'address': txo.get_address(self.ledger),
             'position': txo.position,
             'amount': txo.amount,
-            'script': sqlite3.Binary(txo.script.source)
+            'script': sqlite3.Binary(txo.script.source),
+            'has_source': False,
         }
         if txo.is_claim:
             if txo.can_decode_claim:
@@ -671,12 +705,18 @@ class Database(SQLiteMixin):
                 row['txo_type'] = TXO_TYPES.get(claim.claim_type, TXO_TYPES['stream'])
                 if claim.is_repost:
                     row['reposted_claim_id'] = claim.repost.reference.claim_id
+                    row['has_source'] = True
                 if claim.is_signed:
                     row['channel_id'] = claim.signing_channel_id
+                if claim.is_stream:
+                    row['has_source'] = claim.stream.has_source
             else:
                 row['txo_type'] = TXO_TYPES['stream']
         elif txo.is_support:
             row['txo_type'] = TXO_TYPES['support']
+            support = txo.can_decode_support
+            if support and support.is_signed:
+                row['channel_id'] = support.signing_channel_id
         elif txo.purchase is not None:
             row['txo_type'] = TXO_TYPES['purchase']
             row['claim_id'] = txo.purchased_claim_id
@@ -730,9 +770,10 @@ class Database(SQLiteMixin):
                     conn.execute(*self._insert_sql(
                         "txo", self.txo_to_row(tx, txo), ignore_duplicate=True
                     )).fetchall()
-                elif txo.script.is_pay_script_hash:
-                    # TODO: implement script hash payments
-                    log.warning('Database.save_transaction_io: pay script hash is not implemented!')
+                elif txo.script.is_pay_script_hash and is_my_input:
+                    conn.execute(*self._insert_sql(
+                        "txo", self.txo_to_row(tx, txo), ignore_duplicate=True
+                    )).fetchall()
 
     def save_transaction_io(self, tx: Transaction, address, txhash, history):
         return self.save_transaction_io_batch([tx], address, txhash, history)
@ -935,16 +976,20 @@ class Database(SQLiteMixin):
|
||||||
sql.append("LEFT JOIN txi ON (txi.position=0 AND txi.txid=txo.txid)")
|
sql.append("LEFT JOIN txi ON (txi.position=0 AND txi.txid=txo.txid)")
|
||||||
return await self.db.execute_fetchall(*query(' '.join(sql), **constraints), read_only=read_only)
|
return await self.db.execute_fetchall(*query(' '.join(sql), **constraints), read_only=read_only)
|
||||||
|
|
||||||
async def get_txos(self, wallet=None, no_tx=False, read_only=False, **constraints):
|
async def get_txos(
|
||||||
|
self, wallet=None, no_tx=False, no_channel_info=False, read_only=False, **constraints
|
||||||
|
) -> List[Output]:
|
||||||
include_is_spent = constraints.get('include_is_spent', False)
|
include_is_spent = constraints.get('include_is_spent', False)
|
||||||
include_is_my_input = constraints.get('include_is_my_input', False)
|
include_is_my_input = constraints.get('include_is_my_input', False)
|
||||||
include_is_my_output = constraints.pop('include_is_my_output', False)
|
include_is_my_output = constraints.pop('include_is_my_output', False)
|
||||||
include_received_tips = constraints.pop('include_received_tips', False)
|
include_received_tips = constraints.pop('include_received_tips', False)
|
||||||
|
|
||||||
select_columns = [
|
select_columns = [
|
||||||
"tx.txid, raw, tx.height, tx.position as tx_position, tx.is_verified, "
|
"tx.txid, tx.height, tx.position as tx_position, tx.is_verified, "
|
||||||
"txo_type, txo.position as txo_position, amount, script"
|
"txo_type, txo.position as txo_position, amount, script"
|
||||||
]
|
]
|
||||||
|
if not no_tx:
|
||||||
|
select_columns.append("raw")
|
||||||
|
|
||||||
my_accounts = {a.public_key.address for a in wallet.accounts} if wallet else set()
|
my_accounts = {a.public_key.address for a in wallet.accounts} if wallet else set()
|
||||||
my_accounts_sql = ""
|
my_accounts_sql = ""
|
||||||
|
@ -983,7 +1028,7 @@ class Database(SQLiteMixin):
|
||||||
|
|
||||||
if 'order_by' not in constraints or constraints['order_by'] == 'height':
|
if 'order_by' not in constraints or constraints['order_by'] == 'height':
|
||||||
constraints['order_by'] = [
|
constraints['order_by'] = [
|
||||||
"tx.height=0 DESC", "tx.height DESC", "tx.position DESC", "txo.position"
|
"tx.height in (0, -1) DESC", "tx.height DESC", "tx.position DESC", "txo.position"
|
||||||
]
|
]
|
||||||
elif constraints.get('order_by', None) == 'none':
|
elif constraints.get('order_by', None) == 'none':
|
||||||
del constraints['order_by']
|
del constraints['order_by']
|
||||||
|
@ -1022,32 +1067,33 @@ class Database(SQLiteMixin):
|
||||||
txo.received_tips = row['received_tips']
|
txo.received_tips = row['received_tips']
|
||||||
txos.append(txo)
|
txos.append(txo)
|
||||||
|
|
||||||
channel_ids = set()
|
if not no_channel_info:
|
||||||
for txo in txos:
|
channel_ids = set()
|
||||||
if txo.is_claim and txo.can_decode_claim:
|
|
||||||
if txo.claim.is_signed:
|
|
||||||
channel_ids.add(txo.claim.signing_channel_id)
|
|
||||||
if txo.claim.is_channel and wallet:
|
|
||||||
for account in wallet.accounts:
|
|
||||||
private_key = await account.get_channel_private_key(
|
|
||||||
txo.claim.channel.public_key_bytes
|
|
||||||
)
|
|
||||||
if private_key:
|
|
||||||
txo.private_key = private_key
|
|
||||||
break
|
|
||||||
|
|
||||||
if channel_ids:
|
|
||||||
channels = {
|
|
||||||
txo.claim_id: txo for txo in
|
|
||||||
(await self.get_channels(
|
|
||||||
wallet=wallet,
|
|
||||||
claim_id__in=channel_ids,
|
|
||||||
read_only=read_only
|
|
||||||
))
|
|
||||||
}
|
|
||||||
for txo in txos:
|
for txo in txos:
|
||||||
if txo.is_claim and txo.can_decode_claim:
|
if txo.is_claim and txo.can_decode_claim:
|
||||||
txo.channel = channels.get(txo.claim.signing_channel_id, None)
|
if txo.claim.is_signed:
|
||||||
|
channel_ids.add(txo.claim.signing_channel_id)
|
||||||
|
if txo.claim.is_channel and wallet:
|
||||||
|
for account in wallet.accounts:
|
||||||
|
private_key = await account.get_channel_private_key(
|
||||||
|
txo.claim.channel.public_key_bytes
|
||||||
|
)
|
||||||
|
if private_key:
|
||||||
|
txo.private_key = private_key
|
||||||
|
break
|
||||||
|
|
||||||
|
if channel_ids:
|
||||||
|
channels = {
|
||||||
|
txo.claim_id: txo for txo in
|
||||||
|
(await self.get_channels(
|
||||||
|
wallet=wallet,
|
||||||
|
claim_id__in=channel_ids,
|
||||||
|
read_only=read_only
|
||||||
|
))
|
||||||
|
}
|
||||||
|
for txo in txos:
|
||||||
|
if txo.is_claim and txo.can_decode_claim:
|
||||||
|
txo.channel = channels.get(txo.claim.signing_channel_id, None)
|
||||||
|
|
||||||
return txos
|
return txos
|
||||||
|
|
||||||
|
@ -1109,6 +1155,41 @@ class Database(SQLiteMixin):
|
||||||
)
|
)
|
||||||
return balance[0]['total'] or 0
|
return balance[0]['total'] or 0
|
||||||
|
|
||||||
|
async def get_detailed_balance(self, accounts, read_only=False, **constraints):
|
||||||
|
constraints['accounts'] = accounts
|
||||||
|
result = (await self.select_txos(
|
||||||
|
f"COALESCE(SUM(amount), 0) AS total,"
|
||||||
|
f"COALESCE(SUM("
|
||||||
|
f" CASE WHEN"
|
||||||
|
f" txo_type NOT IN ({TXO_TYPES['other']}, {TXO_TYPES['purchase']})"
|
||||||
|
f" THEN amount ELSE 0 END), 0) AS reserved,"
|
||||||
|
f"COALESCE(SUM("
|
||||||
|
f" CASE WHEN"
|
||||||
|
f" txo_type IN ({','.join(map(str, CLAIM_TYPES))})"
|
||||||
|
f" THEN amount ELSE 0 END), 0) AS claims,"
|
||||||
|
f"COALESCE(SUM(CASE WHEN txo_type = {TXO_TYPES['support']} THEN amount ELSE 0 END), 0) AS supports,"
|
||||||
|
f"COALESCE(SUM("
|
||||||
|
f" CASE WHEN"
|
||||||
|
f" txo_type = {TXO_TYPES['support']} AND"
|
||||||
|
f" TXI.address IS NOT NULL AND"
|
||||||
|
f" TXI.address IN (SELECT address FROM account_address WHERE account = :$account__in0)"
|
||||||
|
f" THEN amount ELSE 0 END), 0) AS my_supports",
|
||||||
|
is_spent=False,
|
||||||
|
include_is_my_input=True,
|
||||||
|
read_only=read_only,
|
||||||
|
**constraints
|
||||||
|
))[0]
|
||||||
|
return {
|
||||||
|
"total": result["total"],
|
||||||
|
"available": result["total"] - result["reserved"],
|
||||||
|
"reserved": result["reserved"],
|
||||||
|
"reserved_subtotals": {
|
||||||
|
"claims": result["claims"],
|
||||||
|
"supports": result["my_supports"],
|
||||||
|
"tips": result["supports"] - result["my_supports"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
async def select_addresses(self, cols, read_only=False, **constraints):
|
async def select_addresses(self, cols, read_only=False, **constraints):
|
||||||
return await self.db.execute_fetchall(*query(
|
return await self.db.execute_fetchall(*query(
|
||||||
f"SELECT {cols} FROM pubkey_address JOIN account_address USING (address)",
|
f"SELECT {cols} FROM pubkey_address JOIN account_address USING (address)",
|
||||||
|
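The new `get_detailed_balance` query above aggregates every unspent txo once and splits the total into reserved claims, supports and "my" supports, with `available = total - reserved` and `tips = supports - my_supports` derived afterwards in Python. A small sketch of that post-processing step on a result row shaped like the query's output columns (the numbers are made up):

```python
def summarize(result):
    # result mirrors the columns selected by the balance query:
    # total, reserved, claims, supports, my_supports (all in dewies).
    return {
        "total": result["total"],
        "available": result["total"] - result["reserved"],
        "reserved": result["reserved"],
        "reserved_subtotals": {
            "claims": result["claims"],
            "supports": result["my_supports"],
            "tips": result["supports"] - result["my_supports"],
        },
    }

print(summarize({
    "total": 1000, "reserved": 400, "claims": 250, "supports": 150, "my_supports": 100
}))
```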
@@ -1123,13 +1204,14 @@ class Database(SQLiteMixin):
         addresses = await self.select_addresses(', '.join(cols), read_only=read_only, **constraints)
         if 'pubkey' in cols:
             for address in addresses:
-                address['pubkey'] = PubKey(
+                address['pubkey'] = PublicKey(
                     self.ledger, address.pop('pubkey'), address.pop('chain_code'),
                     address.pop('n'), address.pop('depth')
                 )
         return addresses
 
     async def get_address_count(self, cols=None, read_only=False, **constraints):
+        self._clean_txo_constraints_for_aggregation(constraints)
         count = await self.select_addresses('COUNT(*) as total', read_only=read_only, **constraints)
         return count[0]['total'] or 0
 
@@ -1163,6 +1245,18 @@ class Database(SQLiteMixin):
     async def set_address_history(self, address, history):
         await self._set_address_history(address, history)
 
+    async def is_channel_key_used(self, account, key: PublicKey):
+        channels = await self.get_txos(
+            accounts=[account], txo_type=TXO_TYPES['channel'],
+            no_tx=True, no_channel_info=True
+        )
+        other_key_bytes = key.pubkey_bytes
+        for channel in channels:
+            claim = channel.can_decode_claim
+            if claim and claim.channel.public_key_bytes == other_key_bytes:
+                return True
+        return False
+
     @staticmethod
     def constrain_purchases(constraints):
         accounts = constraints.pop('accounts', None)
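`is_channel_key_used` lets an account detect that a candidate channel key already backs one of its existing channel claims, by comparing public-key bytes across the account's channel txos. A hedged sketch of just the comparison it performs, with stand-in byte strings instead of real channel certificates:

```python
def is_key_used(existing_channel_pubkeys, candidate_pubkey: bytes) -> bool:
    # Linear scan is fine: an account rarely owns more than a handful of channels.
    return any(candidate_pubkey == used for used in existing_channel_pubkeys)

used_keys = [b'\x02' * 33, b'\x03' * 33]        # placeholder compressed public keys
print(is_key_used(used_keys, b'\x03' * 33))      # True
print(is_key_used(used_keys, b'\x02' + b'\x01' * 32))  # False
```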
@@ -1254,13 +1348,16 @@ class Database(SQLiteMixin):
         self.constrain_collections(constraints)
         return self.get_utxo_count(**constraints)
 
-    async def release_all_outputs(self, account):
-        await self.db.execute_fetchall(
-            "UPDATE txo SET is_reserved = 0 WHERE"
-            " is_reserved = 1 AND txo.address IN ("
-            " SELECT address from account_address WHERE account = ?"
-            " )", (account.public_key.address, )
-        )
+    async def release_all_outputs(self, account=None):
+        if account is None:
+            await self.db.execute_fetchall("UPDATE txo SET is_reserved = 0 WHERE is_reserved = 1")
+        else:
+            await self.db.execute_fetchall(
+                "UPDATE txo SET is_reserved = 0 WHERE"
+                " is_reserved = 1 AND txo.address IN ("
+                " SELECT address from account_address WHERE account = ?"
+                " )", (account.public_key.address, )
+            )
 
     def get_supports_summary(self, read_only=False, **constraints):
         return self.get_txos(
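Allowing `release_all_outputs()` to be called without an account makes it usable as a startup sweep: any txo still marked reserved from a previous run, for example after a crash mid-transaction, goes back into the spendable pool. A tiny sqlite3 sketch of that sweep on an illustrative txo table:

```python
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE txo (txoid TEXT PRIMARY KEY, is_reserved INTEGER DEFAULT 0)")
conn.executemany("INSERT INTO txo VALUES (?, ?)", [('a', 1), ('b', 0), ('c', 1)])

# the account-less branch: clear every leftover reservation in one statement
conn.execute("UPDATE txo SET is_reserved = 0 WHERE is_reserved = 1")
print(conn.execute("SELECT COUNT(*) FROM txo WHERE is_reserved = 1").fetchone())  # (0,)
```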
@ -10,25 +10,24 @@ from collections import defaultdict
|
||||||
from binascii import hexlify, unhexlify
|
from binascii import hexlify, unhexlify
|
||||||
from typing import Dict, Tuple, Type, Iterable, List, Optional, DefaultDict, NamedTuple
|
from typing import Dict, Tuple, Type, Iterable, List, Optional, DefaultDict, NamedTuple
|
||||||
|
|
||||||
import pylru
|
|
||||||
from lbry.schema.result import Outputs, INVALID, NOT_FOUND
|
from lbry.schema.result import Outputs, INVALID, NOT_FOUND
|
||||||
from lbry.schema.url import URL
|
from lbry.schema.url import URL
|
||||||
from lbry.crypto.hash import hash160, double_sha256, sha256
|
from lbry.crypto.hash import hash160, double_sha256, sha256
|
||||||
from lbry.crypto.base58 import Base58
|
from lbry.crypto.base58 import Base58
|
||||||
|
from lbry.utils import LRUCacheWithMetrics
|
||||||
|
|
||||||
from .tasks import TaskGroup
|
from lbry.wallet.tasks import TaskGroup
|
||||||
from .database import Database
|
from lbry.wallet.database import Database
|
||||||
from .stream import StreamController
|
from lbry.wallet.stream import StreamController
|
||||||
from .dewies import dewies_to_lbc
|
from lbry.wallet.dewies import dewies_to_lbc
|
||||||
from .account import Account, AddressManager, SingleKey
|
from lbry.wallet.account import Account, AddressManager, SingleKey
|
||||||
from .network import Network
|
from lbry.wallet.network import Network
|
||||||
from .transaction import Transaction, Output
|
from lbry.wallet.transaction import Transaction, Output
|
||||||
from .header import Headers, UnvalidatedHeaders
|
from lbry.wallet.header import Headers, UnvalidatedHeaders
|
||||||
from .checkpoints import HASHES
|
from lbry.wallet.checkpoints import HASHES
|
||||||
from .constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
|
from lbry.wallet.constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
|
||||||
from .bip32 import PubKey, PrivateKey
|
from lbry.wallet.bip32 import PublicKey, PrivateKey
|
||||||
from .coinselection import CoinSelector
|
from lbry.wallet.coinselection import CoinSelector
|
||||||
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
@@ -107,7 +106,7 @@ class Ledger(metaclass=LedgerRegistry):
     target_timespan = 150
 
     default_fee_per_byte = 50
-    default_fee_per_name_char = 200000
+    default_fee_per_name_char = 0
 
     checkpoints = HASHES
 
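Dropping `default_fee_per_name_char` from 200000 to 0 removes the per-character surcharge on claim names unless a config explicitly restores it; the byte-based fee is untouched. A simplified, assumed combination of the two components just to show the effect of the new default (real fee estimation in the ledger involves more than this):

```python
def claim_fee(name: str, tx_size_bytes: int, fee_per_name_char=0, fee_per_byte=50) -> int:
    # the name surcharge disappears when fee_per_name_char is 0
    return len(name) * fee_per_name_char + tx_size_bytes * fee_per_byte

print(claim_fee("my-claim", 2000))                            # 100000
print(claim_fee("my-claim", 2000, fee_per_name_char=200000))  # 1700000
```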
@ -124,7 +123,6 @@ class Ledger(metaclass=LedgerRegistry):
|
||||||
self.network: Network = self.config.get('network') or Network(self)
|
self.network: Network = self.config.get('network') or Network(self)
|
||||||
self.network.on_header.listen(self.receive_header)
|
self.network.on_header.listen(self.receive_header)
|
||||||
self.network.on_status.listen(self.process_status_update)
|
self.network.on_status.listen(self.process_status_update)
|
||||||
self.network.on_connected.listen(self.join_network)
|
|
||||||
|
|
||||||
self.accounts = []
|
self.accounts = []
|
||||||
self.fee_per_byte: int = self.config.get('fee_per_byte', self.default_fee_per_byte)
|
self.fee_per_byte: int = self.config.get('fee_per_byte', self.default_fee_per_byte)
|
||||||
|
@ -157,7 +155,7 @@ class Ledger(metaclass=LedgerRegistry):
|
||||||
self._on_ready_controller = StreamController()
|
self._on_ready_controller = StreamController()
|
||||||
self.on_ready = self._on_ready_controller.stream
|
self.on_ready = self._on_ready_controller.stream
|
||||||
|
|
||||||
self._tx_cache = pylru.lrucache(self.config.get("tx_cache_size", 100_000))
|
self._tx_cache = LRUCacheWithMetrics(self.config.get("tx_cache_size", 1024), metric_name='tx')
|
||||||
self._update_tasks = TaskGroup()
|
self._update_tasks = TaskGroup()
|
||||||
self._other_tasks = TaskGroup() # that we dont need to start
|
self._other_tasks = TaskGroup() # that we dont need to start
|
||||||
self._utxo_reservation_lock = asyncio.Lock()
|
self._utxo_reservation_lock = asyncio.Lock()
|
||||||
|
@ -169,7 +167,7 @@ class Ledger(metaclass=LedgerRegistry):
|
||||||
self._known_addresses_out_of_sync = set()
|
self._known_addresses_out_of_sync = set()
|
||||||
|
|
||||||
self.fee_per_name_char = self.config.get('fee_per_name_char', self.default_fee_per_name_char)
|
self.fee_per_name_char = self.config.get('fee_per_name_char', self.default_fee_per_name_char)
|
||||||
self._balance_cache = pylru.lrucache(100000)
|
self._balance_cache = LRUCacheWithMetrics(2 ** 15)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def get_id(cls):
|
def get_id(cls):
|
||||||
|
@ -180,15 +178,25 @@ class Ledger(metaclass=LedgerRegistry):
|
||||||
raw_address = cls.pubkey_address_prefix + h160
|
raw_address = cls.pubkey_address_prefix + h160
|
||||||
return Base58.encode(bytearray(raw_address + double_sha256(raw_address)[0:4]))
|
return Base58.encode(bytearray(raw_address + double_sha256(raw_address)[0:4]))
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def hash160_to_script_address(cls, h160):
|
||||||
|
raw_address = cls.script_address_prefix + h160
|
||||||
|
return Base58.encode(bytearray(raw_address + double_sha256(raw_address)[0:4]))
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def address_to_hash160(address):
|
def address_to_hash160(address):
|
||||||
return Base58.decode(address)[1:21]
|
return Base58.decode(address)[1:21]
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def is_valid_address(cls, address):
|
def is_pubkey_address(cls, address):
|
||||||
decoded = Base58.decode_check(address)
|
decoded = Base58.decode_check(address)
|
||||||
return decoded[0] == cls.pubkey_address_prefix[0]
|
return decoded[0] == cls.pubkey_address_prefix[0]
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def is_script_address(cls, address):
|
||||||
|
decoded = Base58.decode_check(address)
|
||||||
|
return decoded[0] == cls.script_address_prefix[0]
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def public_key_to_address(cls, public_key):
|
def public_key_to_address(cls, public_key):
|
||||||
return cls.hash160_to_address(hash160(public_key))
|
return cls.hash160_to_address(hash160(public_key))
|
||||||
|
@ -218,7 +226,7 @@ class Ledger(metaclass=LedgerRegistry):
|
||||||
return account.get_private_key(address_info['chain'], address_info['pubkey'].n)
|
return account.get_private_key(address_info['chain'], address_info['pubkey'].n)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
async def get_public_key_for_address(self, wallet, address) -> Optional[PubKey]:
|
async def get_public_key_for_address(self, wallet, address) -> Optional[PublicKey]:
|
||||||
match = await self._get_account_and_address_info_for_address(wallet, address)
|
match = await self._get_account_and_address_info_for_address(wallet, address)
|
||||||
if match:
|
if match:
|
||||||
_, address_info = match
|
_, address_info = match
|
||||||
|
@ -233,7 +241,7 @@ class Ledger(metaclass=LedgerRegistry):
|
||||||
async def get_effective_amount_estimators(self, funding_accounts: Iterable[Account]):
|
async def get_effective_amount_estimators(self, funding_accounts: Iterable[Account]):
|
||||||
estimators = []
|
estimators = []
|
||||||
for account in funding_accounts:
|
for account in funding_accounts:
|
||||||
utxos = await account.get_utxos()
|
utxos = await account.get_utxos(no_tx=True, no_channel_info=True)
|
||||||
for utxo in utxos:
|
for utxo in utxos:
|
||||||
estimators.append(utxo.get_estimator(self))
|
estimators.append(utxo.get_estimator(self))
|
||||||
return estimators
|
return estimators
|
||||||
|
@ -321,16 +329,19 @@ class Ledger(metaclass=LedgerRegistry):
|
||||||
async def start(self):
|
async def start(self):
|
||||||
if not os.path.exists(self.path):
|
if not os.path.exists(self.path):
|
||||||
os.mkdir(self.path)
|
os.mkdir(self.path)
|
||||||
await asyncio.wait([
|
await asyncio.wait(map(asyncio.create_task, [
|
||||||
self.db.open(),
|
self.db.open(),
|
||||||
self.headers.open()
|
self.headers.open()
|
||||||
])
|
]))
|
||||||
fully_synced = self.on_ready.first
|
fully_synced = self.on_ready.first
|
||||||
asyncio.create_task(self.network.start())
|
asyncio.create_task(self.network.start())
|
||||||
await self.network.on_connected.first
|
await self.network.on_connected.first
|
||||||
async with self._header_processing_lock:
|
async with self._header_processing_lock:
|
||||||
await self._update_tasks.add(self.initial_headers_sync())
|
await self._update_tasks.add(self.initial_headers_sync())
|
||||||
|
self.network.on_connected.listen(self.join_network)
|
||||||
|
asyncio.ensure_future(self.join_network())
|
||||||
await fully_synced
|
await fully_synced
|
||||||
|
await self.db.release_all_outputs()
|
||||||
await asyncio.gather(*(a.maybe_migrate_certificates() for a in self.accounts))
|
await asyncio.gather(*(a.maybe_migrate_certificates() for a in self.accounts))
|
||||||
await asyncio.gather(*(a.save_max_gap() for a in self.accounts))
|
await asyncio.gather(*(a.save_max_gap() for a in self.accounts))
|
||||||
if len(self.accounts) > 10:
|
if len(self.accounts) > 10:
|
||||||
|
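Wrapping the coroutines in `asyncio.create_task` before handing them to `asyncio.wait`, as the hunk above does in `Ledger.start()`, matters because passing bare coroutines to `asyncio.wait` has been deprecated since Python 3.8 and is rejected by newer releases. A minimal before/after sketch with placeholder coroutines:

```python
import asyncio

async def open_db():
    await asyncio.sleep(0.01)

async def open_headers():
    await asyncio.sleep(0.01)

async def main():
    # deprecated (and rejected on newer Pythons):
    #   await asyncio.wait([open_db(), open_headers()])
    # supported form, mirroring the change above:
    await asyncio.wait(map(asyncio.create_task, [open_db(), open_headers()]))

asyncio.run(main())
```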
@ -354,6 +365,10 @@ class Ledger(metaclass=LedgerRegistry):
|
||||||
await self.db.close()
|
await self.db.close()
|
||||||
await self.headers.close()
|
await self.headers.close()
|
||||||
|
|
||||||
|
async def tasks_are_done(self):
|
||||||
|
await self._update_tasks.done.wait()
|
||||||
|
await self._other_tasks.done.wait()
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def local_height_including_downloaded_height(self):
|
def local_height_including_downloaded_height(self):
|
||||||
return max(self.headers.height, self._download_height)
|
return max(self.headers.height, self._download_height)
|
||||||
|
@ -414,6 +429,7 @@ class Ledger(metaclass=LedgerRegistry):
|
||||||
"Blockchain Reorganization: attempting rewind to height %s from starting height %s",
|
"Blockchain Reorganization: attempting rewind to height %s from starting height %s",
|
||||||
height, height+rewound
|
height, height+rewound
|
||||||
)
|
)
|
||||||
|
self._tx_cache.clear()
|
||||||
|
|
||||||
else:
|
else:
|
||||||
raise IndexError(f"headers.connect() returned negative number ({added})")
|
raise IndexError(f"headers.connect() returned negative number ({added})")
|
||||||
|
@ -450,14 +466,15 @@ class Ledger(metaclass=LedgerRegistry):
|
||||||
async def subscribe_accounts(self):
|
async def subscribe_accounts(self):
|
||||||
if self.network.is_connected and self.accounts:
|
if self.network.is_connected and self.accounts:
|
||||||
log.info("Subscribe to %i accounts", len(self.accounts))
|
log.info("Subscribe to %i accounts", len(self.accounts))
|
||||||
await asyncio.wait([
|
await asyncio.wait(map(asyncio.create_task, [
|
||||||
self.subscribe_account(a) for a in self.accounts
|
self.subscribe_account(a) for a in self.accounts
|
||||||
])
|
]))
|
||||||
|
|
||||||
async def subscribe_account(self, account: Account):
|
async def subscribe_account(self, account: Account):
|
||||||
for address_manager in account.address_managers.values():
|
for address_manager in account.address_managers.values():
|
||||||
await self.subscribe_addresses(address_manager, await address_manager.get_addresses())
|
await self.subscribe_addresses(address_manager, await address_manager.get_addresses())
|
||||||
await account.ensure_address_gap()
|
await account.ensure_address_gap()
|
||||||
|
await account.deterministic_channel_keys.ensure_cache_primed()
|
||||||
|
|
||||||
async def unsubscribe_account(self, account: Account):
|
async def unsubscribe_account(self, account: Account):
|
||||||
for address in await account.get_addresses():
|
for address in await account.get_addresses():
|
||||||
|
@ -478,12 +495,14 @@ class Ledger(metaclass=LedgerRegistry):
|
||||||
for address, remote_status in zip(batch, results):
|
for address, remote_status in zip(batch, results):
|
||||||
self._update_tasks.add(self.update_history(address, remote_status, address_manager))
|
self._update_tasks.add(self.update_history(address, remote_status, address_manager))
|
||||||
addresses_remaining = addresses_remaining[batch_size:]
|
addresses_remaining = addresses_remaining[batch_size:]
|
||||||
log.info("subscribed to %i/%i addresses on %s:%i", len(addresses) - len(addresses_remaining),
|
if self.network.client and self.network.client.server_address_and_port:
|
||||||
len(addresses), *self.network.client.server_address_and_port)
|
log.info("subscribed to %i/%i addresses on %s:%i", len(addresses) - len(addresses_remaining),
|
||||||
log.info(
|
len(addresses), *self.network.client.server_address_and_port)
|
||||||
"finished subscribing to %i addresses on %s:%i", len(addresses),
|
if self.network.client and self.network.client.server_address_and_port:
|
||||||
*self.network.client.server_address_and_port
|
log.info(
|
||||||
)
|
"finished subscribing to %i addresses on %s:%i", len(addresses),
|
||||||
|
*self.network.client.server_address_and_port
|
||||||
|
)
|
||||||
|
|
||||||
def process_status_update(self, update):
|
def process_status_update(self, update):
|
||||||
address, remote_status = update
|
address, remote_status = update
|
||||||
|
@ -510,11 +529,8 @@ class Ledger(metaclass=LedgerRegistry):
|
||||||
)
|
)
|
||||||
return True
|
return True
|
||||||
|
|
||||||
acquire_lock_tasks = []
|
|
||||||
synced_txs = []
|
|
||||||
to_request = {}
|
to_request = {}
|
||||||
pending_synced_history = {}
|
pending_synced_history = {}
|
||||||
updated_cached_items = {}
|
|
||||||
already_synced = set()
|
already_synced = set()
|
||||||
|
|
||||||
already_synced_offset = 0
|
already_synced_offset = 0
|
||||||
|
@ -524,17 +540,6 @@ class Ledger(metaclass=LedgerRegistry):
|
||||||
already_synced.add((txid, remote_height))
|
already_synced.add((txid, remote_height))
|
||||||
already_synced_offset += 1
|
already_synced_offset += 1
|
||||||
continue
|
continue
|
||||||
cache_item = self._tx_cache.get(txid)
|
|
||||||
if cache_item is None:
|
|
||||||
cache_item = TransactionCacheItem()
|
|
||||||
self._tx_cache[txid] = cache_item
|
|
||||||
|
|
||||||
for txid, remote_height in remote_history[already_synced_offset:]:
|
|
||||||
cache_item = self._tx_cache[txid]
|
|
||||||
acquire_lock_tasks.append(asyncio.create_task(cache_item.lock.acquire()))
|
|
||||||
|
|
||||||
if acquire_lock_tasks:
|
|
||||||
await asyncio.wait(acquire_lock_tasks)
|
|
||||||
|
|
||||||
tx_indexes = {}
|
tx_indexes = {}
|
||||||
|
|
||||||
|
@@ -542,32 +547,22 @@ class Ledger(metaclass=LedgerRegistry):
             tx_indexes[txid] = i
             if (txid, remote_height) in already_synced:
                 continue
-            cache_item = self._tx_cache.get(txid)
-            cache_item.pending_verifications += 1
-            updated_cached_items[txid] = cache_item
-            assert cache_item is not None, 'cache item is none'
-            assert cache_item.lock.locked(), 'cache lock is not held?'
-            # tx = cache_item.tx
-            # if cache_item.tx is not None and \
-            #         cache_item.tx.height >= remote_height and \
-            #         (cache_item.tx.is_verified or remote_height < 1):
-            #     synced_txs.append(cache_item.tx)  # cached tx is already up-to-date
-            #     pending_synced_history[i] = f'{tx.id}:{tx.height}:'
-            #     continue
             to_request[i] = (txid, remote_height)

         log.debug(
-            "request %i transactions, %i/%i for %s are already synced", len(to_request), len(synced_txs),
+            "request %i transactions, %i/%i for %s are already synced", len(to_request), len(already_synced),
             len(remote_history), address
         )
-        requested_txes = await self._request_transaction_batch(to_request, len(remote_history), address)
-        for tx in requested_txes:
+        remote_history_txids = {txid for txid, _ in remote_history}
+        async for tx in self.request_synced_transactions(to_request, remote_history_txids, address):
+            self.maybe_has_channel_key(tx)
             pending_synced_history[tx_indexes[tx.id]] = f"{tx.id}:{tx.height}:"
-            synced_txs.append(tx)
+            if len(pending_synced_history) % 100 == 0:
+                log.info("Syncing address %s: %d/%d", address, len(pending_synced_history), len(to_request))
+        log.info("Sync finished for address %s: %d/%d", address, len(pending_synced_history), len(to_request))

         assert len(pending_synced_history) == len(remote_history), \
-            f"{len(pending_synced_history)} vs {len(remote_history)}"
+            f"{len(pending_synced_history)} vs {len(remote_history)} for {address}"
         synced_history = ""
         for remote_i, i in zip(range(len(remote_history)), sorted(pending_synced_history.keys())):
             assert i == remote_i, f"{i} vs {remote_i}"

@@ -575,21 +570,7 @@ class Ledger(metaclass=LedgerRegistry):
             if f"{txid}:{height}:" != pending_synced_history[i]:
                 log.warning("history mismatch: %s vs %s", remote_history[remote_i], pending_synced_history[i])
             synced_history += pending_synced_history[i]
+        await self.db.set_address_history(address, synced_history)
-        cache_size = self.config.get("tx_cache_size", 100_000)
-        for txid, cache_item in updated_cached_items.items():
-            cache_item.pending_verifications -= 1
-            if cache_item.pending_verifications < 0:
-                log.warning("config value tx cache size %i needs to be increased", cache_size)
-                cache_item.pending_verifications = 0
-            try:
-                cache_item.lock.release()
-            except RuntimeError:
-                log.warning("lock was already released?")
-
-        await self.db.save_transaction_io_batch(
-            [], address, self.address_to_hash160(address), synced_history
-        )

         if address_manager is None:
             address_manager = await self.get_address_manager_for_address(address)

@@ -597,13 +578,6 @@ class Ledger(metaclass=LedgerRegistry):
         if address_manager is not None:
             await address_manager.ensure_address_gap()

-        for txid, cache_item in updated_cached_items.items():
-            if self._tx_cache.get(txid) is not cache_item:
-                log.warning("tx cache corrupted while syncing %s, reattempt sync=%s", address, reattempt_update)
-                if reattempt_update:
-                    return await self.update_history(address, remote_status, address_manager, False)
-                return False
-
         local_status, local_history = \
             await self.get_local_status_and_history(address, synced_history)
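For illustration only: the rewritten update_history consumes an async generator that yields transactions while batches are still being fetched, which is what lets it log per-address progress every 100 transactions. The sketch below is a minimal, self-contained model of that pattern; the names (fetch_in_batches, sync_address) are placeholders, not the Ledger API.

import asyncio

async def fetch_in_batches(txids, batch_size=100):
    # stand-in for request_synced_transactions: yield results one batch at a time
    for i in range(0, len(txids), batch_size):
        await asyncio.sleep(0)  # pretend a network round trip happens here
        for txid in txids[i:i + batch_size]:
            yield txid

async def sync_address(address, txids):
    synced = 0
    async for _tx in fetch_in_batches(txids):
        synced += 1
        if synced % 100 == 0:
            print(f"Syncing address {address}: {synced}/{len(txids)}")
    print(f"Sync finished for address {address}: {synced}/{len(txids)}")

asyncio.run(sync_address("bExampleAddress", [f"tx{i}" for i in range(250)]))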
@@ -635,150 +609,111 @@ class Ledger(metaclass=LedgerRegistry):
         log.debug("finished syncing transaction history for %s, %i known txs", address, len(local_history))
         return True

-    async def cache_transaction(self, txid, remote_height, check_local=True):
-        cache_item = self._tx_cache.get(txid)
-        if cache_item is None:
-            cache_item = self._tx_cache[txid] = TransactionCacheItem()
-        elif cache_item.tx is not None and \
-                cache_item.tx.height >= remote_height and \
-                (cache_item.tx.is_verified or remote_height < 1):
-            return cache_item.tx  # cached tx is already up-to-date
-
-        cache_item.pending_verifications += 1
-        try:
-            async with cache_item.lock:
-                tx = cache_item.tx
-                if tx is None and check_local:
-                    # check local db
-                    tx = cache_item.tx = await self.db.get_transaction(txid=txid)
-                merkle = None
-                if tx is None:
-                    # fetch from network
-                    _raw, merkle = await self.network.retriable_call(
-                        self.network.get_transaction_and_merkle, txid, remote_height
-                    )
-                    tx = Transaction(unhexlify(_raw), height=merkle['block_height'])
-                    cache_item.tx = tx  # make sure it's saved before caching it
-                tx.height = remote_height
-                if merkle and 0 < remote_height < len(self.headers):
-                    merkle_root = self.get_root_of_merkle_tree(merkle['merkle'], merkle['pos'], tx.hash)
-                    header = await self.headers.get(remote_height)
-                    tx.position = merkle['pos']
-                    tx.is_verified = merkle_root == header['merkle_root']
-                return tx
-        finally:
-            cache_item.pending_verifications -= 1
     async def maybe_verify_transaction(self, tx, remote_height, merkle=None):
         tx.height = remote_height
-        cached = self._tx_cache.get(tx.id)
-        if not cached:
-            # cache txs looked up by transaction_show too
-            cached = TransactionCacheItem()
-            self._tx_cache[tx.id] = cached
-        cached.tx = tx
-        if 0 < remote_height < len(self.headers) and cached.pending_verifications <= 1:
+        if 0 < remote_height < len(self.headers):
             # can't be tx.pending_verifications == 1 because we have to handle the transaction_show case
             if not merkle:
                 merkle = await self.network.retriable_call(self.network.get_merkle, tx.id, remote_height)
+            if 'merkle' not in merkle:
+                return
             merkle_root = self.get_root_of_merkle_tree(merkle['merkle'], merkle['pos'], tx.hash)
             header = await self.headers.get(remote_height)
             tx.position = merkle['pos']
             tx.is_verified = merkle_root == header['merkle_root']
+        return tx

-    async def _request_transaction_batch(self, to_request, remote_history_size, address):
-        header_cache = {}
+    def maybe_has_channel_key(self, tx):
+        for txo in tx._outputs:
+            if txo.can_decode_claim and txo.claim.is_channel:
+                for account in self.accounts:
+                    account.deterministic_channel_keys.maybe_generate_deterministic_key_for_channel(txo)
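For context, maybe_verify_transaction accepts a merkle proof and compares the recomputed root against the block header. The following is a generic, self-contained sketch of that style of SPV merkle-branch check; it is not the SDK's get_root_of_merkle_tree helper (which works on hex-encoded branches), just the underlying idea: fold each sibling hash into the working hash, using the position bits to decide left/right.

import hashlib

def double_sha256(data: bytes) -> bytes:
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def merkle_root_from_branch(tx_hash: bytes, branch: list, position: int) -> bytes:
    working = tx_hash
    for depth, sibling in enumerate(branch):
        if (position >> depth) & 1:      # sibling sits on the left at this level
            working = double_sha256(sibling + working)
        else:                            # sibling sits on the right
            working = double_sha256(working + sibling)
    return working

# a tiny two-leaf tree: root == H(H(leaf0) + H(leaf1))
leaf0, leaf1 = double_sha256(b"tx-a"), double_sha256(b"tx-b")
root = double_sha256(leaf0 + leaf1)
assert merkle_root_from_branch(leaf0, [leaf1], 0) == root
assert merkle_root_from_branch(leaf1, [leaf0], 1) == root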
+    async def request_transactions(self, to_request: Tuple[Tuple[str, int], ...], cached=False):
         batches = [[]]
         remote_heights = {}
-        synced_txs = []
-        heights_in_batch = 0
-        last_height = 0
-        for idx in sorted(to_request):
-            txid = to_request[idx][0]
-            height = to_request[idx][1]
+        cache_hits = set()
+        for txid, height in sorted(to_request, key=lambda x: x[1]):
+            if cached:
+                cached_tx = self._tx_cache.get(txid)
+                if cached_tx is not None:
+                    if cached_tx.tx is not None and cached_tx.tx.is_verified:
+                        cache_hits.add(txid)
+                        continue
+                else:
+                    self._tx_cache[txid] = TransactionCacheItem()
             remote_heights[txid] = height
-            if height != last_height:
-                heights_in_batch += 1
-                last_height = height
-            if len(batches[-1]) == 100 or heights_in_batch == 20:
+            if len(batches[-1]) == 100:
                 batches.append([])
-                heights_in_batch = 1
             batches[-1].append(txid)
         if not batches[-1]:
             batches.pop()
+        if cached and cache_hits:
+            yield {txid: self._tx_cache[txid].tx for txid in cache_hits}
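A small illustrative sketch of the batching rule the new code applies (not the Ledger method itself): txids are sorted by height and split into request batches of at most 100 entries, with any remainder forming a final short batch.

def make_batches(to_request, batch_size=100):
    # to_request: iterable of (txid, height) pairs, mirroring the diff above
    batches, current = [], []
    for txid, _height in sorted(to_request, key=lambda pair: pair[1]):
        current.append(txid)
        if len(current) == batch_size:
            batches.append(current)
            current = []
    if current:
        batches.append(current)
    return batches

print([len(b) for b in make_batches([(f"tx{i}", i % 7) for i in range(205)])])  # [100, 100, 5]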
-        last_showed_synced_count = 0
-
-        async def _single_batch(batch):
-            this_batch_synced = []
-            batch_result = await self.network.retriable_call(self.network.get_transaction_batch, batch)
-            for txid, (raw, merkle) in batch_result.items():
-                remote_height = remote_heights[txid]
-                merkle_height = merkle['block_height']
-                cache_item = self._tx_cache.get(txid)
-                if cache_item is None:
-                    cache_item = TransactionCacheItem()
-                    self._tx_cache[txid] = cache_item
-                tx = cache_item.tx or Transaction(unhexlify(raw), height=remote_height)
-                tx.height = remote_height
-                cache_item.tx = tx
-                if 'merkle' in merkle and remote_heights[txid] > 0:
-                    merkle_root = self.get_root_of_merkle_tree(merkle['merkle'], merkle['pos'], tx.hash)
-                    try:
-                        header = header_cache.get(remote_heights[txid]) or (await self.headers.get(merkle_height))
-                    except IndexError:
-                        log.warning("failed to verify %s at height %i", tx.id, merkle_height)
-                    else:
-                        header_cache[remote_heights[txid]] = header
-                        tx.position = merkle['pos']
-                        tx.is_verified = merkle_root == header['merkle_root']
-                check_db_for_txos = []
-
-                for txi in tx.inputs:
-                    if txi.txo_ref.txo is not None:
-                        continue
-                    cache_item = self._tx_cache.get(txi.txo_ref.tx_ref.id)
-                    if cache_item is not None:
-                        if cache_item.tx is not None:
-                            txi.txo_ref = cache_item.tx.outputs[txi.txo_ref.position].ref
-                    else:
-                        check_db_for_txos.append(txi.txo_ref.id)
-
-                referenced_txos = {} if not check_db_for_txos else {
-                    txo.id: txo for txo in await self.db.get_txos(
-                        txoid__in=check_db_for_txos, order_by='txo.txoid', no_tx=True
-                    )
-                }
-
-                for txi in tx.inputs:
-                    if txi.txo_ref.txo is not None:
-                        continue
-                    referenced_txo = referenced_txos.get(txi.txo_ref.id)
-                    if referenced_txo is not None:
-                        txi.txo_ref = referenced_txo.ref
-                        continue
-                    cache_item = self._tx_cache.get(txi.txo_ref.id)
-                    if cache_item is None:
-                        cache_item = self._tx_cache[txi.txo_ref.id] = TransactionCacheItem()
-                    if cache_item.tx is not None:
-                        txi.txo_ref = cache_item.tx.ref
-
-                synced_txs.append(tx)
-                this_batch_synced.append(tx)
-            await self.db.save_transaction_io_batch(
-                this_batch_synced, address, self.address_to_hash160(address), ""
-            )
-            await asyncio.wait([
-                self._on_transaction_controller.add(TransactionEvent(address, tx))
-                for tx in this_batch_synced
-            ])
-            nonlocal last_showed_synced_count
-            if last_showed_synced_count + 100 < len(synced_txs):
-                log.info("synced %i/%i transactions for %s", len(synced_txs), remote_history_size, address)
-                last_showed_synced_count = len(synced_txs)
         for batch in batches:
-            await _single_batch(batch)
-        return synced_txs
+            txs = await self._single_batch(batch, remote_heights)
+            if cached:
+                for txid, tx in txs.items():
+                    self._tx_cache[txid].tx = tx
+            yield txs
+    async def request_synced_transactions(self, to_request, remote_history, address):
+        async for txs in self.request_transactions(((txid, height) for txid, height in to_request.values())):
+            for tx in txs.values():
+                yield tx
+            await self._sync_and_save_batch(address, remote_history, txs)
+
+    async def _single_batch(self, batch, remote_heights):
+        heights = {remote_heights[txid] for txid in batch}
+        unrestriced = 0 < min(heights) < max(heights) < max(self.headers.checkpoints or [0])
+        batch_result = await self.network.retriable_call(self.network.get_transaction_batch, batch, not unrestriced)
+        txs = {}
+        for txid, (raw, merkle) in batch_result.items():
+            remote_height = remote_heights[txid]
+            tx = Transaction(unhexlify(raw), height=remote_height)
+            txs[tx.id] = tx
+            await self.maybe_verify_transaction(tx, remote_height, merkle)
+        return txs
+
+    async def _sync_and_save_batch(self, address, remote_history, pending_txs):
+        await asyncio.gather(*(self._sync(tx, remote_history, pending_txs) for tx in pending_txs.values()))
+        await self.db.save_transaction_io_batch(
+            pending_txs.values(), address, self.address_to_hash160(address), ""
+        )
+        while pending_txs:
+            self._on_transaction_controller.add(TransactionEvent(address, pending_txs.popitem()[1]))
+
+    async def _sync(self, tx, remote_history, pending_txs):
+        check_db_for_txos = {}
+        for txi in tx.inputs:
+            if txi.txo_ref.txo is not None:
+                continue
+            wanted_txid = txi.txo_ref.tx_ref.id
+            if wanted_txid not in remote_history:
+                continue
+            if wanted_txid in pending_txs:
+                txi.txo_ref = pending_txs[wanted_txid].outputs[txi.txo_ref.position].ref
+            else:
+                check_db_for_txos[txi] = txi.txo_ref.id
+
+        referenced_txos = {} if not check_db_for_txos else {
+            txo.id: txo for txo in await self.db.get_txos(
+                txoid__in=list(check_db_for_txos.values()), order_by='txo.txoid', no_tx=True
+            )
+        }
+
+        for txi in check_db_for_txos:
+            if txi.txo_ref.id in referenced_txos:
+                txi.txo_ref = referenced_txos[txi.txo_ref.id].ref
+            else:
+                tx_from_db = await self.db.get_transaction(txid=txi.txo_ref.tx_ref.id)
+                if tx_from_db is None:
+                    log.warning("%s not on db, not on cache, but on remote history!", txi.txo_ref.id)
+                else:
+                    txi.txo_ref = tx_from_db.outputs[txi.txo_ref.position].ref
+        return tx
     async def get_address_manager_for_address(self, address) -> Optional[AddressManager]:
         details = await self.db.get_address(address=address)

@@ -787,6 +722,15 @@ class Ledger(metaclass=LedgerRegistry):
                 return account.address_managers[details['chain']]
         return None

+    async def broadcast_or_release(self, tx, blocking=False):
+        try:
+            await self.broadcast(tx)
+        except:
+            await self.release_tx(tx)
+            raise
+        if blocking:
+            await self.wait(tx, timeout=None)
+
     def broadcast(self, tx):
         # broadcast can't be a retriable call yet
         return self.network.broadcast(hexlify(tx.raw).decode())
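For illustration, a minimal sketch of the broadcast-or-release pattern the diff moves into the ledger: if broadcasting fails, the reserved outputs backing the transaction are released before the error propagates, so they can be reused later. Names here (broadcast, release, wait) are placeholder callables, not the SDK API.

import asyncio

async def broadcast_or_release(broadcast, release, tx, wait=None, blocking=False):
    try:
        await broadcast(tx)
    except Exception:
        await release(tx)   # free reserved outputs before re-raising
        raise
    if blocking and wait is not None:
        await wait(tx)      # optionally block until the tx is seen in history

async def demo():
    async def failing_broadcast(tx): raise ConnectionError("hub unreachable")
    async def release(tx): print(f"released outputs reserved by {tx}")
    try:
        await broadcast_or_release(failing_broadcast, release, "tx1")
    except ConnectionError:
        print("broadcast failed and was re-raised")

asyncio.run(demo())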
@@ -800,13 +744,15 @@ class Ledger(metaclass=LedgerRegistry):
                     self.hash160_to_address(txi.txo_ref.txo.pubkey_hash)
                 )
         for txo in tx.outputs:
-            if txo.has_address:
+            if txo.is_pubkey_hash:
                 addresses.add(self.hash160_to_address(txo.pubkey_hash))
+            elif txo.is_script_hash:
+                addresses.add(self.hash160_to_script_address(txo.script_hash))
         start = int(time.perf_counter())
         while timeout and (int(time.perf_counter()) - start) <= timeout:
             if await self._wait_round(tx, height, addresses):
                 return
-        raise asyncio.TimeoutError('Timed out waiting for transaction.')
+        raise asyncio.TimeoutError(f'Timed out waiting for transaction. {tx.id}')

     async def _wait_round(self, tx: Transaction, height: int, addresses: Iterable[str]):
         records = await self.db.get_addresses(address__in=addresses)

@@ -825,7 +771,7 @@ class Ledger(metaclass=LedgerRegistry):
             ))[1] if record['history'] else []
             for txid, local_height in local_history:
                 if txid == tx.id:
-                    if local_height >= height:
+                    if local_height >= height or (local_height == 0 and height > local_height):
                         return True
                     log.warning(
                         "local history has higher height than remote for %s (%i vs %i)", txid,

@@ -845,12 +791,11 @@ class Ledger(metaclass=LedgerRegistry):
                              include_sent_tips=False,
                              include_received_tips=False) -> Tuple[List[Output], dict, int, int]:
         encoded_outputs = await query
-        outputs = Outputs.from_base64(encoded_outputs or b'')  # TODO: why is the server returning None?
+        outputs = Outputs.from_base64(encoded_outputs or '')  # TODO: why is the server returning None?
-        txs = []
+        txs: List[Transaction] = []
         if len(outputs.txs) > 0:
-            txs: List[Transaction] = await asyncio.gather(*(
-                self.cache_transaction(*tx) for tx in outputs.txs
-            ))
+            async for tx in self.request_transactions(tuple(outputs.txs), cached=True):
+                txs.extend(tx.values())

         _txos, blocked = outputs.inflate(txs)
@@ -923,12 +868,17 @@ class Ledger(metaclass=LedgerRegistry):
         return txos, blocked, outputs.offset, outputs.total

     async def resolve(self, accounts, urls, **kwargs):
-        resolve = partial(self.network.retriable_call, self.network.resolve)
-        urls_copy = list(urls)
         txos = []
+        urls_copy = list(urls)
+        resolve = partial(self.network.retriable_call, self.network.resolve)
         while urls_copy:
-            batch, urls_copy = urls_copy[:500], urls_copy[500:]
-            txos.extend((await self._inflate_outputs(resolve(batch), accounts, **kwargs))[0])
+            batch, urls_copy = urls_copy[:100], urls_copy[100:]
+            txos.extend(
+                (await self._inflate_outputs(
+                    resolve(batch), accounts, **kwargs
+                ))[0]
+            )

         assert len(urls) == len(txos), "Mismatch between urls requested for resolve and responses received."
         result = {}
         for url, txo in zip(urls, txos):

@@ -941,8 +891,13 @@ class Ledger(metaclass=LedgerRegistry):
                 result[url] = txo
         return result

+    async def sum_supports(self, new_sdk_server, **kwargs) -> List[Dict]:
+        return await self.network.sum_supports(new_sdk_server, **kwargs)
+
     async def claim_search(
-            self, accounts, include_purchase_receipt=False, include_is_my_output=False,
+            self, accounts,
+            include_purchase_receipt=False,
+            include_is_my_output=False,
             **kwargs) -> Tuple[List[Output], dict, int, int]:
         return await self._inflate_outputs(
             self.network.claim_search(**kwargs), accounts,

@@ -950,9 +905,21 @@ class Ledger(metaclass=LedgerRegistry):
             include_is_my_output=include_is_my_output
         )

-    async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
-        for claim in (await self.claim_search(accounts, claim_id=claim_id, **kwargs))[0]:
-            return claim
+    # async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
+    #     return await self.network.get_claim_by_id(claim_id)
+
+    async def get_claim_by_claim_id(self, claim_id, accounts=None, include_purchase_receipt=False,
+                                    include_is_my_output=False):
+        accounts = accounts or []
+        # return await self.network.get_claim_by_id(claim_id)
+        inflated = await self._inflate_outputs(
+            self.network.get_claim_by_id(claim_id), accounts,
+            include_purchase_receipt=include_purchase_receipt,
+            include_is_my_output=include_is_my_output,
+        )
+        txos = inflated[0]
+        if txos:
+            return txos[0]
     async def _report_state(self):
         try:

@@ -971,9 +938,7 @@ class Ledger(metaclass=LedgerRegistry):
                     "%d change addresses (gap: %d), %d channels, %d certificates and %d claims. ",
                     account.id, balance, total_receiving, account.receiving.gap, total_change,
                     account.change.gap, channel_count, len(account.channel_keys), claim_count)
-        except Exception as err:
-            if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
-                raise
+        except Exception:
             log.exception(
                 'Failed to display wallet state, please file issue '
                 'for this bug along with the traceback you see below:')

@@ -996,9 +961,7 @@ class Ledger(metaclass=LedgerRegistry):
         claim_ids = [p.purchased_claim_id for p in purchases]
         try:
             resolved, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
-        except Exception as err:
-            if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
-                raise
+        except Exception:
             log.exception("Resolve failed while looking up purchased claim ids:")
             resolved = []
         lookup = {claim.claim_id: claim for claim in resolved}

@@ -1010,6 +973,11 @@ class Ledger(metaclass=LedgerRegistry):
         return self.db.get_purchase_count(**constraints)

     async def _resolve_for_local_results(self, accounts, txos):
+        txos = await self._resolve_for_local_claim_results(accounts, txos)
+        txos = await self._resolve_for_local_support_results(accounts, txos)
+        return txos
+
+    async def _resolve_for_local_claim_results(self, accounts, txos):
         results = []
         response = await self.resolve(
             accounts, [txo.permanent_url for txo in txos if txo.can_decode_claim]

@@ -1025,6 +993,23 @@ class Ledger(metaclass=LedgerRegistry):
                 results.append(txo)
         return results

+    async def _resolve_for_local_support_results(self, accounts, txos):
+        channel_ids = set()
+        signed_support_txos = []
+        for txo in txos:
+            support = txo.can_decode_support
+            if support and support.signing_channel_id:
+                channel_ids.add(support.signing_channel_id)
+                signed_support_txos.append(txo)
+        if channel_ids:
+            channels = {
+                channel.claim_id: channel for channel in
+                (await self.claim_search(accounts, claim_ids=list(channel_ids)))[0]
+            }
+            for txo in signed_support_txos:
+                txo.channel = channels.get(txo.support.signing_channel_id)
+        return txos
     async def get_claims(self, resolve=False, **constraints):
         claims = await self.db.get_claims(**constraints)
         if resolve:

@@ -1053,12 +1038,10 @@ class Ledger(metaclass=LedgerRegistry):
         return self.db.get_channel_count(**constraints)

     async def resolve_collection(self, collection, offset=0, page_size=1):
-        claim_ids = collection.claim.collection.claims.ids[offset:page_size+offset]
+        claim_ids = collection.claim.collection.claims.ids[offset:page_size + offset]
         try:
             resolve_results, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
-        except Exception as err:
-            if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
-                raise
+        except Exception:
             log.exception("Resolve failed while looking up collection claim ids:")
             return []
         claims = []

@@ -1073,8 +1056,10 @@ class Ledger(metaclass=LedgerRegistry):
                 claims.append(None)
         return claims

-    async def get_collections(self, resolve_claims=0, **constraints):
+    async def get_collections(self, resolve_claims=0, resolve=False, **constraints):
         collections = await self.db.get_collections(**constraints)
+        if resolve:
+            collections = await self._resolve_for_local_results(constraints.get('accounts', []), collections)
         if resolve_claims > 0:
             for collection in collections:
                 collection.claims = await self.resolve_collection(collection, page_size=resolve_claims)

@@ -1102,17 +1087,17 @@ class Ledger(metaclass=LedgerRegistry):
             'txid': tx.id,
             'timestamp': ts,
             'date': datetime.fromtimestamp(ts).isoformat(' ')[:-3] if tx.height > 0 else None,
-            'confirmations': (headers.height+1) - tx.height if tx.height > 0 else 0,
+            'confirmations': (headers.height + 1) - tx.height if tx.height > 0 else 0,
             'claim_info': [],
             'update_info': [],
             'support_info': [],
             'abandon_info': [],
             'purchase_info': []
         }
-        is_my_inputs = all([txi.is_my_input for txi in tx.inputs])
+        is_my_inputs = all(txi.is_my_input for txi in tx.inputs)
         if is_my_inputs:
             # fees only matter if we are the ones paying them
-            item['value'] = dewies_to_lbc(tx.net_account_balance+tx.fee)
+            item['value'] = dewies_to_lbc(tx.net_account_balance + tx.fee)
             item['fee'] = dewies_to_lbc(-tx.fee)
         else:
             # someone else paid the fees

@@ -1135,13 +1120,13 @@ class Ledger(metaclass=LedgerRegistry):
                     if txi.txo_ref.txo is not None:
                         other_txo = txi.txo_ref.txo
                         if (other_txo.is_claim or other_txo.script.is_support_claim) \
                                 and other_txo.claim_id == txo.claim_id:
                             previous = other_txo
                             break
                 if previous is not None:
                     item['update_info'].append({
                         'address': txo.get_address(self),
-                        'balance_delta': dewies_to_lbc(previous.amount-txo.amount),
+                        'balance_delta': dewies_to_lbc(previous.amount - txo.amount),
                         'amount': dewies_to_lbc(txo.amount),
                         'claim_id': txo.claim_id,
                         'claim_name': txo.claim_name,

@@ -1219,8 +1204,8 @@ class Ledger(metaclass=LedgerRegistry):
         for account in accounts:
             balance = self._balance_cache.get(account.id)
             if not balance:
-                balance = self._balance_cache[account.id] =\
-                    await account.get_detailed_balance(confirmations, reserved_subtotals=True)
+                balance = self._balance_cache[account.id] = \
+                    await account.get_detailed_balance(confirmations)
             for key, value in balance.items():
                 if key == 'reserved_subtotals':
                     for subkey, subvalue in value.items():
@@ -3,20 +3,21 @@ import json
 import typing
 import logging
 import asyncio

 from binascii import unhexlify
 from decimal import Decimal
 from typing import List, Type, MutableSequence, MutableMapping, Optional

-from lbry.error import KeyFeeAboveMaxAllowedError
-from lbry.conf import Config
+from lbry.error import KeyFeeAboveMaxAllowedError, WalletNotLoadedError
+from lbry.conf import Config, NOT_SET

-from .dewies import dewies_to_lbc
-from .account import Account
-from .ledger import Ledger, LedgerRegistry
-from .transaction import Transaction, Output
-from .database import Database
-from .wallet import Wallet, WalletStorage, ENCRYPT_ON_DISK
-from .rpc.jsonrpc import CodeMessageError
+from lbry.wallet.dewies import dewies_to_lbc
+from lbry.wallet.account import Account
+from lbry.wallet.ledger import Ledger, LedgerRegistry
+from lbry.wallet.transaction import Transaction, Output
+from lbry.wallet.database import Database
+from lbry.wallet.wallet import Wallet, WalletStorage, ENCRYPT_ON_DISK
+from lbry.wallet.rpc.jsonrpc import CodeMessageError

 if typing.TYPE_CHECKING:
     from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager

@@ -95,7 +96,7 @@ class WalletManager:
         for wallet in self.wallets:
             if wallet.id == wallet_id:
                 return wallet
-        raise ValueError(f"Couldn't find wallet: {wallet_id}.")
+        raise WalletNotLoadedError(wallet_id)

     @staticmethod
     def get_balance(wallet):

@@ -182,10 +183,17 @@ class WalletManager:

         ledger_config = {
             'auto_connect': True,
+            'explicit_servers': [],
+            'hub_timeout': config.hub_timeout,
             'default_servers': config.lbryum_servers,
+            'known_hubs': config.known_hubs,
+            'jurisdiction': config.jurisdiction,
+            'concurrent_hub_requests': config.concurrent_hub_requests,
             'data_path': config.wallet_dir,
             'tx_cache_size': config.transaction_cache_size
         }
+        if 'LBRY_FEE_PER_NAME_CHAR' in os.environ:
+            ledger_config['fee_per_name_char'] = int(os.environ.get('LBRY_FEE_PER_NAME_CHAR'))

         wallets_directory = os.path.join(config.wallet_dir, 'wallets')
         if not os.path.exists(wallets_directory):

@@ -195,6 +203,10 @@ class WalletManager:
             os.path.join(wallets_directory, 'default_wallet')
         )

+        if Config.lbryum_servers.is_set_to_default(config):
+            with config.update_config() as c:
+                c.lbryum_servers = NOT_SET
+
         manager = cls.from_config({
             'ledgers': {ledger_id: ledger_config},
             'wallets': [

@@ -225,9 +237,16 @@ class WalletManager:
     async def reset(self):
         self.ledger.config = {
             'auto_connect': True,
-            'default_servers': self.config.lbryum_servers,
+            'explicit_servers': [],
+            'default_servers': Config.lbryum_servers.default,
+            'known_hubs': self.config.known_hubs,
+            'jurisdiction': self.config.jurisdiction,
+            'hub_timeout': self.config.hub_timeout,
+            'concurrent_hub_requests': self.config.concurrent_hub_requests,
             'data_path': self.config.wallet_dir,
         }
+        if Config.lbryum_servers.is_set(self.config):
+            self.ledger.config['explicit_servers'] = self.config.lbryum_servers
         await self.ledger.stop()
         await self.ledger.start()

@@ -298,10 +317,4 @@ class WalletManager:
         )

     async def broadcast_or_release(self, tx, blocking=False):
-        try:
-            await self.ledger.broadcast(tx)
-        except:
-            await self.ledger.release_tx(tx)
-            raise
-        if blocking:
-            await self.ledger.wait(tx, timeout=None)
+        await self.ledger.broadcast_or_release(tx, blocking=blocking)
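For illustration, a tiny sketch of the environment override this hunk adds to ledger_config. The default value below is a placeholder, not the SDK's actual default; only the variable name LBRY_FEE_PER_NAME_CHAR comes from the diff.

import os

ledger_config = {'fee_per_name_char': 0}  # placeholder default
if 'LBRY_FEE_PER_NAME_CHAR' in os.environ:
    ledger_config['fee_per_name_char'] = int(os.environ['LBRY_FEE_PER_NAME_CHAR'])
print(ledger_config['fee_per_name_char'])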
@@ -1,34 +1,40 @@
 import logging
 import asyncio
 import json
+import socket
+import random
 from time import perf_counter
-from operator import itemgetter
+from collections import defaultdict
 from typing import Dict, Optional, Tuple

+import aiohttp
+
 from lbry import __version__
+from lbry.utils import resolve_host
 from lbry.error import IncompatibleWalletServerError
 from lbry.wallet.rpc import RPCSession as BaseClientSession, Connector, RPCError, ProtocolError
 from lbry.wallet.stream import StreamController
+from lbry.wallet.udp import SPVStatusClientProtocol, SPVPong
+from lbry.conf import KnownHubsList

 log = logging.getLogger(__name__)


 class ClientSession(BaseClientSession):
-    def __init__(self, *args, network, server, timeout=30, on_connect_callback=None, **kwargs):
+    def __init__(self, *args, network: 'Network', server, timeout=30, concurrency=32, **kwargs):
         self.network = network
         self.server = server
         super().__init__(*args, **kwargs)
-        self._on_disconnect_controller = StreamController()
-        self.on_disconnected = self._on_disconnect_controller.stream
         self.framer.max_size = self.max_errors = 1 << 32
         self.timeout = timeout
         self.max_seconds_idle = timeout * 2
         self.response_time: Optional[float] = None
         self.connection_latency: Optional[float] = None
         self._response_samples = 0
-        self.pending_amount = 0
-        self._on_connect_cb = on_connect_callback or (lambda: None)
-        self.trigger_urgent_reconnect = asyncio.Event()
+        self._concurrency = asyncio.Semaphore(concurrency)
+
+    @property
+    def concurrency(self):
+        return self._concurrency._value

     @property
     def available(self):

@@ -54,9 +60,9 @@ class ClientSession(BaseClientSession):
         return result

     async def send_request(self, method, args=()):
-        self.pending_amount += 1
-        log.debug("send %s%s to %s:%i", method, tuple(args), *self.server)
+        log.debug("send %s%s to %s:%i (%i timeout)", method, tuple(args), self.server[0], self.server[1], self.timeout)
         try:
+            await self._concurrency.acquire()
             if method == 'server.version':
                 return await self.send_timed_server_version_request(args, self.timeout)
             request = asyncio.ensure_future(super().send_request(method, args))

@@ -66,7 +72,7 @@ class ClientSession(BaseClientSession):
                 log.debug("Time since last packet: %s", perf_counter() - self.last_packet_received)
                 if (perf_counter() - self.last_packet_received) < self.timeout:
                     continue
-                log.info("timeout sending %s to %s:%i", method, *self.server)
+                log.warning("timeout sending %s to %s:%i", method, *self.server)
                 raise asyncio.TimeoutError
             if done:
                 try:

@@ -86,43 +92,11 @@ class ClientSession(BaseClientSession):
             self.synchronous_close()
             raise
         except asyncio.CancelledError:
-            log.info("cancelled sending %s to %s:%i", method, *self.server)
+            log.warning("cancelled sending %s to %s:%i", method, *self.server)
             # self.synchronous_close()
             raise
         finally:
-            self.pending_amount -= 1
+            self._concurrency.release()

-    async def ensure_session(self):
-        # Handles reconnecting and maintaining a session alive
-        # TODO: change to 'ping' on newer protocol (above 1.2)
-        retry_delay = default_delay = 1.0
-        while True:
-            try:
-                if self.is_closing():
-                    await self.create_connection(self.timeout)
-                    await self.ensure_server_version()
-                    self._on_connect_cb()
-                if (perf_counter() - self.last_send) > self.max_seconds_idle or self.response_time is None:
-                    await self.ensure_server_version()
-                retry_delay = default_delay
-            except RPCError as e:
-                await self.close()
-                log.debug("Server error, ignoring for 1h: %s:%d -- %s", *self.server, e.message)
-                retry_delay = 60 * 60
-            except IncompatibleWalletServerError:
-                await self.close()
-                retry_delay = 60 * 60
-                log.debug("Wallet server has an incompatible version, retrying in 1h: %s:%d", *self.server)
-            except (asyncio.TimeoutError, OSError):
-                await self.close()
-                retry_delay = min(60, retry_delay * 2)
-                log.debug("Wallet server timeout (retry in %s seconds): %s:%d", retry_delay, *self.server)
-            try:
-                await asyncio.wait_for(self.trigger_urgent_reconnect.wait(), timeout=retry_delay)
-            except asyncio.TimeoutError:
-                pass
-            finally:
-                self.trigger_urgent_reconnect.clear()
     async def ensure_server_version(self, required=None, timeout=3):
         required = required or self.network.PROTOCOL_VERSION

@@ -133,6 +107,25 @@ class ClientSession(BaseClientSession):
             raise IncompatibleWalletServerError(*self.server)
         return response

+    async def keepalive_loop(self, timeout=3, max_idle=60):
+        try:
+            while True:
+                now = perf_counter()
+                if min(self.last_send, self.last_packet_received) + max_idle < now:
+                    await asyncio.wait_for(
+                        self.send_request('server.ping', []), timeout=timeout
+                    )
+                else:
+                    await asyncio.sleep(max(0, max_idle - (now - self.last_send)))
+        except (Exception, asyncio.CancelledError) as err:
+            if isinstance(err, asyncio.CancelledError):
+                log.info("closing connection to %s:%i", *self.server)
+            else:
+                log.exception("lost connection to spv")
+        finally:
+            if not self.is_closing():
+                self._close()
+
     async def create_connection(self, timeout=6):
         connector = Connector(lambda: self, *self.server)
         start = perf_counter()

@@ -149,7 +142,9 @@ class ClientSession(BaseClientSession):
         self.response_time = None
         self.connection_latency = None
         self._response_samples = 0
-        self._on_disconnect_controller.add(True)
+        # self._on_disconnect_controller.add(True)
+        if self.network:
+            self.network.disconnect()
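A standalone sketch of the idle-based keepalive idea introduced above (not the ClientSession method itself): ping only when the link has been quiet for max_idle seconds, otherwise sleep until the idle window would expire. The send_ping and last_activity callables, and the tiny demo timings, are illustrative placeholders.

import asyncio
from time import perf_counter

async def keepalive(send_ping, last_activity, max_idle=60, timeout=3):
    while True:
        idle_for = perf_counter() - last_activity()
        if idle_for >= max_idle:
            await asyncio.wait_for(send_ping(), timeout)   # link is idle: ping it
        else:
            await asyncio.sleep(max_idle - idle_for)        # wait out the idle window

async def demo():
    last = [perf_counter()]
    async def ping():
        print("ping")
        last[0] = perf_counter()   # sending counts as activity
    try:
        await asyncio.wait_for(keepalive(ping, lambda: last[0], max_idle=0.1, timeout=1), 0.35)
    except asyncio.TimeoutError:
        pass  # demo ends after ~0.35s

asyncio.run(demo())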
 class Network:

@@ -159,13 +154,11 @@ class Network:

     def __init__(self, ledger):
         self.ledger = ledger
-        self.session_pool = SessionPool(network=self, timeout=self.config.get('connect_timeout', 6))
         self.client: Optional[ClientSession] = None
         self.server_features = None
-        self._switch_task: Optional[asyncio.Task] = None
+        # self._switch_task: Optional[asyncio.Task] = None
         self.running = False
         self.remote_height: int = 0
-        self._concurrency = asyncio.Semaphore(16)

         self._on_connected_controller = StreamController()
         self.on_connected = self._on_connected_controller.stream

@@ -176,88 +169,249 @@ class Network:
         self._on_status_controller = StreamController(merge_repeated_events=True)
         self.on_status = self._on_status_controller.stream

+        self._on_hub_controller = StreamController(merge_repeated_events=True)
+        self.on_hub = self._on_hub_controller.stream
+
         self.subscription_controllers = {
             'blockchain.headers.subscribe': self._on_header_controller,
             'blockchain.address.subscribe': self._on_status_controller,
+            'blockchain.peers.subscribe': self._on_hub_controller,
         }

+        self.aiohttp_session: Optional[aiohttp.ClientSession] = None
+        self._urgent_need_reconnect = asyncio.Event()
+        self._loop_task: Optional[asyncio.Task] = None
+        self._keepalive_task: Optional[asyncio.Task] = None
+
     @property
     def config(self):
         return self.ledger.config

-    async def switch_forever(self):
-        while self.running:
-            if self.is_connected:
-                await self.client.on_disconnected.first
-                self.server_features = None
-                self.client = None
-                continue
-            self.client = await self.session_pool.wait_for_fastest_session()
-            log.info("Switching to SPV wallet server: %s:%d", *self.client.server)
-            try:
-                self.server_features = await self.get_server_features()
-                self._update_remote_height((await self.subscribe_headers(),))
-                self._on_connected_controller.add(True)
-                log.info("Subscribed to headers: %s:%d", *self.client.server)
-            except (asyncio.TimeoutError, ConnectionError):
-                log.info("Switching to %s:%d timed out, closing and retrying.", *self.client.server)
-                self.client.synchronous_close()
-                self.server_features = None
-                self.client = None
+    @property
+    def known_hubs(self):
+        if 'known_hubs' not in self.config:
+            return KnownHubsList()
+        return self.config['known_hubs']
+
+    @property
+    def jurisdiction(self):
+        return self.config.get("jurisdiction")
+
+    def disconnect(self):
+        if self._keepalive_task and not self._keepalive_task.done():
+            self._keepalive_task.cancel()
+        self._keepalive_task = None

     async def start(self):
-        self.running = True
-        self._switch_task = asyncio.ensure_future(self.switch_forever())
-        # this may become unnecessary when there are no more bugs found,
-        # but for now it helps understanding log reports
-        self._switch_task.add_done_callback(lambda _: log.info("Wallet client switching task stopped."))
-        self.session_pool.start(self.config['default_servers'])
-        self.on_header.listen(self._update_remote_height)
+        if not self.running:
+            self.running = True
+            self.aiohttp_session = aiohttp.ClientSession()
+            self.on_header.listen(self._update_remote_height)
+            self.on_hub.listen(self._update_hubs)
+            self._loop_task = asyncio.create_task(self.network_loop())
+            self._urgent_need_reconnect.set()
+
+            def loop_task_done_callback(f):
+                try:
+                    f.result()
+                except (Exception, asyncio.CancelledError):
+                    if self.running:
+                        log.exception("wallet server connection loop crashed")
+
+            self._loop_task.add_done_callback(loop_task_done_callback)
+
+    async def resolve_spv_dns(self):
+        hostname_to_ip = {}
+        ip_to_hostnames = defaultdict(list)
+
+        async def resolve_spv(server, port):
+            try:
+                server_addr = await resolve_host(server, port, 'udp')
+                hostname_to_ip[server] = (server_addr, port)
+                ip_to_hostnames[(server_addr, port)].append(server)
+            except socket.error:
+                log.warning("error looking up dns for spv server %s:%i", server, port)
+            except Exception:
+                log.exception("error looking up dns for spv server %s:%i", server, port)
+
+        # accumulate the dns results
+        if self.config.get('explicit_servers', []):
+            hubs = self.config['explicit_servers']
+        elif self.known_hubs:
+            hubs = self.known_hubs
+        else:
+            hubs = self.config['default_servers']
+        await asyncio.gather(*(resolve_spv(server, port) for (server, port) in hubs))
+        return hostname_to_ip, ip_to_hostnames
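A hedged sketch of the server-selection precedence implied by resolve_spv_dns above: explicitly configured servers win, then previously discovered hubs, then the built-in defaults. The helper name and sample host names below are placeholders.

def pick_hubs(explicit, known, default):
    if explicit:
        return list(explicit)
    if known:
        return list(known)
    return list(default)

assert pick_hubs([], [("hub1.example.com", 50001)], [("default.example.com", 50001)]) == \
    [("hub1.example.com", 50001)]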
+    async def get_n_fastest_spvs(self, timeout=3.0) -> Dict[Tuple[str, int], Optional[SPVPong]]:
+        loop = asyncio.get_event_loop()
+        pong_responses = asyncio.Queue()
+        connection = SPVStatusClientProtocol(pong_responses)
+        sent_ping_timestamps = {}
+        _, ip_to_hostnames = await self.resolve_spv_dns()
+        n = len(ip_to_hostnames)
+        log.info("%i possible spv servers to try (%i urls in config)", n, len(self.config.get('explicit_servers', [])))
+        pongs = {}
+        known_hubs = self.known_hubs
+        try:
+            await loop.create_datagram_endpoint(lambda: connection, ('0.0.0.0', 0))
+            # could raise OSError if it cant bind
+            start = perf_counter()
+            for server in ip_to_hostnames:
+                connection.ping(server)
+                sent_ping_timestamps[server] = perf_counter()
+            while len(pongs) < n:
+                (remote, ts), pong = await asyncio.wait_for(pong_responses.get(), timeout - (perf_counter() - start))
+                latency = ts - start
+                log.info("%s:%i has latency of %sms (available: %s, height: %i)",
+                         '/'.join(ip_to_hostnames[remote]), remote[1], round(latency * 1000, 2),
+                         pong.available, pong.height)
+
+                known_hubs.hubs.setdefault((ip_to_hostnames[remote][0], remote[1]), {}).update(
+                    {"country": pong.country_name}
+                )
+                if pong.available:
+                    pongs[(ip_to_hostnames[remote][0], remote[1])] = pong
+            return pongs
+        except asyncio.TimeoutError:
+            if pongs:
+                log.info("%i/%i probed spv servers are accepting connections", len(pongs), len(ip_to_hostnames))
+                return pongs
+            else:
+                log.warning("%i spv status probes failed, retrying later. servers tried: %s",
+                            len(sent_ping_timestamps),
+                            ', '.join('/'.join(hosts) + f' ({ip})' for ip, hosts in ip_to_hostnames.items()))
+                random_server = random.choice(list(ip_to_hostnames.keys()))
+                host, port = random_server
+                log.warning("trying fallback to randomly selected spv: %s:%i", host, port)
+                known_hubs.hubs.setdefault((host, port), {})
+                return {(host, port): None}
+        finally:
+            connection.close()
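For illustration, a simplified, self-contained model of the probe-and-rank idea behind get_n_fastest_spvs, with the UDP status pings replaced by arbitrary coroutines: probe every candidate concurrently, keep whoever answers before the deadline, and record each responder's latency. All names here are placeholders.

import asyncio
from time import perf_counter

async def probe_all(probes, timeout=3.0):
    start = perf_counter()

    async def timed(name, probe):
        await probe()
        return name, perf_counter() - start

    tasks = [asyncio.create_task(timed(name, p)) for name, p in probes.items()]
    done, pending = await asyncio.wait(tasks, timeout=timeout)
    for task in pending:
        task.cancel()
    await asyncio.gather(*pending, return_exceptions=True)
    return dict(t.result() for t in done)

async def main():
    async def fast(): await asyncio.sleep(0.01)
    async def slow(): await asyncio.sleep(10)
    latencies = await probe_all({"hub-a": fast, "hub-b": slow}, timeout=0.1)
    print(latencies)   # only "hub-a" made the deadline

asyncio.run(main())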
+    async def connect_to_fastest(self) -> Optional[ClientSession]:
+        fastest_spvs = await self.get_n_fastest_spvs()
+        for (host, port), pong in fastest_spvs.items():
+            if (pong is not None and self.jurisdiction is not None) and \
+                    (pong.country_name != self.jurisdiction):
+                continue
+            client = ClientSession(network=self, server=(host, port), timeout=self.config.get('hub_timeout', 30),
+                                   concurrency=self.config.get('concurrent_hub_requests', 30))
+            try:
+                await client.create_connection()
+                log.info("Connected to spv server %s:%i", host, port)
+                await client.ensure_server_version()
+                return client
+            except (asyncio.TimeoutError, ConnectionError, OSError, IncompatibleWalletServerError, RPCError):
+                log.warning("Connecting to %s:%d failed", host, port)
+                client._close()
+        return
+    async def network_loop(self):
+        sleep_delay = 30
+        while self.running:
+            await asyncio.wait(
+                map(asyncio.create_task, [asyncio.sleep(30), self._urgent_need_reconnect.wait()]),
+                return_when=asyncio.FIRST_COMPLETED
+            )
+            if self._urgent_need_reconnect.is_set():
+                sleep_delay = 30
+                self._urgent_need_reconnect.clear()
+            if not self.is_connected:
+                client = await self.connect_to_fastest()
+                if not client:
+                    log.warning("failed to connect to any spv servers, retrying later")
+                    sleep_delay *= 2
+                    sleep_delay = min(sleep_delay, 300)
+                    continue
+                log.debug("get spv server features %s:%i", *client.server)
+                features = await client.send_request('server.features', [])
+                self.client, self.server_features = client, features
+                log.debug("discover other hubs %s:%i", *client.server)
+                await self._update_hubs(await client.send_request('server.peers.subscribe', []))
+                log.info("subscribe to headers %s:%i", *client.server)
+                self._update_remote_height((await self.subscribe_headers(),))
+                self._on_connected_controller.add(True)
+                server_str = "%s:%i" % client.server
+                log.info("maintaining connection to spv server %s", server_str)
+                self._keepalive_task = asyncio.create_task(self.client.keepalive_loop())
+                try:
+                    if not self._urgent_need_reconnect.is_set():
+                        await asyncio.wait(
+                            [self._keepalive_task, asyncio.create_task(self._urgent_need_reconnect.wait())],
+                            return_when=asyncio.FIRST_COMPLETED
+                        )
+                    else:
+                        await self._keepalive_task
+                    if self._urgent_need_reconnect.is_set():
+                        log.warning("urgent reconnect needed")
+                    if self._keepalive_task and not self._keepalive_task.done():
+                        self._keepalive_task.cancel()
+                except asyncio.CancelledError:
+                    pass
+                finally:
+                    self._keepalive_task = None
+                    self.client = None
+                    self.server_features = None
+                    log.info("connection lost to %s", server_str)
+        log.info("network loop finished")
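A small illustrative sketch of the retry bookkeeping in network_loop above (the function name is a placeholder): the sleep_delay value doubles after every failed connection attempt, is capped at 300 seconds, and is reset to 30 whenever an urgent reconnect is requested or a connection attempt succeeds.

def next_delay(current, failed, urgent):
    if urgent:
        return 30
    if failed:
        return min(current * 2, 300)
    return 30

delay = 30
for _attempt in range(4):
    delay = next_delay(delay, failed=True, urgent=False)
print(delay)   # 300 after repeated failures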
async def stop(self):
|
async def stop(self):
|
||||||
if self.running:
|
self.running = False
|
||||||
self.running = False
|
self.disconnect()
|
||||||
self._switch_task.cancel()
|
if self._loop_task and not self._loop_task.done():
|
||||||
self.session_pool.stop()
|
self._loop_task.cancel()
|
||||||
|
self._loop_task = None
|
||||||
|
if self.aiohttp_session:
|
||||||
|
await self.aiohttp_session.close()
|
||||||
|
self.aiohttp_session = None
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def is_connected(self):
|
def is_connected(self):
|
||||||
return self.client and not self.client.is_closing()
|
return self.client and not self.client.is_closing()
|
||||||
|
|
||||||
def rpc(self, list_or_method, args, restricted=True):
|
def rpc(self, list_or_method, args, restricted=True, session: Optional[ClientSession] = None):
|
||||||
session = self.client if restricted else self.session_pool.fastest_session
|
if session or self.is_connected:
|
||||||
if session and not session.is_closing():
|
session = session or self.client
|
||||||
return session.send_request(list_or_method, args)
|
return session.send_request(list_or_method, args)
|
||||||
else:
|
else:
|
||||||
self.session_pool.trigger_nodelay_connect()
|
self._urgent_need_reconnect.set()
|
||||||
raise ConnectionError("Attempting to send rpc request when connection is not available.")
|
raise ConnectionError("Attempting to send rpc request when connection is not available.")
|
||||||
|
|
||||||
async def retriable_call(self, function, *args, **kwargs):
|
async def retriable_call(self, function, *args, **kwargs):
|
||||||
async with self._concurrency:
|
while self.running:
|
||||||
while self.running:
|
if not self.is_connected:
|
||||||
if not self.is_connected:
|
log.warning("Wallet server unavailable, waiting for it to come back and retry.")
|
||||||
log.warning("Wallet server unavailable, waiting for it to come back and retry.")
|
self._urgent_need_reconnect.set()
|
||||||
await self.on_connected.first
|
await self.on_connected.first
|
||||||
await self.session_pool.wait_for_fastest_session()
|
try:
|
||||||
try:
|
return await function(*args, **kwargs)
|
||||||
return await function(*args, **kwargs)
|
except asyncio.TimeoutError:
|
||||||
except asyncio.TimeoutError:
|
log.warning("Wallet server call timed out, retrying.")
|
||||||
log.warning("Wallet server call timed out, retrying.")
|
except ConnectionError:
|
||||||
except ConnectionError:
|
log.warning("connection error")
|
||||||
pass
|
|
||||||
raise asyncio.CancelledError() # if we got here, we are shutting down
|
raise asyncio.CancelledError() # if we got here, we are shutting down
|
||||||
|
|
||||||
def _update_remote_height(self, header_args):
|
def _update_remote_height(self, header_args):
|
||||||
self.remote_height = header_args[0]["height"]
|
self.remote_height = header_args[0]["height"]
|
||||||
|
|
||||||
|
async def _update_hubs(self, hubs):
|
||||||
|
if hubs and hubs != ['']:
|
||||||
|
try:
|
||||||
|
if self.known_hubs.add_hubs(hubs):
|
||||||
|
self.known_hubs.save()
|
||||||
|
except Exception:
|
||||||
|
log.exception("could not add hubs: %s", hubs)
|
||||||
|
|
||||||
def get_transaction(self, tx_hash, known_height=None):
|
def get_transaction(self, tx_hash, known_height=None):
|
||||||
# use any server if its old, otherwise restrict to who gave us the history
|
# use any server if its old, otherwise restrict to who gave us the history
|
||||||
restricted = known_height in (None, -1, 0) or 0 > known_height > self.remote_height - 10
|
restricted = known_height in (None, -1, 0) or 0 > known_height > self.remote_height - 10
|
||||||
return self.rpc('blockchain.transaction.get', [tx_hash], restricted)
|
return self.rpc('blockchain.transaction.get', [tx_hash], restricted)
|
||||||
|
|
||||||
def get_transaction_batch(self, txids):
|
def get_transaction_batch(self, txids, restricted=True):
|
||||||
# use any server if its old, otherwise restrict to who gave us the history
|
# use any server if its old, otherwise restrict to who gave us the history
|
||||||
return self.rpc('blockchain.transaction.get_batch', txids, True)
|
return self.rpc('blockchain.transaction.get_batch', txids, restricted)
|
||||||
|
|
||||||
def get_transaction_and_merkle(self, tx_hash, known_height=None):
|
def get_transaction_and_merkle(self, tx_hash, known_height=None):
|
||||||
# use any server if its old, otherwise restrict to who gave us the history
|
# use any server if its old, otherwise restrict to who gave us the history
|
||||||
|
@@ -307,103 +461,20 @@ class Network:
     def get_server_features(self):
         return self.rpc('server.features', (), restricted=True)

-    def get_claims_by_ids(self, claim_ids):
-        return self.rpc('blockchain.claimtrie.getclaimsbyids', claim_ids)
+    # def get_claims_by_ids(self, claim_ids):
+    #     return self.rpc('blockchain.claimtrie.getclaimsbyids', claim_ids)

-    def resolve(self, urls):
-        return self.rpc('blockchain.claimtrie.resolve', urls)
+    def get_claim_by_id(self, claim_id):
+        return self.rpc('blockchain.claimtrie.getclaimbyid', [claim_id])

-    def claim_search(self, **kwargs):
-        return self.rpc('blockchain.claimtrie.search', kwargs)
+    def resolve(self, urls, session_override=None):
+        return self.rpc('blockchain.claimtrie.resolve', urls, False, session_override)

+    def claim_search(self, session_override=None, **kwargs):
+        return self.rpc('blockchain.claimtrie.search', kwargs, False, session_override)

-class SessionPool:
-
-    def __init__(self, network: Network, timeout: float):
-        self.network = network
-        self.sessions: Dict[ClientSession, Optional[asyncio.Task]] = dict()
-        self.timeout = timeout
-        self.new_connection_event = asyncio.Event()
-
-    @property
-    def online(self):
-        return any(not session.is_closing() for session in self.sessions)
-
-    @property
-    def available_sessions(self):
-        return (session for session in self.sessions if session.available)
-
-    @property
-    def fastest_session(self):
-        if not self.online:
-            return None
-        return min(
-            [((session.response_time + session.connection_latency) * (session.pending_amount + 1), session)
-             for session in self.available_sessions] or [(0, None)],
-            key=itemgetter(0)
-        )[1]
-
-    def _get_session_connect_callback(self, session: ClientSession):
-        loop = asyncio.get_event_loop()
-
-        def callback():
-            duplicate_connections = [
-                s for s in self.sessions
-                if s is not session and s.server_address_and_port == session.server_address_and_port
-            ]
-            already_connected = None if not duplicate_connections else duplicate_connections[0]
-            if already_connected:
-                self.sessions.pop(session).cancel()
-                session.synchronous_close()
-                log.debug("wallet server %s resolves to the same server as %s, rechecking in an hour",
-                          session.server[0], already_connected.server[0])
-                loop.call_later(3600, self._connect_session, session.server)
-                return
-            self.new_connection_event.set()
-            log.info("connected to %s:%i", *session.server)
-
-        return callback
-
-    def _connect_session(self, server: Tuple[str, int]):
-        session = None
-        for s in self.sessions:
-            if s.server == server:
-                session = s
-                break
-        if not session:
-            session = ClientSession(
-                network=self.network, server=server
-            )
-            session._on_connect_cb = self._get_session_connect_callback(session)
-        task = self.sessions.get(session, None)
-        if not task or task.done():
-            task = asyncio.create_task(session.ensure_session())
-            task.add_done_callback(lambda _: self.ensure_connections())
-            self.sessions[session] = task
-
-    def start(self, default_servers):
-        for server in default_servers:
-            self._connect_session(server)
-
-    def stop(self):
-        for session, task in self.sessions.items():
-            task.cancel()
-            session.synchronous_close()
-        self.sessions.clear()
-
-    def ensure_connections(self):
-        for session in self.sessions:
-            self._connect_session(session.server)
-
-    def trigger_nodelay_connect(self):
-        # used when other parts of the system sees we might have internet back
-        # bypasses the retry interval
-        for session in self.sessions:
-            session.trigger_urgent_reconnect.set()
-
-    async def wait_for_fastest_session(self):
-        while not self.fastest_session:
-            self.trigger_nodelay_connect()
-            self.new_connection_event.clear()
-            await self.new_connection_event.wait()
-        return self.fastest_session
+    async def sum_supports(self, server, **kwargs):
+        message = {"method": "support_sum", "params": kwargs}
+        async with self.aiohttp_session.post(server, json=message) as r:
+            result = await r.json()
+            return result['result']
@@ -1,2 +1,2 @@
-from .node import Conductor
-from .service import ConductorService
+from lbry.wallet.orchstr8.node import Conductor
+from lbry.wallet.orchstr8.service import ConductorService
@@ -5,7 +5,9 @@ import aiohttp

 from lbry import wallet
 from lbry.wallet.orchstr8.node import (
-    Conductor, get_blockchain_node_from_ledger
+    Conductor,
+    get_lbcd_node_from_ledger,
+    get_lbcwallet_node_from_ledger
 )
 from lbry.wallet.orchstr8.service import ConductorService

@@ -16,10 +18,11 @@ def get_argument_parser():
     )
     subparsers = parser.add_subparsers(dest='command', help='sub-command help')

-    subparsers.add_parser("download", help="Download blockchain node binary.")
+    subparsers.add_parser("download", help="Download lbcd and lbcwallet node binaries.")

     start = subparsers.add_parser("start", help="Start orchstr8 service.")
-    start.add_argument("--blockchain", help="Hostname to start blockchain node.")
+    start.add_argument("--lbcd", help="Hostname to start lbcd node.")
+    start.add_argument("--lbcwallet", help="Hostname to start lbcwallet node.")
     start.add_argument("--spv", help="Hostname to start SPV server.")
     start.add_argument("--wallet", help="Hostname to start wallet daemon.")
@@ -47,7 +50,8 @@ def main():

     if command == 'download':
         logging.getLogger('blockchain').setLevel(logging.INFO)
-        get_blockchain_node_from_ledger(wallet).ensure()
+        get_lbcd_node_from_ledger(wallet).ensure()
+        get_lbcwallet_node_from_ledger(wallet).ensure()

     elif command == 'generate':
         loop.run_until_complete(run_remote_command(
@@ -57,9 +61,12 @@ def main():
     elif command == 'start':

         conductor = Conductor()
-        if getattr(args, 'blockchain', False):
-            conductor.blockchain_node.hostname = args.blockchain
-            loop.run_until_complete(conductor.start_blockchain())
+        if getattr(args, 'lbcd', False):
+            conductor.lbcd_node.hostname = args.lbcd
+            loop.run_until_complete(conductor.start_lbcd())
+        if getattr(args, 'lbcwallet', False):
+            conductor.lbcwallet_node.hostname = args.lbcwallet
+            loop.run_until_complete(conductor.start_lbcwallet())
         if getattr(args, 'spv', False):
             conductor.spv_node.hostname = args.spv
             loop.run_until_complete(conductor.start_spv())
@@ -1,3 +1,4 @@
+# pylint: disable=import-error
 import os
 import json
 import shutil
@@ -7,31 +8,44 @@ import tarfile
 import logging
 import tempfile
 import subprocess
-import importlib
+import platform

 from binascii import hexlify
 from typing import Type, Optional
 import urllib.request
+from uuid import uuid4


 import lbry
-from lbry.wallet.server.server import Server
-from lbry.wallet.server.env import Env
 from lbry.wallet import Wallet, Ledger, RegTestLedger, WalletManager, Account, BlockHeightEvent
+from lbry.conf import KnownHubsList, Config

 log = logging.getLogger(__name__)

-def get_spvserver_from_ledger(ledger_module):
-    spvserver_path, regtest_class_name = ledger_module.__spvserver__.rsplit('.', 1)
-    spvserver_module = importlib.import_module(spvserver_path)
-    return getattr(spvserver_module, regtest_class_name)
+try:
+    from hub.herald.env import ServerEnv
+    from hub.scribe.env import BlockchainEnv
+    from hub.elastic_sync.env import ElasticEnv
+    from hub.herald.service import HubServerService
+    from hub.elastic_sync.service import ElasticSyncService
+    from hub.scribe.service import BlockchainProcessorService
+except ImportError:
+    pass


-def get_blockchain_node_from_ledger(ledger_module):
-    return BlockchainNode(
-        ledger_module.__node_url__,
-        os.path.join(ledger_module.__node_bin__, ledger_module.__node_daemon__),
-        os.path.join(ledger_module.__node_bin__, ledger_module.__node_cli__)
-    )
+def get_lbcd_node_from_ledger(ledger_module):
+    return LBCDNode(
+        ledger_module.__lbcd_url__,
+        ledger_module.__lbcd__,
+        ledger_module.__lbcctl__
+    )
+
+
+def get_lbcwallet_node_from_ledger(ledger_module):
+    return LBCWalletNode(
+        ledger_module.__lbcwallet_url__,
+        ledger_module.__lbcwallet__,
+        ledger_module.__lbcctl__
+    )
@@ -39,40 +53,37 @@ class Conductor:

     def __init__(self, seed=None):
         self.manager_module = WalletManager
-        self.spv_module = get_spvserver_from_ledger(lbry.wallet)
-        self.blockchain_node = get_blockchain_node_from_ledger(lbry.wallet)
-        self.spv_node = SPVNode(self.spv_module)
+        self.lbcd_node = get_lbcd_node_from_ledger(lbry.wallet)
+        self.lbcwallet_node = get_lbcwallet_node_from_ledger(lbry.wallet)
+        self.spv_node = SPVNode()
         self.wallet_node = WalletNode(
             self.manager_module, RegTestLedger, default_seed=seed
         )
-        self.blockchain_started = False
+        self.lbcd_started = False
+        self.lbcwallet_started = False
         self.spv_started = False
         self.wallet_started = False

         self.log = log.getChild('conductor')

-    async def start_blockchain(self):
-        if not self.blockchain_started:
-            asyncio.create_task(self.blockchain_node.start())
-            await self.blockchain_node.running.wait()
-            await self.blockchain_node.generate(200)
-            self.blockchain_started = True
+    async def start_lbcd(self):
+        if not self.lbcd_started:
+            await self.lbcd_node.start()
+            self.lbcd_started = True

-    async def stop_blockchain(self):
-        if self.blockchain_started:
-            await self.blockchain_node.stop(cleanup=True)
-            self.blockchain_started = False
+    async def stop_lbcd(self, cleanup=True):
+        if self.lbcd_started:
+            await self.lbcd_node.stop(cleanup)
+            self.lbcd_started = False

     async def start_spv(self):
         if not self.spv_started:
-            await self.spv_node.start(self.blockchain_node)
+            await self.spv_node.start(self.lbcwallet_node)
             self.spv_started = True

-    async def stop_spv(self):
+    async def stop_spv(self, cleanup=True):
         if self.spv_started:
-            await self.spv_node.stop(cleanup=True)
+            await self.spv_node.stop(cleanup)
             self.spv_started = False

     async def start_wallet(self):
@@ -80,13 +91,30 @@ class Conductor:
             await self.wallet_node.start(self.spv_node)
             self.wallet_started = True

-    async def stop_wallet(self):
+    async def stop_wallet(self, cleanup=True):
         if self.wallet_started:
-            await self.wallet_node.stop(cleanup=True)
+            await self.wallet_node.stop(cleanup)
             self.wallet_started = False

+    async def start_lbcwallet(self, clean=True):
+        if not self.lbcwallet_started:
+            await self.lbcwallet_node.start()
+            if clean:
+                mining_addr = await self.lbcwallet_node.get_new_address()
+                self.lbcwallet_node.mining_addr = mining_addr
+                await self.lbcwallet_node.generate(200)
+            # unlock the wallet for the next 1 hour
+            await self.lbcwallet_node.wallet_passphrase("password", 3600)
+            self.lbcwallet_started = True
+
+    async def stop_lbcwallet(self, cleanup=True):
+        if self.lbcwallet_started:
+            await self.lbcwallet_node.stop(cleanup)
+            self.lbcwallet_started = False
+
     async def start(self):
-        await self.start_blockchain()
+        await self.start_lbcd()
+        await self.start_lbcwallet()
         await self.start_spv()
         await self.start_wallet()

@@ -94,7 +122,8 @@ class Conductor:
         all_the_stops = [
             self.stop_wallet,
             self.stop_spv,
-            self.stop_blockchain
+            self.stop_lbcwallet,
+            self.stop_lbcd
         ]
         for stop in all_the_stops:
             try:
@@ -102,11 +131,18 @@ class Conductor:
             except Exception as e:
                 log.exception('Exception raised while stopping services:', exc_info=e)

+    async def clear_mempool(self):
+        await self.stop_lbcwallet(cleanup=False)
+        await self.stop_lbcd(cleanup=False)
+        await self.start_lbcd()
+        await self.start_lbcwallet(clean=False)
+

 class WalletNode:

     def __init__(self, manager_class: Type[WalletManager], ledger_class: Type[Ledger],
-                 verbose: bool = False, port: int = 5280, default_seed: str = None) -> None:
+                 verbose: bool = False, port: int = 5280, default_seed: str = None,
+                 data_path: str = None) -> None:
         self.manager_class = manager_class
         self.ledger_class = ledger_class
         self.verbose = verbose
@@ -114,27 +150,34 @@ class WalletNode:
         self.ledger: Optional[Ledger] = None
         self.wallet: Optional[Wallet] = None
         self.account: Optional[Account] = None
-        self.data_path: Optional[str] = None
+        self.data_path: str = data_path or tempfile.mkdtemp()
         self.port = port
         self.default_seed = default_seed
+        self.known_hubs = KnownHubsList()

-    async def start(self, spv_node: 'SPVNode', seed=None, connect=True):
-        self.data_path = tempfile.mkdtemp()
+    async def start(self, spv_node: 'SPVNode', seed=None, connect=True, config=None):
         wallets_dir = os.path.join(self.data_path, 'wallets')
-        os.mkdir(wallets_dir)
         wallet_file_name = os.path.join(wallets_dir, 'my_wallet.json')
-        with open(wallet_file_name, 'w') as wallet_file:
-            wallet_file.write('{"version": 1, "accounts": []}\n')
+        if not os.path.isdir(wallets_dir):
+            os.mkdir(wallets_dir)
+            with open(wallet_file_name, 'w') as wallet_file:
+                wallet_file.write('{"version": 1, "accounts": []}\n')
         self.manager = self.manager_class.from_config({
             'ledgers': {
                 self.ledger_class.get_id(): {
                     'api_port': self.port,
-                    'default_servers': [(spv_node.hostname, spv_node.port)],
-                    'data_path': self.data_path
+                    'explicit_servers': [(spv_node.hostname, spv_node.port)],
+                    'default_servers': Config.lbryum_servers.default,
+                    'data_path': self.data_path,
+                    'known_hubs': config.known_hubs if config else KnownHubsList(),
+                    'hub_timeout': 30,
+                    'concurrent_hub_requests': 32,
+                    'fee_per_name_char': 200000
                 }
             },
             'wallets': [wallet_file_name]
         })
+        self.manager.config = config
         self.ledger = self.manager.ledgers[self.ledger_class]
         self.wallet = self.manager.default_wallet
         if not self.wallet:
@@ -160,44 +203,83 @@ class WalletNode:


 class SPVNode:

-    def __init__(self, coin_class, node_number=1):
-        self.coin_class = coin_class
+    def __init__(self, node_number=1):
+        self.node_number = node_number
         self.controller = None
         self.data_path = None
-        self.server = None
+        self.server: Optional[HubServerService] = None
+        self.writer: Optional[BlockchainProcessorService] = None
+        self.es_writer: Optional[ElasticSyncService] = None
         self.hostname = 'localhost'
         self.port = 50001 + node_number  # avoid conflict with default daemon
+        self.udp_port = self.port
+        self.elastic_notifier_port = 19080 + node_number
+        self.elastic_services = f'localhost:9200/localhost:{self.elastic_notifier_port}'
         self.session_timeout = 600
-        self.rpc_port = '0'  # disabled by default
+        self.stopped = True
+        self.index_name = uuid4().hex

-    async def start(self, blockchain_node: 'BlockchainNode', extraconf=None):
-        self.data_path = tempfile.mkdtemp()
-        conf = {
-            'DESCRIPTION': '',
-            'PAYMENT_ADDRESS': '',
-            'DAILY_FEE': '0',
-            'DB_DIRECTORY': self.data_path,
-            'DAEMON_URL': blockchain_node.rpc_url,
-            'REORG_LIMIT': '100',
-            'HOST': self.hostname,
-            'TCP_PORT': str(self.port),
-            'SESSION_TIMEOUT': str(self.session_timeout),
-            'MAX_QUERY_WORKERS': '0',
-            'INDIVIDUAL_TAG_INDEXES': '',
-            'RPC_PORT': self.rpc_port
-        }
-        if extraconf:
-            conf.update(extraconf)
-        # TODO: don't use os.environ
-        os.environ.update(conf)
-        self.server = Server(Env(self.coin_class))
-        self.server.mempool.refresh_secs = self.server.bp.prefetcher.polling_delay = 0.5
-        await self.server.start()
+    async def start(self, lbcwallet_node: 'LBCWalletNode', extraconf=None):
+        if not self.stopped:
+            log.warning("spv node is already running")
+            return
+        self.stopped = False
+        try:
+            self.data_path = tempfile.mkdtemp()
+            conf = {
+                'description': '',
+                'payment_address': '',
+                'daily_fee': '0',
+                'db_dir': self.data_path,
+                'daemon_url': lbcwallet_node.rpc_url,
+                'reorg_limit': 100,
+                'host': self.hostname,
+                'tcp_port': self.port,
+                'udp_port': self.udp_port,
+                'elastic_services': self.elastic_services,
+                'session_timeout': self.session_timeout,
+                'max_query_workers': 0,
+                'es_index_prefix': self.index_name,
+                'chain': 'regtest',
+                'index_address_status': False
+            }
+            if extraconf:
+                conf.update(extraconf)
+            self.writer = BlockchainProcessorService(
+                BlockchainEnv(db_dir=self.data_path, daemon_url=lbcwallet_node.rpc_url,
+                              reorg_limit=100, max_query_workers=0, chain='regtest', index_address_status=False)
+            )
+            self.server = HubServerService(ServerEnv(**conf))
+            self.es_writer = ElasticSyncService(
+                ElasticEnv(
+                    db_dir=self.data_path, reorg_limit=100, max_query_workers=0, chain='regtest',
+                    elastic_notifier_port=self.elastic_notifier_port,
+                    es_index_prefix=self.index_name,
+                    filtering_channel_ids=(extraconf or {}).get('filtering_channel_ids'),
+                    blocking_channel_ids=(extraconf or {}).get('blocking_channel_ids')
+                )
+            )
+            await self.writer.start()
+            await self.es_writer.start()
+            await self.server.start()
+        except Exception as e:
+            self.stopped = True
+            log.exception("failed to start spv node")
+            raise e

     async def stop(self, cleanup=True):
+        if self.stopped:
+            log.warning("spv node is already stopped")
+            return
         try:
             await self.server.stop()
+            await self.es_writer.delete_index()
+            await self.es_writer.stop()
+            await self.writer.stop()
+            self.stopped = True
+        except Exception as e:
+            log.exception("failed to stop spv node")
+            raise e
         finally:
             cleanup and self.cleanup()
|
||||||
shutil.rmtree(self.data_path, ignore_errors=True)
|
shutil.rmtree(self.data_path, ignore_errors=True)
|
||||||
|
|
||||||
|
|
||||||
class BlockchainProcess(asyncio.SubprocessProtocol):
|
class LBCDProcess(asyncio.SubprocessProtocol):
|
||||||
|
|
||||||
IGNORE_OUTPUT = [
|
IGNORE_OUTPUT = [
|
||||||
b'keypool keep',
|
b'keypool keep',
|
||||||
b'keypool reserve',
|
b'keypool reserve',
|
||||||
b'keypool return',
|
b'keypool return',
|
||||||
|
b'Block submitted',
|
||||||
]
|
]
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.ready = asyncio.Event()
|
self.ready = asyncio.Event()
|
||||||
self.stopped = asyncio.Event()
|
self.stopped = asyncio.Event()
|
||||||
self.log = log.getChild('blockchain')
|
self.log = log.getChild('lbcd')
|
||||||
|
|
||||||
def pipe_data_received(self, fd, data):
|
def pipe_data_received(self, fd, data):
|
||||||
if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):
|
if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):
|
||||||
|
@ -227,7 +310,7 @@ class BlockchainProcess(asyncio.SubprocessProtocol):
|
||||||
if b'Error:' in data:
|
if b'Error:' in data:
|
||||||
self.ready.set()
|
self.ready.set()
|
||||||
raise SystemError(data.decode())
|
raise SystemError(data.decode())
|
||||||
if b'Done loading' in data:
|
if b'RPCS: RPC server listening on' in data:
|
||||||
self.ready.set()
|
self.ready.set()
|
||||||
|
|
||||||
def process_exited(self):
|
def process_exited(self):
|
||||||
|
@@ -235,39 +318,57 @@ class BlockchainProcess(asyncio.SubprocessProtocol):
         self.ready.set()


-class BlockchainNode:
-
-    P2SH_SEGWIT_ADDRESS = "p2sh-segwit"
-    BECH32_ADDRESS = "bech32"
+class WalletProcess(asyncio.SubprocessProtocol):

+    IGNORE_OUTPUT = [
+    ]
+
+    def __init__(self):
+        self.ready = asyncio.Event()
+        self.stopped = asyncio.Event()
+        self.log = log.getChild('lbcwallet')
+        self.transport: Optional[asyncio.transports.SubprocessTransport] = None
+
+    def pipe_data_received(self, fd, data):
+        if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):
+            if b'Error:' in data:
+                self.log.error(data.decode())
+            else:
+                self.log.info(data.decode())
+        if b'Error:' in data:
+            self.ready.set()
+            raise SystemError(data.decode())
+        if b'WLLT: Finished rescan' in data:
+            self.ready.set()
+
+    def process_exited(self):
+        self.stopped.set()
+        self.ready.set()
+
+
+class LBCDNode:
     def __init__(self, url, daemon, cli):
         self.latest_release_url = url
         self.project_dir = os.path.dirname(os.path.dirname(__file__))
         self.bin_dir = os.path.join(self.project_dir, 'bin')
         self.daemon_bin = os.path.join(self.bin_dir, daemon)
         self.cli_bin = os.path.join(self.bin_dir, cli)
-        self.log = log.getChild('blockchain')
-        self.data_path = None
+        self.log = log.getChild('lbcd')
+        self.data_path = tempfile.mkdtemp()
         self.protocol = None
         self.transport = None
-        self.block_expected = 0
         self.hostname = 'localhost'
-        self.peerport = 9246 + 2  # avoid conflict with default peer port
-        self.rpcport = 9245 + 2  # avoid conflict with default rpc port
+        self.peerport = 29246
+        self.rpcport = 29245
         self.rpcuser = 'rpcuser'
         self.rpcpassword = 'rpcpassword'
-        self.stopped = False
-        self.restart_ready = asyncio.Event()
-        self.restart_ready.set()
+        self.stopped = True
         self.running = asyncio.Event()

     @property
     def rpc_url(self):
         return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.rpcport}/'

-    def is_expected_block(self, e: BlockHeightEvent):
-        return self.block_expected == e.height
-
     @property
     def exists(self):
         return (
@@ -276,6 +377,12 @@ class BlockchainNode:
         )

     def download(self):
+        uname = platform.uname()
+        target_os = str.lower(uname.system)
+        target_arch = str.replace(uname.machine, 'x86_64', 'amd64')
+        target_platform = target_os + '_' + target_arch
+        self.latest_release_url = str.replace(self.latest_release_url, 'TARGET_PLATFORM', target_platform)
+
         downloaded_file = os.path.join(
             self.bin_dir,
             self.latest_release_url[self.latest_release_url.rfind('/')+1:]
@@ -309,72 +416,206 @@ class BlockchainNode:
         return self.exists or self.download()

     async def start(self):
-        assert self.ensure()
-        self.data_path = tempfile.mkdtemp()
-        loop = asyncio.get_event_loop()
-        asyncio.get_child_watcher().attach_loop(loop)
-        command = [
-            self.daemon_bin,
-            f'-datadir={self.data_path}', '-printtoconsole', '-regtest', '-server', '-txindex',
-            f'-rpcuser={self.rpcuser}', f'-rpcpassword={self.rpcpassword}', f'-rpcport={self.rpcport}',
-            f'-port={self.peerport}'
-        ]
-        self.log.info(' '.join(command))
-        while not self.stopped:
-            if self.running.is_set():
-                await asyncio.sleep(1)
-                continue
-            await self.restart_ready.wait()
-            try:
-                self.transport, self.protocol = await loop.subprocess_exec(
-                    BlockchainProcess, *command
-                )
-                await self.protocol.ready.wait()
-                assert not self.protocol.stopped.is_set()
-                self.running.set()
-            except asyncio.CancelledError:
-                self.running.clear()
-                raise
-            except Exception as e:
-                self.running.clear()
-                log.exception('failed to start lbrycrdd', exc_info=e)
+        if not self.stopped:
+            return
+        self.stopped = False
+        try:
+            assert self.ensure()
+            loop = asyncio.get_event_loop()
+            asyncio.get_child_watcher().attach_loop(loop)
+            command = [
+                self.daemon_bin,
+                '--notls',
+                f'--datadir={self.data_path}',
+                '--regtest', f'--listen=127.0.0.1:{self.peerport}', f'--rpclisten=127.0.0.1:{self.rpcport}',
+                '--txindex', f'--rpcuser={self.rpcuser}', f'--rpcpass={self.rpcpassword}'
+            ]
+            self.log.info(' '.join(command))
+            self.transport, self.protocol = await loop.subprocess_exec(
+                LBCDProcess, *command
+            )
+            await self.protocol.ready.wait()
+            assert not self.protocol.stopped.is_set()
+            self.running.set()
+        except asyncio.CancelledError:
+            self.running.clear()
+            self.stopped = True
+            raise
+        except Exception as e:
+            self.running.clear()
+            self.stopped = True
+            log.exception('failed to start lbcd', exc_info=e)
+            raise

     async def stop(self, cleanup=True):
+        if self.stopped:
+            return
+        try:
+            if self.transport:
+                self.transport.terminate()
+                await self.protocol.stopped.wait()
+                self.transport.close()
+        except Exception as e:
+            log.exception('failed to stop lbcd', exc_info=e)
+            raise
+        finally:
+            self.log.info("Done shutting down " + self.daemon_bin)
+            self.stopped = True
+            if cleanup:
+                self.cleanup()
+            self.running.clear()
+
+    def cleanup(self):
+        assert self.stopped
+        shutil.rmtree(self.data_path, ignore_errors=True)
+
+
+class LBCWalletNode:
+
+    P2SH_SEGWIT_ADDRESS = "p2sh-segwit"
+    BECH32_ADDRESS = "bech32"
+
+    def __init__(self, url, lbcwallet, cli):
+        self.latest_release_url = url
+        self.project_dir = os.path.dirname(os.path.dirname(__file__))
+        self.bin_dir = os.path.join(self.project_dir, 'bin')
+        self.lbcwallet_bin = os.path.join(self.bin_dir, lbcwallet)
+        self.cli_bin = os.path.join(self.bin_dir, cli)
+        self.log = log.getChild('lbcwallet')
+        self.protocol = None
+        self.transport = None
+        self.hostname = 'localhost'
+        self.lbcd_rpcport = 29245
+        self.lbcwallet_rpcport = 29244
+        self.rpcuser = 'rpcuser'
+        self.rpcpassword = 'rpcpassword'
+        self.data_path = tempfile.mkdtemp()
         self.stopped = True
+        self.running = asyncio.Event()
+        self.block_expected = 0
+        self.mining_addr = ''
+
+    @property
+    def rpc_url(self):
+        # FIXME: somehow the hub/sdk doesn't learn the blocks through the Walet RPC port, why?
+        # return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.lbcwallet_rpcport}/'
+        return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.lbcd_rpcport}/'
+
+    def is_expected_block(self, e: BlockHeightEvent):
+        return self.block_expected == e.height
+
+    @property
+    def exists(self):
+        return (
+            os.path.exists(self.lbcwallet_bin)
+        )
+
+    def download(self):
+        uname = platform.uname()
+        target_os = str.lower(uname.system)
+        target_arch = str.replace(uname.machine, 'x86_64', 'amd64')
+        target_platform = target_os + '_' + target_arch
+        self.latest_release_url = str.replace(self.latest_release_url, 'TARGET_PLATFORM', target_platform)
+
+        downloaded_file = os.path.join(
+            self.bin_dir,
+            self.latest_release_url[self.latest_release_url.rfind('/')+1:]
+        )
+
+        if not os.path.exists(self.bin_dir):
+            os.mkdir(self.bin_dir)
+
+        if not os.path.exists(downloaded_file):
+            self.log.info('Downloading: %s', self.latest_release_url)
+            with urllib.request.urlopen(self.latest_release_url) as response:
+                with open(downloaded_file, 'wb') as out_file:
+                    shutil.copyfileobj(response, out_file)
+
+        self.log.info('Extracting: %s', downloaded_file)
+
+        if downloaded_file.endswith('.zip'):
+            with zipfile.ZipFile(downloaded_file) as dotzip:
+                dotzip.extractall(self.bin_dir)
+                # zipfile bug https://bugs.python.org/issue15795
+                os.chmod(self.lbcwallet_bin, 0o755)
+
+        elif downloaded_file.endswith('.tar.gz'):
+            with tarfile.open(downloaded_file) as tar:
+                tar.extractall(self.bin_dir)
+
+        return self.exists
+
+    def ensure(self):
+        return self.exists or self.download()
+
+    async def start(self):
+        assert self.ensure()
+        loop = asyncio.get_event_loop()
+        asyncio.get_child_watcher().attach_loop(loop)
+
+        command = [
+            self.lbcwallet_bin,
+            '--noservertls', '--noclienttls',
+            '--regtest',
+            f'--rpcconnect=127.0.0.1:{self.lbcd_rpcport}', f'--rpclisten=127.0.0.1:{self.lbcwallet_rpcport}',
+            '--createtemp', f'--appdata={self.data_path}',
+            f'--username={self.rpcuser}', f'--password={self.rpcpassword}'
+        ]
+        self.log.info(' '.join(command))
+        try:
+            self.transport, self.protocol = await loop.subprocess_exec(
+                WalletProcess, *command
+            )
+            self.protocol.transport = self.transport
+            await self.protocol.ready.wait()
+            assert not self.protocol.stopped.is_set()
+            self.running.set()
+            self.stopped = False
+        except asyncio.CancelledError:
+            self.running.clear()
+            raise
+        except Exception as e:
+            self.running.clear()
+            log.exception('failed to start lbcwallet', exc_info=e)
+
+    def cleanup(self):
+        assert self.stopped
+        shutil.rmtree(self.data_path, ignore_errors=True)
+
+    async def stop(self, cleanup=True):
+        if self.stopped:
+            return
         try:
             self.transport.terminate()
             await self.protocol.stopped.wait()
             self.transport.close()
+        except Exception as e:
+            log.exception('failed to stop lbcwallet', exc_info=e)
+            raise
         finally:
+            self.log.info("Done shutting down " + self.lbcwallet_bin)
+            self.stopped = True
             if cleanup:
                 self.cleanup()
-
-    async def clear_mempool(self):
-        self.restart_ready.clear()
-        self.transport.terminate()
-        await self.protocol.stopped.wait()
-        self.transport.close()
-        self.running.clear()
-        os.remove(os.path.join(self.data_path, 'regtest', 'mempool.dat'))
-        self.restart_ready.set()
-        await self.running.wait()
-
-    def cleanup(self):
-        shutil.rmtree(self.data_path, ignore_errors=True)
+            self.running.clear()

     async def _cli_cmnd(self, *args):
         cmnd_args = [
-            self.cli_bin, f'-datadir={self.data_path}', '-regtest',
-            f'-rpcuser={self.rpcuser}', f'-rpcpassword={self.rpcpassword}', f'-rpcport={self.rpcport}'
+            self.cli_bin,
+            f'--rpcuser={self.rpcuser}', f'--rpcpass={self.rpcpassword}', '--notls', '--regtest', '--wallet'
         ] + list(args)
         self.log.info(' '.join(cmnd_args))
         loop = asyncio.get_event_loop()
         asyncio.get_child_watcher().attach_loop(loop)
         process = await asyncio.create_subprocess_exec(
-            *cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+            *cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
         )
-        out, _ = await process.communicate()
+        out, err = await process.communicate()
         result = out.decode().strip()
+        err = err.decode().strip()
+        if len(result) <= 0 and err.startswith('-'):
+            raise Exception(err)
+        if err and 'creating a default config file' not in err:
+            log.warning(err)
         self.log.info(result)
         if result.startswith('error code'):
             raise Exception(result)
@@ -382,7 +623,14 @@ class BlockchainNode:

     def generate(self, blocks):
         self.block_expected += blocks
-        return self._cli_cmnd('generate', str(blocks))
+        return self._cli_cmnd('generatetoaddress', str(blocks), self.mining_addr)
+
+    def generate_to_address(self, blocks, addr):
+        self.block_expected += blocks
+        return self._cli_cmnd('generatetoaddress', str(blocks), addr)
+
+    def wallet_passphrase(self, passphrase, timeout):
+        return self._cli_cmnd('walletpassphrase', passphrase, str(timeout))

     def invalidate_block(self, blockhash):
         return self._cli_cmnd('invalidateblock', blockhash)
@@ -399,11 +647,11 @@ class BlockchainNode:
     def get_raw_change_address(self):
         return self._cli_cmnd('getrawchangeaddress')

-    def get_new_address(self, address_type):
+    def get_new_address(self, address_type='legacy'):
         return self._cli_cmnd('getnewaddress', "", address_type)

     async def get_balance(self):
-        return float(await self._cli_cmnd('getbalance'))
+        return await self._cli_cmnd('getbalance')

     def send_to_address(self, address, amount):
         return self._cli_cmnd('sendtoaddress', address, str(amount))
@@ -415,7 +663,10 @@ class BlockchainNode:
         return self._cli_cmnd('createrawtransaction', json.dumps(inputs), json.dumps(outputs))

     async def sign_raw_transaction_with_wallet(self, tx):
-        return json.loads(await self._cli_cmnd('signrawtransactionwithwallet', tx))['hex'].encode()
+        # the "withwallet" portion should only come into play if we are doing segwit.
+        # and "withwallet" doesn't exist on lbcd yet.
+        result = await self._cli_cmnd('signrawtransaction', tx)
+        return json.loads(result)['hex'].encode()

     def decode_raw_transaction(self, tx):
         return self._cli_cmnd('decoderawtransaction', hexlify(tx.raw).decode())
@@ -61,8 +61,10 @@ class ConductorService:
         #set_logging(
         #    self.stack.ledger_module, logging.DEBUG, WebSocketLogHandler(self.send_message)
         #)
-        self.stack.blockchain_started or await self.stack.start_blockchain()
-        self.send_message({'type': 'service', 'name': 'blockchain', 'port': self.stack.blockchain_node.port})
+        self.stack.lbcd_started or await self.stack.start_lbcd()
+        self.send_message({'type': 'service', 'name': 'lbcd', 'port': self.stack.lbcd_node.port})
+        self.stack.lbcwallet_started or await self.stack.start_lbcwallet()
+        self.send_message({'type': 'service', 'name': 'lbcwallet', 'port': self.stack.lbcwallet_node.port})
         self.stack.spv_started or await self.stack.start_spv()
         self.send_message({'type': 'service', 'name': 'spv', 'port': self.stack.spv_node.port})
         self.stack.wallet_started or await self.stack.start_wallet()
@@ -74,7 +76,7 @@ class ConductorService:
     async def generate(self, request):
         data = await request.post()
         blocks = data.get('blocks', 1)
-        await self.stack.blockchain_node.generate(int(blocks))
+        await self.stack.lbcwallet_node.generate(int(blocks))
         return json_response({'blocks': blocks})

     async def transfer(self, request):
@@ -85,11 +87,14 @@ class ConductorService:
         if not address:
             raise ValueError("No address was provided.")
         amount = data.get('amount', 1)
-        txid = await self.stack.blockchain_node.send_to_address(address, amount)
         if self.stack.wallet_started:
-            await self.stack.wallet_node.ledger.on_transaction.where(
-                lambda e: e.tx.id == txid and e.address == address
+            watcher = self.stack.wallet_node.ledger.on_transaction.where(
+                lambda e: e.address == address  # and e.tx.id == txid -- might stall; see send_to_address_and_wait
             )
+            txid = await self.stack.lbcwallet_node.send_to_address(address, amount)
+            await watcher
+        else:
+            txid = await self.stack.lbcwallet_node.send_to_address(address, amount)
         return json_response({
             'address': address,
             'amount': amount,
@@ -98,7 +103,7 @@ class ConductorService:
     async def balance(self, _):
         return json_response({
-            'balance': await self.stack.blockchain_node.get_balance()
+            'balance': await self.stack.lbcwallet_node.get_balance()
         })

     async def log(self, request):
@@ -129,7 +134,7 @@ class ConductorService:
             'type': 'status',
             'height': self.stack.wallet_node.ledger.headers.height,
             'balance': satoshis_to_coins(await self.stack.wallet_node.account.get_balance()),
-            'miner': await self.stack.blockchain_node.get_balance()
+            'miner': await self.stack.lbcwallet_node.get_balance()
         })

     def send_message(self, msg):
@@ -108,9 +108,6 @@ class Response:

 class CodeMessageError(Exception):
-
-    def __init__(self, code, message):
-        super().__init__(code, message)

     @property
     def code(self):
         return self.args[0]
Some files were not shown because too many files have changed in this diff.