Compare commits
2297 commits
.github/workflows/main.yml (new file, 206 lines)
@@ -0,0 +1,206 @@
name: ci
on: ["push", "pull_request", "workflow_dispatch"]

jobs:

  lint:
    name: lint
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - name: extract pip cache
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
          restore-keys: ${{ runner.os }}-pip-
      - run: pip install --user --upgrade pip wheel
      - run: pip install -e .[lint]
      - run: make lint

  tests-unit:
    name: "tests / unit"
    strategy:
      matrix:
        os:
          - ubuntu-20.04
          - macos-latest
          - windows-latest
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - name: set pip cache dir
        shell: bash
        run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
      - name: extract pip cache
        uses: actions/cache@v3
        with:
          path: ${{ env.PIP_CACHE_DIR }}
          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
          restore-keys: ${{ runner.os }}-pip-
      - id: os-name
        uses: ASzc/change-string-case-action@v5
        with:
          string: ${{ runner.os }}
      - run: python -m pip install --user --upgrade pip wheel
      - if: startsWith(runner.os, 'linux')
        run: pip install -e .[test]
      - if: startsWith(runner.os, 'linux')
        env:
          HOME: /tmp
        run: make test-unit-coverage
      - if: startsWith(runner.os, 'linux') != true
        run: pip install -e .[test]
      - if: startsWith(runner.os, 'linux') != true
        env:
          HOME: /tmp
        run: coverage run --source=lbry -m unittest tests/unit/test_conf.py
      - name: submit coverage report
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          COVERALLS_FLAG_NAME: tests-unit-${{ steps.os-name.outputs.lowercase }}
          COVERALLS_PARALLEL: true
        run: |
          pip install coveralls
          coveralls --service=github

  tests-integration:
    name: "tests / integration"
    runs-on: ubuntu-20.04
    strategy:
      matrix:
        test:
          - datanetwork
          - blockchain
          - claims
          - takeovers
          - transactions
          - other
    steps:
      - name: Configure sysctl limits
        run: |
          sudo swapoff -a
          sudo sysctl -w vm.swappiness=1
          sudo sysctl -w fs.file-max=262144
          sudo sysctl -w vm.max_map_count=262144
      - name: Runs Elasticsearch
        uses: elastic/elastic-github-actions/elasticsearch@master
        with:
          stack-version: 7.12.1
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - if: matrix.test == 'other'
        run: |
          sudo apt-get update
          sudo apt-get install -y --no-install-recommends ffmpeg
      - name: extract pip cache
        uses: actions/cache@v3
        with:
          path: ./.tox
          key: tox-integration-${{ matrix.test }}-${{ hashFiles('setup.py') }}
          restore-keys: txo-integration-${{ matrix.test }}-
      - run: pip install tox coverage coveralls
      - if: matrix.test == 'claims'
        run: rm -rf .tox
      - run: tox -e ${{ matrix.test }}
      - name: submit coverage report
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          COVERALLS_FLAG_NAME: tests-integration-${{ matrix.test }}
          COVERALLS_PARALLEL: true
        run: |
          coverage combine tests
          coveralls --service=github

  coverage:
    needs: ["tests-unit", "tests-integration"]
    runs-on: ubuntu-20.04
    steps:
      - name: finalize coverage report submission
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          pip install coveralls
          coveralls --service=github --finish

  build:
    needs: ["lint", "tests-unit", "tests-integration"]
    name: "build / binary"
    strategy:
      matrix:
        os:
          - ubuntu-20.04
          - macos-latest
          - windows-latest
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - id: os-name
        uses: ASzc/change-string-case-action@v5
        with:
          string: ${{ runner.os }}
      - name: set pip cache dir
        shell: bash
        run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
      - name: extract pip cache
        uses: actions/cache@v3
        with:
          path: ${{ env.PIP_CACHE_DIR }}
          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
          restore-keys: ${{ runner.os }}-pip-
      - run: pip install pyinstaller==4.6
      - run: pip install -e .
      - if: startsWith(github.ref, 'refs/tags/v')
        run: python docker/set_build.py
      - if: startsWith(runner.os, 'linux') || startsWith(runner.os, 'mac')
        name: Build & Run (Unix)
        run: |
          pyinstaller --onefile --name lbrynet lbry/extras/cli.py
          dist/lbrynet --version
      - if: startsWith(runner.os, 'windows')
        name: Build & Run (Windows)
        run: |
          pip install pywin32==301
          pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py
          dist/lbrynet.exe --version
      - uses: actions/upload-artifact@v3
        with:
          name: lbrynet-${{ steps.os-name.outputs.lowercase }}
          path: dist/

  release:
    name: "release"
    if: startsWith(github.ref, 'refs/tags/v')
    needs: ["build"]
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v1
      - uses: actions/download-artifact@v2
      - name: upload binaries
        env:
          GITHUB_TOKEN: ${{ secrets.RELEASE_API_TOKEN }}
        run: |
          pip install githubrelease
          chmod +x lbrynet-macos/lbrynet
          chmod +x lbrynet-linux/lbrynet
          zip --junk-paths lbrynet-mac.zip lbrynet-macos/lbrynet
          zip --junk-paths lbrynet-linux.zip lbrynet-linux/lbrynet
          zip --junk-paths lbrynet-windows.zip lbrynet-windows/lbrynet.exe
          ls -lh
          githubrelease release lbryio/lbry-sdk info ${GITHUB_REF#refs/tags/}
          githubrelease asset lbryio/lbry-sdk upload ${GITHUB_REF#refs/tags/} \
            lbrynet-mac.zip lbrynet-linux.zip lbrynet-windows.zip
          githubrelease release lbryio/lbry-sdk publish ${GITHUB_REF#refs/tags/}
.github/workflows/release.yml (new file, 22 lines)
@@ -0,0 +1,22 @@
name: slack

on:
  release:
    types: [published]

jobs:
  release:
    name: "slack notification"
    runs-on: ubuntu-20.04
    steps:
      - uses: LoveToKnow/slackify-markdown-action@v1.0.0
        id: markdown
        with:
          text: "There is a new SDK release: ${{github.event.release.html_url}}\n${{ github.event.release.body }}"
      - uses: slackapi/slack-github-action@v1.14.0
        env:
          CHANGELOG: '<!channel> ${{ steps.markdown.outputs.text }}'
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_RELEASE_BOT_WEBHOOK }}
        with:
          payload: '{"type": "mrkdwn", "text": ${{ toJSON(env.CHANGELOG) }} }'
.gitignore (modified, +20 lines)
@@ -1,2 +1,22 @@
 /.idea
 /.DS_Store
+/build
+/dist
+/.tox
+/.coverage*
+/lbry-venv
+/venv
+/lbry/blockchain
+
+lbry.egg-info
+__pycache__
+_trial_temp/
+trending*.log
+
+/tests/integration/claims/files
+/tests/.coverage.*
+
+/lbry/wallet/bin
+
+/.vscode
+/.gitignore
190
.gitlab-ci.yml
|
@ -1,190 +0,0 @@
|
||||||
default:
|
|
||||||
image: python:3.7
|
|
||||||
|
|
||||||
|
|
||||||
#cache:
|
|
||||||
# directories:
|
|
||||||
# - $HOME/venv
|
|
||||||
# - $HOME/.cache/pip
|
|
||||||
# - $HOME/Library/Caches/pip
|
|
||||||
# - $HOME/Library/Caches/Homebrew
|
|
||||||
# - $TRAVIS_BUILD_DIR/.tox
|
|
||||||
|
|
||||||
|
|
||||||
stages:
|
|
||||||
- test
|
|
||||||
- build
|
|
||||||
- release
|
|
||||||
|
|
||||||
|
|
||||||
test:lint:
|
|
||||||
stage: test
|
|
||||||
script:
|
|
||||||
- make install tools
|
|
||||||
- make lint
|
|
||||||
|
|
||||||
test:lbry-unit:
|
|
||||||
stage: test
|
|
||||||
script:
|
|
||||||
- make install tools
|
|
||||||
- cd lbry && HOME=/tmp coverage run -p --source=lbry -m unittest discover -vv tests.unit
|
|
||||||
|
|
||||||
|
|
||||||
test:lbry-integ:
|
|
||||||
stage: test
|
|
||||||
script:
|
|
||||||
- pip install coverage tox-travis
|
|
||||||
- cd lbry && tox
|
|
||||||
|
|
||||||
|
|
||||||
.torba-tests: &torba_tests
|
|
||||||
stage: test
|
|
||||||
script:
|
|
||||||
- pip install coverage tox-travis
|
|
||||||
- cd torba && tox
|
|
||||||
|
|
||||||
test:torba-unit:
|
|
||||||
before_script:
|
|
||||||
- export TESTTYPE=unit
|
|
||||||
<<: *torba_tests
|
|
||||||
|
|
||||||
test:torba-integ:
|
|
||||||
before_script:
|
|
||||||
- export TESTTYPE=integration
|
|
||||||
<<: *torba_tests
|
|
||||||
|
|
||||||
test:json-api:
|
|
||||||
stage: test
|
|
||||||
script:
|
|
||||||
- make install tools
|
|
||||||
- cd lbry && HOME=/tmp coverage run -p --source=lbry scripts/generate_json_api.py
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
.build:
|
|
||||||
stage: build
|
|
||||||
artifacts:
|
|
||||||
expire_in: 1 day
|
|
||||||
paths:
|
|
||||||
- lbrynet-${OS}.zip
|
|
||||||
script:
|
|
||||||
- pip install pyinstaller
|
|
||||||
- pip install -e torba/.
|
|
||||||
- python3.7 docker/set_build.py
|
|
||||||
- pip install -e lbry/.
|
|
||||||
- pyinstaller --onefile --name lbrynet lbry/lbry/extras/cli.py
|
|
||||||
- chmod +x dist/lbrynet
|
|
||||||
- zip --junk-paths ${CI_PROJECT_DIR}/lbrynet-${OS}.zip dist/lbrynet # gitlab expects artifacts to be in $CI_PROJECT_DIR
|
|
||||||
- openssl dgst -sha256 ${CI_PROJECT_DIR}/lbrynet-${OS}.zip | egrep -o [0-9a-f]+$ # get sha256 of asset. works on mac and ubuntu
|
|
||||||
- dist/lbrynet --version
|
|
||||||
|
|
||||||
build:linux:
|
|
||||||
extends: .build
|
|
||||||
image: ubuntu:18.04 # cant use python3.7 image - binary won't run on ubuntu
|
|
||||||
variables:
|
|
||||||
OS: linux
|
|
||||||
before_script:
|
|
||||||
- apt-get update
|
|
||||||
- apt-get install -y --no-install-recommends software-properties-common zip curl build-essential
|
|
||||||
- add-apt-repository -y ppa:deadsnakes/ppa
|
|
||||||
- apt-get install -y --no-install-recommends python3.7-dev python3-setuptools python3-wheel
|
|
||||||
- python3.7 <(curl -q https://bootstrap.pypa.io/get-pip.py) # make sure we get pip with python3.7
|
|
||||||
|
|
||||||
build:mac:
|
|
||||||
extends: .build
|
|
  tags: [macos] # makes gitlab use the mac runner
  variables:
    OS: mac
    GIT_DEPTH: 5
    VENV: /tmp/gitlab-lbry-sdk-venv
  before_script:
    # - brew upgrade python || true
    - python3 --version | grep -q '^Python 3\.7\.' # dont upgrade python on every run. just make sure we're on the right Python
    # - pip3 install --user --upgrade pip virtualenv
    - pip3 --version | grep -q '\(python 3\.7\)'
    - virtualenv --python=python3.7 "${VENV}"
    - source "${VENV}/bin/activate"
  after_script:
    - rm -rf "${VENV}"

build:windows:
  extends: .build
  tags: [windows] # makes gitlab use the windows runner
  variables:
    OS: windows
    GIT_DEPTH: 5
  before_script:
    - ./docker/install_choco.ps1
    - choco install -y --x86 python3 7zip checksum
    # - python --version | findstr /B "Python 3\.7\." # dont upgrade python on every run. just make sure we're on the right Python
    # - pip --version | findstr /E '\(python 3\.7\)'
    - pip install virtualenv pywin32
    - virtualenv venv
    - venv/Scripts/activate.ps1
  after_script:
    - rmdir -Recurse venv
  script:
    - pip install pyinstaller
    - pip install -e torba/.
    - python docker/set_build.py
    - pip install -e lbry/.
    - pyinstaller --additional-hooks-dir=lbry/scripts/. --icon=lbry/icons/lbry256.ico -F -n lbrynet lbry/lbry/extras/cli.py
    - 7z a -tzip $env:CI_PROJECT_DIR/lbrynet-${OS}.zip dist/lbrynet.exe
    - checksum --type=sha256 --file=$env:CI_PROJECT_DIR/lbrynet-${OS}.zip
    - dist/lbrynet.exe --version


# s3 = upload asset to s3 (build.lbry.io)
# could be done by making it a yaml alias and putting it right into the build step. that way if one OS fails, the others still get uploaded
.s3:
  stage: release
  variables:
    GIT_STRATEGY: none
  script:
    - pip install awscli
    - S3_PATH="daemon/gitlab-build-${CI_PIPELINE_ID}_commit-${CI_COMMIT_SHA:0:7}$( if [ ! -z ${CI_COMMIT_TAG} ]; then echo _tag-${CI_COMMIT_TAG}; else echo _branch-${CI_COMMIT_REF_NAME}; fi )"
    - AWS_ACCESS_KEY_ID=${ARTIFACTS_KEY} AWS_SECRET_ACCESS_KEY=${ARTIFACTS_SECRET} AWS_REGION=${ARTIFACTS_REGION}
      aws s3 cp lbrynet-${OS}.zip s3://${ARTIFACTS_BUCKET}/${S3_PATH}/lbrynet-${OS}.zip

s3:linux:
  extends: .s3
  variables: {OS: linux}
  dependencies: ["build:linux"]

s3:mac:
  extends: .s3
  variables: {OS: mac}
  dependencies: ["build:mac"]

s3:windows:
  extends: .s3
  variables: {OS: windows}
  dependencies: ["build:windows"]

# release = upload assets to github when there's a tagged release
.release:
  stage: release
  rules:
    - if: '$CI_COMMIT_TAG =~ /^v[0-9\.]+$/'
      when: on_success
  variables:
    GIT_STRATEGY: none
  script:
    - pip install githubrelease
    - githubrelease --no-progress --github-token ${GITHUB_CI_USER_ACCESS_TOKEN} asset lbryio/lbry-sdk upload ${CI_COMMIT_TAG} lbrynet-${OS}.zip

release:linux:
  extends: .release
  variables: {OS: linux}
  dependencies: ["build:linux"]

release:mac:
  extends: .release
  variables: {OS: mac}
  dependencies: ["build:mac"]

release:windows:
  extends: .release
  variables: {OS: windows}
  dependencies: ["build:windows"]
|
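For reference, this is roughly how the `S3_PATH` expression in the `.s3` job expands, using GitLab's predefined CI variables; the values below are placeholders:

```bash
CI_PIPELINE_ID=123456
CI_COMMIT_SHA=0123abcd4567ef890123abcd4567ef8901234567
CI_COMMIT_REF_NAME=master
CI_COMMIT_TAG=   # set only on tag pipelines

S3_PATH="daemon/gitlab-build-${CI_PIPELINE_ID}_commit-${CI_COMMIT_SHA:0:7}$( if [ ! -z ${CI_COMMIT_TAG} ]; then echo _tag-${CI_COMMIT_TAG}; else echo _branch-${CI_COMMIT_REF_NAME}; fi )"
echo "${S3_PATH}"
# daemon/gitlab-build-123456_commit-0123abc_branch-master
```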
142
.travis.yml
|
@ -1,142 +0,0 @@
|
||||||
dist: xenial
|
|
||||||
language: python
|
|
||||||
python: "3.7"
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
include:
|
|
||||||
|
|
||||||
- stage: code quality
|
|
||||||
name: "pylint & mypy"
|
|
||||||
install:
|
|
||||||
- make install tools
|
|
||||||
script: make lint
|
|
||||||
|
|
||||||
- stage: test
|
|
||||||
name: "LBRY Unit Tests"
|
|
||||||
install:
|
|
||||||
- make install tools
|
|
||||||
script:
|
|
||||||
- cd lbry && HOME=/tmp coverage run -p --source=lbry -m unittest discover -vv tests.unit
|
|
||||||
after_success:
|
|
||||||
- coverage combine lbry/
|
|
||||||
|
|
||||||
- name: "LBRY Integration Tests"
|
|
||||||
install:
|
|
||||||
- pip install coverage tox-travis
|
|
||||||
- sudo mount -o mode=1777,nosuid,nodev -t tmpfs tmpfs /tmp
|
|
||||||
script: cd lbry && tox
|
|
||||||
after_success:
|
|
||||||
- coverage combine lbry
|
|
||||||
|
|
||||||
- &torba-tests
|
|
||||||
name: "Torba Unit Tests"
|
|
||||||
env: TESTTYPE=unit
|
|
||||||
install:
|
|
||||||
- pip install coverage tox-travis
|
|
||||||
script: cd torba && tox
|
|
||||||
after_success:
|
|
||||||
- coverage combine torba/tests
|
|
||||||
|
|
||||||
- <<: *torba-tests
|
|
||||||
name: "Torba Integration Tests"
|
|
||||||
env: TESTTYPE=integration
|
|
||||||
|
|
||||||
- name: "Run Examples"
|
|
||||||
install:
|
|
||||||
- make install tools
|
|
||||||
script:
|
|
||||||
- cd lbry && HOME=/tmp coverage run -p --source=lbry scripts/generate_json_api.py
|
|
||||||
after_success:
|
|
||||||
- coverage combine lbry
|
|
||||||
|
|
||||||
- &build
|
|
||||||
stage: build
|
|
||||||
name: "Linux"
|
|
||||||
env: OS=linux
|
|
||||||
install:
|
|
||||||
- pip install pyinstaller awscli
|
|
||||||
- python docker/set_build.py
|
|
||||||
- pip install -e torba/.
|
|
||||||
- pip install -e lbry/.
|
|
||||||
script:
|
|
||||||
- pyinstaller -F -n lbrynet lbry/lbry/extras/cli.py
|
|
||||||
- chmod +x dist/lbrynet
|
|
||||||
- zip --junk-paths lbrynet-${OS}.zip dist/lbrynet
|
|
||||||
- shasum -a 256 -b lbrynet-${OS}.zip
|
|
||||||
- dist/lbrynet --version
|
|
||||||
after_success:
|
|
||||||
- aws configure set aws_access_key_id $ARTIFACTS_KEY
|
|
||||||
- aws configure set aws_secret_access_key $ARTIFACTS_SECRET
|
|
||||||
- aws configure set region us-east-1
|
|
||||||
- export S3_PATH="daemon/build-${TRAVIS_BUILD_NUMBER}_commit-${TRAVIS_COMMIT:0:7}_branch-${TRAVIS_BRANCH}$([ ! -z ${TRAVIS_TAG} ] && echo _tag-${TRAVIS_TAG})"
|
|
||||||
- aws s3 cp lbrynet-${OS}.zip s3://build.lbry.io/${S3_PATH}/lbrynet-${OS}.zip
|
|
||||||
# deploy:
|
|
||||||
# provider: releases
|
|
||||||
# api_key:
|
|
||||||
# secure: "unnR+aSJ1937Cl1PyBBZzGuZvV5W5TGcXELhXTgyOeeI6FgO/j80qmbNxJDA7qdFH/hvVicQFWoflhZu2dxN5rYP5BQJW3q3XoOLY3XAc1s1vicFkwqn3TIfdFiJTz+/D9eBUBBhHKeYFxm3M+thvklTLgjKl6fflh14NfGuNTevK9yQke8wewW3f9UmFTo1qNOPF1OsTZRbwua6oQYa59P+KukoPt4Dsu1VtILtTkj7hfEsUL79cjotwO3gkhYftxbl/xeDSZWOt+9Nhb8ZKmQG/uDx4JiTMm5lWRk4QB7pUujZ1CftxCYWz/lJx9nuJpdCOgP624tcHymErNlD+vGLwMTNslcXGYkAJH6xvGyxBJ+Obc8vRVnZbRM26BfH34TcPK1ueRxHSrDUbzMIIUsgcoZAxBuim8uDPp+K7bGqiygzSs2vQfr9U5Jhe9/F8sPdtNctfJZEfgmthNTeVFjyNsGIfIt754uGSfACqM7wDLh6fbKx7M+FHlNyOdvYCrbKUOAYXmTikYIpVDvlaaeMO+N+uW8Rhvm1j+JU7CVwhMavLySaPVc6Dt5OxiMMmxw9mVrjW9bBPjS5AkrS5MOA13T5wapoLzH6+gE92U4HzA6ilMcwRaQPSFnK2JU7tzyt2Wy1PH4MjHowXI2WyICG1x510dD3tX1P/1px8ro="
|
|
||||||
# file: lbrynet-${OS}.zip
|
|
||||||
# skip_cleanup: true
|
|
||||||
# overwrite: true
|
|
||||||
# draft: true
|
|
||||||
# on:
|
|
||||||
# tags: true
|
|
||||||
|
|
||||||
- <<: *build
|
|
||||||
name: "Mac"
|
|
||||||
os: osx
|
|
||||||
osx_image: xcode8.3
|
|
||||||
language: shell
|
|
||||||
env: OS=mac
|
|
||||||
before_install:
|
|
||||||
- brew uninstall mercurial
|
|
||||||
- brew upgrade python || true
|
|
||||||
- pip3 install --user --upgrade pip virtualenv
|
|
||||||
- /Users/travis/Library/Python/3.7/bin/virtualenv --clear $HOME/venv
|
|
||||||
- source $HOME/venv/bin/activate
|
|
||||||
before_cache:
|
|
||||||
- brew cleanup
|
|
||||||
|
|
||||||
- <<: *build
|
|
||||||
name: "Windows"
|
|
||||||
os: windows
|
|
||||||
language: shell
|
|
||||||
env:
|
|
||||||
- OS=windows
|
|
||||||
- PATH=/c/Python37:/c/Python37/Scripts:/C/Windows/System32/downlevel:$PATH
|
|
||||||
before_install:
|
|
||||||
- choco install python --version=3.7.4 --x86
|
|
||||||
- python -m pip install --upgrade pip
|
|
||||||
- pip install pywin32
|
|
||||||
script:
|
|
||||||
- pyinstaller --additional-hooks-dir=lbry/scripts/. --icon=lbry/icons/lbry256.ico -F -n lbrynet lbry/lbry/extras/cli.py
|
|
||||||
- cd dist
|
|
||||||
- 7z a -tzip lbrynet-windows.zip lbrynet.exe
|
|
||||||
- sha256sum -b lbrynet-windows.zip
|
|
||||||
- ./lbrynet.exe --version
|
|
||||||
|
|
||||||
- if: tag IS present
|
|
||||||
stage: build
|
|
||||||
name: "Wallet Server Docker Image - Tagged Release"
|
|
||||||
script:
|
|
||||||
- set -e
|
|
||||||
- echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
|
|
||||||
- travis_retry docker build -t lbry/wallet-server:$TRAVIS_TAG -t lbry/wallet-server:latest-release -f docker/Dockerfile.wallet_server .
|
|
||||||
- docker push lbry/wallet-server:$TRAVIS_TAG
|
|
||||||
- docker push lbry/wallet-server:latest-release
|
|
||||||
|
|
||||||
- if: tag IS blank AND branch = master AND NOT type IN (pull_request)
|
|
||||||
stage: build
|
|
||||||
name: "Wallet Server Docker Image - Master"
|
|
||||||
script:
|
|
||||||
- set -e
|
|
||||||
- echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
|
|
||||||
- travis_retry docker build -t lbry/wallet-server:master -f docker/Dockerfile.wallet_server .
|
|
||||||
- docker push lbry/wallet-server:master
|
|
||||||
|
|
||||||
cache:
|
|
||||||
directories:
|
|
||||||
- $HOME/venv
|
|
||||||
- $HOME/.cache/pip
|
|
||||||
- $HOME/Library/Caches/pip
|
|
||||||
- $HOME/Library/Caches/Homebrew
|
|
||||||
- $TRAVIS_BUILD_DIR/.tox
|
|
1019
CHANGELOG.md
156
INSTALL.md
|
@ -9,20 +9,29 @@ Here's a video walkthrough of this setup, which is itself hosted by the LBRY net
|
||||||
|
|
||||||
## Prerequisites
|
## Prerequisites
|
||||||
|
|
||||||
Running `lbrynet` from source requires Python 3.7 or higher. Get the installer for your OS [here](https://www.python.org/downloads/release/python-370/).
|
Running `lbrynet` from source requires Python 3.7. Get the installer for your OS [here](https://www.python.org/downloads/release/python-370/).
|
||||||
|
|
||||||
After installing python 3, you'll need to install some additional libraries depending on your operating system.
|
After installing Python 3.7, you'll need to install some additional libraries depending on your operating system.
|
||||||
|
|
||||||
|
Because of [issue #2769](https://github.com/lbryio/lbry-sdk/issues/2769)
|
||||||
|
at the moment the `lbrynet` daemon will only work correctly with Python 3.7.
|
||||||
|
If Python 3.8+ is used, the daemon will start but the RPC server
|
||||||
|
may not accept messages, returning the following:
|
||||||
|
```
|
||||||
|
Could not connect to daemon. Are you sure it's running?
|
||||||
|
```
|
||||||
|
|
||||||
### macOS
|
### macOS
|
||||||
|
|
||||||
macOS users will need to install [xcode command line tools](https://developer.xamarin.com/guides/testcloud/calabash/configuring/osx/install-xcode-command-line-tools/) and [homebrew](http://brew.sh/).
|
macOS users will need to install [xcode command line tools](https://developer.xamarin.com/guides/testcloud/calabash/configuring/osx/install-xcode-command-line-tools/) and [homebrew](http://brew.sh/).
|
||||||
|
|
||||||
These environment variables also need to be set:
|
These environment variables also need to be set:
|
||||||
1. PYTHONUNBUFFERED=1
|
```
|
||||||
2. EVENT_NOKQUEUE=1
|
PYTHONUNBUFFERED=1
|
||||||
|
EVENT_NOKQUEUE=1
|
||||||
|
```
|
||||||
|
|
||||||
Remaining dependencies can then be installed by running:
|
Remaining dependencies can then be installed by running:
|
||||||
|
|
||||||
```
|
```
|
||||||
brew install python protobuf
|
brew install python protobuf
|
||||||
```
|
```
|
||||||
|
@ -31,14 +40,17 @@ Assistance installing Python3: https://docs.python-guide.org/starting/install3/o
|
||||||
|
|
||||||
### Linux
|
### Linux
|
||||||
|
|
||||||
On Ubuntu (16.04 minimum, we recommend 18.04), install the following:
|
On Ubuntu (we recommend 18.04 or 20.04), install the following:
|
||||||
|
|
||||||
```
|
```
|
||||||
sudo add-apt-repository ppa:deadsnakes/ppa
|
sudo add-apt-repository ppa:deadsnakes/ppa
|
||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get install build-essential python3.7 python3.7-dev git python3.7-venv libssl-dev python-protobuf
|
sudo apt-get install build-essential python3.7 python3.7-dev git python3.7-venv libssl-dev python-protobuf
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The [deadsnakes PPA](https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa) provides Python 3.7
|
||||||
|
for those Ubuntu distributions that no longer have it in their
|
||||||
|
official repositories.
|
||||||
|
|
||||||
On Raspbian, you will also need to install `python-pyparsing`.
|
On Raspbian, you will also need to install `python-pyparsing`.
|
||||||
|
|
||||||
If you're running another Linux distro, install the equivalent of the above packages for your system.
|
If you're running another Linux distro, install the equivalent of the above packages for your system.
|
||||||
|
@ -47,65 +59,119 @@ If you're running another Linux distro, install the equivalent of the above pack
|
||||||
|
|
||||||
### Linux/Mac
|
### Linux/Mac
|
||||||
|
|
||||||
To install on Linux/Mac:
|
Clone the repository:
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/lbryio/lbry-sdk.git
|
||||||
|
cd lbry-sdk
|
||||||
|
```
|
||||||
|
|
||||||
```
|
Create a Python virtual environment for lbry-sdk:
|
||||||
Clone the repository:
|
```bash
|
||||||
$ git clone https://github.com/lbryio/lbry-sdk.git
|
python3.7 -m venv lbry-venv
|
||||||
$ cd lbry-sdk
|
```
|
||||||
|
|
||||||
Create a Python virtual environment for lbry-sdk:
|
Activate virtual environment:
|
||||||
$ python3.7 -m venv lbry-venv
|
```bash
|
||||||
|
source lbry-venv/bin/activate
|
||||||
|
```
|
||||||
|
|
||||||
Activating lbry-sdk virtual environment:
|
Make sure you're on Python 3.7+ as default in the virtual environment:
|
||||||
$ source lbry-venv/bin/activate
|
```bash
|
||||||
|
python --version
|
||||||
|
```
|
||||||
|
|
||||||
Make sure you're on Python 3.7+ (as the default Python in virtual environment):
|
Install packages:
|
||||||
$ python --version
|
```bash
|
||||||
|
make install
|
||||||
|
```
|
||||||
|
|
||||||
Install packages:
|
If you are on Linux and using PyCharm, generate the initial configs:
|
||||||
$ make install
|
```bash
|
||||||
|
make idea
|
||||||
|
```
|
||||||
|
|
||||||
If you are on Linux and using PyCharm, generate the initial configs:
|
To verify your installation, `which lbrynet` should return a path inside
|
||||||
$ make idea
|
of the `lbry-venv` folder.
|
||||||
```
|
```bash
|
||||||
|
(lbry-venv) $ which lbrynet
|
||||||
|
/opt/lbry-sdk/lbry-venv/bin/lbrynet
|
||||||
|
```
|
||||||
|
|
||||||
To verify your installation, `which lbrynet` should return a path inside of the `lbry-venv` folder created by the `python3.7 -m venv lbry-venv` command.
|
To exit the virtual environment simply use the command `deactivate`.
|
||||||
|
|
||||||
### Windows
|
### Windows
|
||||||
|
|
||||||
To install on Windows:
|
Clone the repository:
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/lbryio/lbry-sdk.git
|
||||||
|
cd lbry-sdk
|
||||||
|
```
|
||||||
|
|
||||||
```
|
Create a Python virtual environment for lbry-sdk:
|
||||||
Clone the repository:
|
```bash
|
||||||
> git clone https://github.com/lbryio/lbry-sdk.git
|
python -m venv lbry-venv
|
||||||
> cd lbry-sdk
|
```
|
||||||
|
|
||||||
Create a Python virtual environment for lbry-sdk:
|
Activate virtual environment:
|
||||||
> python -m venv lbry-venv
|
```bash
|
||||||
|
lbry-venv\Scripts\activate
|
||||||
|
```
|
||||||
|
|
||||||
Activating lbry-sdk virtual environment:
|
Install packages:
|
||||||
> lbry-venv\Scripts\activate
|
```bash
|
||||||
|
pip install -e .
|
||||||
Install packages:
|
```
|
||||||
> cd torba
|
|
||||||
> pip install -e .
|
|
||||||
> cd ../lbry
|
|
||||||
> pip install -e .
|
|
||||||
```
|
|
||||||
|
|
||||||
## Run the tests
|
## Run the tests
|
||||||
|
### Elasticsearch
|
||||||
|
|
||||||
To run the unit tests from the repo directory:
|
For running integration tests, Elasticsearch is required to be available at localhost:9200/
|
||||||
|
|
||||||
```
|
The easiest way to start it is using docker with:
|
||||||
python -m unittest discover -s lbry tests.unit
|
```bash
|
||||||
```
|
make elastic-docker
|
||||||
|
```
|
||||||
|
|
||||||
|
Alternative installation methods are available [at Elasticsearch website](https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html).
|
||||||
|
|
||||||
|
To run the unit and integration tests from the repo directory:
|
||||||
|
```
|
||||||
|
python -m unittest discover tests.unit
|
||||||
|
python -m unittest discover tests.integration
|
||||||
|
```
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
|
|
||||||
To start the API server:
|
To start the API server:
|
||||||
`lbrynet start`
|
```
|
||||||
|
lbrynet start
|
||||||
|
```
|
||||||
|
|
||||||
|
Whenever the code inside [lbry-sdk/lbry](./lbry)
|
||||||
|
is modified we should run `make install` to recompile the `lbrynet`
|
||||||
|
executable with the newest code.
|
||||||
|
|
||||||
|
## Development
|
||||||
|
|
||||||
|
When developing, remember to enter the environment,
|
||||||
|
and, if you wish, start the server interactively.
|
||||||
|
```bash
|
||||||
|
$ source lbry-venv/bin/activate
|
||||||
|
|
||||||
|
(lbry-venv) $ python lbry/extras/cli.py start
|
||||||
|
```
|
||||||
|
|
||||||
|
Parameters can be passed in the same way.
|
||||||
|
```bash
|
||||||
|
(lbry-venv) $ python lbry/extras/cli.py wallet balance
|
||||||
|
```
|
||||||
|
|
||||||
|
If a Python debugger (`pdb` or `ipdb`) is installed we can also start it
|
||||||
|
in this way, set up break points, and step through the code.
|
||||||
|
```bash
|
||||||
|
(lbry-venv) $ pip install ipdb
|
||||||
|
|
||||||
|
(lbry-venv) $ ipdb lbry/extras/cli.py
|
||||||
|
```
|
||||||
|
|
||||||
Happy hacking!
|
Happy hacking!
|
||||||
|
|
LICENSE (2 changed lines)

@ -1,6 +1,6 @@
 The MIT License (MIT)
 
-Copyright (c) 2015-2019 LBRY Inc
+Copyright (c) 2015-2022 LBRY Inc
 
 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,
@ -1,4 +1,4 @@
 include README.md
 include CHANGELOG.md
 include LICENSE
-recursive-include torba *.txt *.py
+recursive-include lbry *.txt *.py
Makefile (36 changed lines)

@ -1,26 +1,26 @@
-.PHONY: install tools lint test idea
+.PHONY: install tools lint test test-unit test-unit-coverage test-integration idea
 
 install:
-    CFLAGS="-DSQLITE_MAX_VARIABLE_NUMBER=2500000" pip install -U https://github.com/rogerbinns/apsw/releases/download/3.30.1-r1/apsw-3.30.1-r1.zip \
-    --global-option=fetch \
-    --global-option=--version --global-option=3.30.1 --global-option=--all \
-    --global-option=build --global-option=--enable --global-option=fts5
-    cd torba && pip install -e .
-    cd lbry && pip install -e .
-
-tools:
-    pip install mypy==0.701
-    pip install coverage astroid pylint
+    pip install -e .
 
 lint:
-    cd lbry && pylint lbry
-    cd torba && pylint --rcfile=setup.cfg torba
-    cd torba && mypy --ignore-missing-imports torba
+    pylint --rcfile=setup.cfg lbry
+    #mypy --ignore-missing-imports lbry
 
-test:
-    cd lbry && tox
-    cd torba && tox
+test: test-unit test-integration
+
+test-unit:
+    python -m unittest discover tests.unit
+
+test-unit-coverage:
+    coverage run --source=lbry -m unittest discover -vv tests.unit
+
+test-integration:
+    tox
 
 idea:
     mkdir -p .idea
-    cp -r lbry/scripts/idea/* .idea
+    cp -r scripts/idea/* .idea
+
+elastic-docker:
+    docker run -d -v lbryhub:/usr/share/elasticsearch/data -p 9200:9200 -p 9300:9300 -e"ES_JAVA_OPTS=-Xms512m -Xmx512m" -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.12.1
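The reworked targets chain together for a typical local run; a sketch, assuming the virtual environment from INSTALL.md is active and Docker is available for the Elasticsearch container:

```bash
make install            # editable install of the sdk
make elastic-docker     # single-node Elasticsearch on localhost:9200 for the integration tests
make test-unit
make test-integration
```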
README.md (18 changed lines)

@ -1,15 +1,15 @@
-# <img src="https://raw.githubusercontent.com/lbryio/lbry-sdk/master/lbry/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![Build Status](https://travis-ci.org/lbryio/lbry-sdk.svg?branch=master)](https://travis-ci.org/lbryio/lbry-sdk)
+# <img src="https://raw.githubusercontent.com/lbryio/lbry-sdk/master/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![build](https://github.com/lbryio/lbry-sdk/actions/workflows/main.yml/badge.svg)](https://github.com/lbryio/lbry-sdk/actions/workflows/main.yml) [![coverage](https://coveralls.io/repos/github/lbryio/lbry-sdk/badge.svg)](https://coveralls.io/github/lbryio/lbry-sdk)
 
 LBRY is a decentralized peer-to-peer protocol for publishing and accessing digital content. It utilizes the [LBRY blockchain](https://github.com/lbryio/lbrycrd) as a global namespace and database of digital content. Blockchain entries contain searchable content metadata, identities, rights and access rules. LBRY also provides a data network that consists of peers (seeders) uploading and downloading data from other peers, possibly in exchange for payments, as well as a distributed hash table used by peers to discover other peers.
 
 LBRY SDK for Python is currently the most fully featured implementation of the LBRY Network protocols and includes many useful components and tools for building decentralized applications. Primary features and components include:
 
-* Built on Python 3.7+ and `asyncio`.
-* Kademlia DHT (Distributed Hash Table) implementation for finding peers to download from and announcing to peers what we have to host ([lbry.dht](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/dht)).
-* Blob exchange protocol for transferring encrypted blobs of content and negotiating payments ([lbry.blob_exchange](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/blob_exchange)).
-* Protobuf schema for encoding and decoding metadata stored on the blockchain ([lbry.schema](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/schema)).
-* Wallet implementation for the LBRY blockchain ([lbry.wallet](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/wallet)).
-* Daemon with a JSON-RPC API to ease building end user applications in any language and for automating various tasks ([lbry.extras.daemon](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/extras/daemon)).
+* Built on Python 3.7 and `asyncio`.
+* Kademlia DHT (Distributed Hash Table) implementation for finding peers to download from and announcing to peers what we have to host ([lbry.dht](https://github.com/lbryio/lbry-sdk/tree/master/lbry/dht)).
+* Blob exchange protocol for transferring encrypted blobs of content and negotiating payments ([lbry.blob_exchange](https://github.com/lbryio/lbry-sdk/tree/master/lbry/blob_exchange)).
+* Protobuf schema for encoding and decoding metadata stored on the blockchain ([lbry.schema](https://github.com/lbryio/lbry-sdk/tree/master/lbry/schema)).
+* Wallet implementation for the LBRY blockchain ([lbry.wallet](https://github.com/lbryio/lbry-sdk/tree/master/lbry/wallet)).
+* Daemon with a JSON-RPC API to ease building end user applications in any language and for automating various tasks ([lbry.extras.daemon](https://github.com/lbryio/lbry-sdk/tree/master/lbry/extras/daemon)).
 
 ## Installation
 
@ -41,7 +41,7 @@ This project is MIT licensed. For the full license, see [LICENSE](LICENSE).
 
 ## Security
 
-We take security seriously. Please contact security@lbry.com regarding any security issues. [Our GPG key is here](https://lbry.com/faq/gpg-key) if you need it.
+We take security seriously. Please contact security@lbry.com regarding any security issues. [Our PGP key is here](https://lbry.com/faq/pgp-key) if you need it.
 
 ## Contact
 
@ -53,4 +53,4 @@ The documentation for the API can be found [here](https://lbry.tech/api/sdk).
 
 Daemon defaults, ports, and other settings are documented [here](https://lbry.tech/resources/daemon-settings).
 
-Settings can be configured using a daemon-settings.yml file. An example can be found [here](https://github.com/lbryio/lbry-sdk/blob/master/lbry/example_daemon_settings.yml).
+Settings can be configured using a daemon-settings.yml file. An example can be found [here](https://github.com/lbryio/lbry-sdk/blob/master/example_daemon_settings.yml).
SECURITY.md (new file, +9)

# Security Policy

## Supported Versions

While we are not at v1.0 yet, only the latest release will be supported.

## Reporting a Vulnerability

See https://lbry.com/faq/security
43
docker/Dockerfile.dht_node
Normal file
|
@ -0,0 +1,43 @@
|
||||||
|
FROM debian:10-slim
|
||||||
|
|
||||||
|
ARG user=lbry
|
||||||
|
ARG projects_dir=/home/$user
|
||||||
|
ARG db_dir=/database
|
||||||
|
|
||||||
|
ARG DOCKER_TAG
|
||||||
|
ARG DOCKER_COMMIT=docker
|
||||||
|
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
|
||||||
|
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get -y --no-install-recommends install \
|
||||||
|
wget \
|
||||||
|
automake libtool \
|
||||||
|
tar unzip \
|
||||||
|
build-essential \
|
||||||
|
pkg-config \
|
||||||
|
libleveldb-dev \
|
||||||
|
python3.7 \
|
||||||
|
python3-dev \
|
||||||
|
python3-pip \
|
||||||
|
python3-wheel \
|
||||||
|
python3-setuptools && \
|
||||||
|
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
|
||||||
|
|
||||||
|
COPY . $projects_dir
|
||||||
|
RUN chown -R $user:$user $projects_dir
|
||||||
|
RUN mkdir -p $db_dir
|
||||||
|
RUN chown -R $user:$user $db_dir
|
||||||
|
|
||||||
|
USER $user
|
||||||
|
WORKDIR $projects_dir
|
||||||
|
|
||||||
|
RUN python3 -m pip install -U setuptools pip
|
||||||
|
RUN make install
|
||||||
|
RUN python3 docker/set_build.py
|
||||||
|
RUN rm ~/.cache -rf
|
||||||
|
VOLUME $db_dir
|
||||||
|
ENTRYPOINT ["python3", "scripts/dht_node.py"]
|
||||||
|
|
|
@ -1,17 +1,26 @@
|
||||||
FROM ubuntu:19.10
|
FROM debian:10-slim
|
||||||
|
|
||||||
ARG user=lbry
|
ARG user=lbry
|
||||||
ARG db_dir=/database
|
ARG db_dir=/database
|
||||||
ARG projects_dir=/home/$user
|
ARG projects_dir=/home/$user
|
||||||
|
|
||||||
|
ARG DOCKER_TAG
|
||||||
|
ARG DOCKER_COMMIT=docker
|
||||||
|
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
|
||||||
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get -y --no-install-recommends install \
|
apt-get -y --no-install-recommends install \
|
||||||
wget \
|
wget \
|
||||||
|
tar unzip \
|
||||||
build-essential \
|
build-essential \
|
||||||
python3 \
|
automake libtool \
|
||||||
|
pkg-config \
|
||||||
|
libleveldb-dev \
|
||||||
|
python3.7 \
|
||||||
python3-dev \
|
python3-dev \
|
||||||
python3-pip \
|
python3-pip \
|
||||||
python3-wheel \
|
python3-wheel \
|
||||||
|
python3-cffi \
|
||||||
python3-setuptools && \
|
python3-setuptools && \
|
||||||
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
|
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
|
||||||
rm -rf /var/lib/apt/lists/*
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
@ -28,6 +37,7 @@ WORKDIR $projects_dir
|
||||||
|
|
||||||
RUN pip install uvloop
|
RUN pip install uvloop
|
||||||
RUN make install
|
RUN make install
|
||||||
|
RUN python3 docker/set_build.py
|
||||||
RUN rm ~/.cache -rf
|
RUN rm ~/.cache -rf
|
||||||
|
|
||||||
# entry point
|
# entry point
|
||||||
|
@ -39,7 +49,6 @@ ENV TCP_PORT=$tcp_port
|
||||||
ENV HOST=$host
|
ENV HOST=$host
|
||||||
ENV DAEMON_URL=$daemon_url
|
ENV DAEMON_URL=$daemon_url
|
||||||
ENV DB_DIRECTORY=$db_dir
|
ENV DB_DIRECTORY=$db_dir
|
||||||
ENV BANDWIDTH_LIMIT=1000000000000000000000000000000000000000000
|
|
||||||
ENV MAX_SESSIONS=1000000000
|
ENV MAX_SESSIONS=1000000000
|
||||||
ENV MAX_SEND=1000000000000000000
|
ENV MAX_SEND=1000000000000000000
|
||||||
ENV EVENT_LOOP_POLICY=uvloop
|
ENV EVENT_LOOP_POLICY=uvloop
|
||||||
|
|
45
docker/Dockerfile.web
Normal file
|
@ -0,0 +1,45 @@
|
||||||
|
FROM debian:10-slim
|
||||||
|
|
||||||
|
ARG user=lbry
|
||||||
|
ARG downloads_dir=/database
|
||||||
|
ARG projects_dir=/home/$user
|
||||||
|
|
||||||
|
ARG DOCKER_TAG
|
||||||
|
ARG DOCKER_COMMIT=docker
|
||||||
|
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
|
||||||
|
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get -y --no-install-recommends install \
|
||||||
|
wget \
|
||||||
|
automake libtool \
|
||||||
|
tar unzip \
|
||||||
|
build-essential \
|
||||||
|
pkg-config \
|
||||||
|
libleveldb-dev \
|
||||||
|
python3.7 \
|
||||||
|
python3-dev \
|
||||||
|
python3-pip \
|
||||||
|
python3-wheel \
|
||||||
|
python3-setuptools && \
|
||||||
|
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
|
||||||
|
RUN mkdir -p $downloads_dir
|
||||||
|
RUN chown -R $user:$user $downloads_dir
|
||||||
|
|
||||||
|
COPY . $projects_dir
|
||||||
|
RUN chown -R $user:$user $projects_dir
|
||||||
|
|
||||||
|
USER $user
|
||||||
|
WORKDIR $projects_dir
|
||||||
|
|
||||||
|
RUN pip install uvloop
|
||||||
|
RUN make install
|
||||||
|
RUN python3 docker/set_build.py
|
||||||
|
RUN rm ~/.cache -rf
|
||||||
|
|
||||||
|
# entry point
|
||||||
|
VOLUME $downloads_dir
|
||||||
|
COPY ./docker/webconf.yaml /webconf.yaml
|
||||||
|
ENTRYPOINT ["/home/lbry/.local/bin/lbrynet", "start", "--config=/webconf.yaml"]
|
docker/README.md (new file, +9)

### How to run with docker-compose

1. Edit the config file, then fix its permissions with
```
sudo chown -R 999:999 webconf.yaml
```
2. Start the SDK with
```
docker-compose up -d
```
@ -1,33 +1,49 @@
|
||||||
version: "3"
|
version: "3"
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
lbrycrd:
|
|
||||||
wallet_server:
|
wallet_server:
|
||||||
|
es01:
|
||||||
|
|
||||||
services:
|
services:
|
||||||
lbrycrd:
|
|
||||||
image: lbry/lbrycrd:${LBRYCRD_TAG:-latest-release}
|
|
||||||
restart: always
|
|
||||||
ports: # accessible from host
|
|
||||||
- "9246:9246" # rpc port
|
|
||||||
expose: # internal to docker network. also this doesn't do anything. its for documentation only.
|
|
||||||
- "9245" # node-to-node comms port
|
|
||||||
volumes:
|
|
||||||
- "lbrycrd:/data/.lbrycrd"
|
|
||||||
environment:
|
|
||||||
- RUN_MODE=default
|
|
||||||
- SNAPSHOT_URL=${LBRYCRD_SNAPSHOT_URL-https://lbry.com/snapshot/blockchain}
|
|
||||||
- RPC_ALLOW_IP=0.0.0.0/0
|
|
||||||
wallet_server:
|
wallet_server:
|
||||||
image: lbry/wallet-server:${WALLET_SERVER_TAG:-latest-release}
|
|
||||||
depends_on:
|
depends_on:
|
||||||
- lbrycrd
|
- es01
|
||||||
|
image: lbry/wallet-server:${WALLET_SERVER_TAG:-latest-release}
|
||||||
restart: always
|
restart: always
|
||||||
|
network_mode: host
|
||||||
ports:
|
ports:
|
||||||
- "50001:50001" # rpc port
|
- "50001:50001" # rpc port
|
||||||
- "50005:50005" # websocket port
|
- "2112:2112" # uncomment to enable prometheus
|
||||||
volumes:
|
volumes:
|
||||||
- "wallet_server:/database"
|
- "wallet_server:/database"
|
||||||
environment:
|
environment:
|
||||||
- SNAPSHOT_URL=${WALLET_SERVER_SNAPSHOT_URL-https://lbry.com/snapshot/wallet}
|
- DAEMON_URL=http://lbry:lbry@127.0.0.1:9245
|
||||||
- DAEMON_URL=http://lbry:lbry@lbrycrd:9245
|
- MAX_QUERY_WORKERS=4
|
||||||
|
- CACHE_MB=1024
|
||||||
|
- CACHE_ALL_TX_HASHES=
|
||||||
|
- CACHE_ALL_CLAIM_TXOS=
|
||||||
|
- MAX_SEND=1000000000000000000
|
||||||
|
- MAX_RECEIVE=1000000000000000000
|
||||||
|
- MAX_SESSIONS=100000
|
||||||
|
- HOST=0.0.0.0
|
||||||
|
- TCP_PORT=50001
|
||||||
|
- PROMETHEUS_PORT=2112
|
||||||
|
- FILTERING_CHANNEL_IDS=770bd7ecba84fd2f7607fb15aedd2b172c2e153f 95e5db68a3101df19763f3a5182e4b12ba393ee8
|
||||||
|
- BLOCKING_CHANNEL_IDS=dd687b357950f6f271999971f43c785e8067c3a9 06871aa438032244202840ec59a469b303257cad b4a2528f436eca1bf3bf3e10ff3f98c57bd6c4c6
|
||||||
|
es01:
|
||||||
|
image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0
|
||||||
|
container_name: es01
|
||||||
|
environment:
|
||||||
|
- node.name=es01
|
||||||
|
- discovery.type=single-node
|
||||||
|
- indices.query.bool.max_clause_count=8192
|
||||||
|
- bootstrap.memory_lock=true
|
||||||
|
- "ES_JAVA_OPTS=-Xms4g -Xmx4g" # no more than 32, remember to disable swap
|
||||||
|
ulimits:
|
||||||
|
memlock:
|
||||||
|
soft: -1
|
||||||
|
hard: -1
|
||||||
|
volumes:
|
||||||
|
- es01:/usr/share/elasticsearch/data
|
||||||
|
ports:
|
||||||
|
- 127.0.0.1:9200:9200
|
||||||
|
|
docker/docker-compose.yml (new file, +9)

version: '3'
services:
  websdk:
    image: vshyba/websdk
    ports:
      - '5279:5279'
      - '5280:5280'
    volumes:
      - ./webconf.yaml:/webconf.yaml
docker/hooks/build (new file, +7)

#!/bin/bash

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "$DIR/../.." ## make sure we're in the right place. Docker Hub screws this up sometimes
echo "docker build dir: $(pwd)"

docker build --build-arg DOCKER_TAG=$DOCKER_TAG --build-arg DOCKER_COMMIT=$SOURCE_COMMIT -f $DOCKERFILE_PATH -t $IMAGE_NAME .
|
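Docker Hub normally injects the environment for this hook; to exercise it locally, a sketch with placeholder values (any of the Dockerfiles under docker/ can be substituted for DOCKERFILE_PATH):

```bash
DOCKER_TAG=dev \
SOURCE_COMMIT=$(git rev-parse HEAD) \
DOCKERFILE_PATH=docker/Dockerfile.wallet_server \
IMAGE_NAME=lbry/wallet-server:dev \
./docker/hooks/build
```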
@ -1,38 +1,43 @@
|
||||||
"""Set the build version to be 'qa', 'rc', 'release'"""
|
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import logging
|
import logging
|
||||||
|
import lbry.build_info as build_info_mod
|
||||||
|
|
||||||
log = logging.getLogger()
|
log = logging.getLogger()
|
||||||
log.addHandler(logging.StreamHandler())
|
log.addHandler(logging.StreamHandler())
|
||||||
log.setLevel(logging.DEBUG)
|
log.setLevel(logging.DEBUG)
|
||||||
|
|
||||||
|
|
||||||
def get_build_type(ci_tag=None):
|
def _check_and_set(d: dict, key: str, value: str):
|
||||||
if not ci_tag:
|
try:
|
||||||
return "qa"
|
d[key]
|
||||||
log.debug("getting build type for tag: \"%s\"", ci_tag)
|
except KeyError:
|
||||||
if re.match(r'v\d+\.\d+\.\d+rc\d+$', ci_tag):
|
raise Exception(f"{key} var does not exist in {build_info_mod.__file__}")
|
||||||
return 'rc'
|
d[key] = value
|
||||||
elif re.match(r'v\d+\.\d+\.\d+$', ci_tag):
|
|
||||||
return 'release'
|
|
||||||
return 'qa'
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
|
build_info = {item: build_info_mod.__dict__[item] for item in dir(build_info_mod) if not item.startswith("__")}
|
||||||
build_type_path = os.path.join(root_dir, 'lbry', 'lbry', 'build_type.py')
|
|
||||||
log.debug("configuring build type file: %s", build_type_path)
|
commit_hash = os.getenv('DOCKER_COMMIT', os.getenv('GITHUB_SHA'))
|
||||||
commit_hash = os.getenv('CI_COMMIT_SHA', os.getenv('TRAVIS_COMMIT'))
|
|
||||||
if commit_hash is None:
|
if commit_hash is None:
|
||||||
raise ValueError("Commit hash not found in env vars")
|
raise ValueError("Commit hash not found in env vars")
|
||||||
commit_hash = commit_hash[:6]
|
_check_and_set(build_info, "COMMIT_HASH", commit_hash[:6])
|
||||||
build_type = get_build_type(os.getenv('CI_COMMIT_TAG', os.getenv('TRAVIS_TAG')))
|
|
||||||
log.debug("setting build type=%s, build commit=%s", build_type, commit_hash)
|
docker_tag = os.getenv('DOCKER_TAG')
|
||||||
with open(build_type_path, 'w') as f:
|
if docker_tag:
|
||||||
f.write(f"BUILD = \"{build_type}\"\nBUILD_COMMIT = \"{commit_hash}\"\n")
|
_check_and_set(build_info, "DOCKER_TAG", docker_tag)
|
||||||
|
_check_and_set(build_info, "BUILD", "docker")
|
||||||
|
else:
|
||||||
|
if re.match(r'refs/tags/v\d+\.\d+\.\d+$', str(os.getenv('GITHUB_REF'))):
|
||||||
|
_check_and_set(build_info, "BUILD", "release")
|
||||||
|
else:
|
||||||
|
_check_and_set(build_info, "BUILD", "qa")
|
||||||
|
|
||||||
|
log.debug("build info: %s", ", ".join([f"{k}={v}" for k, v in build_info.items()]))
|
||||||
|
with open(build_info_mod.__file__, 'w') as f:
|
||||||
|
f.write("\n".join([f"{k} = \"{v}\"" for k, v in build_info.items()]) + "\n")
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
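A rough sketch of exercising the rewritten docker/set_build.py locally, assuming the package has been installed (e.g. via `make install`) so `lbry.build_info` is importable and its module file sits at `lbry/build_info.py`; the printed values are illustrative:

```bash
# The script takes the commit from DOCKER_COMMIT or GITHUB_SHA and keeps the first 6 characters.
GITHUB_SHA=$(git rev-parse HEAD) python3 docker/set_build.py
cat lbry/build_info.py
# With no DOCKER_TAG and no release tag in GITHUB_REF, BUILD falls back to "qa", e.g.:
# BUILD = "qa"
# COMMIT_HASH = "abc123"
# (other names already defined in the module keep their existing values)
```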
|
||||||
|
|
|
@ -2,14 +2,24 @@
 
 # entrypoint for wallet server Docker image
 
+set -euo pipefail
+
 SNAPSHOT_URL="${SNAPSHOT_URL:-}" #off by default. latest snapshot at https://lbry.com/snapshot/wallet
 
-if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/claims.db ]]; then
+if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/lbry-leveldb ]]; then
+  files="$(ls)"
   echo "Downloading wallet snapshot from $SNAPSHOT_URL"
-  wget --no-verbose -O wallet_snapshot.tar.bz2 "$SNAPSHOT_URL"
+  wget --no-verbose --trust-server-names --content-disposition "$SNAPSHOT_URL"
   echo "Extracting snapshot..."
-  tar xvjf wallet_snapshot.tar.bz2 --directory /database
-  rm wallet_snapshot.tar.bz2
+  filename="$(grep -vf <(echo "$files") <(ls))" # finds the file that was not there before
+  case "$filename" in
+    *.tgz|*.tar.gz|*.tar.bz2 ) tar xvf "$filename" --directory /database ;;
+    *.zip ) unzip "$filename" -d /database ;;
+    * ) echo "Don't know how to extract ${filename}. SNAPSHOT COULD NOT BE LOADED" && exit 1 ;;
+  esac
+  rm "$filename"
 fi
 
-/home/lbry/.local/bin/torba-server "$@"
+/home/lbry/.local/bin/lbry-hub-elastic-sync
+echo 'starting server'
+/home/lbry/.local/bin/lbry-hub "$@"
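For a manual test of this entrypoint, a hedged sketch (the image tag and volume name mirror the docker-compose file above; the snapshot URL is the one referenced in the script, and DAEMON_URL must point at a reachable lbrycrd node):

```bash
docker run -d \
  -v wallet_server:/database \
  -e SNAPSHOT_URL=https://lbry.com/snapshot/wallet \
  -e DAEMON_URL=http://lbry:lbry@127.0.0.1:9245 \
  lbry/wallet-server:latest-release
```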
docker/webconf.yaml (new file, +9)

allowed_origin: "*"
max_key_fee: "0.0 USD"
save_files: false
save_blobs: false
streaming_server: "0.0.0.0:5280"
api: "0.0.0.0:5279"
data_dir: /tmp
download_dir: /tmp
wallet_dir: /tmp
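With `api: "0.0.0.0:5279"`, the daemon's JSON-RPC endpoint is reachable on port 5279; a quick smoke test from the host might look like this (a sketch; `status` is one of the documented SDK API methods):

```bash
curl -sS -d '{"method": "status", "params": {}}' http://localhost:5279/
```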
5163
docs/api.json
Normal file
lbry/.gitattributes (deleted, -1)

@ -1 +0,0 @@
-/CHANGELOG.md merge=union
lbry/.gitignore (deleted, -14)

@ -1,14 +0,0 @@
-.DS_Store
-/build
-/dist
-/.tox
-/.idea
-/.coverage
-/lbry-venv
-
-lbry.egg-info
-__pycache__
-_trial_temp/
-
-/tests/integration/files
-/tests/.coverage.*
|
441
lbry/.pylintrc
|
@ -1,441 +0,0 @@
|
||||||
[MASTER]
|
|
||||||
|
|
||||||
# Specify a configuration file.
|
|
||||||
#rcfile=
|
|
||||||
|
|
||||||
# Python code to execute, usually for sys.path manipulation such as
|
|
||||||
# pygtk.require().
|
|
||||||
#init-hook=
|
|
||||||
|
|
||||||
# Add files or directories to the blacklist. They should be base names, not
|
|
||||||
# paths.
|
|
||||||
ignore=CVS,schema
|
|
||||||
|
|
||||||
# Add files or directories matching the regex patterns to the
|
|
||||||
# blacklist. The regex matches against base names, not paths.
|
|
||||||
# `\.#.*` - add emacs tmp files to the blacklist
|
|
||||||
ignore-patterns=\.#.*
|
|
||||||
|
|
||||||
# Pickle collected data for later comparisons.
|
|
||||||
persistent=yes
|
|
||||||
|
|
||||||
# List of plugins (as comma separated values of python modules names) to load,
|
|
||||||
# usually to register additional checkers.
|
|
||||||
load-plugins=
|
|
||||||
|
|
||||||
# Use multiple processes to speed up Pylint.
|
|
||||||
jobs=4
|
|
||||||
|
|
||||||
# Allow loading of arbitrary C extensions. Extensions are imported into the
|
|
||||||
# active Python interpreter and may run arbitrary code.
|
|
||||||
unsafe-load-any-extension=no
|
|
||||||
|
|
||||||
# A comma-separated list of package or module names from where C extensions may
|
|
||||||
# be loaded. Extensions are loading into the active Python interpreter and may
|
|
||||||
# run arbitrary code
|
|
||||||
extension-pkg-whitelist=apsw
|
|
||||||
|
|
||||||
# Allow optimization of some AST trees. This will activate a peephole AST
|
|
||||||
# optimizer, which will apply various small optimizations. For instance, it can
|
|
||||||
# be used to obtain the result of joining multiple strings with the addition
|
|
||||||
# operator. Joining a lot of strings can lead to a maximum recursion error in
|
|
||||||
# Pylint and this flag can prevent that. It has one side effect, the resulting
|
|
||||||
# AST will be different than the one from reality.
|
|
||||||
optimize-ast=no
|
|
||||||
|
|
||||||
|
|
||||||
[MESSAGES CONTROL]
|
|
||||||
|
|
||||||
# Only show warnings with the listed confidence levels. Leave empty to show
|
|
||||||
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
|
|
||||||
confidence=
|
|
||||||
|
|
||||||
# Enable the message, report, category or checker with the given id(s). You can
|
|
||||||
# either give multiple identifier separated by comma (,) or put this option
|
|
||||||
# multiple time (only on the command line, not in the configuration file where
|
|
||||||
# it should appear only once). See also the "--disable" option for examples.
|
|
||||||
#enable=
|
|
||||||
|
|
||||||
# Disable the message, report, category or checker with the given id(s). You
|
|
||||||
# can either give multiple identifiers separated by comma (,) or put this
|
|
||||||
# option multiple times (only on the command line, not in the configuration
|
|
||||||
# file where it should appear only once).You can also use "--disable=all" to
|
|
||||||
# disable everything first and then re-enable specific checks. For example, if
|
|
||||||
# you want to run only the similarities checker, you can use "--disable=all
|
|
||||||
# --enable=similarities". If you want to run only the classes checker, but have
|
|
||||||
# no Warning level messages displayed, use"--disable=all --enable=classes
|
|
||||||
# --disable=W"
|
|
||||||
disable=
|
|
||||||
anomalous-backslash-in-string,
|
|
||||||
arguments-differ,
|
|
||||||
attribute-defined-outside-init,
|
|
||||||
bad-continuation,
|
|
||||||
bare-except,
|
|
||||||
broad-except,
|
|
||||||
cell-var-from-loop,
|
|
||||||
consider-iterating-dictionary,
|
|
||||||
dangerous-default-value,
|
|
||||||
duplicate-code,
|
|
||||||
fixme,
|
|
||||||
invalid-name,
|
|
||||||
len-as-condition,
|
|
||||||
locally-disabled,
|
|
||||||
logging-not-lazy,
|
|
||||||
missing-docstring,
|
|
||||||
no-else-return,
|
|
||||||
no-init,
|
|
||||||
no-member,
|
|
||||||
no-self-use,
|
|
||||||
protected-access,
|
|
||||||
redefined-builtin,
|
|
||||||
redefined-outer-name,
|
|
||||||
redefined-variable-type,
|
|
||||||
relative-import,
|
|
||||||
signature-differs,
|
|
||||||
super-init-not-called,
|
|
||||||
too-few-public-methods,
|
|
||||||
too-many-arguments,
|
|
||||||
too-many-branches,
|
|
||||||
too-many-instance-attributes,
|
|
||||||
too-many-lines,
|
|
||||||
too-many-locals,
|
|
||||||
too-many-nested-blocks,
|
|
||||||
too-many-public-methods,
|
|
||||||
too-many-return-statements,
|
|
||||||
too-many-statements,
|
|
||||||
trailing-newlines,
|
|
||||||
undefined-loop-variable,
|
|
||||||
ungrouped-imports,
|
|
||||||
unnecessary-lambda,
|
|
||||||
unused-argument,
|
|
||||||
unused-variable,
|
|
||||||
wrong-import-order,
|
|
||||||
wrong-import-position,
|
|
||||||
deprecated-lambda,
|
|
||||||
simplifiable-if-statement,
|
|
||||||
unidiomatic-typecheck,
|
|
||||||
inconsistent-return-statements,
|
|
||||||
keyword-arg-before-vararg,
|
|
||||||
assignment-from-no-return,
|
|
||||||
useless-return,
|
|
||||||
assignment-from-none,
|
|
||||||
stop-iteration-return
|
|
||||||
|
|
||||||
|
|
||||||
[REPORTS]
|
|
||||||
|
|
||||||
# Set the output format. Available formats are text, parseable, colorized, msvs
|
|
||||||
# (visual studio) and html. You can also give a reporter class, eg
|
|
||||||
# mypackage.mymodule.MyReporterClass.
|
|
||||||
output-format=text
|
|
||||||
|
|
||||||
# Put messages in a separate file for each module / package specified on the
|
|
||||||
# command line instead of printing them on stdout. Reports (if any) will be
|
|
||||||
# written in a file name "pylint_global.[txt|html]".
|
|
||||||
files-output=no
|
|
||||||
|
|
||||||
# Tells whether to display a full report or only the messages
|
|
||||||
reports=no
|
|
||||||
|
|
||||||
# Python expression which should return a note less than 10 (10 is the highest
|
|
||||||
# note). You have access to the variables errors warning, statement which
|
|
||||||
# respectively contain the number of errors / warnings messages and the total
|
|
||||||
# number of statements analyzed. This is used by the global evaluation report
|
|
||||||
# (RP0004).
|
|
||||||
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
|
|
||||||
|
|
||||||
# Template used to display messages. This is a python new-style format string
|
|
||||||
# used to format the message information. See doc for all details
|
|
||||||
#msg-template=
|
|
||||||
|
|
||||||
|
|
||||||
[VARIABLES]
|
|
||||||
|
|
||||||
# Tells whether we should check for unused import in __init__ files.
|
|
||||||
init-import=no
|
|
||||||
|
|
||||||
# A regular expression matching the name of dummy variables (i.e. expectedly
|
|
||||||
# not used).
|
|
||||||
dummy-variables-rgx=_$|dummy
|
|
||||||
|
|
||||||
# List of additional names supposed to be defined in builtins. Remember that
|
|
||||||
# you should avoid to define new builtins when possible.
|
|
||||||
additional-builtins=
|
|
||||||
|
|
||||||
# List of strings which can identify a callback function by name. A callback
|
|
||||||
# name must start or end with one of those strings.
|
|
||||||
callbacks=cb_,_cb
|
|
||||||
|
|
||||||
|
|
||||||
[LOGGING]
|
|
||||||
|
|
||||||
# Logging modules to check that the string format arguments are in logging
|
|
||||||
# function parameter format
|
|
||||||
logging-modules=logging
|
|
||||||
|
|
||||||
|
|
||||||
[BASIC]
|
|
||||||
|
|
||||||
# List of builtins function names that should not be used, separated by a comma
|
|
||||||
bad-functions=map,filter,input
|
|
||||||
|
|
||||||
# Good variable names which should always be accepted, separated by a comma
|
|
||||||
# allow `d` as its used frequently for deferred callback chains
|
|
||||||
good-names=i,j,k,ex,Run,_,d
|
|
||||||
|
|
||||||
# Bad variable names which should always be refused, separated by a comma
|
|
||||||
bad-names=foo,bar,baz,toto,tutu,tata
|
|
||||||
|
|
||||||
# Colon-delimited sets of names that determine each other's naming style when
|
|
||||||
# the name regexes allow several styles.
|
|
||||||
name-group=
|
|
||||||
|
|
||||||
# Include a hint for the correct naming format with invalid-name
|
|
||||||
include-naming-hint=no
|
|
||||||
|
|
||||||
# Regular expression matching correct function names
|
|
||||||
function-rgx=[a-z_][a-z0-9_]{2,30}$
|
|
||||||
|
|
||||||
# Naming hint for function names
|
|
||||||
function-name-hint=[a-z_][a-z0-9_]{2,30}$
|
|
||||||
|
|
||||||
# Regular expression matching correct variable names
|
|
||||||
variable-rgx=[a-z_][a-z0-9_]{2,30}$
|
|
||||||
|
|
||||||
# Naming hint for variable names
|
|
||||||
variable-name-hint=[a-z_][a-z0-9_]{2,30}$
|
|
||||||
|
|
||||||
# Regular expression matching correct constant names
|
|
||||||
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
|
|
||||||
|
|
||||||
# Naming hint for constant names
|
|
||||||
const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
|
|
||||||
|
|
||||||
# Regular expression matching correct attribute names
|
|
||||||
attr-rgx=[a-z_][a-z0-9_]{2,30}$
|
|
||||||
|
|
||||||
# Naming hint for attribute names
|
|
||||||
attr-name-hint=[a-z_][a-z0-9_]{2,30}$
|
|
||||||
|
|
||||||
# Regular expression matching correct argument names
|
|
||||||
argument-rgx=[a-z_][a-z0-9_]{2,30}$
|
|
||||||
|
|
||||||
# Naming hint for argument names
|
|
||||||
argument-name-hint=[a-z_][a-z0-9_]{2,30}$
|
|
||||||
|
|
||||||
# Regular expression matching correct class attribute names
|
|
||||||
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
|
|
||||||
|
|
||||||
# Naming hint for class attribute names
|
|
||||||
class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
|
|
||||||
|
|
||||||
# Regular expression matching correct inline iteration names
|
|
||||||
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
|
|
||||||
|
|
||||||
# Naming hint for inline iteration names
|
|
||||||
inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
|
|
||||||
|
|
||||||
# Regular expression matching correct class names
|
|
||||||
class-rgx=[A-Z_][a-zA-Z0-9]+$
|
|
||||||
|
|
||||||
# Naming hint for class names
|
|
||||||
class-name-hint=[A-Z_][a-zA-Z0-9]+$
|
|
||||||
|
|
||||||
# Regular expression matching correct module names
|
|
||||||
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
|
|
||||||
|
|
||||||
# Naming hint for module names
|
|
||||||
module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
|
|
||||||
|
|
||||||
# Regular expression matching correct method names
|
|
||||||
method-rgx=[a-z_][a-z0-9_]{2,30}$
|
|
||||||
|
|
||||||
# Naming hint for method names
|
|
||||||
method-name-hint=[a-z_][a-z0-9_]{2,30}$
|
|
||||||
|
|
||||||
# Regular expression which should only match function or class names that do
|
|
||||||
# not require a docstring.
|
|
||||||
no-docstring-rgx=^_
|
|
||||||
|
|
||||||
# Minimum line length for functions/classes that require docstrings, shorter
|
|
||||||
# ones are exempt.
|
|
||||||
docstring-min-length=-1
|
|
||||||
|
|
||||||
|
|
||||||
[ELIF]
|
|
||||||
|
|
||||||
# Maximum number of nested blocks for function / method body
|
|
||||||
max-nested-blocks=5
|
|
||||||
|
|
||||||
|
|
||||||
[SPELLING]
|
|
||||||
|
|
||||||
# Spelling dictionary name. Available dictionaries: none. To make it working
|
|
||||||
# install python-enchant package.
|
|
||||||
spelling-dict=
|
|
||||||
|
|
||||||
# List of comma separated words that should not be checked.
|
|
||||||
spelling-ignore-words=
|
|
||||||
|
|
||||||
# A path to a file that contains private dictionary; one word per line.
|
|
||||||
spelling-private-dict-file=
|
|
||||||
|
|
||||||
# Tells whether to store unknown words to indicated private dictionary in
|
|
||||||
# --spelling-private-dict-file option instead of raising a message.
|
|
||||||
spelling-store-unknown-words=no
|
|
||||||
|
|
||||||
|
|
||||||
[FORMAT]
|
|
||||||
|
|
||||||
# Maximum number of characters on a single line.
|
|
||||||
max-line-length=120
|
|
||||||
|
|
||||||
# Regexp for a line that is allowed to be longer than the limit.
|
|
||||||
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
|
|
||||||
|
|
||||||
# Allow the body of an if to be on the same line as the test if there is no
|
|
||||||
# else.
|
|
||||||
single-line-if-stmt=no
|
|
||||||
|
|
||||||
# List of optional constructs for which whitespace checking is disabled. `dict-
|
|
||||||
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
|
|
||||||
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
|
|
||||||
# `empty-line` allows space-only lines.
|
|
||||||
no-space-check=trailing-comma,dict-separator
|
|
||||||
|
|
||||||
# Maximum number of lines in a module
|
|
||||||
max-module-lines=1000
|
|
||||||
|
|
||||||
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
|
|
||||||
# tab).
|
|
||||||
indent-string=' '
|
|
||||||
|
|
||||||
# Number of spaces of indent required inside a hanging or continued line.
|
|
||||||
indent-after-paren=4
|
|
||||||
|
|
||||||
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
|
|
||||||
expected-line-ending-format=
|
|
||||||
|
|
||||||
|
|
||||||
[MISCELLANEOUS]
|
|
||||||
|
|
||||||
# List of note tags to take in consideration, separated by a comma.
|
|
||||||
notes=FIXME,XXX,TODO
|
|
||||||
|
|
||||||
|
|
||||||
[SIMILARITIES]
|
|
||||||
|
|
||||||
# Minimum lines number of a similarity.
|
|
||||||
min-similarity-lines=4
|
|
||||||
|
|
||||||
# Ignore comments when computing similarities.
|
|
||||||
ignore-comments=yes
|
|
||||||
|
|
||||||
# Ignore docstrings when computing similarities.
|
|
||||||
ignore-docstrings=yes
|
|
||||||
|
|
||||||
# Ignore imports when computing similarities.
|
|
||||||
ignore-imports=no
|
|
||||||
|
|
||||||
|
|
||||||
[TYPECHECK]
|
|
||||||
|
|
||||||
# Tells whether missing members accessed in mixin class should be ignored. A
|
|
||||||
# mixin class is detected if its name ends with "mixin" (case insensitive).
|
|
||||||
ignore-mixin-members=yes
|
|
||||||
|
|
||||||
# List of module names for which member attributes should not be checked
|
|
||||||
# (useful for modules/projects where namespaces are manipulated during runtime
|
|
||||||
# and thus existing member attributes cannot be deduced by static analysis. It
|
|
||||||
# supports qualified module names, as well as Unix pattern matching.
|
|
||||||
ignored-modules=leveldb,distutils
|
|
||||||
# Ignoring distutils because: https://github.com/PyCQA/pylint/issues/73
|
|
||||||
|
|
||||||
# List of classes names for which member attributes should not be checked
|
|
||||||
# (useful for classes with attributes dynamically set). This supports can work
|
|
||||||
# with qualified names.
|
|
||||||
# ignored-classes=
|
|
||||||
|
|
||||||
|
|
||||||
# List of members which are set dynamically and missed by pylint inference
|
|
||||||
# system, and so shouldn't trigger E1101 when accessed. Python regular
|
|
||||||
# expressions are accepted.
|
|
||||||
generated-members=lbrynet.lbrynet_daemon.LBRYDaemon.Parameters
|
|
||||||
|
|
||||||
|
|
||||||
[IMPORTS]
|
|
||||||
|
|
||||||
# Deprecated modules which should not be used, separated by a comma
|
|
||||||
deprecated-modules=regsub,TERMIOS,Bastion,rexec
|
|
||||||
|
|
||||||
# Create a graph of every (i.e. internal and external) dependencies in the
|
|
||||||
# given file (report RP0402 must not be disabled)
|
|
||||||
import-graph=
|
|
||||||
|
|
||||||
# Create a graph of external dependencies in the given file (report RP0402 must
|
|
||||||
# not be disabled)
|
|
||||||
ext-import-graph=
|
|
||||||
|
|
||||||
# Create a graph of internal dependencies in the given file (report RP0402 must
|
|
||||||
# not be disabled)
|
|
||||||
int-import-graph=
|
|
||||||
|
|
||||||
|
|
||||||
[DESIGN]
|
|
||||||
|
|
||||||
# Maximum number of arguments for function / method
|
|
||||||
max-args=10
|
|
||||||
|
|
||||||
# Argument names that match this expression will be ignored. Default to name
|
|
||||||
# with leading underscore
|
|
||||||
ignored-argument-names=_.*
|
|
||||||
|
|
||||||
# Maximum number of locals for function / method body
|
|
||||||
max-locals=15
|
|
||||||
|
|
||||||
# Maximum number of return / yield for function / method body
|
|
||||||
max-returns=6
|
|
||||||
|
|
||||||
# Maximum number of branch for function / method body
|
|
||||||
max-branches=12
|
|
||||||
|
|
||||||
# Maximum number of statements in function / method body
|
|
||||||
max-statements=50
|
|
||||||
|
|
||||||
# Maximum number of parents for a class (see R0901).
|
|
||||||
max-parents=8
|
|
||||||
|
|
||||||
# Maximum number of attributes for a class (see R0902).
|
|
||||||
max-attributes=7
|
|
||||||
|
|
||||||
# Minimum number of public methods for a class (see R0903).
|
|
||||||
min-public-methods=2
|
|
||||||
|
|
||||||
# Maximum number of public methods for a class (see R0904).
|
|
||||||
max-public-methods=20
|
|
||||||
|
|
||||||
# Maximum number of boolean expressions in a if statement
|
|
||||||
max-bool-expr=5
|
|
||||||
|
|
||||||
|
|
||||||
[CLASSES]
|
|
||||||
|
|
||||||
# List of method names used to declare (i.e. assign) instance attributes.
|
|
||||||
defining-attr-methods=__init__,__new__,setUp
|
|
||||||
|
|
||||||
# List of valid names for the first argument in a class method.
|
|
||||||
valid-classmethod-first-arg=cls
|
|
||||||
|
|
||||||
# List of valid names for the first argument in a metaclass class method.
|
|
||||||
valid-metaclass-classmethod-first-arg=mcs
|
|
||||||
|
|
||||||
# List of member names, which should be excluded from the protected access
|
|
||||||
# warning.
|
|
||||||
exclude-protected=_asdict,_fields,_replace,_source,_make
|
|
||||||
|
|
||||||
|
|
||||||
[EXCEPTIONS]
|
|
||||||
|
|
||||||
# Exceptions that will emit a warning when being caught. Defaults to
|
|
||||||
# "Exception"
|
|
||||||
overgeneral-exceptions=Exception
|
|
|
@ -1,56 +0,0 @@
# <img src="https://raw.githubusercontent.com/lbryio/lbry-sdk/master/lbry/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![Build Status](https://travis-ci.org/lbryio/lbry-sdk.svg?branch=master)](https://travis-ci.org/lbryio/lbry-sdk)

LBRY is a decentralized peer-to-peer protocol for publishing and accessing digital content. It utilizes the [LBRY blockchain](https://github.com/lbryio/lbrycrd) as a global namespace and database of digital content. Blockchain entries contain searchable content metadata, identities, rights and access rules. LBRY also provides a data network that consists of peers (seeders) uploading and downloading data from other peers, possibly in exchange for payments, as well as a distributed hash table used by peers to discover other peers.

LBRY SDK for Python is currently the most full featured implementation of the LBRY Network protocols and includes many useful components and tools for building decentralized applications. Primary features and components:

* Built on Python 3.7+ and `asyncio`.
* Kademlia DHT (Distributed Hash Table) implementation for finding peers to download from and announcing to peers what we have to host ([lbry.dht](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/dht)).
* Blob exchange protocol for transferring encrypted blobs of content and negotiating payments ([lbry.blob_exchange](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/blob_exchange)).
* Protobuf schema for encoding and decoding metadata stored on the blockchain ([lbry.schema](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/schema)).
* Wallet implementation for the LBRY blockchain ([lbry.wallet](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/wallet)).
* Daemon with a JSON-RPC API to ease building end user applications in any language and for automating various tasks ([lbry.extras.daemon](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/extras/daemon)).

## Installation

Our [releases page](https://github.com/lbryio/lbry-sdk/releases) contains pre-built binaries of the latest release, pre-releases, and past releases for macOS, Debian-based Linux, and Windows. [Automated travis builds](http://build.lbry.io/daemon/) are also available for testing.

## Usage

Run `lbrynet start` to launch the API server.

By default, `lbrynet` will provide a JSON-RPC server at `http://localhost:5279`. It is easy to interact with via cURL or sane programming languages.

Our [quickstart guide](https://lbry.tech/playground) provides a simple walkthrough and examples for learning.

With the daemon running, `lbrynet commands` will show you a list of commands.

The full API is documented [here](https://lbry.tech/api/sdk).

## Running from source

Installing from source is also relatively painless. Full instructions are in [INSTALL.md](INSTALL.md)

## Contributing

Contributions to this project are welcome, encouraged, and compensated. For more details, please check [this](https://lbry.tech/contribute) link.

## License

This project is MIT licensed. For the full license, see [LICENSE](LICENSE).

## Security

We take security seriously. Please contact security@lbry.com regarding any security issues. [Our GPG key is here](https://lbry.com/faq/gpg-key) if you need it.

## Contact

The primary contact for this project is [@eukreign](mailto:lex@lbry.com).

## Additional information and links

The documentation for the API can be found [here](https://lbry.tech/api/sdk).

Daemon defaults, ports, and other settings are documented [here](https://lbry.tech/resources/daemon-settings).

Settings can be configured using a daemon-settings.yml file. An example can be found [here](https://github.com/lbryio/lbry-sdk/blob/master/lbry/example_daemon_settings.yml).
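As a companion to the usage notes above, here is a minimal sketch of calling the daemon's JSON-RPC interface from Python. It assumes the daemon is running on the default port 5279; the `status` method name and the helper name are illustrative, not a prescribed API.

```python
# Minimal JSON-RPC call against a locally running lbrynet daemon (assumed on port 5279).
import json
from urllib.request import Request, urlopen


def lbrynet_call(method: str, **params):
    # lbrynet accepts a JSON body with "method" and "params" keys
    payload = json.dumps({"method": method, "params": params}).encode()
    request = Request("http://localhost:5279", data=payload,
                      headers={"Content-Type": "application/json"})
    with urlopen(request) as response:
        return json.loads(response.read())


print(lbrynet_call("status"))  # "status" is used here as an illustrative method name
```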
2 lbry/__init__.py Normal file
@ -0,0 +1,2 @@
__version__ = "0.113.0"
version = tuple(map(int, __version__.split('.')))  # pylint: disable=invalid-name

6 lbry/blob/__init__.py Normal file
@ -0,0 +1,6 @@
from lbry.utils import get_lbry_hash_obj

MAX_BLOB_SIZE = 2 * 2 ** 20

# digest_size is in bytes, and blob hashes are hex encoded
BLOBHASH_LENGTH = get_lbry_hash_obj().digest_size * 2
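As a quick worked example of the constants above: `2 * 2 ** 20` is 2 MiB, and hex encoding doubles the digest length. Assuming `get_lbry_hash_obj()` returns a SHA-384 hash object (an assumption here, not stated in the diff), the blob hash is 96 characters long:

```python
import hashlib

digest_size = hashlib.sha384().digest_size   # 48 bytes, assuming SHA-384 is the LBRY hash
blobhash_length = digest_size * 2            # hex encoding doubles the length -> 96 chars
max_blob_size = 2 * 2 ** 20                  # 2 MiB
print(digest_size, blobhash_length, max_blob_size)  # 48 96 2097152
```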
@ -1,5 +1,6 @@
 import os
 import re
+import time
 import asyncio
 import binascii
 import logging
@ -9,18 +10,20 @@ from io import BytesIO
 from cryptography.hazmat.primitives.ciphers import Cipher, modes
 from cryptography.hazmat.primitives.ciphers.algorithms import AES
 from cryptography.hazmat.primitives.padding import PKCS7
+from cryptography.hazmat.backends import default_backend

-from lbry.cryptoutils import backend, get_lbry_hash_obj
+from lbry.utils import get_lbry_hash_obj
 from lbry.error import DownloadCancelledError, InvalidBlobHashError, InvalidDataError

-from lbry.blob import MAX_BLOB_SIZE, blobhash_length
+from lbry.blob import MAX_BLOB_SIZE, BLOBHASH_LENGTH
 from lbry.blob.blob_info import BlobInfo
 from lbry.blob.writer import HashBlobWriter

 log = logging.getLogger(__name__)


-_hexmatch = re.compile("^[a-f,0-9]+$")
+HEXMATCH = re.compile("^[a-f,0-9]+$")
+BACKEND = default_backend()


 def is_valid_blobhash(blobhash: str) -> bool:
@ -31,11 +34,11 @@ def is_valid_blobhash(blobhash: str) -> bool:
     @return: True/False
     """
-    return len(blobhash) == blobhash_length and _hexmatch.match(blobhash)
+    return len(blobhash) == BLOBHASH_LENGTH and HEXMATCH.match(blobhash)


 def encrypt_blob_bytes(key: bytes, iv: bytes, unencrypted: bytes) -> typing.Tuple[bytes, str]:
-    cipher = Cipher(AES(key), modes.CBC(iv), backend=backend)
+    cipher = Cipher(AES(key), modes.CBC(iv), backend=BACKEND)
     padder = PKCS7(AES.block_size).padder()
     encryptor = cipher.encryptor()
     encrypted = encryptor.update(padder.update(unencrypted) + padder.finalize()) + encryptor.finalize()
@ -47,7 +50,7 @@ def encrypt_blob_bytes(key: bytes, iv: bytes, unencrypted: bytes) -> typing.Tupl
 def decrypt_blob_bytes(data: bytes, length: int, key: bytes, iv: bytes) -> bytes:
     if len(data) != length:
         raise ValueError("unexpected length")
-    cipher = Cipher(AES(key), modes.CBC(iv), backend=backend)
+    cipher = Cipher(AES(key), modes.CBC(iv), backend=BACKEND)
     unpadder = PKCS7(AES.block_size).unpadder()
     decryptor = cipher.decryptor()
     return unpadder.update(decryptor.update(data) + decryptor.finalize()) + unpadder.finalize()
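A round-trip sketch of the two helpers changed above, assuming they are importable from `lbry.blob.blob_file` (the module path is inferred, and the key/IV values are illustrative random bytes):

```python
import os
from lbry.blob.blob_file import encrypt_blob_bytes, decrypt_blob_bytes  # assumed module path

key, iv = os.urandom(16), os.urandom(16)
plaintext = b"some unencrypted blob content"

# encrypt_blob_bytes pads with PKCS7, encrypts with AES-CBC and returns (ciphertext, hex hash)
encrypted, blob_hash = encrypt_blob_bytes(key, iv, plaintext)
assert decrypt_blob_bytes(encrypted, len(encrypted), key, iv) == plaintext
```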
@ -68,21 +71,27 @@ class AbstractBlob:
         'writers',
         'verified',
         'writing',
-        'readers'
+        'readers',
+        'added_on',
+        'is_mine',
     ]

-    def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
-                 blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
-                 blob_directory: typing.Optional[str] = None):
+    def __init__(
+        self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
+        blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
+        blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False,
+    ):
         self.loop = loop
         self.blob_hash = blob_hash
         self.length = length
         self.blob_completed_callback = blob_completed_callback
         self.blob_directory = blob_directory
         self.writers: typing.Dict[typing.Tuple[typing.Optional[str], typing.Optional[int]], HashBlobWriter] = {}
-        self.verified: asyncio.Event = asyncio.Event(loop=self.loop)
-        self.writing: asyncio.Event = asyncio.Event(loop=self.loop)
+        self.verified: asyncio.Event = asyncio.Event()
+        self.writing: asyncio.Event = asyncio.Event()
         self.readers: typing.List[typing.BinaryIO] = []
+        self.added_on = added_on or time.time()
+        self.is_mine = is_mine

         if not is_valid_blobhash(blob_hash):
             raise InvalidBlobHashError(blob_hash)
@ -108,7 +117,7 @@ class AbstractBlob:
         if reader in self.readers:
             self.readers.remove(reader)

-    def _write_blob(self, blob_bytes: bytes):
+    def _write_blob(self, blob_bytes: bytes) -> asyncio.Task:
         raise NotImplementedError()

     def set_length(self, length) -> None:
@ -142,7 +151,7 @@ class AbstractBlob:

     def close(self):
         while self.writers:
-            peer, writer = self.writers.popitem()
+            _, writer = self.writers.popitem()
             if writer and writer.finished and not writer.finished.done() and not self.loop.is_closed():
                 writer.finished.cancel()
         while self.readers:
@ -165,7 +174,7 @@ class AbstractBlob:
         with self.reader_context() as handle:
             try:
                 return await self.loop.sendfile(writer.transport, handle, count=self.get_length())
-            except (ConnectionResetError, BrokenPipeError, RuntimeError, OSError, AttributeError):
+            except (ConnectionError, BrokenPipeError, RuntimeError, OSError, AttributeError):
                 return -1

     def decrypt(self, key: bytes, iv: bytes) -> bytes:
@ -178,35 +187,42 @@ class AbstractBlob:

     @classmethod
     async def create_from_unencrypted(
             cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
-            unencrypted: bytes, blob_num: int,
-            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None) -> BlobInfo:
+            unencrypted: bytes, blob_num: int, added_on: int, is_mine: bool,
+            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None,
+    ) -> BlobInfo:
         """
         Create an encrypted BlobFile from plaintext bytes
         """

         blob_bytes, blob_hash = encrypt_blob_bytes(key, iv, unencrypted)
         length = len(blob_bytes)
-        blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir)
+        blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir, added_on, is_mine)
         writer = blob.get_blob_writer()
         writer.write(blob_bytes)
         await blob.verified.wait()
-        return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), blob_hash)
+        return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), added_on, blob_hash, is_mine)

     def save_verified_blob(self, verified_bytes: bytes):
         if self.verified.is_set():
             return
-        if self.is_writeable():
-            self._write_blob(verified_bytes)
-            self.verified.set()
+
+        def update_events(_):
+            self.verified.set()
+            self.writing.clear()
+
+        if self.is_writeable():
+            self.writing.set()
+            task = self._write_blob(verified_bytes)
+            task.add_done_callback(update_events)
             if self.blob_completed_callback:
-                self.blob_completed_callback(self)
+                task.add_done_callback(lambda _: self.blob_completed_callback(self))

     def get_blob_writer(self, peer_address: typing.Optional[str] = None,
                         peer_port: typing.Optional[int] = None) -> HashBlobWriter:
         if (peer_address, peer_port) in self.writers and not self.writers[(peer_address, peer_port)].closed():
             raise OSError(f"attempted to download blob twice from {peer_address}:{peer_port}")
-        fut = asyncio.Future(loop=self.loop)
+        fut = asyncio.Future()
         writer = HashBlobWriter(self.blob_hash, self.get_length, fut)
         self.writers[(peer_address, peer_port)] = writer
@ -240,11 +256,13 @@ class BlobBuffer(AbstractBlob):
     """
     An in-memory only blob
     """
-    def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
-                 blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
-                 blob_directory: typing.Optional[str] = None):
+    def __init__(
+        self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
+        blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
+        blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
+    ):
         self._verified_bytes: typing.Optional[BytesIO] = None
-        super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory)
+        super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)

     @contextlib.contextmanager
     def _reader_context(self) -> typing.ContextManager[typing.BinaryIO]:
@ -259,9 +277,11 @@ class BlobBuffer(AbstractBlob):
         self.verified.clear()

     def _write_blob(self, blob_bytes: bytes):
-        if self._verified_bytes:
-            raise OSError("already have bytes for blob")
-        self._verified_bytes = BytesIO(blob_bytes)
+        async def write():
+            if self._verified_bytes:
+                raise OSError("already have bytes for blob")
+            self._verified_bytes = BytesIO(blob_bytes)
+        return self.loop.create_task(write())

     def delete(self):
         if self._verified_bytes:
@ -279,10 +299,12 @@ class BlobFile(AbstractBlob):
     """
     A blob existing on the local file system
     """
-    def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
-                 blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
-                 blob_directory: typing.Optional[str] = None):
-        super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory)
+    def __init__(
+        self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
+        blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
+        blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
+    ):
+        super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)
         if not blob_directory or not os.path.isdir(blob_directory):
             raise OSError(f"invalid blob directory '{blob_directory}'")
         self.file_path = os.path.join(self.blob_directory, self.blob_hash)
@ -317,22 +339,28 @@ class BlobFile(AbstractBlob):
             handle.close()

     def _write_blob(self, blob_bytes: bytes):
-        with open(self.file_path, 'wb') as f:
-            f.write(blob_bytes)
+        def _write_blob():
+            with open(self.file_path, 'wb') as f:
+                f.write(blob_bytes)
+
+        async def write_blob():
+            await self.loop.run_in_executor(None, _write_blob)
+
+        return self.loop.create_task(write_blob())

     def delete(self):
+        super().delete()
         if os.path.isfile(self.file_path):
             os.remove(self.file_path)
-        return super().delete()

     @classmethod
     async def create_from_unencrypted(
             cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
-            unencrypted: bytes, blob_num: int,
-            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'],
-                                                                     asyncio.Task]] = None) -> BlobInfo:
+            unencrypted: bytes, blob_num: int, added_on: float, is_mine: bool,
+            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None
+    ) -> BlobInfo:
         if not blob_dir or not os.path.isdir(blob_dir):
             raise OSError(f"cannot create blob in directory: '{blob_dir}'")
         return await super().create_from_unencrypted(
-            loop, blob_dir, key, iv, unencrypted, blob_num, blob_completed_callback
+            loop, blob_dir, key, iv, unencrypted, blob_num, added_on, is_mine, blob_completed_callback
         )
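The `_write_blob` rework above moves the blocking file write into the default executor and wraps it in a task, so callers such as `save_verified_blob` can attach completion callbacks instead of blocking the event loop. A self-contained sketch of that same pattern (file path and function names here are illustrative, not the SDK's API):

```python
import asyncio


def write_file_blocking(path: str, data: bytes):
    # plain blocking I/O, unsafe to run directly on the event loop thread
    with open(path, 'wb') as f:
        f.write(data)


async def main():
    loop = asyncio.get_running_loop()

    async def write():
        # offload the blocking write to the default thread pool executor
        await loop.run_in_executor(None, write_file_blocking, "/tmp/blob.bin", b"data")

    task = loop.create_task(write())
    task.add_done_callback(lambda _: print("write finished"))
    await task


asyncio.run(main())
```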
@ -7,13 +7,19 @@ class BlobInfo:
         'blob_num',
         'length',
         'iv',
+        'added_on',
+        'is_mine'
     ]

-    def __init__(self, blob_num: int, length: int, iv: str, blob_hash: typing.Optional[str] = None):
+    def __init__(
+            self, blob_num: int, length: int, iv: str, added_on,
+            blob_hash: typing.Optional[str] = None, is_mine=False):
         self.blob_hash = blob_hash
         self.blob_num = blob_num
         self.length = length
         self.iv = iv
+        self.added_on = added_on
+        self.is_mine = is_mine

     def as_dict(self) -> typing.Dict:
         d = {
@ -2,7 +2,7 @@ import os
 import typing
 import asyncio
 import logging
-from lbry.utils import LRUCache
+from lbry.utils import LRUCacheWithMetrics
 from lbry.blob.blob_file import is_valid_blobhash, BlobFile, BlobBuffer, AbstractBlob
 from lbry.stream.descriptor import StreamDescriptor
 from lbry.connection_manager import ConnectionManager
@ -32,34 +32,34 @@ class BlobManager:
             else self._node_data_store.completed_blobs
         self.blobs: typing.Dict[str, AbstractBlob] = {}
         self.config = config
-        self.decrypted_blob_lru_cache = None if not self.config.blob_lru_cache_size else LRUCache(
+        self.decrypted_blob_lru_cache = None if not self.config.blob_lru_cache_size else LRUCacheWithMetrics(
             self.config.blob_lru_cache_size)
         self.connection_manager = ConnectionManager(loop)

-    def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None):
+    def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None, is_mine: bool = False):
         if self.config.save_blobs or (
                 is_valid_blobhash(blob_hash) and os.path.isfile(os.path.join(self.blob_dir, blob_hash))):
             return BlobFile(
-                self.loop, blob_hash, length, self.blob_completed, self.blob_dir
+                self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
             )
         return BlobBuffer(
-            self.loop, blob_hash, length, self.blob_completed, self.blob_dir
+            self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
         )

-    def get_blob(self, blob_hash, length: typing.Optional[int] = None):
+    def get_blob(self, blob_hash, length: typing.Optional[int] = None, is_mine: bool = False):
         if blob_hash in self.blobs:
             if self.config.save_blobs and isinstance(self.blobs[blob_hash], BlobBuffer):
                 buffer = self.blobs.pop(blob_hash)
                 if blob_hash in self.completed_blob_hashes:
                     self.completed_blob_hashes.remove(blob_hash)
-                self.blobs[blob_hash] = self._get_blob(blob_hash, length)
+                self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
                 if buffer.is_readable():
                     with buffer.reader_context() as reader:
                         self.blobs[blob_hash].write_blob(reader.read())
             if length and self.blobs[blob_hash].length is None:
                 self.blobs[blob_hash].set_length(length)
         else:
-            self.blobs[blob_hash] = self._get_blob(blob_hash, length)
+            self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
         return self.blobs[blob_hash]

     def is_blob_verified(self, blob_hash: str, length: typing.Optional[int] = None) -> bool:
@ -83,6 +83,8 @@ class BlobManager:
         to_add = await self.storage.sync_missing_blobs(in_blobfiles_dir)
         if to_add:
             self.completed_blob_hashes.update(to_add)
+        # check blobs that aren't set as finished but were seen on disk
+        await self.ensure_completed_blobs_status(in_blobfiles_dir - to_add)
         if self.config.track_bandwidth:
             self.connection_manager.start()
         return True
@ -105,13 +107,26 @@ class BlobManager:
         if isinstance(blob, BlobFile):
             if blob.blob_hash not in self.completed_blob_hashes:
                 self.completed_blob_hashes.add(blob.blob_hash)
-            return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=True))
+            return self.loop.create_task(self.storage.add_blobs(
+                (blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=True)
+            )
         else:
-            return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=False))
+            return self.loop.create_task(self.storage.add_blobs(
+                (blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=False)
+            )

-    def check_completed_blobs(self, blob_hashes: typing.List[str]) -> typing.List[str]:
-        """Returns of the blobhashes_to_check, which are valid"""
-        return [blob_hash for blob_hash in blob_hashes if self.is_blob_verified(blob_hash)]
+    async def ensure_completed_blobs_status(self, blob_hashes: typing.Iterable[str]):
+        """Ensures that completed blobs from a given list of blob hashes are set as 'finished' in the database."""
+        to_add = []
+        for blob_hash in blob_hashes:
+            if not self.is_blob_verified(blob_hash):
+                continue
+            blob = self.get_blob(blob_hash)
+            to_add.append((blob.blob_hash, blob.length, blob.added_on, blob.is_mine))
+            if len(to_add) > 500:
+                await self.storage.add_blobs(*to_add, finished=True)
+                to_add.clear()
+        return await self.storage.add_blobs(*to_add, finished=True)

     def delete_blob(self, blob_hash: str):
         if not is_valid_blobhash(blob_hash):
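The new `ensure_completed_blobs_status` flushes its pending rows in batches of 500 so a large blob directory does not turn into one enormous database write. A stripped-down sketch of the same batching idea, where `storage.add_blobs` stands in for the bulk insert shown above:

```python
async def mark_finished_in_batches(storage, rows, batch_size=500):
    # rows are (blob_hash, length, added_on, is_mine) tuples, per the method above
    batch = []
    for row in rows:
        batch.append(row)
        if len(batch) > batch_size:
            await storage.add_blobs(*batch, finished=True)
            batch.clear()
    if batch:
        await storage.add_blobs(*batch, finished=True)
```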
77 lbry/blob/disk_space_manager.py Normal file
@ -0,0 +1,77 @@
import asyncio
import logging

log = logging.getLogger(__name__)


class DiskSpaceManager:

    def __init__(self, config, db, blob_manager, cleaning_interval=30 * 60, analytics=None):
        self.config = config
        self.db = db
        self.blob_manager = blob_manager
        self.cleaning_interval = cleaning_interval
        self.running = False
        self.task = None
        self.analytics = analytics
        self._used_space_bytes = None

    async def get_free_space_mb(self, is_network_blob=False):
        limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
        space_used_mb = await self.get_space_used_mb()
        space_used_mb = space_used_mb['network_storage'] if is_network_blob else space_used_mb['content_storage']
        return max(0, limit_mb - space_used_mb)

    async def get_space_used_bytes(self):
        self._used_space_bytes = await self.db.get_stored_blob_disk_usage()
        return self._used_space_bytes

    async def get_space_used_mb(self, cached=True):
        cached = cached and self._used_space_bytes is not None
        space_used_bytes = self._used_space_bytes if cached else await self.get_space_used_bytes()
        return {key: int(value/1024.0/1024.0) for key, value in space_used_bytes.items()}

    async def clean(self):
        await self._clean(False)
        await self._clean(True)

    async def _clean(self, is_network_blob=False):
        space_used_mb = await self.get_space_used_mb(cached=False)
        if is_network_blob:
            space_used_mb = space_used_mb['network_storage']
        else:
            space_used_mb = space_used_mb['content_storage'] + space_used_mb['private_storage']
        storage_limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
        if self.analytics:
            asyncio.create_task(
                self.analytics.send_disk_space_used(space_used_mb, storage_limit_mb, is_network_blob)
            )
        delete = []
        available = storage_limit_mb - space_used_mb
        if storage_limit_mb == 0 if not is_network_blob else available >= 0:
            return 0
        for blob_hash, file_size, _ in await self.db.get_stored_blobs(is_mine=False, is_network_blob=is_network_blob):
            delete.append(blob_hash)
            available += int(file_size/1024.0/1024.0)
            if available >= 0:
                break
        if delete:
            await self.db.stop_all_files()
            await self.blob_manager.delete_blobs(delete, delete_from_db=True)
            self._used_space_bytes = None
        return len(delete)

    async def cleaning_loop(self):
        while self.running:
            await asyncio.sleep(self.cleaning_interval)
            await self.clean()

    async def start(self):
        self.running = True
        self.task = asyncio.create_task(self.cleaning_loop())
        self.task.add_done_callback(lambda _: log.info("Stopping blob cleanup service."))

    async def stop(self):
        if self.running:
            self.running = False
            self.task.cancel()
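A minimal sketch of driving the new `DiskSpaceManager`; the `config`, `db` and `blob_manager` objects are assumed to be the SDK's existing components, and only calls shown in the new file are used:

```python
async def run_cleanup(config, db, blob_manager):
    # config must expose blob_storage_limit / network_storage_limit per the class above
    manager = DiskSpaceManager(config, db, blob_manager, cleaning_interval=30 * 60)
    await manager.start()                      # schedules the periodic cleaning_loop()
    print(await manager.get_space_used_mb())   # per-category dict, e.g. 'content_storage'
    await manager.clean()                      # or force a single cleanup pass immediately
    await manager.stop()
```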
@ -3,7 +3,7 @@ import logging
 import asyncio
 from io import BytesIO
 from lbry.error import InvalidBlobHashError, InvalidDataError
-from lbry.cryptoutils import get_lbry_hash_obj
+from lbry.utils import get_lbry_hash_obj

 log = logging.getLogger(__name__)

@ -32,7 +32,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         self.buf = b''

         # this is here to handle the race when the downloader is closed right as response_fut gets a result
-        self.closed = asyncio.Event(loop=self.loop)
+        self.closed = asyncio.Event()

     def data_received(self, data: bytes):
         if self.connection_manager:

@ -95,7 +95,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         if self._response_fut and not self._response_fut.done():
             self._response_fut.set_exception(err)

-    async def _download_blob(self) -> typing.Tuple[int, Optional['BlobExchangeClientProtocol']]:
+    async def _download_blob(self) -> typing.Tuple[int, Optional['BlobExchangeClientProtocol']]:  # pylint: disable=too-many-return-statements
         """
         :return: download success (bool), connected protocol (BlobExchangeClientProtocol)
         """

@ -111,7 +111,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         self.transport.write(msg)
         if self.connection_manager:
             self.connection_manager.sent_data(f"{self.peer_address}:{self.peer_port}", len(msg))
-        response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout, loop=self.loop)
+        response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout)
         availability_response = response.get_availability_response()
         price_response = response.get_price_response()
         blob_response = response.get_blob_response()

@ -151,7 +151,9 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
                 f" timeout in {self.peer_timeout}"
             log.debug(msg)
             msg = f"downloaded {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}"
-            await asyncio.wait_for(self.writer.finished, self.peer_timeout, loop=self.loop)
+            await asyncio.wait_for(self.writer.finished, self.peer_timeout)
+            # wait for the io to finish
+            await self.blob.verified.wait()
             log.info("%s at %fMB/s", msg,
                      round((float(self._blob_bytes_received) /
                             float(time.perf_counter() - start_time)) / 1000000.0, 2))

@ -185,7 +187,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         try:
             self._blob_bytes_received = 0
             self.blob, self.writer = blob, blob.get_blob_writer(self.peer_address, self.peer_port)
-            self._response_fut = asyncio.Future(loop=self.loop)
+            self._response_fut = asyncio.Future()
             return await self._download_blob()
         except OSError:
             # i'm not sure how to fix this race condition - jack

@ -213,11 +215,11 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
             self.connection_manager.connection_made(f"{self.peer_address}:{self.peer_port}")
         log.debug("connection made to %s:%i", self.peer_address, self.peer_port)

-    def connection_lost(self, reason):
+    def connection_lost(self, exc):
         if self.connection_manager:
             self.connection_manager.outgoing_connection_lost(f"{self.peer_address}:{self.peer_port}")
-        log.debug("connection lost to %s:%i (reason: %s, %s)", self.peer_address, self.peer_port, str(reason),
-                  str(type(reason)))
+        log.debug("connection lost to %s:%i (reason: %s, %s)", self.peer_address, self.peer_port, str(exc),
+                  str(type(exc)))
         self.close()


@ -242,7 +244,7 @@ async def request_blob(loop: asyncio.AbstractEventLoop, blob: Optional['Abstract
     try:
         if not connected_protocol:
             await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port),
-                                   peer_connect_timeout, loop=loop)
+                                   peer_connect_timeout)
             connected_protocol = protocol
         if blob is None or blob.get_is_verified() or not blob.is_writeable():
             # blob is None happens when we are just opening a connection
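A recurring change throughout these files is dropping the explicit `loop=` argument from `asyncio.Event()`, `asyncio.Queue()`, `asyncio.wait_for()` and similar calls: passing a loop was deprecated in Python 3.8 and has since been removed from primitives such as `asyncio.Event` and from `asyncio.wait_for` in Python 3.10. Created inside a coroutine, these objects simply bind to the running loop:

```python
import asyncio


async def main():
    # no loop= argument: the Event binds to the currently running loop
    ready = asyncio.Event()
    asyncio.get_running_loop().call_later(0.1, ready.set)
    await asyncio.wait_for(ready.wait(), timeout=1)
    print("event fired")


asyncio.run(main())
```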
@ -3,6 +3,7 @@ import typing
 import logging
 from lbry.utils import cache_concurrent
 from lbry.blob_exchange.client import request_blob
+from lbry.dht.node import get_kademlia_peers_from_hosts
 if typing.TYPE_CHECKING:
     from lbry.conf import Config
     from lbry.dht.node import Node

@ -29,7 +30,7 @@ class BlobDownloader:
         self.failures: typing.Dict['KademliaPeer', int] = {}
         self.connection_failures: typing.Set['KademliaPeer'] = set()
         self.connections: typing.Dict['KademliaPeer', 'BlobExchangeClientProtocol'] = {}
-        self.is_running = asyncio.Event(loop=self.loop)
+        self.is_running = asyncio.Event()

     def should_race_continue(self, blob: 'AbstractBlob'):
         max_probes = self.config.max_connections_per_download * (1 if self.connections else 10)

@ -63,8 +64,8 @@ class BlobDownloader:
         self.scores[peer] = bytes_received / elapsed if bytes_received and elapsed else 1

     async def new_peer_or_finished(self):
-        active_tasks = list(self.active_connections.values()) + [asyncio.sleep(1)]
-        await asyncio.wait(active_tasks, loop=self.loop, return_when='FIRST_COMPLETED')
+        active_tasks = list(self.active_connections.values()) + [asyncio.create_task(asyncio.sleep(1))]
+        await asyncio.wait(active_tasks, return_when='FIRST_COMPLETED')

     def cleanup_active(self):
         if not self.active_connections and not self.connections:

@ -87,7 +88,6 @@ class BlobDownloader:
         if blob.get_is_verified():
             return blob
         self.is_running.set()
-        tried_for_this_blob: typing.Set['KademliaPeer'] = set()
         try:
             while not blob.get_is_verified() and self.is_running.is_set():
                 batch: typing.Set['KademliaPeer'] = set(self.connections.keys())

@ -97,24 +97,15 @@ class BlobDownloader:
                     "%s running, %d peers, %d ignored, %d active, %s connections", blob_hash[:6],
                     len(batch), len(self.ignored), len(self.active_connections), len(self.connections)
                 )
-                re_add: typing.Set['KademliaPeer'] = set()
                 for peer in sorted(batch, key=lambda peer: self.scores.get(peer, 0), reverse=True):
                     if peer in self.ignored:
                         continue
-                    if peer in tried_for_this_blob:
+                    if peer in self.active_connections or not self.should_race_continue(blob):
                         continue
-                    if peer in self.active_connections:
-                        if peer not in re_add:
-                            re_add.add(peer)
-                        continue
-                    if not self.should_race_continue(blob):
-                        break
                     log.debug("request %s from %s:%i", blob_hash[:8], peer.address, peer.tcp_port)
                     t = self.loop.create_task(self.request_blob_from_peer(blob, peer, connection_id))
                     self.active_connections[peer] = t
-                    tried_for_this_blob.add(peer)
-                if not re_add:
-                    self.peer_queue.put_nowait(list(batch))
+                    self.peer_queue.put_nowait(list(batch))
                 await self.new_peer_or_finished()
                 self.cleanup_active()
             log.debug("downloaded %s", blob_hash[:8])

@ -133,11 +124,14 @@ class BlobDownloader:
         protocol.close()


-async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', node: 'Node',
+async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node',
                         blob_hash: str) -> 'AbstractBlob':
-    search_queue = asyncio.Queue(loop=loop, maxsize=config.max_connections_per_download)
+    search_queue = asyncio.Queue(maxsize=config.max_connections_per_download)
     search_queue.put_nowait(blob_hash)
-    peer_queue, accumulate_task = node.accumulate_peers(search_queue)
+    peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue)
+    fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers)
+    if fixed_peers:
+        loop.call_later(config.fixed_peer_delay, peer_queue.put_nowait, fixed_peers)
     downloader = BlobDownloader(loop, config, blob_manager, peer_queue)
     try:
         return await downloader.download_blob(blob_hash)
@ -46,7 +46,7 @@ class BlobAvailabilityRequest(BlobMessage):

     def __init__(self, requested_blobs: typing.List[str], lbrycrd_address: typing.Optional[bool] = True,
                  **kwargs) -> None:
-        assert len(requested_blobs)
+        assert len(requested_blobs) > 0
         self.requested_blobs = requested_blobs
         self.lbrycrd_address = lbrycrd_address

@ -134,9 +134,9 @@ class BlobErrorResponse(BlobMessage):
     }


-blob_request_types = typing.Union[BlobPriceRequest, BlobAvailabilityRequest, BlobDownloadRequest,
+blob_request_types = typing.Union[BlobPriceRequest, BlobAvailabilityRequest, BlobDownloadRequest,  # pylint: disable=invalid-name
                                   BlobPaymentAddressRequest]
-blob_response_types = typing.Union[BlobPriceResponse, BlobAvailabilityResponse, BlobDownloadResponse,
+blob_response_types = typing.Union[BlobPriceResponse, BlobAvailabilityResponse, BlobDownloadResponse,  # pylint: disable=invalid-name
                                    BlobErrorResponse, BlobPaymentAddressResponse]


@ -157,10 +157,10 @@ def _parse_blob_response(response_msg: bytes) -> typing.Tuple[typing.Optional[ty
         except ValueError:
             continue
         possible_response_keys = {
             BlobPaymentAddressResponse.key,
             BlobAvailabilityResponse.key,
             BlobPriceResponse.key,
             BlobDownloadResponse.key
         }
         if isinstance(response, dict) and response.keys():
             if set(response.keys()).issubset(possible_response_keys):

@ -179,7 +179,7 @@ class BlobRequest:
         return d

     def _get_request(self, request_type: blob_request_types):
-        request = tuple(filter(lambda r: type(r) == request_type, self.requests))
+        request = tuple(filter(lambda r: type(r) == request_type, self.requests))  # pylint: disable=unidiomatic-typecheck
         if request:
             return request[0]

@ -235,7 +235,7 @@ class BlobResponse:
         return d

     def _get_response(self, response_type: blob_response_types):
-        response = tuple(filter(lambda r: type(r) == response_type, self.responses))
+        response = tuple(filter(lambda r: type(r) == response_type, self.responses))  # pylint: disable=unidiomatic-typecheck
         if response:
             return response[0]

@ -280,4 +280,3 @@ class BlobResponse:
             if response_type.key in response
         ])
         return cls(requests, extra)
-
@ -1,6 +1,7 @@
 import asyncio
 import binascii
 import logging
+import socket
 import typing
 from json.decoder import JSONDecodeError
 from lbry.blob_exchange.serialization import BlobResponse, BlobRequest, blob_response_types

@ -24,19 +25,19 @@ class BlobServerProtocol(asyncio.Protocol):
         self.idle_timeout = idle_timeout
         self.transfer_timeout = transfer_timeout
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event(loop=self.loop)
+        self.started_listening = asyncio.Event()
         self.buf = b''
         self.transport: typing.Optional[asyncio.Transport] = None
         self.lbrycrd_address = lbrycrd_address
         self.peer_address_and_port: typing.Optional[str] = None
-        self.started_transfer = asyncio.Event(loop=self.loop)
-        self.transfer_finished = asyncio.Event(loop=self.loop)
+        self.started_transfer = asyncio.Event()
+        self.transfer_finished = asyncio.Event()
         self.close_on_idle_task: typing.Optional[asyncio.Task] = None

     async def close_on_idle(self):
         while self.transport:
             try:
-                await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout, loop=self.loop)
+                await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout)
             except asyncio.TimeoutError:
                 log.debug("closing idle connection from %s", self.peer_address_and_port)
                 return self.close()

@ -96,27 +97,30 @@ class BlobServerProtocol(asyncio.Protocol):
                 incoming_blob = {'blob_hash': blob.blob_hash, 'length': blob.length}
                 responses.append(BlobDownloadResponse(incoming_blob=incoming_blob))
                 self.send_response(responses)
-                bh = blob.blob_hash[:8]
-                log.debug("send %s to %s:%i", bh, peer_address, peer_port)
+                blob_hash = blob.blob_hash[:8]
+                log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
                 self.started_transfer.set()
                 try:
-                    sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout, loop=self.loop)
+                    sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout)
                     if sent and sent > 0:
                         self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent)
-                        log.info("sent %s (%i bytes) to %s:%i", bh, sent, peer_address, peer_port)
+                        log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port)
                     else:
-                        log.debug("stopped sending %s to %s:%i", bh, peer_address, peer_port)
-                except (OSError, asyncio.TimeoutError) as err:
+                        self.close()
+                        log.debug("stopped sending %s to %s:%i", blob_hash, peer_address, peer_port)
+                        return
+                except (OSError, ValueError, asyncio.TimeoutError) as err:
                     if isinstance(err, asyncio.TimeoutError):
-                        log.debug("timed out sending blob %s to %s", bh, peer_address)
+                        log.debug("timed out sending blob %s to %s", blob_hash, peer_address)
                     else:
-                        log.warning("could not read blob %s to send %s:%i", bh, peer_address, peer_port)
+                        log.warning("could not read blob %s to send %s:%i", blob_hash, peer_address, peer_port)
                     self.close()
+                    return
                 finally:
                     self.transfer_finished.set()
             else:
                 log.info("don't have %s to send %s:%i", blob.blob_hash[:8], peer_address, peer_port)
-        if responses:
+        if responses and not self.transport.is_closing():
             self.send_response(responses)

     def data_received(self, data):

@ -127,14 +131,14 @@ class BlobServerProtocol(asyncio.Protocol):
             return
         if data:
             self.blob_manager.connection_manager.received_data(self.peer_address_and_port, len(data))
-        message, separator, remainder = data.rpartition(b'}')
+        _, separator, remainder = data.rpartition(b'}')
         if not separator:
             self.buf += data
             return
         try:
             request = BlobRequest.deserialize(self.buf + data)
             self.buf = remainder
-        except JSONDecodeError:
+        except (UnicodeDecodeError, JSONDecodeError):
             log.error("request from %s is not valid json (%i bytes): %s", self.peer_address_and_port,
                       len(self.buf + data), '' if not data else binascii.hexlify(self.buf + data).decode())
             self.close()

@ -153,7 +157,7 @@ class BlobServer:
         self.loop = loop
         self.blob_manager = blob_manager
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event(loop=self.loop)
+        self.started_listening = asyncio.Event()
         self.lbrycrd_address = lbrycrd_address
         self.idle_timeout = idle_timeout
         self.transfer_timeout = transfer_timeout

@ -164,6 +168,13 @@ class BlobServer:
             raise Exception("already running")

         async def _start_server():
+            # checking if the port is in use
+            # thx https://stackoverflow.com/a/52872579
+            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+                if s.connect_ex(('localhost', port)) == 0:
+                    # the port is already in use!
+                    log.error("Failed to bind TCP %s:%d", interface, port)
+
             server = await self.loop.create_server(
                 lambda: self.server_protocol_class(self.loop, self.blob_manager, self.lbrycrd_address,
                                                    self.idle_timeout, self.transfer_timeout),
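The `_start_server` change above probes whether the TCP port is already bound before creating the server, using `connect_ex`. The same check in isolation, with an illustrative host and port:

```python
import socket


def port_in_use(host: str, port: int) -> bool:
    # connect_ex returns 0 when something is already listening on (host, port)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex((host, port)) == 0


if port_in_use('localhost', 3333):
    print("Failed to bind TCP localhost:3333")
```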
@ -1,3 +1,4 @@
 # don't touch this. CI server changes this during build/deployment
 BUILD = "dev"
-BUILD_COMMIT = "source installation"
+COMMIT_HASH = "none"
+DOCKER_TAG = "none"
@ -1,21 +1,21 @@
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import sys
|
import sys
|
||||||
import typing
|
|
||||||
import logging
|
import logging
|
||||||
import yaml
|
from typing import List, Dict, Tuple, Union, TypeVar, Generic, Optional
|
||||||
from argparse import ArgumentParser
|
from argparse import ArgumentParser
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
from appdirs import user_data_dir, user_config_dir
|
from appdirs import user_data_dir, user_config_dir
|
||||||
|
import yaml
|
||||||
from lbry.error import InvalidCurrencyError
|
from lbry.error import InvalidCurrencyError
|
||||||
from lbry.dht import constants
|
from lbry.dht import constants
|
||||||
from torba.client.coinselection import STRATEGIES
|
from lbry.wallet.coinselection import STRATEGIES
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
NOT_SET = type('NOT_SET', (object,), {})
|
NOT_SET = type('NOT_SET', (object,), {}) # pylint: disable=invalid-name
|
||||||
T = typing.TypeVar('T')
|
T = TypeVar('T')
|
||||||
|
|
||||||
CURRENCIES = {
|
CURRENCIES = {
|
||||||
'BTC': {'type': 'crypto'},
|
'BTC': {'type': 'crypto'},
|
||||||
|
@@ -24,18 +24,18 @@ CURRENCIES = {
 }


-class Setting(typing.Generic[T]):
+class Setting(Generic[T]):

-    def __init__(self, doc: str, default: typing.Optional[T] = None,
-                 previous_names: typing.Optional[typing.List[str]] = None,
-                 metavar: typing.Optional[str] = None):
+    def __init__(self, doc: str, default: Optional[T] = None,
+                 previous_names: Optional[List[str]] = None,
+                 metavar: Optional[str] = None):
         self.doc = doc
         self.default = default
         self.previous_names = previous_names or []
         self.metavar = metavar

     def __set_name__(self, owner, name):
-        self.name = name
+        self.name = name  # pylint: disable=attribute-defined-outside-init

     @property
     def cli_name(self):

@@ -45,7 +45,7 @@ class Setting(typing.Generic[T]):
     def no_cli_name(self):
         return f"--no-{self.name.replace('_', '-')}"

-    def __get__(self, obj: typing.Optional['BaseConfig'], owner) -> T:
+    def __get__(self, obj: Optional['BaseConfig'], owner) -> T:
         if obj is None:
             return self
         for location in obj.search_order:

@@ -53,7 +53,7 @@ class Setting(typing.Generic[T]):
             return location[self.name]
         return self.default

-    def __set__(self, obj: 'BaseConfig', val: typing.Union[T, NOT_SET]):
+    def __set__(self, obj: 'BaseConfig', val: Union[T, NOT_SET]):
         if val == NOT_SET:
             for location in obj.modify_order:
                 if self.name in location:

@@ -63,13 +63,25 @@ class Setting(typing.Generic[T]):
         for location in obj.modify_order:
             location[self.name] = val

-    def validate(self, val):
+    def is_set(self, obj: 'BaseConfig') -> bool:
+        for location in obj.search_order:
+            if self.name in location:
+                return True
+        return False
+
+    def is_set_to_default(self, obj: 'BaseConfig') -> bool:
+        for location in obj.search_order:
+            if self.name in location:
+                return location[self.name] == self.default
+        return False
+
+    def validate(self, value):
         raise NotImplementedError()

-    def deserialize(self, value):
+    def deserialize(self, value):  # pylint: disable=no-self-use
         return value

-    def serialize(self, value):
+    def serialize(self, value):  # pylint: disable=no-self-use
         return value

     def contribute_to_argparse(self, parser: ArgumentParser):
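The settings in this file are typed descriptors that resolve values through a config object's `search_order`. A minimal usage sketch (not part of the diff; it assumes the `BaseConfig` plumbing defined further down in this file, and the setting names/values here are illustrative):

```python
# Sketch only: declaring settings with the descriptor classes above and using
# the new is_set()/is_set_to_default() helpers introduced by this diff.
class ExampleConfig(BaseConfig):
    tcp_port = Integer("TCP port to listen for incoming blob requests", 3333)
    data_dir = Path("Directory path to store blobs.", metavar='DIR')

config = ExampleConfig()
print(config.tcp_port)                                    # 3333 -> falls back to the default
print(ExampleConfig.tcp_port.is_set(config))              # False: nothing written yet
config.tcp_port = 4444                                    # stored via Setting.__set__
print(ExampleConfig.tcp_port.is_set_to_default(config))   # False: 4444 != default
```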
@@ -82,14 +94,18 @@ class Setting(typing.Generic[T]):


 class String(Setting[str]):
-    def validate(self, val):
-        assert isinstance(val, str), \
+    def validate(self, value):
+        assert isinstance(value, str), \
             f"Setting '{self.name}' must be a string."

+    # TODO: removes this after pylint starts to understand generics
+    def __get__(self, obj: Optional['BaseConfig'], owner) -> str:  # pylint: disable=useless-super-delegation
+        return super().__get__(obj, owner)


 class Integer(Setting[int]):
-    def validate(self, val):
-        assert isinstance(val, int), \
+    def validate(self, value):
+        assert isinstance(value, int), \
             f"Setting '{self.name}' must be an integer."

     def deserialize(self, value):

@@ -97,8 +113,8 @@ class Integer(Setting[int]):


 class Float(Setting[float]):
-    def validate(self, val):
-        assert isinstance(val, float), \
+    def validate(self, value):
+        assert isinstance(value, float), \
             f"Setting '{self.name}' must be a decimal."

     def deserialize(self, value):

@@ -106,8 +122,8 @@ class Float(Setting[float]):


 class Toggle(Setting[bool]):
-    def validate(self, val):
-        assert isinstance(val, bool), \
+    def validate(self, value):
+        assert isinstance(value, bool), \
             f"Setting '{self.name}' must be a true/false value."

     def contribute_to_argparse(self, parser: ArgumentParser):

@@ -127,10 +143,10 @@ class Toggle(Setting[bool]):


 class Path(String):
-    def __init__(self, doc: str, default: str = '', *args, **kwargs):
+    def __init__(self, doc: str, *args, default: str = '', **kwargs):
         super().__init__(doc, default, *args, **kwargs)

-    def __get__(self, obj, owner):
+    def __get__(self, obj, owner) -> str:
         value = super().__get__(obj, owner)
         if isinstance(value, str):
             return os.path.expanduser(os.path.expandvars(value))
@@ -187,7 +203,7 @@ class MaxKeyFee(Setting[dict]):
         )
         parser.add_argument(
             self.no_cli_name,
-            help=f"Disable maximum key fee check.",
+            help="Disable maximum key fee check.",
             dest=self.name,
             const=None,
             action="store_const",

@@ -196,7 +212,7 @@ class MaxKeyFee(Setting[dict]):


 class StringChoice(String):
-    def __init__(self, doc: str, valid_values: typing.List[str], default: str, *args, **kwargs):
+    def __init__(self, doc: str, valid_values: List[str], default: str, *args, **kwargs):
         super().__init__(doc, default, *args, **kwargs)
         if not valid_values:
             raise ValueError("No valid values provided")

@@ -204,16 +220,16 @@ class StringChoice(String):
             raise ValueError(f"Default value must be one of: {', '.join(valid_values)}")
         self.valid_values = valid_values

-    def validate(self, val):
-        super().validate(val)
-        if val not in self.valid_values:
+    def validate(self, value):
+        super().validate(value)
+        if value not in self.valid_values:
             raise ValueError(f"Setting '{self.name}' value must be one of: {', '.join(self.valid_values)}")


 class ListSetting(Setting[list]):

-    def validate(self, val):
-        assert isinstance(val, (tuple, list)), \
+    def validate(self, value):
+        assert isinstance(value, (tuple, list)), \
             f"Setting '{self.name}' must be a tuple or list."

     def contribute_to_argparse(self, parser: ArgumentParser):

@@ -226,10 +242,10 @@ class ListSetting(Setting[list]):

 class Servers(ListSetting):

-    def validate(self, val):
-        assert isinstance(val, (tuple, list)), \
+    def validate(self, value):
+        assert isinstance(value, (tuple, list)), \
             f"Setting '{self.name}' must be a tuple or list of servers."
-        for idx, server in enumerate(val):
+        for idx, server in enumerate(value):
             assert isinstance(server, (tuple, list)) and len(server) == 2, \
                 f"Server defined '{server}' at index {idx} in setting " \
                 f"'{self.name}' must be a tuple or list of two items."

@@ -260,26 +276,104 @@ class Servers(ListSetting):


 class Strings(ListSetting):

-    def validate(self, val):
-        assert isinstance(val, (tuple, list)), \
+    def validate(self, value):
+        assert isinstance(value, (tuple, list)), \
             f"Setting '{self.name}' must be a tuple or list of strings."
-        for idx, string in enumerate(val):
+        for idx, string in enumerate(value):
             assert isinstance(string, str), \
                 f"Value of '{string}' at index {idx} in setting " \
                 f"'{self.name}' must be a string."

+class KnownHubsList:
+
+    def __init__(self, config: 'Config' = None, file_name: str = 'known_hubs.yml'):
+        self.file_name = file_name
+        self.path = os.path.join(config.wallet_dir, self.file_name) if config else None
+        self.hubs: Dict[Tuple[str, int], Dict] = {}
+        if self.exists:
+            self.load()
+
+    @property
+    def exists(self):
+        return self.path and os.path.exists(self.path)
+
+    @property
+    def serialized(self) -> Dict[str, Dict]:
+        return {f"{host}:{port}": details for (host, port), details in self.hubs.items()}
+
+    def filter(self, match_none=False, **kwargs):
+        if not kwargs:
+            return self.hubs
+        result = {}
+        for hub, details in self.hubs.items():
+            for key, constraint in kwargs.items():
+                value = details.get(key)
+                if value == constraint or (match_none and value is None):
+                    result[hub] = details
+                    break
+        return result
+
+    def load(self):
+        if self.path:
+            with open(self.path, 'r') as known_hubs_file:
+                raw = known_hubs_file.read()
+                for hub, details in yaml.safe_load(raw).items():
+                    self.set(hub, details)
+
+    def save(self):
+        if self.path:
+            with open(self.path, 'w') as known_hubs_file:
+                known_hubs_file.write(yaml.safe_dump(self.serialized, default_flow_style=False))
+
+    def set(self, hub: str, details: Dict):
+        if hub and hub.count(':') == 1:
+            host, port = hub.split(':')
+            hub_parts = (host, int(port))
+            if hub_parts not in self.hubs:
+                self.hubs[hub_parts] = details
+                return hub
+
+    def add_hubs(self, hubs: List[str]):
+        added = False
+        for hub in hubs:
+            if self.set(hub, {}) is not None:
+                added = True
+        return added
+
+    def items(self):
+        return self.hubs.items()
+
+    def __bool__(self):
+        return len(self) > 0
+
+    def __len__(self):
+        return self.hubs.__len__()
+
+    def __iter__(self):
+        return iter(self.hubs)
+
+
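A brief usage sketch of the `KnownHubsList` added above (not part of the diff; hostnames and metadata are illustrative). Constructed without a `Config` it stays purely in memory, otherwise it persists to `known_hubs.yml` under the wallet directory:

```python
hubs = KnownHubsList()                                   # no config -> nothing written to disk
hubs.add_hubs(["spv11.lbry.com:50001", "hub.example.org:50001"])
hubs.set("spv12.lbry.com:50001", {"jurisdiction": "US"})
us_hubs = hubs.filter(jurisdiction="US")                 # {('spv12.lbry.com', 50001): {'jurisdiction': 'US'}}
print(len(hubs), bool(hubs), list(hubs))
```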
 class EnvironmentAccess:
     PREFIX = 'LBRY_'

-    def __init__(self, environ: dict):
-        self.environ = environ
+    def __init__(self, config: 'BaseConfig', environ: dict):
+        self.configuration = config
+        self.data = {}
+        if environ:
+            self.load(environ)
+
+    def load(self, environ):
+        for setting in self.configuration.get_settings():
+            value = environ.get(f'{self.PREFIX}{setting.name.upper()}', NOT_SET)
+            if value != NOT_SET and not (isinstance(setting, ListSetting) and value is None):
+                self.data[setting.name] = setting.deserialize(value)

     def __contains__(self, item: str):
-        return f'{self.PREFIX}{item.upper()}' in self.environ
+        return item in self.data

     def __getitem__(self, item: str):
-        return self.environ[f'{self.PREFIX}{item.upper()}']
+        return self.data[item]


 class ArgumentAccess:
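With this change, `LBRY_*` variables are matched against known settings up front and deserialized once, instead of being looked up lazily per attribute access. A hedged sketch of the effect (not part of the diff; it assumes `Config` and `set_environment` behave as defined later in this file, and the value is illustrative):

```python
import os

os.environ['LBRY_TCP_PORT'] = '4444'
config = Config()
config.set_environment()                        # builds EnvironmentAccess(self, os.environ)
assert 'tcp_port' in config.environment         # lookup is by setting name against parsed data
assert config.environment['tcp_port'] == 4444   # already deserialized to an int
```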
@@ -320,7 +414,7 @@ class ConfigFileAccess:
         cls = type(self.configuration)
         with open(self.path, 'r') as config_file:
             raw = config_file.read()
-        serialized = yaml.load(raw) or {}
+        serialized = yaml.safe_load(raw) or {}
         for key, value in serialized.items():
             attr = getattr(cls, key, None)
             if attr is None:

@@ -364,7 +458,7 @@ class ConfigFileAccess:
         del self.data[key]


-TBC = typing.TypeVar('TBC', bound='BaseConfig')
+TBC = TypeVar('TBC', bound='BaseConfig')


 class BaseConfig:

@@ -438,7 +532,7 @@ class BaseConfig:
         self.arguments = ArgumentAccess(self, args)

     def set_environment(self, environ=None):
-        self.environment = EnvironmentAccess(environ or os.environ)
+        self.environment = EnvironmentAccess(self, environ or os.environ)

     def set_persisted(self, config_file_path=None):
         if config_file_path is None:

@@ -457,7 +551,27 @@ class BaseConfig:
         self.persisted.save()


-class CLIConfig(BaseConfig):
+class TranscodeConfig(BaseConfig):
+
+    ffmpeg_path = String('A list of places to check for ffmpeg and ffprobe. '
+                         f'$data_dir/ffmpeg/bin and $PATH are checked afterward. Separator: {os.pathsep}',
+                         '', previous_names=['ffmpeg_folder'])
+    video_encoder = String('FFmpeg codec and parameters for the video encoding. '
+                           'Example: libaom-av1 -crf 25 -b:v 0 -strict experimental',
+                           'libx264 -crf 24 -preset faster -pix_fmt yuv420p')
+    video_bitrate_maximum = Integer('Maximum bits per second allowed for video streams (0 to disable).', 5_000_000)
+    video_scaler = String('FFmpeg scaling parameters for reducing bitrate. '
+                          'Example: -vf "scale=-2:720,fps=24" -maxrate 5M -bufsize 3M',
+                          r'-vf "scale=if(gte(iw\,ih)\,min(1920\,iw)\,-2):if(lt(iw\,ih)\,min(1920\,ih)\,-2)" '
+                          r'-maxrate 5500K -bufsize 5000K')
+    audio_encoder = String('FFmpeg codec and parameters for the audio encoding. '
+                           'Example: libopus -b:a 128k',
+                           'aac -b:a 160k')
+    volume_filter = String('FFmpeg filter for audio normalization. Exmple: -af loudnorm', '')
+    volume_analysis_time = Integer('Maximum seconds into the file that we examine audio volume (0 to disable).', 240)
+
+
+class CLIConfig(TranscodeConfig):

     api = String('Host name and port for lbrynet daemon API.', 'localhost:5279', metavar='HOST:PORT')

@@ -475,6 +589,9 @@ class CLIConfig(BaseConfig):


 class Config(CLIConfig):

+    jurisdiction = String("Limit interactions to wallet server in this jurisdiction.")
+
     # directories
     data_dir = Path("Directory path to store blobs.", metavar='DIR')
     download_dir = Path(

@@ -496,7 +613,8 @@ class Config(CLIConfig):
         "ports or have firewall rules you likely want to disable this.", True
     )
     udp_port = Integer("UDP port for communicating on the LBRY DHT", 4444, previous_names=['dht_node_port'])
-    tcp_port = Integer("TCP port to listen for incoming blob requests", 3333, previous_names=['peer_port'])
+    tcp_port = Integer("TCP port to listen for incoming blob requests", 4444, previous_names=['peer_port'])
+    prometheus_port = Integer("Port to expose prometheus metrics (off by default)", 0)
     network_interface = String("Interface to use for the DHT and blob exchange", '0.0.0.0')

     # routing table

@@ -504,17 +622,24 @@ class Config(CLIConfig):
         "Routing table bucket index below which we always split the bucket if given a new key to add to it and "
         "the bucket is full. As this value is raised the depth of the routing table (and number of peers in it) "
         "will increase. This setting is used by seed nodes, you probably don't want to change it during normal "
-        "use.", 1
+        "use.", 2
+    )
+    is_bootstrap_node = Toggle(
+        "When running as a bootstrap node, disable all logic related to balancing the routing table, so we can "
+        "add as many peers as possible and better help first-runs.", False
     )

     # protocol timeouts
     download_timeout = Float("Cumulative timeout for a stream to begin downloading before giving up", 30.0)
     blob_download_timeout = Float("Timeout to download a blob from a peer", 30.0)
+    hub_timeout = Float("Timeout when making a hub request", 30.0)
     peer_connect_timeout = Float("Timeout to establish a TCP connection to a peer", 3.0)
-    node_rpc_timeout = Float("Timeout when making a DHT request", constants.rpc_timeout)
+    node_rpc_timeout = Float("Timeout when making a DHT request", constants.RPC_TIMEOUT)

     # blob announcement and download
     save_blobs = Toggle("Save encrypted blob files for hosting, otherwise download blobs to memory only.", True)
+    network_storage_limit = Integer("Disk space in MB to be allocated for helping the P2P network. 0 = disable", 0)
+    blob_storage_limit = Integer("Disk space in MB to be allocated for blob storage. 0 = no limit", 0)
     blob_lru_cache_size = Integer(
         "LRU cache size for decrypted downloaded blobs used to minimize re-downloading the same blobs when "
         "replying to a range request. Set to 0 to disable.", 32

@@ -531,6 +656,7 @@ class Config(CLIConfig):
         "Maximum number of peers to connect to while downloading a blob", 4,
         previous_names=['max_connections_per_stream']
     )
+    concurrent_hub_requests = Integer("Maximum number of concurrent hub requests", 32)
     fixed_peer_delay = Float(
         "Amount of seconds before adding the reflector servers as potential peers to download from in case dht"
         "peers are not found or are slow", 2.0

@@ -539,6 +665,7 @@ class Config(CLIConfig):
         "Don't download streams with fees exceeding this amount. When set to "
         "null, the amount is unbounded.", {'currency': 'USD', 'amount': 50.0}
     )
+    max_wallet_server_fee = String("Maximum daily LBC amount allowed as payment for wallet servers.", "0.0")

     # reflector settings
     reflect_streams = Toggle(

@@ -550,42 +677,62 @@ class Config(CLIConfig):
     )

     # servers
-    reflector_servers = Servers("Reflector re-hosting servers", [
+    reflector_servers = Servers("Reflector re-hosting servers for mirroring publishes", [
         ('reflector.lbry.com', 5566)
     ])

+    fixed_peers = Servers("Fixed peers to fall back to if none are found on P2P for a blob", [
+        ('cdn.reflector.lbry.com', 5567)
+    ])
+
+    tracker_servers = Servers("BitTorrent-compatible (BEP15) UDP trackers for helping P2P discovery", [
+        ('tracker.lbry.com', 9252),
+        ('tracker.lbry.grin.io', 9252),
+        ('tracker.lbry.pigg.es', 9252),
+        ('tracker.lizard.technology', 9252),
+        ('s1.lbry.network', 9252),
+    ])
+
     lbryum_servers = Servers("SPV wallet servers", [
-        ('spv1.lbry.com', 50001),
-        ('spv2.lbry.com', 50001),
-        ('spv3.lbry.com', 50001),
-        ('spv4.lbry.com', 50001),
-        ('spv5.lbry.com', 50001),
-        ('spv6.lbry.com', 50001),
-        ('spv7.lbry.com', 50001),
-        ('spv8.lbry.com', 50001),
-        ('spv9.lbry.com', 50001),
+        ('spv11.lbry.com', 50001),
+        ('spv12.lbry.com', 50001),
+        ('spv13.lbry.com', 50001),
+        ('spv14.lbry.com', 50001),
+        ('spv15.lbry.com', 50001),
+        ('spv16.lbry.com', 50001),
+        ('spv17.lbry.com', 50001),
+        ('spv18.lbry.com', 50001),
+        ('spv19.lbry.com', 50001),
+        ('hub.lbry.grin.io', 50001),
+        ('hub.lizard.technology', 50001),
+        ('s1.lbry.network', 50001),
     ])
     known_dht_nodes = Servers("Known nodes for bootstrapping connection to the DHT", [
+        ('dht.lbry.grin.io', 4444),  # Grin
+        ('dht.lbry.madiator.com', 4444),  # Madiator
+        ('dht.lbry.pigg.es', 4444),  # Pigges
         ('lbrynet1.lbry.com', 4444),  # US EAST
         ('lbrynet2.lbry.com', 4444),  # US WEST
         ('lbrynet3.lbry.com', 4444),  # EU
-        ('lbrynet4.lbry.com', 4444)  # ASIA
+        ('lbrynet4.lbry.com', 4444),  # ASIA
+        ('dht.lizard.technology', 4444),  # Jack
+        ('s2.lbry.network', 4444),
     ])

-    comment_server = String("Comment server API URL", "https://comments.lbry.com/api")

     # blockchain
     blockchain_name = String("Blockchain name - lbrycrd_main, lbrycrd_regtest, or lbrycrd_testnet", 'lbrycrd_main')
-    s3_headers_depth = Integer("download headers from s3 when the local height is more than 10 chunks behind", 96 * 10)
-    cache_time = Integer("Time to cache resolved claims", 150)  # TODO: use this

     # daemon
-    save_files = Toggle("Save downloaded files when calling `get` by default", True)
+    save_files = Toggle("Save downloaded files when calling `get` by default", False)
     components_to_skip = Strings("components which will be skipped during start-up of daemon", [])
     share_usage_data = Toggle(
-        "Whether to share usage stats and diagnostic info with LBRY.", True,
+        "Whether to share usage stats and diagnostic info with LBRY.", False,
         previous_names=['upload_log', 'upload_log', 'share_debug_info']
     )
     track_bandwidth = Toggle("Track bandwidth usage", True)
+    allowed_origin = String(
+        "Allowed `Origin` header value for API request (sent by browser), use * to allow "
+        "all hosts; default is to only allow API requests with no `Origin` value.", "")

     # media server
     streaming_server = String('Host name and port to serve streaming media over range requests',
@@ -595,7 +742,14 @@ class Config(CLIConfig):

     coin_selection_strategy = StringChoice(
         "Strategy to use when selecting UTXOs for a transaction",
-        STRATEGIES, "standard")
+        STRATEGIES, "prefer_confirmed"
+    )
+
+    transaction_cache_size = Integer("Transaction cache size", 2 ** 17)
+    save_resolved_claims = Toggle(
+        "Save content claims to the database when they are resolved to keep file_list up to date, "
+        "only disable this if file_x commands are not needed", True
+    )

     @property
     def streaming_host(self):

@@ -608,6 +762,7 @@ class Config(CLIConfig):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
         self.set_default_paths()
+        self.known_hubs = KnownHubsList(self)

     def set_default_paths(self):
         if 'darwin' in sys.platform.lower():

@@ -629,7 +784,7 @@ class Config(CLIConfig):
         return os.path.join(self.data_dir, 'lbrynet.log')


-def get_windows_directories() -> typing.Tuple[str, str, str]:
+def get_windows_directories() -> Tuple[str, str, str]:
     from lbry.winpaths import get_path, FOLDERID, UserHandle, \
         PathNotFoundException  # pylint: disable=import-outside-toplevel


@@ -651,19 +806,20 @@ def get_windows_directories() -> typing.Tuple[str, str, str]:
     return data_dir, lbryum_dir, download_dir


-def get_darwin_directories() -> typing.Tuple[str, str, str]:
+def get_darwin_directories() -> Tuple[str, str, str]:
     data_dir = user_data_dir('LBRY')
     lbryum_dir = os.path.expanduser('~/.lbryum')
     download_dir = os.path.expanduser('~/Downloads')
     return data_dir, lbryum_dir, download_dir


-def get_linux_directories() -> typing.Tuple[str, str, str]:
+def get_linux_directories() -> Tuple[str, str, str]:
     try:
         with open(os.path.join(user_config_dir(), 'user-dirs.dirs'), 'r') as xdg:
-            down_dir = re.search(r'XDG_DOWNLOAD_DIR=(.+)', xdg.read()).group(1)
-            down_dir = re.sub(r'\$HOME', os.getenv('HOME') or os.path.expanduser("~/"), down_dir)
-            download_dir = re.sub('\"', '', down_dir)
+            down_dir = re.search(r'XDG_DOWNLOAD_DIR=(.+)', xdg.read())
+            if down_dir:
+                down_dir = re.sub(r'\$HOME', os.getenv('HOME') or os.path.expanduser("~/"), down_dir.group(1))
+                download_dir = re.sub('\"', '', down_dir)
     except OSError:
         download_dir = os.getenv('XDG_DOWNLOAD_DIR')
         if not download_dir:
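The `get_linux_directories` hunk above guards against `user-dirs.dirs` having no `XDG_DOWNLOAD_DIR` entry, which previously raised an `AttributeError` on `.group(1)`. The same pattern in isolation, as a standalone sketch (function name and fallback are illustrative, not from the diff):

```python
import os
import re

def find_download_dir(user_dirs_text: str) -> str:
    # Look for XDG_DOWNLOAD_DIR="$HOME/..." and only dereference the match if it exists.
    match = re.search(r'XDG_DOWNLOAD_DIR=(.+)', user_dirs_text)
    if match:
        expanded = re.sub(r'\$HOME', os.getenv('HOME') or os.path.expanduser('~/'), match.group(1))
        return re.sub('"', '', expanded)
    return os.path.expanduser('~/Downloads')  # illustrative fallback

print(find_download_dir('XDG_DOWNLOAD_DIR="$HOME/Downloads"'))
print(find_download_dir(''))  # no entry -> falls back instead of crashing
```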
@@ -67,18 +67,18 @@ class ConnectionManager:

         while True:
             last = time.perf_counter()
-            await asyncio.sleep(0.1, loop=self.loop)
+            await asyncio.sleep(0.1)
             self._status['incoming_bps'].clear()
             self._status['outgoing_bps'].clear()
             now = time.perf_counter()
             while self.outgoing:
-                k, v = self.outgoing.popitem()
-                self._status['total_sent'] += v
-                self._status['outgoing_bps'][k] = v / (now - last)
+                k, sent = self.outgoing.popitem()
+                self._status['total_sent'] += sent
+                self._status['outgoing_bps'][k] = sent / (now - last)
             while self.incoming:
-                k, v = self.incoming.popitem()
-                self._status['total_received'] += v
-                self._status['incoming_bps'][k] = v / (now - last)
+                k, received = self.incoming.popitem()
+                self._status['total_received'] += received
+                self._status['incoming_bps'][k] = received / (now - last)
             self._status['total_outgoing_mbs'] = int(sum(list(self._status['outgoing_bps'].values())
                                                          )) / 1000000.0
             self._status['total_incoming_mbs'] = int(sum(list(self._status['incoming_bps'].values())
lbry/constants.py (new file, 2 lines)
@@ -0,0 +1,2 @@
+CENT = 1000000
+COIN = 100*CENT
lbry/crypto/base58.py (new file, 86 lines)
@@ -0,0 +1,86 @@
+from lbry.crypto.hash import double_sha256
+from lbry.crypto.util import bytes_to_int, int_to_bytes
+
+
+class Base58Error(Exception):
+    """ Exception used for Base58 errors. """
+
+
+class Base58:
+    """ Class providing base 58 functionality. """
+
+    chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
+    assert len(chars) == 58
+    char_map = {c: n for n, c in enumerate(chars)}
+
+    @classmethod
+    def char_value(cls, c):
+        val = cls.char_map.get(c)
+        if val is None:
+            raise Base58Error(f'invalid base 58 character "{c}"')
+        return val
+
+    @classmethod
+    def decode(cls, txt):
+        """ Decodes txt into a big-endian bytearray. """
+        if isinstance(txt, memoryview):
+            txt = str(txt)
+
+        if isinstance(txt, bytes):
+            txt = txt.decode()
+
+        if not isinstance(txt, str):
+            raise TypeError('a string is required')
+
+        if not txt:
+            raise Base58Error('string cannot be empty')
+
+        value = 0
+        for c in txt:
+            value = value * 58 + cls.char_value(c)
+
+        result = int_to_bytes(value)
+
+        # Prepend leading zero bytes if necessary
+        count = 0
+        for c in txt:
+            if c != '1':
+                break
+            count += 1
+        if count:
+            result = bytes((0,)) * count + result
+
+        return result
+
+    @classmethod
+    def encode(cls, be_bytes):
+        """Converts a big-endian bytearray into a base58 string."""
+        value = bytes_to_int(be_bytes)
+
+        txt = ''
+        while value:
+            value, mod = divmod(value, 58)
+            txt += cls.chars[mod]
+
+        for byte in be_bytes:
+            if byte != 0:
+                break
+            txt += '1'
+
+        return txt[::-1]
+
+    @classmethod
+    def decode_check(cls, txt, hash_fn=double_sha256):
+        """ Decodes a Base58Check-encoded string to a payload. The version prefixes it. """
+        be_bytes = cls.decode(txt)
+        result, check = be_bytes[:-4], be_bytes[-4:]
+        if check != hash_fn(result)[:4]:
+            raise Base58Error(f'invalid base 58 checksum for {txt}')
+        return result
+
+    @classmethod
+    def encode_check(cls, payload, hash_fn=double_sha256):
+        """ Encodes a payload bytearray (which includes the version byte(s))
+        into a Base58Check string."""
+        be_bytes = payload + hash_fn(payload)[:4]
+        return cls.encode(be_bytes)
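A short usage sketch of the Base58Check helpers in the new file above (not part of the diff; the payload bytes are illustrative):

```python
from lbry.crypto.base58 import Base58

payload = bytes([0x55]) + bytes(20)        # e.g. a version byte plus a 20-byte hash160
encoded = Base58.encode_check(payload)     # appends a 4-byte double_sha256 checksum
assert Base58.decode_check(encoded) == payload
print(encoded)
```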
lbry/crypto/crypt.py (new file, 71 lines)
@@ -0,0 +1,71 @@
+import os
+import base64
+import typing
+
+from cryptography.hazmat.primitives.kdf.scrypt import Scrypt
+from cryptography.hazmat.primitives.ciphers import Cipher, modes
+from cryptography.hazmat.primitives.ciphers.algorithms import AES
+from cryptography.hazmat.primitives.padding import PKCS7
+from cryptography.hazmat.backends import default_backend
+
+from lbry.error import InvalidPasswordError
+from lbry.crypto.hash import double_sha256
+
+
+def aes_encrypt(secret: str, value: str, init_vector: bytes = None) -> str:
+    if init_vector is not None:
+        assert len(init_vector) == 16
+    else:
+        init_vector = os.urandom(16)
+    key = double_sha256(secret.encode())
+    encryptor = Cipher(AES(key), modes.CBC(init_vector), default_backend()).encryptor()
+    padder = PKCS7(AES.block_size).padder()
+    padded_data = padder.update(value.encode()) + padder.finalize()
+    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+    return base64.b64encode(init_vector + encrypted_data).decode()
+
+
+def aes_decrypt(secret: str, value: str) -> typing.Tuple[str, bytes]:
+    try:
+        data = base64.b64decode(value.encode())
+        key = double_sha256(secret.encode())
+        init_vector, data = data[:16], data[16:]
+        decryptor = Cipher(AES(key), modes.CBC(init_vector), default_backend()).decryptor()
+        unpadder = PKCS7(AES.block_size).unpadder()
+        result = unpadder.update(decryptor.update(data)) + unpadder.finalize()
+        return result.decode(), init_vector
+    except UnicodeDecodeError:
+        raise InvalidPasswordError()
+    except ValueError as e:
+        if e.args[0] == 'Invalid padding bytes.':
+            raise InvalidPasswordError()
+        raise
+
+
+def better_aes_encrypt(secret: str, value: bytes) -> bytes:
+    init_vector = os.urandom(16)
+    key = scrypt(secret.encode(), salt=init_vector)
+    encryptor = Cipher(AES(key), modes.CBC(init_vector), default_backend()).encryptor()
+    padder = PKCS7(AES.block_size).padder()
+    padded_data = padder.update(value) + padder.finalize()
+    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+    return base64.b64encode(b's:8192:16:1:' + init_vector + encrypted_data)
+
+
+def better_aes_decrypt(secret: str, value: bytes) -> bytes:
+    try:
+        data = base64.b64decode(value)
+        _, scryp_n, scrypt_r, scrypt_p, data = data.split(b':', maxsplit=4)
+        init_vector, data = data[:16], data[16:]
+        key = scrypt(secret.encode(), init_vector, int(scryp_n), int(scrypt_r), int(scrypt_p))
+        decryptor = Cipher(AES(key), modes.CBC(init_vector), default_backend()).decryptor()
+        unpadder = PKCS7(AES.block_size).unpadder()
+        return unpadder.update(decryptor.update(data)) + unpadder.finalize()
+    except ValueError as e:
+        if e.args[0] == 'Invalid padding bytes.':
+            raise InvalidPasswordError()
+        raise
+
+
+def scrypt(passphrase, salt, scrypt_n=1<<13, scrypt_r=16, scrypt_p=1):
+    kdf = Scrypt(salt, length=32, n=scrypt_n, r=scrypt_r, p=scrypt_p, backend=default_backend())
+    return kdf.derive(passphrase)
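The `better_*` variants embed the scrypt parameters (`s:8192:16:1:`) and the IV alongside the ciphertext, so the passphrase alone is enough to decrypt later. A hedged round-trip sketch (not part of the diff; passphrase and plaintext are illustrative):

```python
from lbry.crypto.crypt import better_aes_encrypt, better_aes_decrypt

token = better_aes_encrypt("hunter2", b"wallet seed bytes")
assert better_aes_decrypt("hunter2", token) == b"wallet seed bytes"
```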
lbry/crypto/hash.py (new file, 47 lines)
@@ -0,0 +1,47 @@
+import hashlib
+import hmac
+from binascii import hexlify, unhexlify
+
+
+def sha256(x):
+    """ Simple wrapper of hashlib sha256. """
+    return hashlib.sha256(x).digest()
+
+
+def sha512(x):
+    """ Simple wrapper of hashlib sha512. """
+    return hashlib.sha512(x).digest()
+
+
+def ripemd160(x):
+    """ Simple wrapper of hashlib ripemd160. """
+    h = hashlib.new('ripemd160')
+    h.update(x)
+    return h.digest()
+
+
+def double_sha256(x):
+    """ SHA-256 of SHA-256, as used extensively in bitcoin. """
+    return sha256(sha256(x))
+
+
+def hmac_sha512(key, msg):
+    """ Use SHA-512 to provide an HMAC. """
+    return hmac.new(key, msg, hashlib.sha512).digest()
+
+
+def hash160(x):
+    """ RIPEMD-160 of SHA-256.
+        Used to make bitcoin addresses from pubkeys. """
+    return ripemd160(sha256(x))
+
+
+def hash_to_hex_str(x):
+    """ Convert a big-endian binary hash to displayed hex string.
+        Display form of a binary hash is reversed and converted to hex. """
+    return hexlify(reversed(x))
+
+
+def hex_str_to_hash(x):
+    """ Convert a displayed hex string to a binary hash. """
+    return reversed(unhexlify(x))
lbry/crypto/util.py (new file, 13 lines)
@@ -0,0 +1,13 @@
+from binascii import unhexlify, hexlify
+
+
+def bytes_to_int(be_bytes):
+    """ Interprets a big-endian sequence of bytes as an integer. """
+    return int(hexlify(be_bytes), 16)
+
+
+def int_to_bytes(value):
+    """ Converts an integer to a big-endian sequence of bytes. """
+    length = (value.bit_length() + 7) // 8
+    s = '%x' % value
+    return unhexlify(('0' * (len(s) % 2) + s).zfill(length * 2))
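The hash and integer helpers above compose into the bitcoin-style primitives (hash160, Base58Check) used elsewhere in `lbry.crypto`. A minimal sketch (not part of the diff; inputs are illustrative):

```python
from lbry.crypto.hash import double_sha256
from lbry.crypto.util import bytes_to_int, int_to_bytes

assert len(double_sha256(b"payload")) == 32                             # SHA-256 applied twice
assert int_to_bytes(bytes_to_int(b"\x01\x02\x03")) == b"\x01\x02\x03"   # big-endian round trip
```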
lbry/dht/blob_announcer.py (new file, 78 lines)
@@ -0,0 +1,78 @@
+import asyncio
+import typing
+import logging
+
+from prometheus_client import Counter, Gauge
+
+if typing.TYPE_CHECKING:
+    from lbry.dht.node import Node
+    from lbry.extras.daemon.storage import SQLiteStorage
+
+log = logging.getLogger(__name__)
+
+
+class BlobAnnouncer:
+    announcements_sent_metric = Counter(
+        "announcements_sent", "Number of announcements sent and their respective status.", namespace="dht_node",
+        labelnames=("peers", "error"),
+    )
+    announcement_queue_size_metric = Gauge(
+        "announcement_queue_size", "Number of hashes waiting to be announced.", namespace="dht_node",
+        labelnames=("scope",)
+    )
+
+    def __init__(self, loop: asyncio.AbstractEventLoop, node: 'Node', storage: 'SQLiteStorage'):
+        self.loop = loop
+        self.node = node
+        self.storage = storage
+        self.announce_task: asyncio.Task = None
+        self.announce_queue: typing.List[str] = []
+        self._done = asyncio.Event()
+        self.announced = set()
+
+    async def _run_consumer(self):
+        while self.announce_queue:
+            try:
+                blob_hash = self.announce_queue.pop()
+                peers = len(await self.node.announce_blob(blob_hash))
+                self.announcements_sent_metric.labels(peers=peers, error=False).inc()
+                if peers > 4:
+                    self.announced.add(blob_hash)
+                else:
+                    log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
+            except Exception as err:
+                self.announcements_sent_metric.labels(peers=0, error=True).inc()
+                log.warning("error announcing %s: %s", blob_hash[:8], str(err))
+
+    async def _announce(self, batch_size: typing.Optional[int] = 10):
+        while batch_size:
+            if not self.node.joined.is_set():
+                await self.node.joined.wait()
+            await asyncio.sleep(60)
+            if not self.node.protocol.routing_table.get_peers():
+                log.warning("No peers in DHT, announce round skipped")
+                continue
+            self.announce_queue.extend(await self.storage.get_blobs_to_announce())
+            self.announcement_queue_size_metric.labels(scope="global").set(len(self.announce_queue))
+            log.debug("announcer task wake up, %d blobs to announce", len(self.announce_queue))
+            while len(self.announce_queue) > 0:
+                log.info("%i blobs to announce", len(self.announce_queue))
+                await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)])
+                announced = list(filter(None, self.announced))
+                if announced:
+                    await self.storage.update_last_announced_blobs(announced)
+                    log.info("announced %i blobs", len(announced))
+                self.announced.clear()
+            self._done.set()
+            self._done.clear()
+
+    def start(self, batch_size: typing.Optional[int] = 10):
+        assert not self.announce_task or self.announce_task.done(), "already running"
+        self.announce_task = self.loop.create_task(self._announce(batch_size))
+
+    def stop(self):
+        if self.announce_task and not self.announce_task.done():
+            self.announce_task.cancel()
+
+    def wait(self):
+        return self._done.wait()
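In practice the announcer is driven by the daemon's DHT component; a hedged sketch of how it is wired up (not part of the diff; `node` and `storage` are assumed to be the already-constructed daemon objects):

```python
import asyncio

async def announce_once(node, storage):
    announcer = BlobAnnouncer(asyncio.get_running_loop(), node, storage)
    announcer.start(batch_size=10)   # spawns the _announce() task
    try:
        await announcer.wait()       # returns after one announce round completes
    finally:
        announcer.stop()
```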
lbry/dht/constants.py (new file, 40 lines)
@@ -0,0 +1,40 @@
+import hashlib
+import os
+
+HASH_CLASS = hashlib.sha384  # pylint: disable=invalid-name
+HASH_LENGTH = HASH_CLASS().digest_size
+HASH_BITS = HASH_LENGTH * 8
+ALPHA = 5
+K = 8
+SPLIT_BUCKETS_UNDER_INDEX = 1
+REPLACEMENT_CACHE_SIZE = 8
+RPC_TIMEOUT = 5.0
+RPC_ATTEMPTS = 5
+RPC_ATTEMPTS_PRUNING_WINDOW = 600
+ITERATIVE_LOOKUP_DELAY = RPC_TIMEOUT / 2.0  # TODO: use config val / 2 if rpc timeout is provided
+REFRESH_INTERVAL = 3600  # 1 hour
+REPLICATE_INTERVAL = REFRESH_INTERVAL
+DATA_EXPIRATION = 86400  # 24 hours
+TOKEN_SECRET_REFRESH_INTERVAL = 300  # 5 minutes
+MAYBE_PING_DELAY = 300  # 5 minutes
+CHECK_REFRESH_INTERVAL = REFRESH_INTERVAL / 5
+RPC_ID_LENGTH = 20
+PROTOCOL_VERSION = 1
+MSG_SIZE_LIMIT = 1400
+
+
+def digest(data: bytes) -> bytes:
+    h = HASH_CLASS()
+    h.update(data)
+    return h.digest()
+
+
+def generate_id(num=None) -> bytes:
+    if num is not None:
+        return digest(str(num).encode())
+    else:
+        return digest(os.urandom(32))
+
+
+def generate_rpc_id(num=None) -> bytes:
+    return generate_id(num)[:RPC_ID_LENGTH]
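Node and RPC identifiers are SHA-384 digests, so `HASH_LENGTH` is 48 bytes and RPC ids are the first 20 bytes. A quick sketch (not part of the diff):

```python
from lbry.dht import constants

node_id = constants.generate_id()
assert len(node_id) == constants.HASH_LENGTH == 48
assert len(constants.generate_rpc_id()) == constants.RPC_ID_LENGTH == 20
```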
@ -1,9 +1,11 @@
|
||||||
import logging
|
import logging
|
||||||
import asyncio
|
import asyncio
|
||||||
import typing
|
import typing
|
||||||
import binascii
|
|
||||||
import socket
|
import socket
|
||||||
from lbry.utils import resolve_host
|
|
||||||
|
from prometheus_client import Gauge
|
||||||
|
|
||||||
|
from lbry.utils import aclosing, resolve_host
|
||||||
from lbry.dht import constants
|
from lbry.dht import constants
|
||||||
from lbry.dht.peer import make_kademlia_peer
|
from lbry.dht.peer import make_kademlia_peer
|
||||||
from lbry.dht.protocol.distance import Distance
|
from lbry.dht.protocol.distance import Distance
|
||||||
|
@ -18,20 +20,32 @@ log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class Node:
|
class Node:
|
||||||
|
storing_peers_metric = Gauge(
|
||||||
|
"storing_peers", "Number of peers storing blobs announced to this node", namespace="dht_node",
|
||||||
|
labelnames=("scope",),
|
||||||
|
)
|
||||||
|
stored_blob_with_x_bytes_colliding = Gauge(
|
||||||
|
"stored_blobs_x_bytes_colliding", "Number of blobs with at least X bytes colliding with this node id prefix",
|
||||||
|
namespace="dht_node", labelnames=("amount",)
|
||||||
|
)
|
||||||
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, udp_port: int,
|
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, udp_port: int,
|
||||||
internal_udp_port: int, peer_port: int, external_ip: str, rpc_timeout: float = constants.rpc_timeout,
|
internal_udp_port: int, peer_port: int, external_ip: str, rpc_timeout: float = constants.RPC_TIMEOUT,
|
||||||
split_buckets_under_index: int = constants.split_buckets_under_index,
|
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False,
|
||||||
storage: typing.Optional['SQLiteStorage'] = None):
|
storage: typing.Optional['SQLiteStorage'] = None):
|
||||||
self.loop = loop
|
self.loop = loop
|
||||||
self.internal_udp_port = internal_udp_port
|
self.internal_udp_port = internal_udp_port
|
||||||
self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout,
|
self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout,
|
||||||
split_buckets_under_index)
|
split_buckets_under_index, is_bootstrap_node)
|
||||||
self.listening_port: asyncio.DatagramTransport = None
|
self.listening_port: asyncio.DatagramTransport = None
|
||||||
self.joined = asyncio.Event(loop=self.loop)
|
self.joined = asyncio.Event()
|
||||||
self._join_task: asyncio.Task = None
|
self._join_task: asyncio.Task = None
|
||||||
self._refresh_task: asyncio.Task = None
|
self._refresh_task: asyncio.Task = None
|
||||||
self._storage = storage
|
self._storage = storage
|
||||||
|
|
||||||
|
@property
|
||||||
|
def stored_blob_hashes(self):
|
||||||
|
return self.protocol.data_store.keys()
|
||||||
|
|
||||||
async def refresh_node(self, force_once=False):
|
async def refresh_node(self, force_once=False):
|
||||||
while True:
|
while True:
|
||||||
# remove peers with expired blob announcements from the datastore
|
# remove peers with expired blob announcements from the datastore
|
||||||
|
@ -41,17 +55,21 @@ class Node:
|
||||||
# add all peers in the routing table
|
# add all peers in the routing table
|
||||||
total_peers.extend(self.protocol.routing_table.get_peers())
|
total_peers.extend(self.protocol.routing_table.get_peers())
|
||||||
# add all the peers who have announced blobs to us
|
# add all the peers who have announced blobs to us
|
||||||
total_peers.extend(self.protocol.data_store.get_storing_contacts())
|
storing_peers = self.protocol.data_store.get_storing_contacts()
|
||||||
|
self.storing_peers_metric.labels("global").set(len(storing_peers))
|
||||||
|
total_peers.extend(storing_peers)
|
||||||
|
|
||||||
|
counts = {0: 0, 1: 0, 2: 0}
|
||||||
|
node_id = self.protocol.node_id
|
||||||
|
for blob_hash in self.protocol.data_store.keys():
|
||||||
|
bytes_colliding = 0 if blob_hash[0] != node_id[0] else 2 if blob_hash[1] == node_id[1] else 1
|
||||||
|
counts[bytes_colliding] += 1
|
||||||
|
self.stored_blob_with_x_bytes_colliding.labels(amount=0).set(counts[0])
|
||||||
|
self.stored_blob_with_x_bytes_colliding.labels(amount=1).set(counts[1])
|
||||||
|
self.stored_blob_with_x_bytes_colliding.labels(amount=2).set(counts[2])
|
||||||
|
|
||||||
# get ids falling in the midpoint of each bucket that hasn't been recently updated
|
# get ids falling in the midpoint of each bucket that hasn't been recently updated
|
||||||
node_ids = self.protocol.routing_table.get_refresh_list(0, True)
|
node_ids = self.protocol.routing_table.get_refresh_list(0, True)
|
||||||
# if we have 3 or fewer populated buckets get two random ids in the range of each to try and
|
|
||||||
# populate/split the buckets further
|
|
||||||
buckets_with_contacts = self.protocol.routing_table.buckets_with_contacts()
|
|
||||||
if buckets_with_contacts <= 3:
|
|
||||||
for i in range(buckets_with_contacts):
|
|
||||||
node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
|
|
||||||
node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
|
|
||||||
|
|
||||||
if self.protocol.routing_table.get_peers():
|
if self.protocol.routing_table.get_peers():
|
||||||
# if we have node ids to look up, perform the iterative search until we have k results
|
# if we have node ids to look up, perform the iterative search until we have k results
|
||||||
|
@ -61,8 +79,8 @@ class Node:
|
||||||
else:
|
else:
|
||||||
if force_once:
|
if force_once:
|
||||||
break
|
break
|
||||||
fut = asyncio.Future(loop=self.loop)
|
fut = asyncio.Future()
|
||||||
self.loop.call_later(constants.refresh_interval // 4, fut.set_result, None)
|
self.loop.call_later(constants.REFRESH_INTERVAL // 4, fut.set_result, None)
|
||||||
await fut
|
await fut
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
@ -75,13 +93,13 @@ class Node:
|
||||||
if force_once:
|
if force_once:
|
||||||
break
|
break
|
||||||
|
|
||||||
fut = asyncio.Future(loop=self.loop)
|
fut = asyncio.Future()
|
||||||
self.loop.call_later(constants.refresh_interval, fut.set_result, None)
|
self.loop.call_later(constants.REFRESH_INTERVAL, fut.set_result, None)
|
||||||
await fut
|
await fut
|
||||||
|
|
||||||
async def announce_blob(self, blob_hash: str) -> typing.List[bytes]:
|
async def announce_blob(self, blob_hash: str) -> typing.List[bytes]:
|
||||||
hash_value = binascii.unhexlify(blob_hash.encode())
|
hash_value = bytes.fromhex(blob_hash)
|
||||||
assert len(hash_value) == constants.hash_length
|
assert len(hash_value) == constants.HASH_LENGTH
|
||||||
peers = await self.peer_search(hash_value)
|
peers = await self.peer_search(hash_value)
|
||||||
|
|
||||||
if not self.protocol.external_ip:
|
if not self.protocol.external_ip:
|
||||||
|
@ -90,12 +108,14 @@ class Node:
|
||||||
for peer in peers:
|
for peer in peers:
|
||||||
log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
|
log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
|
||||||
stored_to_tup = await asyncio.gather(
|
stored_to_tup = await asyncio.gather(
|
||||||
*(self.protocol.store_to_peer(hash_value, peer) for peer in peers), loop=self.loop
|
*(self.protocol.store_to_peer(hash_value, peer) for peer in peers)
|
||||||
)
|
)
|
||||||
stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
|
stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
|
||||||
if stored_to:
|
if stored_to:
|
||||||
log.debug("Stored %s to %i of %i attempted peers", binascii.hexlify(hash_value).decode()[:8],
|
log.debug(
|
||||||
len(stored_to), len(peers))
|
"Stored %s to %i of %i attempted peers", hash_value.hex()[:8],
|
||||||
|
len(stored_to), len(peers)
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
log.debug("Failed announcing %s, stored to 0 peers", blob_hash[:8])
|
log.debug("Failed announcing %s, stored to 0 peers", blob_hash[:8])
|
||||||
return stored_to
|
return stored_to
|
||||||
|
@@ -162,39 +182,36 @@ class Node:
                     for address, udp_port in known_node_urls or []
                 ]))
             except socket.gaierror:
-                await asyncio.sleep(30, loop=self.loop)
+                await asyncio.sleep(30)
                 continue

             self.protocol.peer_manager.reset()
             self.protocol.ping_queue.enqueue_maybe_ping(*seed_peers, delay=0.0)
             await self.peer_search(self.protocol.node_id, shortlist=seed_peers, count=32)

-            await asyncio.sleep(1, loop=self.loop)
+            await asyncio.sleep(1)

     def start(self, interface: str, known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
         self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls))

     def get_iterative_node_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
-                                  bottom_out_limit: int = constants.bottom_out_limit,
-                                  max_results: int = constants.k) -> IterativeNodeFinder:
-        return IterativeNodeFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
-                                   key, bottom_out_limit, max_results, None, shortlist)
+                                  max_results: int = constants.K) -> IterativeNodeFinder:
+        shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
+        return IterativeNodeFinder(self.loop, self.protocol, key, max_results, shortlist)

     def get_iterative_value_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
-                                   bottom_out_limit: int = 40,
                                    max_results: int = -1) -> IterativeValueFinder:
-        return IterativeValueFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
-                                    key, bottom_out_limit, max_results, None, shortlist)
+        shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
+        return IterativeValueFinder(self.loop, self.protocol, key, max_results, shortlist)

-    async def peer_search(self, node_id: bytes, count=constants.k, max_results=constants.k*2,
-                          bottom_out_limit=20, shortlist: typing.Optional[typing.List['KademliaPeer']] = None
+    async def peer_search(self, node_id: bytes, count=constants.K, max_results=constants.K * 2,
+                          shortlist: typing.Optional[typing.List['KademliaPeer']] = None
                           ) -> typing.List['KademliaPeer']:
         peers = []
-        async for iteration_peers in self.get_iterative_node_finder(
-                node_id, shortlist=shortlist, bottom_out_limit=bottom_out_limit, max_results=max_results):
-            peers.extend(iteration_peers)
+        async with aclosing(self.get_iterative_node_finder(
+                node_id, shortlist=shortlist, max_results=max_results)) as node_finder:
+            async for iteration_peers in node_finder:
+                peers.extend(iteration_peers)
         distance = Distance(node_id)
         peers.sort(key=lambda peer: distance(peer.node_id))
         return peers[:count]
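A minimal sketch of why the finder is now wrapped in an async-with block, assuming the project's aclosing() helper behaves like contextlib.aclosing from Python 3.10: it guarantees the async generator's cleanup runs even when the consumer stops iterating early.

import asyncio
from contextlib import aclosing  # assumption: equivalent to the helper used above

async def finder():
    try:
        for i in range(10):
            yield [i]
    finally:
        # cleanup that must run even if the consumer breaks out early
        print("finder closed")

async def main():
    async with aclosing(finder()) as gen:
        async for batch in gen:
            if batch[0] >= 2:
                break  # early exit still triggers the finally block above

asyncio.run(main())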
@@ -220,39 +237,46 @@ class Node:

         # prioritize peers who reply to a dht ping first
         # this minimizes attempting to make tcp connections that won't work later to dead or unreachable peers
-        async for results in self.get_iterative_value_finder(binascii.unhexlify(blob_hash.encode())):
+        async with aclosing(self.get_iterative_value_finder(bytes.fromhex(blob_hash))) as value_finder:
+            async for results in value_finder:
                 to_put = []
                 for peer in results:
                     if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
                         continue
                     is_good = self.protocol.peer_manager.peer_is_good(peer)
                     if is_good:
                         # the peer has replied recently over UDP, it can probably be reached on the TCP port
                         to_put.append(peer)
                     elif is_good is None:
                         if not peer.udp_port:
                             # TODO: use the same port for TCP and UDP
                             # the udp port must be guessed
                             # default to the ports being the same. if the TCP port appears to be <=0.48.0 default,
                             # including on a network with several nodes, then assume the udp port is proportionately
                             # based on a starting port of 4444
                             udp_port_to_try = peer.tcp_port
                             if 3400 > peer.tcp_port > 3332:
                                 udp_port_to_try = (peer.tcp_port - 3333) + 4444
                             self.loop.create_task(put_into_result_queue_after_pong(
                                 make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)
                             ))
                         else:
                             self.loop.create_task(put_into_result_queue_after_pong(peer))
                     else:
                         # the peer is known to be bad/unreachable, skip trying to connect to it over TCP
                         log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
                 if to_put:
                     result_queue.put_nowait(to_put)

     def accumulate_peers(self, search_queue: asyncio.Queue,
-                         peer_queue: typing.Optional[asyncio.Queue] = None) -> typing.Tuple[
-        asyncio.Queue, asyncio.Task]:
-        q = peer_queue or asyncio.Queue(loop=self.loop)
-        return q, self.loop.create_task(self._accumulate_peers_for_value(search_queue, q))
+                         peer_queue: typing.Optional[asyncio.Queue] = None
+                         ) -> typing.Tuple[asyncio.Queue, asyncio.Task]:
+        queue = peer_queue or asyncio.Queue()
+        return queue, self.loop.create_task(self._accumulate_peers_for_value(search_queue, queue))
+
+
+async def get_kademlia_peers_from_hosts(peer_list: typing.List[typing.Tuple[str, int]]) -> typing.List['KademliaPeer']:
+    peer_address_list = [(await resolve_host(url, port, proto='tcp'), port) for url, port in peer_list]
+    kademlia_peer_list = [make_kademlia_peer(None, address, None, tcp_port=port, allow_localhost=True)
+                          for address, port in peer_address_list]
+    return kademlia_peer_list
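A worked example of the UDP port guess used above, written as a hypothetical standalone helper (guess_udp_port is not a name from the codebase): legacy TCP ports in the 3333-3399 range are mapped onto a UDP range starting at 4444, otherwise the ports are assumed equal.

def guess_udp_port(tcp_port: int) -> int:
    # mirror of the heuristic in _accumulate_peers_for_value
    if 3400 > tcp_port > 3332:
        return (tcp_port - 3333) + 4444
    return tcp_port

assert guess_udp_port(3333) == 4444
assert guess_udp_port(3335) == 4446
assert guess_udp_port(4444) == 4444  # outside the legacy range, ports assumed to match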
@@ -1,18 +1,21 @@
 import typing
 import asyncio
 import logging
-import ipaddress
-from binascii import hexlify
 from dataclasses import dataclass, field
 from functools import lru_cache

+from prometheus_client import Gauge
+
+from lbry.utils import is_valid_public_ipv4 as _is_valid_public_ipv4, LRUCache
 from lbry.dht import constants
 from lbry.dht.serialization.datagram import make_compact_address, make_compact_ip, decode_compact_address

+ALLOW_LOCALHOST = False
+CACHE_SIZE = 16384
 log = logging.getLogger(__name__)


-@lru_cache(1024)
+@lru_cache(CACHE_SIZE)
 def make_kademlia_peer(node_id: typing.Optional[bytes], address: typing.Optional[str],
                        udp_port: typing.Optional[int] = None,
                        tcp_port: typing.Optional[int] = None,
@@ -20,40 +23,32 @@ def make_kademlia_peer(node_id: typing.Optional[bytes], address: typing.Optional
     return KademliaPeer(address, node_id, udp_port, tcp_port=tcp_port, allow_localhost=allow_localhost)


-# the ipaddress module does not show these subnets as reserved
-carrier_grade_NAT_subnet = ipaddress.ip_network('100.64.0.0/10')
-ip4_to_6_relay_subnet = ipaddress.ip_network('192.88.99.0/24')
-
-ALLOW_LOCALHOST = False
-
-
 def is_valid_public_ipv4(address, allow_localhost: bool = False):
     allow_localhost = bool(allow_localhost or ALLOW_LOCALHOST)
-    try:
-        parsed_ip = ipaddress.ip_address(address)
-        if parsed_ip.is_loopback and allow_localhost:
-            return True
-        return not any((parsed_ip.version != 4, parsed_ip.is_unspecified, parsed_ip.is_link_local,
-                        parsed_ip.is_loopback, parsed_ip.is_multicast, parsed_ip.is_reserved, parsed_ip.is_private,
-                        parsed_ip.is_reserved,
-                        carrier_grade_NAT_subnet.supernet_of(ipaddress.ip_network(f"{address}/32")),
-                        ip4_to_6_relay_subnet.supernet_of(ipaddress.ip_network(f"{address}/32"))))
-    except ipaddress.AddressValueError:
-        return False
+    return _is_valid_public_ipv4(address, allow_localhost)


 class PeerManager:
+    peer_manager_keys_metric = Gauge(
+        "peer_manager_keys", "Number of keys tracked by PeerManager dicts (sum)", namespace="dht_node",
+        labelnames=("scope",)
+    )
+
     def __init__(self, loop: asyncio.AbstractEventLoop):
         self._loop = loop
         self._rpc_failures: typing.Dict[
             typing.Tuple[str, int], typing.Tuple[typing.Optional[float], typing.Optional[float]]
-        ] = {}
-        self._last_replied: typing.Dict[typing.Tuple[str, int], float] = {}
-        self._last_sent: typing.Dict[typing.Tuple[str, int], float] = {}
-        self._last_requested: typing.Dict[typing.Tuple[str, int], float] = {}
-        self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = {}
-        self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = {}
-        self._node_tokens: typing.Dict[bytes, (float, bytes)] = {}
+        ] = LRUCache(CACHE_SIZE)
+        self._last_replied: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
+        self._last_sent: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
+        self._last_requested: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
+        self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = LRUCache(CACHE_SIZE)
+        self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = LRUCache(CACHE_SIZE)
+        self._node_tokens: typing.Dict[bytes, (float, bytes)] = LRUCache(CACHE_SIZE)
+
+    def count_cache_keys(self):
+        return len(self._rpc_failures) + len(self._last_replied) + len(self._last_sent) + len(
+            self._last_requested) + len(self._node_id_mapping) + len(self._node_id_reverse_mapping) + len(
+            self._node_tokens)

     def reset(self):
         for statistic in (self._rpc_failures, self._last_replied, self._last_sent, self._last_requested):
@@ -85,7 +80,7 @@ class PeerManager:

     def get_node_token(self, node_id: bytes) -> typing.Optional[bytes]:
         ts, token = self._node_tokens.get(node_id, (0, None))
-        if ts and ts > self._loop.time() - constants.token_secret_refresh_interval:
+        if ts and ts > self._loop.time() - constants.TOKEN_SECRET_REFRESH_INTERVAL:
             return token

     def get_last_replied(self, address: str, udp_port: int) -> typing.Optional[float]:
@@ -103,28 +98,32 @@ class PeerManager:
         self._node_id_mapping.pop(self._node_id_reverse_mapping.pop(node_id))
         self._node_id_mapping[(address, udp_port)] = node_id
         self._node_id_reverse_mapping[node_id] = (address, udp_port)
+        self.peer_manager_keys_metric.labels("global").set(self.count_cache_keys())
+
+    def get_node_id_for_endpoint(self, address, port):
+        return self._node_id_mapping.get((address, port))

     def prune(self):  # TODO: periodically call this
         now = self._loop.time()
         to_pop = []
         for (address, udp_port), (_, last_failure) in self._rpc_failures.items():
-            if last_failure and last_failure < now - constants.rpc_attempts_pruning_window:
+            if last_failure and last_failure < now - constants.RPC_ATTEMPTS_PRUNING_WINDOW:
                 to_pop.append((address, udp_port))
         while to_pop:
             del self._rpc_failures[to_pop.pop()]
         to_pop = []
-        for node_id, (age, token) in self._node_tokens.items():
-            if age < now - constants.token_secret_refresh_interval:
+        for node_id, (age, token) in self._node_tokens.items():  # pylint: disable=unused-variable
+            if age < now - constants.TOKEN_SECRET_REFRESH_INTERVAL:
                 to_pop.append(node_id)
         while to_pop:
             del self._node_tokens[to_pop.pop()]

-    def contact_triple_is_good(self, node_id: bytes, address: str, udp_port: int):
+    def contact_triple_is_good(self, node_id: bytes, address: str, udp_port: int):  # pylint: disable=too-many-return-statements
         """
         :return: False if peer is bad, None if peer is unknown, or True if peer is good
         """

-        delay = self._loop.time() - constants.check_refresh_interval
+        delay = self._loop.time() - constants.CHECK_REFRESH_INTERVAL

         # fixme: find a way to re-enable that without breaking other parts
         # if node_id not in self._node_id_reverse_mapping or (address, udp_port) not in self._node_id_mapping:
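A standalone sketch of the pruning rule above: any endpoint whose most recent RPC failure is older than the pruning window is dropped. The window value here is illustrative, not the library's constant.

import time

RPC_ATTEMPTS_PRUNING_WINDOW = 600  # assumption for the example
rpc_failures = {("1.2.3.4", 4444): (None, time.monotonic() - 700),
                ("5.6.7.8", 4444): (None, time.monotonic() - 10)}

now = time.monotonic()
stale = [endpoint for endpoint, (_, last_failure) in rpc_failures.items()
         if last_failure and last_failure < now - RPC_ATTEMPTS_PRUNING_WINDOW]
for endpoint in stale:
    del rpc_failures[endpoint]

assert list(rpc_failures) == [("5.6.7.8", 4444)]  # only the recent failure is kept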
@@ -154,9 +153,10 @@ class PeerManager:
     def peer_is_good(self, peer: 'KademliaPeer'):
         return self.contact_triple_is_good(peer.node_id, peer.address, peer.udp_port)

-    def decode_tcp_peer_from_compact_address(self, compact_address: bytes) -> 'KademliaPeer':
-        node_id, address, tcp_port = decode_compact_address(compact_address)
-        return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)
+
+def decode_tcp_peer_from_compact_address(compact_address: bytes) -> 'KademliaPeer':  # pylint: disable=no-self-use
+    node_id, address, tcp_port = decode_compact_address(compact_address)
+    return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)


 @dataclass(unsafe_hash=True)
@@ -170,12 +170,12 @@ class KademliaPeer:

     def __post_init__(self):
         if self._node_id is not None:
-            if not len(self._node_id) == constants.hash_length:
-                raise ValueError("invalid node_id: {}".format(hexlify(self._node_id).decode()))
-        if self.udp_port is not None and not 1 <= self.udp_port <= 65535:
-            raise ValueError("invalid udp port")
-        if self.tcp_port is not None and not 1 <= self.tcp_port <= 65535:
-            raise ValueError("invalid tcp port")
+            if not len(self._node_id) == constants.HASH_LENGTH:
+                raise ValueError("invalid node_id: {}".format(self._node_id.hex()))
+        if self.udp_port is not None and not 1024 <= self.udp_port <= 65535:
+            raise ValueError(f"invalid udp port: {self.address}:{self.udp_port}")
+        if self.tcp_port is not None and not 1024 <= self.tcp_port <= 65535:
+            raise ValueError(f"invalid tcp port: {self.address}:{self.tcp_port}")
         if not is_valid_public_ipv4(self.address, self.allow_localhost):
             raise ValueError(f"invalid ip address: '{self.address}'")
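A short illustration of the stricter validation, assuming the package is importable as lbry.dht.peer: after this change a peer with a private address or a port below 1024 is rejected at construction time.

from lbry.dht.peer import make_kademlia_peer

make_kademlia_peer(None, "34.231.10.21", udp_port=4444)       # ok: public IP, port >= 1024
try:
    make_kademlia_peer(None, "192.168.1.5", udp_port=4444)    # private address is rejected
except ValueError as err:
    print(err)
try:
    make_kademlia_peer(None, "34.231.10.21", udp_port=80)     # ports below 1024 are now rejected
except ValueError as err:
    print(err)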
@@ -194,3 +194,6 @@ class KademliaPeer:

     def compact_ip(self):
         return make_compact_ip(self.address)
+
+    def __str__(self):
+        return f"{self.__class__.__name__}({self.node_id.hex()[:8]}@{self.address}:{self.udp_port}-{self.tcp_port})"
@@ -16,13 +16,19 @@ class DictDataStore:
         self._peer_manager = peer_manager
         self.completed_blobs: typing.Set[str] = set()

+    def keys(self):
+        return self._data_store.keys()
+
+    def __len__(self):
+        return self._data_store.__len__()
+
     def removed_expired_peers(self):
         now = self.loop.time()
         keys = list(self._data_store.keys())
         for key in keys:
             to_remove = []
             for (peer, ts) in self._data_store[key]:
-                if ts + constants.data_expiration < now or self._peer_manager.peer_is_good(peer) is False:
+                if ts + constants.DATA_EXPIRATION < now or self._peer_manager.peer_is_good(peer) is False:
                     to_remove.append((peer, ts))
             for item in to_remove:
                 self._data_store[key].remove(item)
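A toy illustration of the expiration rule used above: an announcement older than the expiration window is dropped, a recent one is kept. The DATA_EXPIRATION value here is illustrative, not necessarily the library constant.

DATA_EXPIRATION = 86400  # assumption: one day, for the example
now = 100000.0
stored = [("peer_a", now - 90000), ("peer_b", now - 10)]

expired = [peer for peer, ts in stored if ts + DATA_EXPIRATION < now]
fresh = [peer for peer, ts in stored if ts + DATA_EXPIRATION > now]

assert expired == ["peer_a"]  # announced more than a day ago
assert fresh == ["peer_b"]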
@@ -43,7 +49,7 @@ class DictDataStore:
         """
         now = self.loop.time()
         for (peer, ts) in self._data_store.get(key, []):
-            if ts + constants.data_expiration > now:
+            if ts + constants.DATA_EXPIRATION > now:
                 yield peer

     def has_peers_for_blob(self, key: bytes) -> bool:
@@ -53,7 +59,7 @@ class DictDataStore:
         now = self.loop.time()
         if key in self._data_store:
             current = list(filter(lambda x: x[0] == contact, self._data_store[key]))
-            if len(current):
+            if len(current) > 0:
                 self._data_store[key][self._data_store[key].index(current[0])] = contact, now
             else:
                 self._data_store[key].append((contact, now))
@@ -65,6 +71,6 @@ class DictDataStore:

     def get_storing_contacts(self) -> typing.List['KademliaPeer']:
         peers = set()
-        for key, stored in self._data_store.items():
+        for _, stored in self._data_store.items():
             peers.update(set(map(lambda tup: tup[0], stored)))
         return list(peers)
@@ -9,17 +9,17 @@ class Distance:
     """

     def __init__(self, key: bytes):
-        if len(key) != constants.hash_length:
+        if len(key) != constants.HASH_LENGTH:
             raise ValueError(f"invalid key length: {len(key)}")
         self.key = key
         self.val_key_one = int.from_bytes(key, 'big')

     def __call__(self, key_two: bytes) -> int:
-        if len(key_two) != constants.hash_length:
+        if len(key_two) != constants.HASH_LENGTH:
             raise ValueError(f"invalid length of key to compare: {len(key_two)}")
         val_key_two = int.from_bytes(key_two, 'big')
         return self.val_key_one ^ val_key_two

-    def is_closer(self, a: bytes, b: bytes) -> bool:
-        """Returns true is `a` is closer to `key` than `b` is"""
-        return self(a) < self(b)
+    def is_closer(self, key_a: bytes, key_b: bytes) -> bool:
+        """Returns true is `key_a` is closer to `key` than `key_b` is"""
+        return self(key_a) < self(key_b)
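A worked example of the XOR distance metric on toy 4-byte keys; the real Distance class requires full constants.HASH_LENGTH-byte keys, so this is only a sketch of the arithmetic.

def xor_distance(a: bytes, b: bytes) -> int:
    # same big-endian XOR computation Distance.__call__ performs
    return int.from_bytes(a, 'big') ^ int.from_bytes(b, 'big')

key_ref = bytes.fromhex("00000000")
key_one = bytes.fromhex("00000001")
key_two = bytes.fromhex("00000003")

assert xor_distance(key_ref, key_one) == 1
assert xor_distance(key_ref, key_two) == 3
# key_one is "closer" to key_ref than key_two, mirroring Distance.is_closer
assert xor_distance(key_ref, key_one) < xor_distance(key_ref, key_two)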
@@ -1,18 +1,17 @@
 import asyncio
-from binascii import hexlify
 from itertools import chain
-from collections import defaultdict
+from collections import defaultdict, OrderedDict
+from collections.abc import AsyncIterator
 import typing
 import logging
+from typing import TYPE_CHECKING
 from lbry.dht import constants
 from lbry.dht.error import RemoteException, TransportNotConnected
 from lbry.dht.protocol.distance import Distance
-from lbry.dht.peer import make_kademlia_peer
+from lbry.dht.peer import make_kademlia_peer, decode_tcp_peer_from_compact_address
 from lbry.dht.serialization.datagram import PAGE_KEY

-from typing import TYPE_CHECKING
 if TYPE_CHECKING:
-    from lbry.dht.protocol.routing_table import TreeRoutingTable
     from lbry.dht.protocol.protocol import KademliaProtocol
     from lbry.dht.peer import PeerManager, KademliaPeer
@@ -27,6 +26,15 @@ class FindResponse:
     def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
         raise NotImplementedError()

+    def get_close_kademlia_peers(self, peer_info) -> typing.Generator[typing.Iterator['KademliaPeer'], None, None]:
+        for contact_triple in self.get_close_triples():
+            node_id, address, udp_port = contact_triple
+            try:
+                yield make_kademlia_peer(node_id, address, udp_port)
+            except ValueError:
+                log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer_info.address,
+                            peer_info.udp_port, address, udp_port)
+

 class FindNodeResponse(FindResponse):
     def __init__(self, key: bytes, close_triples: typing.List[typing.Tuple[bytes, str, int]]):
@@ -57,57 +65,33 @@ class FindValueResponse(FindResponse):
         return [(node_id, address.decode(), port) for node_id, address, port in self.close_triples]


-def get_shortlist(routing_table: 'TreeRoutingTable', key: bytes,
-                  shortlist: typing.Optional[typing.List['KademliaPeer']]) -> typing.List['KademliaPeer']:
-    """
-    If not provided, initialize the shortlist of peers to probe to the (up to) k closest peers in the routing table
-
-    :param routing_table: a TreeRoutingTable
-    :param key: a 48 byte hash
-    :param shortlist: optional manually provided shortlist, this is done during bootstrapping when there are no
-                      peers in the routing table. During bootstrap the shortlist is set to be the seed nodes.
-    """
-    if len(key) != constants.hash_length:
-        raise ValueError("invalid key length: %i" % len(key))
-    return shortlist or routing_table.find_close_peers(key)
-
-
-class IterativeFinder:
-    def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
-                 routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
-                 bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.k,
-                 exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
+class IterativeFinder(AsyncIterator):
+    def __init__(self, loop: asyncio.AbstractEventLoop,
+                 protocol: 'KademliaProtocol', key: bytes,
+                 max_results: typing.Optional[int] = constants.K,
                  shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
-        if len(key) != constants.hash_length:
+        if len(key) != constants.HASH_LENGTH:
             raise ValueError("invalid key length: %i" % len(key))
         self.loop = loop
-        self.peer_manager = peer_manager
-        self.routing_table = routing_table
+        self.peer_manager = protocol.peer_manager
         self.protocol = protocol

         self.key = key
-        self.bottom_out_limit = bottom_out_limit
-        self.max_results = max_results
-        self.exclude = exclude or []
+        self.max_results = max(constants.K, max_results)

-        self.active: typing.Set['KademliaPeer'] = set()
+        self.active: typing.Dict['KademliaPeer', int] = OrderedDict()  # peer: distance, sorted
         self.contacted: typing.Set['KademliaPeer'] = set()
         self.distance = Distance(key)

-        self.closest_peer: typing.Optional['KademliaPeer'] = None
-        self.prev_closest_peer: typing.Optional['KademliaPeer'] = None
-        self.iteration_queue = asyncio.Queue(loop=self.loop)
+        self.iteration_queue = asyncio.Queue()

-        self.running_probes: typing.Set[asyncio.Task] = set()
+        self.running_probes: typing.Dict['KademliaPeer', asyncio.Task] = {}
         self.iteration_count = 0
-        self.bottom_out_count = 0
         self.running = False
         self.tasks: typing.List[asyncio.Task] = []
-        self.delayed_calls: typing.List[asyncio.Handle] = []
-        for peer in get_shortlist(routing_table, key, shortlist):
+        for peer in shortlist:
             if peer.node_id:
-                self._add_active(peer)
+                self._add_active(peer, force=True)
             else:
                 # seed nodes
                 self._schedule_probe(peer)
@@ -132,73 +116,86 @@ class IterativeFinder:
         """
         raise NotImplementedError()

-    def get_initial_result(self) -> typing.List['KademliaPeer']:
+    def get_initial_result(self) -> typing.List['KademliaPeer']:  #pylint: disable=no-self-use
         """
         Get an initial or cached result to be put into the Queue. Used for findValue requests where the blob
         has peers in the local data store of blobs announced to us
         """
         return []

-    def _is_closer(self, peer: 'KademliaPeer') -> bool:
-        return not self.closest_peer or self.distance.is_closer(peer.node_id, self.closest_peer.node_id)
-
-    def _add_active(self, peer):
+    def _add_active(self, peer, force=False):
+        if not force and self.peer_manager.peer_is_good(peer) is False:
+            return
+        if peer in self.contacted:
+            return
         if peer not in self.active and peer.node_id and peer.node_id != self.protocol.node_id:
-            self.active.add(peer)
-            if self._is_closer(peer):
-                self.prev_closest_peer = self.closest_peer
-                self.closest_peer = peer
+            self.active[peer] = self.distance(peer.node_id)
+            self.active = OrderedDict(sorted(self.active.items(), key=lambda item: item[1]))

     async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
         self._add_active(peer)
-        for contact_triple in response.get_close_triples():
-            node_id, address, udp_port = contact_triple
-            try:
-                self._add_active(make_kademlia_peer(node_id, address, udp_port))
-            except ValueError:
-                log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer.address,
-                            peer.udp_port, address, udp_port)
+        for new_peer in response.get_close_kademlia_peers(peer):
+            self._add_active(new_peer)
         self.check_result_ready(response)
+        self._log_state(reason="check result")
+
+    def _reset_closest(self, peer):
+        if peer in self.active:
+            del self.active[peer]

     async def _send_probe(self, peer: 'KademliaPeer'):
         try:
             response = await self.send_probe(peer)
         except asyncio.TimeoutError:
-            self.active.discard(peer)
+            self._reset_closest(peer)
             return
+        except asyncio.CancelledError:
+            log.debug("%s[%x] cancelled probe",
+                      type(self).__name__, id(self))
+            raise
         except ValueError as err:
             log.warning(str(err))
-            self.active.discard(peer)
+            self._reset_closest(peer)
             return
         except TransportNotConnected:
-            return self.aclose()
+            await self._aclose(reason="not connected")
+            return
         except RemoteException:
+            self._reset_closest(peer)
             return
         return await self._handle_probe_result(peer, response)

-    async def _search_round(self):
+    def _search_round(self):
         """
         Send up to constants.alpha (5) probes to closest active peers
         """

         added = 0
-        to_probe = list(self.active - self.contacted)
-        to_probe.sort(key=lambda peer: self.distance(self.key))
-        for peer in to_probe:
-            if added >= constants.alpha:
+        for index, peer in enumerate(self.active.keys()):
+            if index == 0:
+                log.debug("%s[%x] closest to probe: %s",
+                          type(self).__name__, id(self),
+                          peer.node_id.hex()[:8])
+            if peer in self.contacted:
+                continue
+            if len(self.running_probes) >= constants.ALPHA:
+                break
+            if index > (constants.K + len(self.running_probes)):
                 break
             origin_address = (peer.address, peer.udp_port)
-            if origin_address in self.exclude:
-                continue
             if peer.node_id == self.protocol.node_id:
                 continue
             if origin_address == (self.protocol.external_ip, self.protocol.udp_port):
                 continue
             self._schedule_probe(peer)
             added += 1
-        log.debug("running %d probes", len(self.running_probes))
+        log.debug("%s[%x] running %d probes for key %s",
+                  type(self).__name__, id(self),
+                  len(self.running_probes), self.key.hex()[:8])
         if not added and not self.running_probes:
-            log.debug("search for %s exhausted", hexlify(self.key)[:8])
+            log.debug("%s[%x] search for %s exhausted",
+                      type(self).__name__, id(self),
+                      self.key.hex()[:8])
            self.search_exhausted()

     def _schedule_probe(self, peer: 'KademliaPeer'):
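A condensed sketch of the probe-selection loop above: `active` maps peers to their XOR distance from the target and is kept sorted, so iteration always starts from the closest known peer and only uncontacted peers are probed, up to a small concurrency limit. The names and ALPHA value here are illustrative.

from collections import OrderedDict

ALPHA = 5
active = OrderedDict(sorted({"peer_c": 30, "peer_a": 5, "peer_b": 12}.items(),
                            key=lambda item: item[1]))  # closest peer first
contacted = {"peer_a"}
running_probes = {}

for index, peer in enumerate(active.keys()):
    if peer in contacted:
        continue
    if len(running_probes) >= ALPHA:
        break
    running_probes[peer] = f"task probing {peer}"  # stand-in for loop.create_task(...)

print(list(running_probes))  # ['peer_b', 'peer_c'] - closest uncontacted peers first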
@@ -207,33 +204,24 @@ class IterativeFinder:
         t = self.loop.create_task(self._send_probe(peer))

         def callback(_):
-            self.running_probes.difference_update({
-                probe for probe in self.running_probes if probe.done() or probe == t
-            })
-            if not self.running_probes:
-                self.tasks.append(self.loop.create_task(self._search_task(0.0)))
+            self.running_probes.pop(peer, None)
+            if self.running:
+                self._search_round()

         t.add_done_callback(callback)
-        self.running_probes.add(t)
+        self.running_probes[peer] = t

-    async def _search_task(self, delay: typing.Optional[float] = constants.iterative_lookup_delay):
-        try:
-            if self.running:
-                await self._search_round()
-            if self.running:
-                self.delayed_calls.append(self.loop.call_later(delay, self._search))
-        except (asyncio.CancelledError, StopAsyncIteration, TransportNotConnected):
-            if self.running:
-                self.loop.call_soon(self.aclose)
-
-    def _search(self):
-        self.tasks.append(self.loop.create_task(self._search_task()))
+    def _log_state(self, reason="?"):
+        log.debug("%s[%x] [%s] %s: %i active nodes %i contacted %i produced %i queued",
+                  type(self).__name__, id(self), self.key.hex()[:8],
+                  reason, len(self.active), len(self.contacted),
+                  self.iteration_count, self.iteration_queue.qsize())

     def __aiter__(self):
         if self.running:
             raise Exception("already running")
         self.running = True
-        self._search()
+        self.loop.call_soon(self._search_round)
         return self
@@ -246,47 +234,57 @@ class IterativeFinder:
                 raise StopAsyncIteration
             self.iteration_count += 1
             return result
-        except (asyncio.CancelledError, StopAsyncIteration):
-            self.loop.call_soon(self.aclose)
+        except asyncio.CancelledError:
+            await self._aclose(reason="cancelled")
+            raise
+        except StopAsyncIteration:
+            await self._aclose(reason="no more results")
             raise

-    def aclose(self):
+    async def _aclose(self, reason="?"):
+        log.debug("%s[%x] [%s] shutdown because %s: %i active nodes %i contacted %i produced %i queued",
+                  type(self).__name__, id(self), self.key.hex()[:8],
+                  reason, len(self.active), len(self.contacted),
+                  self.iteration_count, self.iteration_queue.qsize())
         self.running = False
         self.iteration_queue.put_nowait(None)
-        for task in chain(self.tasks, self.running_probes, self.delayed_calls):
+        for task in chain(self.tasks, self.running_probes.values()):
             task.cancel()
         self.tasks.clear()
         self.running_probes.clear()
-        self.delayed_calls.clear()
+
+    async def aclose(self):
+        if self.running:
+            await self._aclose(reason="aclose")
+        log.debug("%s[%x] [%s] async close completed",
+                  type(self).__name__, id(self), self.key.hex()[:8])


 class IterativeNodeFinder(IterativeFinder):
-    def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
-                 routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
-                 bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.k,
-                 exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
+    def __init__(self, loop: asyncio.AbstractEventLoop,
+                 protocol: 'KademliaProtocol', key: bytes,
+                 max_results: typing.Optional[int] = constants.K,
                  shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
-        super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
-                         shortlist)
+        super().__init__(loop, protocol, key, max_results, shortlist)
         self.yielded_peers: typing.Set['KademliaPeer'] = set()

     async def send_probe(self, peer: 'KademliaPeer') -> FindNodeResponse:
-        log.debug("probing %s:%d %s", peer.address, peer.udp_port, hexlify(peer.node_id)[:8] if peer.node_id else '')
+        log.debug("probe %s:%d (%s) for NODE %s",
+                  peer.address, peer.udp_port, peer.node_id.hex()[:8] if peer.node_id else '', self.key.hex()[:8])
         response = await self.protocol.get_rpc_peer(peer).find_node(self.key)
         return FindNodeResponse(self.key, response)

     def search_exhausted(self):
-        self.put_result(self.active, finish=True)
+        self.put_result(self.active.keys(), finish=True)

     def put_result(self, from_iter: typing.Iterable['KademliaPeer'], finish=False):
         not_yet_yielded = [
             peer for peer in from_iter
             if peer not in self.yielded_peers
             and peer.node_id != self.protocol.node_id
-            and self.peer_manager.peer_is_good(peer) is not False
+            and self.peer_manager.peer_is_good(peer) is True  # return only peers who answered
         ]
         not_yet_yielded.sort(key=lambda peer: self.distance(peer.node_id))
-        to_yield = not_yet_yielded[:min(constants.k, len(not_yet_yielded))]
+        to_yield = not_yet_yielded[:max(constants.K, self.max_results)]
         if to_yield:
             self.yielded_peers.update(to_yield)
             self.iteration_queue.put_nowait(to_yield)
@@ -298,27 +296,15 @@ class IterativeNodeFinder(IterativeFinder):

         if found:
             log.debug("found")
-            return self.put_result(self.active, finish=True)
-        if self.prev_closest_peer and self.closest_peer and not self._is_closer(self.prev_closest_peer):
-            # log.info("improving, %i %i %i %i %i", len(self.shortlist), len(self.active), len(self.contacted),
-            #          self.bottom_out_count, self.iteration_count)
-            self.bottom_out_count = 0
-        elif self.prev_closest_peer and self.closest_peer:
-            self.bottom_out_count += 1
-            log.info("bottom out %i %i %i", len(self.active), len(self.contacted), self.bottom_out_count)
-        if self.bottom_out_count >= self.bottom_out_limit or self.iteration_count >= self.bottom_out_limit:
-            log.info("limit hit")
-            self.put_result(self.active, True)
+            return self.put_result(self.active.keys(), finish=True)


 class IterativeValueFinder(IterativeFinder):
-    def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
-                 routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
-                 bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.k,
-                 exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
+    def __init__(self, loop: asyncio.AbstractEventLoop,
+                 protocol: 'KademliaProtocol', key: bytes,
+                 max_results: typing.Optional[int] = constants.K,
                  shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
-        super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
-                         shortlist)
+        super().__init__(loop, protocol, key, max_results, shortlist)
         self.blob_peers: typing.Set['KademliaPeer'] = set()
         # this tracks the index of the most recent page we requested from each peer
         self.peer_pages: typing.DefaultDict['KademliaPeer', int] = defaultdict(int)
|
@ -326,6 +312,8 @@ class IterativeValueFinder(IterativeFinder):
|
||||||
self.discovered_peers: typing.Dict['KademliaPeer', typing.Set['KademliaPeer']] = defaultdict(set)
|
self.discovered_peers: typing.Dict['KademliaPeer', typing.Set['KademliaPeer']] = defaultdict(set)
|
||||||
|
|
||||||
async def send_probe(self, peer: 'KademliaPeer') -> FindValueResponse:
|
async def send_probe(self, peer: 'KademliaPeer') -> FindValueResponse:
|
||||||
|
log.debug("probe %s:%d (%s) for VALUE %s",
|
||||||
|
peer.address, peer.udp_port, peer.node_id.hex()[:8], self.key.hex()[:8])
|
||||||
page = self.peer_pages[peer]
|
page = self.peer_pages[peer]
|
||||||
response = await self.protocol.get_rpc_peer(peer).find_value(self.key, page=page)
|
response = await self.protocol.get_rpc_peer(peer).find_value(self.key, page=page)
|
||||||
parsed = FindValueResponse(self.key, response)
|
parsed = FindValueResponse(self.key, response)
|
||||||
|
@@ -335,7 +323,7 @@ class IterativeValueFinder(IterativeFinder):
         decoded_peers = set()
         for compact_addr in parsed.found_compact_addresses:
             try:
-                decoded_peers.add(self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr))
+                decoded_peers.add(decode_tcp_peer_from_compact_address(compact_addr))
             except ValueError:
                 log.warning("misbehaving peer %s:%i returned invalid peer for blob",
                             peer.address, peer.udp_port)
@@ -347,8 +335,7 @@ class IterativeValueFinder(IterativeFinder):
                         already_known + len(parsed.found_compact_addresses))
         if len(self.discovered_peers[peer]) != already_known + len(parsed.found_compact_addresses):
             log.warning("misbehaving peer %s:%i returned duplicate peers for blob", peer.address, peer.udp_port)
-            parsed.found_compact_addresses.clear()
-        elif len(parsed.found_compact_addresses) >= constants.k and self.peer_pages[peer] < parsed.pages:
+        elif len(parsed.found_compact_addresses) >= constants.K and self.peer_pages[peer] < parsed.pages:
             # the peer returned a full page and indicates it has more
             self.peer_pages[peer] += 1
             if peer in self.contacted:
@@ -358,26 +345,15 @@ class IterativeValueFinder(IterativeFinder):

     def check_result_ready(self, response: FindValueResponse):
         if response.found:
-            blob_peers = [self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr)
+            blob_peers = [decode_tcp_peer_from_compact_address(compact_addr)
                           for compact_addr in response.found_compact_addresses]
             to_yield = []
-            self.bottom_out_count = 0
             for blob_peer in blob_peers:
                 if blob_peer not in self.blob_peers:
                     self.blob_peers.add(blob_peer)
                     to_yield.append(blob_peer)
             if to_yield:
-                # log.info("found %i new peers for blob", len(to_yield))
                 self.iteration_queue.put_nowait(to_yield)
-                # if self.max_results and len(self.blob_peers) >= self.max_results:
-                #     log.info("enough blob peers found")
-                #     if not self.finished.is_set():
-                #         self.finished.set()
-        elif self.prev_closest_peer and self.closest_peer:
-            self.bottom_out_count += 1
-            if self.bottom_out_count >= self.bottom_out_limit:
-                log.info("blob peer search bottomed out")
-                self.iteration_queue.put_nowait(None)

     def get_initial_result(self) -> typing.List['KademliaPeer']:
         if self.protocol.data_store.has_peers_for_blob(self.key):
|
@ -3,13 +3,16 @@ import socket
|
||||||
import functools
|
import functools
|
||||||
import hashlib
|
import hashlib
|
||||||
import asyncio
|
import asyncio
|
||||||
|
import time
|
||||||
import typing
|
import typing
|
||||||
import binascii
|
|
||||||
import random
|
import random
|
||||||
from asyncio.protocols import DatagramProtocol
|
from asyncio.protocols import DatagramProtocol
|
||||||
from asyncio.transports import DatagramTransport
|
from asyncio.transports import DatagramTransport
|
||||||
|
|
||||||
|
from prometheus_client import Gauge, Counter, Histogram
|
||||||
|
|
||||||
from lbry.dht import constants
|
from lbry.dht import constants
|
||||||
|
from lbry.dht.serialization.bencoding import DecodeError
|
||||||
from lbry.dht.serialization.datagram import decode_datagram, ErrorDatagram, ResponseDatagram, RequestDatagram
|
from lbry.dht.serialization.datagram import decode_datagram, ErrorDatagram, ResponseDatagram, RequestDatagram
|
||||||
from lbry.dht.serialization.datagram import RESPONSE_TYPE, ERROR_TYPE, PAGE_KEY
|
from lbry.dht.serialization.datagram import RESPONSE_TYPE, ERROR_TYPE, PAGE_KEY
|
||||||
from lbry.dht.error import RemoteException, TransportNotConnected
|
from lbry.dht.error import RemoteException, TransportNotConnected
|
||||||
|
@ -23,13 +26,18 @@ if typing.TYPE_CHECKING:
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
old_protocol_errors = {
|
OLD_PROTOCOL_ERRORS = {
|
||||||
"findNode() takes exactly 2 arguments (5 given)": "0.19.1",
|
"findNode() takes exactly 2 arguments (5 given)": "0.19.1",
|
||||||
"findValue() takes exactly 2 arguments (5 given)": "0.19.1"
|
"findValue() takes exactly 2 arguments (5 given)": "0.19.1"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class KademliaRPC:
|
class KademliaRPC:
|
||||||
|
stored_blob_metric = Gauge(
|
||||||
|
"stored_blobs", "Number of blobs announced by other peers", namespace="dht_node",
|
||||||
|
labelnames=("scope",),
|
||||||
|
)
|
||||||
|
|
||||||
def __init__(self, protocol: 'KademliaProtocol', loop: asyncio.AbstractEventLoop, peer_port: int = 3333):
|
def __init__(self, protocol: 'KademliaProtocol', loop: asyncio.AbstractEventLoop, peer_port: int = 3333):
|
||||||
self.protocol = protocol
|
self.protocol = protocol
|
||||||
self.loop = loop
|
self.loop = loop
|
||||||
|
@@ -48,35 +56,36 @@ class KademliaRPC:
         return b'pong'

     def store(self, rpc_contact: 'KademliaPeer', blob_hash: bytes, token: bytes, port: int) -> bytes:
-        if len(blob_hash) != constants.hash_bits // 8:
+        if len(blob_hash) != constants.HASH_BITS // 8:
             raise ValueError(f"invalid length of blob hash: {len(blob_hash)}")
         if not 0 < port < 65535:
             raise ValueError(f"invalid tcp port: {port}")
         rpc_contact.update_tcp_port(port)
         if not self.verify_token(token, rpc_contact.compact_ip()):
-            if self.loop.time() - self.protocol.started_listening_time < constants.token_secret_refresh_interval:
+            if self.loop.time() - self.protocol.started_listening_time < constants.TOKEN_SECRET_REFRESH_INTERVAL:
                 pass
             else:
                 raise ValueError("Invalid token")
         self.protocol.data_store.add_peer_to_blob(
             rpc_contact, blob_hash
         )
+        self.stored_blob_metric.labels("global").set(len(self.protocol.data_store))
         return b'OK'

     def find_node(self, rpc_contact: 'KademliaPeer', key: bytes) -> typing.List[typing.Tuple[bytes, str, int]]:
-        if len(key) != constants.hash_length:
+        if len(key) != constants.HASH_LENGTH:
             raise ValueError("invalid contact node_id length: %i" % len(key))

         contacts = self.protocol.routing_table.find_close_peers(key, sender_node_id=rpc_contact.node_id)
         contact_triples = []
-        for contact in contacts[:constants.k * 2]:
+        for contact in contacts[:constants.K * 2]:
             contact_triples.append((contact.node_id, contact.address, contact.udp_port))
         return contact_triples

     def find_value(self, rpc_contact: 'KademliaPeer', key: bytes, page: int = 0):
         page = page if page > 0 else 0

-        if len(key) != constants.hash_length:
+        if len(key) != constants.HASH_LENGTH:
             raise ValueError("invalid blob_exchange hash length: %i" % len(key))

         response = {
@@ -84,7 +93,7 @@ class KademliaRPC:
         }

         if not page:
-            response[b'contacts'] = self.find_node(rpc_contact, key)[:constants.k]
+            response[b'contacts'] = self.find_node(rpc_contact, key)[:constants.K]

         if self.protocol.protocol_version:
             response[b'protocolVersion'] = self.protocol.protocol_version
@@ -96,16 +105,16 @@ class KademliaRPC:
             if not rpc_contact.tcp_port or peer.compact_address_tcp() != rpc_contact.compact_address_tcp()
         ]
         # if we don't have k storing peers to return and we have this hash locally, include our contact information
-        if len(peers) < constants.k and binascii.hexlify(key).decode() in self.protocol.data_store.completed_blobs:
+        if len(peers) < constants.K and key.hex() in self.protocol.data_store.completed_blobs:
             peers.append(self.compact_address())
         if not peers:
             response[PAGE_KEY] = 0
         else:
-            response[PAGE_KEY] = (len(peers) // (constants.k + 1)) + 1  # how many pages of peers we have for the blob
-            if len(peers) > constants.k:
+            response[PAGE_KEY] = (len(peers) // (constants.K + 1)) + 1  # how many pages of peers we have for the blob
+            if len(peers) > constants.K:
                 random.Random(self.protocol.node_id).shuffle(peers)
-            if page * constants.k < len(peers):
-                response[key] = peers[page * constants.k:page * constants.k + constants.k]
+            if page * constants.K < len(peers):
+                response[key] = peers[page * constants.K:page * constants.K + constants.K]
         return response

     def refresh_token(self):  # TODO: this needs to be called periodically
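A worked example of the paging arithmetic above, assuming K = 8 for illustration: the page count and the slice returned for a requested page follow directly from the expressions in find_value.

K = 8
peers = [f"peer{i}" for i in range(20)]   # 20 compact addresses stored for one blob

pages = (len(peers) // (K + 1)) + 1       # 20 // 9 + 1 == 3 pages advertised
page_1 = peers[1 * K:1 * K + K]           # peers 8..15 returned when page=1 is requested

assert pages == 3
assert page_1[0] == "peer8" and len(page_1) == 8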
|
@ -154,7 +163,7 @@ class RemoteKademliaRPC:
|
||||||
:param blob_hash: blob hash as bytes
|
:param blob_hash: blob hash as bytes
|
||||||
:return: b'OK'
|
:return: b'OK'
|
||||||
"""
|
"""
|
||||||
if len(blob_hash) != constants.hash_bits // 8:
|
if len(blob_hash) != constants.HASH_BITS // 8:
|
||||||
raise ValueError(f"invalid length of blob hash: {len(blob_hash)}")
|
raise ValueError(f"invalid length of blob hash: {len(blob_hash)}")
|
||||||
if not self.protocol.peer_port or not 0 < self.protocol.peer_port < 65535:
|
if not self.protocol.peer_port or not 0 < self.protocol.peer_port < 65535:
|
||||||
raise ValueError(f"invalid tcp port: {self.protocol.peer_port}")
|
raise ValueError(f"invalid tcp port: {self.protocol.peer_port}")
|
||||||
|
@@ -171,7 +180,7 @@ class RemoteKademliaRPC:
         """
         :return: [(node_id, address, udp_port), ...]
         """
-        if len(key) != constants.hash_bits // 8:
+        if len(key) != constants.HASH_BITS // 8:
             raise ValueError(f"invalid length of find node key: {len(key)}")
         response = await self.protocol.send_request(
             self.peer, RequestDatagram.make_find_node(self.protocol.node_id, key)
|
@ -186,7 +195,7 @@ class RemoteKademliaRPC:
|
||||||
<key bytes>: [<blob_peer_compact_address, ...]
|
<key bytes>: [<blob_peer_compact_address, ...]
|
||||||
}
|
}
|
||||||
"""
|
"""
|
||||||
if len(key) != constants.hash_bits // 8:
|
if len(key) != constants.HASH_BITS // 8:
|
||||||
raise ValueError(f"invalid length of find value key: {len(key)}")
|
raise ValueError(f"invalid length of find value key: {len(key)}")
|
||||||
response = await self.protocol.send_request(
|
response = await self.protocol.send_request(
|
||||||
self.peer, RequestDatagram.make_find_value(self.protocol.node_id, key, page=page)
|
self.peer, RequestDatagram.make_find_value(self.protocol.node_id, key, page=page)
|
||||||
|
@ -203,12 +212,16 @@ class PingQueue:
|
||||||
self._process_task: asyncio.Task = None
|
self._process_task: asyncio.Task = None
|
||||||
self._running = False
|
self._running = False
|
||||||
self._running_pings: typing.Set[asyncio.Task] = set()
|
self._running_pings: typing.Set[asyncio.Task] = set()
|
||||||
self._default_delay = constants.maybe_ping_delay
|
self._default_delay = constants.MAYBE_PING_DELAY
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def running(self):
|
def running(self):
|
||||||
return self._running
|
return self._running
|
||||||
|
|
||||||
|
@property
|
||||||
|
def busy(self):
|
||||||
|
return self._running and (any(self._running_pings) or any(self._pending_contacts))
|
||||||
|
|
||||||
def enqueue_maybe_ping(self, *peers: 'KademliaPeer', delay: typing.Optional[float] = None):
|
def enqueue_maybe_ping(self, *peers: 'KademliaPeer', delay: typing.Optional[float] = None):
|
||||||
delay = delay if delay is not None else self._default_delay
|
delay = delay if delay is not None else self._default_delay
|
||||||
now = self._loop.time()
|
now = self._loop.time()
|
||||||
|
@ -220,11 +233,11 @@ class PingQueue:
|
||||||
async def ping_task():
|
async def ping_task():
|
||||||
try:
|
try:
|
||||||
if self._protocol.peer_manager.peer_is_good(peer):
|
if self._protocol.peer_manager.peer_is_good(peer):
|
||||||
if peer not in self._protocol.routing_table.get_peers():
|
if not self._protocol.routing_table.get_peer(peer.node_id):
|
||||||
self._protocol.add_peer(peer)
|
self._protocol.add_peer(peer)
|
||||||
return
|
return
|
||||||
await self._protocol.get_rpc_peer(peer).ping()
|
await self._protocol.get_rpc_peer(peer).ping()
|
||||||
except asyncio.TimeoutError:
|
except (asyncio.TimeoutError, RemoteException):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
task = self._loop.create_task(ping_task())
|
task = self._loop.create_task(ping_task())
|
||||||
|
@ -240,7 +253,7 @@ class PingQueue:
|
||||||
del self._pending_contacts[peer]
|
del self._pending_contacts[peer]
|
||||||
self.maybe_ping(peer)
|
self.maybe_ping(peer)
|
||||||
break
|
break
|
||||||
await asyncio.sleep(1, loop=self._loop)
|
await asyncio.sleep(1)
|
||||||
|
|
||||||
def start(self):
|
def start(self):
|
||||||
assert not self._running
|
assert not self._running
|
||||||
|
@ -259,9 +272,33 @@ class PingQueue:
|
||||||
|
|
||||||
|
|
||||||
class KademliaProtocol(DatagramProtocol):
|
class KademliaProtocol(DatagramProtocol):
|
||||||
|
request_sent_metric = Counter(
|
||||||
|
"request_sent", "Number of requests send from DHT RPC protocol", namespace="dht_node",
|
||||||
|
labelnames=("method",),
|
||||||
|
)
|
||||||
|
request_success_metric = Counter(
|
||||||
|
"request_success", "Number of successful requests", namespace="dht_node",
|
||||||
|
labelnames=("method",),
|
||||||
|
)
|
||||||
|
request_error_metric = Counter(
|
||||||
|
"request_error", "Number of errors returned from request to other peers", namespace="dht_node",
|
||||||
|
labelnames=("method",),
|
||||||
|
)
|
||||||
|
HISTOGRAM_BUCKETS = (
|
||||||
|
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 3.0, 3.5, 4.0, 4.50, 5.0, 5.50, 6.0, float('inf')
|
||||||
|
)
|
||||||
|
response_time_metric = Histogram(
|
||||||
|
"response_time", "Response times of DHT RPC requests", namespace="dht_node", buckets=HISTOGRAM_BUCKETS,
|
||||||
|
labelnames=("method",)
|
||||||
|
)
|
||||||
|
received_request_metric = Counter(
|
||||||
|
"received_request", "Number of received DHT RPC requests", namespace="dht_node",
|
||||||
|
labelnames=("method",),
|
||||||
|
)
|
||||||
|
|
||||||
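For readers not familiar with prometheus_client, the class-level metrics added above use its standard Counter and Histogram API: counters are incremented per RPC method label, and the histogram records response times against the bucket boundaries listed in HISTOGRAM_BUCKETS. A small self-contained sketch (the metric names and the timed_call helper here are illustrative, not part of the protocol):

```python
import time
from prometheus_client import Counter, Histogram

# Illustrative metrics; the real ones are class attributes on KademliaProtocol.
sent = Counter("example_request_sent", "Requests sent", namespace="dht_node",
               labelnames=("method",))
latency = Histogram("example_response_time", "Response times", namespace="dht_node",
                    buckets=(.005, .01, .05, .1, .5, 1.0, float("inf")),
                    labelnames=("method",))

def timed_call(method: str, func):
    sent.labels(method=method).inc()
    start = time.perf_counter()
    result = func()
    latency.labels(method=method).observe(time.perf_counter() - start)
    return result

print(timed_call("ping", lambda: b"pong"))
```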
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, external_ip: str,
|
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, external_ip: str,
|
||||||
udp_port: int, peer_port: int, rpc_timeout: float = constants.rpc_timeout,
|
udp_port: int, peer_port: int, rpc_timeout: float = constants.RPC_TIMEOUT,
|
||||||
split_buckets_under_index: int = constants.split_buckets_under_index):
|
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_boostrap_node: bool = False):
|
||||||
self.peer_manager = peer_manager
|
self.peer_manager = peer_manager
|
||||||
self.loop = loop
|
self.loop = loop
|
||||||
self.node_id = node_id
|
self.node_id = node_id
|
||||||
|
@ -271,20 +308,21 @@ class KademliaProtocol(DatagramProtocol):
|
||||||
self.is_seed_node = False
|
self.is_seed_node = False
|
||||||
self.partial_messages: typing.Dict[bytes, typing.Dict[bytes, bytes]] = {}
|
self.partial_messages: typing.Dict[bytes, typing.Dict[bytes, bytes]] = {}
|
||||||
self.sent_messages: typing.Dict[bytes, typing.Tuple['KademliaPeer', asyncio.Future, RequestDatagram]] = {}
|
self.sent_messages: typing.Dict[bytes, typing.Tuple['KademliaPeer', asyncio.Future, RequestDatagram]] = {}
|
||||||
self.protocol_version = constants.protocol_version
|
self.protocol_version = constants.PROTOCOL_VERSION
|
||||||
self.started_listening_time = 0
|
self.started_listening_time = 0
|
||||||
self.transport: DatagramTransport = None
|
self.transport: DatagramTransport = None
|
||||||
self.old_token_secret = constants.generate_id()
|
self.old_token_secret = constants.generate_id()
|
||||||
self.token_secret = constants.generate_id()
|
self.token_secret = constants.generate_id()
|
||||||
self.routing_table = TreeRoutingTable(self.loop, self.peer_manager, self.node_id, split_buckets_under_index)
|
self.routing_table = TreeRoutingTable(
|
||||||
|
self.loop, self.peer_manager, self.node_id, split_buckets_under_index, is_bootstrap_node=is_boostrap_node)
|
||||||
self.data_store = DictDataStore(self.loop, self.peer_manager)
|
self.data_store = DictDataStore(self.loop, self.peer_manager)
|
||||||
self.ping_queue = PingQueue(self.loop, self)
|
self.ping_queue = PingQueue(self.loop, self)
|
||||||
self.node_rpc = KademliaRPC(self, self.loop, self.peer_port)
|
self.node_rpc = KademliaRPC(self, self.loop, self.peer_port)
|
||||||
self.rpc_timeout = rpc_timeout
|
self.rpc_timeout = rpc_timeout
|
||||||
self._split_lock = asyncio.Lock(loop=self.loop)
|
self._split_lock = asyncio.Lock()
|
||||||
self._to_remove: typing.Set['KademliaPeer'] = set()
|
self._to_remove: typing.Set['KademliaPeer'] = set()
|
||||||
self._to_add: typing.Set['KademliaPeer'] = set()
|
self._to_add: typing.Set['KademliaPeer'] = set()
|
||||||
self._wakeup_routing_task = asyncio.Event(loop=self.loop)
|
self._wakeup_routing_task = asyncio.Event()
|
||||||
self.maintaing_routing_task: typing.Optional[asyncio.Task] = None
|
self.maintaing_routing_task: typing.Optional[asyncio.Task] = None
|
||||||
|
|
||||||
@functools.lru_cache(128)
|
@functools.lru_cache(128)
|
||||||
|
@ -323,72 +361,10 @@ class KademliaProtocol(DatagramProtocol):
|
||||||
return args, {}
|
return args, {}
|
||||||
|
|
||||||
async def _add_peer(self, peer: 'KademliaPeer'):
|
async def _add_peer(self, peer: 'KademliaPeer'):
|
||||||
if not peer.node_id:
|
async def probe(some_peer: 'KademliaPeer'):
|
||||||
log.warning("Tried adding a peer with no node id!")
|
rpc_peer = self.get_rpc_peer(some_peer)
|
||||||
return False
|
await rpc_peer.ping()
|
||||||
for p in self.routing_table.get_peers():
|
return await self.routing_table.add_peer(peer, probe)
|
||||||
if (p.address, p.udp_port) == (peer.address, peer.udp_port) and p.node_id != peer.node_id:
|
|
||||||
self.routing_table.remove_peer(p)
|
|
||||||
self.routing_table.join_buckets()
|
|
||||||
bucket_index = self.routing_table.kbucket_index(peer.node_id)
|
|
||||||
if self.routing_table.buckets[bucket_index].add_peer(peer):
|
|
||||||
return True
|
|
||||||
|
|
||||||
# The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
|
|
||||||
if self.routing_table.should_split(bucket_index, peer.node_id):
|
|
||||||
self.routing_table.split_bucket(bucket_index)
|
|
||||||
# Retry the insertion attempt
|
|
||||||
result = await self._add_peer(peer)
|
|
||||||
self.routing_table.join_buckets()
|
|
||||||
return result
|
|
||||||
else:
|
|
||||||
# We can't split the k-bucket
|
|
||||||
#
|
|
||||||
# The 13 page kademlia paper specifies that the least recently contacted node in the bucket
|
|
||||||
# shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
|
|
||||||
# the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
|
|
||||||
#
|
|
||||||
# A reasonable extension to this is BEP 0005, which extends the above:
|
|
||||||
#
|
|
||||||
# Not all nodes that we learn about are equal. Some are "good" and some are not.
|
|
||||||
# Many nodes using the DHT are able to send queries and receive responses,
|
|
||||||
# but are not able to respond to queries from other nodes. It is important that
|
|
||||||
# each node's routing table must contain only known good nodes. A good node is
|
|
||||||
# a node has responded to one of our queries within the last 15 minutes. A node
|
|
||||||
# is also good if it has ever responded to one of our queries and has sent us a
|
|
||||||
# query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
|
|
||||||
# questionable. Nodes become bad when they fail to respond to multiple queries
|
|
||||||
# in a row. Nodes that we know are good are given priority over nodes with unknown status.
|
|
||||||
#
|
|
||||||
# When there are bad or questionable nodes in the bucket, the least recent is selected for
|
|
||||||
# potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
|
|
||||||
# contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
|
|
||||||
# is ignored if the pinged node replies.
|
|
||||||
|
|
||||||
not_good_contacts = self.routing_table.buckets[bucket_index].get_bad_or_unknown_peers()
|
|
||||||
not_recently_replied = []
|
|
||||||
for p in not_good_contacts:
|
|
||||||
last_replied = self.peer_manager.get_last_replied(p.address, p.udp_port)
|
|
||||||
if not last_replied or last_replied + 60 < self.loop.time():
|
|
||||||
not_recently_replied.append(p)
|
|
||||||
if not_recently_replied:
|
|
||||||
to_replace = not_recently_replied[0]
|
|
||||||
else:
|
|
||||||
to_replace = self.routing_table.buckets[bucket_index].peers[0]
|
|
||||||
last_replied = self.peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
|
|
||||||
if last_replied and last_replied + 60 > self.loop.time():
|
|
||||||
return False
|
|
||||||
log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
|
|
||||||
try:
|
|
||||||
to_replace_rpc = self.get_rpc_peer(to_replace)
|
|
||||||
await to_replace_rpc.ping()
|
|
||||||
return False
|
|
||||||
except asyncio.TimeoutError:
|
|
||||||
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
|
|
||||||
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
|
|
||||||
if to_replace in self.routing_table.buckets[bucket_index]:
|
|
||||||
self.routing_table.buckets[bucket_index].remove_peer(to_replace)
|
|
||||||
return await self._add_peer(peer)
|
|
||||||
|
|
||||||
def add_peer(self, peer: 'KademliaPeer'):
|
def add_peer(self, peer: 'KademliaPeer'):
|
||||||
if peer.node_id == self.node_id:
|
if peer.node_id == self.node_id:
|
||||||
|
@ -406,35 +382,34 @@ class KademliaProtocol(DatagramProtocol):
|
||||||
async with self._split_lock:
|
async with self._split_lock:
|
||||||
peer = self._to_remove.pop()
|
peer = self._to_remove.pop()
|
||||||
self.routing_table.remove_peer(peer)
|
self.routing_table.remove_peer(peer)
|
||||||
self.routing_table.join_buckets()
|
|
||||||
while self._to_add:
|
while self._to_add:
|
||||||
async with self._split_lock:
|
async with self._split_lock:
|
||||||
await self._add_peer(self._to_add.pop())
|
await self._add_peer(self._to_add.pop())
|
||||||
await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1, loop=self.loop), loop=self.loop)
|
await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1))
|
||||||
self._wakeup_routing_task.clear()
|
self._wakeup_routing_task.clear()
|
||||||
|
|
||||||
def _handle_rpc(self, sender_contact: 'KademliaPeer', message: RequestDatagram):
|
def _handle_rpc(self, sender_contact: 'KademliaPeer', message: RequestDatagram):
|
||||||
assert sender_contact.node_id != self.node_id, (binascii.hexlify(sender_contact.node_id)[:8].decode(),
|
assert sender_contact.node_id != self.node_id, (sender_contact.node_id.hex()[:8],
|
||||||
binascii.hexlify(self.node_id)[:8].decode())
|
self.node_id.hex()[:8])
|
||||||
method = message.method
|
method = message.method
|
||||||
if method not in [b'ping', b'store', b'findNode', b'findValue']:
|
if method not in [b'ping', b'store', b'findNode', b'findValue']:
|
||||||
raise AttributeError('Invalid method: %s' % message.method.decode())
|
raise AttributeError('Invalid method: %s' % message.method.decode())
|
||||||
if message.args and isinstance(message.args[-1], dict) and b'protocolVersion' in message.args[-1]:
|
if message.args and isinstance(message.args[-1], dict) and b'protocolVersion' in message.args[-1]:
|
||||||
# args don't need reformatting
|
# args don't need reformatting
|
||||||
args, kw = tuple(message.args[:-1]), message.args[-1]
|
args, kwargs = tuple(message.args[:-1]), message.args[-1]
|
||||||
else:
|
else:
|
||||||
args, kw = self._migrate_incoming_rpc_args(sender_contact, message.method, *message.args)
|
args, kwargs = self._migrate_incoming_rpc_args(sender_contact, message.method, *message.args)
|
||||||
log.debug("%s:%i RECV CALL %s %s:%i", self.external_ip, self.udp_port, message.method.decode(),
|
log.debug("%s:%i RECV CALL %s %s:%i", self.external_ip, self.udp_port, message.method.decode(),
|
||||||
sender_contact.address, sender_contact.udp_port)
|
sender_contact.address, sender_contact.udp_port)
|
||||||
|
|
||||||
if method == b'ping':
|
if method == b'ping':
|
||||||
result = self.node_rpc.ping()
|
result = self.node_rpc.ping()
|
||||||
elif method == b'store':
|
elif method == b'store':
|
||||||
blob_hash, token, port, original_publisher_id, age = args[:5]
|
blob_hash, token, port, original_publisher_id, age = args[:5] # pylint: disable=unused-variable
|
||||||
result = self.node_rpc.store(sender_contact, blob_hash, token, port)
|
result = self.node_rpc.store(sender_contact, blob_hash, token, port)
|
||||||
else:
|
else:
|
||||||
key = args[0]
|
key = args[0]
|
||||||
page = kw.get(PAGE_KEY, 0)
|
page = kwargs.get(PAGE_KEY, 0)
|
||||||
if method == b'findNode':
|
if method == b'findNode':
|
||||||
result = self.node_rpc.find_node(sender_contact, key)
|
result = self.node_rpc.find_node(sender_contact, key)
|
||||||
else:
|
else:
|
||||||
|
@ -447,11 +422,15 @@ class KademliaProtocol(DatagramProtocol):
|
||||||
|
|
||||||
def handle_request_datagram(self, address: typing.Tuple[str, int], request_datagram: RequestDatagram):
|
def handle_request_datagram(self, address: typing.Tuple[str, int], request_datagram: RequestDatagram):
|
||||||
# This is an RPC method request
|
# This is an RPC method request
|
||||||
|
self.received_request_metric.labels(method=request_datagram.method).inc()
|
||||||
self.peer_manager.report_last_requested(address[0], address[1])
|
self.peer_manager.report_last_requested(address[0], address[1])
|
||||||
try:
|
peer = self.routing_table.get_peer(request_datagram.node_id)
|
||||||
peer = self.routing_table.get_peer(request_datagram.node_id)
|
if not peer:
|
||||||
except IndexError:
|
try:
|
||||||
peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
|
peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
|
||||||
|
except ValueError as err:
|
||||||
|
log.warning("error replying to %s: %s", address[0], str(err))
|
||||||
|
return
|
||||||
try:
|
try:
|
||||||
self._handle_rpc(peer, request_datagram)
|
self._handle_rpc(peer, request_datagram)
|
||||||
# if the contact is not known to be bad (yet) and we haven't yet queried it, send it a ping so that it
|
# if the contact is not known to be bad (yet) and we haven't yet queried it, send it a ping so that it
|
||||||
|
@ -484,25 +463,25 @@ class KademliaProtocol(DatagramProtocol):
|
||||||
def handle_response_datagram(self, address: typing.Tuple[str, int], response_datagram: ResponseDatagram):
|
def handle_response_datagram(self, address: typing.Tuple[str, int], response_datagram: ResponseDatagram):
|
||||||
# Find the message that triggered this response
|
# Find the message that triggered this response
|
||||||
if response_datagram.rpc_id in self.sent_messages:
|
if response_datagram.rpc_id in self.sent_messages:
|
||||||
peer, df, request = self.sent_messages[response_datagram.rpc_id]
|
peer, future, _ = self.sent_messages[response_datagram.rpc_id]
|
||||||
if peer.address != address[0]:
|
if peer.address != address[0]:
|
||||||
df.set_exception(RemoteException(
|
future.set_exception(
|
||||||
f"response from {address[0]}, expected {peer.address}")
|
RemoteException(f"response from {address[0]}, expected {peer.address}")
|
||||||
)
|
)
|
||||||
return
|
return
|
||||||
|
|
||||||
# We got a result from the RPC
|
# We got a result from the RPC
|
||||||
if peer.node_id == self.node_id:
|
if peer.node_id == self.node_id:
|
||||||
df.set_exception(RemoteException("node has our node id"))
|
future.set_exception(RemoteException("node has our node id"))
|
||||||
return
|
return
|
||||||
elif response_datagram.node_id == self.node_id:
|
elif response_datagram.node_id == self.node_id:
|
||||||
df.set_exception(RemoteException("incoming message is from our node id"))
|
future.set_exception(RemoteException("incoming message is from our node id"))
|
||||||
return
|
return
|
||||||
peer = make_kademlia_peer(response_datagram.node_id, address[0], address[1])
|
peer = make_kademlia_peer(response_datagram.node_id, address[0], address[1])
|
||||||
self.peer_manager.report_last_replied(address[0], address[1])
|
self.peer_manager.report_last_replied(address[0], address[1])
|
||||||
self.peer_manager.update_contact_triple(peer.node_id, address[0], address[1])
|
self.peer_manager.update_contact_triple(peer.node_id, address[0], address[1])
|
||||||
if not df.cancelled():
|
if not future.cancelled():
|
||||||
df.set_result(response_datagram)
|
future.set_result(response_datagram)
|
||||||
self.add_peer(peer)
|
self.add_peer(peer)
|
||||||
else:
|
else:
|
||||||
log.warning("%s:%i replied, but after we cancelled the request attempt",
|
log.warning("%s:%i replied, but after we cancelled the request attempt",
|
||||||
|
@ -516,11 +495,13 @@ class KademliaProtocol(DatagramProtocol):
|
||||||
# The RPC request raised a remote exception; raise it locally
|
# The RPC request raised a remote exception; raise it locally
|
||||||
remote_exception = RemoteException(f"{error_datagram.exception_type}({error_datagram.response})")
|
remote_exception = RemoteException(f"{error_datagram.exception_type}({error_datagram.response})")
|
||||||
if error_datagram.rpc_id in self.sent_messages:
|
if error_datagram.rpc_id in self.sent_messages:
|
||||||
peer, df, request = self.sent_messages.pop(error_datagram.rpc_id)
|
peer, future, request = self.sent_messages.pop(error_datagram.rpc_id)
|
||||||
if (peer.address, peer.udp_port) != address:
|
if (peer.address, peer.udp_port) != address:
|
||||||
df.set_exception(RemoteException(
|
future.set_exception(
|
||||||
f"response from {address[0]}:{address[1]}, "
|
RemoteException(
|
||||||
f"expected {peer.address}:{peer.udp_port}")
|
f"response from {address[0]}:{address[1]}, "
|
||||||
|
f"expected {peer.address}:{peer.udp_port}"
|
||||||
|
)
|
||||||
)
|
)
|
||||||
return
|
return
|
||||||
error_msg = f"" \
|
error_msg = f"" \
|
||||||
|
@ -529,28 +510,32 @@ class KademliaProtocol(DatagramProtocol):
|
||||||
f"Raised: {str(remote_exception)}"
|
f"Raised: {str(remote_exception)}"
|
||||||
if 'Invalid token' in error_msg:
|
if 'Invalid token' in error_msg:
|
||||||
log.debug(error_msg)
|
log.debug(error_msg)
|
||||||
elif error_datagram.response not in old_protocol_errors:
|
elif error_datagram.response not in OLD_PROTOCOL_ERRORS:
|
||||||
log.warning(error_msg)
|
log.warning(error_msg)
|
||||||
else:
|
else:
|
||||||
log.debug("known dht protocol backwards compatibility error with %s:%i (lbrynet v%s)",
|
log.debug(
|
||||||
peer.address, peer.udp_port, old_protocol_errors[error_datagram.response])
|
"known dht protocol backwards compatibility error with %s:%i (lbrynet v%s)",
|
||||||
df.set_exception(remote_exception)
|
peer.address, peer.udp_port, OLD_PROTOCOL_ERRORS[error_datagram.response]
|
||||||
|
)
|
||||||
|
future.set_exception(remote_exception)
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
if error_datagram.response not in old_protocol_errors:
|
if error_datagram.response not in OLD_PROTOCOL_ERRORS:
|
||||||
msg = f"Received error from {address[0]}:{address[1]}, but it isn't in response to a " \
|
msg = f"Received error from {address[0]}:{address[1]}, but it isn't in response to a " \
|
||||||
f"pending request: {str(remote_exception)}"
|
f"pending request: {str(remote_exception)}"
|
||||||
log.warning(msg)
|
log.warning(msg)
|
||||||
else:
|
else:
|
||||||
log.debug("known dht protocol backwards compatibility error with %s:%i (lbrynet v%s)",
|
log.debug(
|
||||||
address[0], address[1], old_protocol_errors[error_datagram.response])
|
"known dht protocol backwards compatibility error with %s:%i (lbrynet v%s)",
|
||||||
|
address[0], address[1], OLD_PROTOCOL_ERRORS[error_datagram.response]
|
||||||
|
)
|
||||||
|
|
||||||
def datagram_received(self, datagram: bytes, address: typing.Tuple[str, int]) -> None:
|
def datagram_received(self, datagram: bytes, address: typing.Tuple[str, int]) -> None: # pylint: disable=arguments-renamed
|
||||||
try:
|
try:
|
||||||
message = decode_datagram(datagram)
|
message = decode_datagram(datagram)
|
||||||
except (ValueError, TypeError):
|
except (ValueError, TypeError, DecodeError):
|
||||||
self.peer_manager.report_failure(address[0], address[1])
|
self.peer_manager.report_failure(address[0], address[1])
|
||||||
log.warning("Couldn't decode dht datagram from %s: %s", address, binascii.hexlify(datagram).decode())
|
log.warning("Couldn't decode dht datagram from %s: %s", address, datagram.hex())
|
||||||
return
|
return
|
||||||
|
|
||||||
if isinstance(message, RequestDatagram):
|
if isinstance(message, RequestDatagram):
|
||||||
|
@ -565,14 +550,19 @@ class KademliaProtocol(DatagramProtocol):
|
||||||
self._send(peer, request)
|
self._send(peer, request)
|
||||||
response_fut = self.sent_messages[request.rpc_id][1]
|
response_fut = self.sent_messages[request.rpc_id][1]
|
||||||
try:
|
try:
|
||||||
|
self.request_sent_metric.labels(method=request.method).inc()
|
||||||
|
start = time.perf_counter()
|
||||||
response = await asyncio.wait_for(response_fut, self.rpc_timeout)
|
response = await asyncio.wait_for(response_fut, self.rpc_timeout)
|
||||||
|
self.response_time_metric.labels(method=request.method).observe(time.perf_counter() - start)
|
||||||
self.peer_manager.report_last_replied(peer.address, peer.udp_port)
|
self.peer_manager.report_last_replied(peer.address, peer.udp_port)
|
||||||
|
self.request_success_metric.labels(method=request.method).inc()
|
||||||
return response
|
return response
|
||||||
except asyncio.CancelledError:
|
except asyncio.CancelledError:
|
||||||
if not response_fut.done():
|
if not response_fut.done():
|
||||||
response_fut.cancel()
|
response_fut.cancel()
|
||||||
raise
|
raise
|
||||||
except (asyncio.TimeoutError, RemoteException):
|
except (asyncio.TimeoutError, RemoteException):
|
||||||
|
self.request_error_metric.labels(method=request.method).inc()
|
||||||
self.peer_manager.report_failure(peer.address, peer.udp_port)
|
self.peer_manager.report_failure(peer.address, peer.udp_port)
|
||||||
if self.peer_manager.peer_is_good(peer) is False:
|
if self.peer_manager.peer_is_good(peer) is False:
|
||||||
self.remove_peer(peer)
|
self.remove_peer(peer)
|
||||||
|
@ -589,12 +579,12 @@ class KademliaProtocol(DatagramProtocol):
|
||||||
raise TransportNotConnected()
|
raise TransportNotConnected()
|
||||||
|
|
||||||
data = message.bencode()
|
data = message.bencode()
|
||||||
if len(data) > constants.msg_size_limit:
|
if len(data) > constants.MSG_SIZE_LIMIT:
|
||||||
log.warning("cannot send datagram larger than %i bytes (packet is %i bytes)",
|
log.warning("cannot send datagram larger than %i bytes (packet is %i bytes)",
|
||||||
constants.msg_size_limit, len(data))
|
constants.MSG_SIZE_LIMIT, len(data))
|
||||||
log.debug("Packet is too large to send: %s", binascii.hexlify(data[:3500]).decode())
|
log.debug("Packet is too large to send: %s", data[:3500].hex())
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
f"cannot send datagram larger than {constants.msg_size_limit} bytes (packet is {len(data)} bytes)"
|
f"cannot send datagram larger than {constants.MSG_SIZE_LIMIT} bytes (packet is {len(data)} bytes)"
|
||||||
)
|
)
|
||||||
if isinstance(message, (RequestDatagram, ResponseDatagram)):
|
if isinstance(message, (RequestDatagram, ResponseDatagram)):
|
||||||
assert message.node_id == self.node_id, message
|
assert message.node_id == self.node_id, message
|
||||||
|
@ -637,38 +627,38 @@ class KademliaProtocol(DatagramProtocol):
|
||||||
return constants.digest(self.token_secret + compact_ip)
|
return constants.digest(self.token_secret + compact_ip)
|
||||||
|
|
||||||
def verify_token(self, token, compact_ip):
|
def verify_token(self, token, compact_ip):
|
||||||
h = constants.hash_class()
|
h = constants.HASH_CLASS()
|
||||||
h.update(self.token_secret + compact_ip)
|
h.update(self.token_secret + compact_ip)
|
||||||
if self.old_token_secret and not token == h.digest(): # TODO: why should we be accepting the previous token?
|
if self.old_token_secret and not token == h.digest(): # TODO: why should we be accepting the previous token?
|
||||||
h = constants.hash_class()
|
h = constants.HASH_CLASS()
|
||||||
h.update(self.old_token_secret + compact_ip)
|
h.update(self.old_token_secret + compact_ip)
|
||||||
if not token == h.digest():
|
if not token == h.digest():
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
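The check above accepts a token made with either the current or the previous secret, so tokens handed out just before a refresh_token() rotation remain valid. A rough sketch of the same scheme, assuming HASH_CLASS is hashlib.sha384 (consistent with the 48-byte ids seen elsewhere in this diff); the TokenRotator helper and its method names are made up for illustration:

```python
import hashlib
import os

HASH_CLASS = hashlib.sha384  # assumption: matches the 48-byte digests used by the DHT

class TokenRotator:
    """Hypothetical helper mirroring make_token / verify_token / refresh_token."""

    def __init__(self):
        self.old_secret = os.urandom(48)
        self.secret = os.urandom(48)

    def make_token(self, compact_ip: bytes) -> bytes:
        return HASH_CLASS(self.secret + compact_ip).digest()

    def verify_token(self, token: bytes, compact_ip: bytes) -> bool:
        if token == HASH_CLASS(self.secret + compact_ip).digest():
            return True
        # tokens issued just before the last rotation are still accepted
        return token == HASH_CLASS(self.old_secret + compact_ip).digest()

    def refresh(self):
        self.old_secret, self.secret = self.secret, os.urandom(48)

rotator = TokenRotator()
token = rotator.make_token(b"\x7f\x00\x00\x01")
rotator.refresh()
assert rotator.verify_token(token, b"\x7f\x00\x00\x01")   # still valid after one rotation
```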
|
|
||||||
async def store_to_peer(self, hash_value: bytes, peer: 'KademliaPeer',
|
async def store_to_peer(self, hash_value: bytes, peer: 'KademliaPeer', # pylint: disable=too-many-return-statements
|
||||||
retry: bool = True) -> typing.Tuple[bytes, bool]:
|
retry: bool = True) -> typing.Tuple[bytes, bool]:
|
||||||
async def __store():
|
async def __store():
|
||||||
res = await self.get_rpc_peer(peer).store(hash_value)
|
res = await self.get_rpc_peer(peer).store(hash_value)
|
||||||
if res != b"OK":
|
if res != b"OK":
|
||||||
raise ValueError(res)
|
raise ValueError(res)
|
||||||
log.debug("Stored %s to %s", binascii.hexlify(hash_value).decode()[:8], peer)
|
log.debug("Stored %s to %s", hash_value.hex()[:8], peer)
|
||||||
return peer.node_id, True
|
return peer.node_id, True
|
||||||
|
|
||||||
try:
|
try:
|
||||||
return await __store()
|
return await __store()
|
||||||
except asyncio.TimeoutError:
|
except asyncio.TimeoutError:
|
||||||
log.debug("Timeout while storing blob_hash %s at %s", binascii.hexlify(hash_value).decode()[:8], peer)
|
log.debug("Timeout while storing blob_hash %s at %s", hash_value.hex()[:8], peer)
|
||||||
return peer.node_id, False
|
return peer.node_id, False
|
||||||
except ValueError as err:
|
except ValueError as err:
|
||||||
log.error("Unexpected response: %s" % err)
|
log.error("Unexpected response: %s", err)
|
||||||
return peer.node_id, False
|
return peer.node_id, False
|
||||||
except RemoteException as err:
|
except RemoteException as err:
|
||||||
if 'findValue() takes exactly 2 arguments (5 given)' in str(err):
|
if 'findValue() takes exactly 2 arguments (5 given)' in str(err):
|
||||||
log.debug("peer %s:%i is running an incompatible version of lbrynet", peer.address, peer.udp_port)
|
log.debug("peer %s:%i is running an incompatible version of lbrynet", peer.address, peer.udp_port)
|
||||||
return peer.node_id, False
|
return peer.node_id, False
|
||||||
if 'Invalid token' not in str(err):
|
if 'Invalid token' not in str(err):
|
||||||
log.exception("Unexpected error while storing blob_hash")
|
log.warning("Unexpected error while storing blob_hash: %s", err)
|
||||||
return peer.node_id, False
|
return peer.node_id, False
|
||||||
self.peer_manager.clear_token(peer.node_id)
|
self.peer_manager.clear_token(peer.node_id)
|
||||||
if not retry:
|
if not retry:
|
|
@ -4,7 +4,11 @@ import logging
|
||||||
import typing
|
import typing
|
||||||
import itertools
|
import itertools
|
||||||
|
|
||||||
|
from prometheus_client import Gauge
|
||||||
|
|
||||||
|
from lbry import utils
|
||||||
from lbry.dht import constants
|
from lbry.dht import constants
|
||||||
|
from lbry.dht.error import RemoteException
|
||||||
from lbry.dht.protocol.distance import Distance
|
from lbry.dht.protocol.distance import Distance
|
||||||
if typing.TYPE_CHECKING:
|
if typing.TYPE_CHECKING:
|
||||||
from lbry.dht.peer import KademliaPeer, PeerManager
|
from lbry.dht.peer import KademliaPeer, PeerManager
|
||||||
|
@ -13,10 +17,20 @@ log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class KBucket:
|
class KBucket:
|
||||||
""" Description - later
|
|
||||||
"""
|
"""
|
||||||
|
Kademlia K-bucket implementation.
|
||||||
|
"""
|
||||||
|
peer_in_routing_table_metric = Gauge(
|
||||||
|
"peers_in_routing_table", "Number of peers on routing table", namespace="dht_node",
|
||||||
|
labelnames=("scope",)
|
||||||
|
)
|
||||||
|
peer_with_x_bit_colliding_metric = Gauge(
|
||||||
|
"peer_x_bit_colliding", "Number of peers with at least X bits colliding with this node id",
|
||||||
|
namespace="dht_node", labelnames=("amount",)
|
||||||
|
)
|
||||||
|
|
||||||
def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int, node_id: bytes):
|
def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int,
|
||||||
|
node_id: bytes, capacity: int = constants.K):
|
||||||
"""
|
"""
|
||||||
@param range_min: The lower boundary for the range in the n-bit ID
|
@param range_min: The lower boundary for the range in the n-bit ID
|
||||||
space covered by this k-bucket
|
space covered by this k-bucket
|
||||||
|
@ -24,12 +38,12 @@ class KBucket:
|
||||||
covered by this k-bucket
|
covered by this k-bucket
|
||||||
"""
|
"""
|
||||||
self._peer_manager = peer_manager
|
self._peer_manager = peer_manager
|
||||||
self.last_accessed = 0
|
|
||||||
self.range_min = range_min
|
self.range_min = range_min
|
||||||
self.range_max = range_max
|
self.range_max = range_max
|
||||||
self.peers: typing.List['KademliaPeer'] = []
|
self.peers: typing.List['KademliaPeer'] = []
|
||||||
self._node_id = node_id
|
self._node_id = node_id
|
||||||
self._distance_to_self = Distance(node_id)
|
self._distance_to_self = Distance(node_id)
|
||||||
|
self.capacity = capacity
|
||||||
|
|
||||||
def add_peer(self, peer: 'KademliaPeer') -> bool:
|
def add_peer(self, peer: 'KademliaPeer') -> bool:
|
||||||
""" Add contact to _contact list in the right order. This will move the
|
""" Add contact to _contact list in the right order. This will move the
|
||||||
|
@ -50,24 +64,25 @@ class KBucket:
|
||||||
self.peers.append(peer)
|
self.peers.append(peer)
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
for i in range(len(self.peers)):
|
for i, _ in enumerate(self.peers):
|
||||||
p = self.peers[i]
|
local_peer = self.peers[i]
|
||||||
if p.node_id == peer.node_id:
|
if local_peer.node_id == peer.node_id:
|
||||||
self.peers.remove(p)
|
self.peers.remove(local_peer)
|
||||||
self.peers.append(peer)
|
self.peers.append(peer)
|
||||||
return True
|
return True
|
||||||
if len(self.peers) < constants.k:
|
if len(self.peers) < self.capacity:
|
||||||
self.peers.append(peer)
|
self.peers.append(peer)
|
||||||
|
self.peer_in_routing_table_metric.labels("global").inc()
|
||||||
|
bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
|
||||||
|
self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).inc()
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
return False
|
return False
|
||||||
# raise BucketFull("No space in bucket to insert contact")
|
|
||||||
|
|
||||||
def get_peer(self, node_id: bytes) -> 'KademliaPeer':
|
def get_peer(self, node_id: bytes) -> 'KademliaPeer':
|
||||||
for peer in self.peers:
|
for peer in self.peers:
|
||||||
if peer.node_id == node_id:
|
if peer.node_id == node_id:
|
||||||
return peer
|
return peer
|
||||||
raise IndexError(node_id)
|
|
||||||
|
|
||||||
def get_peers(self, count=-1, exclude_contact=None, sort_distance_to=None) -> typing.List['KademliaPeer']:
|
def get_peers(self, count=-1, exclude_contact=None, sort_distance_to=None) -> typing.List['KademliaPeer']:
|
||||||
""" Returns a list containing up to the first count number of contacts
|
""" Returns a list containing up to the first count number of contacts
|
||||||
|
@ -101,8 +116,8 @@ class KBucket:
|
||||||
current_len = len(peers)
|
current_len = len(peers)
|
||||||
|
|
||||||
# If count is greater than k, return only k contacts
|
# If count is greater than k, return only k contacts
|
||||||
if count > constants.k:
|
if count > constants.K:
|
||||||
count = constants.k
|
count = constants.K
|
||||||
|
|
||||||
if not current_len:
|
if not current_len:
|
||||||
return peers
|
return peers
|
||||||
|
@ -124,6 +139,9 @@ class KBucket:
|
||||||
|
|
||||||
def remove_peer(self, peer: 'KademliaPeer') -> None:
|
def remove_peer(self, peer: 'KademliaPeer') -> None:
|
||||||
self.peers.remove(peer)
|
self.peers.remove(peer)
|
||||||
|
self.peer_in_routing_table_metric.labels("global").dec()
|
||||||
|
bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
|
||||||
|
self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).dec()
|
||||||
|
|
||||||
def key_in_range(self, key: bytes) -> bool:
|
def key_in_range(self, key: bytes) -> bool:
|
||||||
""" Tests whether the specified key (i.e. node ID) is in the range
|
""" Tests whether the specified key (i.e. node ID) is in the range
|
||||||
|
@ -161,31 +179,43 @@ class TreeRoutingTable:
|
||||||
version of the Kademlia paper, in section 2.4. It does, however, use the
|
version of the Kademlia paper, in section 2.4. It does, however, use the
|
||||||
ping RPC-based k-bucket eviction algorithm described in section 2.2 of
|
ping RPC-based k-bucket eviction algorithm described in section 2.2 of
|
||||||
that paper.
|
that paper.
|
||||||
|
|
||||||
|
BOOTSTRAP MODE: if set to True, we always add all peers. This is so a
|
||||||
|
bootstrap node does not get a bias towards its own node id and replies are
|
||||||
|
the best it can provide (joining peer knows its neighbors immediately).
|
||||||
|
Over time, this will need to be optimized to use the disk, since holding
|
||||||
|
everything in memory won't be feasible anymore.
|
||||||
|
See: https://github.com/bittorrent/bootstrap-dht
|
||||||
"""
|
"""
|
||||||
|
bucket_in_routing_table_metric = Gauge(
|
||||||
|
"buckets_in_routing_table", "Number of buckets on routing table", namespace="dht_node",
|
||||||
|
labelnames=("scope",)
|
||||||
|
)
|
||||||
|
|
||||||
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', parent_node_id: bytes,
|
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', parent_node_id: bytes,
|
||||||
split_buckets_under_index: int = constants.split_buckets_under_index):
|
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False):
|
||||||
self._loop = loop
|
self._loop = loop
|
||||||
self._peer_manager = peer_manager
|
self._peer_manager = peer_manager
|
||||||
self._parent_node_id = parent_node_id
|
self._parent_node_id = parent_node_id
|
||||||
self._split_buckets_under_index = split_buckets_under_index
|
self._split_buckets_under_index = split_buckets_under_index
|
||||||
self.buckets: typing.List[KBucket] = [
|
self.buckets: typing.List[KBucket] = [
|
||||||
KBucket(
|
KBucket(
|
||||||
self._peer_manager, range_min=0, range_max=2 ** constants.hash_bits, node_id=self._parent_node_id
|
self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id,
|
||||||
|
capacity=1 << 32 if is_bootstrap_node else constants.K
|
||||||
)
|
)
|
||||||
]
|
]
|
||||||
|
|
||||||
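The only effect of is_bootstrap_node in the constructor above is that the initial k-bucket is created with an effectively unbounded capacity instead of K, so a bootstrap node keeps every contact it hears about rather than favouring ids close to its own. A tiny sketch of that choice (K = 8 assumed):

```python
# Sketch of the capacity choice made in TreeRoutingTable.__init__ (assumes K = 8).
K = 8

def first_bucket_capacity(is_bootstrap_node: bool) -> int:
    # regular nodes cap each bucket at K peers; a bootstrap node keeps essentially everything
    return (1 << 32) if is_bootstrap_node else K

assert first_bucket_capacity(False) == 8
assert first_bucket_capacity(True) == 2 ** 32
```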
def get_peers(self) -> typing.List['KademliaPeer']:
|
def get_peers(self) -> typing.List['KademliaPeer']:
|
||||||
return list(itertools.chain.from_iterable(map(lambda bucket: bucket.peers, self.buckets)))
|
return list(itertools.chain.from_iterable(map(lambda bucket: bucket.peers, self.buckets)))
|
||||||
|
|
||||||
def should_split(self, bucket_index: int, to_add: bytes) -> bool:
|
def _should_split(self, bucket_index: int, to_add: bytes) -> bool:
|
||||||
# https://stackoverflow.com/questions/32129978/highly-unbalanced-kademlia-routing-table/32187456#32187456
|
# https://stackoverflow.com/questions/32129978/highly-unbalanced-kademlia-routing-table/32187456#32187456
|
||||||
if bucket_index < self._split_buckets_under_index:
|
if bucket_index < self._split_buckets_under_index:
|
||||||
return True
|
return True
|
||||||
contacts = self.get_peers()
|
contacts = self.get_peers()
|
||||||
distance = Distance(self._parent_node_id)
|
distance = Distance(self._parent_node_id)
|
||||||
contacts.sort(key=lambda c: distance(c.node_id))
|
contacts.sort(key=lambda c: distance(c.node_id))
|
||||||
kth_contact = contacts[-1] if len(contacts) < constants.k else contacts[constants.k - 1]
|
kth_contact = contacts[-1] if len(contacts) < constants.K else contacts[constants.K - 1]
|
||||||
return distance(to_add) < distance(kth_contact.node_id)
|
return distance(to_add) < distance(kth_contact.node_id)
|
||||||
|
|
||||||
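_should_split keeps the table from growing unbounded: below the always-split prefix index every bucket may split, and beyond it a full bucket is only split when the incoming id is closer to our own id than the K-th closest contact we already know. A toy illustration of that comparison using XOR distance, with small integers standing in for node ids and an arbitrary split_buckets_under_index of 4:

```python
# Toy version of the split rule; ints stand in for node ids, distance is XOR.
K = 8

def should_split(my_id: int, known_ids: list, to_add: int,
                 bucket_index: int, split_buckets_under_index: int = 4) -> bool:
    if bucket_index < split_buckets_under_index:
        return True
    ranked = sorted(known_ids, key=lambda nid: nid ^ my_id)
    kth = ranked[-1] if len(ranked) < K else ranked[K - 1]
    return (to_add ^ my_id) < (kth ^ my_id)

assert should_split(0, list(range(1, 9)), to_add=20, bucket_index=5) is False  # farther than the 8th closest
assert should_split(0, list(range(2, 10)), to_add=1, bucket_index=5) is True   # closer than anything known
```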
def find_close_peers(self, key: bytes, count: typing.Optional[int] = None,
|
def find_close_peers(self, key: bytes, count: typing.Optional[int] = None,
|
||||||
|
@ -193,7 +223,7 @@ class TreeRoutingTable:
|
||||||
exclude = [self._parent_node_id]
|
exclude = [self._parent_node_id]
|
||||||
if sender_node_id:
|
if sender_node_id:
|
||||||
exclude.append(sender_node_id)
|
exclude.append(sender_node_id)
|
||||||
count = count or constants.k
|
count = count or constants.K
|
||||||
distance = Distance(key)
|
distance = Distance(key)
|
||||||
contacts = self.get_peers()
|
contacts = self.get_peers()
|
||||||
contacts = [c for c in contacts if c.node_id not in exclude]
|
contacts = [c for c in contacts if c.node_id not in exclude]
|
||||||
|
@ -203,39 +233,32 @@ class TreeRoutingTable:
|
||||||
return []
|
return []
|
||||||
|
|
||||||
def get_peer(self, contact_id: bytes) -> 'KademliaPeer':
|
def get_peer(self, contact_id: bytes) -> 'KademliaPeer':
|
||||||
"""
|
return self.buckets[self._kbucket_index(contact_id)].get_peer(contact_id)
|
||||||
@raise IndexError: No contact with the specified contact ID is known
|
|
||||||
by this node
|
|
||||||
"""
|
|
||||||
return self.buckets[self.kbucket_index(contact_id)].get_peer(contact_id)
|
|
||||||
|
|
||||||
def get_refresh_list(self, start_index: int = 0, force: bool = False) -> typing.List[bytes]:
|
def get_refresh_list(self, start_index: int = 0, force: bool = False) -> typing.List[bytes]:
|
||||||
bucket_index = start_index
|
|
||||||
refresh_ids = []
|
refresh_ids = []
|
||||||
now = int(self._loop.time())
|
for offset, _ in enumerate(self.buckets[start_index:]):
|
||||||
for bucket in self.buckets[start_index:]:
|
refresh_ids.append(self._midpoint_id_in_bucket_range(start_index + offset))
|
||||||
if force or now - bucket.last_accessed >= constants.refresh_interval:
|
# if we have 3 or fewer populated buckets get two random ids in the range of each to try and
|
||||||
to_search = self.midpoint_id_in_bucket_range(bucket_index)
|
# populate/split the buckets further
|
||||||
refresh_ids.append(to_search)
|
buckets_with_contacts = self.buckets_with_contacts()
|
||||||
bucket_index += 1
|
if buckets_with_contacts <= 3:
|
||||||
|
for i in range(buckets_with_contacts):
|
||||||
|
refresh_ids.append(self._random_id_in_bucket_range(i))
|
||||||
|
refresh_ids.append(self._random_id_in_bucket_range(i))
|
||||||
return refresh_ids
|
return refresh_ids
|
||||||
|
|
||||||
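The rewritten get_refresh_list always targets the midpoint of every bucket's range, and when three or fewer buckets actually hold contacts it adds two random ids per populated bucket to encourage further splitting. A rough sketch of how those target ids are formed, assuming 48-byte ids; the real helpers additionally map the result through Distance(parent_node_id), i.e. XOR it with the local node id:

```python
import random

HASH_LENGTH = 48  # bytes, as in the DHT constants

def midpoint_id(range_min: int, range_max: int) -> bytes:
    # the real _midpoint_id_in_bucket_range also XORs this with the local node id
    half = (range_max - range_min) // 2
    return (range_min + half).to_bytes(HASH_LENGTH, 'big')

def random_id(range_min: int, range_max: int) -> bytes:
    return random.randrange(range_min, range_max).to_bytes(HASH_LENGTH, 'big')

full_keyspace = (0, 2 ** 384)
print(midpoint_id(*full_keyspace).hex()[:16])
print(random_id(*full_keyspace).hex()[:16])
```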
def remove_peer(self, peer: 'KademliaPeer') -> None:
|
def remove_peer(self, peer: 'KademliaPeer') -> None:
|
||||||
if not peer.node_id:
|
if not peer.node_id:
|
||||||
return
|
return
|
||||||
bucket_index = self.kbucket_index(peer.node_id)
|
bucket_index = self._kbucket_index(peer.node_id)
|
||||||
try:
|
try:
|
||||||
self.buckets[bucket_index].remove_peer(peer)
|
self.buckets[bucket_index].remove_peer(peer)
|
||||||
|
self._join_buckets()
|
||||||
except ValueError:
|
except ValueError:
|
||||||
return
|
return
|
||||||
|
|
||||||
def touch_kbucket(self, key: bytes) -> None:
|
def _kbucket_index(self, key: bytes) -> int:
|
||||||
self.touch_kbucket_by_index(self.kbucket_index(key))
|
|
||||||
|
|
||||||
def touch_kbucket_by_index(self, bucket_index: int):
|
|
||||||
self.buckets[bucket_index].last_accessed = int(self._loop.time())
|
|
||||||
|
|
||||||
def kbucket_index(self, key: bytes) -> int:
|
|
||||||
i = 0
|
i = 0
|
||||||
for bucket in self.buckets:
|
for bucket in self.buckets:
|
||||||
if bucket.key_in_range(key):
|
if bucket.key_in_range(key):
|
||||||
|
@ -244,19 +267,19 @@ class TreeRoutingTable:
|
||||||
i += 1
|
i += 1
|
||||||
return i
|
return i
|
||||||
|
|
||||||
def random_id_in_bucket_range(self, bucket_index: int) -> bytes:
|
def _random_id_in_bucket_range(self, bucket_index: int) -> bytes:
|
||||||
random_id = int(random.randrange(self.buckets[bucket_index].range_min, self.buckets[bucket_index].range_max))
|
random_id = int(random.randrange(self.buckets[bucket_index].range_min, self.buckets[bucket_index].range_max))
|
||||||
return Distance(
|
return Distance(
|
||||||
self._parent_node_id
|
self._parent_node_id
|
||||||
)(random_id.to_bytes(constants.hash_length, 'big')).to_bytes(constants.hash_length, 'big')
|
)(random_id.to_bytes(constants.HASH_LENGTH, 'big')).to_bytes(constants.HASH_LENGTH, 'big')
|
||||||
|
|
||||||
def midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
|
def _midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
|
||||||
half = int((self.buckets[bucket_index].range_max - self.buckets[bucket_index].range_min) // 2)
|
half = int((self.buckets[bucket_index].range_max - self.buckets[bucket_index].range_min) // 2)
|
||||||
return Distance(self._parent_node_id)(
|
return Distance(self._parent_node_id)(
|
||||||
int(self.buckets[bucket_index].range_min + half).to_bytes(constants.hash_length, 'big')
|
int(self.buckets[bucket_index].range_min + half).to_bytes(constants.HASH_LENGTH, 'big')
|
||||||
).to_bytes(constants.hash_length, 'big')
|
).to_bytes(constants.HASH_LENGTH, 'big')
|
||||||
|
|
||||||
def split_bucket(self, old_bucket_index: int) -> None:
|
def _split_bucket(self, old_bucket_index: int) -> None:
|
||||||
""" Splits the specified k-bucket into two new buckets which together
|
""" Splits the specified k-bucket into two new buckets which together
|
||||||
cover the same range in the key/ID space
|
cover the same range in the key/ID space
|
||||||
|
|
||||||
|
@ -279,11 +302,12 @@ class TreeRoutingTable:
|
||||||
# ...and remove them from the old bucket
|
# ...and remove them from the old bucket
|
||||||
for contact in new_bucket.peers:
|
for contact in new_bucket.peers:
|
||||||
old_bucket.remove_peer(contact)
|
old_bucket.remove_peer(contact)
|
||||||
|
self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
|
||||||
|
|
||||||
def join_buckets(self):
|
def _join_buckets(self):
|
||||||
if len(self.buckets) == 1:
|
if len(self.buckets) == 1:
|
||||||
return
|
return
|
||||||
to_pop = [i for i, bucket in enumerate(self.buckets) if not len(bucket)]
|
to_pop = [i for i, bucket in enumerate(self.buckets) if len(bucket) == 0]
|
||||||
if not to_pop:
|
if not to_pop:
|
||||||
return
|
return
|
||||||
log.info("join buckets %i", len(to_pop))
|
log.info("join buckets %i", len(to_pop))
|
||||||
|
@ -302,18 +326,79 @@ class TreeRoutingTable:
|
||||||
elif can_go_higher:
|
elif can_go_higher:
|
||||||
self.buckets[bucket_index_to_pop + 1].range_min = bucket.range_min
|
self.buckets[bucket_index_to_pop + 1].range_min = bucket.range_min
|
||||||
self.buckets.remove(bucket)
|
self.buckets.remove(bucket)
|
||||||
return self.join_buckets()
|
self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
|
||||||
|
return self._join_buckets()
|
||||||
def contact_in_routing_table(self, address_tuple: typing.Tuple[str, int]) -> bool:
|
|
||||||
for bucket in self.buckets:
|
|
||||||
for contact in bucket.get_peers(sort_distance_to=False):
|
|
||||||
if address_tuple[0] == contact.address and address_tuple[1] == contact.udp_port:
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
def buckets_with_contacts(self) -> int:
|
def buckets_with_contacts(self) -> int:
|
||||||
count = 0
|
count = 0
|
||||||
for bucket in self.buckets:
|
for bucket in self.buckets:
|
||||||
if len(bucket):
|
if len(bucket) > 0:
|
||||||
count += 1
|
count += 1
|
||||||
return count
|
return count
|
||||||
|
|
||||||
|
async def add_peer(self, peer: 'KademliaPeer', probe: typing.Callable[['KademliaPeer'], typing.Awaitable]):
|
||||||
|
if not peer.node_id:
|
||||||
|
log.warning("Tried adding a peer with no node id!")
|
||||||
|
return False
|
||||||
|
for my_peer in self.get_peers():
|
||||||
|
if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
|
||||||
|
self.remove_peer(my_peer)
|
||||||
|
self._join_buckets()
|
||||||
|
bucket_index = self._kbucket_index(peer.node_id)
|
||||||
|
if self.buckets[bucket_index].add_peer(peer):
|
||||||
|
return True
|
||||||
|
|
||||||
|
# The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
|
||||||
|
if self._should_split(bucket_index, peer.node_id):
|
||||||
|
self._split_bucket(bucket_index)
|
||||||
|
# Retry the insertion attempt
|
||||||
|
result = await self.add_peer(peer, probe)
|
||||||
|
self._join_buckets()
|
||||||
|
return result
|
||||||
|
else:
|
||||||
|
# We can't split the k-bucket
|
||||||
|
#
|
||||||
|
# The 13 page kademlia paper specifies that the least recently contacted node in the bucket
|
||||||
|
# shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
|
||||||
|
# the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
|
||||||
|
#
|
||||||
|
# A reasonable extension to this is BEP 0005, which extends the above:
|
||||||
|
#
|
||||||
|
# Not all nodes that we learn about are equal. Some are "good" and some are not.
|
||||||
|
# Many nodes using the DHT are able to send queries and receive responses,
|
||||||
|
# but are not able to respond to queries from other nodes. It is important that
|
||||||
|
# each node's routing table contains only known good nodes. A good node is
|
||||||
|
# a node that has responded to one of our queries within the last 15 minutes. A node
|
||||||
|
# is also good if it has ever responded to one of our queries and has sent us a
|
||||||
|
# query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
|
||||||
|
# questionable. Nodes become bad when they fail to respond to multiple queries
|
||||||
|
# in a row. Nodes that we know are good are given priority over nodes with unknown status.
|
||||||
|
#
|
||||||
|
# When there are bad or questionable nodes in the bucket, the least recent is selected for
|
||||||
|
# potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
|
||||||
|
# contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
|
||||||
|
# is ignored if the pinged node replies.
|
||||||
|
|
||||||
|
not_good_contacts = self.buckets[bucket_index].get_bad_or_unknown_peers()
|
||||||
|
not_recently_replied = []
|
||||||
|
for my_peer in not_good_contacts:
|
||||||
|
last_replied = self._peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
|
||||||
|
if not last_replied or last_replied + 60 < self._loop.time():
|
||||||
|
not_recently_replied.append(my_peer)
|
||||||
|
if not_recently_replied:
|
||||||
|
to_replace = not_recently_replied[0]
|
||||||
|
else:
|
||||||
|
to_replace = self.buckets[bucket_index].peers[0]
|
||||||
|
last_replied = self._peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
|
||||||
|
if last_replied and last_replied + 60 > self._loop.time():
|
||||||
|
return False
|
||||||
|
log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
|
||||||
|
try:
|
||||||
|
await probe(to_replace)
|
||||||
|
return False
|
||||||
|
except (asyncio.TimeoutError, RemoteException):
|
||||||
|
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
|
||||||
|
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
|
||||||
|
if to_replace in self.buckets[bucket_index]:
|
||||||
|
self.buckets[bucket_index].remove_peer(to_replace)
|
||||||
|
return await self.add_peer(peer, probe)
|
|
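The replacement policy spelled out in the comments above hinges on the BEP 0005 freshness rule: a contact that replied to us within the last 15 minutes, or that has ever replied and has queried us within the last 15 minutes, counts as good; anything else is questionable and becomes an eviction candidate if it fails a ping. A hedged sketch of that classification (the function and its arguments are illustrative, not the real PeerManager.peer_is_good API):

```python
import time
from typing import Optional

FIFTEEN_MINUTES = 15 * 60

def peer_is_good(last_replied: Optional[float], last_requested: Optional[float],
                 now: Optional[float] = None) -> bool:
    """Illustrative BEP 0005-style freshness check."""
    now = time.time() if now is None else now
    if last_replied and now - last_replied < FIFTEEN_MINUTES:
        return True    # replied to one of our queries recently
    if last_replied and last_requested and now - last_requested < FIFTEEN_MINUTES:
        return True    # has replied at some point and queried us recently
    return False       # questionable: eligible for replacement if it fails a ping

now = time.time()
assert peer_is_good(last_replied=now - 60, last_requested=None, now=now)
assert not peer_is_good(last_replied=now - 3600, last_requested=None, now=now)
```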
@ -52,8 +52,7 @@ def _bdecode(data: bytes, start_index: int = 0) -> typing.Tuple[typing.Union[int
|
||||||
raise DecodeError(err)
|
raise DecodeError(err)
|
||||||
start_index = split_pos + 1
|
start_index = split_pos + 1
|
||||||
end_pos = start_index + length
|
end_pos = start_index + length
|
||||||
b = data[start_index:end_pos]
|
return data[start_index:end_pos], end_pos
|
||||||
return b, end_pos
|
|
||||||
|
|
||||||
|
|
||||||
def bencode(data: typing.Dict) -> bytes:
|
def bencode(data: typing.Dict) -> bytes:
|
|
@ -34,9 +34,9 @@ class KademliaDatagramBase:
|
||||||
self.packet_type = packet_type
|
self.packet_type = packet_type
|
||||||
if self.expected_packet_type != packet_type:
|
if self.expected_packet_type != packet_type:
|
||||||
raise ValueError(f"invalid packet type: {packet_type}, expected {self.expected_packet_type}")
|
raise ValueError(f"invalid packet type: {packet_type}, expected {self.expected_packet_type}")
|
||||||
if len(rpc_id) != constants.rpc_id_length:
|
if len(rpc_id) != constants.RPC_ID_LENGTH:
|
||||||
raise ValueError(f"invalid rpc node_id: {len(rpc_id)} bytes (expected 20)")
|
raise ValueError(f"invalid rpc node_id: {len(rpc_id)} bytes (expected 20)")
|
||||||
if not len(node_id) == constants.hash_length:
|
if not len(node_id) == constants.HASH_LENGTH:
|
||||||
raise ValueError(f"invalid node node_id: {len(node_id)} bytes (expected 48)")
|
raise ValueError(f"invalid node node_id: {len(node_id)} bytes (expected 48)")
|
||||||
self.rpc_id = rpc_id
|
self.rpc_id = rpc_id
|
||||||
self.node_id = node_id
|
self.node_id = node_id
|
||||||
|
@ -46,9 +46,9 @@ class KademliaDatagramBase:
|
||||||
i: getattr(self, k) for i, k in enumerate(self.required_fields)
|
i: getattr(self, k) for i, k in enumerate(self.required_fields)
|
||||||
}
|
}
|
||||||
for i, k in enumerate(OPTIONAL_FIELDS):
|
for i, k in enumerate(OPTIONAL_FIELDS):
|
||||||
v = getattr(self, k, None)
|
value = getattr(self, k, None)
|
||||||
if v is not None:
|
if value is not None:
|
||||||
datagram[i + OPTIONAL_ARG_OFFSET] = v
|
datagram[i + OPTIONAL_ARG_OFFSET] = value
|
||||||
return bencode(datagram)
|
return bencode(datagram)
|
||||||
|
|
||||||
|
|
||||||
|
@ -77,18 +77,18 @@ class RequestDatagram(KademliaDatagramBase):
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def make_ping(cls, from_node_id: bytes, rpc_id: typing.Optional[bytes] = None) -> 'RequestDatagram':
|
def make_ping(cls, from_node_id: bytes, rpc_id: typing.Optional[bytes] = None) -> 'RequestDatagram':
|
||||||
rpc_id = rpc_id or constants.generate_id()[:constants.rpc_id_length]
|
rpc_id = rpc_id or constants.generate_id()[:constants.RPC_ID_LENGTH]
|
||||||
return cls(REQUEST_TYPE, rpc_id, from_node_id, b'ping')
|
return cls(REQUEST_TYPE, rpc_id, from_node_id, b'ping')
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def make_store(cls, from_node_id: bytes, blob_hash: bytes, token: bytes, port: int,
|
def make_store(cls, from_node_id: bytes, blob_hash: bytes, token: bytes, port: int,
|
||||||
rpc_id: typing.Optional[bytes] = None) -> 'RequestDatagram':
|
rpc_id: typing.Optional[bytes] = None) -> 'RequestDatagram':
|
||||||
rpc_id = rpc_id or constants.generate_id()[:constants.rpc_id_length]
|
rpc_id = rpc_id or constants.generate_id()[:constants.RPC_ID_LENGTH]
|
||||||
if len(blob_hash) != constants.hash_bits // 8:
|
if len(blob_hash) != constants.HASH_BITS // 8:
|
||||||
raise ValueError(f"invalid blob hash length: {len(blob_hash)}")
|
raise ValueError(f"invalid blob hash length: {len(blob_hash)}")
|
||||||
if not 0 < port < 65536:
|
if not 0 < port < 65536:
|
||||||
raise ValueError(f"invalid port: {port}")
|
raise ValueError(f"invalid port: {port}")
|
||||||
if len(token) != constants.hash_bits // 8:
|
if len(token) != constants.HASH_BITS // 8:
|
||||||
raise ValueError(f"invalid token length: {len(token)}")
|
raise ValueError(f"invalid token length: {len(token)}")
|
||||||
store_args = [blob_hash, token, port, from_node_id, 0]
|
store_args = [blob_hash, token, port, from_node_id, 0]
|
||||||
return cls(REQUEST_TYPE, rpc_id, from_node_id, b'store', store_args)
|
return cls(REQUEST_TYPE, rpc_id, from_node_id, b'store', store_args)
|
||||||
|
@ -96,16 +96,16 @@ class RequestDatagram(KademliaDatagramBase):
|
||||||
@classmethod
|
@classmethod
|
||||||
def make_find_node(cls, from_node_id: bytes, key: bytes,
|
def make_find_node(cls, from_node_id: bytes, key: bytes,
|
||||||
rpc_id: typing.Optional[bytes] = None) -> 'RequestDatagram':
|
rpc_id: typing.Optional[bytes] = None) -> 'RequestDatagram':
|
||||||
rpc_id = rpc_id or constants.generate_id()[:constants.rpc_id_length]
|
rpc_id = rpc_id or constants.generate_id()[:constants.RPC_ID_LENGTH]
|
||||||
if len(key) != constants.hash_bits // 8:
|
if len(key) != constants.HASH_BITS // 8:
|
||||||
raise ValueError(f"invalid key length: {len(key)}")
|
raise ValueError(f"invalid key length: {len(key)}")
|
||||||
return cls(REQUEST_TYPE, rpc_id, from_node_id, b'findNode', [key])
|
return cls(REQUEST_TYPE, rpc_id, from_node_id, b'findNode', [key])
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def make_find_value(cls, from_node_id: bytes, key: bytes,
|
def make_find_value(cls, from_node_id: bytes, key: bytes,
|
||||||
rpc_id: typing.Optional[bytes] = None, page: int = 0) -> 'RequestDatagram':
|
rpc_id: typing.Optional[bytes] = None, page: int = 0) -> 'RequestDatagram':
|
||||||
rpc_id = rpc_id or constants.generate_id()[:constants.rpc_id_length]
|
rpc_id = rpc_id or constants.generate_id()[:constants.RPC_ID_LENGTH]
|
||||||
if len(key) != constants.hash_bits // 8:
|
if len(key) != constants.HASH_BITS // 8:
|
||||||
raise ValueError(f"invalid key length: {len(key)}")
|
raise ValueError(f"invalid key length: {len(key)}")
|
||||||
if page < 0:
|
if page < 0:
|
||||||
raise ValueError(f"cannot request a negative page ({page})")
|
raise ValueError(f"cannot request a negative page ({page})")
|
||||||
|
@@ -144,7 +144,7 @@ class ErrorDatagram(KademliaDatagramBase):
         self.response = response.decode()


-def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDatagram, ErrorDatagram]:
+def _decode_datagram(datagram: bytes):
     msg_types = {
         REQUEST_TYPE: RequestDatagram,
         RESPONSE_TYPE: ResponseDatagram,

@@ -152,26 +152,36 @@ def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDa
     }

     primitive: typing.Dict = bdecode(datagram)
-    if primitive[0] in [REQUEST_TYPE, ERROR_TYPE, RESPONSE_TYPE]:  # pylint: disable=unsubscriptable-object
-        datagram_type = primitive[0]  # pylint: disable=unsubscriptable-object
+    converted = {
+        str(k).encode() if not isinstance(k, bytes) else k: v for k, v in primitive.items()
+    }
+
+    if converted[b'0'] in [REQUEST_TYPE, ERROR_TYPE, RESPONSE_TYPE]:  # pylint: disable=unsubscriptable-object
+        datagram_type = converted[b'0']  # pylint: disable=unsubscriptable-object
     else:
         raise ValueError("invalid datagram type")
     datagram_class = msg_types[datagram_type]
     decoded = {
-        k: primitive[i]  # pylint: disable=unsubscriptable-object
+        k: converted[str(i).encode()]  # pylint: disable=unsubscriptable-object
         for i, k in enumerate(datagram_class.required_fields)
-        if i in primitive  # pylint: disable=unsupported-membership-test
+        if str(i).encode() in converted  # pylint: disable=unsupported-membership-test
     }
-    for i, k in enumerate(OPTIONAL_FIELDS):
-        if i + OPTIONAL_ARG_OFFSET in primitive:
-            decoded[i + OPTIONAL_ARG_OFFSET] = primitive[i + OPTIONAL_ARG_OFFSET]
+    for i, _ in enumerate(OPTIONAL_FIELDS):
+        if str(i + OPTIONAL_ARG_OFFSET).encode() in converted:
+            decoded[i + OPTIONAL_ARG_OFFSET] = converted[str(i + OPTIONAL_ARG_OFFSET).encode()]
+    return decoded, datagram_class
+
+
+def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDatagram, ErrorDatagram]:
+    decoded, datagram_class = _decode_datagram(datagram)
     return datagram_class(**decoded)
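Note on the hunk above: depending on the bencode implementation, bdecode() may return dictionary keys as ints, strings, or bytes, which is why the new _decode_datagram() normalizes every key to bytes before indexing. A minimal standalone sketch of that normalization (the sample dict stands in for a bdecode() result; it is not lbry code):

    # keys of mixed types, as a bencode decoder might produce them
    primitive = {0: 0, b'1': b'rpc-id..', '2': b'node-id.'}

    # mirror the `converted = {...}` comprehension from the diff
    converted = {
        str(k).encode() if not isinstance(k, bytes) else k: v
        for k, v in primitive.items()
    }

    assert converted[b'0'] == 0          # message type, regardless of original key type
    assert str(1).encode() in converted  # positional lookup used for required_fields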
 def make_compact_ip(address: str) -> bytearray:
     compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), address.split('.'), bytearray())
     if len(compact_ip) != 4:
-        raise ValueError(f"invalid IPv4 length")
+        raise ValueError("invalid IPv4 length")
     return compact_ip


@@ -179,8 +189,8 @@ def make_compact_address(node_id: bytes, address: str, port: int) -> bytearray:
     compact_ip = make_compact_ip(address)
     if not 0 < port < 65536:
         raise ValueError(f'Invalid port: {port}')
-    if len(node_id) != constants.hash_bits // 8:
+    if len(node_id) != constants.HASH_BITS // 8:
-        raise ValueError(f"invalid node node_id length")
+        raise ValueError("invalid node node_id length")
     return compact_ip + port.to_bytes(2, 'big') + node_id


@@ -190,6 +200,6 @@ def decode_compact_address(compact_address: bytes) -> typing.Tuple[bytes, str, i
     node_id = compact_address[6:]
     if not 0 < port < 65536:
         raise ValueError(f'Invalid port: {port}')
-    if len(node_id) != constants.hash_bits // 8:
+    if len(node_id) != constants.HASH_BITS // 8:
-        raise ValueError(f"invalid node node_id length")
+        raise ValueError("invalid node node_id length")
     return node_id, address, port
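For reference, the compact address handled by these helpers is 4 bytes of IPv4, 2 bytes of big-endian port, then the node id. A standalone round-trip sketch, assuming a 48-byte node id (the HASH_BITS // 8 size used above; the exact constant value is an assumption here):

    node_id = bytes(48)                      # assumed 48-byte node id, all zeros for illustration
    address, port = '127.0.0.1', 4444

    compact_ip = bytes(int(part) for part in address.split('.'))
    compact = compact_ip + port.to_bytes(2, 'big') + node_id

    # decode_compact_address() reverses the same layout
    decoded_address = '.'.join(str(b) for b in compact[:4])
    decoded_port = int.from_bytes(compact[4:6], 'big')
    decoded_node_id = compact[6:]
    assert (decoded_node_id, decoded_address, decoded_port) == (node_id, address, port)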
lbry/docs/api.json (4186 lines changed)

@@ -34,6 +34,11 @@ Code | Name | Message
 **11x** | InputValue(ValueError) | Invalid argument value provided to command.
 111 | GenericInputValue | The value '{value}' for argument '{argument}' is not valid.
 112 | InputValueIsNone | None or null is not valid value for argument '{argument}'.
+113 | ConflictingInputValue | Only '{first_argument}' or '{second_argument}' is allowed, not both.
+114 | InputStringIsBlank | {argument} cannot be blank.
+115 | EmptyPublishedFile | Cannot publish empty file: {file_path}
+116 | MissingPublishedFile | File does not exist: {file_path}
+117 | InvalidStreamURL | Invalid LBRY stream URL: '{url}' -- When an URL cannot be downloaded, such as '@Channel/' or a collection
 **2xx** | Configuration | Configuration errors.
 201 | ConfigWrite | Cannot write configuration file '{path}'. -- When writing the default config fails on startup, such as due to permission issues.
 202 | ConfigRead | Cannot find provided configuration file '{path}'. -- Can't open the config file user provided via command line args.

@@ -47,14 +52,26 @@ Code | Name | Message
 401 | TransactionRejected | Transaction rejected, unknown reason.
 402 | TransactionFeeTooLow | Fee too low.
 403 | TransactionInvalidSignature | Invalid signature.
-404 | InsufficientFunds | Insufficient funds. -- determined by wallet prior to attempting to broadcast a tx; this is different for example from a TX being created and sent but then rejected by lbrycrd for unspendable utxos.
+404 | InsufficientFunds | Not enough funds to cover this transaction. -- determined by wallet prior to attempting to broadcast a tx; this is different for example from a TX being created and sent but then rejected by lbrycrd for unspendable utxos.
 405 | ChannelKeyNotFound | Channel signing key not found.
 406 | ChannelKeyInvalid | Channel signing key is out of date. -- For example, channel was updated but you don't have the updated key.
 407 | DataDownload | Failed to download blob. *generic*
-408 | Resolve | Failed to resolve '{url}'.
-409 | ResolveTimeout | Failed to resolve '{url}' within the timeout.
-410 | KeyFeeAboveMaxAllowed | {message}
-411 | InvalidPassword | Password is invalid.
+408 | PrivateKeyNotFound | Couldn't find private key for {key} '{value}'.
+410 | Resolve | Failed to resolve '{url}'.
+411 | ResolveTimeout | Failed to resolve '{url}' within the timeout.
+411 | ResolveCensored | Resolve of '{url}' was censored by channel with claim id '{censor_id}'.
+420 | KeyFeeAboveMaxAllowed | {message}
+421 | InvalidPassword | Password is invalid.
+422 | IncompatibleWalletServer | '{server}:{port}' has an incompatibly old version.
+423 | TooManyClaimSearchParameters | {key} cant have more than {limit} items.
+424 | AlreadyPurchased | You already have a purchase for claim_id '{claim_id_hex}'. Use --allow-duplicate-purchase flag to override.
+431 | ServerPaymentInvalidAddress | Invalid address from wallet server: '{address}' - skipping payment round.
+432 | ServerPaymentWalletLocked | Cannot spend funds with locked wallet, skipping payment round.
+433 | ServerPaymentFeeAboveMaxAllowed | Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.
+434 | WalletNotLoaded | Wallet {wallet_id} is not loaded.
+435 | WalletAlreadyLoaded | Wallet {wallet_path} is already loaded.
+436 | WalletNotFound | Wallet not found at {wallet_path}.
+437 | WalletAlreadyExists | Wallet {wallet_path} already exists, use `wallet_add` to load it.
 **5xx** | Blob | **Blobs**
 500 | BlobNotFound | Blob not found.
 501 | BlobPermissionDenied | Permission denied to read blob.
@@ -1,4 +1,4 @@
-from .base import BaseError
+from .base import BaseError, claim_id


 class UserInputError(BaseError):

@@ -16,18 +16,22 @@ class CommandError(UserInputError):
 class CommandDoesNotExistError(CommandError):

     def __init__(self, command):
+        self.command = command
         super().__init__(f"Command '{command}' does not exist.")


 class CommandDeprecatedError(CommandError):

     def __init__(self, command):
+        self.command = command
         super().__init__(f"Command '{command}' is deprecated.")


 class CommandInvalidArgumentError(CommandError):

     def __init__(self, argument, command):
+        self.argument = argument
+        self.command = command
         super().__init__(f"Invalid argument '{argument}' to command '{command}'.")

@@ -37,6 +41,7 @@ class CommandTemporarilyUnavailableError(CommandError):
     """

     def __init__(self, command):
+        self.command = command
         super().__init__(f"Command '{command}' is temporarily unavailable.")

@@ -46,6 +51,7 @@ class CommandPermanentlyUnavailableError(CommandError):
     """

     def __init__(self, command):
+        self.command = command
         super().__init__(f"Command '{command}' is permanently unavailable.")

@@ -58,15 +64,57 @@ class InputValueError(UserInputError, ValueError):
 class GenericInputValueError(InputValueError):

     def __init__(self, value, argument):
+        self.value = value
+        self.argument = argument
         super().__init__(f"The value '{value}' for argument '{argument}' is not valid.")


 class InputValueIsNoneError(InputValueError):

     def __init__(self, argument):
+        self.argument = argument
         super().__init__(f"None or null is not valid value for argument '{argument}'.")


+class ConflictingInputValueError(InputValueError):
+
+    def __init__(self, first_argument, second_argument):
+        self.first_argument = first_argument
+        self.second_argument = second_argument
+        super().__init__(f"Only '{first_argument}' or '{second_argument}' is allowed, not both.")
+
+
+class InputStringIsBlankError(InputValueError):
+
+    def __init__(self, argument):
+        self.argument = argument
+        super().__init__(f"{argument} cannot be blank.")
+
+
+class EmptyPublishedFileError(InputValueError):
+
+    def __init__(self, file_path):
+        self.file_path = file_path
+        super().__init__(f"Cannot publish empty file: {file_path}")
+
+
+class MissingPublishedFileError(InputValueError):
+
+    def __init__(self, file_path):
+        self.file_path = file_path
+        super().__init__(f"File does not exist: {file_path}")
+
+
+class InvalidStreamURLError(InputValueError):
+    """
+    When an URL cannot be downloaded, such as '@Channel/' or a collection
+    """
+
+    def __init__(self, url):
+        self.url = url
+        super().__init__(f"Invalid LBRY stream URL: '{url}'")
+
+
 class ConfigurationError(BaseError):
     """
     Configuration errors.

@@ -79,6 +127,7 @@ class ConfigWriteError(ConfigurationError):
     """

     def __init__(self, path):
+        self.path = path
         super().__init__(f"Cannot write configuration file '{path}'.")


@@ -88,6 +137,7 @@ class ConfigReadError(ConfigurationError):
     """

     def __init__(self, path):
+        self.path = path
         super().__init__(f"Cannot find provided configuration file '{path}'.")


@@ -97,18 +147,21 @@ class ConfigParseError(ConfigurationError):
     """

     def __init__(self, path):
+        self.path = path
         super().__init__(f"Failed to parse the configuration file '{path}'.")


 class ConfigMissingError(ConfigurationError):

     def __init__(self, path):
+        self.path = path
         super().__init__(f"Configuration file '{path}' is missing setting that has no default / fallback.")


 class ConfigInvalidError(ConfigurationError):

     def __init__(self, path):
+        self.path = path
         super().__init__(f"Configuration file '{path}' has setting with invalid value.")

@@ -161,7 +214,7 @@ class InsufficientFundsError(WalletError):
     """

     def __init__(self):
-        super().__init__("Insufficient funds.")
+        super().__init__("Not enough funds to cover this transaction.")


 class ChannelKeyNotFoundError(WalletError):

@@ -185,21 +238,41 @@ class DataDownloadError(WalletError):
         super().__init__("Failed to download blob. *generic*")


+class PrivateKeyNotFoundError(WalletError):
+
+    def __init__(self, key, value):
+        self.key = key
+        self.value = value
+        super().__init__(f"Couldn't find private key for {key} '{value}'.")
+
+
 class ResolveError(WalletError):

     def __init__(self, url):
+        self.url = url
         super().__init__(f"Failed to resolve '{url}'.")


 class ResolveTimeoutError(WalletError):

     def __init__(self, url):
+        self.url = url
         super().__init__(f"Failed to resolve '{url}' within the timeout.")


+class ResolveCensoredError(WalletError):
+
+    def __init__(self, url, censor_id, censor_row):
+        self.url = url
+        self.censor_id = censor_id
+        self.censor_row = censor_row
+        super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{censor_id}'.")
+
+
 class KeyFeeAboveMaxAllowedError(WalletError):

     def __init__(self, message):
+        self.message = message
         super().__init__(f"{message}")

@@ -209,6 +282,81 @@ class InvalidPasswordError(WalletError):
         super().__init__("Password is invalid.")


+class IncompatibleWalletServerError(WalletError):
+
+    def __init__(self, server, port):
+        self.server = server
+        self.port = port
+        super().__init__(f"'{server}:{port}' has an incompatibly old version.")
+
+
+class TooManyClaimSearchParametersError(WalletError):
+
+    def __init__(self, key, limit):
+        self.key = key
+        self.limit = limit
+        super().__init__(f"{key} cant have more than {limit} items.")
+
+
+class AlreadyPurchasedError(WalletError):
+    """
+    allow-duplicate-purchase flag to override.
+    """
+
+    def __init__(self, claim_id_hex):
+        self.claim_id_hex = claim_id_hex
+        super().__init__(f"You already have a purchase for claim_id '{claim_id_hex}'. Use")
+
+
+class ServerPaymentInvalidAddressError(WalletError):
+
+    def __init__(self, address):
+        self.address = address
+        super().__init__(f"Invalid address from wallet server: '{address}' - skipping payment round.")
+
+
+class ServerPaymentWalletLockedError(WalletError):
+
+    def __init__(self):
+        super().__init__("Cannot spend funds with locked wallet, skipping payment round.")
+
+
+class ServerPaymentFeeAboveMaxAllowedError(WalletError):
+
+    def __init__(self, daily_fee, max_fee):
+        self.daily_fee = daily_fee
+        self.max_fee = max_fee
+        super().__init__(f"Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.")
+
+
+class WalletNotLoadedError(WalletError):
+
+    def __init__(self, wallet_id):
+        self.wallet_id = wallet_id
+        super().__init__(f"Wallet {wallet_id} is not loaded.")
+
+
+class WalletAlreadyLoadedError(WalletError):
+
+    def __init__(self, wallet_path):
+        self.wallet_path = wallet_path
+        super().__init__(f"Wallet {wallet_path} is already loaded.")
+
+
+class WalletNotFoundError(WalletError):
+
+    def __init__(self, wallet_path):
+        self.wallet_path = wallet_path
+        super().__init__(f"Wallet not found at {wallet_path}.")
+
+
+class WalletAlreadyExistsError(WalletError):
+
+    def __init__(self, wallet_path):
+        self.wallet_path = wallet_path
+        super().__init__(f"Wallet {wallet_path} already exists, use `wallet_add` to load it.")
+
+
 class BlobError(BaseError):
     """
     **Blobs**

@@ -266,30 +414,35 @@ class DownloadCancelledError(BlobError):
 class DownloadSDTimeoutError(BlobError):

     def __init__(self, download):
+        self.download = download
         super().__init__(f"Failed to download sd blob {download} within timeout.")


 class DownloadDataTimeoutError(BlobError):

     def __init__(self, download):
+        self.download = download
         super().__init__(f"Failed to download data blobs for sd hash {download} within timeout.")


 class InvalidStreamDescriptorError(BlobError):

     def __init__(self, message):
+        self.message = message
         super().__init__(f"{message}")


 class InvalidDataError(BlobError):

     def __init__(self, message):
+        self.message = message
         super().__init__(f"{message}")


 class InvalidBlobHashError(BlobError):

     def __init__(self, message):
+        self.message = message
         super().__init__(f"{message}")

@@ -302,12 +455,14 @@ class ComponentError(BaseError):
 class ComponentStartConditionNotMetError(ComponentError):

     def __init__(self, components):
+        self.components = components
         super().__init__(f"Unresolved dependencies for: {components}")


 class ComponentsNotStartedError(ComponentError):

     def __init__(self, message):
+        self.message = message
         super().__init__(f"{message}")

@@ -320,16 +475,20 @@ class CurrencyExchangeError(BaseError):
 class InvalidExchangeRateResponseError(CurrencyExchangeError):

     def __init__(self, source, reason):
+        self.source = source
+        self.reason = reason
         super().__init__(f"Failed to get exchange rate from {source}: {reason}")


 class CurrencyConversionError(CurrencyExchangeError):

     def __init__(self, message):
+        self.message = message
         super().__init__(f"{message}")


 class InvalidCurrencyError(CurrencyExchangeError):

     def __init__(self, currency):
+        self.currency = currency
         super().__init__(f"Invalid currency: {currency} is not a supported currency.")
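Because the regenerated error classes now keep their constructor arguments as attributes, callers can inspect a failure programmatically instead of parsing the message string. A hedged usage sketch, assuming the lbry.error package from this diff is importable; resolve_url() below is a made-up placeholder, not an API from the change:

    from lbry.error import ResolveCensoredError, ResolveTimeoutError

    def resolve_url(url):
        # hypothetical stand-in for whatever code raises these errors
        raise ResolveCensoredError(url, censor_id='a' * 40, censor_row=None)

    try:
        resolve_url('lbry://@Channel/stream')
    except ResolveCensoredError as error:
        print(error.url, error.censor_id)          # structured fields, no message parsing
    except ResolveTimeoutError as error:
        print(f"timed out resolving {error.url}")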
lbry/error/base.py (new file, 9 lines)

@@ -0,0 +1,9 @@
+from binascii import hexlify
+
+
+def claim_id(claim_hash):
+    return hexlify(claim_hash[::-1]).decode()
+
+
+class BaseError(Exception):
+    pass
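The new claim_id() helper turns an internal little-endian claim hash into the hex claim id shown to users, so message templates can reference it as {claim_id(claim_hash)}. A quick illustration with a made-up 20-byte hash:

    from binascii import hexlify, unhexlify

    def claim_id(claim_hash):
        # identical to lbry/error/base.py: reverse the byte order, then hex-encode
        return hexlify(claim_hash[::-1]).decode()

    # made-up claim hash purely for illustration
    claim_hash = unhexlify('00' * 19 + 'ff')
    assert claim_id(claim_hash) == 'ff' + '00' * 19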
@@ -13,10 +13,12 @@ class {name}({parents}):{doc}
 """

 INIT = """
-    def __init__({args}):
+    def __init__({args}):{fields}
         super().__init__({format}"{message}")
 """

+FUNCTIONS = ['claim_id']
+

 class ErrorClass:

@@ -50,11 +52,22 @@ class ErrorClass:

     def get_arguments(self):
         args = ['self']
-        for arg in re.findall('{([a-z0-1]+)}', self.message):
+        for arg in re.findall('{([a-z0-1_()]+)}', self.message):
+            for func in FUNCTIONS:
+                if arg.startswith(f'{func}('):
+                    arg = arg[len(f'{func}('):-1]
+                    break
             args.append(arg)
         return args

-    def get_doc_string(self, doc):
+    @staticmethod
+    def get_fields(args):
+        if len(args) > 1:
+            return ''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:])
+        return ''
+
+    @staticmethod
+    def get_doc_string(doc):
         if doc:
             return f'\n{INDENT}"""\n{indent(fill(doc, 100), INDENT)}\n{INDENT}"""'
         return ""

@@ -68,7 +81,8 @@ class ErrorClass:
         args = self.get_arguments()
         if self.is_leaf:
             out.write((CLASS + INIT).format(
-                name=self.class_name, parents=', '.join(parents), args=', '.join(args),
+                name=self.class_name, parents=', '.join(parents),
+                args=', '.join(args), fields=self.get_fields(args),
                 message=self.message, doc=self.get_doc_string(self.comment), format='f' if len(args) > 1 else ''
             ))
         else:

@@ -101,7 +115,7 @@ def find_parent(stack, child):


 def generate(out):
-    out.write('from .base import BaseError\n')
+    out.write(f"from .base import BaseError, {', '.join(FUNCTIONS)}\n")
     stack = {}
     for error in get_errors():
         error.render(out, find_parent(stack, error))

@@ -139,7 +153,7 @@ def analyze():
     print(f' - {error}')


-if __name__ == "__main__":
+def main():
     parser = argparse.ArgumentParser()
     parser.add_argument("action", choices=['generate', 'analyze'])
     args = parser.parse_args()

@@ -147,3 +161,7 @@ if __name__ == "__main__":
         analyze()
     elif args.action == "generate":
         generate(sys.stdout)
+
+
+if __name__ == "__main__":
+    main()
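To make the effect of the new {fields} template slot concrete: for each argument parsed out of the message template, the generator now emits a self.<field> = <field> assignment before the super().__init__ call. The snippet below is a hand-written, runnable approximation of that output for one generated class from this diff; the stub parent class and the argument values are illustrative, not generated code:

    class InputValueError(ValueError):
        # stub standing in for the generated error hierarchy
        pass

    class ConflictingInputValueError(InputValueError):

        def __init__(self, first_argument, second_argument):
            self.first_argument = first_argument
            self.second_argument = second_argument
            super().__init__(f"Only '{first_argument}' or '{second_argument}' is allowed, not both.")

    err = ConflictingInputValueError('--channel_id', '--channel_name')
    assert err.first_argument == '--channel_id'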
@@ -14,9 +14,8 @@ from aiohttp.web import GracefulExit
 from docopt import docopt

 from lbry import __version__ as lbrynet_version
-from lbry.extras.daemon.loggly_handler import get_loggly_handler
+from lbry.extras.daemon.daemon import Daemon
 from lbry.conf import Config, CLIConfig
-from lbry.extras.daemon.Daemon import Daemon

 log = logging.getLogger('lbry')

@@ -102,7 +101,7 @@ class ArgumentParser(argparse.ArgumentParser):
         self._optionals.title = 'Options'
         if group_name is None:
             self.epilog = (
-                f"Run 'lbrynet COMMAND --help' for more information on a command or group."
+                "Run 'lbrynet COMMAND --help' for more information on a command or group."
             )
         else:
             self.epilog = (

@@ -168,16 +167,16 @@ def add_command_parser(parent, command):


 def get_argument_parser():
-    main = ArgumentParser(
+    root = ArgumentParser(
         'lbrynet', description='An interface to the LBRY Network.', allow_abbrev=False,
     )
-    main.add_argument(
+    root.add_argument(
         '-v', '--version', dest='cli_version', action="store_true",
         help='Show lbrynet CLI version and exit.'
     )
-    main.set_defaults(group=None, command=None)
-    CLIConfig.contribute_to_argparse(main)
-    sub = main.add_subparsers(metavar='COMMAND')
+    root.set_defaults(group=None, command=None)
+    CLIConfig.contribute_to_argparse(root)
+    sub = root.add_subparsers(metavar='COMMAND')
     start = sub.add_parser(
         'start',
         usage='lbrynet start [--config FILE] [--data-dir DIR] [--wallet-dir DIR] [--download-dir DIR] ...',

@@ -187,6 +186,10 @@ def get_argument_parser():
         '--quiet', dest='quiet', action="store_true",
         help='Disable all console output.'
     )
+    start.add_argument(
+        '--no-logging', dest='no_logging', action="store_true",
+        help='Disable all logging of any kind.'
+    )
     start.add_argument(
         '--verbose', nargs="*",
         help=('Enable debug output for lbry logger and event loop. Optionally specify loggers for which debug output '

@@ -217,15 +220,18 @@ def get_argument_parser():
     else:
         add_command_parser(groups[command['group']], command)

-    return main
+    return root


 def ensure_directory_exists(path: str):
     if not os.path.isdir(path):
         pathlib.Path(path).mkdir(parents=True, exist_ok=True)
+    use_effective_ids = os.access in os.supports_effective_ids
+    if not os.access(path, os.W_OK, effective_ids=use_effective_ids):
+        raise PermissionError(f"The following directory is not writable: {path}")
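The new writability check above only passes effective_ids=True where the platform supports it (os.supports_effective_ids lists the os functions that accept the flag); elsewhere it falls back to the real uid/gid check. A standalone sketch of the same pattern, using a temporary directory instead of the daemon's data directories:

    import os
    import tempfile

    def ensure_writable(path: str) -> None:
        # effective-id-aware access check where available, plain os.access() otherwise
        use_effective_ids = os.access in os.supports_effective_ids
        if not os.access(path, os.W_OK, effective_ids=use_effective_ids):
            raise PermissionError(f"The following directory is not writable: {path}")

    with tempfile.TemporaryDirectory() as tmp_dir:
        ensure_writable(tmp_dir)  # a freshly created temp dir is writable by its creator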
-LOG_MODULES = ('lbry', 'torba', 'aioupnp')
+LOG_MODULES = 'lbry', 'aioupnp'


 def setup_logging(logger: logging.Logger, args: argparse.Namespace, conf: Config):

@@ -241,7 +247,6 @@ def setup_logging(logger: logging.Logger, args: argparse.Namespace, conf: Config
         logger.getChild(module_name).addHandler(handler)

     logger.getChild('lbry').setLevel(logging.INFO)
-    logger.getChild('torba').setLevel(logging.INFO)
     logger.getChild('aioupnp').setLevel(logging.WARNING)
     logger.getChild('aiohttp').setLevel(logging.CRITICAL)

@@ -252,17 +257,13 @@ def setup_logging(logger: logging.Logger, args: argparse.Namespace, conf: Config
     else:
         logger.getChild('lbry').setLevel(logging.DEBUG)

-    if conf.share_usage_data:
-        loggly_handler = get_loggly_handler()
-        loggly_handler.setLevel(logging.ERROR)
-        logger.getChild('lbry').addHandler(loggly_handler)
-

 def run_daemon(args: argparse.Namespace, conf: Config):
     loop = asyncio.get_event_loop()
     if args.verbose is not None:
         loop.set_debug(True)
-    setup_logging(logging.getLogger(), args, conf)
+    if not args.no_logging:
+        setup_logging(logging.getLogger(), args, conf)
     daemon = Daemon(conf)

     def __exit():
@@ -1,8 +1,8 @@
 import asyncio
 import collections
 import logging
-import aiohttp
 import typing
+import aiohttp
 from lbry import utils
 from lbry.conf import Config
 from lbry.extras import system_info

@@ -18,6 +18,7 @@ DOWNLOAD_STARTED = 'Download Started'
 DOWNLOAD_ERRORED = 'Download Errored'
 DOWNLOAD_FINISHED = 'Download Finished'
 HEARTBEAT = 'Heartbeat'
+DISK_SPACE = 'Disk Space'
 CLAIM_ACTION = 'Claim Action'  # publish/create/update/abandon
 NEW_CHANNEL = 'New Channel'
 CREDITS_SENT = 'Credits Sent'

@@ -66,7 +67,7 @@ def _download_properties(conf: Config, external_ip: str, resolve_duration: float
         "node_rpc_timeout": conf.node_rpc_timeout,
         "peer_connect_timeout": conf.peer_connect_timeout,
         "blob_download_timeout": conf.blob_download_timeout,
-        "use_fixed_peers": len(conf.reflector_servers) > 0,
+        "use_fixed_peers": len(conf.fixed_peers) > 0,
         "fixed_peer_delay": fixed_peer_delay,
         "added_fixed_peers": added_fixed_peers,
         "active_peer_count": active_peer_count,

@@ -110,7 +111,6 @@ class AnalyticsManager:
         self.cookies = {}
         self.url = ANALYTICS_ENDPOINT
         self._write_key = utils.deobfuscate(ANALYTICS_TOKEN)
-        self._enabled = conf.share_usage_data
         self._tracked_data = collections.defaultdict(list)
         self.context = _make_context(system_info.get_platform())
         self.installation_id = installation_id

@@ -118,20 +118,24 @@ class AnalyticsManager:
         self.task: typing.Optional[asyncio.Task] = None
         self.external_ip: typing.Optional[str] = None

+    @property
+    def enabled(self):
+        return self.conf.share_usage_data
+
     @property
     def is_started(self):
         return self.task is not None

     async def start(self):
-        if self._enabled and self.task is None:
-            self.external_ip = await utils.get_external_ip()
+        if self.task is None:
             self.task = asyncio.create_task(self.run())

     async def run(self):
         while True:
-            await self._send_heartbeat()
+            if self.enabled:
+                self.external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
+                await self._send_heartbeat()
             await asyncio.sleep(1800)
-            self.external_ip = await utils.get_external_ip()

     def stop(self):
         if self.task is not None and not self.task.done():

@@ -154,7 +158,7 @@ class AnalyticsManager:

     async def track(self, event: typing.Dict):
         """Send a single tracking event"""
-        if self._enabled:
+        if self.enabled:
             log.debug('Sending track event: %s', event)
             await self._post(event)

@@ -166,6 +170,15 @@ class AnalyticsManager:
             })
         )

+    async def send_disk_space_used(self, storage_used, storage_limit, is_from_network_quota):
+        await self.track(
+            self._event(DISK_SPACE, {
+                'used': storage_used,
+                'limit': storage_limit,
+                'from_network_quota': is_from_network_quota
+            })
+        )
+
     async def send_server_startup(self):
         await self.track(self._event(SERVER_STARTUP))
@@ -1,5 +1,5 @@
-from lbry.conf import Config
 from lbry.extras.cli import execute_command
+from lbry.conf import Config


 def daemon_rpc(conf: Config, method: str, **kwargs):
@@ -1,7 +1,7 @@
 import asyncio
 import logging
 from lbry.conf import Config
-from lbry.extras.daemon.ComponentManager import ComponentManager
+from lbry.extras.daemon.componentmanager import ComponentManager

 log = logging.getLogger(__name__)

@@ -37,7 +37,7 @@ class Component(metaclass=ComponentType):
     def running(self):
         return self._running

-    async def get_status(self):
+    async def get_status(self):  # pylint: disable=no-self-use
         return

     async def start(self):
@@ -42,7 +42,7 @@ class ComponentManager:
         self.analytics_manager = analytics_manager
         self.component_classes = {}
         self.components = set()
-        self.started = asyncio.Event(loop=self.loop)
+        self.started = asyncio.Event()
         self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop())

         for component_name, component_class in self.default_component_classes.items():

@@ -118,7 +118,7 @@ class ComponentManager:
             component._setup() for component in stage if not component.running
         ]
         if needing_start:
-            await asyncio.wait(needing_start)
+            await asyncio.wait(map(asyncio.create_task, needing_start))
         self.started.set()

     async def stop(self):

@@ -131,7 +131,7 @@ class ComponentManager:
             component._stop() for component in stage if component.running
         ]
         if needing_stop:
-            await asyncio.wait(needing_stop)
+            await asyncio.wait(map(asyncio.create_task, needing_stop))

     def all_components_running(self, *component_names):
         """

@@ -158,11 +158,14 @@ class ComponentManager:
             for component in self.components
         }

-    def get_component(self, component_name):
+    def get_actual_component(self, component_name):
         for component in self.components:
             if component.component_name == component_name:
-                return component.component
+                return component
         raise NameError(component_name)

+    def get_component(self, component_name):
+        return self.get_actual_component(component_name).component
+
     def has_component(self, component_name):
         return any(component for component in self.components if component_name == component.component_name)
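The switch from passing bare coroutines into asyncio.wait to wrapping them with asyncio.create_task matters on newer Python: passing coroutines directly was deprecated in 3.8 and removed in 3.11, and wrapping also means the returned done/pending sets contain the exact task objects that were created. A minimal, self-contained illustration of the same pattern (not lbry code):

    import asyncio

    async def setup(name: str) -> str:
        await asyncio.sleep(0.01)   # stand-in for a component's _setup()
        return name

    async def main() -> None:
        needing_start = [setup("database"), setup("blob_manager")]
        # wrap each coroutine in a Task first; asyncio.wait() no longer accepts bare coroutines on 3.11+
        done, pending = await asyncio.wait(map(asyncio.create_task, needing_start))
        assert not pending and {t.result() for t in done} == {"database", "blob_manager"}

    asyncio.run(main())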
@@ -4,6 +4,7 @@ import asyncio
 import logging
 import binascii
 import typing

 import base58

 from aioupnp import __version__ as aioupnp_version

@@ -15,12 +16,19 @@ from lbry.dht.node import Node
 from lbry.dht.peer import is_valid_public_ipv4
 from lbry.dht.blob_announcer import BlobAnnouncer
 from lbry.blob.blob_manager import BlobManager
+from lbry.blob.disk_space_manager import DiskSpaceManager
 from lbry.blob_exchange.server import BlobServer
+from lbry.stream.background_downloader import BackgroundDownloader
 from lbry.stream.stream_manager import StreamManager
-from lbry.extras.daemon.Component import Component
+from lbry.file.file_manager import FileManager
+from lbry.extras.daemon.component import Component
 from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
 from lbry.extras.daemon.storage import SQLiteStorage
-from lbry.wallet import LbryWalletManager
+from lbry.torrent.torrent_manager import TorrentManager
+from lbry.wallet import WalletManager
+from lbry.wallet.usage_payment import WalletServerPayer
+from lbry.torrent.tracker import TrackerClient
+from lbry.torrent.session import TorrentSession

 log = logging.getLogger(__name__)

@@ -29,12 +37,17 @@ log = logging.getLogger(__name__)
 DATABASE_COMPONENT = "database"
 BLOB_COMPONENT = "blob_manager"
 WALLET_COMPONENT = "wallet"
+WALLET_SERVER_PAYMENTS_COMPONENT = "wallet_server_payments"
 DHT_COMPONENT = "dht"
 HASH_ANNOUNCER_COMPONENT = "hash_announcer"
-STREAM_MANAGER_COMPONENT = "stream_manager"
+FILE_MANAGER_COMPONENT = "file_manager"
+DISK_SPACE_COMPONENT = "disk_space"
+BACKGROUND_DOWNLOADER_COMPONENT = "background_downloader"
 PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
 UPNP_COMPONENT = "upnp"
 EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
+TRACKER_ANNOUNCER_COMPONENT = "tracker_announcer_component"
+LIBTORRENT_COMPONENT = "libtorrent_component"


 class DatabaseComponent(Component):

@@ -50,7 +63,7 @@ class DatabaseComponent(Component):

     @staticmethod
     def get_current_db_revision():
-        return 14
+        return 15

     @property
     def revision_filename(self):
@@ -110,9 +123,17 @@ class WalletComponent(Component):
     async def get_status(self):
         if self.wallet_manager is None:
             return
-        session_pool = self.wallet_manager.ledger.network.session_pool
-        sessions = session_pool.sessions
+        is_connected = self.wallet_manager.ledger.network.is_connected
+        sessions = []
+        connected = None
+        if is_connected:
+            addr, port = self.wallet_manager.ledger.network.client.server
+            connected = f"{addr}:{port}"
+            sessions.append(self.wallet_manager.ledger.network.client)
+
         result = {
+            'connected': connected,
+            'connected_features': self.wallet_manager.ledger.network.server_features,
             'servers': [
                 {
                     'host': session.server[0],

@@ -121,18 +142,20 @@ class WalletComponent(Component):
                     'availability': session.available,
                 } for session in sessions
             ],
-            'known_servers': len(sessions),
-            'available_servers': len(list(session_pool.available_sessions))
+            'known_servers': len(self.wallet_manager.ledger.network.known_hubs),
+            'available_servers': 1 if is_connected else 0
         }

         if self.wallet_manager.ledger.network.remote_height:
             local_height = self.wallet_manager.ledger.local_height_including_downloaded_height
             disk_height = len(self.wallet_manager.ledger.headers)
-            download_height = local_height - disk_height if disk_height != local_height else local_height
             remote_height = self.wallet_manager.ledger.network.remote_height
-            target_height = remote_height - disk_height if disk_height != local_height else remote_height
-            best_hash = self.wallet_manager.get_best_blockhash()
-            progress = min(max(math.ceil(float(download_height) / float(target_height) * 100), 0), 100)
+            download_height, target_height = local_height - disk_height, remote_height - disk_height
+            if target_height > 0:
+                progress = min(max(math.ceil(float(download_height) / float(target_height) * 100), 0), 100)
+            else:
+                progress = 100
+            best_hash = await self.wallet_manager.get_best_blockhash()
             result.update({
                 'headers_synchronization_progress': progress,
                 'blocks': max(local_height, 0),

@@ -143,8 +166,8 @@ class WalletComponent(Component):
         return result

     async def start(self):
-        log.info("Starting torba wallet")
-        self.wallet_manager = await LbryWalletManager.from_lbrynet_config(self.conf)
+        log.info("Starting wallet")
+        self.wallet_manager = await WalletManager.from_lbrynet_config(self.conf)
         await self.wallet_manager.start()

     async def stop(self):

@@ -152,6 +175,34 @@ class WalletComponent(Component):
         self.wallet_manager = None


+class WalletServerPaymentsComponent(Component):
+    component_name = WALLET_SERVER_PAYMENTS_COMPONENT
+    depends_on = [WALLET_COMPONENT]
+
+    def __init__(self, component_manager):
+        super().__init__(component_manager)
+        self.usage_payment_service = WalletServerPayer(
+            max_fee=self.conf.max_wallet_server_fee, analytics_manager=self.component_manager.analytics_manager,
+        )
+
+    @property
+    def component(self) -> typing.Optional[WalletServerPayer]:
+        return self.usage_payment_service
+
+    async def start(self):
+        wallet_manager = self.component_manager.get_component(WALLET_COMPONENT)
+        await self.usage_payment_service.start(wallet_manager.ledger, wallet_manager.default_wallet)
+
+    async def stop(self):
+        await self.usage_payment_service.stop()
+
+    async def get_status(self):
+        return {
+            'max_fee': self.usage_payment_service.max_fee,
+            'running': self.usage_payment_service.running
+        }
+
+
 class BlobComponent(Component):
     component_name = BLOB_COMPONENT
     depends_on = [DATABASE_COMPONENT]

@@ -228,7 +279,7 @@ class DHTComponent(Component):
         external_ip = upnp_component.external_ip
         storage = self.component_manager.get_component(DATABASE_COMPONENT)
         if not external_ip:
-            external_ip = await utils.get_external_ip()
+            external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
             if not external_ip:
                 log.warning("failed to get external ip")

@@ -242,6 +293,7 @@ class DHTComponent(Component):
             peer_port=self.external_peer_port,
             rpc_timeout=self.conf.node_rpc_timeout,
             split_buckets_under_index=self.conf.split_buckets_under_index,
+            is_bootstrap_node=self.conf.is_bootstrap_node,
             storage=storage
         )
         self.dht_node.start(self.conf.network_interface, self.conf.known_dht_nodes)

@@ -280,23 +332,23 @@ class HashAnnouncerComponent(Component):
         }


-class StreamManagerComponent(Component):
-    component_name = STREAM_MANAGER_COMPONENT
+class FileManagerComponent(Component):
+    component_name = FILE_MANAGER_COMPONENT
     depends_on = [BLOB_COMPONENT, DATABASE_COMPONENT, WALLET_COMPONENT]

     def __init__(self, component_manager):
         super().__init__(component_manager)
-        self.stream_manager: typing.Optional[StreamManager] = None
+        self.file_manager: typing.Optional[FileManager] = None

     @property
-    def component(self) -> typing.Optional[StreamManager]:
-        return self.stream_manager
+    def component(self) -> typing.Optional[FileManager]:
+        return self.file_manager

     async def get_status(self):
-        if not self.stream_manager:
+        if not self.file_manager:
             return
         return {
-            'managed_files': len(self.stream_manager.streams),
+            'managed_files': len(self.file_manager.get_filtered()),
         }

     async def start(self):

@@ -307,14 +359,148 @@ class StreamManagerComponent(Component):
             if self.component_manager.has_component(DHT_COMPONENT) else None
         log.info('Starting the file manager')
         loop = asyncio.get_event_loop()
-        self.stream_manager = StreamManager(
-            loop, self.conf, blob_manager, wallet, storage, node, self.component_manager.analytics_manager
+        self.file_manager = FileManager(
+            loop, self.conf, wallet, storage, self.component_manager.analytics_manager
         )
-        await self.stream_manager.start()
+        self.file_manager.source_managers['stream'] = StreamManager(
+            loop, self.conf, blob_manager, wallet, storage, node,
+        )
+        if self.component_manager.has_component(LIBTORRENT_COMPONENT):
+            torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT)
+            self.file_manager.source_managers['torrent'] = TorrentManager(
+                loop, self.conf, torrent, storage, self.component_manager.analytics_manager
+            )
+        await self.file_manager.start()
         log.info('Done setting up file manager')

     async def stop(self):
-        self.stream_manager.stop()
+        await self.file_manager.stop()
class BackgroundDownloaderComponent(Component):
|
||||||
|
MIN_PREFIX_COLLIDING_BITS = 8
|
||||||
|
component_name = BACKGROUND_DOWNLOADER_COMPONENT
|
||||||
|
depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT, DISK_SPACE_COMPONENT]
|
||||||
|
|
||||||
|
def __init__(self, component_manager):
|
||||||
|
super().__init__(component_manager)
|
||||||
|
self.background_task: typing.Optional[asyncio.Task] = None
|
||||||
|
self.download_loop_delay_seconds = 60
|
||||||
|
self.ongoing_download: typing.Optional[asyncio.Task] = None
|
||||||
|
self.space_manager: typing.Optional[DiskSpaceManager] = None
|
||||||
|
self.blob_manager: typing.Optional[BlobManager] = None
|
||||||
|
self.background_downloader: typing.Optional[BackgroundDownloader] = None
|
||||||
|
self.dht_node: typing.Optional[Node] = None
|
||||||
|
self.space_available: typing.Optional[int] = None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def is_busy(self):
|
||||||
|
return bool(self.ongoing_download and not self.ongoing_download.done())
|
||||||
|
|
||||||
|
@property
|
||||||
|
def component(self) -> 'BackgroundDownloaderComponent':
|
||||||
|
return self
|
||||||
|
|
||||||
|
async def get_status(self):
|
||||||
|
return {'running': self.background_task is not None and not self.background_task.done(),
|
||||||
|
'available_free_space_mb': self.space_available,
|
||||||
|
'ongoing_download': self.is_busy}
|
||||||
|
|
||||||
|
async def download_blobs_in_background(self):
|
||||||
|
while True:
|
||||||
|
self.space_available = await self.space_manager.get_free_space_mb(True)
|
||||||
|
if not self.is_busy and self.space_available > 10:
|
||||||
|
self._download_next_close_blob_hash()
|
||||||
|
await asyncio.sleep(self.download_loop_delay_seconds)
|
||||||
|
|
||||||
|
def _download_next_close_blob_hash(self):
|
||||||
|
node_id = self.dht_node.protocol.node_id
|
||||||
|
for blob_hash in self.dht_node.stored_blob_hashes:
|
||||||
|
if blob_hash.hex() in self.blob_manager.completed_blob_hashes:
|
||||||
|
continue
|
||||||
|
if utils.get_colliding_prefix_bits(node_id, blob_hash) >= self.MIN_PREFIX_COLLIDING_BITS:
|
||||||
|
self.ongoing_download = asyncio.create_task(self.background_downloader.download_blobs(blob_hash.hex()))
|
||||||
|
return
|
||||||
|
|
||||||
|
async def start(self):
|
||||||
|
self.space_manager: DiskSpaceManager = self.component_manager.get_component(DISK_SPACE_COMPONENT)
|
||||||
|
if not self.component_manager.has_component(DHT_COMPONENT):
|
||||||
|
return
|
||||||
|
self.dht_node = self.component_manager.get_component(DHT_COMPONENT)
|
||||||
|
self.blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
|
||||||
|
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
||||||
|
self.background_downloader = BackgroundDownloader(self.conf, storage, self.blob_manager, self.dht_node)
|
||||||
|
self.background_task = asyncio.create_task(self.download_blobs_in_background())
|
||||||
|
|
||||||
|
async def stop(self):
|
||||||
|
if self.ongoing_download and not self.ongoing_download.done():
|
||||||
|
self.ongoing_download.cancel()
|
||||||
|
if self.background_task:
|
||||||
|
self.background_task.cancel()
|
||||||
|
|
||||||
|
|
||||||
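The selection above leans on utils.get_colliding_prefix_bits, which is not part of this diff. A minimal sketch of what such a helper could look like, assuming it counts the identical leading bits of two equal-length byte strings (the local DHT node id and a blob hash):

    def get_colliding_prefix_bits(a: bytes, b: bytes) -> int:
        # XOR zeroes out the shared prefix; bit_length() of the result marks
        # the position of the first differing bit from the most significant side.
        diff = int.from_bytes(a, "big") ^ int.from_bytes(b, "big")
        return len(a) * 8 - diff.bit_length()

With MIN_PREFIX_COLLIDING_BITS = 8, a stored blob hash is only picked up when its first byte matches the first byte of the node id, so each node volunteers for roughly 1/256 of the announced blobs.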
+
+
+class DiskSpaceComponent(Component):
+    component_name = DISK_SPACE_COMPONENT
+    depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT]
+
+    def __init__(self, component_manager):
+        super().__init__(component_manager)
+        self.disk_space_manager: typing.Optional[DiskSpaceManager] = None
+
+    @property
+    def component(self) -> typing.Optional[DiskSpaceManager]:
+        return self.disk_space_manager
+
+    async def get_status(self):
+        if self.disk_space_manager:
+            space_used = await self.disk_space_manager.get_space_used_mb(cached=True)
+            return {
+                'total_used_mb': space_used['total'],
+                'published_blobs_storage_used_mb': space_used['private_storage'],
+                'content_blobs_storage_used_mb': space_used['content_storage'],
+                'seed_blobs_storage_used_mb': space_used['network_storage'],
+                'running': self.disk_space_manager.running,
+            }
+        return {'space_used': '0', 'network_seeding_space_used': '0', 'running': False}
+
+    async def start(self):
+        db = self.component_manager.get_component(DATABASE_COMPONENT)
+        blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
+        self.disk_space_manager = DiskSpaceManager(
+            self.conf, db, blob_manager,
+            analytics=self.component_manager.analytics_manager
+        )
+        await self.disk_space_manager.start()
+
+    async def stop(self):
+        await self.disk_space_manager.stop()
+
+
+class TorrentComponent(Component):
+    component_name = LIBTORRENT_COMPONENT
+
+    def __init__(self, component_manager):
+        super().__init__(component_manager)
+        self.torrent_session = None
+
+    @property
+    def component(self) -> typing.Optional[TorrentSession]:
+        return self.torrent_session
+
+    async def get_status(self):
+        if not self.torrent_session:
+            return
+        return {
+            'running': True,  # TODO: what to return here?
+        }
+
+    async def start(self):
+        self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
+        await self.torrent_session.bind()  # TODO: specify host/port
+
+    async def stop(self):
+        if self.torrent_session:
+            await self.torrent_session.pause()
+
 class PeerProtocolServerComponent(Component):
@@ -331,9 +517,8 @@ class PeerProtocolServerComponent(Component):

     async def start(self):
         log.info("start blob server")
-        upnp = self.component_manager.get_component(UPNP_COMPONENT)
         blob_manager: BlobManager = self.component_manager.get_component(BLOB_COMPONENT)
-        wallet: LbryWalletManager = self.component_manager.get_component(WALLET_COMPONENT)
+        wallet: WalletManager = self.component_manager.get_component(WALLET_COMPONENT)
         peer_port = self.conf.tcp_port
         address = await wallet.get_unused_address()
         self.blob_server = BlobServer(asyncio.get_event_loop(), blob_manager, address)

@@ -366,7 +551,7 @@ class UPnPComponent(Component):
         while True:
             if now:
                 await self._maintain_redirects()
-            await asyncio.sleep(360, loop=self.component_manager.loop)
+            await asyncio.sleep(360)

     async def _maintain_redirects(self):
         # setup the gateway if necessary
@@ -375,8 +560,6 @@ class UPnPComponent(Component):
             self.upnp = await UPnP.discover(loop=self.component_manager.loop)
             log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
         except Exception as err:
-            if isinstance(err, asyncio.CancelledError):
-                raise
             log.warning("upnp discovery failed: %s", err)
             self.upnp = None

@@ -391,11 +574,15 @@ class UPnPComponent(Component):
             pass
         if external_ip and not is_valid_public_ipv4(external_ip):
             log.warning("UPnP returned a private/reserved ip - %s, checking lbry.com fallback", external_ip)
-            external_ip = await utils.get_external_ip()
+            external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
         if self.external_ip and self.external_ip != external_ip:
             log.info("external ip changed from %s to %s", self.external_ip, external_ip)
         if external_ip:
             self.external_ip = external_ip
+            dht_component = self.component_manager.get_component(DHT_COMPONENT)
+            if dht_component:
+                dht_node = dht_component.component
+                dht_node.protocol.external_ip = external_ip
         # assert self.external_ip is not None  # TODO: handle going/starting offline

         if not self.upnp_redirects and self.upnp:  # setup missing redirects
@@ -440,22 +627,24 @@ class UPnPComponent(Component):
                     log.info("refreshed upnp redirect for peer port: %i", tcp_port)
             except (asyncio.TimeoutError, UPnPError, NotImplementedError):
                 del self.upnp_redirects['TCP']
-        if ('TCP' in self.upnp_redirects
-                and PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components) and (
-                'UDP' in self.upnp_redirects and DHT_COMPONENT not in self.component_manager.skip_components):
+        if ('TCP' in self.upnp_redirects and
+                PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components) and \
+                ('UDP' in self.upnp_redirects and DHT_COMPONENT not in self.component_manager.skip_components):
             if self.upnp_redirects:
                 log.debug("upnp redirects are still active")

     async def start(self):
         log.info("detecting external ip")
         if not self.use_upnp:
-            self.external_ip = await utils.get_external_ip()
+            self.external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
             return
         success = False
         await self._maintain_redirects()
         if self.upnp:
-            if not self.upnp_redirects and not all([x in self.component_manager.skip_components for x in
-                                                    (DHT_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT)]):
+            if not self.upnp_redirects and not all(
+                    x in self.component_manager.skip_components
+                    for x in (DHT_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT)
+            ):
                 log.error("failed to setup upnp")
             else:
                 success = True
@@ -464,12 +653,14 @@ class UPnPComponent(Component):
         else:
             log.error("failed to setup upnp")
         if not self.external_ip:
-            self.external_ip = await utils.get_external_ip()
+            self.external_ip, probed_url = await utils.get_external_ip(self.conf.lbryum_servers)
             if self.external_ip:
-                log.info("detected external ip using lbry.com fallback")
+                log.info("detected external ip using %s fallback", probed_url)
         if self.component_manager.analytics_manager:
-            await self.component_manager.analytics_manager.send_upnp_setup_success_fail(
-                success, await self.get_status()
+            self.component_manager.loop.create_task(
+                self.component_manager.analytics_manager.send_upnp_setup_success_fail(
+                    success, await self.get_status()
+                )
             )
         self._maintain_redirects_task = self.component_manager.loop.create_task(
             self._repeatedly_maintain_redirects(now=False)
@@ -480,7 +671,7 @@ class UPnPComponent(Component):
             log.info("Removing upnp redirects: %s", self.upnp_redirects)
             await asyncio.wait([
                 self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
-            ], loop=self.component_manager.loop)
+            ])
         if self._maintain_redirects_task and not self._maintain_redirects_task.done():
             self._maintain_redirects_task.cancel()

@@ -511,3 +702,49 @@ class ExchangeRateManagerComponent(Component):

     async def stop(self):
         self.exchange_rate_manager.stop()
+
+
+class TrackerAnnouncerComponent(Component):
+    component_name = TRACKER_ANNOUNCER_COMPONENT
+    depends_on = [FILE_MANAGER_COMPONENT]
+
+    def __init__(self, component_manager):
+        super().__init__(component_manager)
+        self.file_manager = None
+        self.announce_task = None
+        self.tracker_client: typing.Optional[TrackerClient] = None
+
+    @property
+    def component(self):
+        return self.tracker_client
+
+    @property
+    def running(self):
+        return self._running and self.announce_task and not self.announce_task.done()
+
+    async def announce_forever(self):
+        while True:
+            sleep_seconds = 60.0
+            announce_sd_hashes = []
+            for file in self.file_manager.get_filtered():
+                if not file.downloader:
+                    continue
+                announce_sd_hashes.append(bytes.fromhex(file.sd_hash))
+            await self.tracker_client.announce_many(*announce_sd_hashes)
+            await asyncio.sleep(sleep_seconds)
+
+    async def start(self):
+        node = self.component_manager.get_component(DHT_COMPONENT) \
+            if self.component_manager.has_component(DHT_COMPONENT) else None
+        node_id = node.protocol.node_id if node else None
+        self.tracker_client = TrackerClient(node_id, self.conf.tcp_port, lambda: self.conf.tracker_servers)
+        await self.tracker_client.start()
+        self.file_manager = self.component_manager.get_component(FILE_MANAGER_COMPONENT)
+        self.announce_task = asyncio.create_task(self.announce_forever())
+
+    async def stop(self):
+        self.file_manager = None
+        if self.announce_task and not self.announce_task.done():
+            self.announce_task.cancel()
+        self.announce_task = None
+        self.tracker_client.stop()

@@ -2,8 +2,10 @@ import json
 import time
 import asyncio
 import logging
+from statistics import median
 from decimal import Decimal
 from typing import Optional, Iterable, Type
+from aiohttp.client_exceptions import ContentTypeError, ClientConnectionError
 from lbry.error import InvalidExchangeRateResponseError, CurrencyConversionError
 from lbry.utils import aiohttp_request
 from lbry.wallet.dewies import lbc_to_dewies
@@ -29,9 +31,9 @@ class ExchangeRate:


 class MarketFeed:
-    name: str
-    market: str
-    url: str
+    name: str = ""
+    market: str = ""
+    url: str = ""
     params = {}
     fee = 0

@@ -53,12 +55,20 @@ class MarketFeed:
     def is_online(self):
         return self.last_check+self.update_interval+self.request_timeout > time.time()

-    def get_rate_from_response(self, response):
+    def get_rate_from_response(self, json_response):
         raise NotImplementedError()

     async def get_response(self):
-        async with aiohttp_request('get', self.url, params=self.params, timeout=self.request_timeout) as response:
-            self._last_response = await response.json()
+        async with aiohttp_request(
+                'get', self.url, params=self.params,
+                timeout=self.request_timeout, headers={"User-Agent": "lbrynet"}
+        ) as response:
+            try:
+                self._last_response = await response.json(content_type=None)
+            except ContentTypeError as e:
+                self._last_response = {}
+                log.warning("Could not parse exchange rate response from %s: %s", self.name, e.message)
+                log.debug(await response.text())
         return self._last_response

     async def get_rate(self):
@@ -69,18 +79,21 @@ class MarketFeed:
             log.debug("Saving rate update %f for %s from %s", rate, self.market, self.name)
             self.rate = ExchangeRate(self.market, rate, int(time.time()))
             self.last_check = time.time()
-            self.event.set()
             return self.rate
-        except asyncio.CancelledError:
-            raise
         except asyncio.TimeoutError:
             log.warning("Timed out fetching exchange rate from %s.", self.name)
         except json.JSONDecodeError as e:
-            log.warning("Could not parse exchange rate response from %s: %s", self.name, e.doc)
+            msg = e.doc if '<html>' not in e.doc else 'unexpected content type.'
+            log.warning("Could not parse exchange rate response from %s: %s", self.name, msg)
+            log.debug(e.doc)
         except InvalidExchangeRateResponseError as e:
             log.warning(str(e))
+        except ClientConnectionError as e:
+            log.warning("Error trying to connect to exchange rate %s: %s", self.name, str(e))
         except Exception as e:
             log.exception("Exchange rate error (%s from %s):", self.market, self.name)
+        finally:
+            self.event.set()

     async def keep_updated(self):
         while True:
@@ -98,70 +111,92 @@ class MarketFeed:
             self.event.clear()


-class BittrexFeed(MarketFeed):
+class BaseBittrexFeed(MarketFeed):
     name = "Bittrex"
-    market = "BTCLBC"
-    url = "https://bittrex.com/api/v1.1/public/getmarkethistory"
-    params = {'market': 'BTC-LBC', 'count': 50}
+    market = None
+    url = None
     fee = 0.0025

+    def get_rate_from_response(self, json_response):
+        if 'lastTradeRate' not in json_response:
+            raise InvalidExchangeRateResponseError(self.name, 'result not found')
+        return 1.0 / float(json_response['lastTradeRate'])
+
+
+class BittrexBTCFeed(BaseBittrexFeed):
+    market = "BTCLBC"
+    url = "https://api.bittrex.com/v3/markets/LBC-BTC/ticker"
+
+
+class BittrexUSDFeed(BaseBittrexFeed):
+    market = "USDLBC"
+    url = "https://api.bittrex.com/v3/markets/LBC-USD/ticker"
+
+
+class BaseCoinExFeed(MarketFeed):
+    name = "CoinEx"
+    market = None
+    url = None
+
+    def get_rate_from_response(self, json_response):
+        if 'data' not in json_response or \
+           'ticker' not in json_response['data'] or \
+           'last' not in json_response['data']['ticker']:
+            raise InvalidExchangeRateResponseError(self.name, 'result not found')
+        return 1.0 / float(json_response['data']['ticker']['last'])
+
+
+class CoinExBTCFeed(BaseCoinExFeed):
+    market = "BTCLBC"
+    url = "https://api.coinex.com/v1/market/ticker?market=LBCBTC"
+
+
+class CoinExUSDFeed(BaseCoinExFeed):
+    market = "USDLBC"
+    url = "https://api.coinex.com/v1/market/ticker?market=LBCUSDT"
+
+
+class BaseHotbitFeed(MarketFeed):
+    name = "hotbit"
+    market = None
+    url = "https://api.hotbit.io/api/v1/market.last"

     def get_rate_from_response(self, json_response):
         if 'result' not in json_response:
             raise InvalidExchangeRateResponseError(self.name, 'result not found')
-        trades = json_response['result']
-        if len(trades) == 0:
-            raise InvalidExchangeRateResponseError(self.name, 'trades not found')
-        totals = sum([i['Total'] for i in trades])
-        qtys = sum([i['Quantity'] for i in trades])
-        if totals <= 0 or qtys <= 0:
-            raise InvalidExchangeRateResponseError(self.name, 'quantities were not positive')
-        vwap = totals / qtys
-        return float(1.0 / vwap)
+        return 1.0 / float(json_response['result'])


-class LBRYFeed(MarketFeed):
-    name = "lbry.com"
+class HotbitBTCFeed(BaseHotbitFeed):
     market = "BTCLBC"
-    url = "https://api.lbry.com/lbc/exchange_rate"
+    params = {"market": "LBC/BTC"}

-    def get_rate_from_response(self, json_response):
-        if 'data' not in json_response:
-            raise InvalidExchangeRateResponseError(self.name, 'result not found')
-        return 1.0 / json_response['data']['lbc_btc']


-class LBRYBTCFeed(LBRYFeed):
-    market = "USDBTC"
+class HotbitUSDFeed(BaseHotbitFeed):
+    market = "USDLBC"
+    params = {"market": "LBC/USDT"}

-    def get_rate_from_response(self, json_response):
-        if 'data' not in json_response:
-            raise InvalidExchangeRateResponseError(self.name, 'result not found')
-        return 1.0 / json_response['data']['btc_usd']


-class CryptonatorFeed(MarketFeed):
-    name = "cryptonator.com"
+class UPbitBTCFeed(MarketFeed):
+    name = "UPbit"
     market = "BTCLBC"
-    url = "https://api.cryptonator.com/api/ticker/btc-lbc"
+    url = "https://api.upbit.com/v1/ticker"
+    params = {"markets": "BTC-LBC"}

     def get_rate_from_response(self, json_response):
-        if 'ticker' not in json_response or len(json_response['ticker']) == 0 or \
-                'success' not in json_response or json_response['success'] is not True:
+        if "error" in json_response or len(json_response) != 1 or 'trade_price' not in json_response[0]:
             raise InvalidExchangeRateResponseError(self.name, 'result not found')
-        return float(json_response['ticker']['price'])
+        return 1.0 / float(json_response[0]['trade_price'])


-class CryptonatorBTCFeed(CryptonatorFeed):
-    market = "USDBTC"
-    url = "https://api.cryptonator.com/api/ticker/usd-btc"


 FEEDS: Iterable[Type[MarketFeed]] = (
-    LBRYFeed,
-    LBRYBTCFeed,
-    BittrexFeed,
-    CryptonatorFeed,
-    CryptonatorBTCFeed,
+    BittrexBTCFeed,
+    BittrexUSDFeed,
+    CoinExBTCFeed,
+    CoinExUSDFeed,
+    # HotbitBTCFeed,
+    # HotbitUSDFeed,
+    # UPbitBTCFeed,
 )

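All of the replacement feeds store the inverse of the quoted last price, so a market that prices LBC in BTC produces a spot value expressed as LBC per BTC. Illustrative arithmetic only, using a hypothetical payload shaped like the Bittrex v3 ticker endpoint referenced above:

    json_response = {"lastTradeRate": "0.00000200"}      # hypothetical LBC-BTC ticker payload
    spot = 1.0 / float(json_response["lastTradeRate"])   # 500000.0, i.e. one BTC buys 500,000 LBC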
@@ -185,20 +220,23 @@ class ExchangeRateManager:
             source.stop()

     def convert_currency(self, from_currency, to_currency, amount):
-        rates = [market.rate for market in self.market_feeds]
-        log.debug("Converting %f %s to %s, rates: %s" % (amount, from_currency, to_currency, rates))
+        log.debug(
+            "Converting %f %s to %s, rates: %s",
+            amount, from_currency, to_currency,
+            [market.rate for market in self.market_feeds]
+        )
         if from_currency == to_currency:
             return round(amount, 8)

+        rates = []
         for market in self.market_feeds:
             if (market.has_rate and market.is_online and
                     market.rate.currency_pair == (from_currency, to_currency)):
-                return round(amount * Decimal(market.rate.spot), 8)
-        for market in self.market_feeds:
-            if (market.has_rate and market.is_online and
-                    market.rate.currency_pair[0] == from_currency):
-                return round(self.convert_currency(
-                    market.rate.currency_pair[1], to_currency, amount * Decimal(market.rate.spot)), 8)
+                rates.append(market.rate.spot)
+        if rates:
+            return round(amount * Decimal(median(rates)), 8)
         raise CurrencyConversionError(
             f'Unable to convert {amount} from {from_currency} to {to_currency}')

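With this change convert_currency no longer returns the first matching feed's rate; it gathers every online feed that quotes the requested pair and converts with the median, so a single outlier feed cannot skew the result. A small sketch of the same computation with made-up spot values:

    from decimal import Decimal
    from statistics import median

    rates = [99000.0, 100000.0, 250000.0]        # hypothetical LBC-per-BTC spots; the last one is an outlier
    amount = Decimal("0.001")                    # BTC being converted
    round(amount * Decimal(median(rates)), 8)    # -> Decimal('100.00000000'); the outlier is ignored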

@@ -6,11 +6,11 @@ from json import JSONEncoder

 from google.protobuf.message import DecodeError

-from torba.client.wallet import Wallet
-from torba.client.bip32 import PubKey
 from lbry.schema.claim import Claim
-from lbry.wallet.ledger import MainNetLedger, Account
-from lbry.wallet.transaction import Transaction, Output
+from lbry.schema.support import Support
+from lbry.torrent.torrent_manager import TorrentSource
+from lbry.wallet import Wallet, Ledger, Account, Transaction, Output
+from lbry.wallet.bip32 import PublicKey
 from lbry.wallet.dewies import dewies_to_lbc
 from lbry.stream.managed_stream import ManagedStream

@@ -27,6 +27,8 @@ def encode_txo_doc():
         'address': "address of who can spend the txo",
         'confirmations': "number of confirmed blocks",
         'is_change': "payment to change address, only available when it can be determined",
+        'is_received': "true if txo was sent from external account to this account",
+        'is_spent': "true if txo is spent",
         'is_mine': "payment to one of your accounts, only available when it can be determined",
         'type': "one of 'claim', 'support' or 'purchase'",
         'name': "when type is 'claim' or 'support', this is the claim name",
@@ -108,23 +110,25 @@ def encode_file_doc():
         'metadata': '(dict) None if claim is not found else the claim metadata',
         'channel_claim_id': '(str) None if claim is not found or not signed',
         'channel_name': '(str) None if claim is not found or not signed',
-        'claim_name': '(str) None if claim is not found else the claim name'
+        'claim_name': '(str) None if claim is not found else the claim name',
+        'reflector_progress': '(int) reflector upload progress, 0 to 100',
+        'uploading_to_reflector': '(bool) set to True when currently uploading to reflector'
     }


 class JSONResponseEncoder(JSONEncoder):

-    def __init__(self, *args, ledger: MainNetLedger, include_protobuf=False, **kwargs):
+    def __init__(self, *args, ledger: Ledger, include_protobuf=False, **kwargs):
         super().__init__(*args, **kwargs)
         self.ledger = ledger
         self.include_protobuf = include_protobuf

-    def default(self, obj):  # pylint: disable=method-hidden
+    def default(self, obj):  # pylint: disable=method-hidden,arguments-renamed,too-many-return-statements
         if isinstance(obj, Account):
             return self.encode_account(obj)
         if isinstance(obj, Wallet):
             return self.encode_wallet(obj)
-        if isinstance(obj, ManagedStream):
+        if isinstance(obj, (ManagedStream, TorrentSource)):
             return self.encode_file(obj)
         if isinstance(obj, Transaction):
             return self.encode_transaction(obj)
@@ -132,7 +136,9 @@ class JSONResponseEncoder(JSONEncoder):
             return self.encode_output(obj)
         if isinstance(obj, Claim):
             return self.encode_claim(obj)
-        if isinstance(obj, PubKey):
+        if isinstance(obj, Support):
+            return obj.to_dict()
+        if isinstance(obj, PublicKey):
             return obj.extended_key_string()
         if isinstance(obj, datetime):
             return obj.strftime("%Y%m%dT%H:%M:%S")
@@ -166,12 +172,22 @@ class JSONResponseEncoder(JSONEncoder):
             'amount': dewies_to_lbc(txo.amount),
             'address': txo.get_address(self.ledger) if txo.has_address else None,
             'confirmations': (best_height+1) - tx_height if tx_height > 0 else tx_height,
-            'timestamp': self.ledger.headers[tx_height]['timestamp'] if 0 < tx_height <= best_height else None
+            'timestamp': self.ledger.headers.estimated_timestamp(tx_height)
         }
-        if txo.is_change is not None:
-            output['is_change'] = txo.is_change
-        if txo.is_my_account is not None:
-            output['is_mine'] = txo.is_my_account
+        if txo.is_spent is not None:
+            output['is_spent'] = txo.is_spent
+        if txo.is_my_output is not None:
+            output['is_my_output'] = txo.is_my_output
+        if txo.is_my_input is not None:
+            output['is_my_input'] = txo.is_my_input
+        if txo.sent_supports is not None:
+            output['sent_supports'] = dewies_to_lbc(txo.sent_supports)
+        if txo.sent_tips is not None:
+            output['sent_tips'] = dewies_to_lbc(txo.sent_tips)
+        if txo.received_tips is not None:
+            output['received_tips'] = dewies_to_lbc(txo.received_tips)
+        if txo.is_internal_transfer is not None:
+            output['is_internal_transfer'] = txo.is_internal_transfer

         if txo.script.is_claim_name:
             output['type'] = 'claim'
@@ -207,25 +223,26 @@ class JSONResponseEncoder(JSONEncoder):
             output['claims'] = [self.encode_output(o) for o in txo.claims]
         if txo.reposted_claim is not None:
             output['reposted_claim'] = self.encode_output(txo.reposted_claim)
-        if txo.script.is_claim_name or txo.script.is_update_claim:
+        if txo.script.is_claim_name or txo.script.is_update_claim or txo.script.is_support_claim_data:
             try:
-                output['value'] = txo.claim
+                output['value'] = txo.signable
+                if self.include_protobuf:
+                    output['protobuf'] = hexlify(txo.signable.to_bytes())
+                if txo.purchase_receipt is not None:
+                    output['purchase_receipt'] = self.encode_output(txo.purchase_receipt)
+                if txo.script.is_claim_name or txo.script.is_update_claim:
                     output['value_type'] = txo.claim.claim_type
-                if self.include_protobuf:
-                    output['protobuf'] = hexlify(txo.claim.to_bytes())
-                if txo.purchase_receipt is not None:
-                    output['purchase_receipt'] = self.encode_output(txo.purchase_receipt)
                     if txo.claim.is_channel:
                         output['has_signing_key'] = txo.has_private_key
-                if check_signature and txo.claim.is_signed:
+                if check_signature and txo.signable.is_signed:
                     if txo.channel is not None:
                         output['signing_channel'] = self.encode_output(txo.channel)
                         output['is_channel_signature_valid'] = txo.is_signed_by(txo.channel, self.ledger)
                     else:
-                        output['signing_channel'] = {'channel_id': txo.claim.signing_channel_id}
+                        output['signing_channel'] = {'channel_id': txo.signable.signing_channel_id}
                         output['is_channel_signature_valid'] = False
             except DecodeError:
                 pass
         return output

     def encode_claim_meta(self, meta):
@@ -234,7 +251,7 @@ class JSONResponseEncoder(JSONEncoder):
             if isinstance(value, int):
                 meta[key] = dewies_to_lbc(value)
         if 0 < meta.get('creation_height', 0) <= self.ledger.headers.height:
-            meta['creation_timestamp'] = self.ledger.headers[meta['creation_height']]['timestamp']
+            meta['creation_timestamp'] = self.ledger.headers.estimated_timestamp(meta['creation_height'])
         return meta

     def encode_input(self, txi):
@@ -250,7 +267,8 @@ class JSONResponseEncoder(JSONEncoder):
         result['is_default'] = self.ledger.accounts[0] == account
         return result

-    def encode_wallet(self, wallet):
+    @staticmethod
+    def encode_wallet(wallet):
         return {
             'id': wallet.id,
             'name': wallet.name
@@ -260,26 +278,32 @@ class JSONResponseEncoder(JSONEncoder):
         output_exists = managed_stream.output_file_exists
         tx_height = managed_stream.stream_claim_info.height
         best_height = self.ledger.headers.height
-        return {
-            'streaming_url': managed_stream.stream_url,
+        is_stream = hasattr(managed_stream, 'stream_hash')
+        if is_stream:
+            total_bytes_lower_bound = managed_stream.descriptor.lower_bound_decrypted_length()
+            total_bytes = managed_stream.descriptor.upper_bound_decrypted_length()
+        else:
+            total_bytes_lower_bound = total_bytes = managed_stream.torrent_length
+        result = {
+            'streaming_url': None,
             'completed': managed_stream.completed,
-            'file_name': managed_stream.file_name if output_exists else None,
-            'download_directory': managed_stream.download_directory if output_exists else None,
-            'download_path': managed_stream.full_path if output_exists else None,
+            'file_name': None,
+            'download_directory': None,
+            'download_path': None,
             'points_paid': 0.0,
             'stopped': not managed_stream.running,
-            'stream_hash': managed_stream.stream_hash,
-            'stream_name': managed_stream.descriptor.stream_name,
-            'suggested_file_name': managed_stream.descriptor.suggested_file_name,
-            'sd_hash': managed_stream.descriptor.sd_hash,
-            'mime_type': managed_stream.mime_type,
-            'key': managed_stream.descriptor.key,
-            'total_bytes_lower_bound': managed_stream.descriptor.lower_bound_decrypted_length(),
-            'total_bytes': managed_stream.descriptor.upper_bound_decrypted_length(),
+            'stream_hash': None,
+            'stream_name': None,
+            'suggested_file_name': None,
+            'sd_hash': None,
+            'mime_type': None,
+            'key': None,
+            'total_bytes_lower_bound': total_bytes_lower_bound,
+            'total_bytes': total_bytes,
             'written_bytes': managed_stream.written_bytes,
-            'blobs_completed': managed_stream.blobs_completed,
-            'blobs_in_stream': managed_stream.blobs_in_stream,
-            'blobs_remaining': managed_stream.blobs_remaining,
+            'blobs_completed': None,
+            'blobs_in_stream': None,
+            'blobs_remaining': None,
             'status': managed_stream.status,
             'claim_id': managed_stream.claim_id,
             'txid': managed_stream.txid,
@@ -295,8 +319,38 @@ class JSONResponseEncoder(JSONEncoder):
             'added_on': managed_stream.added_on,
             'height': tx_height,
             'confirmations': (best_height + 1) - tx_height if tx_height > 0 else tx_height,
-            'timestamp': self.ledger.headers[tx_height]['timestamp'] if 0 < tx_height <= best_height else None
+            'timestamp': self.ledger.headers.estimated_timestamp(tx_height),
+            'is_fully_reflected': False,
+            'reflector_progress': False,
+            'uploading_to_reflector': False
         }
+        if is_stream:
+            result.update({
+                'streaming_url': managed_stream.stream_url,
+                'stream_hash': managed_stream.stream_hash,
+                'stream_name': managed_stream.stream_name,
+                'suggested_file_name': managed_stream.suggested_file_name,
+                'sd_hash': managed_stream.descriptor.sd_hash,
+                'mime_type': managed_stream.mime_type,
+                'key': managed_stream.descriptor.key,
+                'blobs_completed': managed_stream.blobs_completed,
+                'blobs_in_stream': managed_stream.blobs_in_stream,
+                'blobs_remaining': managed_stream.blobs_remaining,
+                'is_fully_reflected': managed_stream.is_fully_reflected,
+                'reflector_progress': managed_stream.reflector_progress,
+                'uploading_to_reflector': managed_stream.uploading_to_reflector
+            })
+        else:
+            result.update({
+                'streaming_url': f'file://{managed_stream.full_path}',
+            })
+        if output_exists:
+            result.update({
+                'file_name': managed_stream.file_name,
+                'download_directory': managed_stream.download_directory,
+                'download_path': managed_stream.full_path,
+            })
+        return result

     def encode_claim(self, claim):
         encoded = getattr(claim, claim.claim_type).to_dict()

@@ -35,6 +35,10 @@ def migrate_db(conf, start, end):
         from .migrate12to13 import do_migration
     elif current == 13:
         from .migrate13to14 import do_migration
+    elif current == 14:
+        from .migrate14to15 import do_migration
+    elif current == 15:
+        from .migrate15to16 import do_migration
     else:
         raise Exception(f"DB migration of version {current} to {current+1} is not available")
     try:

@@ -65,7 +65,7 @@ def do_migration(conf):
             added_on
         from file;

-        insert into new_content_claim (stream_hash, bt_infohash, claim_outpoint)
+        insert or ignore into new_content_claim (stream_hash, bt_infohash, claim_outpoint)
         select stream_hash, NULL, claim_outpoint from content_claim;

         drop table file;

lbry/extras/daemon/migrator/migrate14to15.py (new file, 16 lines)
@@ -0,0 +1,16 @@
+import os
+import sqlite3
+
+
+def do_migration(conf):
+    db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
+    connection = sqlite3.connect(db_path)
+    cursor = connection.cursor()
+
+    cursor.executescript("""
+        alter table blob add column added_on integer not null default 0;
+        alter table blob add column is_mine integer not null default 1;
+    """)
+
+    connection.commit()
+    connection.close()
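A minimal sketch of exercising the new migration on its own; the SimpleNamespace standing in for conf and the pre-existing blob table schema are assumptions for illustration, and the import is left commented because it depends on the installed package layout:

    import os
    import sqlite3
    import tempfile
    import types

    with tempfile.TemporaryDirectory() as data_dir:
        db_path = os.path.join(data_dir, "lbrynet.sqlite")
        with sqlite3.connect(db_path) as db:
            db.executescript("create table blob (blob_hash text primary key, blob_length integer);")
        conf = types.SimpleNamespace(data_dir=data_dir)
        # from lbry.extras.daemon.migrator.migrate14to15 import do_migration
        # do_migration(conf)  # afterwards, the blob table gains added_on and is_mine columns with defaults 0 and 1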