From dfef80d9c28c4ec953b809dee32170e940748304 Mon Sep 17 00:00:00 2001
From: Jack Robison
Date: Tue, 8 Mar 2022 11:01:19 -0500
Subject: [PATCH] initial

---
 .gitignore | 13 +
 README.md | 38 +
 diagram.png | Bin 0 -> 145582 bytes
 scribe/__init__.py | 5 +
 scribe/base58.py | 111 +
 scribe/bip32.py | 373 ++
 scribe/blockchain/__init__.py | 1 +
 scribe/blockchain/block_processor.py | 1695 ++++
 scribe/blockchain/daemon.py | 328 ++
 scribe/blockchain/network.py | 300 ++
 scribe/blockchain/prefetcher.py | 128 +
 scribe/blockchain/transaction/__init__.py | 148 +
 scribe/blockchain/transaction/deserializer.py | 163 +
 scribe/blockchain/transaction/script.py | 298 ++
 scribe/build_info.py | 4 +
 scribe/cli.py | 63 +
 scribe/common.py | 362 ++
 scribe/db/__init__.py | 1 +
 scribe/db/common.py | 526 ++
 scribe/db/db.py | 1129 ++++
 scribe/db/interface.py | 273 +
 scribe/db/merkle.py | 258 +
 scribe/db/prefixes.py | 1670 ++++
 scribe/db/revertable.py | 175 +
 scribe/elasticsearch/__init__.py | 2 +
 scribe/elasticsearch/constants.py | 100 +
 scribe/elasticsearch/fast_ar_trending.py | 117 +
 scribe/elasticsearch/notifier_protocol.py | 55 +
 scribe/elasticsearch/search.py | 870 +++
 scribe/env.py | 393 ++
 scribe/error/Makefile | 5 +
 scribe/error/README.md | 95 +
 scribe/error/__init__.py | 494 ++
 scribe/error/base.py | 9 +
 scribe/error/generate.py | 167 +
 scribe/hub/__init__.py | 0
 scribe/hub/common.py | 209 +
 scribe/hub/framer.py | 51 +
 scribe/hub/jsonrpc.py | 616 +++
 scribe/hub/mempool.py | 200 +
 scribe/hub/prometheus.py | 68 +
 scribe/hub/session.py | 1829 +++++++
 scribe/hub/udp.py | 240 +
 scribe/readers/__init__.py | 3 +
 scribe/readers/elastic_sync.py | 421 ++
 scribe/readers/hub_server.py | 162 +
 scribe/readers/interface.py | 119 +
 scribe/schema/Makefile | 5 +
 scribe/schema/README.md | 24 +
 scribe/schema/__init__.py | 1 +
 scribe/schema/attrs.py | 573 ++
 scribe/schema/base.py | 124 +
 scribe/schema/claim.py | 422 ++
 scribe/schema/compat.py | 93 +
 scribe/schema/mime_types.py | 214 +
 scribe/schema/purchase.py | 47 +
 scribe/schema/result.py | 258 +
 scribe/schema/support.py | 23 +
 scribe/schema/tags.py | 13 +
 scribe/schema/types/__init__.py | 0
 scribe/schema/types/v1/__init__.py | 0
 scribe/schema/types/v1/certificate_pb2.py | 146 +
 scribe/schema/types/v1/fee_pb2.py | 148 +
 scribe/schema/types/v1/legacy_claim_pb2.py | 158 +
 scribe/schema/types/v1/metadata_pb2.py | 936 ++++
 scribe/schema/types/v1/signature_pb2.py | 118 +
 scribe/schema/types/v1/source_pb2.py | 140 +
 scribe/schema/types/v1/stream_pb2.py | 113 +
 scribe/schema/types/v2/__init__.py | 0
 scribe/schema/types/v2/claim_pb2.py | 4692 +++++++++++++++++
 scribe/schema/types/v2/hub_pb2.py | 960 ++++
 scribe/schema/types/v2/hub_pb2_grpc.py | 298 ++
 scribe/schema/types/v2/purchase_pb2.py | 69 +
 scribe/schema/types/v2/result_pb2.py | 464 ++
 scribe/schema/types/v2/result_pb2_grpc.py | 4 +
 scribe/schema/types/v2/support_pb2.py | 76 +
 scribe/schema/url.py | 130 +
 setup.py | 69 +
 tests/__init__.py | 0
 tests/test_resolve_command.py | 1798 +++
 tests/test_revertable.py | 237 +
 tests/testcase.py | 748 +++
 82 files changed, 27388 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 README.md
 create mode 100644 diagram.png
 create mode 100644 scribe/__init__.py
 create mode 100644 scribe/base58.py
 create mode 100644 scribe/bip32.py
 create mode 100644 scribe/blockchain/__init__.py
 create mode 100644 scribe/blockchain/block_processor.py
 create mode 100644 scribe/blockchain/daemon.py
 create mode 100644 scribe/blockchain/network.py
 create mode 100644 scribe/blockchain/prefetcher.py
 create mode 100644 scribe/blockchain/transaction/__init__.py
 create mode 100644 scribe/blockchain/transaction/deserializer.py
 create mode 100644 scribe/blockchain/transaction/script.py
 create mode 100644 scribe/build_info.py
 create mode 100644 scribe/cli.py
 create mode 100644 scribe/common.py
 create mode 100644 scribe/db/__init__.py
 create mode 100644 scribe/db/common.py
 create mode 100644 scribe/db/db.py
 create mode 100644 scribe/db/interface.py
 create mode 100644 scribe/db/merkle.py
 create mode 100644 scribe/db/prefixes.py
 create mode 100644 scribe/db/revertable.py
 create mode 100644 scribe/elasticsearch/__init__.py
 create mode 100644 scribe/elasticsearch/constants.py
 create mode 100644 scribe/elasticsearch/fast_ar_trending.py
 create mode 100644 scribe/elasticsearch/notifier_protocol.py
 create mode 100644 scribe/elasticsearch/search.py
 create mode 100644 scribe/env.py
 create mode 100644 scribe/error/Makefile
 create mode 100644 scribe/error/README.md
 create mode 100644 scribe/error/__init__.py
 create mode 100644 scribe/error/base.py
 create mode 100644 scribe/error/generate.py
 create mode 100644 scribe/hub/__init__.py
 create mode 100644 scribe/hub/common.py
 create mode 100644 scribe/hub/framer.py
 create mode 100644 scribe/hub/jsonrpc.py
 create mode 100644 scribe/hub/mempool.py
 create mode 100644 scribe/hub/prometheus.py
 create mode 100644 scribe/hub/session.py
 create mode 100644 scribe/hub/udp.py
 create mode 100644 scribe/readers/__init__.py
 create mode 100644 scribe/readers/elastic_sync.py
 create mode 100644 scribe/readers/hub_server.py
 create mode 100644 scribe/readers/interface.py
 create mode 100644 scribe/schema/Makefile
 create mode 100644 scribe/schema/README.md
 create mode 100644 scribe/schema/__init__.py
 create mode 100644 scribe/schema/attrs.py
 create mode 100644 scribe/schema/base.py
 create mode 100644 scribe/schema/claim.py
 create mode 100644 scribe/schema/compat.py
 create mode 100644 scribe/schema/mime_types.py
 create mode 100644 scribe/schema/purchase.py
 create mode 100644 scribe/schema/result.py
 create mode 100644 scribe/schema/support.py
 create mode 100644 scribe/schema/tags.py
 create mode 100644 scribe/schema/types/__init__.py
 create mode 100644 scribe/schema/types/v1/__init__.py
 create mode 100644 scribe/schema/types/v1/certificate_pb2.py
 create mode 100644 scribe/schema/types/v1/fee_pb2.py
 create mode 100644 scribe/schema/types/v1/legacy_claim_pb2.py
 create mode 100644 scribe/schema/types/v1/metadata_pb2.py
 create mode 100644 scribe/schema/types/v1/signature_pb2.py
 create mode 100644 scribe/schema/types/v1/source_pb2.py
 create mode 100644 scribe/schema/types/v1/stream_pb2.py
 create mode 100644 scribe/schema/types/v2/__init__.py
 create mode 100644 scribe/schema/types/v2/claim_pb2.py
 create mode 100644 scribe/schema/types/v2/hub_pb2.py
 create mode 100644 scribe/schema/types/v2/hub_pb2_grpc.py
 create mode 100644 scribe/schema/types/v2/purchase_pb2.py
 create mode 100644 scribe/schema/types/v2/result_pb2.py
 create mode 100644 scribe/schema/types/v2/result_pb2_grpc.py
 create mode 100644 scribe/schema/types/v2/support_pb2.py
 create mode 100644 scribe/schema/url.py
 create mode 100644 setup.py
 create mode 100644 tests/__init__.py
 create mode 100644 tests/test_resolve_command.py
 create mode 100644 tests/test_revertable.py
 create mode 100644 tests/testcase.py

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..fe826e1
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,13 @@
+/.idea
+/.DS_Store
+/build
+/dist
+/.tox
+/.coverage*
+/venv
+scribe.egg-info
+__pycache__
+/tests/.coverage.*
+/.vscode
+/.gitignore
+.*pyc
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..8bcb50c
--- /dev/null
+++ b/README.md
@@ -0,0 +1,38 @@
+Scribe maintains a rocksdb database containing the [LBRY blockchain](https://github.com/lbryio/lbrycrd) and provides an interface for Python-based services that use the blockchain data on an ongoing basis. Scribe includes implementations of this interface to provide an electrum server for thin-wallet clients such as lbry-sdk and to maintain an elasticsearch database of claims in the LBRY blockchain.
+
+ * Uses Python 3.7-3.8
+ * Protobuf schema for encoding and decoding metadata stored on the blockchain ([scribe.schema](https://github.com/lbryio/scribe/tree/master/scribe/schema))
+ * Blockchain processor that maintains an up-to-date rocksdb database ([scribe.blockchain](https://github.com/lbryio/scribe/tree/master/scribe/blockchain))
+ * Rocksdb-based database containing the blockchain data ([scribe.db](https://github.com/lbryio/scribe/tree/master/scribe/db))
+ * Interface for Python services to implement in order to maintain a read-only view of the blockchain data ([scribe.readers.interface](https://github.com/lbryio/scribe/tree/master/scribe/readers))
+ * Electrum-based server for thin-wallet clients like lbry-sdk ([scribe.readers.hub_server](https://github.com/lbryio/scribe/tree/master/scribe/readers))
+ * Elasticsearch sync utility to index all the claim metadata in the blockchain into an easily searchable form ([scribe.readers.elastic_sync](https://github.com/lbryio/scribe/tree/master/scribe/readers))
+
+
+## Installation
+
+Our [releases page](https://github.com/lbryio/scribe/releases) contains pre-built binaries of the latest release, pre-releases, and past releases for macOS and Debian-based Linux.
+Prebuilt [docker images](https://hub.docker.com/r/lbry/scribe/latest-release) are also available.
+
+## Usage
+
+
+## Running from source
+
+Installing from source is also relatively painless. Full instructions are in [INSTALL.md](INSTALL.md).
+
+## Contributing
+
+Contributions to this project are welcome, encouraged, and compensated. For more details, please check [this](https://lbry.tech/contribute) link.
+
+## License
+
+This project is MIT licensed. For the full license, see [LICENSE](LICENSE).
+
+## Security
+
+We take security seriously. Please contact security@lbry.com regarding any security issues. [Our PGP key is here](https://lbry.com/faq/pgp-key) if you need it.
+
+## Contact
+
+The primary contact for this project is [@jackrobison](mailto:jackrobison@lbry.com).
diff --git a/diagram.png b/diagram.png
new file mode 100644
index 0000000000000000000000000000000000000000..ac6faf0a0779f665ae1df0c05b6bf6679a5d6f02
GIT binary patch
literal 145582
[base85-encoded binary payload for diagram.png omitted]
z>g;+gty030YxafiH3}rvidC3JrV60Obp$9>w>AFg^OY9MP%(_Q8leoy*R- z-bYWb+NG`HpC-ilgBo&Tp#k(PhfvQh9>E?nCAP|glo`@`D0?4%rnQH)~ zg#87(^q88my~E1Hd<=`8xXy+*dyskDc9w-Bx@f5KWRYH?Sn|W|#plJd)DLbAi&5V) zR7Eu7#(H1wja3Mq*=Zg(<+$%$@LxZZY|1o$DedA~keTL+(b1EJm?rXX?&bmsE>&D! zx;;H%$#*mAJ8Ni}_)r zOrOMUL4fs;BtL{KVilmN{-23`uMp=#_000bb%c=aQ7oun_Q3Zs`yZbGUKrP(ok2en zQB~4$o{hgzGW~}7^IR2Ip2e`X^Dc)tM{)AROQ~2M?(B%?wxPoJFNZ%nS*d(}Z;_)# zTb$eg&EjM9WZHrQ`5Z_4pXZ^mz-^5gy6cR2xqkw0f8N|u_I8>s?u8O!Qee8=qdowQ zlh&1SeE|sDb(4H~qq)6&+!b!FRWleglQuJ^mUi)UT2orQolZ1#^7*|r6VIyfwi@G3 zE)Jnw=O>rQY;kd6t5PjuF;}Ph_ybY!o=Lu$A+ZAeKRB;)xV8x6xF7$HlYRB6-=fo0 zncnU87|C+PbASZ35V7a}^rniVL&=!n*d!#mMK=0r8HmLV?kW!_uA+fs*6zD(B&A9HW6C~$77i^1JLTiyWCI=aKy9Ws*g{z(4KOr*y8VcLiDCuf zt3^(U~LIGP(hdcca9|&!b2Xo*2D4q#=MUMxDltVEz5W3swVwDIDe7G!@s>Gzq+Udh>O(7(Z;NPg z+=_F}Z%{UG%cD&+heja=G38r$fVhg46}c_3+_^fB!JZ=R=88BpkpXokeWUq!n_zTyx^=nxAKMeUIZ}koQFTnvRl820^RVa> zqq1k>m|btbGvQ`V;4)8bBj+0$`Y!2NGoUl(x+dr; zb(>tU*=4OXC}?_;yY5h0>m-qo76wG2y&n$B4h1ieSOunw>6}Ga1sPrV09Whwp7{Fe zNfC5)lFc?nXoK8DkyYYBLtjVTkECxR+R?EnTF&j-=xZ>TZfdK=s$p0pzii6W4QLj~ zB-l>HN}w>avoo;MR1wd1V#{}O4a zx#_Q^wyz&@2eeaKQzpUso(@2i*(A!2BhT%X1V6d#-x9`}4T$aKW6Rlep0n(~bh8-= z=f;TE7?Wt1Lb=wbt0Fb(2#*wZ;xEC@+1k4@`6`?k1KvvVM0GM_@vfGSc$oYSro7X< z$Nk|)E{7macJ2tW$l$M*hFMh#02k1k6zhCxV?=ZJ+4tK*Og8x~Ip#D!>>Hs4sKytTPnJFmRyA8*M!q$)R<*%fnT(4SKuNHe`#-?|3dyGu0B znz@IF{m8I$`0>A~+fvR4*#97Z&i@b!a#rbc_qlEB2+Vb3xm}S(Nm>zdPCD3wCwkus z=W;gBQEjwhOf%r!H-E_A5=SfV2WW8?**V2b7>yY_f)X3_&I{h};UkV=FPki46UY>^ z9LZ?AkV?z>?({{G?OBVV;b^s$oia&__?p9dZGX&Rwn;N!6B3dMf;R@S9*>sI>? zL!O=1O1;3mgUx!EQn_bHh(c!ZjB(1c15AZj$%#R6-?Mgj`hUWG5;zfdVA@eO`bn-$VVYwlKyS+iHWqN zTB>sVSB?m*@nA)58}r5eZYEQqnUiaEN!t0(?8=X=nI`U{5(ICz)C zD3?%R3EKw|Ij*v;Za|YWkBK{5TEtlHEw9QPFGCZZyd@(#KSxa#hFL$PtybvXV--gE z-vBnU!*3d&yv~3>va*`!=&);Wx%48uc+ZBgDOcNNcHisQ$(AElU&3syG>M>7XK>W~ z3_EVk)?6o2!=qrdA|hEx$Cr>-&oH#sc%QGer9&`sGq+g?l+JzcVx@llFbI+Y6Ds0kU05z-B0_>`P*r_R-V!&8sok((|# zQO=+y>9BatTAmJRmQ*a`3-q+?=)I0oVUWZ(Myd+Y0EsK zcHWR`*=RPTC!w)htxx}A0W@+QOA*cK42x$gpYKpv-vx}x%YNny$*a?lB#*DOp!_yr zm?+>v_C4^K(FjGRc!)nBJuerXaObM4bKT2pOLWJL7W|xBf(Fr$t`j60IMJbB#Kva@}ir&zJSRr5Xuh6W!{et97j$4n! 
z5{*Z)MC^EpYw5x26f`b&8U{rjY#`j8YxF|Me0=WwLrJ+TFfx%6)n_2-g|p3eXdU1I z94}t2fp$gj&B;s+XDS8aa58Jgk}|mPzrIv>8ULs9EZIs3OYA-*P&j0T<&G1JehFf7 zyPQ+sH8~I4TFl)AJynM)1Jf{8wLczU5@rH91K%!q9YW})NFbkBZygp5E<@Y4SViIU z&j5VAr_JshJ+UVaX{3N=Va2)3k5L_EB7W^B9FLB{J`Crcf&}ye5R;i4QGg0<46{QF zF591?3dlsb0^$HA6S@SbFpNaIM8V3YufUj&gh~AlD3%hr>bwTMPe3!1Wc8P?qPw)L zYThO0XLm*)C&lW??G!7O;9IKZk)GQ4uwv>qoASpO>1_x@!igCP9-lggUYTm0pYPE` z{aZ@=&1@=4nXY$KfThd6Al{l=Ei@Cg@{u>Da_Z|8MiS|!1=Zu6QM9gv5|n2Wf9jKe zJR=wp)edvR75vwk<}11eU~waSZog#m*@rkEq&<)E^`ZE>u4&f(++%mh+teayBBe&d zO_qRPG(1-+c@hAGOt+`J(UkL9lrUXJCsNEQvULL5=XO9kV7OUZ-`=~BoZYa7^xY8Y z3y5G8-kWhhg77+9Kp<=PMK`j){K>8?!Ql}fo^4y)@kzo;np!#zjni&{WneMsCk!rJ zAY!6yU)GmGLAb3_vRbPBy2dtWo<|(Vd@ zgsj`d^1n)iz_i}IhS@RV-#Y0Z-0#}9XFM2s+FI|ze?yo22aCoH9hg4spYJ`)@O=)L z)Hf<%Q;|Xc?5~s+c>mx;l)onm!pOX7cCQjPGGQma(EG!?_+3bb$pifdui^%e@6PjM z9UlyOFd}hS?)ZO|#leYI|DDJSJJBnsP9P0dho;#oxBGMAyP#I!=_@uFAX{rzp)LOi za&(L{JGiEv0=bK?uL~llDh(fefJX|?-m52fiL}mDZi?x*7z0Fw4&)6OE6>L@DK+Bs z$3w22rJmTFTE_zc5E0nwd7WOdKS{_749R>9gUYT~!EpQ|Cv}K~FP{Q{jxw>>@H z21V&o?@^A`{?E@9$t5^SunRPhJDy+*1&nl{3+fRup(@z_KOLPbwSfCsv)zd0LNg&< z9IYRD%R1{1cWOMILM~dxXdo~19TcK$pkDWP504UZgY%4n$h>A$7ONg%(X=4rqW)d5 zf?!aoOA@$5f4F>QGaziWnq_uC!sCer1Vw~nuh`1~ffXJUPd|FNh{d!cG271& zSx=WC03Nu|fVJjOogAxG{zV-!F0;dpicaHuY;+1Sp@G6g6FqixWr0^N8s@ zi&u=iUsZ;`=js>aCx@AA?_P4*Zxb1|2M5BaG@>Drdqd?&4#xe){LZ6i^I!OVkcjy_ z{jqv5lFxzU=ZA_7rVQ_}XA>)caTQ*fryPu@i8R`Y5_>Nna#TgIygz@v{>2{&`m;`M zINd`ZB(({n$;H@8N@i=ZN2#uq^c@!f!D;`1<4*xy2u=@XmGp70Pm!j;ERIqCHwOX= z+J4~eFlkj2XtI10SrYOq1JW+&E7eA`X)>_lV|WVNkDED7(YiP3!iIhATeXhosj85Z z-P@1k?$x+t3_c6`Z@%eRKmcC#b8o_TK;kQN>aqn0wj}vU!O#jZkLa*zI5PyB z#>6C>!x7ky%oodPJXEfB?BZ)}glKYnGfO4=TQ*-P+E6F^trZ*%{X=6}4|=Jiy05=4h} zLuD$pH)5hQK)-Y4PaNlu#X+NxCbq(%E6d})9=?^nwQV-2+x|0PwEx;_!mbmz3Z>Tj zD)6Ypvi>SLl0CE-KoyjnN>%8klA6_zC&8DL23tJ`?a%>Z8TO~8V0aZ*fcGjBB^qEJ_C?WliV1G( zGuNpJ)Qk*GxDARZ3yn=}0;g}t5;LOvNEJz1a@rax7565*G}YsEK6Q1s<2YYqn~#o5-aN|4V%+>MGdEzAP)E6#c?~!&rZEzY3iNBLPfCza z<$xCi~B?W5J_2lwd!1U=Q@_8A?-oy8yQe_WAy;N&274WzC%VA zhE-q$15t%0WVe><*R!nbM~;q94khf_$G5#bmA> zdl*Ec2(YQ>$6q6dJY~4lCOe)0dRW6*Hn>28bNZT2-g$F)5YhehNBFWc{VW})M$gNi zC>mkN7iaU=lqT*OAy>$R)RpLTyfs#SLl{kNfT{F;hZk?V^Ere0o2<{~t+snwS^+D=?G_2Jo0q#Ff1d)}GlNh1@3S3wiOf*w|#6dDB-Us^BFANJ3Z2zZM& z8)D058JVIZd!P^*kdu2jS(x$9zq4bwCU-oJ1LaN*wAtY4q<8&lq_a>NlnX*S+ z9t+e&Zs#|C{_$aifJ?OM^#89hsDZ?wS%o9B=F`+rDY1>!^XsAq6BFb?X=H*Cvn_f7 z_CB=N4IJ#zL|5)JN(QinM69b{G{a-*_jhr-sH7Fy=*nnt4C( z78-v|E&mKJa*0gjtNKRu#Bs`CKQA;(+$}zDLg=hIHIFlY>nLT0!;w`mH1@{m@65bQ z25!-PQ;SZ@AaJs_DmYKiJ?vHNwy$MVco08Az%d;{I-daBOuik5qnO2sC&)Qi}mzykC;lM4w7lM7~c{H`saWf`AHC7hx*@J_Jy$s zT5b7pZ4tmc+xsV4_m9ul`r%taDahhKy=Y=J6b%bK95p3GAHd+|c9_Gmo~|RDZ?6?c zu^)RqTL&P{2MsjxF9H8|g}PrfIs5cmv7wTE*un>7Xdl1N6-lkv2C*fl%}1D$w|+6? 
zLeRtP0m)if)gb~3&fJFv<*&6^OjtJds5o=aLmL`+;f3v8Xc^t3aT5M%3+08Mz_10yQ?=N?xV%aRGypQAs1d+Vx)6%#F{55&I6=zUZ9#QA0lD%a2aE&d>nXmgBGC6Ym{mLT91*wGef^HQVXRbf0KE&3TW$)x;Od z8DG$hX7hH&SvQz)#(&dGQdvlH!;|9F zTRvK1(WI>Ky-WCa#~gs!%JEeyFDgHXZPLxU@&5~QB}p}40iV#{9Rn6ZZqK&NrFenc zt7Ot2_#c5G2;)S4dpO`%|L^P&_zh`7&Lw?ki`7&)8k_E+pYitp_$i~s9PA+Z8H;1&)o&9IZ~lnMIjCX ze`3WU9B?BuM+imsFigqwUj53yX(l!)((}@n;?60%oQko|feDIOG(W=lCjWw>OM&;y zV$J1Rkc44 zinQ@JNEu(xENbTu&&C%Ab}Jun%|B+sc9-1L%=tgR{gn(1l2+bwlWPR)&kZ0tf5^L8s?k}NolLWD#x(5l2K(ca%!4dpjzTuG!W zI{VM^E%XIuSnJ(&Un60|iDs?C{(DKL-=heeoL;H_J203CeAUvUnWbFV`^eug=lo;D z=CI@k*l~Fx+5U=Zeun_S|0{`sM@|4(o)rn{Df)X!*WaUHX&fN_`~3(jAGg8toG&K zfx+ir(Y(~^oVr@sS328S%$kSp61VKZB0aM<>*v7v!00m9#VB8w+~M&S=jcNj!Mkhfva z={a|-_TNkX`aKF`*A)KZe_>8Sa9Uu2P(s%GiV_LqVP(2UI{!@*1^zaoyz~`G<-c~- z2QL29aaHGy1Q^bCcjDZCm&E-&iXS_>_PT!u1_usl{erW32ZQiGA9t)w{$nw0zp?*P zD9c6HEn*nVsvZNR20D8)#`*gMXFAC2~! ziHzU}44wb)zh#l7zJT7T@(rP2oD!96JO|T$ybR`DS-1h1>P&@e65z-D`yL3I?PoGN!2~tV5MFy%C4;B=&ADe?DkjWP2sLE>XmSht7(HJdEf3cjjUslE5VtFSO1S z`2?CDZez<(DA$ku9>5X3g3x12|19H(y)RQaf%`Tmj%2*+FkI0?@~xP z_rSuq4v_MG3WNiz`Cx%SLcu#C>%CJ*VA0<7 z4|eyb8X{U*1`)KN59xk{6g`|1&YUxcoqjW{|yM+30b; z@WOorO==5@fo(HqMV=HE_VFNYJ=XVsZ*BF|?`}G=T^SPhYr2Hgzo@&u7g^7uec$Bc zn0)QIL%C40eO5I)Nxz_<`9haqK-^yxzfZ1h^1p}5CJW?nJv6FqlNU3G)Ok_Psgfp& zLV;qi`ILK-INS_u_7i%4#r|ViOQrCQkv9Ql4IFqOvHae5*eFlp-%s&RkNi@&FYXL# zF(Wt(QOZCezttFx?|t|3j}4Iey(53OJI1{`?W5wUaLqoLB{{FOo&(pNM(@F;*={1$ zS_amV_Y6M6T7MS@O1}q$2?i@$Mn9|b1y{R1<)5-!n7QJg8g!X^^0X*3lF({%i_$LA zH_9&m-48(WpS0BE+#6EoD%KBSm3gc}cOCm&Z8?teD~y?Zx~0DVH&OjP&bLp{qIQA5 zM3Hx+pA{d=DcWb`T@$Zm(P1Aees)f5*dM;K?=4dIO>FOkjXLsp$rK!~HI{{C!}*C+9m#*uvPOUHSR1qx3fq z{hxWi|6jY=e?R}<2VV-Xdu&NAWEA~3xc~j(D{SZb(Vgf)JojNELZqBF62@-oHR5JC*o zVAFy;Za{_d{fOaefBx_;)f-tIwaGX9>wEo_>DMk8&Zf$crw=ElonNXKFuYKCER`XK z`VqSh9?ATp?_e+PrmH83PNrx_w9Xsq&rg9teHmACU`}^od-gUSHZC(Qn@QKld zzeB25Q*$2Vmj36NAI8B~{*NC6)AI{#O=lo?CVh|t`{XdG{Qu_5k_%(V{lxPPg<{(NC7{M>*y0>1iWo&{{W}x++|BeP0uX%!N}W#|PHXa%e7U z**mOuU}`vwYEQpw*3nI#GAxi1@VxNFz_v0RSRL+;W(anN&d*!(I>azQgl#cS z6ZzfsfNnVP$Yh3n`F%&P`YWRw#IP$~tsKx$ARRQvdsm{ajF}A<%O*lC4$FW|<_x2w zT`9|QpUB&=Vw-xyi46SiBns_KTtvgbXT;CP>y5I*pPdpIm^kL5R2{-&(bznV8?}h^ zRa2ZoM-2EkNwJgB{1Shh{}DDLu1$H z6B?v#RhaZ2KAbs6EkADAkpF6r>XJFBtE2O2hLPP;y)Zxv+X~2j2H%ymxL(3JCJeR} ze{4AXwM9WG_XS>6TK)C0RzEtGG}idw7^KXwUAR7OYiP43@Zy{3qvKAWJ^PDA5p&j3 z#xtfx)X6W!)Lql=X^!o8+3xb}TJ+9+Q{4~&))@}-Fi-B|yh&=tq^E;bVPy_KrHv(H z+j8#@R(!hjgYLhl$=r)pnh!H9lu$jV=_83^=beBq{rnjJ-sPtNYz^f7A8X)Hld4OW zbTYwn50_GJ3cQ~pg`Uod*Y*K-QAq5;qKtj{V^cYT2pw)~%%*FZh``P9 zJQQhcCV~u1UW-n?ISlA(t-))--A9mLyN~SnJkE$Zp5P)5^F)r+i3RPyP|E!WFx-of zVrk_A7f@Jf21Unm6^9$;9NDLbe@F~xiYUk?b7FHlZalq(d)+sbEv4O#23@_tUlC~% z*O(@<6tUrZ&>=Ulb`2G}u*T7=J)35^ z@VdO%?zh_IKcWFWY(AtidOUeu7JYT;ap;zAKiJ8&=e{(s9A;0Xj0o2N$6Tkxcr{xj|2+L4wFDNtjMv%j4y96Ns9? 
z3j$X~Nabx05`B6U)OgcN!k*>iDfh=}B=a?Xn!@&;ZIqB(2q@%b%4V{dE9N}fRxctk*4I85C6(Vln~R_%XBivHD%GkoQC;nh z!>kDER}v%^FoIXJ){I1{;2N!kO3-uDa9Uf37Mcw8siP9)8!$MV_tQQQWFPY`A?rLia z`n9ycFB(boC@-K@qeeP1x* zV_oi!8;HrZqh9lmU75*4I=Y)FUb%ipGwy@pR^I^$-a_qWoNMR3AM}grk_oiW^C$8@ zKm_lW1n*iG9!}L7QtZz+MRD01QZ(URTkYjbCbu0#)mR7J3HV3Qb1tU_i-yq`7sk=) zW2jo)Z!^VcmUB+$NL6L$4wq(s*&voD<_-D6+`(B|G#|~YZ_9_PXr*_YuzVtzvsQu1 zVlwC-NR0AHQYSXLpH$v?YZBY)ULy>Yi)8cJ@`g3SQOyg+5m>?zwA_nkj{`Zy-O}Dn zc%k1%D+GFv7;m=s062xgNjpB5Hxvu#x<%z<|4e_qXKA@W)jRoi^h~!FC|p3>BO~&m zyKccjwj#6Tj;J_pMJmyO(-o%TgYq`fvRU}AMCCcGRB(OvFgI?7DQ;Aj8z4(u^AxH z*7oS|n$IF`k#q7$TC4N@*1OYA6rE-VY<$+N9|yX;Z!eP~E_PZC?9cqPT2!AMC7p!f zHsdkd6Wm>%$JGjBs=oUP81;m~+_&B(FdWU`TEXGPp)!zTeV{v2bKjZ5$bF1Brb1I(QNL=UW(66Sn$hs$OD96g9dvj1~6rf2@2_-2U=Buqo z{s}m`&+Y6+4Ye4}ZY!E~S@N1jNbK;waz5~1?TNXnK<&K;jdac`Y2{~9^O{VvxGb2; zA?oz-s~++^hpESDu7lAB>C@%L;`E2aUl?Vm@6YEPabHp>e!rM<*+f*zk@TZA3Jl%| zKF4EsdmMS|h%v02kuKd)Za!S=cmG+{agb&`mF$DNH}iQo+MI_wpz7KG^646D_U`+P zIogHF>F@)9g_)hs;7%(fqsa{ytE;BUjZm}ub^Qf#WFhS#T2Ogh$hU-L1BxMMO zMMuQXXCGT%v=FHee$^)t?hT<|c+{bKutd-ybzJ;3!kotJfFpzc&gCfh{&ZRGR%>PV z8|!wcQ)|J(k=xP0M_$R4xdY7+G(g;d#Fg~xG9xJCD0Ce4(0}4L`ek7#4-rH<-;ap< zl*pzyG#FRCVD<)@RoPNhYv?_^kx^gFIGb|q@}?&fgD$Sn7kyrQsl}t_%{MUwi!N(rWPT9V0>R(J5CTyPi1~5vm6K-n;N{SdEKqfMRJ78=!+{{ z$z6VcSibf%TU2>Ea`&EuItr2j-#&Ng7-)Kq2@+1TM)xnVP|%c}5S_-BC@xRs zvA+MSDzBAiwZWIveJQ@?(4=$I}kbcLtn`bHUHplu8~uZ!D>}sgF(XK zEgrx^We+{kB>T?Teq*un320c~c>VI(*#Hk=J8jgkzIuT%#Jaaah3|!lGvFmK? zsCC^6y?Do_!W>Uh=vh23ib@tvjYE>!8BAZv5@gcmDVSm3q$NED8YSVIhSBc|eyRQb zvSS$2YyjzXMasqVsNjdtpEiev0iVch2td&jZvOfidWJ0_l{@JD@#vn{8LoiOjI6%f z%~Aa(BAVeSw4iPjVmY16!N6@4?OarGB{qKh zZHBuUAH*;*0pU79-9aT*Xp;c2Hh5W{$VB{$#gO7 zGgM^5`ILYiG1>ox_hDnr6?yi-(qcz~TF=lz=Eu-@JYJ{pZ96m?kFdoAU8uE3T0`)c zGQ;H>t(H7D6vm}`g#ep8u~0M@RF@k{QCpd$a7g&e0i6Xnw=%h@yWK6;RmLy4DIHE7 zu7@qyvbnTJZ?W(4Ue%v&Z42)0S~!;4cJNp)wlD%BuweqlDUv+oWZ?|UN1-w7dP9$Q zB;LFL6EirLh3+S+tcA)evE~iXpc!Ac9XAkgGr9nlinIkwx57BmegE@8-4VZ@0LjfK z{xDacrYK8Q@7D@WJrl~0qa9N9?BnzTK$aN z7i~*g@1Uz`P)nDb^psa7typn%Ds)qB#2o&Lwk@>g3W5krlU2&pZuf$6M)D?^7LgRxn0q;cp%2BAqe z+5CiVMZ8~>!Nzp6i10sGQ6dpP&+8^6>EZJZR(-OoD=JjZ;8-t-GqE$B>G8p5`~1OM zMmd=qpHijyoU%;iA%)(q4c$zaviz;m4}PLCTI><$Jh2}k#c0A2c1xtIVs<-ui`AVh zAxBG+`q29a!Mo>fx_M5=oiEhuxnSLkMmJstXK;Fd2wvpe8fizogRXvRoNR?gRJGn;~3vQLS)nS(Lq@=oRUy`Q{0kwTDmpL?U}W2|Ra4DjP2)!8a)PSaG=L zo|tbAY~HX=CZq|o;wxm-SunppB5}v1X~J2HRG zL%Z6CW6Qk9dEywf;SyQQl%UYW(T!5q4!F~Av9GPe@mZf&b;(!1PqlnDO&zimu%m7x^m$wLlf38}F`AdL zLDF!nMEcj@_Z*X}^$zlvk7C0&TW%K_t*1+|1N<)pEHf4limk^*pB0e{Vhcxk^WXY# zIF39m)U6|Kc($h^STSbm$iY3c(-sn|{3NCG_+I#a%{P-dy(v;oXr0`;?pWLZv*(}` zd8$>=>A^P+YUEBfyG7lmdz#>b^V9~=WMEgPrSEkgChU-!5AYxlnPC<){+A5@xl3Y5uYA1A#+i@xHBE~HGf`u7KI@J-e z-_{86UG>cJ9ZfTp9y}}u4&mK74`oIQgHkO2$3-(z+Q?hCO@%oOMPJs(gRN2AH?}{! z=_hDovRk2LVIOt*xa^O9h5Rs|uA%BYB@R34@Ii7;4eh7TaIdrcg_g$s(W-hO=QU)m zP7+>)(pgzX5I4YA%}z5!DvahwQxdOxUDQLPMLv$Tnp z>n3Wv2}^ZMd!3c9F|{1`dcn;{?w->bCd`e=`jqcKzC>B_nkU>+&h8q7ZqN**wGNUa zY}yIlXOpv^lgu)av(l4`mi2($G%S-p{NTk?;|bt5=USBfS*JKUG3lD9B#0&^GTNF-rfyi#T$0Xs^7mg@*`1Y zGkI*^G57AXR-}by&NqxZz}43tuV#hjZ-;i!w_vh=oCV8Fp;7_eHamMUOOw}%v+Dpx zv`1TT)?@n-$5D)XyKif!zlw=5fDtvwbVS|(q|7btj8G1}P!KjN}wXG(RTq0a`GjBd! 
z`uXAmoj|7oiP^}4uVCEH0#@F`_^@1~NiJ`x&8}yJVf{Nb!NJ{oboolvpMqoCrmZJP z5q8laT-;Sl7JLn2_9GAGx5ubydZ^obbUUHO#OOVX6PaaD@)EIxk_DmpxKHX0-?-ew z`3T{$p4-he2GsY5r^&&!)FgTdAg$w**KXiC{x{8oV;$ydX)DH2u-ve97CaHB*lue` zeYq0rOX=s-uji+p0CZ&1eFezA42)TGvuw0xHOJl@zr-m~C*o#ZYkg0q@p$qBCQVeB zWOsH`(h~z)`26@hj8(RBJcX+2ag{jJdX=pYx`PwfY{9YFIQ=P%oY`UD1YWilOOqTLGT#70uXYJctR-f*61d8W=DJp(eDvgs1jZG-l|GrPhARPPHSv)Z6|>)WBmLZQ%BER`9~nW^C5W(qO0Het4KO|>BBdnq1jp= zaZujmX;F<%)-N`9gqM+bEiSJtqC97|@xS`@VpfCt_Tdgg9G8Xc6#QqM6 zu0;r!1sbsj>RtrNk#{dMzU$$<_IYl>A_&6NhToQto4*mcZE4Jh>UA*D^{WsHnAFyr zUQB#w!f)@<+OJY9DqcJ#64BnOgWE~4^W6yS_@pS9os{>1OM#gGwa59H`9w4KPqncG zt}U02GkyD_>-z79?7l3L`taw%TkSm^Rr_}893}G?3agSfH2#Hd@2-G*E)t zSR7r4E++cr=4^g!4(ue@0bH~t-;(nqVL|Tq%_oQV58a(#SNGw=`mAAd;}-&wV*M#I zS$FRX1q%D}*@JGzV&!BE8Wh4tkNfoCtJ-kQ4T(StbHYMw+; z!)XHUnKTaL<16FKVcHU>!4v@!88PBf!;GJv#f^2Jgmvb7qv;V0!{V^ZFLe%ISJd|p zDO2hSMMYgq&r@RXLLQ4%l)#93mdk zwo3@n@ohtmIBv^7*9xR%y^hyW&USzGjvAXs2ejVFn6w~j)D_>HkGUz|=qG&7!g`TV ze5j(BRvsVVLD4eoa~>J*{eSV@BzrJm z?Oxs0v#P6W&N4~EPYrK&O5Z6`+0-Cy0)P_}h}9crzm%$jZ+q$lw36f%h*Lz=HO^ez+x`s#(q{@jRzx9fGk!B2$>Bq!D^mur69 zJg8m7a@U{RQ(ng(#VEZv6h(X*iv=kpSYw{k8oL9P&xrm}yYN3U~1IL*O zrZ(3j0?1h;M(ptZIw1~=?|iTXE>9m$pnm84r<8i7aJX&-x5V+ejutQLI*?^`4s?&| zaJ6Qw#nNrdW63>Ddoa1q5Vig=0yoe(*_@f(Xcf-6)nAEnm6imWG>y|(Lsw4M=mj~6dIyxTw7=2%} zr$rW*(JS@ZdNY92M*zV_2*10|HdT-5Il6Hg6hyPe^So48VQZ&-d>uoYUgom!LDW{t zL7BoiS)1oy*}oGrmyl*ZM1B`LQ!Bv28Vq)J0`>u4_moh`vdyWw}KcF$1O3 zULr)OGGm5bsnihPhf^@iVt^RP4;+sEx!Mhh)_PZlWPjz!CPY`C)(6>#8#6yf0I+V#}$f$?mfu(&0x)*V;I zT7kL0Cy|G`g|`jH`bEG+4bMHA$wYX6^;8}fIY@CKf{4$B9N2rgySahuQ}41Y#$Q)G zzzD~cYPkcq1VJt@pTY&^-i|38cOgA-TFK{Y6h2UC{C;lQ<2*dX=KkA8QUlKStTYGK zs)(gkZ}Vha(-O`4mKy%DXM1wL0%`OjxO6pn*O! ziC>o)1Z!=&iC=B;t{;kCZt$U}Lv74YW=tW5gSg!$n^lH?k1Ros!;Id8l4IP7Nf&~q zsTU|(%c9m|+9UfR@E+;#*{HpQ+a??o@A+Jh^)~n;Yr2#2jtd=Kg_NtU;e$ytH3bkT z0(Ie!lvn))0vyz_Ozxky8A+zEqT zExy*AZEuW}$V2X1lb&okhIldU<%FY~=cmrk#G6bXHx!?xm*T}`G1Zz*jCvC6R;M2t z9~DN@na3qTyp$vqUqit0f~aV?^cJCPVl>cIV&;-f4i=gyQJcxdul-kPA|S`OZpF2s zZK%(E(W#XnY?|)3Q{!E9(GvR*W;zM^ydth^NE#ZrZOxlS9Wn!_u=M*T_d;mH@ZS97 zMsBOA8k|N6d16amF4d?!lT>v~3Pd;lYHSDKJyh&BvBIPh)COzi6mRMjoZ-$ z63W+4hHt&^UcHq+)R?dagWm)(ntAc@leI@2HD9Cks_SMMrJK4jydTz_8d^h*Qe`F8C7eH}oJ5uNHT;=!jR^vNewbKOphJnH44cD5c8+%p3kkPtQHH^UUF<7u) zuP|Y=hZK=1^=tEs2L@w6ncuHwnmKV|#+&O;9QJ#pP}q3HF(OiT**HXFZaLhO#pLKJEe8q#Ov?0B?olZydH$kTnC^K---5 z=2#NFOkOK~W80tE?XD695qmRi*FWSIvl^)ws8kjQc44H`Iv-@@13YN;I!cl*+Y262Y7Xhw->v30KsV z5^Eq`%RN`Z8dF-_!e|`hb+l19W{&wA!X4#?)}up*V@NNq$Ep1L=RGok^nyD^vu66L zVx=_S!%81xrf~;m0;h$=Rvxv(dB8a2^sypd1c=pl_VxA48I9%Na_Yx?f+sGms?I+R zH@bD+D3z_P=01lNbsu1}*(3}t{v@TbzCIy=^A4ezuPLb;!D%$;^++tN_L?~d?s{}r zS6WQNsqW5~^gu0M>&#Ftcts=%=UAgibMqCP?uTT?ohiB^9>p|sHG>;W%p41qB)*$~ zyDHT-C@66r?qc)&b|hp#tXMFdqCV=Zq^ifRdXSXZX_8=EJt-Ee&P&IKs-bv&v=S_u zqkItq`w8eUAXax=oTKTQ_NzEh4w9JGi4YUgk3=Fgg6V5t$wPF~Aw8#qvbfC^3dVN; z3yR=221=uS;d%JOGo!H*8xUrizA!mXp2Aa9F0pex71G2RnP;O=5{6ri=2FTAN0>%1 zLd3t^peZfmTJF1NZ0y%H6yR_fk|m&WA)CzXQ6N~}Vl~FDUNOFJ66Vt$UQ(*ruN?`c za$+B(RRuU8XN-p=yWHe7UJ7q{WJiy@Z^H(vS>cm=Ph`&Xq!MGOXtjr+si^lyG3I1k zLyLrQ+$nmS5pr%+k%+yd&zSAwEuiy?XlU&uP%*0E!^1jt0<$Pdp)12crNC(c-9cW1 z4iw5kVyPVFB_XDE2vBuO9>+i@dzm2LZw&#SLS8XxtV1VD_4`ZkVq#I^ABrTt`6_%7 z3MP1Tu;2CYW#RtyGgzCXz9881@x=OG113sFl>96_=he(EW2O?79sxOfA7=Yh%^*xn zV|eQ+aDA4C6)3YGE}a+Dl6G#n_`N#bUQN_>2tTpr=Xjfl#5BL`g6aE*vYYgGLg)sK z6jvFV+R%0jd2ItMspA`uUiGj%Pl6{KTVhJoJdF)QN4Pvm0=+`*NPQR%eR+~pPD1J^ zjfBGqj7N+sgN)I}*{NkY%B|M3;_^en9Lv7f;mGV;paea#L9e)T-1qr$iDxQ%neW`T zh(}(?=IJLn>83 zA4MxCc4ClYxKfutzjxJI(vRW5*iy4~A10PiZ(sWCzv_hGG^_=YOF7=d6uZ07OO9Kj zN)@hd2of@eJ-+RUGYiT3D`6 
zF`jx?oxCV~jMo%;c7Tv?Unrw00*9t>*J6e@0(h>r$PT8nULy5?)sp`auoG8UvQdt< zQtf7&apd)QBXg~y!^UM|s~m^n-hAPI^O_q`v|R5Y!@vZO#q3QByGQ83O2)t!HjS8# zyfR*$LKU4BuM5;>tP>$tEUftS(;sVrfmWD?a~nDom5R^=#z5v-t-N_T%Z1Z%AI61l z$sfWYk~H=cDkUXD%Byi_nJSk)%{;kO^;Dmub}LC_Mt@%b{YWUuD-E4}4acM|Bp#ay zfEt#Ui@29crH`r(%C_^nlWN4Q48h}Kgv9cFf=weN!X>tB#B2FyyG%w8viN-V==r|X zkk{(RH$-jKcScXl#D{OyZ?`5*WrI;o!b>fuj7+1%!=`;!jlb}@%d5R^_t_3AdjF^` zirAFx7&$}(KKXs??rYD3t5|hp#bfhEe}KToNa$DW1y=|t1?+%`56nds$}Jyu>!9wh z5Bp`#4BqbqmuY@2X&<1@rRAvj5#m%hwwW{_g{gD69n8z6ROs*)gU!M`_=%hB&L)6P zFkVuXW{j?9hFfl*>P(eg=ITlbMnkTIP3)+zPDw4K#D9_HOpwOOCgX0m)DRgA=y9SGK*TTF< zO2SY~>rz5+97v_VveKyd6tW>A>`oj|h)$@!oh-G;ZX=;{-DJKnR++QifU;R_aYR9^ zn1tec!n__rHT!YDXYZuMIhfT^c}%BGnJZ5!9m+TSrvY`s6>{P_?ek7DyhT@$)O?i( z9~^wAKs|EX$x>7JTdi-owILfvY-?I3M_KY;@;A0>#D?E#iM|q({`+97wv#rv{v_odt$Ih)ORoukm-~{jl@2gYIHV0PA@(#M`!le2Q`zQ9rSe} zpA%+k5R{_MeRl_bVhsZPXS@MWP>tosiys6}8l|hD(o@HLxSL&x7gSn#Zzl63gW>si z9oBBZ(8p9DUaCV3a}366#D++$g34$F*UrB7&wJZ*5b(Pfksz5s7%GnDMhDXb7|{q% zOk-=EuuwTnDvlI>DBt-xS^G7XS|~KWCeAhTiY+mP`a-DyY+oya;w(Eh& zo~7j1_b$a^JAP|PGw_6tO>Iqe<*cdk_%K4D%MSn-$o~Oxb zhT>woOx_l9t_Gg(wXFEC^XK|)M4R>Xg6MjFM|rCRbYj0U=1A<(Ry*ZHv5DEo;{~bh zl!Rh&`Er?w7twOG(RH=AwRFG(5n+tya!qaEEGYk3N)^XOE+WzQnGb9?%@Wr-&1d{mta|s4M6t-X(4p- zsjTG2aEVQc`z6B8L08&a>Y!F@%q#pKpCp9sW$^{78_Jb%2%LG5*`!rxMyEkcqQu>` z!uHm`tVD7*82oMGFOrefI(HE>!yg1v#?s%qMU-Dtho8w_g2DunkJZ%bUVjr`#ue5* zTiI@euQv2*?j$KS$Ex^%Z6n%L`wihiIRobh4vIHTWliQ8KdCYunV?dvK#kVdihV8_ z_Uq&04O4a5?dXo9&dp$KCZmS1`G~(Rz33pZI^CDYoH-lzvu;J!@?KJ0$8NvF9Lip) z(g}wwM3~Rwpy|%5Mo3D^ELVrg!R6tj^p|Eb5}$$Ec$^m!1QerFG@1TefE$(M1ZXq7 z7t@fp~`Pk#XzW2{> zmcEo$|NFy$!ufYE1j`Sihl1epFQ2w6fCJV4``Z6E$^ZW{Dd4h6Te0X(oM^c1B`>}t zc$9d@?l$~K-ZrF)*xWVAJ=`c${v&dKMKpki;IyPgT5SbV9Pfvf>`;Qy_H-lEQD+D! zm0>+RVVYDLDfsfjkw7*CSPeDB8tgqT;iwyD_`r_ z|BN%O8X0}kSxt$gL?whet&onhXVi=MNey6Z034ZnyH)+M?XNehoA(2R0w|cfv}kQm z4$i1B(ElQo$flz>Utoy=gp#gKI0h+j1DQ4d6Q_~Yc4&zT9r3p5J*G;f`zkiS(iiHH zAU5i0*JvPZsxg3$3%jFDC)xLH)z_Gw;XXWP>$N?`RP} z`Yf0#v?xBw{X#y1fdo6TQK+8mrbK74rvkn#1Gb@ZC}LVn`?5tW{V;MPPwrh>rYKxO z21l#ISue|t&x5|tE6*4|M3{k)L6nL{a6t|T;~wMCia{z_&7VpD znK-z%w#>^WW?<|BlOb*E6@| z6IVgz2>B>ZNF&q)D44_6b6lrrUX`R;Dr~HL=AQ3^RGUppGTZdiK6ToJf2ubmm# z0OOyU-wMfy4YaTQ2o)yRY+*d+e^p#-OkneenG80HJU@SnD_?#tqjn*31aaeRs5v?> zE+W~n0^#;_BsX7k3lv26g|_0y`0XOtMV#~dGwMCSsfmUlgSE2k`g`t=AU5Eo$pX7R z1H$Xs9U;@l^>lC4jFiPVYA|9mj#L_49Q$*juWY%TgR&JtHs9~1)FiVvKK4Gu|3%mn zOd)X5V^-k!NjXP9oJ^B7nLXA|4tU{3bh9FgTuvMsFZ?n!aEVNQ!FVHInv1r!QIakV zqd9bj=(38lT1)OD5`dzH?SF5Tu`&BA*sq$Q*iohcFYXPB&KqAHR?~*~bU;RT5b0I@ zZ>c~1q>f0Dw$rlhqdCU!!39Y?HN;YgJ67!Qv2XE{UX8bZ)0S{h)_TffV<#fIkbUyf zXYuOw9ln15$y)zrwSweitHLlb)pruA3{ZCXndXN~S{dCM+`p*nRX>_{xd|{6Kd(M@ z{KWliWE%yv$h(VF|IOlxczz8*e2BLH<|Qj1-d7y&1EcU#$qM z@pciqQOHv08he{THzqo;@|zGqZg~?i$Nl$QuS@HXjqf@1!b! 
zSNm8h;m=8x=&vzF*U`uDdsy;<`n!J5_Qp@%7)-?h9N@+A=U)@<$jF~6^)JY%ILPwR z6xT7o1Oj7Y=L2jf-6va39eMs@4U-#SH0H#rIAMvT_D~D627P(4e3`ZFh5ClcU3UK4 zFj{%AG?{a%0Zx8$?YNrLNT1g6mv;3pL{YydST{S*utWlW;veQm?t8I~;7(V;Ft|Kc zaljW*Eaybza2D&Vey>4BeeZgY3y5Q&<|uW4#S*RCvJm<6RZ?q%wtCux?pS&e&Qc9K zQBl9T<4KiALhhD0UH=7iYpf9RqhubWo5r(ricFL+*WfA8oZ@X?n!d2&BOFWXJcd{z&^@?O&qZBs(l%0D$P80aP2-p*0OC0Z2ZANSccClZWy0xrWq$Uh2Fc zt&DIb>lsLRi%RUkrfV=6TH1*4H%|H!0Ri&*guN*^@AHpS{$44Y8-MlrP#_xS@!zf` z%X$q?ND$t|v23?FTIVX>9E#3IGKnMuHP=FymGhdz3I5NZ1i?*To`d4>e>`Vx4Pr&x zCs>Bhc60MG-nM`0`)U9g(svf7W z%d5};>~#Sjwg4O+EHBRu>KxQ+@9s6pg0UYasSK+zEAFe$IkMGwn~?axTqzV_pLv)kSE z@Clv5EHCjAZm(s|6_s<;Ma4!z2$pIU`9Hq4sd;WxM8iot(_ZE)_sIQx3Q9JB%sB5=#eVu^D2ZF$NI39qmZ9!QDN`k1B77U4< z>tE^Kv|()f`p)9 zX4!X6u`hZa%q;^>I;b$7;F-GKK?b6C83?xvu`i&_v$izuEH(JzRV<@=D~<)`lEMZyS* z@GV?e9B~SXGW-eQb{I2zDqwTJ9nxX_t!zVt4+J)_5;xl1OMuG^ZiY_)`=6Bvf@$Hs z$JDr6>Z4p$5La&`iw=m`ROuq8hF>)w&@nKD18B-3qk+Fy0w0ib!GU%q8b!hY%Xn5? z|GJ5?5T5E+M8y_d1BXT9%}KU*FDloa6QBI%7VZ|>!c7kOdD zD<0=m|8YLRpUHA|?M|-C`|S!W>14h6pXKOE@4#A{1qC-e_fMVBNhuPuPyUjGQ%rti z#~65U2N38n1h9U0u(c-wb_D*w2iBP#02gecrb}Rf;3P2e6Mi6oRT5|DM`U2bu>THu z(6CI*Cc&bT+VmOQ@zsU4z-&#ITQxHMHex(L6vxOY3pAGf7k&au|DX6N$cb`u`X0S) z&j-)0qLrnFNcmTYqyQa`_C+5I<#0pUEX?mG{KH5Es*%|clh z1zsK00j-(z-+K-?#n>^lcjpt-pPem8<-5g|&OWZ9Ri=}r9BfABvAHX()At<&WxdB+7|+XF(W%HJU(5bmPAmzM!I__w$k zYmnk~E%t{T;wf__pwoBWyKf=GfBfQ$Yvc!DaUz(o5&i&}4Xyss*<335A8Ay(Q|JKf zra3}K5`&e(Ea{v>xb6tt*;@Y#8ZomStpDW!fDh_`1sGwlBTm8sHg3ZSbo%^HpaDql zdi*|}b=j_GzfO`@%ah97g&Wwy2%*&N|L*PjqB_6<7RGXS#psH~`|=nN^8fgNF$jLX z=g%^&wwsr|_2RT*5x8I$&`rd)6YpKV`Az*1(|LI@rx!rL_t{~PLwf;)!T+(qL&=x8 zIfvw$zQtEx`-Z}CX<&eroh!%eWA)}fDD5AS2oRt=gtL3j;c6x8x2uOo8Svll2DqqI zU)}D))wfKal-7dZ zHqF)B)+qJIC&5qyt3piP_vAs+aZ!n22S_&kK9c-xZ|wgOmzqW(OTkG01&5`P>Km+M z;#u>x>Ec@a-}w{260iCEQA_C{0roXrSC2e5V8@jR{r~$^qSNCQ3GL%6!j^-uWd!<;rq?v8GZpNYyK_2GshA>@k>XRlPD064_&*h`<-A3i-}tO6e@s8{KjK8voPSUebF9af z44D~tJ(0ff*58jBLF2qSkpgsC0YSNPriI&?gE=a#ItI+~w9m3Qlc|~SQV0n7y#!yX za$Se8w+51W54GFc7$-cxOMAy2kUvpm}xxu4sejGgytups|bXf&m~M*s$ge z8842&zTi7l2HD_t3uhWZXfWaR?Ql9zP6M45ghX(BX~RXo>xlZ-YMcM}Y6Ge)=sCx;otU%AsC7`j z4QT7SWOrF*-v?nlm_V#bwc7stLi*RA&6^lVBrRQNRJWTC2$WIRH-9D=h+}#UFy;vk zQ|NiF^)q$}-`9W4SZFi}erZIykx!e#Y+5wiecIC*J{Su z$fQ7$+I$seKnbS!JBD-3;vB@UW}^`qUw)9JYN0|zfp}>vtXvPdUTp>;bwoEhV%w;X z0pj1r&zT5Et7s-N=-tI%vEG@?dfjL@Bi84Iu1^&44<*nQkPxMRJKA5>)9oE2-SIj< z#1U5j`EOFn6B`8F6lrGT1ARCO?xY+5k?RY|SY8}NeEzTo2g6*e<|L{V#%K?|)4CHt zkb&5!>0^V(CR7XKJ<->Oot1h|N-~8c)yw3$@6gd1A-)P3DBsA1vLD>591i3Ue^fp2 zNPIiG;+ZUzinM*JSv<$@`xK#fz$S8F8mU<9g)>kWhB<Fwc997xQ@e2|=f#(DVCF4y8hr<7#4wL2 zIuWBPLKw3h)3(~;O@#bJ1)OZdi<8Y01vdbWX~rqpJHjt30i1aAI2ZEAiMDQudD7U+ zN+J4z3CqeHID22LoqEzUI9Iuv<&ldy@P&oP@=+bu@nI6fs#^cf4IuKO0BztU08Q_S zG9{g@;m6mr)=L2m)tgzKg|p%t#%T$I6AjL0s-?Rg>eHwP+-+*yuvFnJ=KI z`Q}KIy$_z4U@UDn)M7ITUT9J-G+rjDQfLaQPN_kV->qD;aA)bXA6(SQ;r954YdGR^ zAe9FiA9uaj#^*C8({oueSD7$aQmB%;KDpFLfNzMTEXzZ4!#;y1OnrqP@bXK}&>oU#gpu4j91pgPB+ zUKl8fc*yy1aFQG@jF1Ddd}gyBAog9< z*yeRMJoOgnQ|;^E*!MYB7+t&kb6vH_ees+u8#RG+^~a3?p?#CoV23~SDqmM>{+;1? 
z88K`qpu+ENR4bua882P}N!-81XCg-V7b}d1<5N4*6A~@vm}(_X4kOk$C`1-#Djl7Y zCCpq32bSigWm2{NpyT1nPb{3hm74egg;YX%vFT!K=5xpGA957EgA&o1S-^d944vcd z2t|$%)*3+ae(H4x4~zF-SZZ}8oWc9nQGgMy%1ZUpLx%mu9~S7DCq}{?Hn^gxc#VZ| zIAn=5;ZtR@CPtd>umUqBgwnuwf&~3FuTt`YH%4ClxB7)v(4E0|t^DbfmX@QB7>VVn8#iPY}58LE3FDq!i;fZS!*Ft2DV* z_esDr=IoY)TX$@#UG}t{N4f3eFNSINRr(QC7`<<9PDNkDmU8$+ebFx<~Ci*6wy z-;>O>D=|w{Z$3mKP{|L`>?$WL`<}vNgjy{JeS6`qbfzlmLB}-{gZ;ON=IU*uUKfnkP(B}OEGwyP!-Lz z)v1F3go%A9<)xKe0&kwd_?n5*8@QkU;s5{wz5MNHphQ4|4A_L_l8yv_Lg0T3&X!TD zereVd7jCWRK4;B!p!m!+T_0TXO;*+-QM`zmQT zOuBB)h{KnGdgmvn^V$$qe4?H~t|!CIslst&MqO<-?a=t}mMYr?@JqYyx??~-faImD zdwh${shiDF3t$pyUMq;f=ZJ0IS(XF6G-;G3RL7)lo9Jd$@?FqkPna3`LtEm44i3)i z{9@yCDdhVX>+P6|6gb!_$LsLS=W~GU%JP$X+G+~j8mrqhM0{`3NHk(oFx~$2OGt#$ zTXGlP@>L+2h9GTEHj3|`*b4S?DNbY@th?Ixo?A35ul1o62*ZvH<^OHPe{TS?ELZR+ zNQWLaQ>5#=2CrV|c+)W~wIZIJb4HUHwOv#U%OaXdw%$vN4{KuLN81>^ww4;DBybxK zpL&PYi*r~3IfX7jr+>!Tgj#h-JEl|&@^rqUqJ*Zd*m9Yk`>LnK!YGzcX6Hl^ItrAq zx&5P9xuLjpCR#K*ow{pwg@nU2G!xFK9ht@hHrsHrNQflb2vH5a(EHk_Bu4%(ZEhGw zfF9aa%hatJXah{Bkyyf3MeF^9f@q{P%E$Q>DVly0K-OT%9w@H^%IKzUTD72`X_o<8 z{9tdsH&loHqaHerOgf~Pq_Pm*1e>e)dUY)8n$jsB5oN>J-fS3`yOhJavx}Uz2MdZX zhTI71xus@y1Y;WemZR`@Z0B86*m}K&5*5-eq(++r&+KYbgaz9RXex<-MT`6sA^$B) z2>Rp~A$~aN8B>&^gNFw+?d=(eRs1H(L@eHPFF_)Q>P|3cvqAweEdRoO9n)+1d^WXH zB>h7Ql{_Fg1U*53qjq%B)u?k4!|kj~12&0|r%q|6+T^%rL-;3%$K;Kp@o-Mlu+^GE z24DZ1*j*<3^g8`eWSPq1rR1O;pwO{_h7UfuQda^fbbJvAVMW0xH;GMmJz2>;IQiso zcjzvQDrTSOclTb~pMcMRdy9XwKRWpDQcLa+RXYLK7|$fEFLv9mdcmlSc5?}YJfU3z zw`yl^7XZm{@sMz!jNvZR&Bs#IFSI`CC_gHOG1yDF=2`*nk*2(Bjrib&wZHbB4I&P-oIR*zRO zoWUcMq^qxZ>M!2$I7^C*iWt>@F#5P6#7{KD%N;~ZU(mJKFP9N$<9QVZ-H%#wp2Me= zpLYCnp%FnEoz{BX#G1Q)B(oMkDYvoIhvgQL-ue{_l|o@a|J|mkK%IhxEPR!?!VJ=j zYBtQ1UCa<9XKJmYUFqR(#bXtsV18}y``ylcf(wTI)wObuTV3st9OUs^D>Ru;RE`+or z;;Z+Xm=KeH`c5~><9roRl*X^8o!-s>M@1eak$WfokR%5;V>aeaDitS6W2sg^`g3Xa zHHY1@I=|1u7PSve0^zb4xvNOirw(>ww2990H1^YH(35P)c`n6xN42j!Ns|n#YVmvZ zJP7kOJvm)Wt2o2@viF%})i zK+VEv0rdxY;AC>fAzUk1SwEn(trS8d? z+{Nz7S?AFxCMu-g?yy%Txm1y%8`Ys@ZUlrSN{P)w7lh6b6V2=HK<2x`>)-_?ekeaM z7!7{kzhKc^;a#L&Cq3Qtq1zlme=cK{%4$|t^)vaWTO zQ(!CPy*b*v6Yk*;|?t{YB$N+a9`)G>v|Vk z7jIOzb!nD{vbdj#WH|=f9YJ{{yd&~vX0=z(V`?{j&g7O`jB6h!8%n1vTpppMcs(-~ zJxYtI@}t(mu*Rb0-d6V5A&J6nA5>Gj+<*4~w0s4Kdn^>*?PYun+FA!3Hn}J5)b}gb zWL5->R^tkHXb=!!LW0PWZZP`Uc*I;(zP@K0kaB5E$v}`1I` zzMdYr)cQ!2>_T5G^@$r!WJOg&jpcq zXG&v|#FPSXhlFN@OyraTV^o_im5KsTfpki=uuK+ISVdEL^z`son;1!N#5p1d!?W`7 z3f2)TVomFha*zu7 zk`?@R7bww_gjY<_H1zkt+0{zLas=fkEA6 zZt6)@t0^;Rs+<|mqQX1Mm=Lvnj$AI4KAK^u8w1^D_wb+^4AEn4E|Rxik)Ua7*eV;K z-^^gXjf}dYIL4RG;me_!16HeZ?EHbUsWSRoCh_^`d{hVfH=P1OzO}Ag#FU#?P0^RF z>v@@+%Npgj#v^NayzD<%?eeayB;_^Z&h|ZB1$W>>9g`8qYHj)%nudMIR+&6kQyIgk zy^OjyZXjmdn8C(YwzAnh zZuKJ76Vqr|s-#;@A#}pg9Xnk3I3(pX>1xCwt1@YKv{P}Ufp@*?dNN_GyB#1Qd%i_o zWYCK1HD#I4(!AVc!~uC8Hut63mD)3jPBonH&1uX8>W$&!R&5vJn{knmZO^1}q&2lR zE5s zqMj9Gz@{^C)nS2HO1%E$LC#_-&!BsKKeh~IhY>D!v<3Y7;TGORJYo_GJKncDgO?4r zE+_W&{hn$97NcJNEW>fFqk|s}Vahs3=A+Y79&|K;(v|gxo0g9S(yP9?E_UzvW24l( zAD5a>!_0bK<0FJE24)YVi)Uid=lJ+#5z5Ssb1}@N;2{f3$b*bieQjx!t6pWAN#s&d zo~_C4r+i-0;VI(xKk_p$Qu_ezv!zdNyJXK;=V4YNYPc81RvV`=naq$0M~&mTx3tPT4Vm4Fd{5Amv;N+#;W4O2HO{cu?$Io? 
z+9=GakHenWW~nJ2f8OaJ{oLe6(?RVTS>{@41Z7S^#3!R%3s<&SC0})_k3SrfM$~L9 zvv5iqQLTLPnzcv|DgMzQ9XC?56ysy$T2HjKTi|-z=O8nbL$y;Q^mQ0FsQdAhA|Gwb zIZUr%*~FWkEw?>Fo?AP>lOX?bDDPp4YMp$r{3lV4qCW+5?s-Eze)BFzgsJ!Wl)zjq zBVh)&sd|Z1cEw^%^?2OBJw$Y*Xo?W<20=&(!;&q^Sm8DKj?fNWZJtL&Hbi@7wigb< z$rBr1y^`ff0=2T@u)OX57npq+)<;JZ)sOUW@X=(P0bk^l`}06K%aod1TS_}Df*<$m z0Xe7%BXxIN_R*C=bgoeMWQMbRTNdQ%<`3EC^L^3SMJwAeR08%}oB=k9aXq2M*rgoM zR}LDY`RcRf?sYlQ46V>qahNlHK?o}qL}46s@Y#B|tqugG0vhzN$|ny>3Gwx|QxHJ4 z$rs-I=|c0qToTb}@M}~Da?!_y2{`3QtMxb#3?NaKt$ClhD~Um^uOoFK znk>JTh%D@p1v)hE{58?2JFgHMtBd_sHv9pU?qfXKLP1qV_{n!8!fZ~^_z%9 zg{&UK(dY||z4N&VG>AG4N?k>&(V$)0S=+3S7Lzq|G8{D@+t$+roeq}dV=LaNkB{!Q z8+_mh>6idT6s3N%qV<;i4A6-AJsUv#=52KhBB-9iQIF7nLMwWx7k7GFGYQl#M3Mkqm)T zF2@bA!UgT1>6OMtIM#PS8f_;;ZQF*&^#B~~^yCLWwC6A5Zjiz>3-Yz5(A>~LF?Td(Pi zt3>5+PLRsWa}kNisLP4=oj z^lY&le91-T+mx%+Pa-6jnFRwtSkZk2SS5`tSEKZq?7h68=#BPmoatUc?eV{pDPdLLRr6d*$B7s1lsg=>F zm^?@Jy}VAnz(NoGG=pFS#xg++D=XEALT4m@Y%N7&BVA%M(p+s&Dj#NjG+zPfDP9om z$gtk~Azoq^NUwT79S}mNe8jEHTO$fsY4wyC{o$%cfJa10FSUwbmy)z&#A$0|0z2vM zrk%A8`S8wD<3<*{w+*L$9xD%i71>h>!v~B*oM5qAEH^ejXxD(m2d3-(b@8VivgefC z?A^R|Ib2`QNNkyA_KNR9;FGTi5{U+~-k;<0N3qi(g{m*W!Zh159I4T4g8Tx*vM#=` z>s}(X4~v)c>rlT`Sl@I zc%NLb`PxFj#hIMnt96~rMH(PdaL&-h_QA%HW2%%;Qw6qYXipzm$q-)gjR(O1?5G`YI%BRIPsq!?-`;OH~eokX48G zt$KCXA&uWBZ@)tK>y>4l!UZy03z9hC7b+qkE%t58MCZU0q6h@?rKzod8@SV7A>KL{ zq;qjGGs-g4q}C%lZ{bK{UdjR9W|eAMUKF*bCeGcM_%8n)2;qL61OMB14INj_ZvJ6= zcLdc5&9BphJ_)lX!vTb!cD~0NQJSATeX{n-2u{3uR<`57Dp{ziJBH3@azq`BERgmx z^k_NU<03M`_#Mc-_<#eF7o%>MA))}T3DapH+lLd=S$3hQOKDVPTN%0BLA^+ijdZ+> z@O~X^OWSMdiQ(W0r(h?u*T<`c89F@0g7`N}-HwO`?F#eq2%G*fIqtc4vR!Yt?33-k zm`ke!7HA2!n%9pCrCwETi?cl-Bg)_JY4*C8jL*^d=uz`M4e9!RU1-h#eQ7WZcp>Amtre zBQPBKyn#g%3F>JZ(y-D`0)5=Zs`scXDK=Q&hQ(?zO|Tj=6P8(++%c5>%MI)Y8PQzg zLafvsaQu8#33prb1A!4lpZ1Uy_`a)hxsIh< z)5A8y2vE;setNhQ+4u*Qf?)v1tM0fwa-z@k{XMMdX@^hS-BNOUn!pz+JD9;nV|ZD4 zp#|kNn@-HHFn*3#MdTx*eW)6&;&5ax$fr&k(~-I6><;f-x*+G?O=yfuNa1bHagu$0 zwhlc7vAGiVwsLRvZ}0b4_^>=EMUUq==^Zb1$3ObgPOOvmr!GB6yEknt5q6PFD!#JQ zHz6w_jEIfDvk0Hh^a)jvjZSXmeQgZ-ldJ&#)Axq>^_Ai~4`wJ3)r8l=gb}4|&?)ba zbmo}T6#AEEjOjLra&ZnoorqVBb)8_|WM*z{*{GgoueWa9YeuSGrcitgpr7*j_IauS z;vs1$nUPVa+2sptN4V;*_I(t&%V4qPP*7!6CVe&j+jynNA2UU?8p-BrIf}3W(;^-& znCkjn$#{uY=0z0;EcTr6rSjCnw5nhBIJ(a)GuN+w^6v1?+We5WTWM1|(H|)Hdj*34 zLG#d6r$`tgb!v1dHqW?M2--u9adMl^EPPseUeP#^TU95wwa2!K4F zL`38lUZ|^{bmRS~$dAA*XC=9LygB>fEmq!2C;kuB4;tg+?` zBQ8(#RklJz`1VrRlR|7fu=r1)bKemdsFytmdUgrhD$4q1LyTxZ6U6WBe8%M|)Y1r~%3DKbt{h2%Y6ho8CmQ|N9}) zYciU{M+E$c_TWB6Y_IV$cN({zW!RG+>4N4FvYCOU$znRiQ89dbJ1MZGvQO z2)hRtm_Qs^n;Qgl=P3g4KWKAf#7+GCa}A6rLmc$+<_dA__zB9=-3}L%pcg3%dQoDd6a)_E)ql8 zMJyc;t$o>BtaprNZy^Q_kBk|AEvc}AHl{@CwX`=V`UQ*#5S||v)bQ(dmSYkXH|fpM zpDtV8FCd7FAqVDpv0wVn>v+!+m-1 zSh+X$_-kcRzJh8AH_iP`?0Pgc1s)Jkz6Yq?;dVRj)?w0xNv3glAfvjK_b-*?t=36TcZvsf6nI0g))3ixu|eEbr*OSy z@y@CA&*mReu`hO7to=0IL65=JO@HPi(^ZlLxkWonM&cx6{mr)Xk9+gCw_i?eHsq{lI3hRqIX!_h|rFCUzSI&~=FdsiZKki#UtTm>p!G8Bywj;(!~79;%rrYP1+lgi-$@sFe0tnI+%)&(O^~GYGWSO=%CIo zJ+!oOQC)T6QSAuIVOFN1OS)@tl+Q8iVU_q1+f5T%ERbGsy)ll*xS!G-yvM?@zY6HizNZ^>lTS^3m*^6Z0^5E&}HzJf>2jtR5Q3zfe= zd^S*^Xpgf59Nu@v%vCtte`RM*4f$&n|B2o{P8dG+M80Vwv!zArE?avr>Y0-}B8p5p zSW{$VgkHV|5xG|1CzB^ak+(*Q1VYxr%Q;~J6IK<)TPU_hl9RvLYwT#E>OVwSr#IQd zCaB$6e*oo{_SkrRkWfXWIZ0wr_A4`9bi^jPCb6&Tchu+WFr=&s7vlt_Z%0mkxm@YX zemdS={-_#h)=8k~{Qixu6k6(=2Z%RL-pavTO|dn%^>gaGDkr3J$*{MB9|1Alw#)AI zW$HC0iM_3DcITseX*+B3_F?mwl-|Y1k#`KmcRX$n9L582GIQ@b?~oU~hdW8lfJ(V~ zlC&AZXFKy5aImuWjNaz6Cn7}wE{IL5ePZy=4VJ36F+;f&xgjXhbmQ=wECU6 zkMB>jIPL0&lg{Spo!Q?1N4a_turQIJ(sr1^UkRqmWXh(k3lFV!{0)Yuhwnz7!${C4)l;6!n|lG`hr8Mf9Wbf0165MVZ5 
zDv}X%F%X@%4Sq2J2V-UFXB(LVIU%n0TloXa;!}pZoCdf{&CfB5&2>fIJb4|=ECD7T z@yg1*IP7Zx5_A(Ne<&xdjMmvvB+RsJ!!DIr%h3-ff&Y;rqgIB{EAfwc!i`&caH75MC!FkirHuDR;@Seu>H6`jr4) z9u1BnR2~?deFADzletDNsFDH4uFe_P&ynfhRrNZyvBT+P;wTZfIO1KApiVJ?Y+z%{ zFj3~eWxoHeI$#a$8&mco_yjYQdHrp^JcEGV>CvJ}kV?=kA*(WsS{M-}`7lkX`%w4JS+ES71 zw?U<60C#RDvJUdyWWNkNHHCV`!UyD?yIqeKQwQ9Q*bSNi?_SD!qL!R7kr+Rnhj7p7 z`jIE&@mlH;_iaiW)K3p=TAP%;C9nk+^y90I6Nz`@2|JtMt9#+Wp5h<{9b(J5WMB{) zIKUVy!i)Y-M)Y?{6BujED-4YWHzFqedf&uoucoI+k;5|urz5tP8?=RrT8B?{8b z{3Q~x)c=m=rB3^w3GC>T>cHil%dcSIG)ET&xqZwOJ=Q4U>7mGN(X>QG4W0Dv4S+y| zP3hg$-i3*Q=)o~^4S%LtNNL2O_ioQ|Cg*=-wQ&wh45Ez+-Y*lHk?;I)MBH7z&wAKTCV6O_H1Cu0-E;6U{pDNNn#iV~eL^Q8Y z?3B%Zl~N5doX(x^sML-<|7wEvOQuB|r1Ev(A*J?<@ih%f==F-}zXb(K* z_M?i<;8E!G60%r$^d?qYP6kT%>WoOEuqRfFP5uoJb#jTtaqq+LqwNoQVbJC8bbcBdJLcTxYm!k}>U|M<=RX@iH%;8m zHVyEL9QSCy%KX@O5Fv5F4q@+R4gqeGky7q`qs()4f-4(M{3zbqV+o}W!GNwmN&$<8{xy2`LB zsDzxEgC}iiwgW93_aYrCu5F$X%g)k`;T7ak?7X)VO~y)ap|+)19!zd%1P0AmH#VPv zjPrpk?lsbXx5~fv&nz^Qw#z{w)mqsc%_*1a!#4pU(*lhpttsFbi7$kPfLh>wO|0nq z;9Bf75ftEPQ-{N>2X61oOF7^d6)H!78QYn*K{x)Dl=w}*U37<8Ppx24myb`mI#+U% zYr*-bP=pO6FoQ+hD%R7{1lxGOq2tZd?lwk%09)$Nv2_u|PJE3y*fT zN5#Y|yICkU!_`~QNqEBfoi0@VQfyXJm|9^-(SgNlBJa`1@cYAUQ*;Y~iHfth(Is~x zq+2mlWc#g8DD{Dnxr>bJF;$)kT%3LP#^~%6C7X%gnXc=tP%Nn=cTh0x)NTuGcuKiI zU%piU66VTtn;OMg23Mxc{K#bc@I`v%_>eYh6cG=03kb_pi;yS0=JViZw4%U>Sd1U8 zf4DmRv7d^11wO*&UyFAT^3Js$i&!jXqpDU~-M5vCf!bD1pLey%3Q&Z|v=BvfP|N@< zd_M<$U0D1&hR5lO@U}*?T9D%==d-PSuFz2i6Y7kf@s8GBZ{~2Bm0pweDXB*QGpVla zYX2$yt<9{@GM6_6_X7r=c1$V*PSv*1Y4=3t5#2PT`2ySbl|U)n#ojl)T}&;1*#a&9 zq|$>5+s+3>>pM36zjn+z5fp8zl!(F#@i!Hq&VBmDi!CQ0_|;^)5zR=xrF;?&6F6nO zNbVQi33DR$EByYw0r@2V2T-N}EN8j8zs`IEm|kB4iL{fr!l8M(yw18LWDmH`30xQf zqSqAq_#0vc&ysfZ;2CZ58~M}JiDm=ESII!+O(Q2;m%%J{OVz=iLfxnXX4jPeFLcEPoj%MP!)XEF8=E7co1FZe0 zCt}ZLpIFL#O>6dV9)8Nn=bIG%w$@gjl3gijyMDgKLBZup;`?r+uvRuLX|+=9OMXy2 zyj3<+jBsqE_?#(A7h%E^G$O>*HH~aN$z?{m`M4b{!AQ@Nxiy%eXw;Yc3O}KDV*6%d z?i?p^Slm>N&N3j6&i-tzTY9o8GJNT#28q|a=sI5Ncg7TA2_8}36bC>rioB#%ih2cN z{Jm`yJbUd7`dr_FPDJnV1l-7tSEqOrNHi?2==s~7cHSQ@cq=A`n?126ZPRBxle~s( zAirZW-fO1u94u?415`i$W%1YqMzkGX~kCP?Jf-~t~$&?)CDB4eP(Dx`L*q2RI8=3*<^X$Qp?+2`m_ALWp-wG*&r{y`VyCGm z9A;e^E!w1#>pB!|FYus+Laf!Foy?f{2Co45h)P!W)%Pv)ir-BnkhFC;#qHU?U0GH< z_Jly*&NbE>d@u7l@ey#^ZaEVD^v6HiW;4HHgJ~n&6FB(4wP(8;VVm>+Yv!thJH}Nl zRSV`07hLFUGIb{N--fwNBY4oEf?A98o;GbGj(}RRm z$H#3Zw=PP(lf^y4Hpj3>rQp%+#2onqYULi%$v8#w+)n4$uRkJW4yN%^w0hnrS#Vp+ zS!;7BJ(V_JJxIly4(DC|dbc0s|BB{oMc0`K1r6tjN-?n|=NE; zR1VTw3@qsUFLz%I8Q4!}>c zj^3*T3aXMGf72+U(~tRWA-O74Ih4Sp3b|`)1oI^&=N&G#CZB3Zs`C_-GU^{hyK-U$ zji%XSGQ9o6Ws|qy@>E!7K3+9n)fAVKhZmobH@QHVdAA{r*XNC1Iy?S9jJ_YgT+p{gz0_@&zh7Q0_A1FBFTWJ`zH^QdO6aqc9roTR2 zD=!^Jc(dt@^XczZZ) z&Opnh*f7DA`t?^1Ib+@*4b(#?1&{F0xtmb~H_}d=YB+UD!u^uGbU5k%#982DmL6&q z4xlP?>kW~AeCG4V-%c4|-z7^{^@sE{d)8RjXQr; z&tzFjX5jotw?&HopwR8VQ}UcScw9MKd9C=-Vzv}3l4wm*@D8m2JLh$sb2r!1qYI=k znxv$ytcUQRV1BGI@a`S}%8I|wAlzBsYb^<_9U>W%Kj?@u{yss8U_(prqDVp7{Oqmv z;drS@>pBAP^C2c)?g`6*yr?0m~$VHG`V~!!pd6H zElmth2M0g3J)=Koq826Qh&DZTetTbH1a2VvV{{>ScpHv$@M}J)>)ljVwhxJAao@&`_wD>`d; zFp%C$8zj^1a=auhcPllKfVIS7kGY8?K)Jf~0$EY-J@k8TOWZ*ONh1b+@4yA7{8aXS zXr66{S`H+QYrAPFb`$N3M9*i{YpP^5oG-eONsK zt!`SK!1fmxYm8q|`LBTgADpC#wxOdlKoy3BOQcS=D zpwre$bnclJq{_({na->%sUeWtNol-iRCBAQx7rsn=Yh9=b7QBOiL)T)db*dqaz&@? 
z#MtuKZ9KLy3tvK%yz6K-!8m_CmXfe#W@H9#M^!aECPTkV=Ye@8oK#(9`i&9(ToiAV zsA0v{lnSL^MZl#>b~)@<1`y+~G#MWMrN`8xvq*u&>Ti5-^h7}xgC8EK$I)7RX(*Pp z<>_cHz3(lJF4tFrD>ZRpy58Fy=`c3jX(G~c*q?nDMzx1(iN>_J#N!deD7?|vptqW> z$)IO}_qp~uY!zRDvlM2)$O(u5$S%yszsWAI1z5XOm#-g0D0eZb?j#ZoHXc7ddlKmH z_jZMkQ;C8azzYup$5uy+MO5}+27sb#(oVkOTM#1R9nk5txknw(HD(|`B@cthZx??> z1ESJ{c7OCQRl25Dpxl1A^#LzpV$=al@!{R?j6yldZfcD=5k9PbUJlw1Zf>YkCHoOF zkep9*ZCnw+?WQ2rea@me<=*0a9*#v15QNV%3Br*eZ~3Sh=Sx5wwi_3(S6*eV!TibV zM9&8kbNfg8lbw&gTOUIxPY5|Jo+#XU;B>+c<=|FxV_gxxDm`wit5%n?H)`!84s9bB zczbjecCOf|$Sgj7EHR#d*r7YUpn%n=V2}Uny9UcnFUa2EMOf8xm8d|6nTEDGbSCiL zDEubajp1Z}T&Xs}Q6dzgs9b=&9Pg2EhZh>&e$?$M@IEd=-&1ky+i0rUMEZ(<_5|OcaZ1`pfh&2%S;MtZ9}MFKJK# zF0M0VgNb67Lr`B3Z!>iBb2h@c-xG@v$BjhDm;pT-$@j)Mq-T>zAjp+DZ zL(YzWq`O=1_7lSQ`wq%xIGh$wH0XYH;&RzMr>Zd{f@fGJ_ndybMUW7T7#L36YFU%4 zlXXfTe*-dL(k=~1^B-9h;Tt~p`6SXm5a4P(Q2t-E$z4RmJ!@tMUSzP~uCknfyfmyn zeH19$s)V=VkD>cOgVByR@Oc|QDfyHRiLpN%R<4bW#@>Q;02GEZBoV^ zTG}DH)(@^s&^)+Xv8Wr6(ZWwLxqChPdot|+7c+|!Fv@LGszm>TPhnvQuF@+oMcjA} z3=BaSg_8nh&4P@VGTgs{Q^ET7A3zeC-{09chnJBlO-HVL*GlCRIv#B$DHE038*Gw} z%qnCc>)TPRBabK&Om16{!?GXOi%RI^iHwbZgo_4e!YG0*D)EAq7g2;^(e9NNr_!qmE0u z67;%43e<&VvUkQA#xog`g5iHxl3r4sNF?`le?-)qQ~drK$)lF|=cBYVFCGzZW3|zj zueEg?Cb$S6Awt2#!mfi?sa8X4nf*Q{( zQZTYK1V~M*)#7_Bgz=Rgu^u8G=fyL+O9lydo;XvPRNRjwve zk^w9yk<*nLWr@(JLfX;x-atq~xWfOGJS)7{3HUT0lOS%+dmm_A?XZ_4?|gY)NZxpw z5yg)UpStfqD}KPm-YAuT`MK&@dxyqU(k#bsq4B+NodT$pKEg+v$TiSRwo9ug>Iqt` zJHm!bf!5n&`%&D>PrKnUOJ_Uu>y8dUw;JYn_#E2sM)CtP4|?KTT1SvM6}p?Hdo=Uy zMN(t_fX9{>WJyx*v2h)XOV{fh$F3dCj0&qB-OtAq(eUH&YtKbh(%)}Dk3mF4Z-Fb? zh+vGWFz+(DZB9KaD0L{bPw#PGi>-3*+yA8VBI!;W(MyYozp~GN(`ydg_mS|<2+N~V zIsKhVk<{iY;SF(iYO|sforarn-xxW~Wt{(|Iu+wU2MSssISo!QU7l8j?`mfx00l0* z1vby2lvazCG39No5U*>H=aG)aFdW;NCAAFL7SG`vRd*n9027dbsRS-}SSH=cdF@>p z@~5?c=&)Z&2qaBTX)2}V_nw=Zv(anAYW9xq&?o`O{vr6OTp`Oq!szzTxniXO!&;e?v+WnbCLmnBE-3 z1iV|LCT4ldJo{oG7u!=mT*{vUq$|88j+l~US><7Gaj9r396#Pxv5PlwU2Tkw^P21l zS?&p$hcvC>+8*D1GZB9}<%}8___Y7W7n1bVrvg(1)HdQd3D%6Fv#)vQ4r(x84jBSM z($^=pF5wTyjiuhv81~?A?EexwjbUQcR|$TEBzhN>uS&+o-u??low-Kfb0~Z`l_kaj1-B zi-9exP{o|Pdbh$8`r#@Ni?sY1UcD~m{5tFXD>ELUR`fm|C-ns$+QbXpA79t^C_Il) zX?$a4?gBW`40eigsm=|;Pn#B}`BEi&^+Mk{ZeSj<38)XxU^;7_R4>-LCjNF{(--=U zXprTovWLwC{*V}hV-)Sp@lcpS@OFr5kAHWoESX+rF}%!vyig%eD0tj{-^$w>9!PBe8#eo?q6kMd8Qnd2&znT?pV%pB z)=WQvq(}e_x1eZ?w+9Z5jHBGlF!2CjC{@}e;kDi!6y;x=@z~hO+b&t`RJTRwFT7jf zbV1RkuV4{q)3}ugW%@wW4QCH-*Lk(iQT!zt=C=}>4$ubz4+OYrQW_(-k1`=UwVXRv z5s*5^D9xskb%;ME8#MqnwMiuP4_r4!O9ev~`-KT-PHLfd+6*@Od1Qg+MK()pyDc3$Hso6e_Z~LoB@}Dw96&I9-S#NVwQh@d0(}tXP;j-uYuZMD0 zG@!!dyDq&8J$8!Fx;s~&cP7dr8N;aE8WmsYSB&FbX`AC$_hsXq$v*Q~u1~7PWD@1? 
z3jW5ji&bLAg*K;qRE4wCdfX7B-&WIcZ*)io3Ak+-sEp$8oJ!O#W$WyhsI}T%o6mks z(9p1(*nYaLC1^Efu0#}QJ2RG}e`d{p3tX545WelkBg1?Lsn-BZgZTYjNZzeLM;Y3( zzT#|0*++&yT-5yEfw{ZE(1MCXEaTFzg4bvHRfj>I3%kDBtgzsu6k2(tbL0+D7hm0g z_f}k7T#*Jv389R@IvBzA0zl6?X%I;YDU-C~uP$}R&5kyD3_&t3pkX8~La1qz@;6g} zONaD$D0GU6J)TSRI|UU)$6=|KGu3)Oq#m9=26Kv8b{Klbn0OvEg7Z%jb8$a4dslp( z_}m~eo%#rRd0M1Vjh4T4%mdd>C1+999^QO}`v%0)Y{Zr4**(huL&@#>C4!ZKz=KZ2{ zo;Zm70#0@Z$OTUw%n2&(pCx|1yDNDs<6z92g8cRYU{=+UMG$&Ydj^(t`FNBq)zE!* z1Jtov%rU|e6%RSfU6*C`|0=J|Qb1{(NIF4*J?c%0pV%Nc0=oeEVwSj1<8bc>tMrpk zVJ^=jS1Ich?dQU{z~z1LNB1^?#>9%Sx*wvirj%3J`Npod+SQJus^HUEELiSIlA_XI z&+h2~TiRn4_z@AS;00aURl>e=I?Pi9hTz7(Gg_s^%K-WwDi-Q8mRmffNBJK${|osg!&@J!qJE7#2Pzw12OxoG;?8Ytux{yMQzi)+3tB+Csm| z8|oZkm%(Elb$6=Nq+;jRXMZG*$5!n4xh`dJpgMA`(csGv?nWAlw%JAlkz*#+dC8Bu zR-ui>MqSHvf#Z!&iYL!J?nk*u6QgOoc&#OtBevy=DP#{u6Ac-gCo<&pBXGTsv*NIu z&?=z_KSl6*k|76~X$l3(_U3-?>>Unz=+d}oeBSFdiMh^-7sv%qH<7$b`a4(J=z;10 zg!Czm6MtMX5>lNvzhFf~*6)!1RpQ}{FPm?)E_vHMi8eCZR@C#8_pSc5;a96VS1rDm zoXHLiNds4QK_jqiiDt%XRyI>Uq(^QB_j3;8$#ShL zJ3CSN%;#^)KamFhR+r)Tm1 zfvWNFfHt4HQy63uWta)A+({Uc`#ZVn;DJI{SQY#l+Z!5e#fR zwl8egyA%epqWF)*o9X|lKl`}CWcQ2 zxo`BFiGc#%kC=F8r(NSJKap}$gJE~s@;8cnkuJbD@QzVS0is*?h?EW zyglD4C?ovjn&dAiyaBWMjc8$4wjCugUh(XH7%dcGcC~K9-dL9Q5A6+fZs)Vt`U?j$ z1*h1-xynjDi=v?48iEuWb@W)QH*7(FAnp!yw=_^aw%n^XIb6T@9C^B}nv>n$Ep7;Y z9N=pZZ1mi*kxi$LEcKb7ujR1fvR)bxr5{mEt8zH+RXJ@A2}3;KCmF#q%qVIMt3=oc zwD2RGtCivdRdve(U8RTBd>W58GQq%OLce}_2-hhzyMkKEaKsB$JdvOf5no#_D`R4QVVk*@2I`;p_t&)A|I{{nxM7^DU}w;eWd_A>fH(2nJWYz{ zGxI3aiuu0H&8Yhuyov({B-UHFKSe9o|AAmq$vJU?-WxF=yCx(&K68#A-IUZ$&MYOK z*}7Ff`%#4%=v4RX2QuQv#gif!I<1eD5x9pGeQH0yZP5j#9SEvi&RBM^TnTx7IMSV; z5GIV-!@=zmEjCKbh?W95n=M5M1i3lHoe-pPeF>-`9qS*^;jN9Qxqdtu6XpO~Anna( zWOp7B);YDOhY~V9r=yAae})cvZ3;V(d;Sh6eOJR{cVTe#eyI3iQkl(H`%t*u{owxL zC&T8zXPOoinrdvMns-&+kJCRQkt2!M@fg~YPY*p3ZV{lad;qxX$~NJ1#D7=l{}fFG zC}&@v5r_z%(-mW9G6o9qRx8|_nb>&6M_&zzQ6LYC`l~fu9<^&@C1K}t(om||5DRmC zsF2Fk(>26PE#up>}^ zkbC>=x5)QhJw0c!$z+4;rTRpF$fW|Bww%rcq*G}pOEuLMX$?aL9B!>f9v^>H{kT=zaK49 zCD`QweW1}6sudC+vxF!rF^n3IDN09L%C{Yl$9>y?a-HW#j6D5hC8|v`Z(AQ7n@Ws} zg{%4|J95U|f4XoY3GMKLfJybB1t>c&TECiB1&XSzY3wxDr(%4gN+^BB;qpdnw-&Y^ z0l^*V-YcV=?adbkBS!Y632FA%zaS?fF?h)Tq5{yP(OD>IM8`QTmSm(sUF`Qn4A^T0 z#Vdq_PncBzUT+$@E!&ulJm7xge9+B^l9$&g!tE;a%T4oUilKYjvv_ zY%&F%75+ut8?o1Qliy>00Dc&o2PPV@a}RsoRfM(3+wo2q9l z*5y4H`55fa04YAe{*+RNyoCdeNJ#3-BmBfi%Vf6L)J1Y$MC{U9OSJhqkLusi${^td z$yLC9)bZkm?r#13Bc058j~&-&+02(>&rX6VJ%VzAs;}8gICYj^N48a)?t@x?56}Jx zghYIlR^G;h^&M$=T1kf?$t)fND`LV}LHT+=yGiW?>lJZFuSns4BpzW*sQ#jrj+^&J z?0;$pa9|+aMzGouffy2oOf+lL(1VLfhfmT+1}1hK2M5>*qb72+^+DC&M1fW&B^cfQ z=ruy0jiR{PYnIsEpAxwv0kJxKy7IBM0GR_3l78fw;TMPc@7AhB9)&3~i2v4rf>S`Q{D0VzWtf=D24M_9Ll@MAHAZGYAEf#&fy!)r~6Q%t<-iWI&x)nmf&!1bhRGkb3 zoITPgLvmJHoYL|6o9Qc+jh+gtVf_H;cIvX~wdW^Goa$v}4-4J-`sv%=-aAy`Gt>ac zLqEKTGfDGHO3{2Hd(0}~^Vm^2e4qX$STLlHws-Ib(EnlNM ze^8%cgwk<$1tU>Y=`MWFbw9}9`?Alt-)|_(q>19OweMsxR?Yv2yD8V`U|b||z>UO) zmY;!~%;-I!mjD=G7D`=O(w!ZWX`{UD2cYva=Lt{fV)JgdLC~xHrO^V09|8g&I=Rh* zm@Bltrvg>ofqWeqR9ij;U3d3toIP^cM0%>oOdefNn-FQ{Et^u;f^9=NnCQ%@6m2^>hp%{yK9I(B4wigNk~=k5x=8E*OY$*_-=`6We6>AvFXkVpz1KUG|A==~u)5O;))AUL|~LdqSS$wQhfya##@22_S~GOqPHFhAYeasUR(KL2=eYAL=x? z!6(rtI5qz~l_e~~732reX>`W2)@_x*@F~QXtrcbLDtsjh+!u_BmMP8a@Hvm#T31y> zB~}b}c3~>;Q8js@^8PA5YeZry1F#>E0$qI~nN^cdOS1sFkedpS6Q3D+krJu5gPr$> zc=vawqXBP@qOO)>r}PFou+Gm?_e<%@)+ za30Tb0pS30E}DJZ>arhV;%Po+P2HA|d$x#G^y-)hM*XS? 
z9|N37`z?Dk_-uDW$g$6B?8jQ=;0qWfwVN!Mm~`8M&`t7UqqTV6iq*#PQFHEvn}v%3^E1@cde34SYa^JVi540*^*FGQHAJSzn+m&;5>!#`>(PKf62- zI3Z1of$v;NV2F=A<#D8!^1m}vsCCvQm1?lj^8l`T`CRQaS3>s**8len@z-xva8SMd z1J!_~1oYJh7~T>(iX5{^v=+lmPzkvL9Ze39fDCAJ_k1TLRBblb28uh>fLC66{3844 zjd?9C2E!{(d(69E`Rrmu1&K>4_R9<`XT%TOzD>BjBZ8n-c|@3ki--RKutQZF9IzQoEQr#(K*HV-d{{O8(bYuiGOD9t0 z{d~uPjDpYU z3$9Jhl>!m-*+bx>2&I1tGne{VRSCmvt@3-@<3?{whgyq*+Kjp!07ic*S#26<2m+-D z1!vY?cJ+%sPF(|$OaAva!8`?!koTz{VM>7HE$@6zcVB0=u5`bEXKS8q?N56?AYB6Z$JLCk*a7yU6}+g$;sttO~zQ(Q%09xD#3}O%JkJn zbvU#`93_ZxGoLK`>^tcRD+ap#nNe;Q%k|^YC4bYdXFTzN8zHO@VFo)`&9o>0F=9vy zJ;VscIn!PC^*wQEYm-+-6g`eSMju7Y(!Vf?7fmT!h3Cb2rdCdxZy-!M#^PU9#*w4< zgc0!(fImhtE_@r%Ln<4cB~Y+0T&F@erzlA-Gmp!QDEUd@s-aw|-tR0xps%zWgOEeH z%*~jHw}i@Gcmr(lRgByUX*m%g)m*0?6~5bX%2yBo;RR{YVy+F)wouH&AUZ9MR7`pe z;KUHqfrMBS)>MQz=B7cJnDnCG5OumuJYEIDfrXiGD=G!sfLjV#GBGi|VGAPzP3#cL zh;&7{HU(RARa^~+yYzeg+Dw0Iu<~;z#<|%_Yb8?E9#B;Pivglbr(D;ZB|8M}!cN_n zC&d9s_Mb>pCBV8qQi+?W|a>6i}E`-SRC7wxdNRFE9UrD!!4!P~!0WbAi9M(DKG$b5G z5s*BRkkQZVr0GaS$f^8z=L8E1jcW4e+zOW&P*{jW`W|c_JjJm!*6>&yFtP6sl$WkEt1ax`(zrhZeU7}m1$l7wmZOW==vwVo zWX87kVQgyX`71LP5^WQ`4x>}1j{d)fpx*_J8(xii^kgPVBH`qF2{r-tSgun-P~u67 zH~T0FJh9NKPfrAVJ))vOCZ4yS$WS~oW3vo8=d~kncmfJognARYuus|6E0@R$8NAVF zTLUFm!5rsL;w`R143j&!zW|IMgtH>btlzqMn@q{PEc62LJHX(00%gQQElySMXV)9r z%L_s&_wQ2qo(o!7BQo0j5S|B@_#)MYp3G9v@Jp4sw4kC^2vM; zA&0kDp088}KaI`N#ZpL5$p8wLFAtz#4e$i(IMe>UA`SczB)ID}ymtYS`-cqR#G3ZY zRyaTP>=uxsjTe4MzoHvC-)1gEPJR!xrGO@L?K~~c#Fu`MgdDKyy+|T0LM?27CF2%h z2q%mNrK{64QBy5FxvhA+v3X*qz4L$sWy|OCPG%xxs3dfB`*BRMv&O&k1Q9Wj4)?g| zv~A#EHTf(R%mT>bWDq^+wu%dAtgRhPQb$ zC;MN1pf8={NgckQ}PyqpjA|+>VQ15{$o zdjPu*xL5-Oa49k|ppi)u5I>9PjDnR1)_<16$6ZhYGvNHv8|&y3!&648e z>^B2yTO$aeW6TD%gs6D+hA;t06fBRyFW$1g*CN1ZuU{!}Vf@{_RK*M9Z2RdL5Thq6 zl(p{?;(c~tU|t#y%m9}E63D)T`;Gd|63?_l1_rk*xkis60))it(or(;<7OaoDT4K} z@Hk4DT#7eJnSaLe7+Sr~gQ!Q&@&K2J9K+1kDHp)wrOjVzJcHk50LYG-T%KPmtQ{K( z0FKD{KuQHRGF7K0XwNo5`7;K*%Tg{L68&~=cS96~vqBTN)ln^0D$oEEne_SZUrZc$ ze)(X&hoYHTD$0nbXwhVFHz~@!%)oq)OJcVT>=cO;l{0E zF24)$-md?YD^>YUH=M!~hMhz;x$|O-dXehD0XU=HIjy|L=lMivt%oNIG+3x-wR_&H zmC)B?+OL>zm}4;TpW4K?29MKZgHdUZsf79uFWU10F!3@AJg96) zf6Ix!XyX4?oA-CO58F%Z2ylpa_kQ-Q9<{bh&kei?&g0iBDU>ut0P!nZIp%EVdfgB7 z#q9*yzKqoMc97FzZXi9REX5z188~uZ2vC@H5popm1?kEh_r-E z!}Ka^sEI5uV9Rv|VBP;%-vQ!p?LOA#Ccs3R0Tnu}YG;a<Nf|`Q) zSoANth^f@MTFDp)pu1?lJUeY3BnZ|0$!Tpr8|eaWa)cOq6%r7kwP1T;76ulQk9XIL z(=Tj~H~MjHsbt@OmrrQayn_jkl-EJ8asvM0x=RYmjhBgdd3B&d(f$!~coN9rzy}sv zaAUc9N)+S`f}Yrd_p-Pg)K2jzK^;7yeY}t--{lIvEAL`i<()Rg8>U33nR2aIz^qnc zM873_&?$XvNG7jDexOo?g#B@QVqn5lMe}-j<$5Ap2@6noWp5c) z<<^Cb3Zj%qcO%^(-QC@iBHbmhXp!#jPU-FzSd?^kr*t=*2lw7@ednC(`u_L}=X&NC zbB;OU9^+o(QUtPudU=KOQ;_4j z&}2rA8p#BO!r6ImRi-qBHMdtDDR3fbAf8rk2$wapJ)FR#>H<@f!&x&|Aaxxai~>xp zaYM)ihCIGx7rFzwJ~#wIzdRAR(29TEjbIHSz&vU|vZXuUAFRX2^*#Px-k10&o6O0C zo*W$$y(Nb;f*iRA3wiZoXs*288NL1caYm-@{^plev-cw-ilvLxQ1)!6y;cD4bB$xK z^?wzwf0(M5os@n*?#!cM0mhs*{mY?plaBk%qFU6a=lSS!#P2F%B!%{)jaM5TIjGKR z$C{jI=TC=XN86E95NFi$veyX`Cl`6nU2U=AO!zSM>RW`|jT&HRWz=;Xg)==lz19S~ zUP8Wlx$ZytPV`|PHUOgreZxbtQn;~s0tdH&UgT#x9)A@Y|8`kM|>#rdK#+~RfMQf&}df~<5Ld@#C? 
z4ju<~?%%eX6lyq?S6+GNMtK24mQtsw2Ca`E=K0}1aeE|*TH<95^cSB1jz}{L9!T)K z%KobVl5jO^?(K;eyE;M`%R6P?d%E(|U%P3(;90aN&XcpjC-(iM~H_Fh&-K7!?6u6fu7HSnIuMJjfqyf}?0 z6{ooim@|4~Nl)r~Tcv$J*-zqp+ezL3lF~n4`mZcX_y5P|AT~W-mz#X`>;vA$O{?5F zqM+u{SiD#khiDL^bkg_#S?4o>cg+6L#Be|RWCHbHFD?|bAkj~vmk-1<2Q;|B(MS0O z7SN>utOoZn#&R@f?-FV7ptRC&C8~ms#DmR9o~X1D1%HPB*{BF2M7vb)-f{KFWWq;u zDY+%jCmvVafqLM4y%dG(?2!Ckq)C($NCwC?lw{ z8Iexu`TemtyZB>)CLr|!|;ct{Aq!2%}1JbIih-V5TX%i`^bf_N}njy@gFY!2jBpG_#rAJ_GkIK z|6P9iu-L79ucX(})#+3;H;Ow~M&wP;cIKoWS$#Ejzb%j!?m&SY&SKvi_fh|C;W9yy z1-7dCG%v#UVpjiLBotC$3#5Hr^f6C}5S-6e8!EsNW(x-{>WeGP9RIC0$2ud;L(qh$ zd}2Q!3birDSXRdG2jJx3p|~4}2Ppef!-(K|9~u0U zgt+FJ)rFxpAL_##g#OzCPRliUG+Z<3;5o)Dw;5JTw*EWY#R^ve%Rcg)+?AM3W~ge>)vr0d+kg5B32XwQLpB`*tl{_ldO^)>{p zSs!(0_Vx;QqQnyWPEiN=xw!Klx$?rowf$d;U(;fMGhp$7`k!u)Zgt>z29=AeZKLA@ zqKtSiRr!n$+y+8}xh`>$m)HWv6Yu!$HZCEx&m&BCIS9`^hE({=pI~of2dno(^h?5| zqxa9B2DKe%{T+s+16uA$504L>nuqDTwXqUeDd#7qSV3f;8<2akW^(T*q2Hgc_{i@) zjN)Ew&n-ysk}+&b_)%AL{vv&KiC_PS^#B-O zW^(WdGjPKX1?Zh0ig(|NW;6G)A~xOfWrT#)Nv4K{gp8POvU|vp)g6UY1WarHT5?o^ zUPame}556d9up8Ao8#K%ZjcOfFGy0Gn85f5P8KVV&$nbHBQ%V}i+nuNFrw z(0Br1-j(kvzCPQ%9Gi{x=PWmV{>`_o4UfUWG9!{e?DKW0)#WUH7_m|A`RS5&oStv3 zd=o!5EVU@_X;kOe6|j}h;xGqoC$VpBT5xlaBZ_09s5b#PgVpA*zYp|x0e3(xNjD%B zBe^kAf#K&}67=R3RC{Kp)3{}R=9d6{^YsZtvln3C?~0#Xri9PFhhKd9OmMlvVBMNu z{a8L8vR_Ve`TnfxuQ;pwC@Y~X@xH>VZYJ@mJGu=h z&>$F=Q^$yx?Z#3qd>T25vB!H}aIuenax4{*v=H&lba@sQV0V_WQDCKll$2e`vtGPsjo4<(41^J}r`t{S`JR zpY>d;gV*Z%WB?IpDHI!?zX#(n!(zg7ENbkTZ!ikkE>!gCS&z_mRv|K-+@!4r`VW9d ztilL#WW`-z>CN@2(9_g)Sj1C||1%-Gm`cf4Nk7mT1He-U@E5;O7!v}tyDcbqNw3Pi z7G^zyW9ljqwf*7%BFq|;Sz9c%t(1$UYoU;sCXY$-=a>uxE~9m+)2OUskbt5IOaT5G z8tG?3D7i&9pE?VIJ@DHmkI!Ww8Dr!TNw$AT&AjCSKH?K|TSLf}Co6+%fGnwp&lX+n zg}z?ubC>>X^<}vxeFoaf7XezOG%(JIYBY|QssKdB@-j$leSF5ksm-3hf0 zD#*5unyS-6{N4((ao*{C^EVaqw_1R5uaZqa+~yY)=y9Rv2>ALik5%fD7Yh2g4ow#g zVJ5t1-Dq>Eu92}XVuD9;NsuZhb!X8WJb(n!kg=N187v!bl**fwt~hpe*ecV~AQGe@ z|DqgQ69#z3Z)~?|>m`zeAWFdKl7xHgbX^D49x+M;o;F3H*^B$zRg@{vY4?4;5Kw+ADC9t@tpkRp1{9 zy!+Ru{&iIU<6mDIZ+3=d1C=_|9?qQJ@Zcl$vw(dZ4s2KvQh*B>S@L;e5uhnHAZ&m` zA@Y_G$vj?Flt`sS=BlwnJpXiCvhuwaxBkn|k|2Eo8lJZeZpRSGE>1o1sv$^pnkvQe zw`Wr{v8Bte0Omu{C!WTkUD!IT; z9bK0@4lu5w6M!05&RWcwky_aO!!Y8&jh&j2LhEDa2A5IsmA?} zaOO|HW^C4W3(r9pJE$!#=hLZVRfw>&_}*U&l`~D(+!db1RNQWE&#aO60eZB1D09o>oTR8RdCY77sLSx{t+@OAmpo`9&2cLt&RwO zAKgRz{X$3##y5>aJLz9eGAxhh6Pm7@)B4F!F^P<^zq)~Z$m?WIXX0U%9>_W-4sw%q?e|UPll-d)ut{ zVlt%K+dA7Zm)TsWduUYIBUnpIIIOw5oCpbpP2@^p=k9C{X*n~Q<+Bg#HGKyt?}#n7 zLwD!fSecC^%6pq#H^}oxbiYmPF!UXd6ED;!*#U#A4mn*OCO_32Ph(mnxZ<`#sm`uu zZxvs3IY}7QAB^IEVm9i$)T+1L8(_ueMs;;Ii*rB$VrPnSU9%oRBGl8kT5^QXiS;Cg?*kn%3;*9YJm+{=D zVvFMUlur|gwhm^QUTV7Yz?_EOD$a?}m3s`?v@P8h3_Y($R{zWWaA1&8ro-{i*=i$J zfE%l*m!-zcAb?meU7LJq;hl$2Q}5Z6mEN_@{CU=`+L?v}tZ#sa#nJq`#};ax4xb5z zu&&9=;m`wgRDVYefb_kY7VnsEvF{_DCB&C3;-zUwL$1THKUBcOMhYlESwEb@ z;EUP@V}8WAa>6#YlbC4Pw#XVA$|G*O*K=BIeuiN!R_*?J{ARXoU;XRxZV(npH&qt1 zVy6wGxh-L<|K-`MRAy0TQZ)U&!MPBuk1lVGWpRR@jYj+_+S&@nMiIz@A2NIAtB;Z5 z?@PqfGK!(72DQv z-*%iH4<%hmZl~j)zR0h`+CL7?6-#RcMpGGdcgy?1zvb4=`1t|(gR#uT;k<6dL}n87 zQP4@V90k)ZwQ7j&DSad zYD3so} zKR*diHI<#1t_4$X{aP)jkXNYHMco{TLa(NX7&9n5cDeg%Hkwu2eCno`JsVM=o|D|- z+vMBCTD-ow=de3&g5$cPHVGVvr3Z!SqQ~9{HY^>ofajmi3ZqbqcVl;&> zYd3%D6MeS2XL~2zwUooabiD)LL!2$qbYh)wCX5c&{{SKW=QKGbznrGejKQHZVO(wAeW8a)gl2eKd8F2i=_0 z0+EdDZ9%^8Ai$gU28pCudM&8)%{jK8a#t!W3Tek~gi!gNj>Lx5@)NxA@TWk>U)w@k zp%$A*L6e2b!S&|CX!n(jTo=0$sdH7Cq7!0zXcX*asSGassOEz8U+5@K&nz?jCM%BA zO&O$oIOcltoJ^a~KqwV%kjj`hOTum-HiXN@ci6nh$KdluuMxS~Dg*Xu>-nNqLU!7e zfLVY-T`0^Si>ZPgZfnEhyHU@pv#jMr%Rv5>AEsU$Xc}(Ux=gW^%?7arTY{YT`bk9BI_kEQ%**b 
z&WT>Tl1sqb?`rA(gHE!Fpqm_@yD##_kF^885YXj7d7>H)^0HbmMFbqSL{T;O}t3bdP< z;)hbmb(17R0}46e(fa~0Wcgg={8-Y93bgXlY2sbw!F$y^mGW?!?lca-8zb(9$c{{w z%4l}+`80a~1E6_tye2wWING7g(+s&<68Spx13WZ(r2Y!Y_yLd}ATty*7+=v;l|AMj zx%4-e+4`#|c70JF;60bzUO8@xDggvoW0f0mjEY1JXtGDCpD1uP|E=VFZC9N`|T3!XzLLz>|R-Cq_sm* zRfY#jCWjgBy_ns2s7EG~!CMyKR-yvh1}U%Sp0y;gnA?7vnzY;uHy*xJ{L~5kSZ_L( zEwshwfEpo0Cg#Ziscs2NQ#O-JmK-t*=z0nuQyqgNi~;u_JqAb;bbF0ycGi8&m8Nl_ z_nOl8mr8B1+kc3mOp zr^=eUEP*j`qN$eWOrhia(C7mizvgtxnT&$vIh|CSBz>TZ;3)JezM{AMxbf4*(Ol)6 zI+n#AS*oU?j;9GyMv4C<(ZZ?vG$iifq_8Gj=2PCZY3QSP=x#G89H@P?-h^a%4nMwzf1-rUi0-qZ!0unl+ckI zP}>@HAyb8?6~amMBb7_GXE+lEm?xW=UJ@fXgpp;zQE(?+^jfqmDS9O+7v3$ZEPX1-kJ~7AokOWJ-Pm!F+-_L zcT-_KWpZH1PkjCW{w0Peba}tpqm~gsDp&LcnIB`Iyem&tSvPy&!}-efv0}E8)y3hm zU`DmMd8uNK*jUybXauk7O(6A=DW+W9oc40gAJb#eP<|5>>S8z^ zvx@|S@`Up2^Fno(vrs5@=rC-~IdO`qu1TIBT%Os&l3HK9;f3=h@YA%l>^)lziwf5D zmmj}Cmz#P9GqXW$Lg$B^$oxhZqwjk+Bs-3PaJeRoF89SpA5CO>jnf?YEeNW_ti?kz zRlCyZHM!imSaZB=lVz6W_>&euvwg?Y za5}kDMB=O*G2BP1Iyl8wFztLoc@V8ILhR85cbt9aoP|yy+^k2gTc^hpx^AceTh-2s z3RBEcNNq14f3L8VQT*F2%vE1W7H;E)&#lN|yr`wBD2ZC`h}Tx5dnhZK#`)FR_HbUv zy6;!uX!GZ~(H^f1vC_LWdQGI)?=*)zmM$e|c7{Tgid%)gP)n}3qhPT_P$X0x0N zx+YxS|3V{$r^1P@dahX^!h^@YR$MTqP&6ygL9ELoo4TK^UATVdu%**zdC9fd`uxf- zkE08>kSLAX#r}~IzoD?j5a9LgxWug&`6^QiKYB37fZnzvHxr=SW{)sI8j)MXmO|{I zsk6JiTd6GOEr*{&FsiDU4z#)KIpSynu}194Hc7O@c%DA~lIT{sDCKF^|IWo&+Hyv>mW=tC{G2~N(D)aF2919F^^pU<4o z=@TcPY&WonE5*8u3sl-_Dg`1mXMicW6)sak-ErTsidx;S--E%$4p+H~$y3OM!%L0V ztFOSJJY3g8#GlHEXN*m+{`VT8Q>dPn-^pqjtoM1}ZAF?FNz4L7<(u66^MT|A$E|ma zC&w!-F2xSYMcQ?!hBB%k+FZ9NF94`);PM>)!>+Jh@yhnUjFhtJBHsffvf~0 zzOI=D7uwg$Uyj3Kkl}xV`2(*1)*sSXPfF0QO?}eOC_qkvTz_3KDpo)P+%FOA_rvG| zlfu&hSV41n7g3-#LnNlvf5|PDt@LLZ(>1UFc zPyfEI?<5F>(mIx?m}Y;aA>+F=eP12AgnZs4B1eH4Zc9@h?=7^ zG8veV3byH#i}@h(_Rq?gRR8xYtEJMCt#2Cbhs0omrK>EDStOzs7;M;q^yN$963FP1 zDWtL(5Sk7qLWJo^bcg^yBnt)ZI`|HVqIs5_eq0KQQce?=HlP*;V*VWCXP=-9DUy=L zb-BUO#z=8I?EVwA7+SZ~4eJ+*Q2(1k10st?o5)rr$ZolZ+MwbsebQA8V1fA_Pv!sX zpAop97E#~{g(CU(3fvF#ji5UgjbdmP8t0?2fB_~7W7MlKc2vPr#%4fSM z@`nlQGi@R1Z@_yJbb50_D&WS)W9C3VU*GapqQmKrv2O)X?*Hj_W}X_8P!eERL$T?B zf*PXR3qkp#6p{7Lc4t%k&zb%X4xpyyY}YOO8LLk zatfy@tI$Dbvi#FfwlYmVD{ieovGH)hr@U+ex8-qFjauuEZ1gDxV~r%P6%~OoZe{JA zT|3X&y6HE_9yqD|(I}a`gp%yjV)7wZrD4|Z5=0?|UeYz%Z~ur%{G+jMn|sgzSy85B z2)QZsbKy4c67DZdy51{~g&`u-Ir<3-nXdS-g$mCdu3zu*22JU3>P1RsD;e#!eQi@F z6v?wsw<)=;_;)^i9k6PhZ6b-TVTdPEfq&T|P--uj(ttG!Qz$OVh;+K>HoL&#f!pxE z5-!90Bn~$+qmgS~S7x^L*zNtfOby!8N<%nth38JFJ!(eZ{Ue zGH+tz`et{lm%&!yj_Yl^TYkvy#Ogy-?6;h`^Xb{1j;oW(54~fBjueB5>vNu%YM{ zjLv4MW}ED2*bbLX=e&(-K2nT)_P!Rn*j;%Y6hq1!&hN4QRV0D{-@0^(mhVRnqQu6~ zSoy03ZZ98>LbWTa=-ou^kJ#(rK=W}S;4=F51H)uE%#!F}OF3>IhM;S7&au>=-7U0% z%k%5tjreWqpFK3ZghRv5j`*D8SV4nlH0Yx zZE_fM&xc&uG&mzE3?~0Fps}gwv`6+*sNShG)Ig`j9=#fqws*>6=5%UC?}KntO|*Cw zPw~5X=%H7yFP157Sbxszh64(HdZ}Ftnl`?6rO5-cP>Ebz8J6a`?K7W+ z-nts@ii({*xX>iAW@e@#{Q1FS1~8OXZg`%b=g;HY0ii0T);y?!ktI8tpq5)e5sGti zSQJ47+|*5a{WeRIoUV~Cg16J31y1cuhCHL=g9;w6`@3?oG0&!pJ%uu*Pm=}8s7zn( zT&^A-H(~STQ@w?u!*>Jr7hUYTa)d2%Wzv+qj#!7_ixX}$S1LRcxgFjgEj0!R@uQ0! 
zL-1biY=x+aI3PN6u-c9D<90R$gc5|_F2mcN-^so>{YUnL5nxXiC6DJPUJMZ3sQ!!> z|1Iv`$NYehX87PZT4NL!Z3peS#i~gWp@XKIULVc zy$;!?o8C!b)EC+uj1wLn!=59JNd9RtZfjeIH^*qPCi9r{!%eF@7#%)`B7!;Gaqod_ zFxvJSeuth!Fx!{%!9cLadfGy#y>Z!yeN}NB$(Km6=R=ljCOX-h`;{56eM7~o=Y9cxN}ndirDKZA)3UKrG_T?B*afz==XEp z#Cn6LVc*Y)F0Zq8Niz8|M<)}}o4|m1o3KzXX5(#VwSA<-_G&9zj$|>6g z8u`B4xo4CBQXir}hD=nnF=5H;0*lF@TR^K{r$nS}S0k!S%Yu?_L6p&IXFd zC-yptvBHiOx?%qp^N;B^CK!kK9-AT0Q`)r38F$t1u zx+$S5*r{q!lZas~#xJkfN^~PazU&)6uzhevJn&O7Q}QrI#O>}Ucx*BVXjyG1WwT3d zuv5&jMzFHLNv67L&2tgC0J+~1jB$6|N)nku%9pkGOnxy5BP5q0e4T!u6i-4@h8Y}V ztWeKtkRM{AuH6R0M$pC~v0L}$@VX*-A7aaDHw{H9oi2k*K$pwByVQKGP-#v^Diix^ zA@^ITd5Aj(iDr$drlmHs4Xje&no^^y(PDyuL3gO1952`EVG28?x(GoZY5Nz0QyIk} z+Xr^oHo>`IBP8V(C}~udmL3j8vM>Q4gj77|Jcd^M#TXl9)jMqFU>9?^z`zlY^Sd zyB&Z_phK3rx#yWY{e&{Y*=FTS>xe6_^+*Kdae~|xA#&`#YH9g(kw2QQ!kVS{djhbs zo}NtPckbU1Gll-uY}5$ng_U9m1Yxc1N8n^Dz8fhK$gXr!sZ^LLzxw%WR+?#|ra*|0 z8QSQ}J5?M4e*5f$ewm1`yy<07|>1xd^hhxrJ6TgAL%k5iT@; zYiAF7KB6RI=zd@q1|L81a%D@bLEhzfpIxtSmo_JIJIYk7*!kEdessf-!J6jps4QP; z5i1p=Q=-Lb9p@PcpL%LH@@^VWRZm5wF+=~ox)Xx zXcQhbK7sp#^Q1Fn>-lcj4Bo!`Jtyf^HgAM%z}tK^wVdJI%8U?_Oy#8x^ooRkLKP>e z1ql^xSL@Rl59*-ha_s2|w7MRW^E6!vR*PMZW~yC&PMZB4KpYe!8_zq*kn-S_VR@z`tVdZityqq@tp75N+!8f zjsn^@E!m=B-!0T_K93f7&zHTkM8pkOPv0clTuxHV4!;UM^xEqr`+B?}89CPZvZB^(4znSQN`RJUB4b@icEKXvV>$VC_ zjZZ1o3)0m*VKx2eJk{*;F(9F$|H$SGt)P6LqKRa3(}B$4azGkiu9rX4EYm%(hnjCN zwXG_VOe-0A&=%{!T*1WVa3({m*Mi~@55nzgx9RgSiY5(0mWTSantaw^xi*gC(Y;Zr zEy^G7ol+FuCJ`z_CPFTe_zFmt3xvVdDzq3Xe%)Il>mZ<3XviVcJt=u%=O`=ZO^h5Y>Mk^f^GLi{mG2?wWMcmIw)5{2yUbKXZdwb+dY z94&n4^-OSV1SjQrUN|u)<)iP94m#$Lm{u;g?9EydNa@HDL zy}S5kt2sydazhKm8H>(MA(-tbNc(JNVVYCa*+IU{D>1f*K>8Z0BZ? zh%{MpzTE8|H~>!AX0hn?dcUySDaneogfotzRKoieyI*x zdea4iV}9+(iFVqwF#D`5j`^HHScSyLGZ&C2{N2o z?ke(r@G!->LPS>qdP=4e7U4^dL$#PGa`)qz^tvId`aqvH{bB9%T)a$ z55hTm`QVr+D{&XTD8CMGGsH@VU7d|Rw6Bu>QeB@cNJy>{fS`QRN;cJUV>`&=Y*j_G z$jl@GX{iQf`idV`UX8$^sL=LM(t!L8O(%AD|6#igyZ?9GQ78(zvZbRB>4_Od8tPPJ zF0F?6Y2fC~yQzKC`Alpc?%JQuF6|$qb!qh{)yuVKdd785O?jwpJ0I#dQJ(UJLS^^L z@7_?z6`dTlKRv)d_xqgu-YTw?Kg+=`awUvo>>jpilF)E)LW|NiG$UhS3b6g?+f-aB zi%xOxbVy+n{WwfjfU)R8jX2~w^?QHxTxoqMk=TLUa5x(K6PTuN)LOUw&5UcA=C?A< z95^|Aem|$^YFQU0jE7yv+$-C?hrC%w+aFi{sXh z=c)9-7#>fJJd(2OMNR2Te~uOG~G~H?E%=(yeV_ET)n2K)D%b}|z z^AMjjRjdVRigi|T*TPa7Q$skWZ%(w3UO(5bGnH$4A}a();DncN@-0@*5V_wRzb1cA z_&HT<>!&4=?mbWgDypysQzs9an96S1#GZ-a6A|QJ%?hDVIBHz$!tS@3!tB3KoS7mR zNwTP;gZ%lpRK*HJ#hb?Rstk3Q*#(QuXJ~ysEMIoE2V$hN-PiXEUryqBu9ShwO>bug z-Kq5Y=Brb42GzFM)0^eA`_-$`g{F?&!oPinf1ALL;_){;9ETFblP%IQ1h0#|YHs<$G5!K_= zERg1w8pK^RVJwqMXMCo!ZfOv+Q+o7l-5mK;{e{kYxuLKXS=EB%b~Jgw>m_YU?S0dZ z_^h7{YK5Bm&Moiw(7!>!WO9dh__KiEyDAN(E-&?ij-O@mHn|IhYF`~Q(vQ%T;6oJZ z0v;-a2cP*aegIm*6}N9**#HnMh~*oQhXj;t23Lc{Hij^q!Id-J-#O?;rCN-E*-Hfs zzCfdN&djPJ%z(ogP$^6NaBVXMC9F_lO89sIGreG#)U<{NcQb4wtu7L}t1#u~kxat5ujIxQV%TcE6xI#hPN^^AI$x(XIg67 z>mGIfFD43+&83@PVyA|o7F8VB(L08wa=KgP+Kq;9+~doUqnryiyJ@qV8Ia2*7+{Ze z>#Y!yn`=hb^VTg*^8;h@TgVWHG)wJh!`Lkpwi5M;Pb5<>Xnaut5F+e`oIh>!3QMl> zvHnt#t+cG<%-V8;cO^=_U}_18?%CV1!AJ+@G&C_>Voq{Zc*#_i-UipNLc=AWY(=0( zilW;aB)hnXi4oLNSvFHuzNf=ss>=o?5$L!IqmL$$dqt>N&~Q@dNffMwR(6FF?#Q-PMp@l!!T&rI&;q}U=$(R5bcX)@Kg-$81eU56lc0R(m)8H|-iCV~DZ_@Y3I!$eH zJjcK&)y3N0c`aQHTYhS~;*V)IEDV{Cny7QiLy+!bbP#!T-(Y)rYdDj{hZ0c(1!M}1 z6>3-wwj0nA`(ya!Um^A+M@Liy25L&Ra5WqgHPs(`TdzMVZtc7;*q?g!3bRmBL{NqL zE)<~ODDuyjO;CHbEZZ?`C-zVjBcN1mw%>zWOiS4>S`O>}C5G-=s8mHA97pf({UkJh zBq3KZTe(40xtaE^ENNcP1=%{%wxQImZWmf?swm+R8+|jYN|pj~1I@=3@n_Z}blCc4 zPC&$o*fRBx7$hNKt$K6(9?4~vj8Z;Vk0OELZv?M%rLId}p_jkLHITZ|t`rI0p-nNs zJFeTrmZp3Yye@e;1-M2ss_1P~1UU1hxb>?f`To{TQYj>I 
zMJ@bOq=*Frit}y?HoR$PSgJt-4i6)2D9_`dj~n^j^lD_Ektn7}shyKh$lK9+@Ik~^ z^m0XzI{YCsPQhrBV-31=XfYLhKz3}nwxk85q~F^vjm$uHu z%bXE-WY!Jx&7bw%c9knq5%R~}^-qnV9%&t5w<+?lF(W|cB{*CNT!t8yhlGH|OTI)} zfc6|7VPrj-!S-U3!-?tq+oZhUua)EAD1jmq!dYjS0Fg+R z%>YG2nW5ChlijBeDpBr!uv^|{zj{h;+ZM%pF;_+XjmRpPqwreL=TTz8(t-tkrjsiLucJT%M2QHqz3359_kyGt2Q_sN}@VpRW$7{D3;Va;Y zKqPn|KK~(LcH1K7*e+G2mz<;Ztl!{rPmeK(6|k!(p0JqM{^j_otAU&$XEKCf%cXES zPWFi$cV3OtxDj;td^@uOq#WsRrb>g_>z0rCqI-GM9~r{a!%DDZ?z zT}TtFs?C&Whg@<4EjE)Wi(nLLkF!2l{4XftzG`vDRgGAdPj3W`1P8dY0wR(8$*siQfhS5=fTSDbuN(3dF%O7CvaoBpW4DvqIH*&7G8H2!Ys-58^XV7aX zIo!ut-zv*4dN+i2zdX)E;>t5$kKFn?H3RAarhxGXESGTlyT4tGv=Y_BZ+pRk#B(50 znJp^k5-78>_*faT8xIO}(yxiE)_^$btd`%S8i$IveZTEAn?6bN@6ozB+qRr01L|_1 zHtKh--M(fg@u{I^hb&*`n$#mbA_y?-#@j|xi|e5wYjzIx6G%pI%vQwGTQ`$t8N4mp zdAp?Qi$T{T6bX{5P(SAU@g(9B7=%B#n!FTAU;kBp#URcs!Y7DE>O{v)*w03YsvHA_ zgf{F+6wPL-PyYjyv*FfYc;Z7ufAo3@Bfr#TLuXLDd* zwDG$?V9AoP+o~axyE$727)qjYZ@)q5AK?N%%!I7Gw)|KbEe?&o2!ViC@p+-%;$EVy zcsp&`CK}!&GBBo@t%T9$70pF9ug#kRyt5D})0MMzBJUI8lkHo#7SO}{5yG~vsiig& z2|jSlez{L-OXq40hjmzgHZ`V-l_nt(a;^4Ka8WYJYhl2! zjo-ibkHXRv@(ss`%>EM>E*gAc6cv2j%2f-ln%Y0zKZZskgHxy%Q*B@=F*qPxyb;f3 zl>7&ttrVwwUH%E5GjBCrbE1^n*-`a2J#9g%_D;ohu*0G+i5FLs&2id8#qIg&wejGm zZ?ZtqPm3!4MmWnoS@GfK^kN4fnEMG3QQB_JBaq6+qXD^mMLFG~BETt;(qn#a!43tW z5=>%w0{F`w4A^%%{i46|qV_4XGf7>_gn{}$J@iapMFEj4x6mKp-wRk0Fy27b+87f^ z^38+U?4s02K8LJthG{vkZce}D3o#D)oQTKmwG`48J<@3BGk;?13~oPO$q|yJxHukz z1$*5Q0QhTzwW=vw)U3niXQSTM`v`?ST&~-xx7CU zstJ)~bhoCZs%+_63k)#Un?;<9N`ckl2!-eL68zh*T4XUan{!a71`n8pDf9rc+j8F~ z@|b7A;%z|GZM}o8#bFm;UVKJHu7#U?2#LS{JUm(17exnjE75rLa+EwG!TgP-0G`|G z6zxJ@ls-z`Xd|~Dg29WGhjzE`D~*AJYxxlXv1C@l!zR99T7oh`i8?RUUCf1UQO%%l zbbz^5v0yb1&zEx)<)DqoG9X`D_6Yz{t_;#JMCQSB@+D39Ks37?#uES6? zr$iKO5QYHFlv!xcrNv8s0F0Tt4@V=gX;QgF1vg%`k}cn8u;(~rk@+vugv%1nkdt3x z?L3j@2mp0xM`pyEB4GB{JN6O!J3JH$sa$>rqJ5jGl1e^Zm*}fq{&#M5cdNUn%Ik%k z-4R*`8g~ve)A))&ORniycM{Ms)@Fb=?Fr+HHvPDtcz<(F-PLqxBHpN^MRuYZN*Ne; zV$m&!g>b6eD==-pLgbbQ6_|Eop{2t-a~TCx&Og$>`Tw;5=oG!drn@w2i$3>XMZ^4u z7;C(oK~6_LQz0e>kNNm0;U4cd0uV=4Vqsji7vIPb@{_#Ei|m#pdyq}tzQq27p1jmw z4K`_4?_7~z!4#ShySy|Bh)5H-KYFzLyyLo2Ns#7hKGKaLi)*IO)NmU2H=?#}dbVBk z-kMUIsN7eJdv67w+XW|G=Wg}d%EFPONmCeLb*P0dG#^Rl75CE|o?lS*b5bwe51u}v zFHp(~eR=?D5~jP`S*;3)1f0a_aWreyvkM%0D?f9Pc*VY^1;pqtsoh1}X#gD&_tVyO z!SG}dD1k%(G-c?O{J(GSAI!;r-QME$P4BK=(syo<`QiA%?|JVG9;+JfR;0G1r1`*U zl)9VHledk*`6wE`ESYeUsf#rpm@Q=Q+_t{YLF|f!;fMxEKwE33K4dW_bInW;;v>t8 zY?)yT0Ld+Q7l*CMbwh~_8oMg#3_(p0OO0CeOx0&kGC8)%b}rA~wDWz5Kn=rx^nlt? zdv&Z6U3CB&g#y!Uts~1oqu~qdH0>Q}pSc~g@jIQLsV`^35O3NVX0)v~rF30L@*5OM z;ES$109?@D@JZ8_B$(iUxi>&;?o=tOof`);Tf}h-6lq}XoW$moz(}HSGZ()%VO1%2 z4sNAq-A)(*H1yOhr7Cgq*j*)l;=+}N(bE{V0-QV+Q#!7{8b&$l<9FkS2n2aU#`sZ$?GHkqC zRw-A?E{6BgZhS34+*j^3Hu!GOE>}RC7cr18 z!)DGI3I8{M`~PK=Mt1g(+lN>m8Y$SzH3J*$9;ltCi~CMjBUOr*0eC%E2t zby`WNEJR{hGQr>RLcCW#O>m*iKeI12#J5V(cIfgVt#MgTs>V&G-=FN7V-vdsS=>?}W}){IjvTvm9u zvLD_V%4h)IB`VP=QV4RUhXqqCULceN5dilB(8fLE|3Vx8pV#H33*7JZHhfd^X*6r_YU5;&88K9BRr}EcnX`FPV z-gHMMF~twS+a2E67IB%syW_s=isplm&68Y@JylRi@C0@dbUi00*LE_zjgFHBZJAn^)4`w{|E=jO3qHuSYtp! z0i8Us5tzYB-u!)j6Q`dYS_FY*SK (3, 7): + cachedproperty = functools.cached_property +else: + cachedproperty = property + + +def hmac_sha512(key, msg): + """ Use SHA-512 to provide an HMAC. """ + return hmac.new(key, msg, hashlib.sha512).digest() + + +def sha256(x): + """ Simple wrapper of hashlib sha256. """ + return hashlib.sha256(x).digest() + + +def hash160(x): + """ RIPEMD-160 of SHA-256. + Used to make bitcoin addresses from pubkeys. """ + return ripemd160(sha256(x)) + + +def ripemd160(x): + """ Simple wrapper of hashlib ripemd160. 
""" + h = hashlib.new('ripemd160') + h.update(x) + return h.digest() + + +def double_sha256(x): + """ SHA-256 of SHA-256, as used extensively in bitcoin. """ + return sha256(sha256(x)) + + +class KeyPath: + RECEIVE = 0 + CHANGE = 1 + CHANNEL = 2 + + +class DerivationError(Exception): + """ Raised when an invalid derivation occurs. """ + + +class _KeyBase: + """ A BIP32 Key, public or private. """ + + def __init__(self, ledger, chain_code, n, depth, parent): + if not isinstance(chain_code, (bytes, bytearray)): + raise TypeError('chain code must be raw bytes') + if len(chain_code) != 32: + raise ValueError('invalid chain code') + if not 0 <= n < 1 << 32: + raise ValueError('invalid child number') + if not 0 <= depth < 256: + raise ValueError('invalid depth') + if parent is not None: + if not isinstance(parent, type(self)): + raise TypeError('parent key has bad type') + self.ledger = ledger + self.chain_code = chain_code + self.n = n + self.depth = depth + self.parent = parent + + def _hmac_sha512(self, msg): + """ Use SHA-512 to provide an HMAC, returned as a pair of 32-byte objects. """ + hmac = hmac_sha512(self.chain_code, msg) + return hmac[:32], hmac[32:] + + def _extended_key(self, ver_bytes, raw_serkey): + """ Return the 78-byte extended key given prefix version bytes and serialized key bytes. """ + if not isinstance(ver_bytes, (bytes, bytearray)): + raise TypeError('ver_bytes must be raw bytes') + if len(ver_bytes) != 4: + raise ValueError('ver_bytes must have length 4') + if not isinstance(raw_serkey, (bytes, bytearray)): + raise TypeError('raw_serkey must be raw bytes') + if len(raw_serkey) != 33: + raise ValueError('raw_serkey must have length 33') + + return ( + ver_bytes + bytes((self.depth,)) + + self.parent_fingerprint() + self.n.to_bytes(4, 'big') + + self.chain_code + raw_serkey + ) + + def identifier(self): + raise NotImplementedError + + def extended_key(self): + raise NotImplementedError + + def fingerprint(self): + """ Return the key's fingerprint as 4 bytes. """ + return self.identifier()[:4] + + def parent_fingerprint(self): + """ Return the parent key's fingerprint as 4 bytes. """ + return self.parent.fingerprint() if self.parent else bytes((0,)*4) + + def extended_key_string(self): + """ Return an extended key as a base58 string. """ + return Base58.encode_check(self.extended_key()) + + +class PublicKey(_KeyBase): + """ A BIP32 public key. """ + + def __init__(self, ledger, pubkey, chain_code, n, depth, parent=None): + super().__init__(ledger, chain_code, n, depth, parent) + if isinstance(pubkey, cPublicKey): + self.verifying_key = pubkey + else: + self.verifying_key = self._verifying_key_from_pubkey(pubkey) + + @classmethod + def from_compressed(cls, public_key_bytes, ledger=None) -> 'PublicKey': + return cls(ledger, public_key_bytes, bytes((0,)*32), 0, 0) + + @classmethod + def _verifying_key_from_pubkey(cls, pubkey): + """ Converts a 33-byte compressed pubkey into an coincurve.PublicKey object. """ + if not isinstance(pubkey, (bytes, bytearray)): + raise TypeError('pubkey must be raw bytes') + if len(pubkey) != 33: + raise ValueError('pubkey must be 33 bytes') + if pubkey[0] not in (2, 3): + raise ValueError('invalid pubkey prefix byte') + return cPublicKey(pubkey) + + @cachedproperty + def pubkey_bytes(self): + """ Return the compressed public key as 33 bytes. """ + return self.verifying_key.format(True) + + @cachedproperty + def address(self): + """ The public key as a P2PKH address. 
""" + return self.ledger.public_key_to_address(self.pubkey_bytes) + + def ec_point(self): + return self.verifying_key.point() + + def child(self, n: int) -> 'PublicKey': + """ Return the derived child extended pubkey at index N. """ + if not 0 <= n < (1 << 31): + raise ValueError('invalid BIP32 public key child number') + + msg = self.pubkey_bytes + n.to_bytes(4, 'big') + L_b, R_b = self._hmac_sha512(msg) # pylint: disable=invalid-name + derived_key = self.verifying_key.add(L_b) + return PublicKey(self.ledger, derived_key, R_b, n, self.depth + 1, self) + + def identifier(self): + """ Return the key's identifier as 20 bytes. """ + return hash160(self.pubkey_bytes) + + def extended_key(self): + """ Return a raw extended public key. """ + return self._extended_key( + self.ledger.extended_public_key_prefix, + self.pubkey_bytes + ) + + def verify(self, signature, digest) -> bool: + """ Verify that a signature is valid for a 32 byte digest. """ + + if len(signature) != 64: + raise ValueError('Signature must be 64 bytes long.') + + if len(digest) != 32: + raise ValueError('Digest must be 32 bytes long.') + + key = self.verifying_key + + raw_signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *') + + parsed = libsecp256k1.secp256k1_ecdsa_signature_parse_compact( + key.context.ctx, raw_signature, signature + ) + assert parsed == 1 + + normalized_signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *') + + libsecp256k1.secp256k1_ecdsa_signature_normalize( + key.context.ctx, normalized_signature, raw_signature + ) + + verified = libsecp256k1.secp256k1_ecdsa_verify( + key.context.ctx, normalized_signature, digest, key.public_key + ) + + return bool(verified) + + +class PrivateKey(_KeyBase): + """A BIP32 private key.""" + + HARDENED = 1 << 31 + + def __init__(self, ledger, privkey, chain_code, n, depth, parent=None): + super().__init__(ledger, chain_code, n, depth, parent) + if isinstance(privkey, cPrivateKey): + self.signing_key = privkey + else: + self.signing_key = self._signing_key_from_privkey(privkey) + + @classmethod + def _signing_key_from_privkey(cls, private_key): + """ Converts a 32-byte private key into an coincurve.PrivateKey object. """ + return cPrivateKey.from_int(PrivateKey._private_key_secret_exponent(private_key)) + + @classmethod + def _private_key_secret_exponent(cls, private_key): + """ Return the private key as a secret exponent if it is a valid private key. """ + if not isinstance(private_key, (bytes, bytearray)): + raise TypeError('private key must be raw bytes') + if len(private_key) != 32: + raise ValueError('private key must be 32 bytes') + return int.from_bytes(private_key, 'big') + + @classmethod + def from_seed(cls, ledger, seed) -> 'PrivateKey': + # This hard-coded message string seems to be coin-independent... + hmac = hmac_sha512(b'Bitcoin seed', seed) + privkey, chain_code = hmac[:32], hmac[32:] + return cls(ledger, privkey, chain_code, 0, 0) + + @classmethod + def from_pem(cls, ledger, pem) -> 'PrivateKey': + der = pem_to_der(pem.encode()) + try: + key_int = ECPrivateKey.load(der).native['private_key'] + except ValueError: + key_int = PrivateKeyInfo.load(der).native['private_key']['private_key'] + private_key = cPrivateKey.from_int(key_int) + return cls(ledger, private_key, bytes((0,)*32), 0, 0) + + @cachedproperty + def private_key_bytes(self): + """ Return the serialized private key (no leading zero byte). """ + return self.signing_key.secret + + @cachedproperty + def public_key(self) -> PublicKey: + """ Return the corresponding extended public key. 
""" + verifying_key = self.signing_key.public_key + parent_pubkey = self.parent.public_key if self.parent else None + return PublicKey( + self.ledger, verifying_key, self.chain_code, + self.n, self.depth, parent_pubkey + ) + + def ec_point(self): + return self.public_key.ec_point() + + def secret_exponent(self): + """ Return the private key as a secret exponent. """ + return self.signing_key.to_int() + + def wif(self): + """ Return the private key encoded in Wallet Import Format. """ + return self.ledger.private_key_to_wif(self.private_key_bytes) + + @property + def address(self): + """ The public key as a P2PKH address. """ + return self.public_key.address + + def child(self, n) -> 'PrivateKey': + """ Return the derived child extended private key at index N.""" + if not 0 <= n < (1 << 32): + raise ValueError('invalid BIP32 private key child number') + + if n >= self.HARDENED: + serkey = b'\0' + self.private_key_bytes + else: + serkey = self.public_key.pubkey_bytes + + msg = serkey + n.to_bytes(4, 'big') + L_b, R_b = self._hmac_sha512(msg) # pylint: disable=invalid-name + derived_key = self.signing_key.add(L_b) + return PrivateKey(self.ledger, derived_key, R_b, n, self.depth + 1, self) + + def sign(self, data): + """ Produce a signature for piece of data by double hashing it and signing the hash. """ + return self.signing_key.sign(data, hasher=double_sha256) + + def sign_compact(self, digest): + """ Produce a compact signature. """ + key = self.signing_key + + signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *') + signed = libsecp256k1.secp256k1_ecdsa_sign( + key.context.ctx, signature, digest, key.secret, + libsecp256k1_ffi.NULL, libsecp256k1_ffi.NULL + ) + + if not signed: + raise ValueError('The private key was invalid.') + + serialized = libsecp256k1_ffi.new('unsigned char[%d]' % CDATA_SIG_LENGTH) + compacted = libsecp256k1.secp256k1_ecdsa_signature_serialize_compact( + key.context.ctx, serialized, signature + ) + if compacted != 1: + raise ValueError('The signature could not be compacted.') + + return bytes(libsecp256k1_ffi.buffer(serialized, CDATA_SIG_LENGTH)) + + def identifier(self): + """Return the key's identifier as 20 bytes.""" + return self.public_key.identifier() + + def extended_key(self): + """Return a raw extended private key.""" + return self._extended_key( + self.ledger.extended_private_key_prefix, + b'\0' + self.private_key_bytes + ) + + def to_pem(self): + return self.signing_key.to_pem() + + +def _from_extended_key(ledger, ekey): + """Return a PublicKey or PrivateKey from an extended key raw bytes.""" + if not isinstance(ekey, (bytes, bytearray)): + raise TypeError('extended key must be raw bytes') + if len(ekey) != 78: + raise ValueError('extended key must have length 78') + + depth = ekey[4] + n = int.from_bytes(ekey[9:13], 'big') + chain_code = ekey[13:45] + + if ekey[:4] == ledger.extended_public_key_prefix: + pubkey = ekey[45:] + key = PublicKey(ledger, pubkey, chain_code, n, depth) + elif ekey[:4] == ledger.extended_private_key_prefix: + if ekey[45] != 0: + raise ValueError('invalid extended private key prefix byte') + privkey = ekey[46:] + key = PrivateKey(ledger, privkey, chain_code, n, depth) + else: + raise ValueError('version bytes unrecognised') + + return key + + +def from_extended_key_string(ledger, ekey_str): + """Given an extended key string, such as + + xpub6BsnM1W2Y7qLMiuhi7f7dbAwQZ5Cz5gYJCRzTNainXzQXYjFwtuQXHd + 3qfi3t3KJtHxshXezfjft93w4UE7BGMtKwhqEHae3ZA7d823DVrL + + return a PublicKey or PrivateKey. 
+ """ + return _from_extended_key(ledger, Base58.decode_check(ekey_str)) diff --git a/scribe/blockchain/__init__.py b/scribe/blockchain/__init__.py new file mode 100644 index 0000000..a729426 --- /dev/null +++ b/scribe/blockchain/__init__.py @@ -0,0 +1 @@ +from .network import LBCTestNet, LBCRegTest, LBCMainNet diff --git a/scribe/blockchain/block_processor.py b/scribe/blockchain/block_processor.py new file mode 100644 index 0000000..41954e7 --- /dev/null +++ b/scribe/blockchain/block_processor.py @@ -0,0 +1,1695 @@ +import logging +import time +import asyncio +import typing +import signal + +from bisect import bisect_right +from struct import pack +from concurrent.futures.thread import ThreadPoolExecutor +from typing import Optional, List, Tuple, Set, DefaultDict, Dict +from prometheus_client import Gauge, Histogram +from collections import defaultdict + +from scribe.schema.url import normalize_name + +from scribe import __version__, PROMETHEUS_NAMESPACE +from scribe.blockchain.daemon import LBCDaemon +from scribe.blockchain.transaction import Tx, TxOutput, TxInput +from scribe.db.db import HubDB +from scribe.db.prefixes import ACTIVATED_SUPPORT_TXO_TYPE, ACTIVATED_CLAIM_TXO_TYPE +from scribe.db.prefixes import PendingActivationKey, PendingActivationValue, ClaimToTXOValue +from scribe.common import hash_to_hex_str, hash160, RPCError +from scribe.blockchain.prefetcher import Prefetcher + + +if typing.TYPE_CHECKING: + from scribe.env import Env + from scribe.db.revertable import RevertableOpStack + + +class ChainError(Exception): + """Raised on error processing blocks.""" + + +class StagedClaimtrieItem(typing.NamedTuple): + name: str + normalized_name: str + claim_hash: bytes + amount: int + expiration_height: int + tx_num: int + position: int + root_tx_num: int + root_position: int + channel_signature_is_valid: bool + signing_hash: Optional[bytes] + reposted_claim_hash: Optional[bytes] + + @property + def is_update(self) -> bool: + return (self.tx_num, self.position) != (self.root_tx_num, self.root_position) + + def invalidate_signature(self) -> 'StagedClaimtrieItem': + return StagedClaimtrieItem( + self.name, self.normalized_name, self.claim_hash, self.amount, self.expiration_height, self.tx_num, + self.position, self.root_tx_num, self.root_position, False, None, self.reposted_claim_hash + ) + + +HISTOGRAM_BUCKETS = ( + .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf') +) + + +NAMESPACE = f"{PROMETHEUS_NAMESPACE}_writer" + + +class BlockProcessor: + """Process blocks and update the DB state to match. + + Employ a prefetcher to prefetch blocks in batches for processing. + Coordinate backing up in case of chain reorganisations. 
+ """ + + block_count_metric = Gauge( + "block_count", "Number of processed blocks", namespace=NAMESPACE + ) + block_update_time_metric = Histogram( + "block_time", "Block update times", namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS + ) + reorg_count_metric = Gauge( + "reorg_count", "Number of reorgs", namespace=NAMESPACE + ) + + def __init__(self, env: 'Env'): + self.cancellable_tasks = [] + + self.env = env + self.state_lock = asyncio.Lock() + self.daemon = LBCDaemon(env.coin, env.daemon_url) + self._chain_executor = ThreadPoolExecutor(1, thread_name_prefix='block-processor') + self.db = HubDB( + env.coin, env.db_dir, env.cache_MB, env.reorg_limit, env.cache_all_claim_txos, env.cache_all_tx_hashes, + max_open_files=env.db_max_open_files, blocking_channel_ids=env.blocking_channel_ids, + filtering_channel_ids=env.filtering_channel_ids, executor=self._chain_executor + ) + self.shutdown_event = asyncio.Event() + self.coin = env.coin + self.wait_for_blocks_duration = 0.1 + + self._caught_up_event: Optional[asyncio.Event] = None + self.height = 0 + self.tip = bytes.fromhex(self.coin.GENESIS_HASH)[::-1] + self.tx_count = 0 + + self.blocks_event = asyncio.Event() + self.prefetcher = Prefetcher(self.daemon, env.coin, self.blocks_event) + self.logger = logging.getLogger(__name__) + + # Meta + self.touched_hashXs: Set[bytes] = set() + + # UTXO cache + self.utxo_cache: Dict[Tuple[bytes, int], Tuple[bytes, int]] = {} + + # Claimtrie cache + self.db_op_stack: Optional['RevertableOpStack'] = None + + ################################# + # attributes used for calculating stake activations and takeovers per block + ################################# + + self.taken_over_names: Set[str] = set() + # txo to pending claim + self.txo_to_claim: Dict[Tuple[int, int], StagedClaimtrieItem] = {} + # claim hash to pending claim txo + self.claim_hash_to_txo: Dict[bytes, Tuple[int, int]] = {} + # claim hash to lists of pending support txos + self.support_txos_by_claim: DefaultDict[bytes, List[Tuple[int, int]]] = defaultdict(list) + # support txo: (supported claim hash, support amount) + self.support_txo_to_claim: Dict[Tuple[int, int], Tuple[bytes, int]] = {} + # removed supports {name: {claim_hash: [(tx_num, nout), ...]}} + self.removed_support_txos_by_name_by_claim: DefaultDict[str, DefaultDict[bytes, List[Tuple[int, int]]]] = \ + defaultdict(lambda: defaultdict(list)) + self.abandoned_claims: Dict[bytes, StagedClaimtrieItem] = {} + self.updated_claims: Set[bytes] = set() + # removed activated support amounts by claim hash + self.removed_active_support_amount_by_claim: DefaultDict[bytes, List[int]] = defaultdict(list) + # pending activated support amounts by claim hash + self.activated_support_amount_by_claim: DefaultDict[bytes, List[int]] = defaultdict(list) + # pending activated name and claim hash to claim/update txo amount + self.activated_claim_amount_by_name_and_hash: Dict[Tuple[str, bytes], int] = {} + # pending claim and support activations per claim hash per name, + # used to process takeovers due to added activations + activation_by_claim_by_name_type = DefaultDict[str, DefaultDict[bytes, List[Tuple[PendingActivationKey, int]]]] + self.activation_by_claim_by_name: activation_by_claim_by_name_type = defaultdict(lambda: defaultdict(list)) + # these are used for detecting early takeovers by not yet activated claims/supports + self.possible_future_support_amounts_by_claim_hash: DefaultDict[bytes, List[int]] = defaultdict(list) + self.possible_future_claim_amount_by_name_and_hash: Dict[Tuple[str, bytes], int] = 
{}
+        self.possible_future_support_txos_by_claim_hash: DefaultDict[bytes, List[Tuple[int, int]]] = defaultdict(list)
+
+        self.removed_claims_to_send_es = set()  # cumulative changes across blocks to send ES
+        self.touched_claims_to_send_es = set()
+
+        self.removed_claim_hashes: Set[bytes] = set()  # per block changes
+        self.touched_claim_hashes: Set[bytes] = set()
+
+        self.signatures_changed = set()
+
+        self.pending_reposted = set()
+        self.pending_channel_counts = defaultdict(lambda: 0)
+        self.pending_support_amount_change = defaultdict(lambda: 0)
+
+        self.pending_channels = {}
+        self.amount_cache = {}
+        self.expired_claim_hashes: Set[bytes] = set()
+
+        self.doesnt_have_valid_signature: Set[bytes] = set()
+        self.claim_channels: Dict[bytes, bytes] = {}
+        self.hashXs_by_tx: DefaultDict[bytes, List[int]] = defaultdict(list)
+
+        self.pending_transaction_num_mapping: Dict[bytes, int] = {}
+        self.pending_transactions: Dict[int, bytes] = {}
+
+        self._stopping = False
+        self._ready_to_stop = asyncio.Event()
+
+    async def run_in_thread_with_lock(self, func, *args):
+        # Run in a thread to prevent blocking. Shielded so that
+        # cancellations from shutdown don't lose work - when the task
+        # completes the data will be flushed and then we shut down.
+        # Take the state lock to be certain in-memory state is
+        # consistent and not being updated elsewhere.
+        async def run_in_thread_locked():
+            async with self.state_lock:
+                return await asyncio.get_event_loop().run_in_executor(self._chain_executor, func, *args)
+        return await asyncio.shield(run_in_thread_locked())
+
+    async def run_in_thread(self, func, *args):
+        async def run_in_thread():
+            return await asyncio.get_event_loop().run_in_executor(self._chain_executor, func, *args)
+        return await asyncio.shield(run_in_thread())
+
+    async def refresh_mempool(self):
+        def fetch_mempool(mempool_prefix):
+            return {
+                k.tx_hash: v.raw_tx for (k, v) in mempool_prefix.iterate()
+            }
+
+        def update_mempool(unsafe_commit, mempool_prefix, to_put, to_delete):
+            for tx_hash, raw_tx in to_put:
+                mempool_prefix.stage_put((tx_hash,), (raw_tx,))
+            for tx_hash, raw_tx in to_delete.items():
+                mempool_prefix.stage_delete((tx_hash,), (raw_tx,))
+            unsafe_commit()
+
+        async with self.state_lock:
+            current_mempool = await self.run_in_thread(fetch_mempool, self.db.prefix_db.mempool_tx)
+            _to_put = []
+            try:
+                mempool_hashes = await self.daemon.mempool_hashes()
+            except (TypeError, RPCError):
+                self.logger.warning("failed to get mempool tx hashes, reorg underway?")
+                return
+            for hh in mempool_hashes:
+                tx_hash = bytes.fromhex(hh)[::-1]
+                if tx_hash in current_mempool:
+                    current_mempool.pop(tx_hash)
+                else:
+                    try:
+                        _to_put.append((tx_hash, bytes.fromhex(await self.daemon.getrawtransaction(hh))))
+                    except (TypeError, RPCError):
+                        self.logger.warning("failed to get a mempool tx, reorg underway?")
+                        return
+            if current_mempool:
+                if bytes.fromhex(await self.daemon.getbestblockhash())[::-1] != self.coin.header_hash(self.db.headers[-1]):
+                    return
+            await self.run_in_thread(
+                update_mempool, self.db.prefix_db.unsafe_commit, self.db.prefix_db.mempool_tx, _to_put, current_mempool
+            )
+
+    async def check_and_advance_blocks(self, raw_blocks):
+        """Process the list of raw blocks passed, detecting and handling reorgs."""
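The check below links headers by prev-hash: each new header's claimed parent is
compared against the current tip followed by the hashes of the preceding new
headers. A condensed sketch of the same decision (advance()/rewind() are
hypothetical stand-ins for the two branches that follow):

    hprevs = [coin.header_prevhash(h) for h in headers]           # what each header extends
    chain = [tip] + [coin.header_hash(h) for h in headers[:-1]]   # what we actually hold
    if hprevs == chain:
        advance()   # contiguous: apply the blocks in order
    elif hprevs[0] != chain[0]:
        rewind()    # first block doesn't build on our tip: reorg, back up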
+ """ + + if not raw_blocks: + return + first = self.height + 1 + blocks = [self.coin.block(raw_block, first + n) + for n, raw_block in enumerate(raw_blocks)] + headers = [block.header for block in blocks] + hprevs = [self.coin.header_prevhash(h) for h in headers] + chain = [self.tip] + [self.coin.header_hash(h) for h in headers[:-1]] + + if hprevs == chain: + total_start = time.perf_counter() + try: + for block in blocks: + if self._stopping: + return + start = time.perf_counter() + start_count = self.tx_count + txo_count = await self.run_in_thread_with_lock(self.advance_block, block) + self.logger.info( + "writer advanced to %i (%i txs, %i txos) in %0.3fs", self.height, self.tx_count - start_count, + txo_count, time.perf_counter() - start + ) + if self.height == self.coin.nExtendedClaimExpirationForkHeight: + self.logger.warning( + "applying extended claim expiration fork on claims accepted by, %i", self.height + ) + await self.run_in_thread_with_lock(self.db.apply_expiration_extension_fork) + except: + self.logger.exception("advance blocks failed") + raise + processed_time = time.perf_counter() - total_start + self.block_count_metric.set(self.height) + self.block_update_time_metric.observe(processed_time) + self.touched_hashXs.clear() + elif hprevs[0] != chain[0]: + min_start_height = max(self.height - self.coin.REORG_LIMIT, 0) + count = 1 + block_hashes_from_lbrycrd = await self.daemon.block_hex_hashes( + min_start_height, self.coin.REORG_LIMIT + ) + for height, block_hash in zip( + reversed(range(min_start_height, min_start_height + self.coin.REORG_LIMIT)), + reversed(block_hashes_from_lbrycrd)): + if self.db.get_block_hash(height)[::-1].hex() == block_hash: + break + count += 1 + self.logger.warning(f"blockchain reorg detected at {self.height}, unwinding last {count} blocks") + try: + assert count > 0, count + for _ in range(count): + await self.run_in_thread_with_lock(self.backup_block) + self.logger.info(f'backed up to height {self.height:,d}') + + if self.env.cache_all_claim_txos: + await self.db._read_claim_txos() # TODO: don't do this + await self.prefetcher.reset_height(self.height) + self.reorg_count_metric.inc() + except: + self.logger.exception("reorg blocks failed") + raise + finally: + self.logger.info("backed up to block %i", self.height) + else: + # It is probably possible but extremely rare that what + # bitcoind returns doesn't form a chain because it + # reorg-ed the chain as it was processing the batched + # block hash requests. Should this happen it's simplest + # just to reset the prefetcher and try again. 
+ self.logger.warning('daemon blocks do not form a chain; ' + 'resetting the prefetcher') + await self.prefetcher.reset_height(self.height) + + def _add_claim_or_update(self, height: int, txo: 'TxOutput', tx_hash: bytes, tx_num: int, nout: int, + spent_claims: typing.Dict[bytes, typing.Tuple[int, int, str]], first_input: 'TxInput'): + try: + claim_name = txo.claim.name.decode() + except UnicodeDecodeError: + claim_name = ''.join(chr(c) for c in txo.claim.name) + try: + normalized_name = normalize_name(claim_name) + except UnicodeDecodeError: + normalized_name = claim_name + if txo.is_claim: + claim_hash = hash160(tx_hash + pack('>I', nout))[::-1] + # print(f"\tnew {claim_hash.hex()} ({tx_num} {txo.value})") + else: + claim_hash = txo.claim.claim_hash[::-1] + # print(f"\tupdate {claim_hash.hex()} ({tx_num} {txo.value})") + + signing_channel_hash = None + channel_signature_is_valid = False + reposted_claim_hash = None + + try: + signable = txo.metadata + is_repost = signable.is_repost + if is_repost: + reposted_claim_hash = signable.repost.reference.claim_hash[::-1] + self.pending_reposted.add(reposted_claim_hash) + is_channel = signable.is_channel + if is_channel: + self.pending_channels[claim_hash] = signable.channel.public_key_bytes + if signable.is_signed: + signing_channel_hash = signable.signing_channel_hash[::-1] + except: # google.protobuf.message.DecodeError: Could not parse JSON. + signable = None + # is_repost = False + # is_channel = False + reposted_claim_hash = None + + self.doesnt_have_valid_signature.add(claim_hash) + raw_channel_tx = None + if signable and signable.signing_channel_hash: + signing_channel = self.db.get_claim_txo(signing_channel_hash) + + if signing_channel: + raw_channel_tx = self.db.prefix_db.tx.get( + self.db.get_tx_hash(signing_channel.tx_num), deserialize_value=False + ) + channel_pub_key_bytes = None + try: + if not signing_channel: + if txo.metadata.signing_channel_hash[::-1] in self.pending_channels: + channel_pub_key_bytes = self.pending_channels[signing_channel_hash] + elif raw_channel_tx: + chan_output = self.coin.transaction(raw_channel_tx).outputs[signing_channel.position] + channel_meta = chan_output.metadata # TODO: catch decode/type errors explicitly + channel_pub_key_bytes = channel_meta.channel.public_key_bytes + if channel_pub_key_bytes: + channel_signature_is_valid = self.coin.verify_signed_metadata( + channel_pub_key_bytes, txo, first_input + ) + if channel_signature_is_valid: + # print("\tvalidated signed claim") + self.pending_channel_counts[signing_channel_hash] += 1 + self.doesnt_have_valid_signature.remove(claim_hash) + self.claim_channels[claim_hash] = signing_channel_hash + # else: + # print("\tfailed to validate signed claim") + except: + self.logger.exception(f"error validating channel signature for %s:%i", tx_hash[::-1].hex(), nout) + + if txo.is_claim: # it's a root claim + root_tx_num, root_idx = tx_num, nout + previous_amount = 0 + else: # it's a claim update + if claim_hash not in spent_claims: + # print(f"\tthis is a wonky tx, contains unlinked claim update {claim_hash.hex()}") + return + if normalized_name != spent_claims[claim_hash][2]: + self.logger.warning( + f"{tx_hash[::-1].hex()} contains mismatched name for claim update {claim_hash.hex()}" + ) + return + (prev_tx_num, prev_idx, _) = spent_claims.pop(claim_hash) + # print(f"\tupdate {claim_hash.hex()} {tx_hash[::-1].hex()} {txo.value}") + if (prev_tx_num, prev_idx) in self.txo_to_claim: + previous_claim = self.txo_to_claim.pop((prev_tx_num, prev_idx)) + 
self.claim_hash_to_txo.pop(claim_hash) + root_tx_num, root_idx = previous_claim.root_tx_num, previous_claim.root_position + else: + previous_claim = self._make_pending_claim_txo(claim_hash) + root_tx_num, root_idx = previous_claim.root_tx_num, previous_claim.root_position + activation = self.db.get_activation(prev_tx_num, prev_idx) + claim_name = previous_claim.name + self.get_remove_activate_ops( + ACTIVATED_CLAIM_TXO_TYPE, claim_hash, prev_tx_num, prev_idx, activation, normalized_name, + previous_claim.amount + ) + previous_amount = previous_claim.amount + self.updated_claims.add(claim_hash) + + if self.env.cache_all_claim_txos: + self.db.claim_to_txo[claim_hash] = ClaimToTXOValue( + tx_num, nout, root_tx_num, root_idx, txo.value, channel_signature_is_valid, claim_name + ) + self.db.txo_to_claim[tx_num][nout] = claim_hash + + pending = StagedClaimtrieItem( + claim_name, normalized_name, claim_hash, txo.value, self.coin.get_expiration_height(height), tx_num, nout, + root_tx_num, root_idx, channel_signature_is_valid, signing_channel_hash, reposted_claim_hash + ) + self.txo_to_claim[(tx_num, nout)] = pending + self.claim_hash_to_txo[claim_hash] = (tx_num, nout) + self.get_add_claim_utxo_ops(pending) + + def get_add_claim_utxo_ops(self, pending: StagedClaimtrieItem): + # claim tip by claim hash + self.db.prefix_db.claim_to_txo.stage_put( + (pending.claim_hash,), (pending.tx_num, pending.position, pending.root_tx_num, pending.root_position, + pending.amount, pending.channel_signature_is_valid, pending.name) + ) + # claim hash by txo + self.db.prefix_db.txo_to_claim.stage_put( + (pending.tx_num, pending.position), (pending.claim_hash, pending.normalized_name) + ) + + # claim expiration + self.db.prefix_db.claim_expiration.stage_put( + (pending.expiration_height, pending.tx_num, pending.position), + (pending.claim_hash, pending.normalized_name) + ) + + # short url resolution + for prefix_len in range(10): + self.db.prefix_db.claim_short_id.stage_put( + (pending.normalized_name, pending.claim_hash.hex()[:prefix_len + 1], + pending.root_tx_num, pending.root_position), + (pending.tx_num, pending.position) + ) + + if pending.signing_hash and pending.channel_signature_is_valid: + # channel by stream + self.db.prefix_db.claim_to_channel.stage_put( + (pending.claim_hash, pending.tx_num, pending.position), (pending.signing_hash,) + ) + # stream by channel + self.db.prefix_db.channel_to_claim.stage_put( + (pending.signing_hash, pending.normalized_name, pending.tx_num, pending.position), + (pending.claim_hash,) + ) + + if pending.reposted_claim_hash: + self.db.prefix_db.repost.stage_put((pending.claim_hash,), (pending.reposted_claim_hash,)) + self.db.prefix_db.reposted_claim.stage_put( + (pending.reposted_claim_hash, pending.tx_num, pending.position), (pending.claim_hash,) + ) + + def get_remove_claim_utxo_ops(self, pending: StagedClaimtrieItem): + # claim tip by claim hash + self.db.prefix_db.claim_to_txo.stage_delete( + (pending.claim_hash,), (pending.tx_num, pending.position, pending.root_tx_num, pending.root_position, + pending.amount, pending.channel_signature_is_valid, pending.name) + ) + # claim hash by txo + self.db.prefix_db.txo_to_claim.stage_delete( + (pending.tx_num, pending.position), (pending.claim_hash, pending.normalized_name) + ) + + # claim expiration + self.db.prefix_db.claim_expiration.stage_delete( + (pending.expiration_height, pending.tx_num, pending.position), + (pending.claim_hash, pending.normalized_name) + ) + + # short url resolution + for prefix_len in range(10): + 
self.db.prefix_db.claim_short_id.stage_delete( + (pending.normalized_name, pending.claim_hash.hex()[:prefix_len + 1], + pending.root_tx_num, pending.root_position), + (pending.tx_num, pending.position) + ) + + if pending.signing_hash and pending.channel_signature_is_valid: + # channel by stream + self.db.prefix_db.claim_to_channel.stage_delete( + (pending.claim_hash, pending.tx_num, pending.position), (pending.signing_hash,) + ) + # stream by channel + self.db.prefix_db.channel_to_claim.stage_delete( + (pending.signing_hash, pending.normalized_name, pending.tx_num, pending.position), + (pending.claim_hash,) + ) + + if pending.reposted_claim_hash: + self.db.prefix_db.repost.stage_delete((pending.claim_hash,), (pending.reposted_claim_hash,)) + self.db.prefix_db.reposted_claim.stage_delete( + (pending.reposted_claim_hash, pending.tx_num, pending.position), (pending.claim_hash,) + ) + + def _add_support(self, height: int, txo: 'TxOutput', tx_num: int, nout: int): + supported_claim_hash = txo.support.claim_hash[::-1] + self.support_txos_by_claim[supported_claim_hash].append((tx_num, nout)) + self.support_txo_to_claim[(tx_num, nout)] = supported_claim_hash, txo.value + # print(f"\tsupport claim {supported_claim_hash.hex()} +{txo.value}") + + self.db.prefix_db.claim_to_support.stage_put((supported_claim_hash, tx_num, nout), (txo.value,)) + self.db.prefix_db.support_to_claim.stage_put((tx_num, nout), (supported_claim_hash,)) + self.pending_support_amount_change[supported_claim_hash] += txo.value + + def _add_claim_or_support(self, height: int, tx_hash: bytes, tx_num: int, nout: int, txo: 'TxOutput', + spent_claims: typing.Dict[bytes, Tuple[int, int, str]], first_input: 'TxInput'): + if txo.is_claim or txo.is_update: + self._add_claim_or_update(height, txo, tx_hash, tx_num, nout, spent_claims, first_input) + elif txo.is_support: + self._add_support(height, txo, tx_num, nout) + + def _spend_support_txo(self, height: int, txin: TxInput): + txin_num = self.get_pending_tx_num(txin.prev_hash) + activation = 0 + if (txin_num, txin.prev_idx) in self.support_txo_to_claim: + spent_support, support_amount = self.support_txo_to_claim.pop((txin_num, txin.prev_idx)) + self.support_txos_by_claim[spent_support].remove((txin_num, txin.prev_idx)) + supported_name = self._get_pending_claim_name(spent_support) + self.removed_support_txos_by_name_by_claim[supported_name][spent_support].append((txin_num, txin.prev_idx)) + else: + spent_support, support_amount = self.db.get_supported_claim_from_txo(txin_num, txin.prev_idx) + if not spent_support: # it is not a support + return + supported_name = self._get_pending_claim_name(spent_support) + if supported_name is not None: + self.removed_support_txos_by_name_by_claim[supported_name][spent_support].append( + (txin_num, txin.prev_idx)) + activation = self.db.get_activation(txin_num, txin.prev_idx, is_support=True) + if 0 < activation < self.height + 1: + self.removed_active_support_amount_by_claim[spent_support].append(support_amount) + if supported_name is not None and activation > 0: + self.get_remove_activate_ops( + ACTIVATED_SUPPORT_TXO_TYPE, spent_support, txin_num, txin.prev_idx, activation, supported_name, + support_amount + ) + # print(f"\tspent support for {spent_support.hex()} activation:{activation} {support_amount}") + self.db.prefix_db.claim_to_support.stage_delete((spent_support, txin_num, txin.prev_idx), (support_amount,)) + self.db.prefix_db.support_to_claim.stage_delete((txin_num, txin.prev_idx), (spent_support,)) + 
self.pending_support_amount_change[spent_support] -= support_amount + + def _spend_claim_txo(self, txin: TxInput, spent_claims: Dict[bytes, Tuple[int, int, str]]) -> bool: + txin_num = self.get_pending_tx_num(txin.prev_hash) + if (txin_num, txin.prev_idx) in self.txo_to_claim: + spent = self.txo_to_claim[(txin_num, txin.prev_idx)] + else: + if not self.db.get_cached_claim_exists(txin_num, txin.prev_idx): + # txo is not a claim + return False + spent_claim_hash_and_name = self.db.get_claim_from_txo( + txin_num, txin.prev_idx + ) + assert spent_claim_hash_and_name is not None + spent = self._make_pending_claim_txo(spent_claim_hash_and_name.claim_hash) + + if self.env.cache_all_claim_txos: + claim_hash = self.db.txo_to_claim[txin_num].pop(txin.prev_idx) + if not self.db.txo_to_claim[txin_num]: + self.db.txo_to_claim.pop(txin_num) + self.db.claim_to_txo.pop(claim_hash) + if spent.reposted_claim_hash: + self.pending_reposted.add(spent.reposted_claim_hash) + if spent.signing_hash and spent.channel_signature_is_valid and spent.signing_hash not in self.abandoned_claims: + self.pending_channel_counts[spent.signing_hash] -= 1 + spent_claims[spent.claim_hash] = (spent.tx_num, spent.position, spent.normalized_name) + # print(f"\tspend lbry://{spent.name}#{spent.claim_hash.hex()}") + self.get_remove_claim_utxo_ops(spent) + return True + + def _spend_claim_or_support_txo(self, height: int, txin: TxInput, spent_claims): + if not self._spend_claim_txo(txin, spent_claims): + self._spend_support_txo(height, txin) + + def _abandon_claim(self, claim_hash: bytes, tx_num: int, nout: int, normalized_name: str): + if (tx_num, nout) in self.txo_to_claim: + pending = self.txo_to_claim.pop((tx_num, nout)) + self.claim_hash_to_txo.pop(claim_hash) + self.abandoned_claims[pending.claim_hash] = pending + claim_root_tx_num, claim_root_idx = pending.root_tx_num, pending.root_position + prev_amount, prev_signing_hash = pending.amount, pending.signing_hash + reposted_claim_hash, name = pending.reposted_claim_hash, pending.name + expiration = self.coin.get_expiration_height(self.height) + signature_is_valid = pending.channel_signature_is_valid + else: + v = self.db.get_claim_txo( + claim_hash + ) + claim_root_tx_num, claim_root_idx, prev_amount = v.root_tx_num, v.root_position, v.amount + signature_is_valid, name = v.channel_signature_is_valid, v.name + prev_signing_hash = self.db.get_channel_for_claim(claim_hash, tx_num, nout) + reposted_claim_hash = self.db.get_repost(claim_hash) + expiration = self.coin.get_expiration_height(bisect_right(self.db.tx_counts, tx_num)) + self.abandoned_claims[claim_hash] = staged = StagedClaimtrieItem( + name, normalized_name, claim_hash, prev_amount, expiration, tx_num, nout, claim_root_tx_num, + claim_root_idx, signature_is_valid, prev_signing_hash, reposted_claim_hash + ) + for support_txo_to_clear in self.support_txos_by_claim[claim_hash]: + self.support_txo_to_claim.pop(support_txo_to_clear) + self.support_txos_by_claim[claim_hash].clear() + self.support_txos_by_claim.pop(claim_hash) + if normalized_name.startswith('@'): # abandon a channel, invalidate signatures + self._invalidate_channel_signatures(claim_hash) + + def _get_invalidate_signature_ops(self, pending: StagedClaimtrieItem): + if not pending.signing_hash: + return + self.db.prefix_db.claim_to_channel.stage_delete( + (pending.claim_hash, pending.tx_num, pending.position), (pending.signing_hash,) + ) + if pending.channel_signature_is_valid: + self.db.prefix_db.channel_to_claim.stage_delete( + (pending.signing_hash, 
pending.normalized_name, pending.tx_num, pending.position), + (pending.claim_hash,) + ) + self.db.prefix_db.claim_to_txo.stage_delete( + (pending.claim_hash,), + (pending.tx_num, pending.position, pending.root_tx_num, pending.root_position, pending.amount, + pending.channel_signature_is_valid, pending.name) + ) + self.db.prefix_db.claim_to_txo.stage_put( + (pending.claim_hash,), + (pending.tx_num, pending.position, pending.root_tx_num, pending.root_position, pending.amount, + False, pending.name) + ) + + def _invalidate_channel_signatures(self, claim_hash: bytes): + for (signed_claim_hash, ) in self.db.prefix_db.channel_to_claim.iterate( + prefix=(claim_hash, ), include_key=False): + if signed_claim_hash in self.abandoned_claims or signed_claim_hash in self.expired_claim_hashes: + continue + # there is no longer a signing channel for this claim as of this block + if signed_claim_hash in self.doesnt_have_valid_signature: + continue + # the signing channel changed in this block + if signed_claim_hash in self.claim_channels and signed_claim_hash != self.claim_channels[signed_claim_hash]: + continue + + # if the claim with an invalidated signature is in this block, update the StagedClaimtrieItem + # so that if we later try to spend it in this block we won't try to delete the channel info twice + if signed_claim_hash in self.claim_hash_to_txo: + signed_claim_txo = self.claim_hash_to_txo[signed_claim_hash] + claim = self.txo_to_claim[signed_claim_txo] + if claim.signing_hash != claim_hash: # claim was already invalidated this block + continue + self.txo_to_claim[signed_claim_txo] = claim.invalidate_signature() + else: + claim = self._make_pending_claim_txo(signed_claim_hash) + self.signatures_changed.add(signed_claim_hash) + self.pending_channel_counts[claim_hash] -= 1 + self._get_invalidate_signature_ops(claim) + + for staged in list(self.txo_to_claim.values()): + needs_invalidate = staged.claim_hash not in self.doesnt_have_valid_signature + if staged.signing_hash == claim_hash and needs_invalidate: + self._get_invalidate_signature_ops(staged) + self.txo_to_claim[self.claim_hash_to_txo[staged.claim_hash]] = staged.invalidate_signature() + self.signatures_changed.add(staged.claim_hash) + self.pending_channel_counts[claim_hash] -= 1 + + def _make_pending_claim_txo(self, claim_hash: bytes): + claim = self.db.get_claim_txo(claim_hash) + if claim_hash in self.doesnt_have_valid_signature: + signing_hash = None + else: + signing_hash = self.db.get_channel_for_claim(claim_hash, claim.tx_num, claim.position) + reposted_claim_hash = self.db.get_repost(claim_hash) + return StagedClaimtrieItem( + claim.name, claim.normalized_name, claim_hash, claim.amount, + self.coin.get_expiration_height( + bisect_right(self.db.tx_counts, claim.tx_num), + extended=self.height >= self.coin.nExtendedClaimExpirationForkHeight + ), + claim.tx_num, claim.position, claim.root_tx_num, claim.root_position, + claim.channel_signature_is_valid, signing_hash, reposted_claim_hash + ) + + def _expire_claims(self, height: int): + expired = self.db.get_expired_by_height(height) + self.expired_claim_hashes.update(set(expired.keys())) + spent_claims = {} + for expired_claim_hash, (tx_num, position, name, txi) in expired.items(): + if (tx_num, position) not in self.txo_to_claim: + self._spend_claim_txo(txi, spent_claims) + if expired: + # abandon the channels last to handle abandoned signed claims in the same tx, + # see test_abandon_channel_and_claims_in_same_tx + expired_channels = {} + for abandoned_claim_hash, (tx_num, nout, 
normalized_name) in spent_claims.items():
+                if normalized_name.startswith('@'):
+                    expired_channels[abandoned_claim_hash] = (tx_num, nout, normalized_name)
+                else:
+                    # print(f"\texpire {abandoned_claim_hash.hex()} {tx_num} {nout}")
+                    self._abandon_claim(abandoned_claim_hash, tx_num, nout, normalized_name)
+
+            # do this to follow the same content claim removing pathway as if a claim (possible channel) was abandoned
+            for abandoned_claim_hash, (tx_num, nout, normalized_name) in expired_channels.items():
+                # print(f"\texpire {abandoned_claim_hash.hex()} {tx_num} {nout}")
+                self._abandon_claim(abandoned_claim_hash, tx_num, nout, normalized_name)
+
+    def _cached_get_active_amount(self, claim_hash: bytes, txo_type: int, height: int) -> int:
+        if (claim_hash, txo_type, height) in self.amount_cache:
+            return self.amount_cache[(claim_hash, txo_type, height)]
+        if txo_type == ACTIVATED_CLAIM_TXO_TYPE:
+            if claim_hash in self.claim_hash_to_txo:
+                amount = self.txo_to_claim[self.claim_hash_to_txo[claim_hash]].amount
+            else:
+                amount = self.db.get_active_amount_as_of_height(
+                    claim_hash, height
+                )
+            self.amount_cache[(claim_hash, txo_type, height)] = amount
+        else:
+            self.amount_cache[(claim_hash, txo_type, height)] = amount = self.db._get_active_amount(
+                claim_hash, txo_type, height
+            )
+        return amount
+
+    def _get_pending_claim_amount(self, name: str, claim_hash: bytes, height=None) -> int:
+        if (name, claim_hash) in self.activated_claim_amount_by_name_and_hash:
+            if claim_hash in self.claim_hash_to_txo:
+                return self.txo_to_claim[self.claim_hash_to_txo[claim_hash]].amount
+            return self.activated_claim_amount_by_name_and_hash[(name, claim_hash)]
+        if (name, claim_hash) in self.possible_future_claim_amount_by_name_and_hash:
+            return self.possible_future_claim_amount_by_name_and_hash[(name, claim_hash)]
+        return self._cached_get_active_amount(claim_hash, ACTIVATED_CLAIM_TXO_TYPE, height or (self.height + 1))
+
+    def _get_pending_claim_name(self, claim_hash: bytes) -> Optional[str]:
+        assert claim_hash is not None
+        if claim_hash in self.claim_hash_to_txo:
+            return self.txo_to_claim[self.claim_hash_to_txo[claim_hash]].normalized_name
+        claim_info = self.db.get_claim_txo(claim_hash)
+        if claim_info:
+            return claim_info.normalized_name
+
+    def _get_pending_supported_amount(self, claim_hash: bytes, height: Optional[int] = None) -> int:
+        amount = self._cached_get_active_amount(claim_hash, ACTIVATED_SUPPORT_TXO_TYPE, height or (self.height + 1))
+        if claim_hash in self.activated_support_amount_by_claim:
+            amount += sum(self.activated_support_amount_by_claim[claim_hash])
+        if claim_hash in self.possible_future_support_amounts_by_claim_hash:
+            amount += sum(self.possible_future_support_amounts_by_claim_hash[claim_hash])
+        if claim_hash in self.removed_active_support_amount_by_claim:
+            return amount - sum(self.removed_active_support_amount_by_claim[claim_hash])
+        return amount
+
+    def _get_pending_effective_amount(self, name: str, claim_hash: bytes, height: Optional[int] = None) -> int:
+        claim_amount = self._get_pending_claim_amount(name, claim_hash, height=height)
+        support_amount = self._get_pending_supported_amount(claim_hash, height=height)
+        return claim_amount + support_amount
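The effective amount computed above is what takeover decisions compare: the
claim's own pending amount plus activated supports, minus supports removed in
this block. The same arithmetic in miniature (illustrative values only):

    def effective_amount(claim_amount, activated_supports, removed_supports):
        # pending claim amount + active support amounts - supports spent this block
        return claim_amount + sum(activated_supports) - sum(removed_supports)

    assert effective_amount(10, [5, 5], [3]) == 17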
+
+    def get_activate_ops(self, txo_type: int, claim_hash: bytes, tx_num: int, position: int,
+                         activation_height: int, name: str, amount: int):
+        self.db.prefix_db.activated.stage_put(
+            (txo_type, tx_num, position), (activation_height, claim_hash, name)
+        )
+        self.db.prefix_db.pending_activation.stage_put(
+            (activation_height, txo_type, tx_num, position), (claim_hash, name)
+        )
+        self.db.prefix_db.active_amount.stage_put(
+            (claim_hash, txo_type, activation_height, tx_num, position), (amount,)
+        )
+
+    def get_remove_activate_ops(self, txo_type: int, claim_hash: bytes, tx_num: int, position: int,
+                                activation_height: int, name: str, amount: int):
+        self.db.prefix_db.activated.stage_delete(
+            (txo_type, tx_num, position), (activation_height, claim_hash, name)
+        )
+        self.db.prefix_db.pending_activation.stage_delete(
+            (activation_height, txo_type, tx_num, position), (claim_hash, name)
+        )
+        self.db.prefix_db.active_amount.stage_delete(
+            (claim_hash, txo_type, activation_height, tx_num, position), (amount,)
+        )
+
+    def _get_takeover_ops(self, height: int):
+
+        # cache for controlling claims as of the previous block
+        controlling_claims = {}
+
+        def get_controlling(_name):
+            if _name not in controlling_claims:
+                _controlling = self.db.get_controlling_claim(_name)
+                controlling_claims[_name] = _controlling
+            else:
+                _controlling = controlling_claims[_name]
+            return _controlling
+
+        names_with_abandoned_or_updated_controlling_claims: List[str] = []
+
+        # get the claims and supports previously scheduled to be activated at this block
+        activated_at_height = self.db.get_activated_at_height(height)
+        activate_in_future = defaultdict(lambda: defaultdict(list))
+        future_activations = defaultdict(dict)
+
+        def get_delayed_activate_ops(name: str, claim_hash: bytes, is_new_claim: bool, tx_num: int, nout: int,
+                                     amount: int, is_support: bool):
+            controlling = get_controlling(name)
+            nothing_is_controlling = not controlling
+            staged_is_controlling = False if not controlling else claim_hash == controlling.claim_hash
+            controlling_is_abandoned = False if not controlling else \
+                name in names_with_abandoned_or_updated_controlling_claims
+
+            if nothing_is_controlling or staged_is_controlling or controlling_is_abandoned:
+                delay = 0
+            elif is_new_claim:
+                delay = self.coin.get_delay_for_name(height - controlling.height)
+            else:
+                controlling_effective_amount = self._get_pending_effective_amount(name, controlling.claim_hash)
+                staged_effective_amount = self._get_pending_effective_amount(name, claim_hash)
+                staged_update_could_cause_takeover = staged_effective_amount > controlling_effective_amount
+                delay = 0 if not staged_update_could_cause_takeover else self.coin.get_delay_for_name(
+                    height - controlling.height
+                )
+            if delay == 0:  # if delay was 0 it needs to be considered for takeovers
+                activated_at_height[PendingActivationValue(claim_hash, name)].append(
+                    PendingActivationKey(
+                        height, ACTIVATED_SUPPORT_TXO_TYPE if is_support else ACTIVATED_CLAIM_TXO_TYPE, tx_num, nout
+                    )
+                )
+            else:  # if the delay was higher it still needs to be considered in case something else triggers a takeover
+                activate_in_future[name][claim_hash].append((
+                    PendingActivationKey(
+                        height + delay, ACTIVATED_SUPPORT_TXO_TYPE if is_support else ACTIVATED_CLAIM_TXO_TYPE,
+                        tx_num, nout
+                    ), amount
+                ))
+                if is_support:
+                    self.possible_future_support_txos_by_claim_hash[claim_hash].append((tx_num, nout))
+            self.get_activate_ops(
+                ACTIVATED_SUPPORT_TXO_TYPE if is_support else ACTIVATED_CLAIM_TXO_TYPE, claim_hash, tx_num, nout,
+                height + delay, name, amount
+            )
+
+        # determine names needing takeover/deletion due to controlling claims being abandoned
+        # and add ops to deactivate abandoned claims
+        for claim_hash, staged in self.abandoned_claims.items():
+            controlling =
get_controlling(staged.normalized_name) + if controlling and controlling.claim_hash == claim_hash: + names_with_abandoned_or_updated_controlling_claims.append(staged.normalized_name) + # print(f"\t{staged.name} needs takeover") + activation = self.db.get_activation(staged.tx_num, staged.position) + if activation > 0: # db returns -1 for non-existent txos + # removed queued future activation from the db + self.get_remove_activate_ops( + ACTIVATED_CLAIM_TXO_TYPE, staged.claim_hash, staged.tx_num, staged.position, + activation, staged.normalized_name, staged.amount + ) + else: + # it hadn't yet been activated + pass + + # get the removed activated supports for controlling claims to determine if takeovers are possible + abandoned_support_check_need_takeover = defaultdict(list) + for claim_hash, amounts in self.removed_active_support_amount_by_claim.items(): + name = self._get_pending_claim_name(claim_hash) + if name is None: + continue + controlling = get_controlling(name) + if controlling and controlling.claim_hash == claim_hash and \ + name not in names_with_abandoned_or_updated_controlling_claims: + abandoned_support_check_need_takeover[(name, claim_hash)].extend(amounts) + + # get the controlling claims with updates to the claim to check if takeover is needed + for claim_hash in self.updated_claims: + if claim_hash in self.abandoned_claims: + continue + name = self._get_pending_claim_name(claim_hash) + if name is None: + continue + controlling = get_controlling(name) + if controlling and controlling.claim_hash == claim_hash and \ + name not in names_with_abandoned_or_updated_controlling_claims: + names_with_abandoned_or_updated_controlling_claims.append(name) + + # prepare to activate or delay activation of the pending claims being added this block + for (tx_num, nout), staged in self.txo_to_claim.items(): + is_delayed = not staged.is_update + prev_txo = self.db.get_cached_claim_txo(staged.claim_hash) + if prev_txo: + prev_activation = self.db.get_activation(prev_txo.tx_num, prev_txo.position) + if height < prev_activation or prev_activation < 0: + is_delayed = True + get_delayed_activate_ops( + staged.normalized_name, staged.claim_hash, is_delayed, tx_num, nout, staged.amount, + is_support=False + ) + + # and the supports + for (tx_num, nout), (claim_hash, amount) in self.support_txo_to_claim.items(): + if claim_hash in self.abandoned_claims: + continue + elif claim_hash in self.claim_hash_to_txo: + name = self.txo_to_claim[self.claim_hash_to_txo[claim_hash]].normalized_name + staged_is_new_claim = not self.txo_to_claim[self.claim_hash_to_txo[claim_hash]].is_update + else: + supported_claim_info = self.db.get_claim_txo(claim_hash) + if not supported_claim_info: + # the supported claim doesn't exist + continue + else: + v = supported_claim_info + name = v.normalized_name + staged_is_new_claim = (v.root_tx_num, v.root_position) == (v.tx_num, v.position) + get_delayed_activate_ops( + name, claim_hash, staged_is_new_claim, tx_num, nout, amount, is_support=True + ) + + # add the activation/delayed-activation ops + for activated, activated_txos in activated_at_height.items(): + controlling = get_controlling(activated.normalized_name) + if activated.claim_hash in self.abandoned_claims: + continue + reactivate = False + if not controlling or controlling.claim_hash == activated.claim_hash: + # there is no delay for claims to a name without a controlling value or to the controlling value + reactivate = True + for activated_txo in activated_txos: + if activated_txo.is_support and 
(activated_txo.tx_num, activated_txo.position) in \ + self.removed_support_txos_by_name_by_claim[activated.normalized_name][activated.claim_hash]: + # print("\tskip activate support for pending abandoned claim") + continue + if activated_txo.is_claim: + txo_type = ACTIVATED_CLAIM_TXO_TYPE + txo_tup = (activated_txo.tx_num, activated_txo.position) + if txo_tup in self.txo_to_claim: + amount = self.txo_to_claim[txo_tup].amount + else: + amount = self.db.get_claim_txo_amount( + activated.claim_hash + ) + if amount is None: + # print("\tskip activate for non existent claim") + continue + self.activated_claim_amount_by_name_and_hash[(activated.normalized_name, activated.claim_hash)] = amount + else: + txo_type = ACTIVATED_SUPPORT_TXO_TYPE + txo_tup = (activated_txo.tx_num, activated_txo.position) + if txo_tup in self.support_txo_to_claim: + amount = self.support_txo_to_claim[txo_tup][1] + else: + amount = self.db.get_support_txo_amount( + activated.claim_hash, activated_txo.tx_num, activated_txo.position + ) + if amount is None: + # print("\tskip activate support for non existent claim") + continue + self.activated_support_amount_by_claim[activated.claim_hash].append(amount) + self.activation_by_claim_by_name[activated.normalized_name][activated.claim_hash].append((activated_txo, amount)) + # print(f"\tactivate {'support' if txo_type == ACTIVATED_SUPPORT_TXO_TYPE else 'claim'} " + # f"{activated.claim_hash.hex()} @ {activated_txo.height}") + + # go through claims where the controlling claim or supports to the controlling claim have been abandoned + # check if takeovers are needed or if the name node is now empty + need_reactivate_if_takes_over = {} + for need_takeover in names_with_abandoned_or_updated_controlling_claims: + existing = self.db.get_claim_txos_for_name(need_takeover) + has_candidate = False + # add existing claims to the queue for the takeover + # track that we need to reactivate these if one of them becomes controlling + for candidate_claim_hash, (tx_num, nout) in existing.items(): + if candidate_claim_hash in self.abandoned_claims: + continue + has_candidate = True + existing_activation = self.db.get_activation(tx_num, nout) + activate_key = PendingActivationKey( + existing_activation, ACTIVATED_CLAIM_TXO_TYPE, tx_num, nout + ) + self.activation_by_claim_by_name[need_takeover][candidate_claim_hash].append(( + activate_key, self.db.get_claim_txo_amount(candidate_claim_hash) + )) + need_reactivate_if_takes_over[(need_takeover, candidate_claim_hash)] = activate_key + # print(f"\tcandidate to takeover abandoned controlling claim for " + # f"{activate_key.tx_num}:{activate_key.position} {activate_key.is_claim}") + if not has_candidate: + # remove name takeover entry, the name is now unclaimed + controlling = get_controlling(need_takeover) + self.db.prefix_db.claim_takeover.stage_delete( + (need_takeover,), (controlling.claim_hash, controlling.height) + ) + + # scan for possible takeovers out of the accumulated activations, of these make sure there + # aren't any future activations for the taken over names with yet higher amounts, if there are + # these need to get activated now and take over instead. 
for example: + # claim A is winning for 0.1 for long enough for a > 1 takeover delay + # claim B is made for 0.2 + # a block later, claim C is made for 0.3, it will schedule to activate 1 (or rarely 2) block(s) after B + # upon the delayed activation of B, we need to detect to activate C and make it take over early instead + + claim_exists = {} + for activated, activated_claim_txo in self.db.get_future_activated(height).items(): + # uses the pending effective amount for the future activation height, not the current height + future_amount = self._get_pending_claim_amount( + activated.normalized_name, activated.claim_hash, activated_claim_txo.height + 1 + ) + if activated.claim_hash not in claim_exists: + claim_exists[activated.claim_hash] = activated.claim_hash in self.claim_hash_to_txo or ( + self.db.get_claim_txo(activated.claim_hash) is not None) + if claim_exists[activated.claim_hash] and activated.claim_hash not in self.abandoned_claims: + v = future_amount, activated, activated_claim_txo + future_activations[activated.normalized_name][activated.claim_hash] = v + + for name, future_activated in activate_in_future.items(): + for claim_hash, activated in future_activated.items(): + if claim_hash not in claim_exists: + claim_exists[claim_hash] = claim_hash in self.claim_hash_to_txo or ( + self.db.get_claim_txo(claim_hash) is not None) + if not claim_exists[claim_hash]: + continue + if claim_hash in self.abandoned_claims: + continue + for txo in activated: + v = txo[1], PendingActivationValue(claim_hash, name), txo[0] + future_activations[name][claim_hash] = v + if txo[0].is_claim: + self.possible_future_claim_amount_by_name_and_hash[(name, claim_hash)] = txo[1] + else: + self.possible_future_support_amounts_by_claim_hash[claim_hash].append(txo[1]) + + # process takeovers + checked_names = set() + for name, activated in self.activation_by_claim_by_name.items(): + checked_names.add(name) + controlling = controlling_claims[name] + amounts = { + claim_hash: self._get_pending_effective_amount(name, claim_hash) + for claim_hash in activated.keys() if claim_hash not in self.abandoned_claims + } + # if there is a controlling claim include it in the amounts to ensure it remains the max + if controlling and controlling.claim_hash not in self.abandoned_claims: + amounts[controlling.claim_hash] = self._get_pending_effective_amount(name, controlling.claim_hash) + winning_claim_hash = max(amounts, key=lambda x: amounts[x]) + if not controlling or (winning_claim_hash != controlling.claim_hash and + name in names_with_abandoned_or_updated_controlling_claims) or \ + ((winning_claim_hash != controlling.claim_hash) and (amounts[winning_claim_hash] > amounts[controlling.claim_hash])): + amounts_with_future_activations = {claim_hash: amount for claim_hash, amount in amounts.items()} + amounts_with_future_activations.update( + { + claim_hash: self._get_pending_effective_amount( + name, claim_hash, self.height + 1 + self.coin.maxTakeoverDelay + ) for claim_hash in future_activations[name] + } + ) + winning_including_future_activations = max( + amounts_with_future_activations, key=lambda x: amounts_with_future_activations[x] + ) + future_winning_amount = amounts_with_future_activations[winning_including_future_activations] + + if winning_claim_hash != winning_including_future_activations and \ + future_winning_amount > amounts[winning_claim_hash]: + # print(f"\ttakeover by {winning_claim_hash.hex()} triggered early activation and " + # f"takeover by {winning_including_future_activations.hex()} at {height}") + # 
handle a pending activated claim jumping the takeover delay when another name takes over + if winning_including_future_activations not in self.claim_hash_to_txo: + claim = self.db.get_claim_txo(winning_including_future_activations) + tx_num = claim.tx_num + position = claim.position + amount = claim.amount + activation = self.db.get_activation(tx_num, position) + else: + tx_num, position = self.claim_hash_to_txo[winning_including_future_activations] + amount = self.txo_to_claim[(tx_num, position)].amount + activation = None + for (k, tx_amount) in activate_in_future[name][winning_including_future_activations]: + if (k.tx_num, k.position) == (tx_num, position): + activation = k.height + break + if activation is None: + # TODO: reproduce this in an integration test (block 604718) + _k = PendingActivationValue(winning_including_future_activations, name) + if _k in activated_at_height: + for pending_activation in activated_at_height[_k]: + if (pending_activation.tx_num, pending_activation.position) == (tx_num, position): + activation = pending_activation.height + break + assert None not in (amount, activation) + # update the claim that's activating early + self.get_remove_activate_ops( + ACTIVATED_CLAIM_TXO_TYPE, winning_including_future_activations, tx_num, + position, activation, name, amount + ) + self.get_activate_ops( + ACTIVATED_CLAIM_TXO_TYPE, winning_including_future_activations, tx_num, + position, height, name, amount + ) + + for (k, amount) in activate_in_future[name][winning_including_future_activations]: + txo = (k.tx_num, k.position) + if txo in self.possible_future_support_txos_by_claim_hash[winning_including_future_activations]: + self.get_remove_activate_ops( + ACTIVATED_SUPPORT_TXO_TYPE, winning_including_future_activations, k.tx_num, + k.position, k.height, name, amount + ) + self.get_activate_ops( + ACTIVATED_SUPPORT_TXO_TYPE, winning_including_future_activations, k.tx_num, + k.position, height, name, amount + ) + self.taken_over_names.add(name) + if controlling: + self.db.prefix_db.claim_takeover.stage_delete( + (name,), (controlling.claim_hash, controlling.height) + ) + self.db.prefix_db.claim_takeover.stage_put((name,), (winning_including_future_activations, height)) + self.touched_claim_hashes.add(winning_including_future_activations) + if controlling and controlling.claim_hash not in self.abandoned_claims: + self.touched_claim_hashes.add(controlling.claim_hash) + elif not controlling or (winning_claim_hash != controlling.claim_hash and + name in names_with_abandoned_or_updated_controlling_claims) or \ + ((winning_claim_hash != controlling.claim_hash) and (amounts[winning_claim_hash] > amounts[controlling.claim_hash])): + # print(f"\ttakeover by {winning_claim_hash.hex()} at {height}") + if (name, winning_claim_hash) in need_reactivate_if_takes_over: + previous_pending_activate = need_reactivate_if_takes_over[(name, winning_claim_hash)] + amount = self.db.get_claim_txo_amount( + winning_claim_hash + ) + if winning_claim_hash in self.claim_hash_to_txo: + tx_num, position = self.claim_hash_to_txo[winning_claim_hash] + amount = self.txo_to_claim[(tx_num, position)].amount + else: + tx_num, position = previous_pending_activate.tx_num, previous_pending_activate.position + if previous_pending_activate.height > height: + # the claim had a pending activation in the future, move it to now + if tx_num < self.tx_count: + self.get_remove_activate_ops( + ACTIVATED_CLAIM_TXO_TYPE, winning_claim_hash, tx_num, + position, previous_pending_activate.height, name, amount + ) + 
self.get_activate_ops( + ACTIVATED_CLAIM_TXO_TYPE, winning_claim_hash, tx_num, + position, height, name, amount + ) + self.taken_over_names.add(name) + if controlling: + self.db.prefix_db.claim_takeover.stage_delete( + (name,), (controlling.claim_hash, controlling.height) + ) + self.db.prefix_db.claim_takeover.stage_put((name,), (winning_claim_hash, height)) + if controlling and controlling.claim_hash not in self.abandoned_claims: + self.touched_claim_hashes.add(controlling.claim_hash) + self.touched_claim_hashes.add(winning_claim_hash) + elif winning_claim_hash == controlling.claim_hash: + # print("\tstill winning") + pass + else: + # print("\tno takeover") + pass + + # handle remaining takeovers from abandoned supports + for (name, claim_hash), amounts in abandoned_support_check_need_takeover.items(): + if name in checked_names: + continue + checked_names.add(name) + controlling = get_controlling(name) + amounts = { + claim_hash: self._get_pending_effective_amount(name, claim_hash) + for claim_hash in self.db.get_claims_for_name(name) if claim_hash not in self.abandoned_claims + } + if controlling and controlling.claim_hash not in self.abandoned_claims: + amounts[controlling.claim_hash] = self._get_pending_effective_amount(name, controlling.claim_hash) + winning = max(amounts, key=lambda x: amounts[x]) + + if (controlling and winning != controlling.claim_hash) or (not controlling and winning): + self.taken_over_names.add(name) + # print(f"\ttakeover from abandoned support {controlling.claim_hash.hex()} -> {winning.hex()}") + if controlling: + self.db.prefix_db.claim_takeover.stage_delete( + (name,), (controlling.claim_hash, controlling.height) + ) + self.db.prefix_db.claim_takeover.stage_put((name,), (winning, height)) + if controlling: + self.touched_claim_hashes.add(controlling.claim_hash) + self.touched_claim_hashes.add(winning) + + def _get_cumulative_update_ops(self, height: int): + # update the last takeover height for names with takeovers + for name in self.taken_over_names: + self.touched_claim_hashes.update( + {claim_hash for claim_hash in self.db.get_claims_for_name(name) + if claim_hash not in self.abandoned_claims} + ) + + # gather cumulative removed/touched sets to update the search index + self.removed_claim_hashes.update(set(self.abandoned_claims.keys())) + self.touched_claim_hashes.difference_update(self.removed_claim_hashes) + self.touched_claim_hashes.update( + set( + map(lambda item: item[1], self.activated_claim_amount_by_name_and_hash.keys()) + ).union( + set(self.claim_hash_to_txo.keys()) + ).union( + self.removed_active_support_amount_by_claim.keys() + ).union( + self.signatures_changed + ).union( + set(self.removed_active_support_amount_by_claim.keys()) + ).union( + set(self.activated_support_amount_by_claim.keys()) + ).union( + set(self.pending_support_amount_change.keys()) + ).difference( + self.removed_claim_hashes + ) + ) + + # update support amount totals + for supported_claim, amount in self.pending_support_amount_change.items(): + existing = self.db.prefix_db.support_amount.get(supported_claim) + total = amount + if existing is not None: + total += existing.amount + self.db.prefix_db.support_amount.stage_delete((supported_claim,), existing) + self.db.prefix_db.support_amount.stage_put((supported_claim,), (total,)) + + # use the cumulative changes to update bid ordered resolve + for removed in self.removed_claim_hashes: + removed_claim = self.db.get_claim_txo(removed) + if removed_claim: + amt = self.db.get_url_effective_amount( + 
removed_claim.normalized_name, removed + ) + if amt: + self.db.prefix_db.effective_amount.stage_delete( + (removed_claim.normalized_name, amt.effective_amount, amt.tx_num, amt.position), (removed,) + ) + for touched in self.touched_claim_hashes: + prev_effective_amount = 0 + + if touched in self.claim_hash_to_txo: + pending = self.txo_to_claim[self.claim_hash_to_txo[touched]] + name, tx_num, position = pending.normalized_name, pending.tx_num, pending.position + claim_from_db = self.db.get_claim_txo(touched) + if claim_from_db: + claim_amount_info = self.db.get_url_effective_amount(name, touched) + if claim_amount_info: + prev_effective_amount = claim_amount_info.effective_amount + self.db.prefix_db.effective_amount.stage_delete( + (name, claim_amount_info.effective_amount, claim_amount_info.tx_num, + claim_amount_info.position), (touched,) + ) + else: + v = self.db.get_claim_txo(touched) + if not v: + continue + name, tx_num, position = v.normalized_name, v.tx_num, v.position + amt = self.db.get_url_effective_amount(name, touched) + if amt: + prev_effective_amount = amt.effective_amount + self.db.prefix_db.effective_amount.stage_delete( + (name, prev_effective_amount, amt.tx_num, amt.position), (touched,) + ) + + new_effective_amount = self._get_pending_effective_amount(name, touched) + self.db.prefix_db.effective_amount.stage_put( + (name, new_effective_amount, tx_num, position), (touched,) + ) + if touched in self.claim_hash_to_txo or touched in self.removed_claim_hashes \ + or touched in self.pending_support_amount_change: + # exclude sending notifications for claims/supports that activated but + # weren't added/spent in this block + self.db.prefix_db.trending_notification.stage_put( + (height, touched), (prev_effective_amount, new_effective_amount) + ) + + for channel_hash, count in self.pending_channel_counts.items(): + if count != 0: + channel_count_val = self.db.prefix_db.channel_count.get(channel_hash) + channel_count = 0 if not channel_count_val else channel_count_val.count + if channel_count_val is not None: + self.db.prefix_db.channel_count.stage_delete((channel_hash,), (channel_count,)) + self.db.prefix_db.channel_count.stage_put((channel_hash,), (channel_count + count,)) + + self.touched_claim_hashes.update( + {k for k in self.pending_reposted if k not in self.removed_claim_hashes} + ) + self.touched_claim_hashes.update( + {k for k, v in self.pending_channel_counts.items() if v != 0 and k not in self.removed_claim_hashes} + ) + self.touched_claims_to_send_es.update(self.touched_claim_hashes) + self.touched_claims_to_send_es.difference_update(self.removed_claim_hashes) + self.removed_claims_to_send_es.update(self.removed_claim_hashes) +
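The loop above keeps the bid-ordered `effective_amount` index consistent by staging a delete of the old amount key and a put of the new one: in a key-sorted store an index entry cannot be updated in place, so a change of effective amount means re-keying. A standalone sketch of that delete-old/put-new pattern (illustrative only, using an in-memory ordered map as a hypothetical stand-in for scribe's prefix DB):

```python
# Hypothetical stand-in for the effective_amount prefix: an ordered map keyed by
# (name, -effective_amount, tx_num, position) so the winner sorts first per name.
from sortedcontainers import SortedDict  # assumption: any key-ordered map works

index = SortedDict()

def update_effective_amount(name, claim_hash, old_key, new_amount, tx_num, position):
    if old_key is not None:
        del index[old_key]                       # analogous to stage_delete of the stale key
    new_key = (name, -new_amount, tx_num, position)
    index[new_key] = claim_hash                  # analogous to stage_put under the new amount
    return new_key

def winning_claim(name):
    # a prefix scan: the first entry for `name` carries the largest effective amount
    for (n, neg_amount, _, _), claim_hash in index.items():
        if n == name:
            return claim_hash, -neg_amount
```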
+ def advance_block(self, block): + height = self.height + 1 + # print("advance ", height) + # Use local vars for speed in the loops + tx_count = self.tx_count + spend_utxo = self.spend_utxo + add_utxo = self.add_utxo + spend_claim_or_support_txo = self._spend_claim_or_support_txo + add_claim_or_support = self._add_claim_or_support + txs: List[Tuple[Tx, bytes]] = block.transactions + txo_count = 0 + + self.db.prefix_db.block_hash.stage_put(key_args=(height,), value_args=(self.coin.header_hash(block.header),)) + self.db.prefix_db.header.stage_put(key_args=(height,), value_args=(block.header,)) + self.db.prefix_db.block_txs.stage_put(key_args=(height,), value_args=([tx_hash for tx, tx_hash in txs],)) + + for tx, tx_hash in txs: + spent_claims = {} + # clean up the mempool: delete txs that were already in the mempool/staged to be added, + # leave txs in the mempool that weren't in the block + mempool_tx = self.db.prefix_db.mempool_tx.get_pending(tx_hash) + if mempool_tx: + self.db.prefix_db.mempool_tx.stage_delete((tx_hash,), mempool_tx) + + self.db.prefix_db.tx.stage_put(key_args=(tx_hash,), value_args=(tx.raw,)) + self.db.prefix_db.tx_num.stage_put(key_args=(tx_hash,), value_args=(tx_count,)) + self.db.prefix_db.tx_hash.stage_put(key_args=(tx_count,), value_args=(tx_hash,)) + + # Spend the inputs + for txin in tx.inputs: + if txin.is_generation(): + continue + # spend utxo for address histories + hashX = spend_utxo(txin.prev_hash, txin.prev_idx) + if hashX: + if tx_count not in self.hashXs_by_tx[hashX]: + self.hashXs_by_tx[hashX].append(tx_count) + # spend claim/support txo + spend_claim_or_support_txo(height, txin, spent_claims) + + # Add the new UTXOs + for nout, txout in enumerate(tx.outputs): + txo_count += 1 + # Get the hashX. Ignore unspendable outputs + hashX = add_utxo(tx_hash, tx_count, nout, txout) + if hashX: + # self._set_hashX_cache(hashX) + if tx_count not in self.hashXs_by_tx[hashX]: + self.hashXs_by_tx[hashX].append(tx_count) + # add claim/support txo + add_claim_or_support( + height, tx_hash, tx_count, nout, txout, spent_claims, tx.inputs[0] + ) + + # Handle abandoned claims + abandoned_channels = {} + # abandon the channels last to handle abandoned signed claims in the same tx, + # see test_abandon_channel_and_claims_in_same_tx + for abandoned_claim_hash, (tx_num, nout, normalized_name) in spent_claims.items(): + if normalized_name.startswith('@'): + abandoned_channels[abandoned_claim_hash] = (tx_num, nout, normalized_name) + else: + # print(f"\tabandon {normalized_name} {abandoned_claim_hash.hex()} {tx_num} {nout}") + self._abandon_claim(abandoned_claim_hash, tx_num, nout, normalized_name) + + for abandoned_claim_hash, (tx_num, nout, normalized_name) in abandoned_channels.items(): + # print(f"\tabandon {normalized_name} {abandoned_claim_hash.hex()} {tx_num} {nout}") + self._abandon_claim(abandoned_claim_hash, tx_num, nout, normalized_name) + self.pending_transactions[tx_count] = tx_hash + self.pending_transaction_num_mapping[tx_hash] = tx_count + if self.env.cache_all_tx_hashes: + self.db.total_transactions.append(tx_hash) + self.db.tx_num_mapping[tx_hash] = tx_count + tx_count += 1 + + # handle expired claims + self._expire_claims(height) + + # activate claims and process takeovers + self._get_takeover_ops(height) + + # update effective amount and update sets of touched and deleted claims + self._get_cumulative_update_ops(height) + + self.db.prefix_db.touched_hashX.stage_put((height,), (list(sorted(self.touched_hashXs)),)) + + self.db.prefix_db.tx_count.stage_put(key_args=(height,), value_args=(tx_count,)) + + for hashX, new_history in self.hashXs_by_tx.items(): + if not new_history: + continue + self.db.prefix_db.hashX_history.stage_put(key_args=(hashX, height), value_args=(new_history,)) + + self.tx_count = tx_count + self.db.tx_counts.append(self.tx_count) + + cached_max_reorg_depth = self.daemon.cached_height() - self.env.reorg_limit + + # if height >= cached_max_reorg_depth: + self.db.prefix_db.touched_or_deleted.stage_put( + key_args=(height,), value_args=(self.touched_claim_hashes, self.removed_claim_hashes) + ) + + self.height = height + self.db.headers.append(block.header) + self.tip = self.coin.header_hash(block.header) + + self.db.fs_height = self.height + self.db.fs_tx_count = self.tx_count + self.db.hist_flush_count += 1 + self.db.hist_unflushed_count = 0 + self.db.utxo_flush_count = self.db.hist_flush_count 
+ self.db.db_height = self.height + self.db.db_tx_count = self.tx_count + self.db.db_tip = self.tip + self.db.last_flush_tx_count = self.db.fs_tx_count + now = time.time() + self.db.wall_time += now - self.db.last_flush + self.db.last_flush = now + self.db.write_db_state() + + # flush the changes + save_undo = (self.daemon.cached_height() - self.height) <= self.env.reorg_limit + + if save_undo: + self.db.prefix_db.commit(self.height, self.tip) + else: + self.db.prefix_db.unsafe_commit() + self.clear_after_advance_or_reorg() + self.db.assert_db_state() + # print("*************\n") + return txo_count + + def clear_after_advance_or_reorg(self): + self.txo_to_claim.clear() + self.claim_hash_to_txo.clear() + self.support_txos_by_claim.clear() + self.support_txo_to_claim.clear() + self.removed_support_txos_by_name_by_claim.clear() + self.abandoned_claims.clear() + self.removed_active_support_amount_by_claim.clear() + self.activated_support_amount_by_claim.clear() + self.activated_claim_amount_by_name_and_hash.clear() + self.activation_by_claim_by_name.clear() + self.possible_future_claim_amount_by_name_and_hash.clear() + self.possible_future_support_amounts_by_claim_hash.clear() + self.possible_future_support_txos_by_claim_hash.clear() + self.pending_channels.clear() + self.amount_cache.clear() + self.signatures_changed.clear() + self.expired_claim_hashes.clear() + self.doesnt_have_valid_signature.clear() + self.claim_channels.clear() + self.utxo_cache.clear() + self.hashXs_by_tx.clear() + self.removed_claim_hashes.clear() + self.touched_claim_hashes.clear() + self.pending_reposted.clear() + self.pending_channel_counts.clear() + self.updated_claims.clear() + self.taken_over_names.clear() + self.pending_transaction_num_mapping.clear() + self.pending_transactions.clear() + self.pending_support_amount_change.clear() + self.touched_hashXs.clear() + + def backup_block(self): + assert len(self.db.prefix_db._op_stack) == 0 + touched_and_deleted = self.db.prefix_db.touched_or_deleted.get(self.height) + self.touched_claims_to_send_es.update(touched_and_deleted.touched_claims) + self.removed_claims_to_send_es.difference_update(touched_and_deleted.touched_claims) + self.removed_claims_to_send_es.update(touched_and_deleted.deleted_claims) + + # self.db.assert_flushed(self.flush_data()) + self.logger.info("backup block %i", self.height) + # Check and update self.tip + + self.db.tx_counts.pop() + reverted_block_hash = self.coin.header_hash(self.db.headers.pop()) + self.tip = self.coin.header_hash(self.db.headers[-1]) + if self.env.cache_all_tx_hashes: + while len(self.db.total_transactions) > self.db.tx_counts[-1]: + self.db.tx_num_mapping.pop(self.db.total_transactions.pop()) + self.tx_count -= 1 + else: + self.tx_count = self.db.tx_counts[-1] + self.height -= 1 + + # self.touched can include other addresses which is + # harmless, but remove None. + self.touched_hashXs.discard(None) + + assert self.height < self.db.db_height + assert not self.db.hist_unflushed + + start_time = time.time() + tx_delta = self.tx_count - self.db.last_flush_tx_count + ### + self.db.fs_tx_count = self.tx_count + # Truncate header_mc: header count is 1 more than the height. 
+ self.db.header_mc.truncate(self.height + 1) + ### + # Not certain this is needed, but it doesn't hurt + self.db.hist_flush_count += 1 + + while self.db.fs_height > self.height: + self.db.fs_height -= 1 + self.db.utxo_flush_count = self.db.hist_flush_count + self.db.db_height = self.height + self.db.db_tx_count = self.tx_count + self.db.db_tip = self.tip + # Flush state last as it reads the wall time. + now = time.time() + self.db.wall_time += now - self.db.last_flush + self.db.last_flush = now + self.db.last_flush_tx_count = self.db.fs_tx_count + + # rollback + self.db.prefix_db.rollback(self.height + 1, reverted_block_hash) + self.db.es_sync_height = self.height + self.db.write_db_state() + self.db.prefix_db.unsafe_commit() + + self.clear_after_advance_or_reorg() + self.db.assert_db_state() + + elapsed = self.db.last_flush - start_time + self.logger.warning(f'backup flush #{self.db.hist_flush_count:,d} took {elapsed:.1f}s. ' + f'Height {self.height:,d} txs: {self.tx_count:,d} ({tx_delta:+,d})') + + def add_utxo(self, tx_hash: bytes, tx_num: int, nout: int, txout: 'TxOutput') -> Optional[bytes]: + hashX = self.coin.hashX_from_txo(txout) + if hashX: + self.touched_hashXs.add(hashX) + self.utxo_cache[(tx_hash, nout)] = (hashX, txout.value) + self.db.prefix_db.utxo.stage_put((hashX, tx_num, nout), (txout.value,)) + self.db.prefix_db.hashX_utxo.stage_put((tx_hash[:4], tx_num, nout), (hashX,)) + return hashX + + def get_pending_tx_num(self, tx_hash: bytes) -> int: + if tx_hash in self.pending_transaction_num_mapping: + return self.pending_transaction_num_mapping[tx_hash] + else: + return self.db.get_tx_num(tx_hash) + + def spend_utxo(self, tx_hash: bytes, nout: int): + hashX, amount = self.utxo_cache.pop((tx_hash, nout), (None, None)) + txin_num = self.get_pending_tx_num(tx_hash) + if not hashX: + hashX_value = self.db.prefix_db.hashX_utxo.get(tx_hash[:4], txin_num, nout) + if not hashX_value: + return + hashX = hashX_value.hashX + utxo_value = self.db.prefix_db.utxo.get(hashX, txin_num, nout) + if not utxo_value: + self.logger.warning( + "%s:%s is not found in UTXO db for %s", hash_to_hex_str(tx_hash), nout, hash_to_hex_str(hashX) + ) + raise ChainError( + f"{hash_to_hex_str(tx_hash)}:{nout} is not found in UTXO db for {hash_to_hex_str(hashX)}" + ) + self.touched_hashXs.add(hashX) + self.db.prefix_db.hashX_utxo.stage_delete((tx_hash[:4], txin_num, nout), hashX_value) + self.db.prefix_db.utxo.stage_delete((hashX, txin_num, nout), utxo_value) + return hashX + elif amount is not None: + self.db.prefix_db.hashX_utxo.stage_delete((tx_hash[:4], txin_num, nout), (hashX,)) + self.db.prefix_db.utxo.stage_delete((hashX, txin_num, nout), (amount,)) + self.touched_hashXs.add(hashX) + return hashX + + async def process_blocks_and_mempool_forever(self): + """Loop forever processing blocks as they arrive.""" + try: + while not self._stopping: + if self.height == self.daemon.cached_height(): + if not self._caught_up_event.is_set(): + await self._first_caught_up() + self._caught_up_event.set() + try: + await asyncio.wait_for(self.blocks_event.wait(), self.wait_for_blocks_duration) + except asyncio.TimeoutError: + pass + self.blocks_event.clear() + blocks = self.prefetcher.get_prefetched_blocks() + if self._stopping: + break + if not blocks: + try: + await self.refresh_mempool() + except asyncio.CancelledError: + raise + except Exception: + self.logger.exception("error while updating mempool txs") + raise + else: + try: + await self.check_and_advance_blocks(blocks) + except asyncio.CancelledError: + raise + 
except Exception: + self.logger.exception("error while processing txs") + raise + finally: + self._ready_to_stop.set() + + async def _first_caught_up(self): + self.logger.info(f'caught up to height {self.height}') + # Flush everything but with first_sync->False state. + first_sync = self.db.first_sync + self.db.first_sync = False + + def flush(): + assert len(self.db.prefix_db._op_stack) == 0 + self.db.write_db_state() + self.db.prefix_db.unsafe_commit() + self.db.assert_db_state() + + await self.run_in_thread_with_lock(flush) + + if first_sync: + self.logger.info(f'{__version__} synced to ' + f'height {self.height:,d}, halting here.') + self.shutdown_event.set() + + async def open(self): + self.db.open_db() + self.height = self.db.db_height + self.tip = self.db.db_tip + self.tx_count = self.db.db_tx_count + await self.db.initialize_caches() + + async def fetch_and_process_blocks(self, caught_up_event): + """Fetch, process and index blocks from the daemon. + + Sets caught_up_event when first caught up. Flushes to disk + and shuts down cleanly if cancelled. + + This is mainly because if, during initial sync ElectrumX is + asked to shut down when a large number of blocks have been + processed but not written to disk, it should write those to + disk before exiting, as otherwise a significant amount of work + could be lost. + """ + + await self.open() + + self._caught_up_event = caught_up_event + try: + await asyncio.wait([ + self.prefetcher.main_loop(self.height), + self.process_blocks_and_mempool_forever() + ]) + except asyncio.CancelledError: + raise + except: + self.logger.exception("Block processing failed!") + raise + finally: + # Shut down block processing + self.logger.info('closing the DB for a clean shutdown...') + self._chain_executor.shutdown(wait=True) + self.db.close() + + async def start(self): + self._stopping = False + env = self.env + self.logger.info(f'software version: {__version__}') + self.logger.info(f'event loop policy: {env.loop_policy}') + self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks') + + await self.daemon.height() + + def _start_cancellable(run, *args): + _flag = asyncio.Event() + self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag))) + return _flag.wait() + + await _start_cancellable(self.fetch_and_process_blocks) + + async def stop(self): + self._stopping = True + await self._ready_to_stop.wait() + for task in reversed(self.cancellable_tasks): + task.cancel() + await asyncio.wait(self.cancellable_tasks) + self.shutdown_event.set() + await self.daemon.close() + + def run(self): + loop = asyncio.get_event_loop() + loop.set_default_executor(self._chain_executor) + + def __exit(): + raise SystemExit() + try: + loop.add_signal_handler(signal.SIGINT, __exit) + loop.add_signal_handler(signal.SIGTERM, __exit) + loop.run_until_complete(self.start()) + loop.run_until_complete(self.shutdown_event.wait()) + except (SystemExit, KeyboardInterrupt): + pass + finally: + loop.run_until_complete(self.stop()) diff --git a/scribe/blockchain/daemon.py b/scribe/blockchain/daemon.py new file mode 100644 index 0000000..6567d01 --- /dev/null +++ b/scribe/blockchain/daemon.py @@ -0,0 +1,328 @@ +import asyncio +import itertools +import json +import time +import logging +from functools import wraps + +import aiohttp +from prometheus_client import Gauge, Histogram +from scribe import PROMETHEUS_NAMESPACE +from scribe.common import LRUCacheWithMetrics, RPCError, DaemonError, WarmingUpError, WorkQueueFullError + + +log = logging.getLogger(__name__) + + 
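For reference, the lifecycle wiring at the end of block_processor.py above (`run`/`start`/`stop`) boils down to a common asyncio pattern: translate SIGINT/SIGTERM into an exception, run until a shutdown event fires, and always stop cleanly. A condensed sketch of that pattern (names are illustrative, not scribe's classes):

```python
# Minimal sketch: signals raise SystemExit out of run_until_complete, the
# finally block guarantees an orderly stop() even on Ctrl-C.
import asyncio
import signal

class Service:
    def __init__(self):
        self.shutdown_event = asyncio.Event()

    async def start(self): ...   # kick off cancellable tasks
    async def stop(self): ...    # cancel tasks in reverse order, set events

def run(service: Service):
    loop = asyncio.new_event_loop()

    def _exit():
        raise SystemExit()

    try:
        loop.add_signal_handler(signal.SIGINT, _exit)
        loop.add_signal_handler(signal.SIGTERM, _exit)
        loop.run_until_complete(service.start())
        loop.run_until_complete(service.shutdown_event.wait())
    except (SystemExit, KeyboardInterrupt):
        pass
    finally:
        loop.run_until_complete(service.stop())
```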
+NAMESPACE = f"{PROMETHEUS_NAMESPACE}_blockchain" +METHOD_NOT_FOUND = -32601 + + +def handles_errors(decorated_function): + @wraps(decorated_function) + async def wrapper(*args, **kwargs): + try: + return await decorated_function(*args, **kwargs) + except DaemonError as daemon_error: + raise RPCError(1, daemon_error.args[0]) + return wrapper + + +class LBCDaemon: + """Handles connections to a daemon at the given URL.""" + + WARMING_UP = -28 + id_counter = itertools.count() + + lbrycrd_request_time_metric = Histogram( + "lbrycrd_request", "lbrycrd requests count", namespace=NAMESPACE, labelnames=("method",) + ) + lbrycrd_pending_count_metric = Gauge( + "lbrycrd_pending_count", "Number of lbrycrd rpcs that are in flight", namespace=NAMESPACE, + labelnames=("method",) + ) + + def __init__(self, coin, url, max_workqueue=10, init_retry=0.25, + max_retry=4.0): + self.coin = coin + self.logger = logging.getLogger(__name__) + self.set_url(url) + # Limit concurrent RPC calls to this number. + # See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16 + self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue) + self.init_retry = init_retry + self.max_retry = max_retry + self._height = None + self.available_rpcs = {} + self.connector = aiohttp.TCPConnector(ssl=False) + self._block_hash_cache = LRUCacheWithMetrics(100000) + self._block_cache = LRUCacheWithMetrics(2 ** 13, metric_name='block', namespace=NAMESPACE) + + async def close(self): + if self.connector: + await self.connector.close() + self.connector = None + + def set_url(self, url): + """Set the URLs to the given list, and switch to the first one.""" + urls = url.split(',') + urls = [self.coin.sanitize_url(url) for url in urls] + for n, url in enumerate(urls): + status = '' if n else ' (current)' + logged_url = self.logged_url(url) + self.logger.info(f'daemon #{n + 1} at {logged_url}{status}') + self.url_index = 0 + self.urls = urls + + def current_url(self): + """Returns the current daemon URL.""" + return self.urls[self.url_index] + + def logged_url(self, url=None): + """The host and port part, for logging.""" + url = url or self.current_url() + return url[url.rindex('@') + 1:] + + def failover(self): + """Call to fail-over to the next daemon URL. + + Returns False if there is only one, otherwise True. + """ + if len(self.urls) > 1: + self.url_index = (self.url_index + 1) % len(self.urls) + self.logger.info(f'failing over to {self.logged_url()}') + return True + return False + + def client_session(self): + """An aiohttp client session.""" + return aiohttp.ClientSession(connector=self.connector, connector_owner=False) + + async def _send_data(self, data): + if not self.connector: + raise asyncio.CancelledError('Tried to send request during shutdown.') + async with self.workqueue_semaphore: + async with self.client_session() as session: + async with session.post(self.current_url(), data=data) as resp: + kind = resp.headers.get('Content-Type', None) + if kind == 'application/json': + return await resp.json() + # bitcoind's HTTP protocol "handling" is a bad joke + text = await resp.text() + if 'Work queue depth exceeded' in text: + raise WorkQueueFullError + text = text.strip() or resp.reason + self.logger.error(text) + raise DaemonError(text)
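`_send_data` above caps in-flight daemon RPCs with `workqueue_semaphore`, sized below bitcoind's HTTP work queue so bursts wait client-side instead of triggering "Work queue depth exceeded". A minimal standalone illustration of that throttle (not scribe code; `call` stands in for any coroutine performing one RPC):

```python
# Sketch: at most 10 calls run concurrently; the rest queue on the semaphore.
import asyncio

async def worker(sem: asyncio.Semaphore, call):
    async with sem:          # blocks while max_workqueue calls are in flight
        return await call()

async def run_all(calls):
    sem = asyncio.Semaphore(10)   # mirrors the default max_workqueue=10
    return await asyncio.gather(*(worker(sem, c) for c in calls))
```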
+ """ + + def log_error(error): + nonlocal last_error_log, retry + now = time.time() + if now - last_error_log > 60: + last_error_log = now + self.logger.error(f'{error} Retrying occasionally...') + if retry == self.max_retry and self.failover(): + retry = 0 + + on_good_message = None + last_error_log = 0 + data = json.dumps(payload) + retry = self.init_retry + methods = tuple( + [payload['method']] if isinstance(payload, dict) else [request['method'] for request in payload] + ) + while True: + try: + for method in methods: + self.lbrycrd_pending_count_metric.labels(method=method).inc() + result = await self._send_data(data) + result = processor(result) + if on_good_message: + self.logger.info(on_good_message) + return result + except asyncio.TimeoutError: + log_error('timeout error.') + except aiohttp.ServerDisconnectedError: + log_error('disconnected.') + on_good_message = 'connection restored' + except aiohttp.ClientConnectionError: + log_error('connection problem - is your daemon running?') + on_good_message = 'connection restored' + except aiohttp.ClientError as e: + log_error(f'daemon error: {e}') + on_good_message = 'running normally' + except WarmingUpError: + log_error('starting up checking blocks.') + on_good_message = 'running normally' + except WorkQueueFullError: + log_error('work queue full.') + on_good_message = 'running normally' + finally: + for method in methods: + self.lbrycrd_pending_count_metric.labels(method=method).dec() + await asyncio.sleep(retry) + retry = max(min(self.max_retry, retry * 2), self.init_retry) + + async def _send_single(self, method, params=None): + """Send a single request to the daemon.""" + + start = time.perf_counter() + + def processor(result): + err = result['error'] + if not err: + return result['result'] + if err.get('code') == self.WARMING_UP: + raise WarmingUpError + raise DaemonError(err) + + payload = {'method': method, 'id': next(self.id_counter)} + if params: + payload['params'] = params + result = await self._send(payload, processor) + self.lbrycrd_request_time_metric.labels(method=method).observe(time.perf_counter() - start) + return result + + async def _send_vector(self, method, params_iterable, replace_errs=False): + """Send several requests of the same method. + + The result will be an array of the same length as params_iterable. + If replace_errs is true, any item with an error is returned as None, + otherwise an exception is raised.""" + + start = time.perf_counter() + + def processor(result): + errs = [item['error'] for item in result if item['error']] + if any(err.get('code') == self.WARMING_UP for err in errs): + raise WarmingUpError + if not errs or replace_errs: + return [item['result'] for item in result] + raise DaemonError(errs) + + payload = [{'method': method, 'params': p, 'id': next(self.id_counter)} + for p in params_iterable] + result = [] + if payload: + result = await self._send(payload, processor) + self.lbrycrd_request_time_metric.labels(method=method).observe(time.perf_counter() - start) + return result + + async def _is_rpc_available(self, method): + """Return whether given RPC method is available in the daemon. 
+ + async def _is_rpc_available(self, method): + """Return whether given RPC method is available in the daemon. + + Results are cached and the daemon will generally not be queried with + the same method more than once.""" + available = self.available_rpcs.get(method) + if available is None: + available = True + try: + await self._send_single(method) + except DaemonError as e: + err = e.args[0] + error_code = err.get("code") + available = error_code != METHOD_NOT_FOUND + self.available_rpcs[method] = available + return available + + async def block_hex_hashes(self, first, count): + """Return the hex hashes of count blocks starting at height first.""" + if first + count < (self.cached_height() or 0) - 200: + return await self._cached_block_hex_hashes(first, count) + params_iterable = ((h, ) for h in range(first, first + count)) + return await self._send_vector('getblockhash', params_iterable) + + async def _cached_block_hex_hashes(self, first, count): + """Return the hex hashes of count blocks starting at height first.""" + cached = self._block_hash_cache.get((first, count)) + if cached: + return cached + params_iterable = ((h, ) for h in range(first, first + count)) + self._block_hash_cache[(first, count)] = await self._send_vector('getblockhash', params_iterable) + return self._block_hash_cache[(first, count)] + + async def deserialised_block(self, hex_hash): + """Return the deserialised block with the given hex hash.""" + if hex_hash not in self._block_cache: + block = await self._send_single('getblock', (hex_hash, 1)) + self._block_cache[hex_hash] = block + return block + return self._block_cache[hex_hash] + + async def raw_blocks(self, hex_hashes): + """Return the raw binary blocks with the given hex hashes.""" + params_iterable = ((h, 0) for h in hex_hashes) + blocks = await self._send_vector('getblock', params_iterable) + # Convert hex string to bytes + return [bytes.fromhex(block) for block in blocks] + + async def mempool_hashes(self): + """Update our record of the daemon's mempool hashes.""" + return await self._send_single('getrawmempool') + + async def estimatefee(self, block_count): + """Return the fee estimate for the block count. Units are whole + currency units per KB, e.g. 0.00000995, or -1 if no estimate + is available. + """ + args = (block_count, ) + if await self._is_rpc_available('estimatesmartfee'): + estimate = await self._send_single('estimatesmartfee', args) + return estimate.get('feerate', -1) + return await self._send_single('estimatefee', args) + + async def getnetworkinfo(self): + """Return the result of the 'getnetworkinfo' RPC call.""" + return await self._send_single('getnetworkinfo') + + async def relayfee(self): + """The minimum fee a low-priority tx must pay in order to be accepted + to the daemon's memory pool.""" + network_info = await self.getnetworkinfo() + return network_info['relayfee'] + + async def getrawtransactions(self, hex_hashes, replace_errs=True): + """Return the serialized raw transactions with the given hashes. + + Replaces errors with None by default.""" + params_iterable = ((hex_hash, 0) for hex_hash in hex_hashes) + txs = await self._send_vector('getrawtransaction', params_iterable, + replace_errs=replace_errs) + # Convert hex strings to bytes + return [bytes.fromhex(tx) if tx else None for tx in txs]
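`estimatefee` above leans on `_is_rpc_available` to probe the daemon once per method and fall back to the legacy RPC when `estimatesmartfee` is missing. A sketch of that probe-once, cache-the-answer pattern (an illustrative stand-in, not scribe's class; `send_single` is assumed to behave like `_send_single` above):

```python
# Sketch: remember per-method availability so each RPC is probed at most once.
from scribe.common import DaemonError

METHOD_NOT_FOUND = -32601

class FeatureProbe:
    def __init__(self, send_single):
        self._send_single = send_single
        self._available = {}   # method name -> bool, filled lazily

    async def is_available(self, method):
        if method not in self._available:
            try:
                await self._send_single(method)
                self._available[method] = True
            except DaemonError as e:
                # any error other than "method not found" means the RPC exists
                self._available[method] = e.args[0].get("code") != METHOD_NOT_FOUND
        return self._available[method]
```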
+ + async def broadcast_transaction(self, raw_tx): + """Broadcast a transaction to the network.""" + return await self._send_single('sendrawtransaction', (raw_tx, )) + + async def height(self): + """Query the daemon for its current height.""" + self._height = await self._send_single('getblockcount') + return self._height + + def cached_height(self): + """Return the cached daemon height. + + If the daemon has not been queried yet this returns None.""" + return self._height + + @handles_errors + async def getrawtransaction(self, hex_hash, verbose=False): + return await self._send_single('getrawtransaction', (hex_hash, int(verbose))) + + @handles_errors + async def getclaimsforname(self, name): + '''Given a name, retrieves all claims matching that name.''' + return await self._send_single('getclaimsforname', (name,)) + + @handles_errors + async def getbestblockhash(self): + '''Return the hash of the current best (tip) block.''' + return await self._send_single('getbestblockhash') diff --git a/scribe/blockchain/network.py b/scribe/blockchain/network.py new file mode 100644 index 0000000..5450666 --- /dev/null +++ b/scribe/blockchain/network.py @@ -0,0 +1,300 @@ +import re +import struct +import typing +from typing import List +from hashlib import sha256 +from decimal import Decimal +from scribe.base58 import Base58 +from scribe.bip32 import PublicKey +from scribe.common import hash160, hash_to_hex_str, double_sha256 +from scribe.blockchain.transaction import TxOutput, TxInput, Block +from scribe.blockchain.transaction.deserializer import Deserializer +from scribe.blockchain.transaction.script import OpCodes, P2PKH_script, P2SH_script, txo_script_parser + + +HASHX_LEN = 11 + + +class CoinError(Exception): + """Exception raised for coin-related errors.""" + + +ENCODE_CHECK = Base58.encode_check +DECODE_CHECK = Base58.decode_check + + +class LBCMainNet: + NAME = "LBRY" + SHORTNAME = "LBC" + NET = "mainnet" + ENCODE_CHECK = Base58.encode_check + DECODE_CHECK = Base58.decode_check + DESERIALIZER = Deserializer + BASIC_HEADER_SIZE = 112 + CHUNK_SIZE = 96 + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("55") + P2SH_VERBYTES = bytes.fromhex("7A") + WIF_BYTE = bytes.fromhex("1C") + GENESIS_HASH = '9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463' + RPC_PORT = 9245 + REORG_LIMIT = 200 + RPC_URL_REGEX = re.compile('.+@(\\[[0-9a-fA-F:]+\\]|[^:]+)(:[0-9]+)?') + VALUE_PER_COIN = 100000000 + + # Peer discovery + PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'} + PEERS: List[str] = [] + # claimtrie/takeover params + nOriginalClaimExpirationTime = 262974 + nExtendedClaimExpirationTime = 2102400 + nExtendedClaimExpirationForkHeight = 400155 + nNormalizedNameForkHeight = 539940 # targeting 21 March 2019 + nMinTakeoverWorkaroundHeight = 496850 + nMaxTakeoverWorkaroundHeight = 658300 # targeting 30 Oct 2019 + nWitnessForkHeight = 680770 # targeting 11 Dec 2019 + nAllClaimsInMerkleForkHeight = 658310 # targeting 30 Oct 2019 + proportionalDelayFactor = 32 + maxTakeoverDelay = 4032 + + @classmethod + def sanitize_url(cls, url): + # Remove surrounding ws and trailing /s + url = url.strip().rstrip('/') + match = 
cls.RPC_URL_REGEX.match(url) + if not match: + raise CoinError(f'invalid daemon URL: "{url}"') + if match.groups()[1] is None: + url += f':{cls.RPC_PORT:d}' + if not url.startswith('http://') and not url.startswith('https://'): + url = 'http://' + url + return url + '/' + + @classmethod + def address_to_hashX(cls, address): + """Return a hashX given a coin address.""" + return cls.hashX_from_script(cls.pay_to_address_script(address)) + + @classmethod + def P2PKH_address_from_hash160(cls, hash160_bytes): + """Return a P2PKH address given a hash160.""" + assert len(hash160_bytes) == 20 + return ENCODE_CHECK(cls.P2PKH_VERBYTE + hash160_bytes) + + @classmethod + def P2PKH_address_from_pubkey(cls, pubkey): + """Return a coin address given a public key.""" + return cls.P2PKH_address_from_hash160(hash160(pubkey)) + + @classmethod + def P2SH_address_from_hash160(cls, hash160_bytes): + """Return a coin address given a hash160.""" + assert len(hash160_bytes) == 20 + return ENCODE_CHECK(cls.P2SH_VERBYTES + hash160_bytes) + + @classmethod + def hash160_to_P2PKH_script(cls, hash160_bytes): + return P2PKH_script(hash160_bytes) + + @classmethod + def hash160_to_P2PKH_hashX(cls, hash160_bytes): + return cls.hashX_from_script(P2PKH_script(hash160_bytes)) + + @classmethod + def pay_to_address_script(cls, address): + """Return a pubkey script that pays to a pubkey hash. + + Pass the address (either P2PKH or P2SH) in base58 form. + """ + raw = DECODE_CHECK(address) + + # Require version byte(s) plus hash160. + verlen = len(raw) - 20 + if verlen > 0: + verbyte, hash160_bytes = raw[:verlen], raw[verlen:] + if verbyte == cls.P2PKH_VERBYTE: + return P2PKH_script(hash160_bytes) + if verbyte in cls.P2SH_VERBYTES: + return P2SH_script(hash160_bytes) + + raise CoinError(f'invalid address: {address}') + + @classmethod + def privkey_WIF(cls, privkey_bytes, compressed): + """Return the private key encoded in Wallet Import Format.""" + payload = bytearray(cls.WIF_BYTE) + privkey_bytes + if compressed: + payload.append(0x01) + return cls.ENCODE_CHECK(payload) + + @classmethod + def header_hash(cls, header): + """Given a header, return its hash.""" + return double_sha256(header) + + @classmethod + def header_prevhash(cls, header): + """Given a header, return the previous block hash.""" + return header[4:36] + + @classmethod + def static_header_offset(cls, height): + """Given a header height return its offset in the headers file. + + If header sizes change at some point, this is the only code + that needs updating.""" + return height * cls.BASIC_HEADER_SIZE + + @classmethod + def static_header_len(cls, height): + """Given a header height return its length.""" + return (cls.static_header_offset(height + 1) + - cls.static_header_offset(height)) + + @classmethod + def block_header(cls, block, height): + """Returns the block header given a block and its height.""" + return block[:cls.static_header_len(height)] + + @classmethod + def block(cls, raw_block, height): + """Return a Block namedtuple given a raw block and its height.""" + header = cls.block_header(raw_block, height) + txs = Deserializer(raw_block, start=len(header)).read_tx_block() + return Block(raw_block, header, txs) + + @classmethod + def transaction(cls, raw_tx: bytes): + """Return a deserialized transaction given its raw bytes.""" + return Deserializer(raw_tx).read_tx() + + @classmethod + def decimal_value(cls, value): + """Return the number of standard coin units as a Decimal given a + quantity of smallest units. + + For example 1 BTC is returned for 100 million satoshis. + """ + return Decimal(value) / cls.VALUE_PER_COIN
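A few worked cases of the `sanitize_url` normalization near the top of this class, plus the smallest-unit conversion just above (assuming mainnet's `RPC_PORT` of 9245 and `VALUE_PER_COIN` of 100000000, both defined on `LBCMainNet`):

```python
# sanitize_url fills in a missing port and scheme and normalizes the trailing slash:
assert LBCMainNet.sanitize_url('user:pass@127.0.0.1') == 'http://user:pass@127.0.0.1:9245/'
assert LBCMainNet.sanitize_url(' http://user:pass@127.0.0.1:9245/ ') == 'http://user:pass@127.0.0.1:9245/'
# a URL without credentials (no '@') fails RPC_URL_REGEX and raises CoinError:
# LBCMainNet.sanitize_url('127.0.0.1')  -> CoinError: invalid daemon URL

# decimal_value converts dewies (smallest units) to whole LBC:
assert LBCMainNet.decimal_value(100000000) == 1
```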
+ + @classmethod + def genesis_block(cls, block): + '''Check the Genesis block is the right one for this coin. + + Return the block less its unspendable coinbase. + ''' + header = cls.block_header(block, 0) + header_hex_hash = hash_to_hex_str(cls.header_hash(header)) + if header_hex_hash != cls.GENESIS_HASH: + raise CoinError(f'genesis block has hash {header_hex_hash} expected {cls.GENESIS_HASH}') + + return block + + @classmethod + def electrum_header(cls, header, height): + version, = struct.unpack('<I', header[:4]) + timestamp, bits, nonce = struct.unpack('<III', header[100:112]) + return { + 'version': version, + 'prev_block_hash': hash_to_hex_str(header[4:36]), + 'merkle_root': hash_to_hex_str(header[36:68]), + 'claim_trie_root': hash_to_hex_str(header[68:100]), + 'timestamp': timestamp, + 'bits': bits, + 'nonce': nonce, + 'block_height': height, + } + + @classmethod + def claim_address_handler(cls, txo: 'TxOutput') -> typing.Optional[str]: + '''Parse a claim script, returns the address + ''' + if txo.pubkey_hash: + return cls.P2PKH_address_from_hash160(txo.pubkey_hash) + elif txo.script_hash: + return cls.P2SH_address_from_hash160(txo.script_hash) + elif txo.pubkey: + return cls.P2PKH_address_from_pubkey(txo.pubkey) + + @classmethod + def hashX_from_txo(cls, txo: 'TxOutput'): + address = cls.claim_address_handler(txo) + if address: + script = cls.pay_to_address_script(address) + else: + script = txo.pk_script + return sha256(script).digest()[:HASHX_LEN] + + @classmethod + def hashX_from_script(cls, script: bytes): + ''' + Overrides electrumx hashX from script by extracting addresses from claim scripts. + ''' + if script and script[0] == OpCodes.OP_RETURN or not script: + return None + if script[0] in [ + OpCodes.OP_CLAIM_NAME, + OpCodes.OP_UPDATE_CLAIM, + OpCodes.OP_SUPPORT_CLAIM, + ]: + decoded = txo_script_parser(script) + if not decoded: + return + claim, support, pubkey_hash, script_hash, pubkey = decoded + if pubkey_hash: + return cls.address_to_hashX(cls.P2PKH_address_from_hash160(pubkey_hash)) + elif script_hash: + return cls.address_to_hashX(cls.P2SH_address_from_hash160(script_hash)) + elif pubkey: + return cls.address_to_hashX(cls.P2PKH_address_from_pubkey(pubkey)) + else: + return sha256(script).digest()[:HASHX_LEN] + + @classmethod + def get_expiration_height(cls, last_updated_height: int, extended: bool = False) -> int: + if extended: + return last_updated_height + cls.nExtendedClaimExpirationTime + if last_updated_height < cls.nExtendedClaimExpirationForkHeight: + return last_updated_height + cls.nOriginalClaimExpirationTime + return last_updated_height + cls.nExtendedClaimExpirationTime + + @classmethod + def get_delay_for_name(cls, blocks_of_continuous_ownership: int) -> int: + return min(blocks_of_continuous_ownership // cls.proportionalDelayFactor, cls.maxTakeoverDelay) + + @classmethod + def verify_signed_metadata(cls, public_key_bytes: bytes, txo: TxOutput, first_input: TxInput): + m = txo.metadata + if m.unsigned_payload: + pieces = (Base58.decode(cls.claim_address_handler(txo)), m.unsigned_payload, m.signing_channel_hash[::-1]) + else: + pieces = (first_input.prev_hash + first_input.prev_idx.to_bytes(4, byteorder='little'), + m.signing_channel_hash, m.to_message_bytes()) + return PublicKey.from_compressed(public_key_bytes).verify( + m.signature, sha256(b''.join(pieces)).digest() + )
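Worked values for the two schedule helpers above; the delay arithmetic is exactly what produces the "claim C activates 1 (or rarely 2) blocks after B" scenario described in block_processor's takeover comments:

```python
# proportional takeover delay: one block of delay per 32 blocks of continuous
# ownership, capped at maxTakeoverDelay (4032 blocks).
assert LBCMainNet.get_delay_for_name(0) == 0              # brand new name: immediate
assert LBCMainNet.get_delay_for_name(32) == 1             # 32 blocks owned: 1 block delay
assert LBCMainNet.get_delay_for_name(10_000_000) == 4032  # capped at maxTakeoverDelay

# claim expiration: claims last updated before the fork height (400155) expired
# after 262974 blocks; later ones expire after 2102400 blocks.
assert LBCMainNet.get_expiration_height(400154) == 400154 + 262974
assert LBCMainNet.get_expiration_height(400155) == 400155 + 2102400
```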
+ +class LBCRegTest(LBCMainNet): + NET = "regtest" + GENESIS_HASH = '6e3fcf1299d4ec5d79c3a4c91d624a4acf9e2e173d95a1a0504f677669687556' + XPUB_VERBYTES = bytes.fromhex('043587cf') + XPRV_VERBYTES = bytes.fromhex('04358394') + + P2PKH_VERBYTE = bytes.fromhex("6f") + P2SH_VERBYTES = bytes.fromhex("c4") + + nOriginalClaimExpirationTime = 500 + nExtendedClaimExpirationTime = 600 + nExtendedClaimExpirationForkHeight = 800 + nNormalizedNameForkHeight = 250 + nMinTakeoverWorkaroundHeight = -1 + nMaxTakeoverWorkaroundHeight = -1 + nWitnessForkHeight = 150 + nAllClaimsInMerkleForkHeight = 350 + + +class LBCTestNet(LBCRegTest): + NET = "testnet" + GENESIS_HASH = '9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463' diff --git a/scribe/blockchain/prefetcher.py b/scribe/blockchain/prefetcher.py new file mode 100644 index 0000000..68f635a --- /dev/null +++ b/scribe/blockchain/prefetcher.py @@ -0,0 +1,128 @@ +import asyncio +import logging +import typing +if typing.TYPE_CHECKING: + from scribe.blockchain.network import LBCMainNet + from scribe.blockchain.daemon import LBCDaemon + + +def chunks(items, size): + """Break up items, an iterable, into chunks of length size.""" + for i in range(0, len(items), size): + yield items[i: i + size] + + +class Prefetcher: + """Prefetches blocks (in the forward direction only).""" + + def __init__(self, daemon: 'LBCDaemon', coin: 'LBCMainNet', blocks_event: asyncio.Event): + self.logger = logging.getLogger(__name__) + self.daemon = daemon + self.coin = coin + self.blocks_event = blocks_event + self.blocks = [] + self.caught_up = False + # Access to fetched_height should be protected by the semaphore + self.fetched_height = None + self.semaphore = asyncio.Semaphore() + self.refill_event = asyncio.Event() + # The prefetched block cache size. The min cache size has + # little effect on sync time. + self.cache_size = 0 + self.min_cache_size = 10 * 1024 * 1024 + # This makes the first fetch be 10 blocks + self.ave_size = self.min_cache_size // 10 + self.polling_delay = 0.5 + + async def main_loop(self, bp_height): + """Loop forever polling for more blocks.""" + await self.reset_height(bp_height) + try: + while True: + # Sleep a while if there is nothing to prefetch + await self.refill_event.wait() + if not await self._prefetch_blocks(): + await asyncio.sleep(self.polling_delay) + except Exception as e: + if not isinstance(e, asyncio.CancelledError): + self.logger.exception("block fetcher loop crashed") + raise + finally: + self.logger.info("block pre-fetcher is shutting down") + + def get_prefetched_blocks(self): + """Called by block processor when it is processing queued blocks.""" + blocks = self.blocks + self.blocks = [] + self.cache_size = 0 + self.refill_event.set() + return blocks + + async def reset_height(self, height): + """Reset to prefetch blocks from the block processor's height. + + Used in blockchain reorganisations. This coroutine can be + called asynchronously to the _prefetch_blocks coroutine so we + must synchronize with a semaphore. + """ + async with self.semaphore: + self.blocks.clear() + self.cache_size = 0 + self.fetched_height = height + self.refill_event.set() + + daemon_height = await self.daemon.height() + behind = daemon_height - height + if behind > 0: + self.logger.info(f'catching up to daemon height {daemon_height:,d} ' + f'({behind:,d} blocks behind)') + else: + self.logger.info(f'caught up to daemon height {daemon_height:,d}')
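A quick check of the cache-sizing arithmetic set up in `__init__` above; the blended-average update rule appears in `_prefetch_blocks` just below:

```python
# With min_cache_size of 10 MiB and ave_size initialized to min_cache_size // 10,
# the first fetch asks for 10 blocks; as real blocks arrive, ave_size becomes a
# blended average so fetch counts adapt to actual block sizes.
min_cache_size = 10 * 1024 * 1024
ave_size = min_cache_size // 10            # 1 MiB starting guess
assert min_cache_size // ave_size == 10    # -> first fetch is 10 blocks

# blended update when fewer than 10 blocks were fetched (count < 10):
count, size = 4, 4 * 300_000               # e.g. four ~300 kB blocks
ave_size = (size + (10 - count) * ave_size) // 10
assert ave_size == (1_200_000 + 6 * 1_048_576) // 10   # ~749 kB
```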
+ + async def _prefetch_blocks(self): + """Prefetch some blocks and put them on the queue. + + Repeats until the queue is full or caught up. + """ + daemon = self.daemon + daemon_height = await daemon.height() + async with self.semaphore: + while self.cache_size < self.min_cache_size: + # Try and catch up all blocks but limit to room in cache. + # Constrain fetch count to between 0 and 500 regardless; + # testnet can be lumpy. + cache_room = self.min_cache_size // self.ave_size + count = min(daemon_height - self.fetched_height, cache_room) + count = min(500, max(count, 0)) + if not count: + self.caught_up = True + return False + + first = self.fetched_height + 1 + hex_hashes = await daemon.block_hex_hashes(first, count) + if self.caught_up: + self.logger.info('new block height {:,d} hash {}' + .format(first + count-1, hex_hashes[-1])) + blocks = await daemon.raw_blocks(hex_hashes) + + assert count == len(blocks) + + # Special handling for genesis block + if first == 0: + blocks[0] = self.coin.genesis_block(blocks[0]) + self.logger.info(f'verified genesis block with hash {hex_hashes[0]}') + + # Update our recent average block size estimate + size = sum(len(block) for block in blocks) + if count >= 10: + self.ave_size = size // count + else: + self.ave_size = (size + (10 - count) * self.ave_size) // 10 + + self.blocks.extend(blocks) + self.cache_size += size + self.fetched_height += count + self.blocks_event.set() + + self.refill_event.clear() + return True diff --git a/scribe/blockchain/transaction/__init__.py b/scribe/blockchain/transaction/__init__.py new file mode 100644 index 0000000..6064314 --- /dev/null +++ b/scribe/blockchain/transaction/__init__.py @@ -0,0 +1,148 @@ +import sys +import functools +import typing +from dataclasses import dataclass +from struct import Struct +from scribe.schema.claim import Claim + +if (sys.version_info.major, sys.version_info.minor) > (3, 7): + cachedproperty = functools.cached_property +else: + cachedproperty = property + + +struct_le_i = Struct('<i') +struct_le_q = Struct('<q') +struct_le_H = Struct('<H') +struct_le_I = Struct('<I') +struct_le_Q = Struct('<Q') +struct_be_H = Struct('>H') +struct_be_I = Struct('>I') +structB = Struct('B') + +unpack_le_int32_from = struct_le_i.unpack_from +unpack_le_int64_from = struct_le_q.unpack_from +unpack_le_uint16_from = struct_le_H.unpack_from +unpack_le_uint32_from = struct_le_I.unpack_from +unpack_le_uint64_from = struct_le_Q.unpack_from +unpack_be_uint16_from = struct_be_H.unpack_from +unpack_be_uint32_from = struct_be_I.unpack_from + +pack_le_int32 = struct_le_i.pack +pack_le_int64 = struct_le_q.pack +pack_le_uint16 = struct_le_H.pack +pack_le_uint32 = struct_le_I.pack +pack_le_uint64 = struct_le_Q.pack +pack_be_uint64 = lambda x: x.to_bytes(8, byteorder='big') +pack_be_uint16 = lambda x: x.to_bytes(2, byteorder='big') +pack_be_uint32 = struct_be_I.pack +pack_byte = structB.pack + + +def pack_varint(n): + if n < 253: + return pack_byte(n) + if n < 65536: + return pack_byte(253) + pack_le_uint16(n) + if n < 4294967296: + return pack_byte(254) + pack_le_uint32(n) + return pack_byte(255) + pack_le_uint64(n) + + +def pack_varbytes(data): + return pack_varint(len(data)) + data + + +class NameClaim(typing.NamedTuple): + name: bytes + value: bytes + + +class ClaimUpdate(typing.NamedTuple): + name: bytes + claim_hash: bytes + value: typing.Optional[bytes] = None + + +class ClaimSupport(typing.NamedTuple): + name: bytes + claim_hash: bytes + value: typing.Optional[bytes] = None + + +ZERO = bytes(32) +MINUS_1 = 4294967295 + + +class Tx(typing.NamedTuple): + version: int + inputs: typing.List['TxInput'] + outputs: typing.List['TxOutput'] + locktime: int + raw: bytes + marker: typing.Optional[int] = None + flag: typing.Optional[int] = None + witness: typing.Optional[typing.List[typing.List[bytes]]] = None + + +class TxInput(typing.NamedTuple): + prev_hash: bytes + prev_idx: int + script: bytes + sequence: int + + def __str__(self): + return f"TxInput({self.prev_hash[::-1].hex()}, {self.prev_idx:d}, script={self.script.hex()}, " \ + f"sequence={self.sequence:d})" + + def 
is_generation(self): + """Test if an input is generation/coinbase like""" + return self.prev_idx == MINUS_1 and self.prev_hash == ZERO + + def serialize(self): + return b''.join(( + self.prev_hash, + pack_le_uint32(self.prev_idx), + pack_varbytes(self.script), + pack_le_uint32(self.sequence), + )) + + +@dataclass +class TxOutput: + nout: int + value: int + pk_script: bytes + claim: typing.Optional[typing.Union[NameClaim, ClaimUpdate]] # TODO: fix this being mutable, it shouldn't be + support: typing.Optional[ClaimSupport] + pubkey_hash: typing.Optional[bytes] + script_hash: typing.Optional[bytes] + pubkey: typing.Optional[bytes] + + @property + def is_claim(self): + return isinstance(self.claim, NameClaim) + + @property + def is_update(self): + return isinstance(self.claim, ClaimUpdate) + + @cachedproperty + def metadata(self) -> typing.Optional[Claim]: + return None if not (self.claim or self.support).value else Claim.from_bytes((self.claim or self.support).value) + + @property + def is_support(self): + return self.support is not None + + def serialize(self): + return b''.join(( + pack_le_int64(self.value), + pack_varbytes(self.pk_script), + )) + + +class Block(typing.NamedTuple): + raw: bytes + header: bytes + transactions: typing.List[Tx] diff --git a/scribe/blockchain/transaction/deserializer.py b/scribe/blockchain/transaction/deserializer.py new file mode 100644 index 0000000..c9e027b --- /dev/null +++ b/scribe/blockchain/transaction/deserializer.py @@ -0,0 +1,163 @@ +from scribe.common import double_sha256 +from scribe.blockchain.transaction import ( + unpack_le_int32_from, unpack_le_int64_from, unpack_le_uint16_from, + unpack_le_uint32_from, unpack_le_uint64_from, Tx, TxInput, TxOutput +) +from scribe.blockchain.transaction.script import txo_script_parser + + +class Deserializer: + """Deserializes blocks into transactions. + + External entry points are read_tx(), read_tx_and_hash(), + read_tx_and_vsize() and read_block(). + + This code is performance sensitive as it is executed 100s of + millions of times during sync. + """ + + TX_HASH_FN = staticmethod(double_sha256) + + def __init__(self, binary, start=0): + assert isinstance(binary, bytes), f"type {type(binary)} is not 'bytes'" + self.binary = binary + self.binary_length = len(binary) + self.cursor = start + self.flags = 0 + + def _read_witness(self, fields): + read_witness_field = self._read_witness_field + return [read_witness_field() for i in range(fields)] + + def _read_witness_field(self): + read_varbytes = self._read_varbytes + return [read_varbytes() for i in range(self._read_varint())] + + def _read_tx_parts(self): + """Return a (deserialized TX, tx_hash, vsize) tuple.""" + start = self.cursor + marker = self.binary[self.cursor + 4] + if marker: + tx = Tx( + self._read_le_int32(), # version + self._read_inputs(), # inputs + self._read_outputs(), # outputs + self._read_le_uint32(), # locktime + self.binary[start:self.cursor], + ) + tx_hash = self.TX_HASH_FN(self.binary[start:self.cursor]) + return tx, tx_hash, self.binary_length + + # Ugh, this is nasty. 
+ version = self._read_le_int32() + orig_ser = self.binary[start:self.cursor] + + marker = self._read_byte() + flag = self._read_byte() + + start = self.cursor + inputs = self._read_inputs() + outputs = self._read_outputs() + orig_ser += self.binary[start:self.cursor] + + base_size = self.cursor - start + witness = self._read_witness(len(inputs)) + + start = self.cursor + locktime = self._read_le_uint32() + orig_ser += self.binary[start:self.cursor] + vsize = (3 * base_size + self.binary_length) // 4 + + return Tx(version, inputs, outputs, locktime, orig_ser, marker, flag, witness), self.TX_HASH_FN(orig_ser), vsize + + def read_tx(self): + return self._read_tx_parts()[0] + + def read_tx_and_hash(self): + tx, tx_hash, vsize = self._read_tx_parts() + return tx, tx_hash + + def read_tx_and_vsize(self): + tx, tx_hash, vsize = self._read_tx_parts() + return tx, vsize + + def read_tx_block(self): + """Returns a list of (deserialized_tx, tx_hash) pairs.""" + read = self.read_tx_and_hash + # Some coins have excess data beyond the end of the transactions + return [read() for _ in range(self._read_varint())] + + def _read_inputs(self): + read_input = self._read_input + return [read_input() for i in range(self._read_varint())] + + def _read_input(self): + return TxInput( + self._read_nbytes(32), # prev_hash + self._read_le_uint32(), # prev_idx + self._read_varbytes(), # script + self._read_le_uint32() # sequence + ) + + def _read_outputs(self): + read_output = self._read_output + return [read_output(n) for n in range(self._read_varint())] + + def _read_output(self, n): + value = self._read_le_int64() + script = self._read_varbytes() # pk_script + decoded = txo_script_parser(script) + claim = support = pubkey_hash = script_hash = pubkey = None + if decoded: + claim, support, pubkey_hash, script_hash, pubkey = decoded + return TxOutput(n, value, script, claim, support, pubkey_hash, script_hash, pubkey) + + def _read_byte(self): + cursor = self.cursor + self.cursor += 1 + return self.binary[cursor] + + def _read_nbytes(self, n): + cursor = self.cursor + self.cursor = end = cursor + n + assert self.binary_length >= end + return self.binary[cursor:end] + + def _read_varbytes(self): + return self._read_nbytes(self._read_varint()) + + def _read_varint(self): + n = self.binary[self.cursor] + self.cursor += 1 + if n < 253: + return n + if n == 253: + return self._read_le_uint16() + if n == 254: + return self._read_le_uint32() + return self._read_le_uint64() + + def _read_le_int32(self): + result, = unpack_le_int32_from(self.binary, self.cursor) + self.cursor += 4 + return result + + def _read_le_int64(self): + result, = unpack_le_int64_from(self.binary, self.cursor) + self.cursor += 8 + return result + + def _read_le_uint16(self): + result, = unpack_le_uint16_from(self.binary, self.cursor) + self.cursor += 2 + return result + + def _read_le_uint32(self): + result, = unpack_le_uint32_from(self.binary, self.cursor) + self.cursor += 4 + return result + + def _read_le_uint64(self): + result, = unpack_le_uint64_from(self.binary, self.cursor) + self.cursor += 8 + return result diff --git a/scribe/blockchain/transaction/script.py b/scribe/blockchain/transaction/script.py new file mode 100644 index 0000000..4be83be --- /dev/null +++ b/scribe/blockchain/transaction/script.py @@ -0,0 +1,298 @@ +import typing +from scribe.blockchain.transaction import NameClaim, ClaimUpdate, ClaimSupport +from scribe.blockchain.transaction import unpack_le_uint16_from, unpack_le_uint32_from, pack_le_uint16, pack_le_uint32 + + +class 
_OpCodes(typing.NamedTuple): + def whatis(self, value: int): + try: + return self._fields[self.index(value)] + except (ValueError, IndexError): + return -1 + + OP_PUSHDATA1: int = 0x4c + OP_PUSHDATA2: int = 0x4d + OP_PUSHDATA4: int = 0x4e + OP_1NEGATE: int = 0x4f + OP_RESERVED: int = 0x50 + OP_1: int = 0x51 + OP_2: int = 0x52 + OP_3: int = 0x53 + OP_4: int = 0x54 + OP_5: int = 0x55 + OP_6: int = 0x56 + OP_7: int = 0x57 + OP_8: int = 0x58 + OP_9: int = 0x59 + OP_10: int = 0x5a + OP_11: int = 0x5b + OP_12: int = 0x5c + OP_13: int = 0x5d + OP_14: int = 0x5e + OP_15: int = 0x5f + OP_16: int = 0x60 + OP_NOP: int = 0x61 + OP_VER: int = 0x62 + OP_IF: int = 0x63 + OP_NOTIF: int = 0x64 + OP_VERIF: int = 0x65 + OP_VERNOTIF: int = 0x66 + OP_ELSE: int = 0x67 + OP_ENDIF: int = 0x68 + OP_VERIFY: int = 0x69 + OP_RETURN: int = 0x6a + OP_TOALTSTACK: int = 0x6b + OP_FROMALTSTACK: int = 0x6c + OP_2DROP: int = 0x6d + OP_2DUP: int = 0x6e + OP_3DUP: int = 0x6f + OP_2OVER: int = 0x70 + OP_2ROT: int = 0x71 + OP_2SWAP: int = 0x72 + OP_IFDUP: int = 0x73 + OP_DEPTH: int = 0x74 + OP_DROP: int = 0x75 + OP_DUP: int = 0x76 + OP_NIP: int = 0x77 + OP_OVER: int = 0x78 + OP_PICK: int = 0x79 + OP_ROLL: int = 0x7a + OP_ROT: int = 0x7b + OP_SWAP: int = 0x7c + OP_TUCK: int = 0x7d + OP_CAT: int = 0x7e + OP_SUBSTR: int = 0x7f + OP_LEFT: int = 0x80 + OP_RIGHT: int = 0x81 + OP_SIZE: int = 0x82 + OP_INVERT: int = 0x83 + OP_AND: int = 0x84 + OP_OR: int = 0x85 + OP_XOR: int = 0x86 + OP_EQUAL: int = 0x87 + OP_EQUALVERIFY: int = 0x88 + OP_RESERVED1: int = 0x89 + OP_RESERVED2: int = 0x8a + OP_1ADD: int = 0x8b + OP_1SUB: int = 0x8c + OP_2MUL: int = 0x8d + OP_2DIV: int = 0x8e + OP_NEGATE: int = 0x8f + OP_ABS: int = 0x90 + OP_NOT: int = 0x91 + OP_0NOTEQUAL: int = 0x92 + OP_ADD: int = 0x93 + OP_SUB: int = 0x94 + OP_MUL: int = 0x95 + OP_DIV: int = 0x96 + OP_MOD: int = 0x97 + OP_LSHIFT: int = 0x98 + OP_RSHIFT: int = 0x99 + OP_BOOLAND: int = 0x9a + OP_BOOLOR: int = 0x9b + OP_NUMEQUAL: int = 0x9c + OP_NUMEQUALVERIFY: int = 0x9d + OP_NUMNOTEQUAL: int = 0x9e + OP_LESSTHAN: int = 0x9f + OP_GREATERTHAN: int = 0xa0 + OP_LESSTHANOREQUAL: int = 0xa1 + OP_GREATERTHANOREQUAL: int = 0xa2 + OP_MIN: int = 0xa3 + OP_MAX: int = 0xa4 + OP_WITHIN: int = 0xa5 + OP_RIPEMD160: int = 0xa6 + OP_SHA1: int = 0xa7 + OP_SHA256: int = 0xa8 + OP_HASH160: int = 0xa9 + OP_HASH256: int = 0xaa + OP_CODESEPARATOR: int = 0xab + OP_CHECKSIG: int = 0xac + OP_CHECKSIGVERIFY: int = 0xad + OP_CHECKMULTISIG: int = 0xae + OP_CHECKMULTISIGVERIFY: int = 0xaf + OP_NOP1: int = 0xb0 + OP_CHECKLOCKTIMEVERIFY: int = 0xb1 + OP_CHECKSEQUENCEVERIFY: int = 0xb2 + OP_NOP4: int = 0xb3 + OP_NOP5: int = 0xb4 + OP_CLAIM_NAME: int = 0xb5 + OP_SUPPORT_CLAIM: int = 0xb6 + OP_UPDATE_CLAIM: int = 0xb7 + OP_NOP9: int = 0xb8 + OP_NOP10: int = 0xb9 + + +OpCodes = _OpCodes() + + +# Paranoia to make it hard to create bad scripts +assert OpCodes.OP_DUP == 0x76 +assert OpCodes.OP_HASH160 == 0xa9 +assert OpCodes.OP_EQUAL == 0x87 +assert OpCodes.OP_EQUALVERIFY == 0x88 +assert OpCodes.OP_CHECKSIG == 0xac +assert OpCodes.OP_CHECKMULTISIG == 0xae + +assert OpCodes.OP_CLAIM_NAME == 0xb5 +assert OpCodes.OP_SUPPORT_CLAIM == 0xb6 +assert OpCodes.OP_UPDATE_CLAIM == 0xb7 + + +def P2SH_script(hash160_bytes: bytes): + return bytes([OpCodes.OP_HASH160]) + script_push_data(hash160_bytes) + bytes([OpCodes.OP_EQUAL]) + + +def P2PKH_script(hash160_bytes: bytes): + return (bytes([OpCodes.OP_DUP, OpCodes.OP_HASH160]) + + script_push_data(hash160_bytes) + + bytes([OpCodes.OP_EQUALVERIFY, OpCodes.OP_CHECKSIG])) + + +def 
script_push_data(data: bytes):
+    n = len(data)
+    if n < OpCodes.OP_PUSHDATA1:
+        return bytes([n]) + data
+    if n < 256:
+        return bytes([OpCodes.OP_PUSHDATA1, n]) + data
+    if n < 65536:
+        return bytes([OpCodes.OP_PUSHDATA2]) + pack_le_uint16(n) + data
+    return bytes([OpCodes.OP_PUSHDATA4]) + pack_le_uint32(n) + data
+
+
+def script_GetOp(script_bytes: bytes):
+    i = 0
+    while i < len(script_bytes):
+        vch = None
+        opcode = script_bytes[i]
+        i += 1
+        if opcode <= OpCodes.OP_PUSHDATA4:
+            n_size = opcode
+            if opcode == OpCodes.OP_PUSHDATA1:
+                n_size = script_bytes[i]
+                i += 1
+            elif opcode == OpCodes.OP_PUSHDATA2:
+                (n_size,) = unpack_le_uint16_from(script_bytes, i)
+                i += 2
+            elif opcode == OpCodes.OP_PUSHDATA4:
+                (n_size,) = unpack_le_uint32_from(script_bytes, i)
+                i += 4
+            if i + n_size > len(script_bytes):
+                vch = b"_INVALID_" + script_bytes[i:]
+                i = len(script_bytes)
+            else:
+                vch = script_bytes[i:i + n_size]
+                i += n_size
+        yield opcode, vch, i
+
+
+_SCRIPT_TEMPLATES = (
+    # claim related templates
+    (OpCodes.OP_CLAIM_NAME, -1, -1, OpCodes.OP_2DROP, OpCodes.OP_DROP),
+    (OpCodes.OP_UPDATE_CLAIM, -1, -1, OpCodes.OP_2DROP, OpCodes.OP_DROP),
+    (OpCodes.OP_UPDATE_CLAIM, -1, -1, -1, OpCodes.OP_2DROP, OpCodes.OP_2DROP),
+    (OpCodes.OP_SUPPORT_CLAIM, -1, -1, OpCodes.OP_2DROP, OpCodes.OP_DROP),
+    (OpCodes.OP_SUPPORT_CLAIM, -1, -1, -1, OpCodes.OP_2DROP, OpCodes.OP_2DROP),
+
+    # receive script templates
+    (OpCodes.OP_DUP, OpCodes.OP_HASH160, -1, OpCodes.OP_EQUALVERIFY, OpCodes.OP_CHECKSIG),
+    (OpCodes.OP_HASH160, -1, OpCodes.OP_EQUAL),
+    (-1, OpCodes.OP_CHECKSIG)
+)
+_CLAIM_TEMPLATE = 0
+_UPDATE_NO_DATA_TEMPLATE = 1
+_UPDATE_TEMPLATE = 2
+_SUPPORT_TEMPLATE = 3
+_SUPPORT_WITH_DATA_TEMPLATE = 4
+
+_TO_ADDRESS_TEMPLATE = 5
+_TO_P2SH_TEMPLATE = 6
+_TO_PUBKEY_TEMPLATE = 7
+
+
+def txo_script_parser(script: bytes):
+    template = None
+    template_idx = None
+    values = []
+    receive_values = []
+    finished_decoding_claim = False
+    receive_cur = 0
+    claim, support, pubkey_hash, pubkey, script_hash = None, None, None, None, None
+    for cur, (op, data, _) in enumerate(script_GetOp(script)):
+        if finished_decoding_claim:  # we're decoding the receiving part of the script (the last part)
+            if receive_cur == 0:
+                if op == OpCodes.OP_DUP:
+                    template_idx = _TO_ADDRESS_TEMPLATE
+                elif op == OpCodes.OP_HASH160:
+                    template_idx = _TO_P2SH_TEMPLATE
+                elif op == -1:
+                    template_idx = _TO_PUBKEY_TEMPLATE
+                else:
+                    break  # return the decoded part
+                template = _SCRIPT_TEMPLATES[template_idx]
+            expected = template[receive_cur]
+            if expected == -1 and data is None:  # if data is expected, make sure it's there
+                # print("\texpected data", OpCodes.whatis(op), data)
+                return
+            elif expected == -1 and data:
+                receive_values.append(data)
+            elif op != expected:
+                # print("\top mismatch")
+                return
+            receive_cur += 1
+            continue
+
+        if cur == 0:  # initialize the template
+            if op == OpCodes.OP_CLAIM_NAME:
+                template_idx = _CLAIM_TEMPLATE
+            elif op == OpCodes.OP_UPDATE_CLAIM:
+                template_idx = _UPDATE_NO_DATA_TEMPLATE
+            elif op == OpCodes.OP_SUPPORT_CLAIM:
+                template_idx = _SUPPORT_TEMPLATE  # could be a support w/ data
+            elif op == OpCodes.OP_DUP:
+                template_idx = _TO_ADDRESS_TEMPLATE
+            elif op == OpCodes.OP_HASH160:
+                template_idx = _TO_P2SH_TEMPLATE
+            elif op == -1:
+                template_idx = _TO_PUBKEY_TEMPLATE
+            else:
+                return
+            template = _SCRIPT_TEMPLATES[template_idx]
+        elif cur == 3 and template_idx == _SUPPORT_TEMPLATE and data:
+            template_idx = _SUPPORT_WITH_DATA_TEMPLATE
+            template = _SCRIPT_TEMPLATES[template_idx]
+        elif cur == 3 and template_idx == _UPDATE_NO_DATA_TEMPLATE and data:
+            template_idx = _UPDATE_TEMPLATE
+            template = _SCRIPT_TEMPLATES[template_idx]
+
+        if cur >= len(template):
+            return
+
+        expected = template[cur]
+
+        if expected == -1 and data is None:  # if data is expected, make sure it's there
+            # print("\texpected data", OpCodes.whatis(op), data)
+            return
+        elif expected == -1 and data:
+            if template_idx in (_TO_ADDRESS_TEMPLATE, _TO_P2SH_TEMPLATE, _TO_PUBKEY_TEMPLATE):
+                receive_values.append(data)
+            else:
+                values.append(data)
+        elif op != expected:
+            # print("\top mismatch")
+            return
+        if cur + 1 == len(template):
+            finished_decoding_claim = True
+            if template_idx == _CLAIM_TEMPLATE:
+                claim = NameClaim(*values)
+            elif template_idx in (_UPDATE_NO_DATA_TEMPLATE, _UPDATE_TEMPLATE):
+                claim = ClaimUpdate(*values)
+            elif template_idx in (_SUPPORT_TEMPLATE, _SUPPORT_WITH_DATA_TEMPLATE):
+                support = ClaimSupport(*values)
+
+    if template_idx == _TO_ADDRESS_TEMPLATE:
+        pubkey_hash = receive_values[0]
+    elif template_idx == _TO_P2SH_TEMPLATE:
+        script_hash = receive_values[0]
+    elif template_idx == _TO_PUBKEY_TEMPLATE:
+        pubkey = receive_values[0]
+    return claim, support, pubkey_hash, script_hash, pubkey
diff --git a/scribe/build_info.py b/scribe/build_info.py
new file mode 100644
index 0000000..6dadae4
--- /dev/null
+++ b/scribe/build_info.py
@@ -0,0 +1,4 @@
+# don't touch this. CI server changes this during build/deployment
+BUILD = "dev"
+COMMIT_HASH = "none"
+DOCKER_TAG = "none"
diff --git a/scribe/cli.py b/scribe/cli.py
new file mode 100644
index 0000000..a424c82
--- /dev/null
+++ b/scribe/cli.py
@@ -0,0 +1,63 @@
+import logging
+import traceback
+import argparse
+from scribe.env import Env
+from scribe.blockchain.block_processor import BlockProcessor
+from scribe.readers import BlockchainReaderServer, ElasticWriter
+
+
+def get_arg_parser(name):
+    parser = argparse.ArgumentParser(
+        prog=name
+    )
+    Env.contribute_to_arg_parser(parser)
+    return parser
+
+
+def setup_logging():
+    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
+    logging.getLogger('aiohttp').setLevel(logging.WARNING)
+    logging.getLogger('elasticsearch').setLevel(logging.WARNING)
+
+
+def run_writer_forever():
+    setup_logging()
+    args = get_arg_parser('scribe').parse_args()
+    try:
+        block_processor = BlockProcessor(Env.from_arg_parser(args))
+        block_processor.run()
+    except Exception:
+        traceback.print_exc()
+        logging.critical('scribe terminated abnormally')
+    else:
+        logging.info('scribe terminated normally')
+
+
+def run_server_forever():
+    setup_logging()
+    args = get_arg_parser('scribe-hub').parse_args()
+
+    try:
+        server = BlockchainReaderServer(Env.from_arg_parser(args))
+        server.run()
+    except Exception:
+        traceback.print_exc()
+        logging.critical('hub terminated abnormally')
+    else:
+        logging.info('hub terminated normally')
+
+
+def run_es_sync_forever():
+    setup_logging()
+    parser = get_arg_parser('scribe-elastic-sync')
+    parser.add_argument('--reindex', action='store_true')
+    args = parser.parse_args()
+
+    try:
+        server = ElasticWriter(Env.from_arg_parser(args))
+        server.run(args.reindex)
+    except Exception:
+        traceback.print_exc()
+        logging.critical('es sync terminated abnormally')
+    else:
+        logging.info('es sync terminated normally')
diff --git a/scribe/common.py b/scribe/common.py
new file mode 100644
index 0000000..9e767b9
--- /dev/null
+++ b/scribe/common.py
@@ -0,0 +1,362 @@
+import hashlib
+import hmac
+import ipaddress
+import logging
+import typing
+import collections +from asyncio import get_event_loop, Event +from prometheus_client import Counter + +log = logging.getLogger(__name__) + + +_sha256 = hashlib.sha256 +_sha512 = hashlib.sha512 +_new_hash = hashlib.new +_new_hmac = hmac.new +HASHX_LEN = 11 +CLAIM_HASH_LEN = 20 + + +# class cachedproperty: +# def __init__(self, f): +# self.f = f +# +# def __get__(self, obj, type): +# obj = obj or type +# value = self.f(obj) +# setattr(obj, self.f.__name__, value) +# return value + + +def formatted_time(t, sep=' '): + """Return a number of seconds as a string in days, hours, mins and + maybe secs.""" + t = int(t) + fmts = (('{:d}d', 86400), ('{:02d}h', 3600), ('{:02d}m', 60)) + parts = [] + for fmt, n in fmts: + val = t // n + if parts or val: + parts.append(fmt.format(val)) + t %= n + if len(parts) < 3: + parts.append(f'{t:02d}s') + return sep.join(parts) + + +def protocol_tuple(s): + """Converts a protocol version number, such as "1.0" to a tuple (1, 0). + + If the version number is bad, (0, ) indicating version 0 is returned.""" + try: + return tuple(int(part) for part in s.split('.')) + except Exception: + return (0, ) + + +def version_string(ptuple): + """Convert a version tuple such as (1, 2) to "1.2". + There is always at least one dot, so (1, ) becomes "1.0".""" + while len(ptuple) < 2: + ptuple += (0, ) + return '.'.join(str(p) for p in ptuple) + + +def protocol_version(client_req, min_tuple, max_tuple): + """Given a client's protocol version string, return a pair of + protocol tuples: + (negotiated version, client min request) + If the request is unsupported, the negotiated protocol tuple is + None. + """ + if client_req is None: + client_min = client_max = min_tuple + else: + if isinstance(client_req, list) and len(client_req) == 2: + client_min, client_max = client_req + else: + client_min = client_max = client_req + client_min = protocol_tuple(client_min) + client_max = protocol_tuple(client_max) + + result = min(client_max, max_tuple) + if result < max(client_min, min_tuple) or result == (0, ): + result = None + + return result, client_min + + +class LRUCacheWithMetrics: + __slots__ = [ + 'capacity', + 'cache', + '_track_metrics', + 'hits', + 'misses' + ] + + def __init__(self, capacity: int, metric_name: typing.Optional[str] = None, namespace: str = "daemon_cache"): + self.capacity = capacity + self.cache = collections.OrderedDict() + if metric_name is None: + self._track_metrics = False + self.hits = self.misses = None + else: + self._track_metrics = True + try: + self.hits = Counter( + f"{metric_name}_cache_hit_count", "Number of cache hits", namespace=namespace + ) + self.misses = Counter( + f"{metric_name}_cache_miss_count", "Number of cache misses", namespace=namespace + ) + except ValueError as err: + log.debug("failed to set up prometheus %s_cache_miss_count metric: %s", metric_name, err) + self._track_metrics = False + self.hits = self.misses = None + + def get(self, key, default=None): + try: + value = self.cache.pop(key) + if self._track_metrics: + self.hits.inc() + except KeyError: + if self._track_metrics: + self.misses.inc() + return default + self.cache[key] = value + return value + + def set(self, key, value): + try: + self.cache.pop(key) + except KeyError: + if len(self.cache) >= self.capacity: + self.cache.popitem(last=False) + self.cache[key] = value + + def clear(self): + self.cache.clear() + + def pop(self, key): + return self.cache.pop(key) + + def __setitem__(self, key, value): + return self.set(key, value) + + def __getitem__(self, item): + return 
self.get(item) + + def __contains__(self, item) -> bool: + return item in self.cache + + def __len__(self): + return len(self.cache) + + def __delitem__(self, key): + self.cache.pop(key) + + def __del__(self): + self.clear() + + +class LRUCache: + __slots__ = [ + 'capacity', + 'cache' + ] + + def __init__(self, capacity: int): + self.capacity = capacity + self.cache = collections.OrderedDict() + + def get(self, key, default=None): + try: + value = self.cache.pop(key) + except KeyError: + return default + self.cache[key] = value + return value + + def set(self, key, value): + try: + self.cache.pop(key) + except KeyError: + if len(self.cache) >= self.capacity: + self.cache.popitem(last=False) + self.cache[key] = value + + def items(self): + return self.cache.items() + + def clear(self): + self.cache.clear() + + def pop(self, key, default=None): + return self.cache.pop(key, default) + + def __setitem__(self, key, value): + return self.set(key, value) + + def __getitem__(self, item): + return self.get(item) + + def __contains__(self, item) -> bool: + return item in self.cache + + def __len__(self): + return len(self.cache) + + def __delitem__(self, key): + self.cache.pop(key) + + def __del__(self): + self.clear() + + +# the ipaddress module does not show these subnets as reserved +CARRIER_GRADE_NAT_SUBNET = ipaddress.ip_network('100.64.0.0/10') +IPV4_TO_6_RELAY_SUBNET = ipaddress.ip_network('192.88.99.0/24') + + +def is_valid_public_ipv4(address, allow_localhost: bool = False, allow_lan: bool = False): + try: + parsed_ip = ipaddress.ip_address(address) + if parsed_ip.is_loopback and allow_localhost: + return True + if allow_lan and parsed_ip.is_private: + return True + if any((parsed_ip.version != 4, parsed_ip.is_unspecified, parsed_ip.is_link_local, parsed_ip.is_loopback, + parsed_ip.is_multicast, parsed_ip.is_reserved, parsed_ip.is_private)): + return False + else: + return not any((CARRIER_GRADE_NAT_SUBNET.supernet_of(ipaddress.ip_network(f"{address}/32")), + IPV4_TO_6_RELAY_SUBNET.supernet_of(ipaddress.ip_network(f"{address}/32")))) + except (ipaddress.AddressValueError, ValueError): + return False + + +def sha256(x): + """Simple wrapper of hashlib sha256.""" + return _sha256(x).digest() + + +def ripemd160(x): + """Simple wrapper of hashlib ripemd160.""" + h = _new_hash('ripemd160') + h.update(x) + return h.digest() + + +def double_sha256(x): + """SHA-256 of SHA-256, as used extensively in bitcoin.""" + return sha256(sha256(x)) + + +def hmac_sha512(key, msg): + """Use SHA-512 to provide an HMAC.""" + return _new_hmac(key, msg, _sha512).digest() + + +def hash160(x): + """RIPEMD-160 of SHA-256. Used to make bitcoin addresses from pubkeys.""" + return ripemd160(sha256(x)) + + +def hash_to_hex_str(x: bytes) -> str: + """Convert a big-endian binary hash to displayed hex string. + + Display form of a binary hash is reversed and converted to hex. 
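+
+    For example, hash_to_hex_str(b'\x01\x02\x03') returns '030201'.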
+ """ + return x[::-1].hex() + + +def hex_str_to_hash(x: str) -> bytes: + """Convert a displayed hex string to a binary hash.""" + return bytes.fromhex(x)[::-1] + + + +INVALID_REQUEST = -32600 +INVALID_ARGS = -32602 + + +class CodeMessageError(Exception): + + @property + def code(self): + return self.args[0] + + @property + def message(self): + return self.args[1] + + def __eq__(self, other): + return (isinstance(other, self.__class__) and + self.code == other.code and self.message == other.message) + + def __hash__(self): + # overridden to make the exception hashable + # see https://bugs.python.org/issue28603 + return hash((self.code, self.message)) + + @classmethod + def invalid_args(cls, message): + return cls(INVALID_ARGS, message) + + @classmethod + def invalid_request(cls, message): + return cls(INVALID_REQUEST, message) + + @classmethod + def empty_batch(cls): + return cls.invalid_request('batch is empty') + + +class RPCError(CodeMessageError): + pass + + + +class DaemonError(Exception): + """Raised when the daemon returns an error in its results.""" + + +class WarmingUpError(Exception): + """Internal - when the daemon is warming up.""" + + +class WorkQueueFullError(Exception): + """Internal - when the daemon's work queue is full.""" + + +class TaskGroup: + def __init__(self, loop=None): + self._loop = loop or get_event_loop() + self._tasks = set() + self.done = Event() + self.started = Event() + + def __len__(self): + return len(self._tasks) + + def add(self, coro): + task = self._loop.create_task(coro) + self._tasks.add(task) + self.started.set() + self.done.clear() + task.add_done_callback(self._remove) + return task + + def _remove(self, task): + self._tasks.remove(task) + if len(self._tasks) < 1: + self.done.set() + self.started.clear() + + def cancel(self): + for task in self._tasks: + task.cancel() + self.done.set() + self.started.clear() diff --git a/scribe/db/__init__.py b/scribe/db/__init__.py new file mode 100644 index 0000000..0b2ebca --- /dev/null +++ b/scribe/db/__init__.py @@ -0,0 +1 @@ +from .db import HubDB diff --git a/scribe/db/common.py b/scribe/db/common.py new file mode 100644 index 0000000..5ca52ab --- /dev/null +++ b/scribe/db/common.py @@ -0,0 +1,526 @@ +import typing +import enum +from typing import Optional +from scribe.error import ResolveCensoredError + + +@enum.unique +class DB_PREFIXES(enum.Enum): + claim_to_support = b'K' + support_to_claim = b'L' + + claim_to_txo = b'E' + txo_to_claim = b'G' + + claim_to_channel = b'I' + channel_to_claim = b'J' + + claim_short_id_prefix = b'F' + effective_amount = b'D' + claim_expiration = b'O' + + claim_takeover = b'P' + pending_activation = b'Q' + activated_claim_and_support = b'R' + active_amount = b'S' + + repost = b'V' + reposted_claim = b'W' + + undo = b'M' + touched_or_deleted = b'Y' + + tx = b'B' + block_hash = b'C' + header = b'H' + tx_num = b'N' + tx_count = b'T' + tx_hash = b'X' + utxo = b'u' + hashx_utxo = b'h' + hashx_history = b'x' + db_state = b's' + channel_count = b'Z' + support_amount = b'a' + block_tx = b'b' + trending_notifications = b'c' + mempool_tx = b'd' + touched_hashX = b'e' + + +COLUMN_SETTINGS = {} # this is updated by the PrefixRow metaclass + + +CLAIM_TYPES = { + 'stream': 1, + 'channel': 2, + 'repost': 3, + 'collection': 4, +} + +STREAM_TYPES = { + 'video': 1, + 'audio': 2, + 'image': 3, + 'document': 4, + 'binary': 5, + 'model': 6, +} + +# 9/21/2020 +MOST_USED_TAGS = { + "gaming", + "people & blogs", + "entertainment", + "music", + "pop culture", + "education", + "technology", + 
"blockchain", + "news", + "funny", + "science & technology", + "learning", + "gameplay", + "news & politics", + "comedy", + "bitcoin", + "beliefs", + "nature", + "art", + "economics", + "film & animation", + "lets play", + "games", + "sports", + "howto & style", + "game", + "cryptocurrency", + "playstation 4", + "automotive", + "crypto", + "mature", + "sony interactive entertainment", + "walkthrough", + "tutorial", + "video game", + "weapons", + "playthrough", + "pc", + "anime", + "how to", + "btc", + "fun", + "ethereum", + "food", + "travel & events", + "minecraft", + "science", + "autos & vehicles", + "play", + "politics", + "commentary", + "twitch", + "ps4live", + "love", + "ps4", + "nonprofits & activism", + "ps4share", + "fortnite", + "xbox", + "porn", + "video games", + "trump", + "español", + "money", + "music video", + "nintendo", + "movie", + "coronavirus", + "donald trump", + "steam", + "trailer", + "android", + "podcast", + "xbox one", + "survival", + "audio", + "linux", + "travel", + "funny moments", + "litecoin", + "animation", + "gamer", + "lets", + "playstation", + "bitcoin news", + "history", + "xxx", + "fox news", + "dance", + "god", + "adventure", + "liberal", + "2020", + "horror", + "government", + "freedom", + "reaction", + "meme", + "photography", + "truth", + "health", + "lbry", + "family", + "online", + "eth", + "crypto news", + "diy", + "trading", + "gold", + "memes", + "world", + "space", + "lol", + "covid-19", + "rpg", + "humor", + "democrat", + "film", + "call of duty", + "tech", + "religion", + "conspiracy", + "rap", + "cnn", + "hangoutsonair", + "unboxing", + "fiction", + "conservative", + "cars", + "hoa", + "epic", + "programming", + "progressive", + "cryptocurrency news", + "classical", + "jesus", + "movies", + "book", + "ps3", + "republican", + "fitness", + "books", + "multiplayer", + "animals", + "pokemon", + "bitcoin price", + "facebook", + "sharefactory", + "criptomonedas", + "cod", + "bible", + "business", + "stream", + "comics", + "how", + "fail", + "nsfw", + "new music", + "satire", + "pets & animals", + "computer", + "classical music", + "indie", + "musica", + "msnbc", + "fps", + "mod", + "sport", + "sony", + "ripple", + "auto", + "rock", + "marvel", + "complete", + "mining", + "political", + "mobile", + "pubg", + "hip hop", + "flat earth", + "xbox 360", + "reviews", + "vlogging", + "latest news", + "hack", + "tarot", + "iphone", + "media", + "cute", + "christian", + "free speech", + "trap", + "war", + "remix", + "ios", + "xrp", + "spirituality", + "song", + "league of legends", + "cat" +} + +MATURE_TAGS = [ + 'nsfw', 'porn', 'xxx', 'mature', 'adult', 'sex' +] + + +def normalize_tag(tag): + return tag.replace(" ", "_").replace("&", "and").replace("-", "_") + + +COMMON_TAGS = { + tag: normalize_tag(tag) for tag in list(MOST_USED_TAGS) +} + +INDEXED_LANGUAGES = [ + 'none', + 'en', + 'aa', + 'ab', + 'ae', + 'af', + 'ak', + 'am', + 'an', + 'ar', + 'as', + 'av', + 'ay', + 'az', + 'ba', + 'be', + 'bg', + 'bh', + 'bi', + 'bm', + 'bn', + 'bo', + 'br', + 'bs', + 'ca', + 'ce', + 'ch', + 'co', + 'cr', + 'cs', + 'cu', + 'cv', + 'cy', + 'da', + 'de', + 'dv', + 'dz', + 'ee', + 'el', + 'eo', + 'es', + 'et', + 'eu', + 'fa', + 'ff', + 'fi', + 'fj', + 'fo', + 'fr', + 'fy', + 'ga', + 'gd', + 'gl', + 'gn', + 'gu', + 'gv', + 'ha', + 'he', + 'hi', + 'ho', + 'hr', + 'ht', + 'hu', + 'hy', + 'hz', + 'ia', + 'id', + 'ie', + 'ig', + 'ii', + 'ik', + 'io', + 'is', + 'it', + 'iu', + 'ja', + 'jv', + 'ka', + 'kg', + 'ki', + 'kj', + 'kk', + 'kl', + 'km', + 'kn', + 'ko', + 'kr', + 
'ks', + 'ku', + 'kv', + 'kw', + 'ky', + 'la', + 'lb', + 'lg', + 'li', + 'ln', + 'lo', + 'lt', + 'lu', + 'lv', + 'mg', + 'mh', + 'mi', + 'mk', + 'ml', + 'mn', + 'mr', + 'ms', + 'mt', + 'my', + 'na', + 'nb', + 'nd', + 'ne', + 'ng', + 'nl', + 'nn', + 'no', + 'nr', + 'nv', + 'ny', + 'oc', + 'oj', + 'om', + 'or', + 'os', + 'pa', + 'pi', + 'pl', + 'ps', + 'pt', + 'qu', + 'rm', + 'rn', + 'ro', + 'ru', + 'rw', + 'sa', + 'sc', + 'sd', + 'se', + 'sg', + 'si', + 'sk', + 'sl', + 'sm', + 'sn', + 'so', + 'sq', + 'sr', + 'ss', + 'st', + 'su', + 'sv', + 'sw', + 'ta', + 'te', + 'tg', + 'th', + 'ti', + 'tk', + 'tl', + 'tn', + 'to', + 'tr', + 'ts', + 'tt', + 'tw', + 'ty', + 'ug', + 'uk', + 'ur', + 'uz', + 've', + 'vi', + 'vo', + 'wa', + 'wo', + 'xh', + 'yi', + 'yo', + 'za', + 'zh', + 'zu' +] + + +class ResolveResult(typing.NamedTuple): + name: str + normalized_name: str + claim_hash: bytes + tx_num: int + position: int + tx_hash: bytes + height: int + amount: int + short_url: str + is_controlling: bool + canonical_url: str + creation_height: int + activation_height: int + expiration_height: int + effective_amount: int + support_amount: int + reposted: int + last_takeover_height: typing.Optional[int] + claims_in_channel: typing.Optional[int] + channel_hash: typing.Optional[bytes] + reposted_claim_hash: typing.Optional[bytes] + signature_valid: typing.Optional[bool] + + +class TrendingNotification(typing.NamedTuple): + height: int + prev_amount: int + new_amount: int + + +class UTXO(typing.NamedTuple): + tx_num: int + tx_pos: int + tx_hash: bytes + height: int + value: int + + +OptionalResolveResultOrError = Optional[typing.Union[ResolveResult, ResolveCensoredError, LookupError, ValueError]] + + +class ExpandedResolveResult(typing.NamedTuple): + stream: OptionalResolveResultOrError + channel: OptionalResolveResultOrError + repost: OptionalResolveResultOrError + reposted_channel: OptionalResolveResultOrError + + +class DBError(Exception): + """Raised on general DB errors generally indicating corruption.""" diff --git a/scribe/db/db.py b/scribe/db/db.py new file mode 100644 index 0000000..60eb717 --- /dev/null +++ b/scribe/db/db.py @@ -0,0 +1,1129 @@ +import os +import asyncio +import array +import time +import typing +import struct +import zlib +import base64 +import logging +from typing import Optional, Iterable, Tuple, DefaultDict, Set, Dict, List, TYPE_CHECKING +from functools import partial +from asyncio import sleep +from bisect import bisect_right +from collections import defaultdict +from concurrent.futures.thread import ThreadPoolExecutor +from scribe.error import ResolveCensoredError +from scribe.schema.url import URL, normalize_name +from scribe.schema.claim import guess_stream_type +from scribe.schema.result import Censor +from scribe.blockchain.transaction import TxInput +from scribe.common import hash_to_hex_str, hash160, LRUCacheWithMetrics +from scribe.db.merkle import Merkle, MerkleCache +from scribe.db.common import ResolveResult, STREAM_TYPES, CLAIM_TYPES, ExpandedResolveResult, DBError, UTXO +from scribe.db.prefixes import PendingActivationValue, ClaimTakeoverValue, ClaimToTXOValue, PrefixDB +from scribe.db.prefixes import ACTIVATED_CLAIM_TXO_TYPE, ACTIVATED_SUPPORT_TXO_TYPE, EffectiveAmountKey +from scribe.db.prefixes import PendingActivationKey, TXOToClaimValue, DBStatePrefixRow, MempoolTXPrefixRow + + +TXO_STRUCT = struct.Struct(b'>LH') +TXO_STRUCT_unpack = TXO_STRUCT.unpack +TXO_STRUCT_pack = TXO_STRUCT.pack + + +class HubDB: + DB_VERSIONS = HIST_DB_VERSIONS = [7] + + def __init__(self, 
coin, db_dir: str, cache_MB: int = 512, reorg_limit: int = 200, + cache_all_claim_txos: bool = False, cache_all_tx_hashes: bool = False, + secondary_name: str = '', max_open_files: int = 64, blocking_channel_ids: List[str] = None, + filtering_channel_ids: List[str] = None, executor: ThreadPoolExecutor = None): + self.logger = logging.getLogger(__name__) + self.coin = coin + self._executor = executor + self._db_dir = db_dir + + self._cache_MB = cache_MB + self._reorg_limit = reorg_limit + self._cache_all_claim_txos = cache_all_claim_txos + self._cache_all_tx_hashes = cache_all_tx_hashes + self._secondary_name = secondary_name + if secondary_name: + assert max_open_files == -1, 'max open files must be -1 for secondary readers' + self._db_max_open_files = max_open_files + self.prefix_db: typing.Optional[PrefixDB] = None + + self.hist_unflushed = defaultdict(partial(array.array, 'I')) + self.hist_unflushed_count = 0 + self.hist_flush_count = 0 + self.hist_comp_flush_count = -1 + self.hist_comp_cursor = -1 + + self.es_sync_height = 0 + + # blocking/filtering dicts + blocking_channels = blocking_channel_ids or [] + filtering_channels = filtering_channel_ids or [] + self.blocked_streams = {} + self.blocked_channels = {} + self.blocking_channel_hashes = { + bytes.fromhex(channel_id) for channel_id in blocking_channels if channel_id + } + self.filtered_streams = {} + + self.filtered_channels = {} + self.filtering_channel_hashes = { + bytes.fromhex(channel_id) for channel_id in filtering_channels if channel_id + } + + self.tx_counts = None + self.headers = None + self.encoded_headers = LRUCacheWithMetrics(1 << 21, metric_name='encoded_headers', namespace='wallet_server') + self.last_flush = time.time() + + # Header merkle cache + self.merkle = Merkle() + self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes) + + self._tx_and_merkle_cache = LRUCacheWithMetrics(2 ** 16, metric_name='tx_and_merkle', namespace="wallet_server") + + # these are only used if the cache_all_tx_hashes setting is on + self.total_transactions: List[bytes] = [] + self.tx_num_mapping: Dict[bytes, int] = {} + + # these are only used if the cache_all_claim_txos setting is on + self.claim_to_txo: Dict[bytes, ClaimToTXOValue] = {} + self.txo_to_claim: DefaultDict[int, Dict[int, bytes]] = defaultdict(dict) + + self.genesis_bytes = bytes.fromhex(self.coin.GENESIS_HASH) + + def get_claim_from_txo(self, tx_num: int, tx_idx: int) -> Optional[TXOToClaimValue]: + claim_hash_and_name = self.prefix_db.txo_to_claim.get(tx_num, tx_idx) + if not claim_hash_and_name: + return + return claim_hash_and_name + + def get_repost(self, claim_hash) -> Optional[bytes]: + repost = self.prefix_db.repost.get(claim_hash) + if repost: + return repost.reposted_claim_hash + return + + def get_reposted_count(self, claim_hash: bytes) -> int: + return sum( + 1 for _ in self.prefix_db.reposted_claim.iterate(prefix=(claim_hash,), include_value=False) + ) + + def get_activation(self, tx_num, position, is_support=False) -> int: + activation = self.prefix_db.activated.get( + ACTIVATED_SUPPORT_TXO_TYPE if is_support else ACTIVATED_CLAIM_TXO_TYPE, tx_num, position + ) + if activation: + return activation.height + return -1 + + def get_supported_claim_from_txo(self, tx_num: int, position: int) -> typing.Tuple[Optional[bytes], Optional[int]]: + supported_claim_hash = self.prefix_db.support_to_claim.get(tx_num, position) + if supported_claim_hash: + packed_support_amount = self.prefix_db.claim_to_support.get( + supported_claim_hash.claim_hash, tx_num, position + ) + 
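+            # two-step lookup: support_to_claim resolves which claim the
+            # support txo points to, then claim_to_support (keyed by
+            # claim_hash, tx_num, position) holds the support amount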
if packed_support_amount: + return supported_claim_hash.claim_hash, packed_support_amount.amount + return None, None + + def get_support_amount(self, claim_hash: bytes): + support_amount_val = self.prefix_db.support_amount.get(claim_hash) + if support_amount_val is None: + return 0 + return support_amount_val.amount + + def get_supports(self, claim_hash: bytes): + return [ + (k.tx_num, k.position, v.amount) for k, v in self.prefix_db.claim_to_support.iterate(prefix=(claim_hash,)) + ] + + def get_short_claim_id_url(self, name: str, normalized_name: str, claim_hash: bytes, + root_tx_num: int, root_position: int) -> str: + claim_id = claim_hash.hex() + for prefix_len in range(10): + for k in self.prefix_db.claim_short_id.iterate(prefix=(normalized_name, claim_id[:prefix_len+1]), + include_value=False): + if k.root_tx_num == root_tx_num and k.root_position == root_position: + return f'{name}#{k.partial_claim_id}' + break + print(f"{claim_id} has a collision") + return f'{name}#{claim_id}' + + def _prepare_resolve_result(self, tx_num: int, position: int, claim_hash: bytes, name: str, + root_tx_num: int, root_position: int, activation_height: int, + signature_valid: bool) -> ResolveResult: + try: + normalized_name = normalize_name(name) + except UnicodeDecodeError: + normalized_name = name + controlling_claim = self.get_controlling_claim(normalized_name) + + tx_hash = self.get_tx_hash(tx_num) + height = bisect_right(self.tx_counts, tx_num) + created_height = bisect_right(self.tx_counts, root_tx_num) + last_take_over_height = controlling_claim.height + + expiration_height = self.coin.get_expiration_height(height) + support_amount = self.get_support_amount(claim_hash) + claim_amount = self.get_cached_claim_txo(claim_hash).amount + + effective_amount = self.get_effective_amount(claim_hash) + channel_hash = self.get_channel_for_claim(claim_hash, tx_num, position) + reposted_claim_hash = self.get_repost(claim_hash) + short_url = self.get_short_claim_id_url(name, normalized_name, claim_hash, root_tx_num, root_position) + canonical_url = short_url + claims_in_channel = self.get_claims_in_channel_count(claim_hash) + if channel_hash: + channel_vals = self.get_cached_claim_txo(channel_hash) + if channel_vals: + channel_short_url = self.get_short_claim_id_url( + channel_vals.name, channel_vals.normalized_name, channel_hash, channel_vals.root_tx_num, + channel_vals.root_position + ) + canonical_url = f'{channel_short_url}/{short_url}' + return ResolveResult( + name, normalized_name, claim_hash, tx_num, position, tx_hash, height, claim_amount, short_url=short_url, + is_controlling=controlling_claim.claim_hash == claim_hash, canonical_url=canonical_url, + last_takeover_height=last_take_over_height, claims_in_channel=claims_in_channel, + creation_height=created_height, activation_height=activation_height, + expiration_height=expiration_height, effective_amount=effective_amount, support_amount=support_amount, + channel_hash=channel_hash, reposted_claim_hash=reposted_claim_hash, + reposted=self.get_reposted_count(claim_hash), + signature_valid=None if not channel_hash else signature_valid + ) + + def _resolve_parsed_url(self, name: str, claim_id: Optional[str] = None, + amount_order: Optional[int] = None) -> Optional[ResolveResult]: + """ + :param normalized_name: name + :param claim_id: partial or complete claim id + :param amount_order: '$' suffix to a url, defaults to 1 (winning) if no claim id modifier is provided + """ + try: + normalized_name = normalize_name(name) + except UnicodeDecodeError: + 
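+            # names that fail unicode normalization are resolved verbatim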
normalized_name = name + if (not amount_order and not claim_id) or amount_order == 1: + # winning resolution + controlling = self.get_controlling_claim(normalized_name) + if not controlling: + # print(f"none controlling for lbry://{normalized_name}") + return + # print(f"resolved controlling lbry://{normalized_name}#{controlling.claim_hash.hex()}") + return self._fs_get_claim_by_hash(controlling.claim_hash) + + amount_order = max(int(amount_order or 1), 1) + + if claim_id: + if len(claim_id) == 40: # a full claim id + claim_txo = self.get_claim_txo(bytes.fromhex(claim_id)) + if not claim_txo or normalized_name != claim_txo.normalized_name: + return + return self._prepare_resolve_result( + claim_txo.tx_num, claim_txo.position, bytes.fromhex(claim_id), claim_txo.name, + claim_txo.root_tx_num, claim_txo.root_position, + self.get_activation(claim_txo.tx_num, claim_txo.position), claim_txo.channel_signature_is_valid + ) + # resolve by partial/complete claim id + for key, claim_txo in self.prefix_db.claim_short_id.iterate(prefix=(normalized_name, claim_id[:10])): + full_claim_hash = self.get_cached_claim_hash(claim_txo.tx_num, claim_txo.position) + c = self.get_cached_claim_txo(full_claim_hash) + + non_normalized_name = c.name + signature_is_valid = c.channel_signature_is_valid + return self._prepare_resolve_result( + claim_txo.tx_num, claim_txo.position, full_claim_hash, non_normalized_name, key.root_tx_num, + key.root_position, self.get_activation(claim_txo.tx_num, claim_txo.position), + signature_is_valid + ) + return + + # resolve by amount ordering, 1 indexed + for idx, (key, claim_val) in enumerate(self.prefix_db.effective_amount.iterate(prefix=(normalized_name,))): + if amount_order > idx + 1: + continue + claim_txo = self.get_cached_claim_txo(claim_val.claim_hash) + activation = self.get_activation(key.tx_num, key.position) + return self._prepare_resolve_result( + key.tx_num, key.position, claim_val.claim_hash, key.normalized_name, claim_txo.root_tx_num, + claim_txo.root_position, activation, claim_txo.channel_signature_is_valid + ) + return + + def _resolve_claim_in_channel(self, channel_hash: bytes, normalized_name: str): + candidates = [] + for key, stream in self.prefix_db.channel_to_claim.iterate(prefix=(channel_hash, normalized_name)): + effective_amount = self.get_effective_amount(stream.claim_hash) + if not candidates or candidates[-1][-1] == effective_amount: + candidates.append((stream.claim_hash, key.tx_num, key.position, effective_amount)) + else: + break + if not candidates: + return + return list(sorted(candidates, key=lambda item: item[1]))[0] + + def _resolve(self, url) -> ExpandedResolveResult: + try: + parsed = URL.parse(url) + except ValueError as e: + return ExpandedResolveResult(e, None, None, None) + + stream = channel = resolved_channel = resolved_stream = None + if parsed.has_stream_in_channel: + channel = parsed.channel + stream = parsed.stream + elif parsed.has_channel: + channel = parsed.channel + elif parsed.has_stream: + stream = parsed.stream + if channel: + resolved_channel = self._resolve_parsed_url(channel.name, channel.claim_id, channel.amount_order) + if not resolved_channel: + return ExpandedResolveResult(None, LookupError(f'Could not find channel in "{url}".'), None, None) + if stream: + if resolved_channel: + stream_claim = self._resolve_claim_in_channel(resolved_channel.claim_hash, stream.normalized) + if stream_claim: + stream_claim_id, stream_tx_num, stream_tx_pos, effective_amount = stream_claim + resolved_stream = 
self._fs_get_claim_by_hash(stream_claim_id) + else: + resolved_stream = self._resolve_parsed_url(stream.name, stream.claim_id, stream.amount_order) + if not channel and not resolved_channel and resolved_stream and resolved_stream.channel_hash: + resolved_channel = self._fs_get_claim_by_hash(resolved_stream.channel_hash) + if not resolved_stream: + return ExpandedResolveResult(LookupError(f'Could not find claim at "{url}".'), None, None, None) + + repost = None + reposted_channel = None + if resolved_stream or resolved_channel: + claim_hash = resolved_stream.claim_hash if resolved_stream else resolved_channel.claim_hash + claim = resolved_stream if resolved_stream else resolved_channel + reposted_claim_hash = resolved_stream.reposted_claim_hash if resolved_stream else None + blocker_hash = self.blocked_streams.get(claim_hash) or self.blocked_streams.get( + reposted_claim_hash) or self.blocked_channels.get(claim_hash) or self.blocked_channels.get( + reposted_claim_hash) or self.blocked_channels.get(claim.channel_hash) + if blocker_hash: + reason_row = self._fs_get_claim_by_hash(blocker_hash) + return ExpandedResolveResult( + None, ResolveCensoredError(url, blocker_hash, censor_row=reason_row), None, None + ) + if claim.reposted_claim_hash: + repost = self._fs_get_claim_by_hash(claim.reposted_claim_hash) + if repost and repost.channel_hash and repost.signature_valid: + reposted_channel = self._fs_get_claim_by_hash(repost.channel_hash) + return ExpandedResolveResult(resolved_stream, resolved_channel, repost, reposted_channel) + + async def resolve(self, url) -> ExpandedResolveResult: + return await asyncio.get_event_loop().run_in_executor(self._executor, self._resolve, url) + + def _fs_get_claim_by_hash(self, claim_hash): + claim = self.get_cached_claim_txo(claim_hash) + if claim: + activation = self.get_activation(claim.tx_num, claim.position) + return self._prepare_resolve_result( + claim.tx_num, claim.position, claim_hash, claim.name, claim.root_tx_num, claim.root_position, + activation, claim.channel_signature_is_valid + ) + + async def fs_getclaimbyid(self, claim_id): + return await asyncio.get_event_loop().run_in_executor( + self._executor, self._fs_get_claim_by_hash, bytes.fromhex(claim_id) + ) + + def get_claim_txo_amount(self, claim_hash: bytes) -> Optional[int]: + claim = self.get_claim_txo(claim_hash) + if claim: + return claim.amount + + def get_block_hash(self, height: int) -> Optional[bytes]: + v = self.prefix_db.block_hash.get(height) + if v: + return v.block_hash + + def get_support_txo_amount(self, claim_hash: bytes, tx_num: int, position: int) -> Optional[int]: + v = self.prefix_db.claim_to_support.get(claim_hash, tx_num, position) + return None if not v else v.amount + + def get_claim_txo(self, claim_hash: bytes) -> Optional[ClaimToTXOValue]: + assert claim_hash + return self.prefix_db.claim_to_txo.get(claim_hash) + + def _get_active_amount(self, claim_hash: bytes, txo_type: int, height: int) -> int: + return sum( + v.amount for v in self.prefix_db.active_amount.iterate( + start=(claim_hash, txo_type, 0), stop=(claim_hash, txo_type, height), include_key=False + ) + ) + + def get_active_amount_as_of_height(self, claim_hash: bytes, height: int) -> int: + for v in self.prefix_db.active_amount.iterate( + start=(claim_hash, ACTIVATED_CLAIM_TXO_TYPE, 0), stop=(claim_hash, ACTIVATED_CLAIM_TXO_TYPE, height), + include_key=False, reverse=True): + return v.amount + return 0 + + def get_effective_amount(self, claim_hash: bytes) -> int: + return self._get_active_amount( + claim_hash, 
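+            # effective amount = activated supports + activated claim amount,
+            # summed over activations up to the current db height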
ACTIVATED_SUPPORT_TXO_TYPE, self.db_height + 1 + ) + self._get_active_amount(claim_hash, ACTIVATED_CLAIM_TXO_TYPE, self.db_height + 1) + + def get_url_effective_amount(self, name: str, claim_hash: bytes) -> Optional['EffectiveAmountKey']: + for k, v in self.prefix_db.effective_amount.iterate(prefix=(name,)): + if v.claim_hash == claim_hash: + return k + + def get_claims_for_name(self, name): + claims = [] + prefix = self.prefix_db.claim_short_id.pack_partial_key(name) + bytes([1]) + stop = self.prefix_db.claim_short_id.pack_partial_key(name) + int(2).to_bytes(1, byteorder='big') + cf = self.prefix_db.column_families[self.prefix_db.claim_short_id.prefix] + for _v in self.prefix_db.iterator(column_family=cf, start=prefix, iterate_upper_bound=stop, include_key=False): + v = self.prefix_db.claim_short_id.unpack_value(_v) + claim_hash = self.get_claim_from_txo(v.tx_num, v.position).claim_hash + if claim_hash not in claims: + claims.append(claim_hash) + return claims + + def get_claims_in_channel_count(self, channel_hash) -> int: + channel_count_val = self.prefix_db.channel_count.get(channel_hash) + if channel_count_val is None: + return 0 + return channel_count_val.count + + async def reload_blocking_filtering_streams(self): + def reload(): + self.blocked_streams, self.blocked_channels = self.get_streams_and_channels_reposted_by_channel_hashes( + self.blocking_channel_hashes + ) + self.filtered_streams, self.filtered_channels = self.get_streams_and_channels_reposted_by_channel_hashes( + self.filtering_channel_hashes + ) + await asyncio.get_event_loop().run_in_executor(self._executor, reload) + + def get_streams_and_channels_reposted_by_channel_hashes(self, reposter_channel_hashes: Set[bytes]): + streams, channels = {}, {} + for reposter_channel_hash in reposter_channel_hashes: + for stream in self.prefix_db.channel_to_claim.iterate((reposter_channel_hash, ), include_key=False): + repost = self.get_repost(stream.claim_hash) + if repost: + txo = self.get_claim_txo(repost) + if txo: + if txo.normalized_name.startswith('@'): + channels[repost] = reposter_channel_hash + else: + streams[repost] = reposter_channel_hash + return streams, channels + + def get_channel_for_claim(self, claim_hash, tx_num, position) -> Optional[bytes]: + v = self.prefix_db.claim_to_channel.get(claim_hash, tx_num, position) + if v: + return v.signing_hash + + def get_expired_by_height(self, height: int) -> Dict[bytes, Tuple[int, int, str, TxInput]]: + expired = {} + for k, v in self.prefix_db.claim_expiration.iterate(prefix=(height,)): + tx_hash = self.get_tx_hash(k.tx_num) + tx = self.coin.transaction(self.prefix_db.tx.get(tx_hash, deserialize_value=False)) + # treat it like a claim spend so it will delete/abandon properly + # the _spend_claim function this result is fed to expects a txi, so make a mock one + # print(f"\texpired lbry://{v.name} {v.claim_hash.hex()}") + expired[v.claim_hash] = ( + k.tx_num, k.position, v.normalized_name, + TxInput(prev_hash=tx_hash, prev_idx=k.position, script=tx.outputs[k.position].pk_script, sequence=0) + ) + return expired + + def get_controlling_claim(self, name: str) -> Optional[ClaimTakeoverValue]: + controlling = self.prefix_db.claim_takeover.get(name) + if not controlling: + return + return controlling + + def get_claim_txos_for_name(self, name: str): + txos = {} + prefix = self.prefix_db.claim_short_id.pack_partial_key(name) + int(1).to_bytes(1, byteorder='big') + stop = self.prefix_db.claim_short_id.pack_partial_key(name) + int(2).to_bytes(1, byteorder='big') + cf = 
self.prefix_db.column_families[self.prefix_db.claim_short_id.prefix] + for v in self.prefix_db.iterator(column_family=cf, start=prefix, iterate_upper_bound=stop, include_key=False): + tx_num, nout = self.prefix_db.claim_short_id.unpack_value(v) + txos[self.get_claim_from_txo(tx_num, nout).claim_hash] = tx_num, nout + return txos + + def get_claim_metadata(self, tx_hash, nout): + raw = self.prefix_db.tx.get(tx_hash, deserialize_value=False) + try: + return self.coin.transaction(raw).outputs[nout].metadata + except: + self.logger.exception("claim parsing for ES failed with tx: %s", tx_hash[::-1].hex()) + return + + def _prepare_claim_metadata(self, claim_hash: bytes, claim: ResolveResult): + metadata = self.get_claim_metadata(claim.tx_hash, claim.position) + if not metadata: + return + metadata = metadata + if not metadata.is_stream or not metadata.stream.has_fee: + fee_amount = 0 + else: + fee_amount = int(max(metadata.stream.fee.amount or 0, 0) * 1000) + if fee_amount >= 9223372036854775807: + return + reposted_claim_hash = None if not metadata.is_repost else metadata.repost.reference.claim_hash[::-1] + reposted_claim = None + reposted_metadata = None + if reposted_claim_hash: + reposted_claim = self.get_cached_claim_txo(reposted_claim_hash) + if not reposted_claim: + return + reposted_metadata = self.get_claim_metadata( + self.get_tx_hash(reposted_claim.tx_num), reposted_claim.position + ) + if not reposted_metadata: + return + reposted_tags = [] + reposted_languages = [] + reposted_has_source = False + reposted_claim_type = None + reposted_stream_type = None + reposted_media_type = None + reposted_fee_amount = None + reposted_fee_currency = None + reposted_duration = None + if reposted_claim: + reposted_tx_hash = self.get_tx_hash(reposted_claim.tx_num) + raw_reposted_claim_tx = self.prefix_db.tx.get(reposted_tx_hash, deserialize_value=False) + try: + reposted_metadata = self.coin.transaction( + raw_reposted_claim_tx + ).outputs[reposted_claim.position].metadata + except: + self.logger.error("failed to parse reposted claim in tx %s that was reposted by %s", + reposted_tx_hash[::-1].hex(), claim_hash.hex()) + return + if reposted_metadata: + if reposted_metadata.is_stream: + meta = reposted_metadata.stream + elif reposted_metadata.is_channel: + meta = reposted_metadata.channel + elif reposted_metadata.is_collection: + meta = reposted_metadata.collection + elif reposted_metadata.is_repost: + meta = reposted_metadata.repost + else: + return + reposted_tags = [tag for tag in meta.tags] + reposted_languages = [lang.language or 'none' for lang in meta.languages] or ['none'] + reposted_has_source = False if not reposted_metadata.is_stream else reposted_metadata.stream.has_source + reposted_claim_type = CLAIM_TYPES[reposted_metadata.claim_type] + reposted_stream_type = STREAM_TYPES[guess_stream_type(reposted_metadata.stream.source.media_type)] \ + if reposted_has_source else 0 + reposted_media_type = reposted_metadata.stream.source.media_type if reposted_metadata.is_stream else 0 + if not reposted_metadata.is_stream or not reposted_metadata.stream.has_fee: + reposted_fee_amount = 0 + else: + reposted_fee_amount = int(max(reposted_metadata.stream.fee.amount or 0, 0) * 1000) + if reposted_fee_amount >= 9223372036854775807: + return + reposted_fee_currency = None if not reposted_metadata.is_stream else reposted_metadata.stream.fee.currency + reposted_duration = None + if reposted_metadata.is_stream and \ + (reposted_metadata.stream.video.duration or reposted_metadata.stream.audio.duration): + 
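+                    # prefer the video duration, falling back to audio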
reposted_duration = reposted_metadata.stream.video.duration or reposted_metadata.stream.audio.duration + if metadata.is_stream: + meta = metadata.stream + elif metadata.is_channel: + meta = metadata.channel + elif metadata.is_collection: + meta = metadata.collection + elif metadata.is_repost: + meta = metadata.repost + else: + return + claim_tags = [tag for tag in meta.tags] + claim_languages = [lang.language or 'none' for lang in meta.languages] or ['none'] + + tags = list(set(claim_tags).union(set(reposted_tags))) + languages = list(set(claim_languages).union(set(reposted_languages))) + blocked_hash = self.blocked_streams.get(claim_hash) or self.blocked_streams.get( + reposted_claim_hash) or self.blocked_channels.get(claim_hash) or self.blocked_channels.get( + reposted_claim_hash) or self.blocked_channels.get(claim.channel_hash) + filtered_hash = self.filtered_streams.get(claim_hash) or self.filtered_streams.get( + reposted_claim_hash) or self.filtered_channels.get(claim_hash) or self.filtered_channels.get( + reposted_claim_hash) or self.filtered_channels.get(claim.channel_hash) + value = { + 'claim_id': claim_hash.hex(), + 'claim_name': claim.name, + 'normalized_name': claim.normalized_name, + 'tx_id': claim.tx_hash[::-1].hex(), + 'tx_num': claim.tx_num, + 'tx_nout': claim.position, + 'amount': claim.amount, + 'timestamp': self.estimate_timestamp(claim.height), + 'creation_timestamp': self.estimate_timestamp(claim.creation_height), + 'height': claim.height, + 'creation_height': claim.creation_height, + 'activation_height': claim.activation_height, + 'expiration_height': claim.expiration_height, + 'effective_amount': claim.effective_amount, + 'support_amount': claim.support_amount, + 'is_controlling': bool(claim.is_controlling), + 'last_take_over_height': claim.last_takeover_height, + 'short_url': claim.short_url, + 'canonical_url': claim.canonical_url, + 'title': None if not metadata.is_stream else metadata.stream.title, + 'author': None if not metadata.is_stream else metadata.stream.author, + 'description': None if not metadata.is_stream else metadata.stream.description, + 'claim_type': CLAIM_TYPES[metadata.claim_type], + 'has_source': reposted_has_source if metadata.is_repost else ( + False if not metadata.is_stream else metadata.stream.has_source), + 'sd_hash': metadata.stream.source.sd_hash if metadata.is_stream and metadata.stream.has_source else None, + 'stream_type': STREAM_TYPES[guess_stream_type(metadata.stream.source.media_type)] + if metadata.is_stream and metadata.stream.has_source + else reposted_stream_type if metadata.is_repost else 0, + 'media_type': metadata.stream.source.media_type + if metadata.is_stream else reposted_media_type if metadata.is_repost else None, + 'fee_amount': fee_amount if not metadata.is_repost else reposted_fee_amount, + 'fee_currency': metadata.stream.fee.currency + if metadata.is_stream else reposted_fee_currency if metadata.is_repost else None, + 'repost_count': self.get_reposted_count(claim_hash), + 'reposted_claim_id': None if not reposted_claim_hash else reposted_claim_hash.hex(), + 'reposted_claim_type': reposted_claim_type, + 'reposted_has_source': reposted_has_source, + 'channel_id': None if not metadata.is_signed else metadata.signing_channel_hash[::-1].hex(), + 'public_key_id': None if not metadata.is_channel else + self.coin.P2PKH_address_from_hash160(hash160(metadata.channel.public_key_bytes)), + 'signature': (metadata.signature or b'').hex() or None, + # 'signature_digest': metadata.signature, + 'is_signature_valid': 
bool(claim.signature_valid), + 'tags': tags, + 'languages': languages, + 'censor_type': Censor.RESOLVE if blocked_hash else Censor.SEARCH if filtered_hash else Censor.NOT_CENSORED, + 'censoring_channel_id': (blocked_hash or filtered_hash or b'').hex() or None, + 'claims_in_channel': None if not metadata.is_channel else self.get_claims_in_channel_count(claim_hash) + } + + if metadata.is_repost and reposted_duration is not None: + value['duration'] = reposted_duration + elif metadata.is_stream and (metadata.stream.video.duration or metadata.stream.audio.duration): + value['duration'] = metadata.stream.video.duration or metadata.stream.audio.duration + if metadata.is_stream: + value['release_time'] = metadata.stream.release_time or value['creation_timestamp'] + elif metadata.is_repost or metadata.is_collection: + value['release_time'] = value['creation_timestamp'] + return value + + async def all_claims_producer(self, batch_size=500_000): + batch = [] + if self._cache_all_claim_txos: + claim_iterator = self.claim_to_txo.items() + else: + claim_iterator = map(lambda item: (item[0].claim_hash, item[1]), self.prefix_db.claim_to_txo.iterate()) + + for claim_hash, claim_txo in claim_iterator: + # TODO: fix the couple of claim txos that dont have controlling names + if not self.prefix_db.claim_takeover.get(claim_txo.normalized_name): + continue + activation = self.get_activation(claim_txo.tx_num, claim_txo.position) + claim = self._prepare_resolve_result( + claim_txo.tx_num, claim_txo.position, claim_hash, claim_txo.name, claim_txo.root_tx_num, + claim_txo.root_position, activation, claim_txo.channel_signature_is_valid + ) + if claim: + batch.append(claim) + if len(batch) == batch_size: + batch.sort(key=lambda x: x.tx_hash) # sort is to improve read-ahead hits + for claim in batch: + meta = self._prepare_claim_metadata(claim.claim_hash, claim) + if meta: + yield meta + batch.clear() + batch.sort(key=lambda x: x.tx_hash) + for claim in batch: + meta = self._prepare_claim_metadata(claim.claim_hash, claim) + if meta: + yield meta + batch.clear() + + def claim_producer(self, claim_hash: bytes) -> Optional[Dict]: + claim_txo = self.get_cached_claim_txo(claim_hash) + if not claim_txo: + self.logger.warning("can't sync non existent claim to ES: %s", claim_hash.hex()) + return + if not self.prefix_db.claim_takeover.get(claim_txo.normalized_name): + self.logger.warning("can't sync non existent claim to ES: %s", claim_hash.hex()) + return + activation = self.get_activation(claim_txo.tx_num, claim_txo.position) + claim = self._prepare_resolve_result( + claim_txo.tx_num, claim_txo.position, claim_hash, claim_txo.name, claim_txo.root_tx_num, + claim_txo.root_position, activation, claim_txo.channel_signature_is_valid + ) + if not claim: + self.logger.warning("wat") + return + return self._prepare_claim_metadata(claim.claim_hash, claim) + + def claims_producer(self, claim_hashes: Set[bytes]): + batch = [] + results = [] + + for claim_hash in claim_hashes: + claim_txo = self.get_cached_claim_txo(claim_hash) + if not claim_txo: + self.logger.warning("can't sync non existent claim to ES: %s", claim_hash.hex()) + continue + if not self.prefix_db.claim_takeover.get(claim_txo.normalized_name): + self.logger.warning("can't sync non existent claim to ES: %s", claim_hash.hex()) + continue + + activation = self.get_activation(claim_txo.tx_num, claim_txo.position) + claim = self._prepare_resolve_result( + claim_txo.tx_num, claim_txo.position, claim_hash, claim_txo.name, claim_txo.root_tx_num, + claim_txo.root_position, 
activation, claim_txo.channel_signature_is_valid + ) + if claim: + batch.append(claim) + + batch.sort(key=lambda x: x.tx_hash) + + for claim in batch: + _meta = self._prepare_claim_metadata(claim.claim_hash, claim) + if _meta: + results.append(_meta) + return results + + def get_activated_at_height(self, height: int) -> DefaultDict[PendingActivationValue, List[PendingActivationKey]]: + activated = defaultdict(list) + for k, v in self.prefix_db.pending_activation.iterate(prefix=(height,)): + activated[v].append(k) + return activated + + def get_future_activated(self, height: int) -> typing.Dict[PendingActivationValue, PendingActivationKey]: + results = {} + for k, v in self.prefix_db.pending_activation.iterate( + start=(height + 1,), stop=(height + 1 + self.coin.maxTakeoverDelay,), reverse=True): + if v not in results: + results[v] = k + return results + + async def _read_tx_counts(self): + if self.tx_counts is not None: + return + # tx_counts[N] has the cumulative number of txs at the end of + # height N. So tx_counts[0] is 1 - the genesis coinbase + + def get_counts(): + return [ + v.tx_count for v in self.prefix_db.tx_count.iterate( + start=(0,), stop=(self.db_height + 1,), include_key=False, fill_cache=False + ) + ] + + tx_counts = await asyncio.get_event_loop().run_in_executor(self._executor, get_counts) + assert len(tx_counts) == self.db_height + 1, f"{len(tx_counts)} vs {self.db_height + 1}" + self.tx_counts = array.array('I', tx_counts) + + if self.tx_counts: + assert self.db_tx_count == self.tx_counts[-1], \ + f"{self.db_tx_count} vs {self.tx_counts[-1]} ({len(self.tx_counts)} counts)" + else: + assert self.db_tx_count == 0 + + async def _read_claim_txos(self): + def read_claim_txos(): + set_claim_to_txo = self.claim_to_txo.__setitem__ + for k, v in self.prefix_db.claim_to_txo.iterate(fill_cache=False): + set_claim_to_txo(k.claim_hash, v) + self.txo_to_claim[v.tx_num][v.position] = k.claim_hash + + self.claim_to_txo.clear() + self.txo_to_claim.clear() + start = time.perf_counter() + self.logger.info("loading claims") + await asyncio.get_event_loop().run_in_executor(self._executor, read_claim_txos) + ts = time.perf_counter() - start + self.logger.info("loaded %i claim txos in %ss", len(self.claim_to_txo), round(ts, 4)) + + async def _read_headers(self): + if self.headers is not None: + return + + def get_headers(): + return [ + header for header in self.prefix_db.header.iterate( + start=(0, ), stop=(self.db_height + 1, ), include_key=False, fill_cache=False, deserialize_value=False + ) + ] + + headers = await asyncio.get_event_loop().run_in_executor(self._executor, get_headers) + assert len(headers) - 1 == self.db_height, f"{len(headers)} vs {self.db_height}" + self.headers = headers + + async def _read_tx_hashes(self): + def _read_tx_hashes(): + return list(self.prefix_db.tx_hash.iterate(start=(0,), stop=(self.db_tx_count + 1), include_key=False, fill_cache=False, deserialize_value=False)) + + self.logger.info("loading tx hashes") + self.total_transactions.clear() + self.tx_num_mapping.clear() + start = time.perf_counter() + self.total_transactions.extend(await asyncio.get_event_loop().run_in_executor(self._executor, _read_tx_hashes)) + self.tx_num_mapping = { + tx_hash: tx_num for tx_num, tx_hash in enumerate(self.total_transactions) + } + ts = time.perf_counter() - start + self.logger.info("loaded %i tx hashes in %ss", len(self.total_transactions), round(ts, 4)) + + def estimate_timestamp(self, height: int) -> int: + if height < len(self.headers): + return struct.unpack(' 0: + 
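+            # warm the header merkle cache up front so merkle branch
+            # lookups for headers don't recompute the tree on demand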
+    def close(self):
+        self.prefix_db.close()
+        self.prefix_db = None
+
+    def get_tx_hash(self, tx_num: int) -> bytes:
+        if self._cache_all_tx_hashes:
+            return self.total_transactions[tx_num]
+        return self.prefix_db.tx_hash.get(tx_num, deserialize_value=False)
+
+    def get_tx_num(self, tx_hash: bytes) -> int:
+        if self._cache_all_tx_hashes:
+            return self.tx_num_mapping[tx_hash]
+        return self.prefix_db.tx_num.get(tx_hash).tx_num
+
+    def get_cached_claim_txo(self, claim_hash: bytes) -> Optional[ClaimToTXOValue]:
+        if self._cache_all_claim_txos:
+            return self.claim_to_txo.get(claim_hash)
+        return self.prefix_db.claim_to_txo.get_pending(claim_hash)
+
+    def get_cached_claim_hash(self, tx_num: int, position: int) -> Optional[bytes]:
+        if self._cache_all_claim_txos:
+            if tx_num not in self.txo_to_claim:
+                return
+            return self.txo_to_claim[tx_num].get(position, None)
+        v = self.prefix_db.txo_to_claim.get_pending(tx_num, position)
+        return None if not v else v.claim_hash
+
+    def get_cached_claim_exists(self, tx_num: int, position: int) -> bool:
+        return self.get_cached_claim_hash(tx_num, position) is not None
+
+    # Header merkle cache
+
+    async def populate_header_merkle_cache(self):
+        self.logger.info('populating header merkle cache...')
+        length = max(1, self.db_height - self._reorg_limit)
+        start = time.time()
+        await self.header_mc.initialize(length)
+        elapsed = time.time() - start
+        self.logger.info(f'header merkle cache populated in {elapsed:.1f}s')
+
+    async def header_branch_and_root(self, length, height):
+        return await self.header_mc.branch_and_root(length, height)
+
+    async def raw_header(self, height):
+        """Return the binary header at the given height."""
+        header, n = await self.read_headers(height, 1)
+        if n != 1:
+            raise IndexError(f'height {height:,d} out of range')
+        return header
+
+    def encode_headers(self, start_height, count, headers):
+        key = (start_height, count)
+        if not self.encoded_headers.get(key):
+            compressobj = zlib.compressobj(wbits=-15, level=1, memLevel=9)
+            headers = base64.b64encode(compressobj.compress(headers) + compressobj.flush()).decode()
+            if start_height % 1000 != 0:
+                return headers
+            self.encoded_headers[key] = headers
+        return self.encoded_headers.get(key)
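`encode_headers` above serves pre-compressed header chunks: a raw deflate stream (`wbits=-15`, no zlib header) wrapped in base64, cached only for 1000-aligned start heights. A sketch of the inverse transform a client would apply, shown round-tripping a dummy 112-byte header:

```python
import base64
import zlib

def decode_headers(encoded: str) -> bytes:
    # Inverse of encode_headers: base64, then raw deflate (wbits=-15).
    return zlib.decompress(base64.b64decode(encoded), wbits=-15)

compressobj = zlib.compressobj(wbits=-15, level=1, memLevel=9)
blob = base64.b64encode(compressobj.compress(b'\x00' * 112) + compressobj.flush()).decode()
assert decode_headers(blob) == b'\x00' * 112
```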
+    async def read_headers(self, start_height, count) -> typing.Tuple[bytes, int]:
+        """Requires start_height >= 0, count >= 0.  Reads as many headers as
+        are available starting at start_height up to count.  This
+        would be zero if start_height is beyond self.db_height, for
+        example.
+
+        Returns a (binary, n) pair where binary is the concatenated
+        binary headers, and n is the count of headers returned.
+        """
+
+        if start_height < 0 or count < 0:
+            raise DBError(f'{count:,d} headers starting at {start_height:,d} not on disk')
+
+        disk_count = max(0, min(count, self.db_height + 1 - start_height))
+
+        def read_headers():
+            x = b''.join(
+                self.prefix_db.header.iterate(
+                    start=(start_height,), stop=(start_height+disk_count,), include_key=False, deserialize_value=False
+                )
+            )
+            return x
+
+        if disk_count:
+            return await asyncio.get_event_loop().run_in_executor(self._executor, read_headers), disk_count
+        return b'', 0
+
+    def fs_tx_hash(self, tx_num):
+        """Return a pair (tx_hash, tx_height) for the given tx number.
+
+        If the tx_height is not on disk, returns (None, tx_height)."""
+        tx_height = bisect_right(self.tx_counts, tx_num)
+        if tx_height > self.db_height:
+            return None, tx_height
+        try:
+            return self.get_tx_hash(tx_num), tx_height
+        except IndexError:
+            self.logger.exception(
+                "Failed to access a cached transaction, known bug #3142 "
+                "should be fixed in #3205"
+            )
+            return None, tx_height
+
+    def get_block_txs(self, height: int) -> List[bytes]:
+        return self.prefix_db.block_txs.get(height).tx_hashes
+
+    async def get_transactions_and_merkles(self, tx_hashes: Iterable[str]):
+        tx_infos = {}
+        for tx_hash in tx_hashes:
+            tx_infos[tx_hash] = await asyncio.get_event_loop().run_in_executor(
+                self._executor, self._get_transaction_and_merkle, tx_hash
+            )
+            await asyncio.sleep(0)
+        return tx_infos
+
+    def _get_transaction_and_merkle(self, tx_hash):
+        cached_tx = self._tx_and_merkle_cache.get(tx_hash)
+        if cached_tx:
+            tx, merkle = cached_tx
+        else:
+            tx_hash_bytes = bytes.fromhex(tx_hash)[::-1]
+            tx_num = self.prefix_db.tx_num.get(tx_hash_bytes)
+            tx = None
+            tx_height = -1
+            tx_num = None if not tx_num else tx_num.tx_num
+            if tx_num is not None:
+                if self._cache_all_claim_txos:
+                    fill_cache = tx_num in self.txo_to_claim and len(self.txo_to_claim[tx_num]) > 0
+                else:
+                    fill_cache = False
+                tx_height = bisect_right(self.tx_counts, tx_num)
+                tx = self.prefix_db.tx.get(tx_hash_bytes, fill_cache=fill_cache, deserialize_value=False)
+            if tx_height == -1:
+                merkle = {
+                    'block_height': -1
+                }
+            else:
+                tx_pos = tx_num - self.tx_counts[tx_height - 1]
+                branch, root = self.merkle.branch_and_root(
+                    self.get_block_txs(tx_height), tx_pos
+                )
+                merkle = {
+                    'block_height': tx_height,
+                    'merkle': [
+                        hash_to_hex_str(hash)
+                        for hash in branch
+                    ],
+                    'pos': tx_pos
+                }
+            if tx_height + 10 < self.db_height:
+                self._tx_and_merkle_cache[tx_hash] = tx, merkle
+        return (None if not tx else tx.hex(), merkle)
+
+    async def fs_block_hashes(self, height, count):
+        if height + count > len(self.headers):
+            raise DBError(f'only got {len(self.headers) - height:,d} headers starting at {height:,d}, not {count:,d}')
+        return [self.coin.header_hash(header) for header in self.headers[height:height + count]]
+
+    def read_history(self, hashX: bytes, limit: int = 1000) -> List[Tuple[bytes, int]]:
+        txs = []
+        txs_extend = txs.extend
+        for hist in self.prefix_db.hashX_history.iterate(prefix=(hashX,), include_key=False):
+            txs_extend(hist)
+            if len(txs) >= limit:
+                break
+        return [
+            (self.get_tx_hash(tx_num), bisect_right(self.tx_counts, tx_num))
+            for tx_num in txs
+        ]
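The `merkle` dict built by `_get_transaction_and_merkle` above is a standard Electrum-style inclusion proof. A sketch of the client-side check, the same fold as the commented-out `root_from_proof` in `scribe/db/merkle.py` below; hex and byte-order conversions are left out:

```python
from scribe.common import double_sha256

def root_from_branch(tx_hash: bytes, branch: list, pos: int) -> bytes:
    # At each level the low bit of the position says whether the sibling
    # sits to the left or the right of the running hash.
    h = tx_hash
    for sibling in branch:
        h = double_sha256(sibling + h) if pos & 1 else double_sha256(h + sibling)
        pos >>= 1
    return h
```

Comparing the result with the merkle root in the header at `merkle['block_height']` verifies the transaction's inclusion.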
+ """ + return await asyncio.get_event_loop().run_in_executor(self._executor, self.read_history, hashX, limit) + + # -- Undo information + + def min_undo_height(self, max_height): + """Returns a height from which we should store undo info.""" + return max_height - self._reorg_limit + 1 + + def apply_expiration_extension_fork(self): + # TODO: this can't be reorged + for k, v in self.prefix_db.claim_expiration.iterate(): + self.prefix_db.claim_expiration.stage_delete(k, v) + self.prefix_db.claim_expiration.stage_put( + (bisect_right(self.tx_counts, k.tx_num) + self.coin.nExtendedClaimExpirationTime, + k.tx_num, k.position), v + ) + self.prefix_db.unsafe_commit() + + def write_db_state(self): + """Write (UTXO) state to the batch.""" + if self.db_height > 0: + self.prefix_db.db_state.stage_delete((), self.prefix_db.db_state.get()) + self.prefix_db.db_state.stage_put((), ( + self.genesis_bytes, self.db_height, self.db_tx_count, self.db_tip, + self.utxo_flush_count, int(self.wall_time), self.first_sync, self.db_version, + self.hist_flush_count, self.hist_comp_flush_count, self.hist_comp_cursor, + self.es_sync_height + ) + ) + + def read_db_state(self): + state = self.prefix_db.db_state.get() + + if not state: + self.db_height = -1 + self.db_tx_count = 0 + self.db_tip = b'\0' * 32 + self.db_version = max(self.DB_VERSIONS) + self.utxo_flush_count = 0 + self.wall_time = 0 + self.first_sync = True + self.hist_flush_count = 0 + self.hist_comp_flush_count = -1 + self.hist_comp_cursor = -1 + self.hist_db_version = max(self.DB_VERSIONS) + self.es_sync_height = 0 + else: + self.db_version = state.db_version + if self.db_version not in self.DB_VERSIONS: + raise DBError(f'your DB version is {self.db_version} but this ' + f'software only handles versions {self.DB_VERSIONS}') + # backwards compat + genesis_hash = state.genesis + if genesis_hash.hex() != self.coin.GENESIS_HASH: + raise DBError(f'DB genesis hash {genesis_hash} does not ' + f'match coin {self.coin.GENESIS_HASH}') + self.db_height = state.height + self.db_tx_count = state.tx_count + self.db_tip = state.tip + self.utxo_flush_count = state.utxo_flush_count + self.wall_time = state.wall_time + self.first_sync = state.first_sync + self.hist_flush_count = state.hist_flush_count + self.hist_comp_flush_count = state.comp_flush_count + self.hist_comp_cursor = state.comp_cursor + self.hist_db_version = state.db_version + self.es_sync_height = state.es_sync_height + return state + + def assert_db_state(self): + state = self.prefix_db.db_state.get() + assert self.db_version == state.db_version, f"{self.db_version} != {state.db_version}" + assert self.db_height == state.height, f"{self.db_height} != {state.height}" + assert self.db_tx_count == state.tx_count, f"{self.db_tx_count} != {state.tx_count}" + assert self.db_tip == state.tip, f"{self.db_tip} != {state.tip}" + assert self.first_sync == state.first_sync, f"{self.first_sync} != {state.first_sync}" + assert self.es_sync_height == state.es_sync_height, f"{self.es_sync_height} != {state.es_sync_height}" + + async def all_utxos(self, hashX): + """Return all UTXOs for an address sorted in no particular order.""" + def read_utxos(): + utxos = [] + utxos_append = utxos.append + fs_tx_hash = self.fs_tx_hash + for k, v in self.prefix_db.utxo.iterate(prefix=(hashX, )): + tx_hash, height = fs_tx_hash(k.tx_num) + utxos_append(UTXO(k.tx_num, k.nout, tx_hash, height, v.amount)) + return utxos + + while True: + utxos = await asyncio.get_event_loop().run_in_executor(self._executor, read_utxos) + if all(utxo.tx_hash 
+    async def all_utxos(self, hashX):
+        """Return all UTXOs for an address sorted in no particular order."""
+        def read_utxos():
+            utxos = []
+            utxos_append = utxos.append
+            fs_tx_hash = self.fs_tx_hash
+            for k, v in self.prefix_db.utxo.iterate(prefix=(hashX, )):
+                tx_hash, height = fs_tx_hash(k.tx_num)
+                utxos_append(UTXO(k.tx_num, k.nout, tx_hash, height, v.amount))
+            return utxos
+
+        while True:
+            utxos = await asyncio.get_event_loop().run_in_executor(self._executor, read_utxos)
+            if all(utxo.tx_hash is not None for utxo in utxos):
+                return utxos
+            self.logger.warning('all_utxos: tx hash not found (reorg?), retrying...')
+            await sleep(0.25)
+
+    async def lookup_utxos(self, prevouts):
+        def lookup_utxos():
+            utxos = []
+            utxo_append = utxos.append
+            for (tx_hash, nout) in prevouts:
+                tx_num_val = self.prefix_db.tx_num.get(tx_hash)
+                if not tx_num_val:
+                    self.logger.warning("no tx num for %s", tx_hash[::-1].hex())
+                    continue
+                tx_num = tx_num_val.tx_num
+                hashX_val = self.prefix_db.hashX_utxo.get(tx_hash[:4], tx_num, nout)
+                if not hashX_val:
+                    continue
+                hashX = hashX_val.hashX
+                utxo_value = self.prefix_db.utxo.get(hashX, tx_num, nout)
+                if utxo_value:
+                    utxo_append((hashX, utxo_value.amount))
+            return utxos
+        return await asyncio.get_event_loop().run_in_executor(self._executor, lookup_utxos)
diff --git a/scribe/db/interface.py b/scribe/db/interface.py
new file mode 100644
index 0000000..e0f569e
--- /dev/null
+++ b/scribe/db/interface.py
@@ -0,0 +1,273 @@
+import struct
+import typing
+import rocksdb
+from typing import Optional
+from scribe.db.common import DB_PREFIXES, COLUMN_SETTINGS
+from scribe.db.revertable import RevertableOpStack, RevertablePut, RevertableDelete
+
+
+ROW_TYPES = {}
+
+
+class PrefixRowType(type):
+    def __new__(cls, name, bases, kwargs):
+        klass = super().__new__(cls, name, bases, kwargs)
+        if name != "PrefixRow":
+            ROW_TYPES[klass.prefix] = klass
+            cache_size = klass.cache_size
+            COLUMN_SETTINGS[klass.prefix] = {
+                'cache_size': cache_size,
+            }
+        return klass
+
+
+class PrefixRow(metaclass=PrefixRowType):
+    prefix: bytes
+    key_struct: struct.Struct
+    value_struct: struct.Struct
+    key_part_lambdas = []
+    cache_size: int = 1024 * 1024 * 64
+
+    def __init__(self, db: 'rocksdb.DB', op_stack: RevertableOpStack):
+        self._db = db
+        self._op_stack = op_stack
+        self._column_family = self._db.get_column_family(self.prefix)
+        if not self._column_family.is_valid:
+            raise RuntimeError('column family is not valid')
+
+    def iterate(self, prefix=None, start=None, stop=None, reverse: bool = False, include_key: bool = True,
+                include_value: bool = True, fill_cache: bool = True, deserialize_key: bool = True,
+                deserialize_value: bool = True):
+        if not prefix and not start and not stop:
+            prefix = ()
+        if prefix is not None:
+            prefix = self.pack_partial_key(*prefix)
+            if stop is None:
+                try:
+                    stop = (int.from_bytes(prefix, byteorder='big') + 1).to_bytes(len(prefix), byteorder='big')
+                except OverflowError:
+                    stop = (int.from_bytes(prefix, byteorder='big') + 1).to_bytes(len(prefix) + 1, byteorder='big')
+            else:
+                stop = self.pack_partial_key(*stop)
+        else:
+            if start is not None:
+                start = self.pack_partial_key(*start)
+            if stop is not None:
+                stop = self.pack_partial_key(*stop)
+
+        if deserialize_key:
+            key_getter = lambda k: self.unpack_key(k)
+        else:
+            key_getter = lambda k: k
+        if deserialize_value:
+            value_getter = lambda v: self.unpack_value(v)
+        else:
+            value_getter = lambda v: v
+
+        it = self._db.iterator(
+            start or prefix, self._column_family, iterate_lower_bound=(start or prefix),
+            iterate_upper_bound=stop, reverse=reverse, include_key=include_key,
+            include_value=include_value, fill_cache=fill_cache, prefix_same_as_start=False
+        )
+
+        if include_key and include_value:
+            for k, v in it:
+                yield key_getter(k[1]), value_getter(v)
+        elif include_key:
+            for k in it:
+                yield key_getter(k[1])
+        elif include_value:
+            for v in it:
+                yield value_getter(v)
+        else:
+            for _ in it:
+                yield None
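The exclusive upper bound `iterate` derives from a prefix deserves a note: the prefix bytes are read as one big-endian integer, incremented, and re-encoded, growing by one byte when the increment carries out of the top. A standalone sketch of that computation:

```python
def upper_bound(prefix: bytes) -> bytes:
    # Smallest byte string greater than every key starting with `prefix`.
    try:
        return (int.from_bytes(prefix, byteorder='big') + 1).to_bytes(len(prefix), byteorder='big')
    except OverflowError:
        return (int.from_bytes(prefix, byteorder='big') + 1).to_bytes(len(prefix) + 1, byteorder='big')

assert upper_bound(b'\x01\xff') == b'\x02\x00'
assert upper_bound(b'\xff\xff') == b'\x01\x00\x00'  # carry adds a byte
```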
+    def get(self, *key_args, fill_cache=True, deserialize_value=True):
+        v = self._db.get((self._column_family, self.pack_key(*key_args)), fill_cache=fill_cache)
+        if v:
+            return v if not deserialize_value else self.unpack_value(v)
+
+    def get_pending(self, *key_args, fill_cache=True, deserialize_value=True):
+        packed_key = self.pack_key(*key_args)
+        last_op = self._op_stack.get_last_op_for_key(packed_key)
+        if last_op:
+            if last_op.is_put:
+                return last_op.value if not deserialize_value else self.unpack_value(last_op.value)
+            else:  # it's a delete
+                return
+        v = self._db.get((self._column_family, packed_key), fill_cache=fill_cache)
+        if v:
+            return v if not deserialize_value else self.unpack_value(v)
+
+    def stage_put(self, key_args=(), value_args=()):
+        self._op_stack.append_op(RevertablePut(self.pack_key(*key_args), self.pack_value(*value_args)))
+
+    def stage_delete(self, key_args=(), value_args=()):
+        self._op_stack.append_op(RevertableDelete(self.pack_key(*key_args), self.pack_value(*value_args)))
+
+    @classmethod
+    def pack_partial_key(cls, *args) -> bytes:
+        return cls.prefix + cls.key_part_lambdas[len(args)](*args)
+
+    @classmethod
+    def pack_key(cls, *args) -> bytes:
+        return cls.prefix + cls.key_struct.pack(*args)
+
+    @classmethod
+    def pack_value(cls, *args) -> bytes:
+        return cls.value_struct.pack(*args)
+
+    @classmethod
+    def unpack_key(cls, key: bytes):
+        assert key[:1] == cls.prefix, f"prefix should be {cls.prefix}, got {key[:1]}"
+        return cls.key_struct.unpack(key[1:])
+
+    @classmethod
+    def unpack_value(cls, data: bytes):
+        return cls.value_struct.unpack(data)
+
+    @classmethod
+    def unpack_item(cls, key: bytes, value: bytes):
+        return cls.unpack_key(key), cls.unpack_value(value)
+
+    def estimate_num_keys(self) -> int:
+        return int(self._db.get_property(b'rocksdb.estimate-num-keys', self._column_family).decode())
+
+
+class BasePrefixDB:
+    """
+    Base class for a revertable rocksdb database (a rocksdb db where each set of applied changes can be undone)
+    """
+    UNDO_KEY_STRUCT = struct.Struct(b'>Q32s')
+    PARTIAL_UNDO_KEY_STRUCT = struct.Struct(b'>Q')
+
+    def __init__(self, path, max_open_files=64, secondary_path='', max_undo_depth: int = 200, unsafe_prefixes=None):
+        column_family_options = {}
+        for prefix in DB_PREFIXES:
+            settings = COLUMN_SETTINGS[prefix.value]
+            column_family_options[prefix.value] = rocksdb.ColumnFamilyOptions()
+            column_family_options[prefix.value].table_factory = rocksdb.BlockBasedTableFactory(
+                block_cache=rocksdb.LRUCache(settings['cache_size']),
+            )
+        self.column_families: typing.Dict[bytes, 'rocksdb.ColumnFamilyHandle'] = {}
+        options = rocksdb.Options(
+            create_if_missing=True, use_fsync=False, target_file_size_base=33554432,
+            max_open_files=max_open_files if not secondary_path else -1, create_missing_column_families=True
+        )
+        self._db = rocksdb.DB(
+            path, options, secondary_name=secondary_path, column_families=column_family_options
+        )
+        for prefix in DB_PREFIXES:
+            cf = self._db.get_column_family(prefix.value)
+            if cf is None and not secondary_path:
+                self._db.create_column_family(prefix.value, column_family_options[prefix.value])
+                cf = self._db.get_column_family(prefix.value)
+            self.column_families[prefix.value] = cf
+
+        self._op_stack = RevertableOpStack(self.get, unsafe_prefixes=unsafe_prefixes)
+        self._max_undo_depth = max_undo_depth
+
+    def unsafe_commit(self):
+        """
+        Write staged changes to the database without keeping undo information
+        Changes written cannot be undone
+        """
+        try:
+            if not len(self._op_stack):
+                return
+            with self._db.write_batch(sync=True) as batch:
+                batch_put = batch.put
+                batch_delete = 
batch.delete + get_column_family = self.column_families.__getitem__ + for staged_change in self._op_stack: + column_family = get_column_family(DB_PREFIXES(staged_change.key[:1]).value) + if staged_change.is_put: + batch_put((column_family, staged_change.key), staged_change.value) + else: + batch_delete((column_family, staged_change.key)) + finally: + self._op_stack.clear() + + def commit(self, height: int, block_hash: bytes): + """ + Write changes for a block height to the database and keep undo information so that the changes can be reverted + """ + undo_ops = self._op_stack.get_undo_ops() + delete_undos = [] + if height > self._max_undo_depth: + delete_undos.extend(self._db.iterator( + start=DB_PREFIXES.undo.value + self.PARTIAL_UNDO_KEY_STRUCT.pack(0), + iterate_upper_bound=DB_PREFIXES.undo.value + self.PARTIAL_UNDO_KEY_STRUCT.pack(height - self._max_undo_depth), + include_value=False + )) + try: + undo_c_f = self.column_families[DB_PREFIXES.undo.value] + with self._db.write_batch(sync=True) as batch: + batch_put = batch.put + batch_delete = batch.delete + get_column_family = self.column_families.__getitem__ + for staged_change in self._op_stack: + column_family = get_column_family(DB_PREFIXES(staged_change.key[:1]).value) + if staged_change.is_put: + batch_put((column_family, staged_change.key), staged_change.value) + else: + batch_delete((column_family, staged_change.key)) + for undo_to_delete in delete_undos: + batch_delete((undo_c_f, undo_to_delete)) + batch_put((undo_c_f, DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height, block_hash)), undo_ops) + finally: + self._op_stack.clear() + + def rollback(self, height: int, block_hash: bytes): + """ + Revert changes for a block height + """ + undo_key = DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height, block_hash) + undo_c_f = self.column_families[DB_PREFIXES.undo.value] + undo_info = self._db.get((undo_c_f, undo_key)) + self._op_stack.apply_packed_undo_ops(undo_info) + try: + with self._db.write_batch(sync=True) as batch: + batch_put = batch.put + batch_delete = batch.delete + get_column_family = self.column_families.__getitem__ + for staged_change in self._op_stack: + column_family = get_column_family(DB_PREFIXES(staged_change.key[:1]).value) + if staged_change.is_put: + batch_put((column_family, staged_change.key), staged_change.value) + else: + batch_delete((column_family, staged_change.key)) + # batch_delete(undo_key) + finally: + self._op_stack.clear() + + def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]: + cf = self.column_families[key[:1]] + return self._db.get((cf, key), fill_cache=fill_cache) + + def iterator(self, start: bytes, column_family: 'rocksdb.ColumnFamilyHandle' = None, + iterate_lower_bound: bytes = None, iterate_upper_bound: bytes = None, + reverse: bool = False, include_key: bool = True, include_value: bool = True, + fill_cache: bool = True, prefix_same_as_start: bool = False, auto_prefix_mode: bool = True): + return self._db.iterator( + start=start, column_family=column_family, iterate_lower_bound=iterate_lower_bound, + iterate_upper_bound=iterate_upper_bound, reverse=reverse, include_key=include_key, + include_value=include_value, fill_cache=fill_cache, prefix_same_as_start=prefix_same_as_start, + auto_prefix_mode=auto_prefix_mode + ) + + def close(self): + self._db.close() + + def try_catch_up_with_primary(self): + self._db.try_catch_up_with_primary() + + def stage_raw_put(self, key: bytes, value: bytes): + self._op_stack.append_op(RevertablePut(key, value)) + + def 
stage_raw_delete(self, key: bytes, value: bytes): + self._op_stack.append_op(RevertableDelete(key, value)) + + def estimate_num_keys(self, column_family: 'rocksdb.ColumnFamilyHandle' = None): + return int(self._db.get_property(b'rocksdb.estimate-num-keys', column_family).decode()) diff --git a/scribe/db/merkle.py b/scribe/db/merkle.py new file mode 100644 index 0000000..5e9a6a6 --- /dev/null +++ b/scribe/db/merkle.py @@ -0,0 +1,258 @@ +# Copyright (c) 2018, Neil Booth +# +# All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# and warranty status of this software. + +"""Merkle trees, branches, proofs and roots.""" + +from asyncio import Event +from math import ceil, log + +from scribe.common import double_sha256 + + +class Merkle: + """Perform merkle tree calculations on binary hashes using a given hash + function. + + If the hash count is not even, the final hash is repeated when + calculating the next merkle layer up the tree. + """ + + def __init__(self, hash_func=double_sha256): + self.hash_func = hash_func + + @staticmethod + def tree_depth(hash_count): + return Merkle.branch_length(hash_count) + 1 + + @staticmethod + def branch_length(hash_count): + """Return the length of a merkle branch given the number of hashes.""" + if not isinstance(hash_count, int): + raise TypeError('hash_count must be an integer') + if hash_count < 1: + raise ValueError('hash_count must be at least 1') + return ceil(log(hash_count, 2)) + + @staticmethod + def branch_and_root(hashes, index, length=None, hash_func=double_sha256): + """Return a (merkle branch, merkle_root) pair given hashes, and the + index of one of those hashes. 
+ """ + hashes = list(hashes) + if not isinstance(index, int): + raise TypeError('index must be an integer') + # This also asserts hashes is not empty + if not 0 <= index < len(hashes): + raise ValueError(f"index '{index}/{len(hashes)}' out of range") + natural_length = Merkle.branch_length(len(hashes)) + if length is None: + length = natural_length + else: + if not isinstance(length, int): + raise TypeError('length must be an integer') + if length < natural_length: + raise ValueError('length out of range') + + branch = [] + for _ in range(length): + if len(hashes) & 1: + hashes.append(hashes[-1]) + branch.append(hashes[index ^ 1]) + index >>= 1 + hashes = [hash_func(hashes[n] + hashes[n + 1]) + for n in range(0, len(hashes), 2)] + + return branch, hashes[0] + + @staticmethod + def root(hashes, length=None): + """Return the merkle root of a non-empty iterable of binary hashes.""" + branch, root = Merkle.branch_and_root(hashes, 0, length) + return root + + # @staticmethod + # def root_from_proof(hash, branch, index, hash_func=double_sha256): + # """Return the merkle root given a hash, a merkle branch to it, and + # its index in the hashes array. + # + # branch is an iterable sorted deepest to shallowest. If the + # returned root is the expected value then the merkle proof is + # verified. + # + # The caller should have confirmed the length of the branch with + # branch_length(). Unfortunately this is not easily done for + # bitcoin transactions as the number of transactions in a block + # is unknown to an SPV client. + # """ + # for elt in branch: + # if index & 1: + # hash = hash_func(elt + hash) + # else: + # hash = hash_func(hash + elt) + # index >>= 1 + # if index: + # raise ValueError('index out of range for branch') + # return hash + + @staticmethod + def level(hashes, depth_higher): + """Return a level of the merkle tree of hashes the given depth + higher than the bottom row of the original tree.""" + size = 1 << depth_higher + root = Merkle.root + return [root(hashes[n: n + size], depth_higher) + for n in range(0, len(hashes), size)] + + @staticmethod + def branch_and_root_from_level(level, leaf_hashes, index, + depth_higher): + """Return a (merkle branch, merkle_root) pair when a merkle-tree has a + level cached. + + To maximally reduce the amount of data hashed in computing a + markle branch, cache a tree of depth N at level N // 2. + + level is a list of hashes in the middle of the tree (returned + by level()) + + leaf_hashes are the leaves needed to calculate a partial branch + up to level. + + depth_higher is how much higher level is than the leaves of the tree + + index is the index in the full list of hashes of the hash whose + merkle branch we want. + """ + if not isinstance(level, list): + raise TypeError("level must be a list") + if not isinstance(leaf_hashes, list): + raise TypeError("leaf_hashes must be a list") + leaf_index = (index >> depth_higher) << depth_higher + leaf_branch, leaf_root = Merkle.branch_and_root( + leaf_hashes, index - leaf_index, depth_higher) + index >>= depth_higher + level_branch, root = Merkle.branch_and_root(level, index) + # Check last so that we know index is in-range + if leaf_root != level[index]: + raise ValueError('leaf hashes inconsistent with level') + return leaf_branch + level_branch, root + + +class MerkleCache: + """A cache to calculate merkle branches efficiently.""" + + def __init__(self, merkle, source_func): + """Initialise a cache hashes taken from source_func: + + async def source_func(index, count): + ... 
+ """ + self.merkle = merkle + self.source_func = source_func + self.length = 0 + self.depth_higher = 0 + self.initialized = Event() + + def _segment_length(self): + return 1 << self.depth_higher + + def _leaf_start(self, index): + """Given a level's depth higher and a hash index, return the leaf + index and leaf hash count needed to calculate a merkle branch. + """ + depth_higher = self.depth_higher + return (index >> depth_higher) << depth_higher + + def _level(self, hashes): + return self.merkle.level(hashes, self.depth_higher) + + async def _extend_to(self, length): + """Extend the length of the cache if necessary.""" + if length <= self.length: + return + # Start from the beginning of any final partial segment. + # Retain the value of depth_higher; in practice this is fine + start = self._leaf_start(self.length) + hashes = await self.source_func(start, length - start) + self.level[start >> self.depth_higher:] = self._level(hashes) + self.length = length + + async def _level_for(self, length): + """Return a (level_length, final_hash) pair for a truncation + of the hashes to the given length.""" + if length == self.length: + return self.level + level = self.level[:length >> self.depth_higher] + leaf_start = self._leaf_start(length) + count = min(self._segment_length(), length - leaf_start) + hashes = await self.source_func(leaf_start, count) + level += self._level(hashes) + return level + + async def initialize(self, length): + """Call to initialize the cache to a source of given length.""" + self.length = length + self.depth_higher = self.merkle.tree_depth(length) // 2 + self.level = self._level(await self.source_func(0, length)) + self.initialized.set() + + def truncate(self, length): + """Truncate the cache so it covers no more than length underlying + hashes.""" + if not isinstance(length, int): + raise TypeError('length must be an integer') + if length <= 0: + raise ValueError('length must be positive') + if length >= self.length: + return + length = self._leaf_start(length) + self.length = length + self.level[length >> self.depth_higher:] = [] + + async def branch_and_root(self, length, index): + """Return a merkle branch and root. Length is the number of + hashes used to calculate the merkle root, index is the position + of the hash to calculate the branch of. 
+ + index must be less than length, which must be at least 1.""" + if not isinstance(length, int): + raise TypeError('length must be an integer') + if not isinstance(index, int): + raise TypeError('index must be an integer') + if length <= 0: + raise ValueError('length must be positive') + if index >= length: + raise ValueError('index must be less than length') + await self.initialized.wait() + await self._extend_to(length) + leaf_start = self._leaf_start(index) + count = min(self._segment_length(), length - leaf_start) + leaf_hashes = await self.source_func(leaf_start, count) + if length < self._segment_length(): + return self.merkle.branch_and_root(leaf_hashes, index) + level = await self._level_for(length) + return self.merkle.branch_and_root_from_level( + level, leaf_hashes, index, self.depth_higher) diff --git a/scribe/db/prefixes.py b/scribe/db/prefixes.py new file mode 100644 index 0000000..dc47013 --- /dev/null +++ b/scribe/db/prefixes.py @@ -0,0 +1,1670 @@ +import typing +import struct +import array +import base64 +import rocksdb +import rocksdb.interfaces +from typing import Union, Tuple, NamedTuple, Optional +from scribe.db.common import DB_PREFIXES +from scribe.db.interface import BasePrefixDB, ROW_TYPES, PrefixRow +from scribe.schema.url import normalize_name + +ACTIVATED_CLAIM_TXO_TYPE = 1 +ACTIVATED_SUPPORT_TXO_TYPE = 2 + + +def length_encoded_name(name: str) -> bytes: + encoded = name.encode('utf-8') + return len(encoded).to_bytes(2, byteorder='big') + encoded + + +def length_prefix(key: str) -> bytes: + return len(key).to_bytes(1, byteorder='big') + key.encode() + + +class UTXOKey(NamedTuple): + hashX: bytes + tx_num: int + nout: int + + def __str__(self): + return f"{self.__class__.__name__}(hashX={self.hashX.hex()}, tx_num={self.tx_num}, nout={self.nout})" + + +class UTXOValue(NamedTuple): + amount: int + + +class HashXUTXOKey(NamedTuple): + short_tx_hash: bytes + tx_num: int + nout: int + + def __str__(self): + return f"{self.__class__.__name__}(short_tx_hash={self.short_tx_hash.hex()}, tx_num={self.tx_num}, nout={self.nout})" + + +class HashXUTXOValue(NamedTuple): + hashX: bytes + + def __str__(self): + return f"{self.__class__.__name__}(hashX={self.hashX.hex()})" + + +class HashXHistoryKey(NamedTuple): + hashX: bytes + height: int + + def __str__(self): + return f"{self.__class__.__name__}(hashX={self.hashX.hex()}, height={self.height})" + + +class HashXHistoryValue(NamedTuple): + hashXes: typing.List[int] + + +class BlockHashKey(NamedTuple): + height: int + + +class BlockHashValue(NamedTuple): + block_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(block_hash={self.block_hash.hex()})" + + +class BlockTxsKey(NamedTuple): + height: int + + +class BlockTxsValue(NamedTuple): + tx_hashes: typing.List[bytes] + + +class TxCountKey(NamedTuple): + height: int + + +class TxCountValue(NamedTuple): + tx_count: int + + +class TxHashKey(NamedTuple): + tx_num: int + + +class TxHashValue(NamedTuple): + tx_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(tx_hash={self.tx_hash[::-1].hex()})" + + +class TxNumKey(NamedTuple): + tx_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(tx_hash={self.tx_hash[::-1].hex()})" + + +class TxNumValue(NamedTuple): + tx_num: int + + +class TxKey(NamedTuple): + tx_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(tx_hash={self.tx_hash[::-1].hex()})" + + +class TxValue(NamedTuple): + raw_tx: bytes + + def __str__(self): + return 
f"{self.__class__.__name__}(raw_tx={base64.b64encode(self.raw_tx).decode()})" + + +class BlockHeaderKey(NamedTuple): + height: int + + +class BlockHeaderValue(NamedTuple): + header: bytes + + def __str__(self): + return f"{self.__class__.__name__}(header={base64.b64encode(self.header)})" + + +class ClaimToTXOKey(typing.NamedTuple): + claim_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()})" + + +class ClaimToTXOValue(typing.NamedTuple): + tx_num: int + position: int + root_tx_num: int + root_position: int + amount: int + # activation: int + channel_signature_is_valid: bool + name: str + + @property + def normalized_name(self) -> str: + try: + return normalize_name(self.name) + except UnicodeDecodeError: + return self.name + + +class TXOToClaimKey(typing.NamedTuple): + tx_num: int + position: int + + +class TXOToClaimValue(typing.NamedTuple): + claim_hash: bytes + name: str + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()}, name={self.name})" + + +class ClaimShortIDKey(typing.NamedTuple): + normalized_name: str + partial_claim_id: str + root_tx_num: int + root_position: int + + def __str__(self): + return f"{self.__class__.__name__}(normalized_name={self.normalized_name}, " \ + f"partial_claim_id={self.partial_claim_id}, " \ + f"root_tx_num={self.root_tx_num}, root_position={self.root_position})" + + +class ClaimShortIDValue(typing.NamedTuple): + tx_num: int + position: int + + +class ClaimToChannelKey(typing.NamedTuple): + claim_hash: bytes + tx_num: int + position: int + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()}, " \ + f"tx_num={self.tx_num}, position={self.position})" + + +class ClaimToChannelValue(typing.NamedTuple): + signing_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(signing_hash={self.signing_hash.hex()})" + + +class ChannelToClaimKey(typing.NamedTuple): + signing_hash: bytes + name: str + tx_num: int + position: int + + def __str__(self): + return f"{self.__class__.__name__}(signing_hash={self.signing_hash.hex()}, name={self.name}, " \ + f"tx_num={self.tx_num}, position={self.position})" + + +class ChannelToClaimValue(typing.NamedTuple): + claim_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()})" + + +class ChannelCountKey(typing.NamedTuple): + channel_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(channel_hash={self.channel_hash.hex()})" + + +class ChannelCountValue(typing.NamedTuple): + count: int + + +class SupportAmountKey(typing.NamedTuple): + claim_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()})" + + +class SupportAmountValue(typing.NamedTuple): + amount: int + + +class ClaimToSupportKey(typing.NamedTuple): + claim_hash: bytes + tx_num: int + position: int + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()}, tx_num={self.tx_num}, " \ + f"position={self.position})" + + +class ClaimToSupportValue(typing.NamedTuple): + amount: int + + +class SupportToClaimKey(typing.NamedTuple): + tx_num: int + position: int + + +class SupportToClaimValue(typing.NamedTuple): + claim_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()})" + + +class ClaimExpirationKey(typing.NamedTuple): + expiration: int + tx_num: int + position: int + + +class ClaimExpirationValue(typing.NamedTuple): 
+ claim_hash: bytes + normalized_name: str + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()}, normalized_name={self.normalized_name})" + + +class ClaimTakeoverKey(typing.NamedTuple): + normalized_name: str + + +class ClaimTakeoverValue(typing.NamedTuple): + claim_hash: bytes + height: int + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()}, height={self.height})" + + +class PendingActivationKey(typing.NamedTuple): + height: int + txo_type: int + tx_num: int + position: int + + @property + def is_support(self) -> bool: + return self.txo_type == ACTIVATED_SUPPORT_TXO_TYPE + + @property + def is_claim(self) -> bool: + return self.txo_type == ACTIVATED_CLAIM_TXO_TYPE + + +class PendingActivationValue(typing.NamedTuple): + claim_hash: bytes + normalized_name: str + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()}, normalized_name={self.normalized_name})" + + +class ActivationKey(typing.NamedTuple): + txo_type: int + tx_num: int + position: int + + +class ActivationValue(typing.NamedTuple): + height: int + claim_hash: bytes + normalized_name: str + + def __str__(self): + return f"{self.__class__.__name__}(height={self.height}, claim_hash={self.claim_hash.hex()}, " \ + f"normalized_name={self.normalized_name})" + + +class ActiveAmountKey(typing.NamedTuple): + claim_hash: bytes + txo_type: int + activation_height: int + tx_num: int + position: int + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()}, txo_type={self.txo_type}, " \ + f"activation_height={self.activation_height}, tx_num={self.tx_num}, position={self.position})" + + +class ActiveAmountValue(typing.NamedTuple): + amount: int + + +class EffectiveAmountKey(typing.NamedTuple): + normalized_name: str + effective_amount: int + tx_num: int + position: int + + +class EffectiveAmountValue(typing.NamedTuple): + claim_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()})" + + +class RepostKey(typing.NamedTuple): + claim_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()})" + + +class RepostValue(typing.NamedTuple): + reposted_claim_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(reposted_claim_hash={self.reposted_claim_hash.hex()})" + + +class RepostedKey(typing.NamedTuple): + reposted_claim_hash: bytes + tx_num: int + position: int + + def __str__(self): + return f"{self.__class__.__name__}(reposted_claim_hash={self.reposted_claim_hash.hex()}, " \ + f"tx_num={self.tx_num}, position={self.position})" + + +class RepostedValue(typing.NamedTuple): + claim_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()})" + + +class TouchedOrDeletedClaimKey(typing.NamedTuple): + height: int + + +class TouchedOrDeletedClaimValue(typing.NamedTuple): + touched_claims: typing.Set[bytes] + deleted_claims: typing.Set[bytes] + + def __str__(self): + return f"{self.__class__.__name__}(" \ + f"touched_claims={','.join(map(lambda x: x.hex(), self.touched_claims))}," \ + f"deleted_claims={','.join(map(lambda x: x.hex(), self.deleted_claims))})" + + +class DBState(typing.NamedTuple): + genesis: bytes + height: int + tx_count: int + tip: bytes + utxo_flush_count: int + wall_time: int + first_sync: bool + db_version: int + hist_flush_count: int + comp_flush_count: int + comp_cursor: int + es_sync_height: int + + +class 
ActiveAmountPrefixRow(PrefixRow): + prefix = DB_PREFIXES.active_amount.value + key_struct = struct.Struct(b'>20sBLLH') + value_struct = struct.Struct(b'>Q') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack, + struct.Struct(b'>20sB').pack, + struct.Struct(b'>20sBL').pack, + struct.Struct(b'>20sBLL').pack, + struct.Struct(b'>20sBLLH').pack + ] + cache_size = 1024 * 1024 * 128 + + @classmethod + def pack_key(cls, claim_hash: bytes, txo_type: int, activation_height: int, tx_num: int, position: int): + return super().pack_key(claim_hash, txo_type, activation_height, tx_num, position) + + @classmethod + def unpack_key(cls, key: bytes) -> ActiveAmountKey: + return ActiveAmountKey(*super().unpack_key(key)) + + @classmethod + def unpack_value(cls, data: bytes) -> ActiveAmountValue: + return ActiveAmountValue(*super().unpack_value(data)) + + @classmethod + def pack_value(cls, amount: int) -> bytes: + return cls.value_struct.pack(amount) + + @classmethod + def pack_item(cls, claim_hash: bytes, txo_type: int, activation_height: int, tx_num: int, position: int, amount: int): + return cls.pack_key(claim_hash, txo_type, activation_height, tx_num, position), cls.pack_value(amount) + + +class ClaimToTXOPrefixRow(PrefixRow): + prefix = DB_PREFIXES.claim_to_txo.value + key_struct = struct.Struct(b'>20s') + value_struct = struct.Struct(b'>LHLHQB') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack + ] + cache_size = 1024 * 1024 * 128 + + @classmethod + def pack_key(cls, claim_hash: bytes): + return super().pack_key(claim_hash) + + @classmethod + def unpack_key(cls, key: bytes) -> ClaimToTXOKey: + assert key[:1] == cls.prefix and len(key) == 21 + return ClaimToTXOKey(key[1:]) + + @classmethod + def unpack_value(cls, data: bytes) -> ClaimToTXOValue: + tx_num, position, root_tx_num, root_position, amount, channel_signature_is_valid = cls.value_struct.unpack( + data[:21] + ) + name_len = int.from_bytes(data[21:23], byteorder='big') + name = data[23:23 + name_len].decode() + return ClaimToTXOValue( + tx_num, position, root_tx_num, root_position, amount, bool(channel_signature_is_valid), name + ) + + @classmethod + def pack_value(cls, tx_num: int, position: int, root_tx_num: int, root_position: int, amount: int, + channel_signature_is_valid: bool, name: str) -> bytes: + return cls.value_struct.pack( + tx_num, position, root_tx_num, root_position, amount, int(channel_signature_is_valid) + ) + length_encoded_name(name) + + @classmethod + def pack_item(cls, claim_hash: bytes, tx_num: int, position: int, root_tx_num: int, root_position: int, + amount: int, channel_signature_is_valid: bool, name: str): + return cls.pack_key(claim_hash), \ + cls.pack_value(tx_num, position, root_tx_num, root_position, amount, channel_signature_is_valid, name) + + +class TXOToClaimPrefixRow(PrefixRow): + prefix = DB_PREFIXES.txo_to_claim.value + key_struct = struct.Struct(b'>LH') + value_struct = struct.Struct(b'>20s') + cache_size = 1024 * 1024 * 128 + + @classmethod + def pack_key(cls, tx_num: int, position: int): + return super().pack_key(tx_num, position) + + @classmethod + def unpack_key(cls, key: bytes) -> TXOToClaimKey: + return TXOToClaimKey(*super().unpack_key(key)) + + @classmethod + def unpack_value(cls, data: bytes) -> TXOToClaimValue: + claim_hash, = cls.value_struct.unpack(data[:20]) + name_len = int.from_bytes(data[20:22], byteorder='big') + name = data[22:22 + name_len].decode() + return TXOToClaimValue(claim_hash, name) + + @classmethod + def pack_value(cls, claim_hash: bytes, name: 
str) -> bytes: + return cls.value_struct.pack(claim_hash) + length_encoded_name(name) + + @classmethod + def pack_item(cls, tx_num: int, position: int, claim_hash: bytes, name: str): + return cls.pack_key(tx_num, position), \ + cls.pack_value(claim_hash, name) + + +def shortid_key_helper(struct_fmt): + packer = struct.Struct(struct_fmt).pack + def wrapper(name, *args): + return length_encoded_name(name) + packer(*args) + return wrapper + + +def shortid_key_partial_claim_helper(name: str, partial_claim_id: str): + assert len(partial_claim_id) < 40 + return length_encoded_name(name) + length_prefix(partial_claim_id) + + +class ClaimShortIDPrefixRow(PrefixRow): + prefix = DB_PREFIXES.claim_short_id_prefix.value + key_struct = struct.Struct(b'>LH') + value_struct = struct.Struct(b'>LH') + key_part_lambdas = [ + lambda: b'', + length_encoded_name, + shortid_key_partial_claim_helper + ] + + @classmethod + def pack_key(cls, name: str, short_claim_id: str, root_tx_num: int, root_position: int): + return cls.prefix + length_encoded_name(name) + length_prefix(short_claim_id) +\ + cls.key_struct.pack(root_tx_num, root_position) + + @classmethod + def pack_value(cls, tx_num: int, position: int): + return super().pack_value(tx_num, position) + + @classmethod + def unpack_key(cls, key: bytes) -> ClaimShortIDKey: + assert key[:1] == cls.prefix + name_len = int.from_bytes(key[1:3], byteorder='big') + name = key[3:3 + name_len].decode() + claim_id_len = int.from_bytes(key[3+name_len:4+name_len], byteorder='big') + partial_claim_id = key[4+name_len:4+name_len+claim_id_len].decode() + return ClaimShortIDKey(name, partial_claim_id, *cls.key_struct.unpack(key[4 + name_len + claim_id_len:])) + + @classmethod + def unpack_value(cls, data: bytes) -> ClaimShortIDValue: + return ClaimShortIDValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, name: str, partial_claim_id: str, root_tx_num: int, root_position: int, + tx_num: int, position: int): + return cls.pack_key(name, partial_claim_id, root_tx_num, root_position), \ + cls.pack_value(tx_num, position) + + +class ClaimToChannelPrefixRow(PrefixRow): + prefix = DB_PREFIXES.claim_to_channel.value + key_struct = struct.Struct(b'>20sLH') + value_struct = struct.Struct(b'>20s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack, + struct.Struct(b'>20sL').pack, + struct.Struct(b'>20sLH').pack + ] + + @classmethod + def pack_key(cls, claim_hash: bytes, tx_num: int, position: int): + return super().pack_key(claim_hash, tx_num, position) + + @classmethod + def pack_value(cls, signing_hash: bytes): + return super().pack_value(signing_hash) + + @classmethod + def unpack_key(cls, key: bytes) -> ClaimToChannelKey: + return ClaimToChannelKey(*super().unpack_key(key)) + + @classmethod + def unpack_value(cls, data: bytes) -> ClaimToChannelValue: + return ClaimToChannelValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, claim_hash: bytes, tx_num: int, position: int, signing_hash: bytes): + return cls.pack_key(claim_hash, tx_num, position), cls.pack_value(signing_hash) + + +def channel_to_claim_helper(struct_fmt): + packer = struct.Struct(struct_fmt).pack + + def wrapper(signing_hash: bytes, name: str, *args): + return signing_hash + length_encoded_name(name) + packer(*args) + + return wrapper + + +class ChannelToClaimPrefixRow(PrefixRow): + prefix = DB_PREFIXES.channel_to_claim.value + key_struct = struct.Struct(b'>LH') + value_struct = struct.Struct(b'>20s') + + key_part_lambdas = [ + lambda: b'', + 
struct.Struct(b'>20s').pack, + channel_to_claim_helper(b''), + channel_to_claim_helper(b'>s'), + channel_to_claim_helper(b'>L'), + channel_to_claim_helper(b'>LH'), + ] + + @classmethod + def pack_key(cls, signing_hash: bytes, name: str, tx_num: int, position: int): + return cls.prefix + signing_hash + length_encoded_name(name) + cls.key_struct.pack( + tx_num, position + ) + + @classmethod + def unpack_key(cls, key: bytes) -> ChannelToClaimKey: + assert key[:1] == cls.prefix + signing_hash = key[1:21] + name_len = int.from_bytes(key[21:23], byteorder='big') + name = key[23:23 + name_len].decode() + tx_num, position = cls.key_struct.unpack(key[23 + name_len:]) + return ChannelToClaimKey( + signing_hash, name, tx_num, position + ) + + @classmethod + def pack_value(cls, claim_hash: bytes) -> bytes: + return super().pack_value(claim_hash) + + @classmethod + def unpack_value(cls, data: bytes) -> ChannelToClaimValue: + return ChannelToClaimValue(*cls.value_struct.unpack(data)) + + @classmethod + def pack_item(cls, signing_hash: bytes, name: str, tx_num: int, position: int, + claim_hash: bytes): + return cls.pack_key(signing_hash, name, tx_num, position), \ + cls.pack_value(claim_hash) + + +class ClaimToSupportPrefixRow(PrefixRow): + prefix = DB_PREFIXES.claim_to_support.value + key_struct = struct.Struct(b'>20sLH') + value_struct = struct.Struct(b'>Q') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack, + struct.Struct(b'>20sL').pack, + struct.Struct(b'>20sLH').pack + ] + + @classmethod + def pack_key(cls, claim_hash: bytes, tx_num: int, position: int): + return super().pack_key(claim_hash, tx_num, position) + + @classmethod + def unpack_key(cls, key: bytes) -> ClaimToSupportKey: + return ClaimToSupportKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, amount: int) -> bytes: + return super().pack_value(amount) + + @classmethod + def unpack_value(cls, data: bytes) -> ClaimToSupportValue: + return ClaimToSupportValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, claim_hash: bytes, tx_num: int, position: int, amount: int): + return cls.pack_key(claim_hash, tx_num, position), \ + cls.pack_value(amount) + + +class SupportToClaimPrefixRow(PrefixRow): + prefix = DB_PREFIXES.support_to_claim.value + key_struct = struct.Struct(b'>LH') + value_struct = struct.Struct(b'>20s') + + @classmethod + def pack_key(cls, tx_num: int, position: int): + return super().pack_key(tx_num, position) + + @classmethod + def unpack_key(cls, key: bytes) -> SupportToClaimKey: + return SupportToClaimKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, claim_hash: bytes) -> bytes: + return super().pack_value(claim_hash) + + @classmethod + def unpack_value(cls, data: bytes) -> SupportToClaimValue: + return SupportToClaimValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, tx_num: int, position: int, claim_hash: bytes): + return cls.pack_key(tx_num, position), \ + cls.pack_value(claim_hash) + + +class ClaimExpirationPrefixRow(PrefixRow): + prefix = DB_PREFIXES.claim_expiration.value + key_struct = struct.Struct(b'>LLH') + value_struct = struct.Struct(b'>20s') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack, + struct.Struct(b'>LL').pack, + struct.Struct(b'>LLH').pack, + ] + + @classmethod + def pack_key(cls, expiration: int, tx_num: int, position: int) -> bytes: + return super().pack_key(expiration, tx_num, position) + + @classmethod + def pack_value(cls, claim_hash: bytes, name: str) -> bytes: + return 
cls.value_struct.pack(claim_hash) + length_encoded_name(name)
+
+    @classmethod
+    def pack_item(cls, expiration: int, tx_num: int, position: int, claim_hash: bytes, name: str) -> typing.Tuple[bytes, bytes]:
+        return cls.pack_key(expiration, tx_num, position), cls.pack_value(claim_hash, name)
+
+    @classmethod
+    def unpack_key(cls, key: bytes) -> ClaimExpirationKey:
+        return ClaimExpirationKey(*super().unpack_key(key))
+
+    @classmethod
+    def unpack_value(cls, data: bytes) -> ClaimExpirationValue:
+        name_len = int.from_bytes(data[20:22], byteorder='big')
+        name = data[22:22 + name_len].decode()
+        claim_id, = cls.value_struct.unpack(data[:20])
+        return ClaimExpirationValue(claim_id, name)
+
+    @classmethod
+    def unpack_item(cls, key: bytes, value: bytes) -> typing.Tuple[ClaimExpirationKey, ClaimExpirationValue]:
+        return cls.unpack_key(key), cls.unpack_value(value)
+
+
+class ClaimTakeoverPrefixRow(PrefixRow):
+    prefix = DB_PREFIXES.claim_takeover.value
+    value_struct = struct.Struct(b'>20sL')
+
+    key_part_lambdas = [
+        lambda: b'',
+        length_encoded_name
+    ]
+
+    @classmethod
+    def pack_key(cls, name: str):
+        return cls.prefix + length_encoded_name(name)
+
+    @classmethod
+    def pack_value(cls, claim_hash: bytes, takeover_height: int):
+        return super().pack_value(claim_hash, takeover_height)
+
+    @classmethod
+    def unpack_key(cls, key: bytes) -> ClaimTakeoverKey:
+        assert key[:1] == cls.prefix
+        name_len = int.from_bytes(key[1:3], byteorder='big')
+        name = key[3:3 + name_len].decode()
+        return ClaimTakeoverKey(name)
+
+    @classmethod
+    def unpack_value(cls, data: bytes) -> ClaimTakeoverValue:
+        return ClaimTakeoverValue(*super().unpack_value(data))
+
+    @classmethod
+    def pack_item(cls, name: str, claim_hash: bytes, takeover_height: int):
+        return cls.pack_key(name), cls.pack_value(claim_hash, takeover_height)
+
+
+class PendingActivationPrefixRow(PrefixRow):
+    prefix = DB_PREFIXES.pending_activation.value
+    key_struct = struct.Struct(b'>LBLH')
+    key_part_lambdas = [
+        lambda: b'',
+        struct.Struct(b'>L').pack,
+        struct.Struct(b'>LB').pack,
+        struct.Struct(b'>LBL').pack,
+        struct.Struct(b'>LBLH').pack
+    ]
+
+    @classmethod
+    def pack_key(cls, height: int, txo_type: int, tx_num: int, position: int):
+        return super().pack_key(height, txo_type, tx_num, position)
+
+    @classmethod
+    def unpack_key(cls, key: bytes) -> PendingActivationKey:
+        return PendingActivationKey(*super().unpack_key(key))
+
+    @classmethod
+    def pack_value(cls, claim_hash: bytes, name: str) -> bytes:
+        return claim_hash + length_encoded_name(name)
+
+    @classmethod
+    def unpack_value(cls, data: bytes) -> PendingActivationValue:
+        claim_hash = data[:20]
+        name_len = int.from_bytes(data[20:22], byteorder='big')
+        name = data[22:22 + name_len].decode()
+        return PendingActivationValue(claim_hash, name)
+
+    @classmethod
+    def pack_item(cls, height: int, txo_type: int, tx_num: int, position: int, claim_hash: bytes, name: str):
+        return cls.pack_key(height, txo_type, tx_num, position), \
+            cls.pack_value(claim_hash, name)
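`EffectiveAmountPrefixRow` below stores `0xffffffffffffffff - effective_amount` in its keys so that a plain ascending scan visits claims from highest to lowest effective amount. A sketch of why the inversion gives that ordering:

```python
import struct

def inverted_amount_key(amount: int) -> bytes:
    # Big-endian keys compare lexicographically like the integers they
    # encode, so inverting makes larger amounts sort first.
    return struct.pack('>Q', 0xffffffffffffffff - amount)

assert inverted_amount_key(10) < inverted_amount_key(5)
assert sorted(inverted_amount_key(a) for a in (1, 7, 3)) == [
    inverted_amount_key(a) for a in (7, 3, 1)
]
```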
+class ActivatedPrefixRow(PrefixRow):
+    prefix = DB_PREFIXES.activated_claim_and_support.value
+    key_struct = struct.Struct(b'>BLH')
+    value_struct = struct.Struct(b'>L20s')
+    key_part_lambdas = [
+        lambda: b'',
+        struct.Struct(b'>B').pack,
+        struct.Struct(b'>BL').pack,
+        struct.Struct(b'>BLH').pack
+    ]
+
+    @classmethod
+    def pack_key(cls, txo_type: int, tx_num: int, position: int):
+        return super().pack_key(txo_type, tx_num, position)
+
+    @classmethod
+    def unpack_key(cls, key: bytes) -> ActivationKey:
+        return ActivationKey(*super().unpack_key(key))
+
+    @classmethod
+    def pack_value(cls, height: int, claim_hash: bytes, name: str) -> bytes:
+        return cls.value_struct.pack(height, claim_hash) + length_encoded_name(name)
+
+    @classmethod
+    def unpack_value(cls, data: bytes) -> ActivationValue:
+        height, claim_hash = cls.value_struct.unpack(data[:24])
+        name_len = int.from_bytes(data[24:26], byteorder='big')
+        name = data[26:26 + name_len].decode()
+        return ActivationValue(height, claim_hash, name)
+
+    @classmethod
+    def pack_item(cls, txo_type: int, tx_num: int, position: int, height: int, claim_hash: bytes, name: str):
+        return cls.pack_key(txo_type, tx_num, position), \
+            cls.pack_value(height, claim_hash, name)
+
+
+def effective_amount_helper(struct_fmt):
+    packer = struct.Struct(struct_fmt).pack
+
+    def wrapper(name, *args):
+        if not args:
+            return length_encoded_name(name)
+        if len(args) == 1:
+            return length_encoded_name(name) + packer(0xffffffffffffffff - args[0])
+        return length_encoded_name(name) + packer(0xffffffffffffffff - args[0], *args[1:])
+
+    return wrapper
+
+
+class EffectiveAmountPrefixRow(PrefixRow):
+    prefix = DB_PREFIXES.effective_amount.value
+    key_struct = struct.Struct(b'>QLH')
+    value_struct = struct.Struct(b'>20s')
+    key_part_lambdas = [
+        lambda: b'',
+        length_encoded_name,
+        shortid_key_helper(b'>Q'),
+        shortid_key_helper(b'>QL'),
+        shortid_key_helper(b'>QLH'),
+    ]
+    cache_size = 1024 * 1024 * 128
+
+    @classmethod
+    def pack_key(cls, name: str, effective_amount: int, tx_num: int, position: int):
+        return cls.prefix + length_encoded_name(name) + cls.key_struct.pack(
+            0xffffffffffffffff - effective_amount, tx_num, position
+        )
+
+    @classmethod
+    def unpack_key(cls, key: bytes) -> EffectiveAmountKey:
+        assert key[:1] == cls.prefix
+        name_len = int.from_bytes(key[1:3], byteorder='big')
+        name = key[3:3 + name_len].decode()
+        ones_comp_effective_amount, tx_num, position = cls.key_struct.unpack(key[3 + name_len:])
+        return EffectiveAmountKey(name, 0xffffffffffffffff - ones_comp_effective_amount, tx_num, position)
+
+    @classmethod
+    def unpack_value(cls, data: bytes) -> EffectiveAmountValue:
+        return EffectiveAmountValue(*super().unpack_value(data))
+
+    @classmethod
+    def pack_value(cls, claim_hash: bytes) -> bytes:
+        return super().pack_value(claim_hash)
+
+    @classmethod
+    def pack_item(cls, name: str, effective_amount: int, tx_num: int, position: int, claim_hash: bytes):
+        return cls.pack_key(name, effective_amount, tx_num, position), cls.pack_value(claim_hash)
+
+
+class RepostPrefixRow(PrefixRow):
+    prefix = DB_PREFIXES.repost.value
+    key_struct = struct.Struct(b'>20s')
+
+    key_part_lambdas = [
+        lambda: b'',
+        struct.Struct(b'>20s').pack
+    ]
+
+    @classmethod
+    def pack_key(cls, claim_hash: bytes):
+        return super().pack_key(claim_hash)
+
+    @classmethod
+    def unpack_key(cls, key: bytes) -> RepostKey:
+        return RepostKey(*super().unpack_key(key))
+
+    @classmethod
+    def pack_value(cls, reposted_claim_hash: bytes) -> bytes:
+        return reposted_claim_hash
+
+    @classmethod
+    def unpack_value(cls, data: bytes) -> RepostValue:
+        return RepostValue(data)
+
+    @classmethod
+    def pack_item(cls, claim_hash: bytes, reposted_claim_hash: bytes):
+        return cls.pack_key(claim_hash), cls.pack_value(reposted_claim_hash)
+
+
+class RepostedPrefixRow(PrefixRow):
+    prefix = DB_PREFIXES.reposted_claim.value
+    key_struct = struct.Struct(b'>20sLH')
+    value_struct = struct.Struct(b'>20s')
+    key_part_lambdas = [
+        lambda: b'',
+        struct.Struct(b'>20s').pack,
+        struct.Struct(b'>20sL').pack,
struct.Struct(b'>20sLH').pack + ] + + @classmethod + def pack_key(cls, reposted_claim_hash: bytes, tx_num: int, position: int): + return super().pack_key(reposted_claim_hash, tx_num, position) + + @classmethod + def unpack_key(cls, key: bytes) -> RepostedKey: + return RepostedKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, claim_hash: bytes) -> bytes: + return super().pack_value(claim_hash) + + @classmethod + def unpack_value(cls, data: bytes) -> RepostedValue: + return RepostedValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, reposted_claim_hash: bytes, tx_num: int, position: int, claim_hash: bytes): + return cls.pack_key(reposted_claim_hash, tx_num, position), cls.pack_value(claim_hash) + + +class UndoKey(NamedTuple): + height: int + block_hash: bytes + + +class UndoPrefixRow(PrefixRow): + prefix = DB_PREFIXES.undo.value + key_struct = struct.Struct(b'>Q32s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>Q').pack, + struct.Struct(b'>Q32s').pack + ] + + @classmethod + def pack_key(cls, height: int, block_hash: bytes): + return super().pack_key(height, block_hash) + + @classmethod + def unpack_key(cls, key: bytes) -> UndoKey: + return UndoKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, undo_ops: bytes) -> bytes: + return undo_ops + + @classmethod + def unpack_value(cls, data: bytes) -> bytes: + return data + + @classmethod + def pack_item(cls, height: int, block_hash: bytes, undo_ops: bytes): + return cls.pack_key(height, block_hash), cls.pack_value(undo_ops) + + +class BlockHashPrefixRow(PrefixRow): + prefix = DB_PREFIXES.block_hash.value + key_struct = struct.Struct(b'>L') + value_struct = struct.Struct(b'>32s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack + ] + + @classmethod + def pack_key(cls, height: int) -> bytes: + return super().pack_key(height) + + @classmethod + def unpack_key(cls, key: bytes) -> BlockHashKey: + return BlockHashKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, block_hash: bytes) -> bytes: + return super().pack_value(block_hash) + + @classmethod + def unpack_value(cls, data: bytes) -> BlockHashValue: + return BlockHashValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, height: int, block_hash: bytes): + return cls.pack_key(height), cls.pack_value(block_hash) + + +class BlockHeaderPrefixRow(PrefixRow): + prefix = DB_PREFIXES.header.value + key_struct = struct.Struct(b'>L') + value_struct = struct.Struct(b'>112s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack + ] + + @classmethod + def pack_key(cls, height: int) -> bytes: + return super().pack_key(height) + + @classmethod + def unpack_key(cls, key: bytes) -> BlockHeaderKey: + return BlockHeaderKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, header: bytes) -> bytes: + return super().pack_value(header) + + @classmethod + def unpack_value(cls, data: bytes) -> BlockHeaderValue: + return BlockHeaderValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, height: int, header: bytes): + return cls.pack_key(height), cls.pack_value(header) + + +class TXNumPrefixRow(PrefixRow): + prefix = DB_PREFIXES.tx_num.value + key_struct = struct.Struct(b'>32s') + value_struct = struct.Struct(b'>L') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>32s').pack + ] + + @classmethod + def pack_key(cls, tx_hash: bytes) -> bytes: + return super().pack_key(tx_hash) + + @classmethod + def unpack_key(cls, tx_hash: bytes) -> TxNumKey: + 
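+
+        # TXNumPrefixRow and TXHashPrefixRow (below) are mirror images: one maps
+        # a 32 byte tx hash to its global tx number, the other maps it back.
+        # Illustrative round trip (assuming TxNumValue's field is named tx_num,
+        # per pack_value):
+        #   >>> key, value = TXNumPrefixRow.pack_item(b'\xaa' * 32, 42)
+        #   >>> TXNumPrefixRow.unpack_value(value)
+        #   TxNumValue(tx_num=42)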
return TxNumKey(*super().unpack_key(tx_hash)) + + @classmethod + def pack_value(cls, tx_num: int) -> bytes: + return super().pack_value(tx_num) + + @classmethod + def unpack_value(cls, data: bytes) -> TxNumValue: + return TxNumValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, tx_hash: bytes, tx_num: int): + return cls.pack_key(tx_hash), cls.pack_value(tx_num) + + +class TxCountPrefixRow(PrefixRow): + prefix = DB_PREFIXES.tx_count.value + key_struct = struct.Struct(b'>L') + value_struct = struct.Struct(b'>L') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack + ] + + @classmethod + def pack_key(cls, height: int) -> bytes: + return super().pack_key(height) + + @classmethod + def unpack_key(cls, key: bytes) -> TxCountKey: + return TxCountKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, tx_count: int) -> bytes: + return super().pack_value(tx_count) + + @classmethod + def unpack_value(cls, data: bytes) -> TxCountValue: + return TxCountValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, height: int, tx_count: int): + return cls.pack_key(height), cls.pack_value(tx_count) + + +class TXHashPrefixRow(PrefixRow): + prefix = DB_PREFIXES.tx_hash.value + key_struct = struct.Struct(b'>L') + value_struct = struct.Struct(b'>32s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack + ] + + @classmethod + def pack_key(cls, tx_num: int) -> bytes: + return super().pack_key(tx_num) + + @classmethod + def unpack_key(cls, key: bytes) -> TxHashKey: + return TxHashKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, tx_hash: bytes) -> bytes: + return super().pack_value(tx_hash) + + @classmethod + def unpack_value(cls, data: bytes) -> TxHashValue: + return TxHashValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, tx_num: int, tx_hash: bytes): + return cls.pack_key(tx_num), cls.pack_value(tx_hash) + + +class TXPrefixRow(PrefixRow): + prefix = DB_PREFIXES.tx.value + key_struct = struct.Struct(b'>32s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>32s').pack + ] + + @classmethod + def pack_key(cls, tx_hash: bytes) -> bytes: + return super().pack_key(tx_hash) + + @classmethod + def unpack_key(cls, tx_hash: bytes) -> TxKey: + return TxKey(*super().unpack_key(tx_hash)) + + @classmethod + def pack_value(cls, tx: bytes) -> bytes: + return tx + + @classmethod + def unpack_value(cls, data: bytes) -> TxValue: + return TxValue(data) + + @classmethod + def pack_item(cls, tx_hash: bytes, raw_tx: bytes): + return cls.pack_key(tx_hash), cls.pack_value(raw_tx) + + +class UTXOPrefixRow(PrefixRow): + prefix = DB_PREFIXES.utxo.value + key_struct = struct.Struct(b'>11sLH') + value_struct = struct.Struct(b'>Q') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>11s').pack, + struct.Struct(b'>11sL').pack, + struct.Struct(b'>11sLH').pack + ] + + @classmethod + def pack_key(cls, hashX: bytes, tx_num, nout: int): + return super().pack_key(hashX, tx_num, nout) + + @classmethod + def unpack_key(cls, key: bytes) -> UTXOKey: + return UTXOKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, amount: int) -> bytes: + return super().pack_value(amount) + + @classmethod + def unpack_value(cls, data: bytes) -> UTXOValue: + return UTXOValue(*cls.value_struct.unpack(data)) + + @classmethod + def pack_item(cls, hashX: bytes, tx_num: int, nout: int, amount: int): + return cls.pack_key(hashX, tx_num, nout), cls.pack_value(amount) + + +class HashXUTXOPrefixRow(PrefixRow): + prefix = 
DB_PREFIXES.hashx_utxo.value + key_struct = struct.Struct(b'>4sLH') + value_struct = struct.Struct(b'>11s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>4s').pack, + struct.Struct(b'>4sL').pack, + struct.Struct(b'>4sLH').pack + ] + + @classmethod + def pack_key(cls, short_tx_hash: bytes, tx_num, nout: int): + return super().pack_key(short_tx_hash, tx_num, nout) + + @classmethod + def unpack_key(cls, key: bytes) -> HashXUTXOKey: + return HashXUTXOKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, hashX: bytes) -> bytes: + return super().pack_value(hashX) + + @classmethod + def unpack_value(cls, data: bytes) -> HashXUTXOValue: + return HashXUTXOValue(*cls.value_struct.unpack(data)) + + @classmethod + def pack_item(cls, short_tx_hash: bytes, tx_num: int, nout: int, hashX: bytes): + return cls.pack_key(short_tx_hash, tx_num, nout), cls.pack_value(hashX) + + +class HashXHistoryPrefixRow(PrefixRow): + prefix = DB_PREFIXES.hashx_history.value + key_struct = struct.Struct(b'>11sL') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>11s').pack, + struct.Struct(b'>11sL').pack + ] + + @classmethod + def pack_key(cls, hashX: bytes, height: int): + return super().pack_key(hashX, height) + + @classmethod + def unpack_key(cls, key: bytes) -> HashXHistoryKey: + return HashXHistoryKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, history: typing.List[int]) -> bytes: + a = array.array('I') + a.fromlist(history) + return a.tobytes() + + @classmethod + def unpack_value(cls, data: bytes) -> array.array: + a = array.array('I') + a.frombytes(data) + return a + + @classmethod + def pack_item(cls, hashX: bytes, height: int, history: typing.List[int]): + return cls.pack_key(hashX, height), cls.pack_value(history) + + +class TouchedOrDeletedPrefixRow(PrefixRow): + prefix = DB_PREFIXES.touched_or_deleted.value + key_struct = struct.Struct(b'>L') + value_struct = struct.Struct(b'>LL') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack + ] + + @classmethod + def pack_key(cls, height: int): + return super().pack_key(height) + + @classmethod + def unpack_key(cls, key: bytes) -> TouchedOrDeletedClaimKey: + return TouchedOrDeletedClaimKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, touched: typing.Set[bytes], deleted: typing.Set[bytes]) -> bytes: + assert True if not touched else all(len(item) == 20 for item in touched) + assert True if not deleted else all(len(item) == 20 for item in deleted) + return cls.value_struct.pack(len(touched), len(deleted)) + b''.join(sorted(touched)) + b''.join(sorted(deleted)) + + @classmethod + def unpack_value(cls, data: bytes) -> TouchedOrDeletedClaimValue: + touched_len, deleted_len = cls.value_struct.unpack(data[:8]) + data = data[8:] + assert len(data) == 20 * (touched_len + deleted_len) + touched_bytes, deleted_bytes = data[:touched_len*20], data[touched_len*20:] + return TouchedOrDeletedClaimValue( + {touched_bytes[20*i:20*(i+1)] for i in range(touched_len)}, + {deleted_bytes[20*i:20*(i+1)] for i in range(deleted_len)} + ) + + @classmethod + def pack_item(cls, height, touched, deleted): + return cls.pack_key(height), cls.pack_value(touched, deleted) + + +class ChannelCountPrefixRow(PrefixRow): + prefix = DB_PREFIXES.channel_count.value + key_struct = struct.Struct(b'>20s') + value_struct = struct.Struct(b'>L') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack + ] + + @classmethod + def pack_key(cls, channel_hash: bytes): + return super().pack_key(channel_hash) + + 
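+
+    # HashXHistoryPrefixRow above stores a script hash's confirmed history as a
+    # packed array of native unsigned 32 bit tx numbers (array typecode 'I'), so
+    # the value is only portable between readers with the same byte order.
+    # Illustrative round trip:
+    #   >>> import array
+    #   >>> packed = HashXHistoryPrefixRow.pack_value([1, 2, 3])
+    #   >>> array.array('I', packed).tolist()
+    #   [1, 2, 3]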
@classmethod + def unpack_key(cls, key: bytes) -> ChannelCountKey: + return ChannelCountKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, count: int) -> bytes: + return super().pack_value(count) + + @classmethod + def unpack_value(cls, data: bytes) -> ChannelCountValue: + return ChannelCountValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, channel_hash, count): + return cls.pack_key(channel_hash), cls.pack_value(count) + + +class SupportAmountPrefixRow(PrefixRow): + prefix = DB_PREFIXES.support_amount.value + key_struct = struct.Struct(b'>20s') + value_struct = struct.Struct(b'>Q') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack + ] + + @classmethod + def pack_key(cls, claim_hash: bytes): + return super().pack_key(claim_hash) + + @classmethod + def unpack_key(cls, key: bytes) -> SupportAmountKey: + return SupportAmountKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, amount: int) -> bytes: + return super().pack_value(amount) + + @classmethod + def unpack_value(cls, data: bytes) -> SupportAmountValue: + return SupportAmountValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, claim_hash, amount): + return cls.pack_key(claim_hash), cls.pack_value(amount) + + +class DBStatePrefixRow(PrefixRow): + prefix = DB_PREFIXES.db_state.value + value_struct = struct.Struct(b'>32sLL32sLLBBlllL') + key_struct = struct.Struct(b'') + + key_part_lambdas = [ + lambda: b'' + ] + + @classmethod + def pack_key(cls) -> bytes: + return cls.prefix + + @classmethod + def unpack_key(cls, key: bytes): + return + + @classmethod + def pack_value(cls, genesis: bytes, height: int, tx_count: int, tip: bytes, utxo_flush_count: int, wall_time: int, + first_sync: bool, db_version: int, hist_flush_count: int, comp_flush_count: int, + comp_cursor: int, es_sync_height: int) -> bytes: + return super().pack_value( + genesis, height, tx_count, tip, utxo_flush_count, + wall_time, 1 if first_sync else 0, db_version, hist_flush_count, + comp_flush_count, comp_cursor, es_sync_height + ) + + @classmethod + def unpack_value(cls, data: bytes) -> DBState: + if len(data) == 94: + # TODO: delete this after making a new snapshot - 10/20/21 + # migrate in the es_sync_height if it doesnt exist + data += data[32:36] + return DBState(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, genesis: bytes, height: int, tx_count: int, tip: bytes, utxo_flush_count: int, wall_time: int, + first_sync: bool, db_version: int, hist_flush_count: int, comp_flush_count: int, + comp_cursor: int, es_sync_height: int): + return cls.pack_key(), cls.pack_value( + genesis, height, tx_count, tip, utxo_flush_count, wall_time, first_sync, db_version, hist_flush_count, + comp_flush_count, comp_cursor, es_sync_height + ) + + +class BlockTxsPrefixRow(PrefixRow): + prefix = DB_PREFIXES.block_tx.value + key_struct = struct.Struct(b'>L') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack + ] + + @classmethod + def pack_key(cls, height: int): + return super().pack_key(height) + + @classmethod + def unpack_key(cls, key: bytes) -> BlockTxsKey: + return BlockTxsKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, tx_hashes: typing.List[bytes]) -> bytes: + assert all(len(tx_hash) == 32 for tx_hash in tx_hashes) + return b''.join(tx_hashes) + + @classmethod + def unpack_value(cls, data: bytes) -> BlockTxsValue: + return BlockTxsValue([data[i*32:(i+1)*32] for i in range(len(data) // 32)]) + + @classmethod + def pack_item(cls, height, 
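+
+            # BlockTxsPrefixRow above stores a block's tx hashes as one
+            # concatenated blob; the fixed 32 byte width is what lets
+            # unpack_value slice it back apart. Illustrative check:
+            #   >>> hashes = [bytes([i]) * 32 for i in range(3)]
+            #   >>> packed = BlockTxsPrefixRow.pack_value(hashes)
+            #   >>> len(packed)
+            #   96
+            #   >>> BlockTxsPrefixRow.unpack_value(packed) == BlockTxsValue(hashes)
+            #   True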
tx_hashes): + return cls.pack_key(height), cls.pack_value(tx_hashes) + + +class MempoolTxKey(NamedTuple): + tx_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(tx_hash={self.tx_hash[::-1].hex()})" + + +class MempoolTxValue(NamedTuple): + raw_tx: bytes + + def __str__(self): + return f"{self.__class__.__name__}(raw_tx={base64.b64encode(self.raw_tx).decode()})" + + +class MempoolTXPrefixRow(PrefixRow): + prefix = DB_PREFIXES.mempool_tx.value + key_struct = struct.Struct(b'>32s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>32s').pack + ] + + @classmethod + def pack_key(cls, tx_hash: bytes) -> bytes: + return super().pack_key(tx_hash) + + @classmethod + def unpack_key(cls, tx_hash: bytes) -> MempoolTxKey: + return MempoolTxKey(*super().unpack_key(tx_hash)) + + @classmethod + def pack_value(cls, tx: bytes) -> bytes: + return tx + + @classmethod + def unpack_value(cls, data: bytes) -> MempoolTxValue: + return MempoolTxValue(data) + + @classmethod + def pack_item(cls, tx_hash: bytes, raw_tx: bytes): + return cls.pack_key(tx_hash), cls.pack_value(raw_tx) + + +class TrendingNotificationKey(typing.NamedTuple): + height: int + claim_hash: bytes + + +class TrendingNotificationValue(typing.NamedTuple): + previous_amount: int + new_amount: int + + +class TrendingNotificationPrefixRow(PrefixRow): + prefix = DB_PREFIXES.trending_notifications.value + key_struct = struct.Struct(b'>L20s') + value_struct = struct.Struct(b'>QQ') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack, + struct.Struct(b'>L20s').pack + ] + + @classmethod + def pack_key(cls, height: int, claim_hash: bytes): + return super().pack_key(height, claim_hash) + + @classmethod + def unpack_key(cls, key: bytes) -> TrendingNotificationKey: + return TrendingNotificationKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, previous_amount: int, new_amount: int) -> bytes: + return super().pack_value(previous_amount, new_amount) + + @classmethod + def unpack_value(cls, data: bytes) -> TrendingNotificationValue: + return TrendingNotificationValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, height, claim_hash, previous_amount, new_amount): + return cls.pack_key(height, claim_hash), cls.pack_value(previous_amount, new_amount) + + +class TouchedHashXKey(NamedTuple): + height: int + + +class TouchedHashXValue(NamedTuple): + touched_hashXs: typing.List[bytes] + + def __str__(self): + return f"{self.__class__.__name__}(touched_hashXs=[{', '.join(map(lambda x: x.hex(), self.touched_hashXs))}])" + + +class TouchedHashXPrefixRow(PrefixRow): + prefix = DB_PREFIXES.touched_hashX.value + key_struct = struct.Struct(b'>L') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack + ] + + @classmethod + def pack_key(cls, height: int): + return super().pack_key(height) + + @classmethod + def unpack_key(cls, key: bytes) -> TouchedHashXKey: + return TouchedHashXKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, touched: typing.List[bytes]) -> bytes: + assert all(map(lambda item: len(item) == 11, touched)) + return b''.join(touched) + + @classmethod + def unpack_value(cls, data: bytes) -> TouchedHashXValue: + return TouchedHashXValue([data[idx*11:(idx*11)+11] for idx in range(len(data) // 11)]) + + @classmethod + def pack_item(cls, height: int, touched: typing.List[bytes]): + return cls.pack_key(height), cls.pack_value(touched) + + +class PrefixDB(BasePrefixDB): + def __init__(self, path: str, cache_mb: int = 128, reorg_limit: int = 200, 
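+
+                 # TouchedHashXValue above is a bare concatenation of fixed
+                 # 11 byte hashXs, re-sliced on read. Illustrative round trip:
+                 #   >>> v = TouchedHashXPrefixRow.pack_value([b'\x01' * 11, b'\x02' * 11])
+                 #   >>> len(v)
+                 #   22
+                 #   >>> TouchedHashXPrefixRow.unpack_value(v).touched_hashXs[1] == b'\x02' * 11
+                 #   True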
max_open_files: int = 64, + secondary_path: str = '', unsafe_prefixes: Optional[typing.Set[bytes]] = None): + super().__init__(path, max_open_files=max_open_files, secondary_path=secondary_path, + max_undo_depth=reorg_limit, unsafe_prefixes=unsafe_prefixes) + db = self._db + self.claim_to_support = ClaimToSupportPrefixRow(db, self._op_stack) + self.support_to_claim = SupportToClaimPrefixRow(db, self._op_stack) + self.claim_to_txo = ClaimToTXOPrefixRow(db, self._op_stack) + self.txo_to_claim = TXOToClaimPrefixRow(db, self._op_stack) + self.claim_to_channel = ClaimToChannelPrefixRow(db, self._op_stack) + self.channel_to_claim = ChannelToClaimPrefixRow(db, self._op_stack) + self.claim_short_id = ClaimShortIDPrefixRow(db, self._op_stack) + self.claim_expiration = ClaimExpirationPrefixRow(db, self._op_stack) + self.claim_takeover = ClaimTakeoverPrefixRow(db, self._op_stack) + self.pending_activation = PendingActivationPrefixRow(db, self._op_stack) + self.activated = ActivatedPrefixRow(db, self._op_stack) + self.active_amount = ActiveAmountPrefixRow(db, self._op_stack) + self.effective_amount = EffectiveAmountPrefixRow(db, self._op_stack) + self.repost = RepostPrefixRow(db, self._op_stack) + self.reposted_claim = RepostedPrefixRow(db, self._op_stack) + self.undo = UndoPrefixRow(db, self._op_stack) + self.utxo = UTXOPrefixRow(db, self._op_stack) + self.hashX_utxo = HashXUTXOPrefixRow(db, self._op_stack) + self.hashX_history = HashXHistoryPrefixRow(db, self._op_stack) + self.block_hash = BlockHashPrefixRow(db, self._op_stack) + self.tx_count = TxCountPrefixRow(db, self._op_stack) + self.tx_hash = TXHashPrefixRow(db, self._op_stack) + self.tx_num = TXNumPrefixRow(db, self._op_stack) + self.tx = TXPrefixRow(db, self._op_stack) + self.header = BlockHeaderPrefixRow(db, self._op_stack) + self.touched_or_deleted = TouchedOrDeletedPrefixRow(db, self._op_stack) + self.channel_count = ChannelCountPrefixRow(db, self._op_stack) + self.db_state = DBStatePrefixRow(db, self._op_stack) + self.support_amount = SupportAmountPrefixRow(db, self._op_stack) + self.block_txs = BlockTxsPrefixRow(db, self._op_stack) + self.mempool_tx = MempoolTXPrefixRow(db, self._op_stack) + self.trending_notification = TrendingNotificationPrefixRow(db, self._op_stack) + self.touched_hashX = TouchedHashXPrefixRow(db, self._op_stack) + + +def auto_decode_item(key: bytes, value: bytes) -> Union[Tuple[NamedTuple, NamedTuple], Tuple[bytes, bytes]]: + try: + return ROW_TYPES[key[:1]].unpack_item(key, value) + except KeyError: + return key, value diff --git a/scribe/db/revertable.py b/scribe/db/revertable.py new file mode 100644 index 0000000..64e1d88 --- /dev/null +++ b/scribe/db/revertable.py @@ -0,0 +1,175 @@ +import struct +import logging +from string import printable +from collections import defaultdict +from typing import Tuple, Iterable, Callable, Optional +from scribe.db.common import DB_PREFIXES + +_OP_STRUCT = struct.Struct('>BLL') +log = logging.getLogger(__name__) + + +class RevertableOp: + __slots__ = [ + 'key', + 'value', + ] + is_put = 0 + + def __init__(self, key: bytes, value: bytes): + self.key = key + self.value = value + + @property + def is_delete(self) -> bool: + return not self.is_put + + def invert(self) -> 'RevertableOp': + raise NotImplementedError() + + def pack(self) -> bytes: + """ + Serialize to bytes + """ + return struct.pack( + f'>BLL{len(self.key)}s{len(self.value)}s', int(self.is_put), len(self.key), len(self.value), self.key, + self.value + ) + + @classmethod + def unpack(cls, packed: bytes) -> 
Tuple['RevertableOp', bytes]: + """ + Deserialize from bytes + + :param packed: bytes containing at least one packed revertable op + :return: tuple of the deserialized op (a put or a delete) and the remaining serialized bytes + """ + is_put, key_len, val_len = _OP_STRUCT.unpack(packed[:9]) + key = packed[9:9 + key_len] + value = packed[9 + key_len:9 + key_len + val_len] + if is_put == 1: + return RevertablePut(key, value), packed[9 + key_len + val_len:] + return RevertableDelete(key, value), packed[9 + key_len + val_len:] + + def __eq__(self, other: 'RevertableOp') -> bool: + return (self.is_put, self.key, self.value) == (other.is_put, other.key, other.value) + + def __repr__(self) -> str: + return str(self) + + def __str__(self) -> str: + from scribe.db.prefixes import auto_decode_item + k, v = auto_decode_item(self.key, self.value) + key = ''.join(c if c in printable else '.' for c in str(k)) + val = ''.join(c if c in printable else '.' for c in str(v)) + return f"{'PUT' if self.is_put else 'DELETE'} {DB_PREFIXES(self.key[:1]).name}: {key} | {val}" + + +class RevertableDelete(RevertableOp): + def invert(self): + return RevertablePut(self.key, self.value) + + +class RevertablePut(RevertableOp): + is_put = True + + def invert(self): + return RevertableDelete(self.key, self.value) + + +class OpStackIntegrity(Exception): + pass + + +class RevertableOpStack: + def __init__(self, get_fn: Callable[[bytes], Optional[bytes]], unsafe_prefixes=None): + """ + This represents a sequence of revertable puts and deletes to a key-value database that checks for integrity + violations when applying the puts and deletes. The integrity checks assure that keys that do not exist + are not deleted, and that when keys are deleted the current value is correctly known so that the delete + may be undone. When putting values, the integrity checks assure that existing values are not overwritten + without first being deleted. Updates are performed by applying a delete op for the old value and a put op + for the new value. + + :param get_fn: getter function from an object implementing `KeyValueStorage` + :param unsafe_prefixes: optional set of prefixes to ignore integrity errors for, violations are still logged + """ + self._get = get_fn + self._items = defaultdict(list) + self._unsafe_prefixes = unsafe_prefixes or set() + + def append_op(self, op: RevertableOp): + """ + Apply a put or delete op, checking that it introduces no integrity errors + """ + + inverted = op.invert() + if self._items[op.key] and inverted == self._items[op.key][-1]: + self._items[op.key].pop() # if the new op is the inverse of the last op, we can safely null both + return + elif self._items[op.key] and self._items[op.key][-1] == op: # duplicate of last op + return # raise an error? 
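+
+        # The pop above means an op followed immediately by its inverse cancels
+        # out entirely. A minimal sketch against an empty backing store:
+        #   >>> stack = RevertableOpStack(get_fn=lambda key: None)
+        #   >>> op = RevertablePut(b'\x01key', b'value')
+        #   >>> stack.append_op(op)
+        #   >>> stack.append_op(op.invert())  # cancels the put instead of stacking
+        #   >>> len(stack)
+        #   0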
+ stored_val = self._get(op.key) + has_stored_val = stored_val is not None + delete_stored_op = None if not has_stored_val else RevertableDelete(op.key, stored_val) + will_delete_existing_stored = False if delete_stored_op is None else (delete_stored_op in self._items[op.key]) + try: + if op.is_put and has_stored_val and not will_delete_existing_stored: + raise OpStackIntegrity( + f"db op tries to add on top of existing key without deleting first: {op}" + ) + elif op.is_delete and has_stored_val and stored_val != op.value and not will_delete_existing_stored: + # there is a value and we're not deleting it in this op + # check that a delete for the stored value is in the stack + raise OpStackIntegrity(f"db op tries to delete with incorrect existing value {op}") + elif op.is_delete and not has_stored_val: + raise OpStackIntegrity(f"db op tries to delete nonexistent key: {op}") + elif op.is_delete and stored_val != op.value: + raise OpStackIntegrity(f"db op tries to delete with incorrect value: {op}") + except OpStackIntegrity as err: + if op.key[:1] in self._unsafe_prefixes: + log.debug(f"skipping over integrity error: {err}") + else: + raise err + self._items[op.key].append(op) + + def extend_ops(self, ops: Iterable[RevertableOp]): + """ + Apply a sequence of put or delete ops, checking that they introduce no integrity errors + """ + for op in ops: + self.append_op(op) + + def clear(self): + self._items.clear() + + def __len__(self): + return sum(map(len, self._items.values())) + + def __iter__(self): + for key, ops in self._items.items(): + for op in ops: + yield op + + def __reversed__(self): + for key, ops in self._items.items(): + for op in reversed(ops): + yield op + + def get_undo_ops(self) -> bytes: + """ + Get the serialized bytes to undo all of the changes made by the pending ops + """ + return b''.join(op.invert().pack() for op in reversed(self)) + + def apply_packed_undo_ops(self, packed: bytes): + """ + Unpack and apply a sequence of undo ops from serialized undo bytes + """ + while packed: + op, packed = RevertableOp.unpack(packed) + self.append_op(op) + + def get_last_op_for_key(self, key: bytes) -> Optional[RevertableOp]: + if key in self._items and self._items[key]: + return self._items[key][-1] diff --git a/scribe/elasticsearch/__init__.py b/scribe/elasticsearch/__init__.py new file mode 100644 index 0000000..041e949 --- /dev/null +++ b/scribe/elasticsearch/__init__.py @@ -0,0 +1,2 @@ +from .search import SearchIndex +from .notifier_protocol import ElasticNotifierClientProtocol \ No newline at end of file diff --git a/scribe/elasticsearch/constants.py b/scribe/elasticsearch/constants.py new file mode 100644 index 0000000..afdfd6f --- /dev/null +++ b/scribe/elasticsearch/constants.py @@ -0,0 +1,100 @@ +INDEX_DEFAULT_SETTINGS = { + "settings": + {"analysis": + {"analyzer": { + "default": {"tokenizer": "whitespace", "filter": ["lowercase", "porter_stem"]}}}, + "index": + {"refresh_interval": -1, + "number_of_shards": 1, + "number_of_replicas": 0, + "sort": { + "field": ["trending_score", "release_time"], + "order": ["desc", "desc"] + }} + }, + "mappings": { + "properties": { + "claim_id": { + "fields": { + "keyword": { + "ignore_above": 256, + "type": "keyword" + } + }, + "type": "text", + "index_prefixes": { + "min_chars": 1, + "max_chars": 10 + } + }, + "sd_hash": { + "fields": { + "keyword": { + "ignore_above": 96, + "type": "keyword" + } + }, + "type": "text", + "index_prefixes": { + "min_chars": 1, + "max_chars": 4 + } + }, + "height": {"type": "integer"}, + "claim_type": 
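+
+    # On the revertable op wire format defined in scribe/db/revertable.py above:
+    # each serialized op is a 9 byte header (a put/delete flag, a 4 byte key
+    # length and a 4 byte value length, all big endian) followed by the raw key
+    # and value, so undo blobs can be walked without any delimiters:
+    #   >>> packed = RevertablePut(b'k', b'v').pack()
+    #   >>> packed.hex()
+    #   '0100000001000000016b76'
+    #   >>> RevertableOp.unpack(packed)[1]  # nothing left over after one op
+    #   b''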
{"type": "byte"}, + "censor_type": {"type": "byte"}, + "trending_score": {"type": "double"}, + "release_time": {"type": "long"} + } + } +} + +FIELDS = { + '_id', + 'claim_id', 'claim_type', 'claim_name', 'normalized_name', + 'tx_id', 'tx_nout', 'tx_position', + 'short_url', 'canonical_url', + 'is_controlling', 'last_take_over_height', + 'public_key_bytes', 'public_key_id', 'claims_in_channel', + 'channel_id', 'signature', 'signature_digest', 'is_signature_valid', + 'amount', 'effective_amount', 'support_amount', + 'fee_amount', 'fee_currency', + 'height', 'creation_height', 'activation_height', 'expiration_height', + 'stream_type', 'media_type', 'censor_type', + 'title', 'author', 'description', + 'timestamp', 'creation_timestamp', + 'duration', 'release_time', + 'tags', 'languages', 'has_source', 'reposted_claim_type', + 'reposted_claim_id', 'repost_count', 'sd_hash', + 'trending_score', 'tx_num' +} + +TEXT_FIELDS = {'author', 'canonical_url', 'channel_id', 'description', 'claim_id', 'censoring_channel_id', + 'media_type', 'normalized_name', 'public_key_bytes', 'public_key_id', 'short_url', 'signature', + 'claim_name', 'signature_digest', 'title', 'tx_id', 'fee_currency', 'reposted_claim_id', + 'tags', 'sd_hash'} + +RANGE_FIELDS = { + 'height', 'creation_height', 'activation_height', 'expiration_height', + 'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount', + 'tx_position', 'repost_count', 'limit_claims_per_channel', + 'amount', 'effective_amount', 'support_amount', + 'trending_score', 'censor_type', 'tx_num' +} + +ALL_FIELDS = RANGE_FIELDS | TEXT_FIELDS | FIELDS + +REPLACEMENTS = { + 'claim_name': 'normalized_name', + 'name': 'normalized_name', + 'txid': 'tx_id', + 'nout': 'tx_nout', + 'trending_group': 'trending_score', + 'trending_mixed': 'trending_score', + 'trending_global': 'trending_score', + 'trending_local': 'trending_score', + 'reposted': 'repost_count', + 'stream_types': 'stream_type', + 'media_types': 'media_type', + 'valid_channel_signature': 'is_signature_valid' +} diff --git a/scribe/elasticsearch/fast_ar_trending.py b/scribe/elasticsearch/fast_ar_trending.py new file mode 100644 index 0000000..2e5836b --- /dev/null +++ b/scribe/elasticsearch/fast_ar_trending.py @@ -0,0 +1,117 @@ +FAST_AR_TRENDING_SCRIPT = """ +double softenLBC(double lbc) { return (Math.pow(lbc, 1.0 / 3.0)); } + +double logsumexp(double x, double y) +{ + double top; + if(x > y) + top = x; + else + top = y; + double result = top + Math.log(Math.exp(x-top) + Math.exp(y-top)); + return(result); +} + +double logdiffexp(double big, double small) +{ + return big + Math.log(1.0 - Math.exp(small - big)); +} + +double squash(double x) +{ + if(x < 0.0) +return -Math.log(1.0 - x); + else +return Math.log(x + 1.0); +} + +double unsquash(double x) +{ + if(x < 0.0) + return 1.0 - Math.exp(-x); + else + return Math.exp(x) - 1.0; +} + +double log_to_squash(double x) +{ + return logsumexp(x, 0.0); +} + +double squash_to_log(double x) +{ + //assert x > 0.0; + return logdiffexp(x, 0.0); +} + +double squashed_add(double x, double y) +{ + // squash(unsquash(x) + unsquash(y)) but avoiding overflow. 
+ // Cases where the signs are the same + if (x < 0.0 && y < 0.0) + return -logsumexp(-x, logdiffexp(-y, 0.0)); + if (x >= 0.0 && y >= 0.0) + return logsumexp(x, logdiffexp(y, 0.0)); + // Where the signs differ + if (x >= 0.0 && y < 0.0) + if (Math.abs(x) >= Math.abs(y)) + return logsumexp(0.0, logdiffexp(x, -y)); + else + return -logsumexp(0.0, logdiffexp(-y, x)); + if (x < 0.0 && y >= 0.0) + { + // Addition is commutative, hooray for new math + return squashed_add(y, x); + } + return 0.0; +} + +double squashed_multiply(double x, double y) +{ + // squash(unsquash(x)*unsquash(y)) but avoiding overflow. + int sign; + if(x*y >= 0.0) +sign = 1; + else +sign = -1; + return sign*logsumexp(squash_to_log(Math.abs(x)) + + squash_to_log(Math.abs(y)), 0.0); +} + +// Squashed inflated units +double inflateUnits(int height) { + double timescale = 576.0; // Half life of 400 = e-folding time of a day + // by coincidence, so may as well go with it + return log_to_squash(height / timescale); +} + +double spikePower(double newAmount) { + if (newAmount < 50.0) { + return(0.5); + } else if (newAmount < 85.0) { + return(newAmount / 100.0); + } else { + return(0.85); + } +} + +double spikeMass(double oldAmount, double newAmount) { + double softenedChange = softenLBC(Math.abs(newAmount - oldAmount)); + double changeInSoftened = Math.abs(softenLBC(newAmount) - softenLBC(oldAmount)); + double power = spikePower(newAmount); + if (oldAmount > newAmount) { + return -1.0 * Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power); + } else { + return Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power); + } +} + +for (i in params.src.changes) { + double units = inflateUnits(i.height); + if (ctx._source.trending_score == null) { + ctx._source.trending_score = 0.0; + } + double bigSpike = squashed_multiply(units, squash(spikeMass(i.prev_amount, i.new_amount))); + ctx._source.trending_score = squashed_add(ctx._source.trending_score, bigSpike); +} +""" diff --git a/scribe/elasticsearch/notifier_protocol.py b/scribe/elasticsearch/notifier_protocol.py new file mode 100644 index 0000000..900668c --- /dev/null +++ b/scribe/elasticsearch/notifier_protocol.py @@ -0,0 +1,55 @@ +import typing +import struct +import asyncio +import logging + + +log = logging.getLogger(__name__) + + +class ElasticNotifierProtocol(asyncio.Protocol): + """notifies the reader when ES has written updates""" + + def __init__(self, listeners): + self._listeners = listeners + self.transport: typing.Optional[asyncio.Transport] = None + + def connection_made(self, transport): + self.transport = transport + self._listeners.append(self) + log.info("got es notifier connection") + + def connection_lost(self, exc) -> None: + self._listeners.remove(self) + self.transport = None + + def send_height(self, height: int, block_hash: bytes): + log.info("notify es update '%s'", height) + self.transport.write(struct.pack(b'>Q32s', height, block_hash)) + + +class ElasticNotifierClientProtocol(asyncio.Protocol): + """notifies the reader when ES has written updates""" + + def __init__(self, notifications: asyncio.Queue): + self.notifications = notifications + self.transport: typing.Optional[asyncio.Transport] = None + + def close(self): + if self.transport and not self.transport.is_closing(): + self.transport.close() + + def connection_made(self, transport): + self.transport = transport + log.info("connected to es notifier") + + def connection_lost(self, exc) -> None: + self.transport = None + + def data_received(self, data: bytes) -> None: + try: + 
height, block_hash = struct.unpack(b'>Q32s', data) + except: + log.exception("failed to decode %s", (data or b'').hex()) + raise + self.notifications.put_nowait((height, block_hash)) diff --git a/scribe/elasticsearch/search.py b/scribe/elasticsearch/search.py new file mode 100644 index 0000000..5089464 --- /dev/null +++ b/scribe/elasticsearch/search.py @@ -0,0 +1,870 @@ +import logging +import time +import asyncio +import struct +from binascii import unhexlify +from collections import Counter, deque +from decimal import Decimal +from operator import itemgetter +from typing import Optional, List, Iterable + +from elasticsearch import AsyncElasticsearch, NotFoundError, ConnectionError +from elasticsearch.helpers import async_streaming_bulk +from scribe.schema.result import Outputs, Censor +from scribe.schema.tags import clean_tags +from scribe.schema.url import normalize_name +from scribe.error import TooManyClaimSearchParametersError +from scribe.common import LRUCache +from scribe.db.common import CLAIM_TYPES, STREAM_TYPES +from scribe.elasticsearch.constants import INDEX_DEFAULT_SETTINGS, REPLACEMENTS, FIELDS, TEXT_FIELDS, \ + RANGE_FIELDS, ALL_FIELDS +from scribe.db.common import ResolveResult + + +def expand_query(**kwargs): + if "amount_order" in kwargs: + kwargs["limit"] = 1 + kwargs["order_by"] = "effective_amount" + kwargs["offset"] = int(kwargs["amount_order"]) - 1 + if 'name' in kwargs: + kwargs['name'] = normalize_name(kwargs.pop('name')) + if kwargs.get('is_controlling') is False: + kwargs.pop('is_controlling') + query = {'must': [], 'must_not': []} + collapse = None + if 'fee_currency' in kwargs and kwargs['fee_currency'] is not None: + kwargs['fee_currency'] = kwargs['fee_currency'].upper() + for key, value in kwargs.items(): + key = key.replace('claim.', '') + many = key.endswith('__in') or isinstance(value, list) + if many and len(value) > 2048: + raise TooManyClaimSearchParametersError(key, 2048) + if many: + key = key.replace('__in', '') + value = list(filter(None, value)) + if value is None or isinstance(value, list) and len(value) == 0: + continue + key = REPLACEMENTS.get(key, key) + if key in FIELDS: + partial_id = False + if key == 'claim_type': + if isinstance(value, str): + value = CLAIM_TYPES[value] + else: + value = [CLAIM_TYPES[claim_type] for claim_type in value] + elif key == 'stream_type': + value = [STREAM_TYPES[value]] if isinstance(value, str) else list(map(STREAM_TYPES.get, value)) + if key == '_id': + if isinstance(value, Iterable): + value = [item[::-1].hex() for item in value] + else: + value = value[::-1].hex() + if not many and key in ('_id', 'claim_id') and len(value) < 20: + partial_id = True + if key in ('signature_valid', 'has_source'): + continue # handled later + if key in TEXT_FIELDS: + key += '.keyword' + ops = {'<=': 'lte', '>=': 'gte', '<': 'lt', '>': 'gt'} + if partial_id: + query['must'].append({"prefix": {"claim_id": value}}) + elif key in RANGE_FIELDS and isinstance(value, str) and value[0] in ops: + operator_length = 2 if value[:2] in ops else 1 + operator, value = value[:operator_length], value[operator_length:] + if key == 'fee_amount': + value = str(Decimal(value)*1000) + query['must'].append({"range": {key: {ops[operator]: value}}}) + elif many: + query['must'].append({"terms": {key: value}}) + else: + if key == 'fee_amount': + value = str(Decimal(value)*1000) + query['must'].append({"term": {key: {"value": value}}}) + elif key == 'not_channel_ids': + for channel_id in value: + query['must_not'].append({"term": {'channel_id.keyword': 
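+
+                # On the notifier protocol earlier in this change: every update
+                # is framed as exactly 40 bytes, an 8 byte big endian height plus
+                # the 32 byte block hash, so data_received only works while each
+                # read delivers one whole frame:
+                #   >>> import struct
+                #   >>> struct.calcsize('>Q32s')
+                #   40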
channel_id}}) + query['must_not'].append({"term": {'_id': channel_id}}) + elif key == 'channel_ids': + query['must'].append({"terms": {'channel_id.keyword': value}}) + elif key == 'claim_ids': + query['must'].append({"terms": {'claim_id.keyword': value}}) + elif key == 'media_types': + query['must'].append({"terms": {'media_type.keyword': value}}) + elif key == 'any_languages': + query['must'].append({"terms": {'languages': clean_tags(value)}}) + elif key == 'all_languages': + query['must'].extend([{"term": {'languages': tag}} for tag in value]) + elif key == 'any_tags': + query['must'].append({"terms": {'tags.keyword': clean_tags(value)}}) + elif key == 'all_tags': + query['must'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)]) + elif key == 'not_tags': + query['must_not'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)]) + elif key == 'not_claim_id': + query['must_not'].extend([{"term": {'claim_id.keyword': cid}} for cid in value]) + elif key == 'limit_claims_per_channel': + collapse = ('channel_id.keyword', value) + if kwargs.get('has_channel_signature'): + query['must'].append({"exists": {"field": "signature"}}) + if 'signature_valid' in kwargs: + query['must'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}}) + elif 'signature_valid' in kwargs: + query.setdefault('should', []) + query["minimum_should_match"] = 1 + query['should'].append({"bool": {"must_not": {"exists": {"field": "signature"}}}}) + query['should'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}}) + if 'has_source' in kwargs: + query.setdefault('should', []) + query["minimum_should_match"] = 1 + is_stream_or_repost = {"terms": {"claim_type": [CLAIM_TYPES['stream'], CLAIM_TYPES['repost']]}} + query['should'].append( + {"bool": {"must": [{"match": {"has_source": kwargs['has_source']}}, is_stream_or_repost]}}) + query['should'].append({"bool": {"must_not": [is_stream_or_repost]}}) + query['should'].append({"bool": {"must": [{"term": {"reposted_claim_type": CLAIM_TYPES['channel']}}]}}) + if kwargs.get('text'): + query['must'].append( + {"simple_query_string": + {"query": kwargs["text"], "fields": [ + "claim_name^4", "channel_name^8", "title^1", "description^.5", "author^1", "tags^.5" + ]}}) + query = { + "_source": {"excludes": ["description", "title"]}, + 'query': {'bool': query}, + "sort": [], + } + if "limit" in kwargs: + query["size"] = kwargs["limit"] + if 'offset' in kwargs: + query["from"] = kwargs["offset"] + if 'order_by' in kwargs: + if isinstance(kwargs["order_by"], str): + kwargs["order_by"] = [kwargs["order_by"]] + for value in kwargs['order_by']: + if 'trending_group' in value: + # fixme: trending_mixed is 0 for all records on variable decay, making sort slow. 
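+
+            # Illustrative output shape (trimmed): expand_query(name='one', limit=10)
+            # builds roughly
+            #   {'_source': {'excludes': ['description', 'title']},
+            #    'query': {'bool': {'must': [{'term': {'normalized_name.keyword': {'value': 'one'}}}],
+            #                       'must_not': []}},
+            #    'sort': [],
+            #    'size': 10}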
+ continue + is_asc = value.startswith('^') + value = value[1:] if is_asc else value + value = REPLACEMENTS.get(value, value) + if value in TEXT_FIELDS: + value += '.keyword' + query['sort'].append({value: "asc" if is_asc else "desc"}) + if collapse: + query["collapse"] = { + "field": collapse[0], + "inner_hits": { + "name": collapse[0], + "size": collapse[1], + "sort": query["sort"] + } + } + return query + + + +class ChannelResolution(str): + @classmethod + def lookup_error(cls, url): + return LookupError(f'Could not find channel in "{url}".') + + +class StreamResolution(str): + @classmethod + def lookup_error(cls, url): + return LookupError(f'Could not find claim at "{url}".') + + +class IndexVersionMismatch(Exception): + def __init__(self, got_version, expected_version): + self.got_version = got_version + self.expected_version = expected_version + + +class SearchIndex: + VERSION = 1 + + def __init__(self, index_prefix: str, search_timeout=3.0, elastic_host='localhost', elastic_port=9200): + self.search_timeout = search_timeout + self.sync_timeout = 600 # wont hit that 99% of the time, but can hit on a fresh import + self.search_client: Optional[AsyncElasticsearch] = None + self.sync_client: Optional[AsyncElasticsearch] = None + self.index = index_prefix + 'claims' + self.logger = logging.getLogger(__name__) + self.claim_cache = LRUCache(2 ** 15) + self.search_cache = LRUCache(2 ** 17) + self._elastic_host = elastic_host + self._elastic_port = elastic_port + + async def get_index_version(self) -> int: + try: + template = await self.sync_client.indices.get_template(self.index) + return template[self.index]['version'] + except NotFoundError: + return 0 + + async def set_index_version(self, version): + await self.sync_client.indices.put_template( + self.index, body={'version': version, 'index_patterns': ['ignored']}, ignore=400 + ) + + async def start(self) -> bool: + if self.sync_client: + return False + hosts = [{'host': self._elastic_host, 'port': self._elastic_port}] + self.sync_client = AsyncElasticsearch(hosts, timeout=self.sync_timeout) + self.search_client = AsyncElasticsearch(hosts, timeout=self.search_timeout) + while True: + try: + await self.sync_client.cluster.health(wait_for_status='yellow') + break + except ConnectionError: + self.logger.warning("Failed to connect to Elasticsearch. 
Waiting for it!") + await asyncio.sleep(1) + + res = await self.sync_client.indices.create(self.index, INDEX_DEFAULT_SETTINGS, ignore=400) + acked = res.get('acknowledged', False) + if acked: + await self.set_index_version(self.VERSION) + return acked + index_version = await self.get_index_version() + if index_version != self.VERSION: + self.logger.error("es search index has an incompatible version: %s vs %s", index_version, self.VERSION) + raise IndexVersionMismatch(index_version, self.VERSION) + await self.sync_client.indices.refresh(self.index) + return acked + + async def stop(self): + clients = [c for c in (self.sync_client, self.search_client) if c is not None] + self.sync_client, self.search_client = None, None + if clients: + await asyncio.gather(*(client.close() for client in clients)) + + def delete_index(self): + return self.sync_client.indices.delete(self.index, ignore_unavailable=True) + + async def _consume_claim_producer(self, claim_producer): + count = 0 + async for op, doc in claim_producer: + if op == 'delete': + yield { + '_index': self.index, + '_op_type': 'delete', + '_id': doc + } + else: + yield { + 'doc': {key: value for key, value in doc.items() if key in ALL_FIELDS}, + '_id': doc['claim_id'], + '_index': self.index, + '_op_type': 'update', + 'doc_as_upsert': True + } + count += 1 + if count % 100 == 0: + self.logger.info("Indexing in progress, %d claims.", count) + if count: + self.logger.info("Indexing done for %d claims.", count) + else: + self.logger.debug("Indexing done for %d claims.", count) + + async def claim_consumer(self, claim_producer): + touched = set() + async for ok, item in async_streaming_bulk(self.sync_client, self._consume_claim_producer(claim_producer), + raise_on_error=False): + if not ok: + self.logger.warning("indexing failed for an item: %s", item) + else: + item = item.popitem()[1] + touched.add(item['_id']) + await self.sync_client.indices.refresh(self.index) + self.logger.debug("Indexing done.") + + def update_filter_query(self, censor_type, blockdict, channels=False): + blockdict = {blocked.hex(): blocker.hex() for blocked, blocker in blockdict.items()} + if channels: + update = expand_query(channel_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}") + else: + update = expand_query(claim_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}") + key = 'channel_id' if channels else 'claim_id' + update['script'] = { + "source": f"ctx._source.censor_type={censor_type}; " + f"ctx._source.censoring_channel_id=params[ctx._source.{key}];", + "lang": "painless", + "params": blockdict + } + return update + + async def update_trending_score(self, params): + update_trending_score_script = """ + double softenLBC(double lbc) { return (Math.pow(lbc, 1.0 / 3.0)); } + + double logsumexp(double x, double y) + { + double top; + if(x > y) + top = x; + else + top = y; + double result = top + Math.log(Math.exp(x-top) + Math.exp(y-top)); + return(result); + } + + double logdiffexp(double big, double small) + { + return big + Math.log(1.0 - Math.exp(small - big)); + } + + double squash(double x) + { + if(x < 0.0) + return -Math.log(1.0 - x); + else + return Math.log(x + 1.0); + } + + double unsquash(double x) + { + if(x < 0.0) + return 1.0 - Math.exp(-x); + else + return Math.exp(x) - 1.0; + } + + double log_to_squash(double x) + { + return logsumexp(x, 0.0); + } + + double squash_to_log(double x) + { + //assert x > 0.0; + return logdiffexp(x, 0.0); + } + + double squashed_add(double x, double y) + { + // squash(unsquash(x) + unsquash(y)) but 
avoiding overflow. + // Cases where the signs are the same + if (x < 0.0 && y < 0.0) + return -logsumexp(-x, logdiffexp(-y, 0.0)); + if (x >= 0.0 && y >= 0.0) + return logsumexp(x, logdiffexp(y, 0.0)); + // Where the signs differ + if (x >= 0.0 && y < 0.0) + if (Math.abs(x) >= Math.abs(y)) + return logsumexp(0.0, logdiffexp(x, -y)); + else + return -logsumexp(0.0, logdiffexp(-y, x)); + if (x < 0.0 && y >= 0.0) + { + // Addition is commutative, hooray for new math + return squashed_add(y, x); + } + return 0.0; + } + + double squashed_multiply(double x, double y) + { + // squash(unsquash(x)*unsquash(y)) but avoiding overflow. + int sign; + if(x*y >= 0.0) + sign = 1; + else + sign = -1; + return sign*logsumexp(squash_to_log(Math.abs(x)) + + squash_to_log(Math.abs(y)), 0.0); + } + + // Squashed inflated units + double inflateUnits(int height) { + double timescale = 576.0; // Half life of 400 = e-folding time of a day + // by coincidence, so may as well go with it + return log_to_squash(height / timescale); + } + + double spikePower(double newAmount) { + if (newAmount < 50.0) { + return(0.5); + } else if (newAmount < 85.0) { + return(newAmount / 100.0); + } else { + return(0.85); + } + } + + double spikeMass(double oldAmount, double newAmount) { + double softenedChange = softenLBC(Math.abs(newAmount - oldAmount)); + double changeInSoftened = Math.abs(softenLBC(newAmount) - softenLBC(oldAmount)); + double power = spikePower(newAmount); + if (oldAmount > newAmount) { + return -1.0 * Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power); + } else { + return Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power); + } + } + for (i in params.src.changes) { + double units = inflateUnits(i.height); + if (ctx._source.trending_score == null) { + ctx._source.trending_score = 0.0; + } + double bigSpike = squashed_multiply(units, squash(spikeMass(i.prev_amount, i.new_amount))); + ctx._source.trending_score = squashed_add(ctx._source.trending_score, bigSpike); + } + """ + start = time.perf_counter() + + def producer(): + for claim_id, claim_updates in params.items(): + yield { + '_id': claim_id, + '_index': self.index, + '_op_type': 'update', + 'script': { + 'lang': 'painless', + 'source': update_trending_score_script, + 'params': {'src': { + 'changes': [ + { + 'height': p.height, + 'prev_amount': p.prev_amount / 1E8, + 'new_amount': p.new_amount / 1E8, + } for p in claim_updates + ] + }} + }, + } + if not params: + return + async for ok, item in async_streaming_bulk(self.sync_client, producer(), raise_on_error=False): + if not ok: + self.logger.warning("updating trending failed for an item: %s", item) + await self.sync_client.indices.refresh(self.index) + self.logger.info("updated trending scores in %ims", int((time.perf_counter() - start) * 1000)) + + async def apply_filters(self, blocked_streams, blocked_channels, filtered_streams, filtered_channels): + if filtered_streams: + await self.sync_client.update_by_query( + self.index, body=self.update_filter_query(Censor.SEARCH, filtered_streams), slices=4) + await self.sync_client.indices.refresh(self.index) + if filtered_channels: + await self.sync_client.update_by_query( + self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels), slices=4) + await self.sync_client.indices.refresh(self.index) + await self.sync_client.update_by_query( + self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels, True), slices=4) + await self.sync_client.indices.refresh(self.index) + if blocked_streams: + await 
self.sync_client.update_by_query( + self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_streams), slices=4) + await self.sync_client.indices.refresh(self.index) + if blocked_channels: + await self.sync_client.update_by_query( + self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels), slices=4) + await self.sync_client.indices.refresh(self.index) + await self.sync_client.update_by_query( + self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels, True), slices=4) + await self.sync_client.indices.refresh(self.index) + self.clear_caches() + + def clear_caches(self): + self.search_cache.clear() + self.claim_cache.clear() + + async def cached_search(self, kwargs): + total_referenced = [] + cache_item = ResultCacheItem.from_cache(str(kwargs), self.search_cache) + if cache_item.result is not None: + return cache_item.result + async with cache_item.lock: + if cache_item.result: + return cache_item.result + censor = Censor(Censor.SEARCH) + if kwargs.get('no_totals'): + response, offset, total = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED) + else: + response, offset, total = await self.search(**kwargs) + censor.apply(response) + total_referenced.extend(response) + + if censor.censored: + response, _, _ = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED) + total_referenced.extend(response) + response = [ + ResolveResult( + name=r['claim_name'], + normalized_name=r['normalized_name'], + claim_hash=r['claim_hash'], + tx_num=r['tx_num'], + position=r['tx_nout'], + tx_hash=r['tx_hash'], + height=r['height'], + amount=r['amount'], + short_url=r['short_url'], + is_controlling=r['is_controlling'], + canonical_url=r['canonical_url'], + creation_height=r['creation_height'], + activation_height=r['activation_height'], + expiration_height=r['expiration_height'], + effective_amount=r['effective_amount'], + support_amount=r['support_amount'], + last_takeover_height=r['last_take_over_height'], + claims_in_channel=r['claims_in_channel'], + channel_hash=r['channel_hash'], + reposted_claim_hash=r['reposted_claim_hash'], + reposted=r['reposted'], + signature_valid=r['signature_valid'] + ) for r in response + ] + extra = [ + ResolveResult( + name=r['claim_name'], + normalized_name=r['normalized_name'], + claim_hash=r['claim_hash'], + tx_num=r['tx_num'], + position=r['tx_nout'], + tx_hash=r['tx_hash'], + height=r['height'], + amount=r['amount'], + short_url=r['short_url'], + is_controlling=r['is_controlling'], + canonical_url=r['canonical_url'], + creation_height=r['creation_height'], + activation_height=r['activation_height'], + expiration_height=r['expiration_height'], + effective_amount=r['effective_amount'], + support_amount=r['support_amount'], + last_takeover_height=r['last_take_over_height'], + claims_in_channel=r['claims_in_channel'], + channel_hash=r['channel_hash'], + reposted_claim_hash=r['reposted_claim_hash'], + reposted=r['reposted'], + signature_valid=r['signature_valid'] + ) for r in await self._get_referenced_rows(total_referenced) + ] + result = Outputs.to_base64( + response, extra, offset, total, censor + ) + cache_item.result = result + return result + + async def get_many(self, *claim_ids): + await self.populate_claim_cache(*claim_ids) + return filter(None, map(self.claim_cache.get, claim_ids)) + + async def populate_claim_cache(self, *claim_ids): + missing = [claim_id for claim_id in claim_ids if self.claim_cache.get(claim_id) is None] + if missing: + results = await self.search_client.mget( + index=self.index, 
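+
+                # populate_claim_cache only round-trips to ES for ids missing
+                # from the LRU cache; mget returns a doc entry per requested id,
+                # with found=False placeholders that the filter below drops.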
body={"ids": missing} + ) + for result in expand_result(filter(lambda doc: doc['found'], results["docs"])): + self.claim_cache.set(result['claim_id'], result) + + + async def search(self, **kwargs): + try: + return await self.search_ahead(**kwargs) + except NotFoundError: + return [], 0, 0 + # return expand_result(result['hits']), 0, result.get('total', {}).get('value', 0) + + async def search_ahead(self, **kwargs): + # 'limit_claims_per_channel' case. Fetch 1000 results, reorder, slice, inflate and return + per_channel_per_page = kwargs.pop('limit_claims_per_channel', 0) or 0 + remove_duplicates = kwargs.pop('remove_duplicates', False) + page_size = kwargs.pop('limit', 10) + offset = kwargs.pop('offset', 0) + kwargs['limit'] = 1000 + cache_item = ResultCacheItem.from_cache(f"ahead{per_channel_per_page}{kwargs}", self.search_cache) + if cache_item.result is not None: + reordered_hits = cache_item.result + else: + async with cache_item.lock: + if cache_item.result: + reordered_hits = cache_item.result + else: + query = expand_query(**kwargs) + search_hits = deque((await self.search_client.search( + query, index=self.index, track_total_hits=False, + _source_includes=['_id', 'channel_id', 'reposted_claim_id', 'creation_height'] + ))['hits']['hits']) + if remove_duplicates: + search_hits = self.__remove_duplicates(search_hits) + if per_channel_per_page > 0: + reordered_hits = self.__search_ahead(search_hits, page_size, per_channel_per_page) + else: + reordered_hits = [(hit['_id'], hit['_source']['channel_id']) for hit in search_hits] + cache_item.result = reordered_hits + result = list(await self.get_many(*(claim_id for claim_id, _ in reordered_hits[offset:(offset + page_size)]))) + return result, 0, len(reordered_hits) + + def __remove_duplicates(self, search_hits: deque) -> deque: + known_ids = {} # claim_id -> (creation_height, hit_id), where hit_id is either reposted claim id or original + dropped = set() + for hit in search_hits: + hit_height, hit_id = hit['_source']['creation_height'], hit['_source']['reposted_claim_id'] or hit['_id'] + if hit_id not in known_ids: + known_ids[hit_id] = (hit_height, hit['_id']) + else: + previous_height, previous_id = known_ids[hit_id] + if hit_height < previous_height: + known_ids[hit_id] = (hit_height, hit['_id']) + dropped.add(previous_id) + else: + dropped.add(hit['_id']) + return deque(hit for hit in search_hits if hit['_id'] not in dropped) + + def __search_ahead(self, search_hits: list, page_size: int, per_channel_per_page: int): + reordered_hits = [] + channel_counters = Counter() + next_page_hits_maybe_check_later = deque() + while search_hits or next_page_hits_maybe_check_later: + if reordered_hits and len(reordered_hits) % page_size == 0: + channel_counters.clear() + elif not reordered_hits: + pass + else: + break # means last page was incomplete and we are left with bad replacements + for _ in range(len(next_page_hits_maybe_check_later)): + claim_id, channel_id = next_page_hits_maybe_check_later.popleft() + if per_channel_per_page > 0 and channel_counters[channel_id] < per_channel_per_page: + reordered_hits.append((claim_id, channel_id)) + channel_counters[channel_id] += 1 + else: + next_page_hits_maybe_check_later.append((claim_id, channel_id)) + while search_hits: + hit = search_hits.popleft() + hit_id, hit_channel_id = hit['_id'], hit['_source']['channel_id'] + if hit_channel_id is None or per_channel_per_page <= 0: + reordered_hits.append((hit_id, hit_channel_id)) + elif channel_counters[hit_channel_id] < per_channel_per_page: + 
reordered_hits.append((hit_id, hit_channel_id)) + channel_counters[hit_channel_id] += 1 + if len(reordered_hits) % page_size == 0: + break + else: + next_page_hits_maybe_check_later.append((hit_id, hit_channel_id)) + return reordered_hits + + async def _get_referenced_rows(self, txo_rows: List[dict]): + txo_rows = [row for row in txo_rows if isinstance(row, dict)] + referenced_ids = set(filter(None, map(itemgetter('reposted_claim_id'), txo_rows))) + referenced_ids |= set(filter(None, (row['channel_id'] for row in txo_rows))) + referenced_ids |= set(filter(None, (row['censoring_channel_id'] for row in txo_rows))) + + referenced_txos = [] + if referenced_ids: + referenced_txos.extend(await self.get_many(*referenced_ids)) + referenced_ids = set(filter(None, (row['channel_id'] for row in referenced_txos))) + + if referenced_ids: + referenced_txos.extend(await self.get_many(*referenced_ids)) + + return referenced_txos + + +def expand_query(**kwargs): + if "amount_order" in kwargs: + kwargs["limit"] = 1 + kwargs["order_by"] = "effective_amount" + kwargs["offset"] = int(kwargs["amount_order"]) - 1 + if 'name' in kwargs: + kwargs['name'] = normalize_name(kwargs.pop('name')) + if kwargs.get('is_controlling') is False: + kwargs.pop('is_controlling') + query = {'must': [], 'must_not': []} + collapse = None + if 'fee_currency' in kwargs and kwargs['fee_currency'] is not None: + kwargs['fee_currency'] = kwargs['fee_currency'].upper() + for key, value in kwargs.items(): + key = key.replace('claim.', '') + many = key.endswith('__in') or isinstance(value, list) + if many and len(value) > 2048: + raise TooManyClaimSearchParametersError(key, 2048) + if many: + key = key.replace('__in', '') + value = list(filter(None, value)) + if value is None or isinstance(value, list) and len(value) == 0: + continue + key = REPLACEMENTS.get(key, key) + if key in FIELDS: + partial_id = False + if key == 'claim_type': + if isinstance(value, str): + value = CLAIM_TYPES[value] + else: + value = [CLAIM_TYPES[claim_type] for claim_type in value] + elif key == 'stream_type': + value = [STREAM_TYPES[value]] if isinstance(value, str) else list(map(STREAM_TYPES.get, value)) + if key == '_id': + if isinstance(value, Iterable): + value = [item[::-1].hex() for item in value] + else: + value = value[::-1].hex() + if not many and key in ('_id', 'claim_id', 'sd_hash') and len(value) < 20: + partial_id = True + if key in ('signature_valid', 'has_source'): + continue # handled later + if key in TEXT_FIELDS: + key += '.keyword' + ops = {'<=': 'lte', '>=': 'gte', '<': 'lt', '>': 'gt'} + if partial_id: + query['must'].append({"prefix": {key: value}}) + elif key in RANGE_FIELDS and isinstance(value, str) and value[0] in ops: + operator_length = 2 if value[:2] in ops else 1 + operator, value = value[:operator_length], value[operator_length:] + if key == 'fee_amount': + value = str(Decimal(value)*1000) + query['must'].append({"range": {key: {ops[operator]: value}}}) + elif key in RANGE_FIELDS and isinstance(value, list) and all(v[0] in ops for v in value): + range_constraints = [] + for v in value: + operator_length = 2 if v[:2] in ops else 1 + operator, stripped_op_v = v[:operator_length], v[operator_length:] + if key == 'fee_amount': + stripped_op_v = str(Decimal(stripped_op_v)*1000) + range_constraints.append((operator, stripped_op_v)) + query['must'].append({"range": {key: {ops[operator]: v for operator, v in range_constraints}}}) + elif many: + query['must'].append({"terms": {key: value}}) + else: + if key == 'fee_amount': + value = 
str(Decimal(value)*1000)
+                query['must'].append({"term": {key: {"value": value}}})
+        elif key == 'not_channel_ids':
+            for channel_id in value:
+                query['must_not'].append({"term": {'channel_id.keyword': channel_id}})
+                query['must_not'].append({"term": {'_id': channel_id}})
+        elif key == 'channel_ids':
+            query['must'].append({"terms": {'channel_id.keyword': value}})
+        elif key == 'claim_ids':
+            query['must'].append({"terms": {'claim_id.keyword': value}})
+        elif key == 'media_types':
+            query['must'].append({"terms": {'media_type.keyword': value}})
+        elif key == 'any_languages':
+            query['must'].append({"terms": {'languages': clean_tags(value)}})
+        elif key == 'all_languages':
+            query['must'].extend([{"term": {'languages': tag}} for tag in value])
+        elif key == 'any_tags':
+            query['must'].append({"terms": {'tags.keyword': clean_tags(value)}})
+        elif key == 'all_tags':
+            query['must'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
+        elif key == 'not_tags':
+            query['must_not'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
+        elif key == 'not_claim_id':
+            query['must_not'].extend([{"term": {'claim_id.keyword': cid}} for cid in value])
+        elif key == 'limit_claims_per_channel':
+            collapse = ('channel_id.keyword', value)
+    if kwargs.get('has_channel_signature'):
+        query['must'].append({"exists": {"field": "signature"}})
+        if 'signature_valid' in kwargs:
+            query['must'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
+    elif 'signature_valid' in kwargs:
+        query.setdefault('should', [])
+        query["minimum_should_match"] = 1
+        query['should'].append({"bool": {"must_not": {"exists": {"field": "signature"}}}})
+        query['should'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
+    if 'has_source' in kwargs:
+        query.setdefault('should', [])
+        query["minimum_should_match"] = 1
+        is_stream_or_repost = {"terms": {"claim_type": [CLAIM_TYPES['stream'], CLAIM_TYPES['repost']]}}
+        query['should'].append(
+            {"bool": {"must": [{"match": {"has_source": kwargs['has_source']}}, is_stream_or_repost]}})
+        query['should'].append({"bool": {"must_not": [is_stream_or_repost]}})
+        query['should'].append({"bool": {"must": [{"term": {"reposted_claim_type": CLAIM_TYPES['channel']}}]}})
+    if kwargs.get('text'):
+        query['must'].append(
+            {"simple_query_string":
+                 {"query": kwargs["text"], "fields": [
+                     "claim_name^4", "channel_name^8", "title^1", "description^.5", "author^1", "tags^.5"
+                 ]}})
+    query = {
+        "_source": {"excludes": ["description", "title"]},
+        'query': {'bool': query},
+        "sort": [],
+    }
+    if "limit" in kwargs:
+        query["size"] = kwargs["limit"]
+    if 'offset' in kwargs:
+        query["from"] = kwargs["offset"]
+    if 'order_by' in kwargs:
+        if isinstance(kwargs["order_by"], str):
+            kwargs["order_by"] = [kwargs["order_by"]]
+        for value in kwargs['order_by']:
+            if 'trending_group' in value:
+                # fixme: trending_mixed is 0 for all records on variable decay, making sort slow.
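+                # skip this sort key entirely until the decay issue is fixed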
+                continue
+            is_asc = value.startswith('^')
+            value = value[1:] if is_asc else value
+            value = REPLACEMENTS.get(value, value)
+            if value in TEXT_FIELDS:
+                value += '.keyword'
+            query['sort'].append({value: "asc" if is_asc else "desc"})
+    if collapse:
+        query["collapse"] = {
+            "field": collapse[0],
+            "inner_hits": {
+                "name": collapse[0],
+                "size": collapse[1],
+                "sort": query["sort"]
+            }
+        }
+    return query
+
+
+def expand_result(results):
+    inner_hits = []
+    expanded = []
+    for result in results:
+        if result.get("inner_hits"):
+            for _, inner_hit in result["inner_hits"].items():
+                inner_hits.extend(inner_hit["hits"]["hits"])
+            continue
+        result = result['_source']
+        result['claim_hash'] = unhexlify(result['claim_id'])[::-1]
+        if result['reposted_claim_id']:
+            result['reposted_claim_hash'] = unhexlify(result['reposted_claim_id'])[::-1]
+        else:
+            result['reposted_claim_hash'] = None
+        result['channel_hash'] = unhexlify(result['channel_id'])[::-1] if result['channel_id'] else None
+        result['txo_hash'] = unhexlify(result['tx_id'])[::-1] + struct.pack('<I', result['tx_nout'])
+        result['tx_hash'] = unhexlify(result['tx_id'])[::-1]
+        result['reposted'] = result.pop('repost_count')
+        result['signature_valid'] = result.pop('is_signature_valid')
+        expanded.append(result)
+    if inner_hits:
+        return expand_result(inner_hits)
+    return expanded
+
+
+class ResultCacheItem:
+    __slots__ = '_result', 'lock', 'has_result'
+
+    def __init__(self):
+        self.has_result = asyncio.Event()
+        self.lock = asyncio.Lock()
+        self._result = None
+
+    @property
+    def result(self) -> str:
+        return self._result
+
+    @result.setter
+    def result(self, result: str):
+        self._result = result
+        if result is not None:
+            self.has_result.set()
+
+    @classmethod
+    def from_cache(cls, cache_key, cache):
+        cache_item = cache.get(cache_key)
+        if cache_item is None:
+            cache_item = cache[cache_key] = ResultCacheItem()
+        return cache_item
diff --git a/scribe/env.py b/scribe/env.py
new file mode 100644
index 0000000..d1c6097
--- /dev/null
+++ b/scribe/env.py
@@ -0,0 +1,393 @@
+import os
+import re
+import resource
+import logging
+from collections import namedtuple
+from ipaddress import ip_address
+from scribe.blockchain.network import LBCMainNet, LBCTestNet, LBCRegTest
+
+
+NetIdentity = namedtuple('NetIdentity', 'host tcp_port ssl_port nick_suffix')
+
+
+SEGMENT_REGEX = re.compile("(?!-)[A-Z_\\d-]{1,63}(?<!-)$", re.IGNORECASE)
+
+
+def is_valid_hostname(hostname):
+    if len(hostname) >
255: + return False + # strip exactly one dot from the right, if present + if hostname and hostname[-1] == ".": + hostname = hostname[:-1] + return all(SEGMENT_REGEX.match(x) for x in hostname.split(".")) + + +class Env: + + # Peer discovery + PD_OFF, PD_SELF, PD_ON = range(3) + + class Error(Exception): + pass + + def __init__(self, db_dir=None, daemon_url=None, host=None, rpc_host=None, elastic_host=None, + elastic_port=None, loop_policy=None, max_query_workers=None, websocket_host=None, websocket_port=None, + chain=None, es_index_prefix=None, cache_MB=None, reorg_limit=None, tcp_port=None, + udp_port=None, ssl_port=None, ssl_certfile=None, ssl_keyfile=None, rpc_port=None, + prometheus_port=None, max_subscriptions=None, banner_file=None, anon_logs=None, log_sessions=None, + allow_lan_udp=None, cache_all_tx_hashes=None, cache_all_claim_txos=None, country=None, + payment_address=None, donation_address=None, max_send=None, max_receive=None, max_sessions=None, + session_timeout=None, drop_client=None, description=None, daily_fee=None, + database_query_timeout=None, db_max_open_files=64, elastic_notifier_port=None, + blocking_channel_ids=None, filtering_channel_ids=None, peer_hubs=None, peer_announce=None): + self.logger = logging.getLogger(__name__) + self.db_dir = db_dir if db_dir is not None else self.required('DB_DIRECTORY') + self.daemon_url = daemon_url if daemon_url is not None else self.required('DAEMON_URL') + self.db_max_open_files = db_max_open_files + + self.host = host if host is not None else self.default('HOST', 'localhost') + self.rpc_host = rpc_host if rpc_host is not None else self.default('RPC_HOST', 'localhost') + self.elastic_host = elastic_host if elastic_host is not None else self.default('ELASTIC_HOST', 'localhost') + self.elastic_port = elastic_port if elastic_port is not None else self.integer('ELASTIC_PORT', 9200) + self.elastic_notifier_port = elastic_notifier_port if elastic_notifier_port is not None else self.integer('ELASTIC_NOTIFIER_PORT', 19080) + + self.loop_policy = self.set_event_loop_policy( + loop_policy if loop_policy is not None else self.default('EVENT_LOOP_POLICY', None) + ) + self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK']) + self.max_query_workers = max_query_workers if max_query_workers is not None else self.integer('MAX_QUERY_WORKERS', 4) + self.websocket_host = websocket_host if websocket_host is not None else self.default('WEBSOCKET_HOST', self.host) + self.websocket_port = websocket_port if websocket_port is not None else self.integer('WEBSOCKET_PORT', None) + if chain == 'mainnet': + self.coin = LBCMainNet + elif chain == 'testnet': + self.coin = LBCTestNet + else: + self.coin = LBCRegTest + self.es_index_prefix = es_index_prefix if es_index_prefix is not None else self.default('ES_INDEX_PREFIX', '') + self.cache_MB = cache_MB if cache_MB is not None else self.integer('CACHE_MB', 1024) + self.reorg_limit = reorg_limit if reorg_limit is not None else self.integer('REORG_LIMIT', self.coin.REORG_LIMIT) + # Server stuff + self.tcp_port = tcp_port if tcp_port is not None else self.integer('TCP_PORT', None) + self.udp_port = udp_port if udp_port is not None else self.integer('UDP_PORT', self.tcp_port) + self.ssl_port = ssl_port if ssl_port is not None else self.integer('SSL_PORT', None) + if self.ssl_port: + self.ssl_certfile = ssl_certfile if ssl_certfile is not None else self.required('SSL_CERTFILE') + self.ssl_keyfile = ssl_keyfile if ssl_keyfile is not None else self.required('SSL_KEYFILE') + self.rpc_port = rpc_port if rpc_port is not None else 
self.integer('RPC_PORT', 8000)
+        self.prometheus_port = prometheus_port if prometheus_port is not None else self.integer('PROMETHEUS_PORT', 0)
+        self.max_subscriptions = max_subscriptions if max_subscriptions is not None else self.integer('MAX_SUBSCRIPTIONS', 10000)
+        self.banner_file = banner_file if banner_file is not None else self.default('BANNER_FILE', None)
+        # self.tor_banner_file = self.default('TOR_BANNER_FILE', self.banner_file)
+        self.anon_logs = anon_logs if anon_logs is not None else self.boolean('ANON_LOGS', False)
+        self.log_sessions = log_sessions if log_sessions is not None else self.integer('LOG_SESSIONS', 3600)
+        self.allow_lan_udp = allow_lan_udp if allow_lan_udp is not None else self.boolean('ALLOW_LAN_UDP', False)
+        self.cache_all_tx_hashes = cache_all_tx_hashes if cache_all_tx_hashes is not None else self.boolean('CACHE_ALL_TX_HASHES', False)
+        self.cache_all_claim_txos = cache_all_claim_txos if cache_all_claim_txos is not None else self.boolean('CACHE_ALL_CLAIM_TXOS', False)
+        self.country = country if country is not None else self.default('COUNTRY', 'US')
+        # Peer discovery
+        self.peer_discovery = self.peer_discovery_enum()
+        self.peer_announce = peer_announce if peer_announce is not None else self.boolean('PEER_ANNOUNCE', True)
+        if peer_hubs is not None:
+            self.peer_hubs = [p.strip() for p in peer_hubs.split(",")]
+        else:
+            self.peer_hubs = self.extract_peer_hubs()
+        # self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
+        # self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
+        # The electrum client takes the empty string as unspecified
+        self.payment_address = payment_address if payment_address is not None else self.default('PAYMENT_ADDRESS', '')
+        self.donation_address = donation_address if donation_address is not None else self.default('DONATION_ADDRESS', '')
+        # Server limits to help prevent DoS
+        self.max_send = max_send if max_send is not None else self.integer('MAX_SEND', 1000000)
+        self.max_receive = max_receive if max_receive is not None else self.integer('MAX_RECEIVE', 1000000)
+        # self.max_subs = self.integer('MAX_SUBS', 250000)
+        self.max_sessions = max_sessions if max_sessions is not None else self.sane_max_sessions()
+        # self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
+        self.session_timeout = session_timeout if session_timeout is not None else self.integer('SESSION_TIMEOUT', 600)
+        self.drop_client = drop_client if drop_client is not None else self.custom("DROP_CLIENT", None, re.compile)
+        self.description = description if description is not None else self.default('DESCRIPTION', '')
+        self.daily_fee = daily_fee if daily_fee is not None else self.string_amount('DAILY_FEE', '0')
+
+        # Identities
+        clearnet_identity = self.clearnet_identity()
+        tor_identity = self.tor_identity(clearnet_identity)
+        self.identities = [identity
+                           for identity in (clearnet_identity, tor_identity)
+                           if identity is not None]
+        self.database_query_timeout = database_query_timeout if database_query_timeout is not None else \
+            (float(self.integer('QUERY_TIMEOUT_MS', 10000)) / 1000.0)
+
+        # Filtering / Blocking
+        self.blocking_channel_ids = blocking_channel_ids if blocking_channel_ids is not None else self.default('BLOCKING_CHANNEL_IDS', '').split(' ')
+        self.filtering_channel_ids = filtering_channel_ids if filtering_channel_ids is not None else self.default('FILTERING_CHANNEL_IDS', '').split(' ')
+
+    @classmethod
+    def default(cls, envvar, default):
+        return os.environ.get(envvar, default)
+
+    @classmethod
+    def boolean(cls, envvar, default):
+        default = 'Yes' if default else ''
+        return bool(cls.default(envvar, default).strip())
+
+    @classmethod
+    def required(cls, envvar):
+        value = os.environ.get(envvar)
+        if value is None:
+            raise cls.Error(f'required envvar {envvar} not set')
+        return value
+
+    @classmethod
+    def string_amount(cls, envvar, default):
+        value = os.environ.get(envvar, default)
+        amount_pattern = re.compile("[0-9]{0,10}(\.[0-9]{1,8})?")
+        if len(value) > 0 and not amount_pattern.fullmatch(value):
+            raise cls.Error(f'{value} is not a valid amount for {envvar}')
+        return value
+
+    @classmethod
+    def integer(cls, envvar, default):
+        value = os.environ.get(envvar)
+        if value is None:
+            return default
+        try:
+            return int(value)
+        except Exception:
+            raise cls.Error(f'cannot convert envvar {envvar} value {value} to an integer')
+
+    @classmethod
+    def custom(cls, envvar, default, parse):
+        value = os.environ.get(envvar)
+        if value is None:
+            return default
+        try:
+            return parse(value)
+        except Exception as e:
+            raise cls.Error(f'cannot parse envvar {envvar} value {value}') from e
+
+    @classmethod
+    def obsolete(cls, envvars):
+        bad = [envvar for envvar in envvars if os.environ.get(envvar)]
+        if bad:
+            raise cls.Error(f'remove obsolete environment variables {bad}')
+
+    @classmethod
+    def set_event_loop_policy(cls, policy_name: str = None):
+        if not policy_name or policy_name == 'default':
+            import asyncio
+            return asyncio.get_event_loop_policy()
+        elif policy_name == 'uvloop':
+            import uvloop
+            import asyncio
+            loop_policy = uvloop.EventLoopPolicy()
+            asyncio.set_event_loop_policy(loop_policy)
+            return loop_policy
+        raise cls.Error(f'unknown event loop policy "{policy_name}"')
+
+    def cs_host(self, *, for_rpc):
+        """Returns the 'host' argument to pass to asyncio's create_server
+        call. The result can be a single host name string, a list of
+        host name strings, or an empty string to bind to all interfaces.
+
+        If for_rpc is True the host to use for the RPC server is returned.
+        Otherwise the host to use for SSL/TCP servers is returned.
+        """
+        host = self.rpc_host if for_rpc else self.host
+        result = [part.strip() for part in host.split(',')]
+        if len(result) == 1:
+            result = result[0]
+        # An empty result indicates all interfaces, which we do not
+        # permit for an RPC server.
+        if for_rpc and not result:
+            result = 'localhost'
+        if result == 'localhost':
+            # 'localhost' resolves to ::1 (ipv6) on many systems, which fails on default setup of
+            # docker, using 127.0.0.1 instead forces ipv4
+            result = '127.0.0.1'
+        return result
+
+    def sane_max_sessions(self):
+        """Return the maximum number of sessions to permit. Normally this
+        is MAX_SESSIONS.
+        However, to prevent open file exhaustion, adjust
+        downwards if running with a small open file rlimit."""
+        env_value = self.integer('MAX_SESSIONS', 1000)
+        nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
+        # We give the DB 250 files; allow ElectrumX 100 for itself
+        value = max(0, min(env_value, nofile_limit - 350))
+        if value < env_value:
+            self.logger.warning(f'lowered maximum sessions from {env_value:,d} to {value:,d} '
+                                f'because your open file limit is {nofile_limit:,d}')
+        return value
+
+    def clearnet_identity(self):
+        host = self.default('REPORT_HOST', None)
+        if host is None:
+            return None
+        try:
+            ip = ip_address(host)
+        except ValueError:
+            bad = (not is_valid_hostname(host)
+                   or host.lower() == 'localhost')
+        else:
+            bad = (ip.is_multicast or ip.is_unspecified
+                   or (ip.is_private and self.peer_announce))
+        if bad:
+            raise self.Error(f'"{host}" is not a valid REPORT_HOST')
+        tcp_port = self.integer('REPORT_TCP_PORT', self.tcp_port) or None
+        ssl_port = self.integer('REPORT_SSL_PORT', self.ssl_port) or None
+        if tcp_port == ssl_port:
+            raise self.Error('REPORT_TCP_PORT and REPORT_SSL_PORT '
+                             f'both resolve to {tcp_port}')
+        return NetIdentity(
+            host,
+            tcp_port,
+            ssl_port,
+            ''
+        )
+
+    def tor_identity(self, clearnet):
+        host = self.default('REPORT_HOST_TOR', None)
+        if host is None:
+            return None
+        if not host.endswith('.onion'):
+            raise self.Error(f'tor host "{host}" must end with ".onion"')
+
+        def port(port_kind):
+            """Returns the clearnet identity port, if any and not zero,
+            otherwise the listening port."""
+            result = 0
+            if clearnet:
+                result = getattr(clearnet, port_kind)
+            return result or getattr(self, port_kind)
+
+        tcp_port = self.integer('REPORT_TCP_PORT_TOR',
+                                port('tcp_port')) or None
+        ssl_port = self.integer('REPORT_SSL_PORT_TOR',
+                                port('ssl_port')) or None
+        if tcp_port == ssl_port:
+            raise self.Error('REPORT_TCP_PORT_TOR and REPORT_SSL_PORT_TOR '
+                             f'both resolve to {tcp_port}')
+
+        return NetIdentity(
+            host,
+            tcp_port,
+            ssl_port,
+            '_tor',
+        )
+
+    def hosts_dict(self):
+        return {identity.host: {'tcp_port': identity.tcp_port,
+                                'ssl_port': identity.ssl_port}
+                for identity in self.identities}
+
+    def peer_discovery_enum(self):
+        pd = self.default('PEER_DISCOVERY', 'on').strip().lower()
+        if pd in ('off', ''):
+            return self.PD_OFF
+        elif pd == 'self':
+            return self.PD_SELF
+        else:
+            return self.PD_ON
+
+    def extract_peer_hubs(self):
+        return [hub.strip() for hub in self.default('PEER_HUBS', '').split(',')]
+
+    @classmethod
+    def contribute_to_arg_parser(cls, parser):
+        parser.add_argument('--db_dir', type=str, help='path of the directory containing lbry-leveldb',
+                            default=cls.default('DB_DIRECTORY', None))
+        parser.add_argument('--daemon_url',
+                            help='URL for rpc from lbrycrd, <rpcuser>:<rpcpassword>@<lbrycrd rpc address>',
+                            default=cls.default('DAEMON_URL', None))
+        parser.add_argument('--db_max_open_files', type=int, default=64,
+                            help='number of files rocksdb can have open at a time')
+        parser.add_argument('--host', type=str, default=cls.default('HOST', 'localhost'),
+                            help='Interface for hub server to listen on')
+        parser.add_argument('--tcp_port', type=int, default=cls.integer('TCP_PORT', 50001),
+                            help='TCP port to listen on for hub server')
+        parser.add_argument('--udp_port', type=int, default=cls.integer('UDP_PORT', 50001),
+                            help='UDP port to listen on for hub server')
+        parser.add_argument('--rpc_host', default=cls.default('RPC_HOST', 'localhost'), type=str,
+                            help='Listening interface for admin rpc')
+        parser.add_argument('--rpc_port', default=cls.integer('RPC_PORT',
8000), type=int, + help='Listening port for admin rpc') + parser.add_argument('--websocket_host', default=cls.default('WEBSOCKET_HOST', 'localhost'), type=str, + help='Listening interface for websocket') + parser.add_argument('--websocket_port', default=cls.integer('WEBSOCKET_PORT', None), type=int, + help='Listening port for websocket') + + parser.add_argument('--ssl_port', default=cls.integer('SSL_PORT', None), type=int, + help='SSL port to listen on for hub server') + parser.add_argument('--ssl_certfile', default=cls.default('SSL_CERTFILE', None), type=str, + help='Path to SSL cert file') + parser.add_argument('--ssl_keyfile', default=cls.default('SSL_KEYFILE', None), type=str, + help='Path to SSL key file') + parser.add_argument('--reorg_limit', default=cls.integer('REORG_LIMIT', 200), type=int, help='Max reorg depth') + parser.add_argument('--elastic_host', default=cls.default('ELASTIC_HOST', 'localhost'), type=str, + help='elasticsearch host') + parser.add_argument('--elastic_port', default=cls.integer('ELASTIC_PORT', 9200), type=int, + help='elasticsearch port') + parser.add_argument('--es_index_prefix', default=cls.default('ES_INDEX_PREFIX', ''), type=str) + parser.add_argument('--loop_policy', default=cls.default('EVENT_LOOP_POLICY', 'default'), type=str, + choices=['default', 'uvloop']) + parser.add_argument('--max_query_workers', type=int, default=cls.integer('MAX_QUERY_WORKERS', 4), + help='number of threads used by the request handler to read the database') + parser.add_argument('--cache_MB', type=int, default=cls.integer('CACHE_MB', 1024), + help='size of the leveldb lru cache, in megabytes') + parser.add_argument('--cache_all_tx_hashes', type=bool, + help='Load all tx hashes into memory. This will make address subscriptions and sync, ' + 'resolve, transaction fetching, and block sync all faster at the expense of higher ' + 'memory usage') + parser.add_argument('--cache_all_claim_txos', type=bool, + help='Load all claim txos into memory. 
This will make address subscriptions and sync, ' + 'resolve, transaction fetching, and block sync all faster at the expense of higher ' + 'memory usage') + parser.add_argument('--prometheus_port', type=int, default=cls.integer('PROMETHEUS_PORT', 0), + help='port for hub prometheus metrics to listen on, disabled by default') + parser.add_argument('--max_subscriptions', type=int, default=cls.integer('MAX_SUBSCRIPTIONS', 10000), + help='max subscriptions per connection') + parser.add_argument('--banner_file', type=str, default=cls.default('BANNER_FILE', None), + help='path to file containing banner text') + parser.add_argument('--anon_logs', type=bool, default=cls.boolean('ANON_LOGS', False), + help="don't log ip addresses") + parser.add_argument('--allow_lan_udp', type=bool, default=cls.boolean('ALLOW_LAN_UDP', False), + help='reply to hub UDP ping messages from LAN ip addresses') + parser.add_argument('--country', type=str, default=cls.default('COUNTRY', 'US'), help='') + parser.add_argument('--max_send', type=int, default=cls.default('MAX_SEND', 1000000), help='') + parser.add_argument('--max_receive', type=int, default=cls.default('MAX_RECEIVE', 1000000), help='') + parser.add_argument('--max_sessions', type=int, default=cls.default('MAX_SESSIONS', 1000), help='') + parser.add_argument('--session_timeout', type=int, default=cls.default('SESSION_TIMEOUT', 600), help='') + parser.add_argument('--drop_client', type=str, default=cls.default('DROP_CLIENT', None), help='') + parser.add_argument('--description', type=str, default=cls.default('DESCRIPTION', ''), help='') + parser.add_argument('--daily_fee', type=float, default=cls.default('DAILY_FEE', 0.0), help='') + parser.add_argument('--payment_address', type=str, default=cls.default('PAYMENT_ADDRESS', ''), help='') + parser.add_argument('--donation_address', type=str, default=cls.default('DONATION_ADDRESS', ''), help='') + parser.add_argument('--chain', type=str, default=cls.default('NET', 'mainnet'), + help="Which chain to use, default is mainnet", choices=['mainnet', 'regtest', 'testnet']) + parser.add_argument('--query_timeout_ms', type=int, default=cls.integer('QUERY_TIMEOUT_MS', 10000), + help="elasticsearch query timeout") + + parser.add_argument('--blocking_channel_ids', nargs='*', help='', + default=cls.default('BLOCKING_CHANNEL_IDS', '').split(' ')) + parser.add_argument('--filtering_channel_ids', nargs='*', help='', + default=cls.default('FILTERING_CHANNEL_IDS', '').split(' ')) + + @classmethod + def from_arg_parser(cls, args): + return cls( + db_dir=args.db_dir, daemon_url=args.daemon_url, db_max_open_files=args.db_max_open_files, + host=args.host, rpc_host=args.rpc_host, elastic_host=args.elastic_host, elastic_port=args.elastic_port, + loop_policy=args.loop_policy, max_query_workers=args.max_query_workers, websocket_host=args.websocket_host, + websocket_port=args.websocket_port, chain=args.chain, es_index_prefix=args.es_index_prefix, + cache_MB=args.cache_MB, reorg_limit=args.reorg_limit, tcp_port=args.tcp_port, + udp_port=args.udp_port, ssl_port=args.ssl_port, ssl_certfile=args.ssl_certfile, + ssl_keyfile=args.ssl_keyfile, rpc_port=args.rpc_port, prometheus_port=args.prometheus_port, + max_subscriptions=args.max_subscriptions, banner_file=args.banner_file, anon_logs=args.anon_logs, + log_sessions=None, allow_lan_udp=args.allow_lan_udp, + cache_all_tx_hashes=args.cache_all_tx_hashes, cache_all_claim_txos=args.cache_all_claim_txos, + country=args.country, payment_address=args.payment_address, 
donation_address=args.donation_address,
+            max_send=args.max_send, max_receive=args.max_receive, max_sessions=args.max_sessions,
+            session_timeout=args.session_timeout, drop_client=args.drop_client, description=args.description,
+            daily_fee=args.daily_fee, database_query_timeout=(args.query_timeout_ms / 1000),
+            blocking_channel_ids=args.blocking_channel_ids, filtering_channel_ids=args.filtering_channel_ids
+        )
diff --git a/scribe/error/Makefile b/scribe/error/Makefile
new file mode 100644
index 0000000..d064ef2
--- /dev/null
+++ b/scribe/error/Makefile
@@ -0,0 +1,5 @@
+generate:
+	python generate.py generate > __init__.py
+
+analyze:
+	python generate.py analyze
diff --git a/scribe/error/README.md b/scribe/error/README.md
new file mode 100644
index 0000000..cc5ab7a
--- /dev/null
+++ b/scribe/error/README.md
@@ -0,0 +1,95 @@
+# Exceptions
+
+Exceptions in LBRY are defined and generated from the Markdown table at the end of this README.
+
+## Guidelines
+
+When possible, use [built-in Python exceptions](https://docs.python.org/3/library/exceptions.html) or `aiohttp` [general client](https://docs.aiohttp.org/en/latest/client_reference.html#client-exceptions) / [HTTP](https://docs.aiohttp.org/en/latest/web_exceptions.html) exceptions, unless:
+1. You want to provide a better error message (extend the closest built-in/`aiohttp` exception in this case).
+2. You need to represent a new situation.
+
+When defining your own exceptions, consider:
+1. Extending a built-in Python or `aiohttp` exception.
+2. Using contextual variables in the error message.
+
+## Table Column Definitions
+
+Column | Meaning
+---|---
+Code | Codes are used only to define the hierarchy of exceptions and do not end up in the generated output; it is okay to re-number things as necessary at any time to achieve the desired hierarchy.
+Name | Becomes the class name of the exception with "Error" appended to the end. Changing names of existing exceptions makes the API backwards incompatible. When extending other exceptions you must specify the full class name, manually adding "Error" as necessary (if extending another SDK exception).
+Message | User-friendly error message explaining the exceptional event. Supports Python formatted strings: any variables used in the string will be generated as arguments in the `__init__` method. Use `--` to provide a doc string after the error message to be added to the class definition.
+
+## Exceptions Table
+
+Code | Name | Message
+---:|---|---
+**1xx** | UserInput | User input errors.
+**10x** | Command | Errors preparing to execute commands.
+101 | CommandDoesNotExist | Command '{command}' does not exist.
+102 | CommandDeprecated | Command '{command}' is deprecated.
+103 | CommandInvalidArgument | Invalid argument '{argument}' to command '{command}'.
+104 | CommandTemporarilyUnavailable | Command '{command}' is temporarily unavailable. -- Such as waiting for required components to start.
+105 | CommandPermanentlyUnavailable | Command '{command}' is permanently unavailable. -- such as when required component was intentionally configured not to start.
+**11x** | InputValue(ValueError) | Invalid argument value provided to command.
+111 | GenericInputValue | The value '{value}' for argument '{argument}' is not valid.
+112 | InputValueIsNone | None or null is not a valid value for argument '{argument}'.
+113 | ConflictingInputValue | Only '{first_argument}' or '{second_argument}' is allowed, not both.
+114 | InputStringIsBlank | {argument} cannot be blank.
+115 | EmptyPublishedFile | Cannot publish empty file: {file_path}
+116 | MissingPublishedFile | File does not exist: {file_path}
+117 | InvalidStreamURL | Invalid LBRY stream URL: '{url}' -- When a URL cannot be downloaded, such as '@Channel/' or a collection
+**2xx** | Configuration | Configuration errors.
+201 | ConfigWrite | Cannot write configuration file '{path}'. -- When writing the default config fails on startup, such as due to permission issues.
+202 | ConfigRead | Cannot find provided configuration file '{path}'. -- Can't open the config file user provided via command line args.
+203 | ConfigParse | Failed to parse the configuration file '{path}'. -- Includes the syntax error / line number to help user fix it.
+204 | ConfigMissing | Configuration file '{path}' is missing setting that has no default / fallback.
+205 | ConfigInvalid | Configuration file '{path}' has setting with invalid value.
+**3xx** | Network | **Networking**
+301 | NoInternet | No internet connection.
+302 | NoUPnPSupport | Router does not support UPnP.
+**4xx** | Wallet | **Wallet Errors**
+401 | TransactionRejected | Transaction rejected, unknown reason.
+402 | TransactionFeeTooLow | Fee too low.
+403 | TransactionInvalidSignature | Invalid signature.
+404 | InsufficientFunds | Not enough funds to cover this transaction. -- determined by wallet prior to attempting to broadcast a tx; this is different for example from a TX being created and sent but then rejected by lbrycrd for unspendable utxos.
+405 | ChannelKeyNotFound | Channel signing key not found.
+406 | ChannelKeyInvalid | Channel signing key is out of date. -- For example, channel was updated but you don't have the updated key.
+407 | DataDownload | Failed to download blob. *generic*
+408 | PrivateKeyNotFound | Couldn't find private key for {key} '{value}'.
+410 | Resolve | Failed to resolve '{url}'.
+411 | ResolveTimeout | Failed to resolve '{url}' within the timeout.
+412 | ResolveCensored | Resolve of '{url}' was censored by channel with claim id '{censor_id}'.
+420 | KeyFeeAboveMaxAllowed | {message}
+421 | InvalidPassword | Password is invalid.
+422 | IncompatibleWalletServer | '{server}:{port}' has an incompatibly old version.
+423 | TooManyClaimSearchParameters | {key} can't have more than {limit} items.
+424 | AlreadyPurchased | You already have a purchase for claim_id '{claim_id_hex}'. Use --allow-duplicate-purchase flag to override.
+431 | ServerPaymentInvalidAddress | Invalid address from wallet server: '{address}' - skipping payment round.
+432 | ServerPaymentWalletLocked | Cannot spend funds with locked wallet, skipping payment round.
+433 | ServerPaymentFeeAboveMaxAllowed | Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.
+434 | WalletNotLoaded | Wallet {wallet_id} is not loaded.
+435 | WalletAlreadyLoaded | Wallet {wallet_path} is already loaded.
+436 | WalletNotFound | Wallet not found at {wallet_path}.
+437 | WalletAlreadyExists | Wallet {wallet_path} already exists, use `wallet_add` to load it.
+**5xx** | Blob | **Blobs**
+500 | BlobNotFound | Blob not found.
+501 | BlobPermissionDenied | Permission denied to read blob.
+502 | BlobTooBig | Blob is too big.
+503 | BlobEmpty | Blob is empty.
+510 | BlobFailedDecryption | Failed to decrypt blob.
+511 | CorruptBlob | Blob is corrupted.
+520 | BlobFailedEncryption | Failed to encrypt blob.
+531 | DownloadCancelled | Download was canceled.
+532 | DownloadSDTimeout | Failed to download sd blob {download} within timeout.
+533 | DownloadDataTimeout | Failed to download data blobs for sd hash {download} within timeout. +534 | InvalidStreamDescriptor | {message} +535 | InvalidData | {message} +536 | InvalidBlobHash | {message} +**6xx** | Component | **Components** +601 | ComponentStartConditionNotMet | Unresolved dependencies for: {components} +602 | ComponentsNotStarted | {message} +**7xx** | CurrencyExchange | **Currency Exchange** +701 | InvalidExchangeRateResponse | Failed to get exchange rate from {source}: {reason} +702 | CurrencyConversion | {message} +703 | InvalidCurrency | Invalid currency: {currency} is not a supported currency. diff --git a/scribe/error/__init__.py b/scribe/error/__init__.py new file mode 100644 index 0000000..7e18f5b --- /dev/null +++ b/scribe/error/__init__.py @@ -0,0 +1,494 @@ +from .base import BaseError, claim_id + + +class UserInputError(BaseError): + """ + User input errors. + """ + + +class CommandError(UserInputError): + """ + Errors preparing to execute commands. + """ + + +class CommandDoesNotExistError(CommandError): + + def __init__(self, command): + self.command = command + super().__init__(f"Command '{command}' does not exist.") + + +class CommandDeprecatedError(CommandError): + + def __init__(self, command): + self.command = command + super().__init__(f"Command '{command}' is deprecated.") + + +class CommandInvalidArgumentError(CommandError): + + def __init__(self, argument, command): + self.argument = argument + self.command = command + super().__init__(f"Invalid argument '{argument}' to command '{command}'.") + + +class CommandTemporarilyUnavailableError(CommandError): + """ + Such as waiting for required components to start. + """ + + def __init__(self, command): + self.command = command + super().__init__(f"Command '{command}' is temporarily unavailable.") + + +class CommandPermanentlyUnavailableError(CommandError): + """ + such as when required component was intentionally configured not to start. + """ + + def __init__(self, command): + self.command = command + super().__init__(f"Command '{command}' is permanently unavailable.") + + +class InputValueError(UserInputError, ValueError): + """ + Invalid argument value provided to command. 
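+    Also subclasses the built-in ValueError, so handlers catching ValueError
+    will catch these errors as well.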
+ """ + + +class GenericInputValueError(InputValueError): + + def __init__(self, value, argument): + self.value = value + self.argument = argument + super().__init__(f"The value '{value}' for argument '{argument}' is not valid.") + + +class InputValueIsNoneError(InputValueError): + + def __init__(self, argument): + self.argument = argument + super().__init__(f"None or null is not valid value for argument '{argument}'.") + + +class ConflictingInputValueError(InputValueError): + + def __init__(self, first_argument, second_argument): + self.first_argument = first_argument + self.second_argument = second_argument + super().__init__(f"Only '{first_argument}' or '{second_argument}' is allowed, not both.") + + +class InputStringIsBlankError(InputValueError): + + def __init__(self, argument): + self.argument = argument + super().__init__(f"{argument} cannot be blank.") + + +class EmptyPublishedFileError(InputValueError): + + def __init__(self, file_path): + self.file_path = file_path + super().__init__(f"Cannot publish empty file: {file_path}") + + +class MissingPublishedFileError(InputValueError): + + def __init__(self, file_path): + self.file_path = file_path + super().__init__(f"File does not exist: {file_path}") + + +class InvalidStreamURLError(InputValueError): + """ + When an URL cannot be downloaded, such as '@Channel/' or a collection + """ + + def __init__(self, url): + self.url = url + super().__init__(f"Invalid LBRY stream URL: '{url}'") + + +class ConfigurationError(BaseError): + """ + Configuration errors. + """ + + +class ConfigWriteError(ConfigurationError): + """ + When writing the default config fails on startup, such as due to permission issues. + """ + + def __init__(self, path): + self.path = path + super().__init__(f"Cannot write configuration file '{path}'.") + + +class ConfigReadError(ConfigurationError): + """ + Can't open the config file user provided via command line args. + """ + + def __init__(self, path): + self.path = path + super().__init__(f"Cannot find provided configuration file '{path}'.") + + +class ConfigParseError(ConfigurationError): + """ + Includes the syntax error / line number to help user fix it. 
+ """ + + def __init__(self, path): + self.path = path + super().__init__(f"Failed to parse the configuration file '{path}'.") + + +class ConfigMissingError(ConfigurationError): + + def __init__(self, path): + self.path = path + super().__init__(f"Configuration file '{path}' is missing setting that has no default / fallback.") + + +class ConfigInvalidError(ConfigurationError): + + def __init__(self, path): + self.path = path + super().__init__(f"Configuration file '{path}' has setting with invalid value.") + + +class NetworkError(BaseError): + """ + **Networking** + """ + + +class NoInternetError(NetworkError): + + def __init__(self): + super().__init__("No internet connection.") + + +class NoUPnPSupportError(NetworkError): + + def __init__(self): + super().__init__("Router does not support UPnP.") + + +class WalletError(BaseError): + """ + **Wallet Errors** + """ + + +class TransactionRejectedError(WalletError): + + def __init__(self): + super().__init__("Transaction rejected, unknown reason.") + + +class TransactionFeeTooLowError(WalletError): + + def __init__(self): + super().__init__("Fee too low.") + + +class TransactionInvalidSignatureError(WalletError): + + def __init__(self): + super().__init__("Invalid signature.") + + +class InsufficientFundsError(WalletError): + """ + determined by wallet prior to attempting to broadcast a tx; this is different for example from a TX + being created and sent but then rejected by lbrycrd for unspendable utxos. + """ + + def __init__(self): + super().__init__("Not enough funds to cover this transaction.") + + +class ChannelKeyNotFoundError(WalletError): + + def __init__(self): + super().__init__("Channel signing key not found.") + + +class ChannelKeyInvalidError(WalletError): + """ + For example, channel was updated but you don't have the updated key. + """ + + def __init__(self): + super().__init__("Channel signing key is out of date.") + + +class DataDownloadError(WalletError): + + def __init__(self): + super().__init__("Failed to download blob. *generic*") + + +class PrivateKeyNotFoundError(WalletError): + + def __init__(self, key, value): + self.key = key + self.value = value + super().__init__(f"Couldn't find private key for {key} '{value}'.") + + +class ResolveError(WalletError): + + def __init__(self, url): + self.url = url + super().__init__(f"Failed to resolve '{url}'.") + + +class ResolveTimeoutError(WalletError): + + def __init__(self, url): + self.url = url + super().__init__(f"Failed to resolve '{url}' within the timeout.") + + +class ResolveCensoredError(WalletError): + + def __init__(self, url, censor_id, censor_row): + self.url = url + self.censor_id = censor_id + self.censor_row = censor_row + super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{censor_id}'.") + + +class KeyFeeAboveMaxAllowedError(WalletError): + + def __init__(self, message): + self.message = message + super().__init__(f"{message}") + + +class InvalidPasswordError(WalletError): + + def __init__(self): + super().__init__("Password is invalid.") + + +class IncompatibleWalletServerError(WalletError): + + def __init__(self, server, port): + self.server = server + self.port = port + super().__init__(f"'{server}:{port}' has an incompatibly old version.") + + +class TooManyClaimSearchParametersError(WalletError): + + def __init__(self, key, limit): + self.key = key + self.limit = limit + super().__init__(f"{key} cant have more than {limit} items.") + + +class AlreadyPurchasedError(WalletError): + """ + allow-duplicate-purchase flag to override. 
+ """ + + def __init__(self, claim_id_hex): + self.claim_id_hex = claim_id_hex + super().__init__(f"You already have a purchase for claim_id '{claim_id_hex}'. Use") + + +class ServerPaymentInvalidAddressError(WalletError): + + def __init__(self, address): + self.address = address + super().__init__(f"Invalid address from wallet server: '{address}' - skipping payment round.") + + +class ServerPaymentWalletLockedError(WalletError): + + def __init__(self): + super().__init__("Cannot spend funds with locked wallet, skipping payment round.") + + +class ServerPaymentFeeAboveMaxAllowedError(WalletError): + + def __init__(self, daily_fee, max_fee): + self.daily_fee = daily_fee + self.max_fee = max_fee + super().__init__(f"Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.") + + +class WalletNotLoadedError(WalletError): + + def __init__(self, wallet_id): + self.wallet_id = wallet_id + super().__init__(f"Wallet {wallet_id} is not loaded.") + + +class WalletAlreadyLoadedError(WalletError): + + def __init__(self, wallet_path): + self.wallet_path = wallet_path + super().__init__(f"Wallet {wallet_path} is already loaded.") + + +class WalletNotFoundError(WalletError): + + def __init__(self, wallet_path): + self.wallet_path = wallet_path + super().__init__(f"Wallet not found at {wallet_path}.") + + +class WalletAlreadyExistsError(WalletError): + + def __init__(self, wallet_path): + self.wallet_path = wallet_path + super().__init__(f"Wallet {wallet_path} already exists, use `wallet_add` to load it.") + + +class BlobError(BaseError): + """ + **Blobs** + """ + + +class BlobNotFoundError(BlobError): + + def __init__(self): + super().__init__("Blob not found.") + + +class BlobPermissionDeniedError(BlobError): + + def __init__(self): + super().__init__("Permission denied to read blob.") + + +class BlobTooBigError(BlobError): + + def __init__(self): + super().__init__("Blob is too big.") + + +class BlobEmptyError(BlobError): + + def __init__(self): + super().__init__("Blob is empty.") + + +class BlobFailedDecryptionError(BlobError): + + def __init__(self): + super().__init__("Failed to decrypt blob.") + + +class CorruptBlobError(BlobError): + + def __init__(self): + super().__init__("Blobs is corrupted.") + + +class BlobFailedEncryptionError(BlobError): + + def __init__(self): + super().__init__("Failed to encrypt blob.") + + +class DownloadCancelledError(BlobError): + + def __init__(self): + super().__init__("Download was canceled.") + + +class DownloadSDTimeoutError(BlobError): + + def __init__(self, download): + self.download = download + super().__init__(f"Failed to download sd blob {download} within timeout.") + + +class DownloadDataTimeoutError(BlobError): + + def __init__(self, download): + self.download = download + super().__init__(f"Failed to download data blobs for sd hash {download} within timeout.") + + +class InvalidStreamDescriptorError(BlobError): + + def __init__(self, message): + self.message = message + super().__init__(f"{message}") + + +class InvalidDataError(BlobError): + + def __init__(self, message): + self.message = message + super().__init__(f"{message}") + + +class InvalidBlobHashError(BlobError): + + def __init__(self, message): + self.message = message + super().__init__(f"{message}") + + +class ComponentError(BaseError): + """ + **Components** + """ + + +class ComponentStartConditionNotMetError(ComponentError): + + def __init__(self, components): + self.components = components + super().__init__(f"Unresolved dependencies for: {components}") + + +class 
ComponentsNotStartedError(ComponentError): + + def __init__(self, message): + self.message = message + super().__init__(f"{message}") + + +class CurrencyExchangeError(BaseError): + """ + **Currency Exchange** + """ + + +class InvalidExchangeRateResponseError(CurrencyExchangeError): + + def __init__(self, source, reason): + self.source = source + self.reason = reason + super().__init__(f"Failed to get exchange rate from {source}: {reason}") + + +class CurrencyConversionError(CurrencyExchangeError): + + def __init__(self, message): + self.message = message + super().__init__(f"{message}") + + +class InvalidCurrencyError(CurrencyExchangeError): + + def __init__(self, currency): + self.currency = currency + super().__init__(f"Invalid currency: {currency} is not a supported currency.") diff --git a/scribe/error/base.py b/scribe/error/base.py new file mode 100644 index 0000000..fce1be2 --- /dev/null +++ b/scribe/error/base.py @@ -0,0 +1,9 @@ +from binascii import hexlify + + +def claim_id(claim_hash): + return hexlify(claim_hash[::-1]).decode() + + +class BaseError(Exception): + pass diff --git a/scribe/error/generate.py b/scribe/error/generate.py new file mode 100644 index 0000000..94e9f6e --- /dev/null +++ b/scribe/error/generate.py @@ -0,0 +1,167 @@ +import re +import sys +import argparse +from pathlib import Path +from textwrap import fill, indent + + +INDENT = ' ' * 4 + +CLASS = """ + +class {name}({parents}):{doc} +""" + +INIT = """ + def __init__({args}):{fields} + super().__init__({format}"{message}") +""" + +FUNCTIONS = ['claim_id'] + + +class ErrorClass: + + def __init__(self, hierarchy, name, message): + self.hierarchy = hierarchy.replace('**', '') + self.other_parents = [] + if '(' in name: + assert ')' in name, f"Missing closing parenthesis in '{name}'." 
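+            # e.g. a table name like "InputValue(ValueError)" lists extra base classes in parentheses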
+            self.other_parents = name[name.find('(')+1:name.find(')')].split(',')
+            name = name[:name.find('(')]
+        self.name = name
+        self.class_name = name+'Error'
+        self.message = message
+        self.comment = ""
+        if '--' in message:
+            self.message, self.comment = message.split('--')
+            self.message = self.message.strip()
+            self.comment = self.comment.strip()
+
+    @property
+    def is_leaf(self):
+        return 'x' not in self.hierarchy
+
+    @property
+    def code(self):
+        return self.hierarchy.replace('x', '')
+
+    @property
+    def parent_codes(self):
+        return self.hierarchy[0:2], self.hierarchy[0]
+
+    def get_arguments(self):
+        args = ['self']
+        for arg in re.findall('{([a-z0-9_()]+)}', self.message):
+            for func in FUNCTIONS:
+                if arg.startswith(f'{func}('):
+                    arg = arg[len(f'{func}('):-1]
+                    break
+            args.append(arg)
+        return args
+
+    @staticmethod
+    def get_fields(args):
+        if len(args) > 1:
+            return ''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:])
+        return ''
+
+    @staticmethod
+    def get_doc_string(doc):
+        if doc:
+            return f'\n{INDENT}"""\n{indent(fill(doc, 100), INDENT)}\n{INDENT}"""'
+        return ""
+
+    def render(self, out, parent):
+        if not parent:
+            parents = ['BaseError']
+        else:
+            parents = [parent.class_name]
+        parents += self.other_parents
+        args = self.get_arguments()
+        if self.is_leaf:
+            out.write((CLASS + INIT).format(
+                name=self.class_name, parents=', '.join(parents),
+                args=', '.join(args), fields=self.get_fields(args),
+                message=self.message, doc=self.get_doc_string(self.comment), format='f' if len(args) > 1 else ''
+            ))
+        else:
+            out.write(CLASS.format(
+                name=self.class_name, parents=', '.join(parents),
+                doc=self.get_doc_string(self.comment or self.message)
+            ))
+
+
+def get_errors():
+    with open('README.md', 'r') as readme:
+        lines = iter(readme.readlines())
+        for line in lines:
+            if line.startswith('## Exceptions Table'):
+                break
+        for line in lines:
+            if line.startswith('---:|'):
+                break
+        for line in lines:
+            if not line:
+                break
+            yield ErrorClass(*[c.strip() for c in line.split('|')])
+
+
+def find_parent(stack, child):
+    for parent_code in child.parent_codes:
+        parent = stack.get(parent_code)
+        if parent:
+            return parent
+
+
+def generate(out):
+    out.write(f"from .base import BaseError, {', '.join(FUNCTIONS)}\n")
+    stack = {}
+    for error in get_errors():
+        error.render(out, find_parent(stack, error))
+        if not error.is_leaf:
+            assert error.code not in stack, f"Duplicate code: {error.code}"
+            stack[error.code] = error
+
+
+def analyze():
+    errors = {e.class_name: [] for e in get_errors() if e.is_leaf}
+    here = Path(__file__).absolute().parents[0]
+    module = here.parent
+    for file_path in module.glob('**/*.py'):
+        if here in file_path.parents:
+            continue
+        with open(file_path) as src_file:
+            src = src_file.read()
+            for error in errors.keys():
+                found = src.count(error)
+                if found > 0:
+                    errors[error].append((file_path, found))
+
+    print('Used Errors:\n')
+    for error, used in errors.items():
+        if used:
+            print(f' - {error}')
+            for use in used:
+                print(f' {use[0].relative_to(module.parent)} {use[1]}')
+            print('')
+
+    print('')
+    print('Unused Errors:')
+    for error, used in errors.items():
+        if not used:
+            print(f' - {error}')
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("action", choices=['generate', 'analyze'])
+    args = parser.parse_args()
+    if args.action == "analyze":
+        analyze()
+    elif args.action == "generate":
+        generate(sys.stdout)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scribe/hub/__init__.py b/scribe/hub/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scribe/hub/common.py b/scribe/hub/common.py
new file mode 100644
index 0000000..6692824
--- /dev/null
+++ b/scribe/hub/common.py
@@ -0,0 +1,209 @@
+import logging
+import itertools
+import time
+import json
+import typing
+import asyncio
+import inspect
+from asyncio import Event
+from collections import namedtuple
+from functools import partial, lru_cache
+from numbers import Number
+from asyncio import Queue
+from scribe.common import RPCError, CodeMessageError
+from scribe import PROMETHEUS_NAMESPACE
+
+
+HISTOGRAM_BUCKETS = (
+    .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf')
+)
+NAMESPACE = "scribe"
+
+
+SignatureInfo = namedtuple('SignatureInfo', 'min_args max_args '
+                           'required_names other_names')
+
+PARSE_ERROR = -32700
+INVALID_REQUEST = -32600
+METHOD_NOT_FOUND = -32601
+INVALID_ARGS = -32602
+INTERNAL_ERROR = -32603
+QUERY_TIMEOUT = -32000
+
+
+@lru_cache(256)
+def signature_info(func):
+    params = inspect.signature(func).parameters
+    min_args = max_args = 0
+    required_names = []
+    other_names = []
+    no_names = False
+    for p in params.values():
+        if p.kind == p.POSITIONAL_OR_KEYWORD:
+            max_args += 1
+            if p.default is p.empty:
+                min_args += 1
+                required_names.append(p.name)
+            else:
+                other_names.append(p.name)
+        elif p.kind == p.KEYWORD_ONLY:
+            other_names.append(p.name)
+        elif p.kind == p.VAR_POSITIONAL:
+            max_args = None
+        elif p.kind == p.VAR_KEYWORD:
+            other_names = any
+        elif p.kind == p.POSITIONAL_ONLY:
+            max_args += 1
+            if p.default is p.empty:
+                min_args += 1
+            no_names = True
+
+    if no_names:
+        other_names = None
+
+    return SignatureInfo(min_args, max_args, required_names, other_names)
+
+
+class BatchError(Exception):
+
+    def __init__(self, request):
+        self.request = request  # BatchRequest object
+
+
+class BatchRequest:
+    """Used to build a batch request to send to the server. Stores
+    the requests in the order they are added.
+
+    Attributes batch and results are initially None.
+
+    Adding an invalid request or notification immediately raises a
+    ProtocolError.
+
+    On exiting the with clause, it will:
+
+    1) create a Batch object for the requests in the order they were
+       added. If the batch is empty this raises a ProtocolError.
+
+    2) set the "batch" attribute to be that batch
+
+    3) send the batch request and wait for a response
+
+    4) raise a ProtocolError if the protocol was violated by the
+       server. Currently this only happens if it gave more than one
+       response to any request
+
+    5) otherwise there is precisely one response to each Request. Set
+       the "results" attribute to the tuple of results; the responses
+       are ordered to match the Requests in the batch. Notifications
+       do not get a response.
+
+    6) if raise_errors is True and any individual response was a JSON
+       RPC error response, or violated the protocol in some way, a
+       BatchError exception is raised. Otherwise the caller can be
+       certain each request returned a standard result.
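+
+    A sketch of typical usage (the session and method names here are
+    illustrative, not part of this module):
+
+        async with BatchRequest(session, raise_errors=True) as batch:
+            batch.add_request('blockchain.transaction.get', [tx_hash])
+            batch.add_notification('server.ping')
+        results = batch.results  # one result per request; notifications get none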
+ """ + + def __init__(self, session, raise_errors): + self._session = session + self._raise_errors = raise_errors + self._requests = [] + self.batch = None + self.results = None + + def add_request(self, method, args=()): + self._requests.append(Request(method, args)) + + def add_notification(self, method, args=()): + self._requests.append(Notification(method, args)) + + def __len__(self): + return len(self._requests) + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_value, traceback): + if exc_type is None: + self.batch = Batch(self._requests) + message, event = self._session.connection.send_batch(self.batch) + await self._session._send_message(message) + await event.wait() + self.results = event.result + if self._raise_errors: + if any(isinstance(item, Exception) for item in event.result): + raise BatchError(self) + + +class SingleRequest: + __slots__ = ('method', 'args') + + def __init__(self, method, args): + if not isinstance(method, str): + raise ProtocolError(METHOD_NOT_FOUND, + 'method must be a string') + if not isinstance(args, (list, tuple, dict)): + raise ProtocolError.invalid_args('request arguments must be a ' + 'list or a dictionary') + self.args = args + self.method = method + + def __repr__(self): + return f'{self.__class__.__name__}({self.method!r}, {self.args!r})' + + def __eq__(self, other): + return (isinstance(other, self.__class__) and + self.method == other.method and self.args == other.args) + + +class Request(SingleRequest): + def send_result(self, response): + return None + + +class Notification(SingleRequest): + pass + + +class Batch: + __slots__ = ('items', ) + + def __init__(self, items): + if not isinstance(items, (list, tuple)): + raise ProtocolError.invalid_request('items must be a list') + if not items: + raise ProtocolError.empty_batch() + if not (all(isinstance(item, SingleRequest) for item in items) or + all(isinstance(item, Response) for item in items)): + raise ProtocolError.invalid_request('batch must be homogeneous') + self.items = items + + def __len__(self): + return len(self.items) + + def __getitem__(self, item): + return self.items[item] + + def __iter__(self): + return iter(self.items) + + def __repr__(self): + return f'Batch({len(self.items)} items)' + + +class Response: + __slots__ = ('result', ) + + def __init__(self, result): + # Type checking happens when converting to a message + self.result = result + + +class ProtocolError(CodeMessageError): + def __init__(self, code, message): + super().__init__(code, message) + # If not None send this unframed message over the network + self.error_message = None + # If the error was in a JSON response message; its message ID. + # Since None can be a response message ID, "id" means the + # error was not sent in a JSON response + self.response_msg_id = id diff --git a/scribe/hub/framer.py b/scribe/hub/framer.py new file mode 100644 index 0000000..f0f9c34 --- /dev/null +++ b/scribe/hub/framer.py @@ -0,0 +1,51 @@ +from asyncio import Queue + + +class NewlineFramer: + """A framer for a protocol where messages are separated by newlines.""" + + # The default max_size value is motivated by JSONRPC, where a + # normal request will be 250 bytes or less, and a reasonable + # batch may contain 4000 requests. + def __init__(self, max_size=250 * 4000): + """max_size - an anti-DoS measure. 
If, after processing an incoming + message, buffered data would exceed max_size bytes, that + buffered data is dropped entirely and the framer waits for a + newline character to re-synchronize the stream. + """ + self.max_size = max_size + self.queue = Queue() + self.received_bytes = self.queue.put_nowait + self.synchronizing = False + self.residual = b'' + + def frame(self, message): + return message + b'\n' + + async def receive_message(self): + parts = [] + buffer_size = 0 + while True: + part = self.residual + self.residual = b'' + if not part: + part = await self.queue.get() + + npos = part.find(b'\n') + if npos == -1: + parts.append(part) + buffer_size += len(part) + # Ignore over-sized messages; re-synchronize + if buffer_size <= self.max_size: + continue + self.synchronizing = True + raise MemoryError(f'dropping message over {self.max_size:,d} ' + f'bytes and re-synchronizing') + + tail, self.residual = part[:npos], part[npos + 1:] + if self.synchronizing: + self.synchronizing = False + return await self.receive_message() + else: + parts.append(tail) + return b''.join(parts) diff --git a/scribe/hub/jsonrpc.py b/scribe/hub/jsonrpc.py new file mode 100644 index 0000000..1a732a1 --- /dev/null +++ b/scribe/hub/jsonrpc.py @@ -0,0 +1,616 @@ +import itertools +import json +import typing +import asyncio +from asyncio import Event +from functools import partial +from numbers import Number +from scribe.common import RPCError, CodeMessageError +from scribe.hub.common import Notification, Request, Response, Batch, ProtocolError + + +HISTOGRAM_BUCKETS = ( + .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf') +) +NAMESPACE = "scribe" + + +class JSONRPC: + """Abstract base class that interprets and constructs JSON RPC messages.""" + + # Error codes. See http://www.jsonrpc.org/specification + PARSE_ERROR = -32700 + INVALID_REQUEST = -32600 + METHOD_NOT_FOUND = -32601 + INVALID_ARGS = -32602 + INTERNAL_ERROR = -32603 + QUERY_TIMEOUT = -32000 + + # Codes specific to this library + ERROR_CODE_UNAVAILABLE = -100 + + # Can be overridden by derived classes + allow_batches = True + + @classmethod + def _message_id(cls, message, require_id): + """Validate the message is a dictionary and return its ID. + + Raise an error if the message is invalid or the ID is of an + invalid type. If it has no ID, raise an error if require_id + is True, otherwise return None. 
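+
+        For example, under JSON RPC v2 the payload
+        {"jsonrpc": "2.0", "method": "ping", "id": 3} has message ID 3,
+        while {"jsonrpc": "2.0", "method": "ping"} is a notification and
+        has no ID.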
+ """ + raise NotImplementedError + + @classmethod + def _validate_message(cls, message): + """Validate other parts of the message other than those + done in _message_id.""" + pass + + @classmethod + def _request_args(cls, request): + """Validate the existence and type of the arguments passed + in the request dictionary.""" + raise NotImplementedError + + @classmethod + def _process_request(cls, payload): + request_id = None + try: + request_id = cls._message_id(payload, False) + cls._validate_message(payload) + method = payload.get('method') + if request_id is None: + item = Notification(method, cls._request_args(payload)) + else: + item = Request(method, cls._request_args(payload)) + return item, request_id + except ProtocolError as error: + code, message = error.code, error.message + raise cls._error(code, message, True, request_id) + + @classmethod + def _process_response(cls, payload): + request_id = None + try: + request_id = cls._message_id(payload, True) + cls._validate_message(payload) + return Response(cls.response_value(payload)), request_id + except ProtocolError as error: + code, message = error.code, error.message + raise cls._error(code, message, False, request_id) + + @classmethod + def _message_to_payload(cls, message): + """Returns a Python object or a ProtocolError.""" + try: + return json.loads(message.decode()) + except UnicodeDecodeError: + message = 'messages must be encoded in UTF-8' + except json.JSONDecodeError: + message = 'invalid JSON' + raise cls._error(cls.PARSE_ERROR, message, True, None) + + @classmethod + def _error(cls, code, message, send, msg_id): + error = ProtocolError(code, message) + if send: + error.error_message = cls.response_message(error, msg_id) + else: + error.response_msg_id = msg_id + return error + + # + # External API + # + + @classmethod + def message_to_item(cls, message): + """Translate an unframed received message and return an + (item, request_id) pair. + + The item can be a Request, Notification, Response or a list. + + A JSON RPC error response is returned as an RPCError inside a + Response object. + + If a Batch is returned, request_id is an iterable of request + ids, one per batch member. + + If the message violates the protocol in some way a + ProtocolError is returned, except if the message was + determined to be a response, in which case the ProtocolError + is placed inside a Response object. This is so that client + code can mark a request as having been responded to even if + the response was bad. 
+ + raises: ProtocolError + """ + payload = cls._message_to_payload(message) + if isinstance(payload, dict): + if 'method' in payload: + return cls._process_request(payload) + else: + return cls._process_response(payload) + elif isinstance(payload, list) and cls.allow_batches: + if not payload: + raise cls._error(JSONRPC.INVALID_REQUEST, 'batch is empty', + True, None) + return payload, None + raise cls._error(cls.INVALID_REQUEST, + 'request object must be a dictionary', True, None) + + # Message formation + @classmethod + def request_message(cls, item, request_id): + """Convert an RPCRequest item to a message.""" + assert isinstance(item, Request) + return cls.encode_payload(cls.request_payload(item, request_id)) + + @classmethod + def notification_message(cls, item): + """Convert an RPCRequest item to a message.""" + assert isinstance(item, Notification) + return cls.encode_payload(cls.request_payload(item, None)) + + @classmethod + def response_message(cls, result, request_id): + """Convert a response result (or RPCError) to a message.""" + if isinstance(result, CodeMessageError): + payload = cls.error_payload(result, request_id) + else: + payload = cls.response_payload(result, request_id) + return cls.encode_payload(payload) + + @classmethod + def batch_message(cls, batch, request_ids): + """Convert a request Batch to a message.""" + assert isinstance(batch, Batch) + if not cls.allow_batches: + raise ProtocolError.invalid_request( + 'protocol does not permit batches') + id_iter = iter(request_ids) + rm = cls.request_message + nm = cls.notification_message + parts = (rm(request, next(id_iter)) if isinstance(request, Request) + else nm(request) for request in batch) + return cls.batch_message_from_parts(parts) + + @classmethod + def batch_message_from_parts(cls, messages): + """Convert messages, one per batch item, into a batch message. At + least one message must be passed. + """ + # Comma-separate the messages and wrap the lot in square brackets + middle = b', '.join(messages) + if not middle: + raise ProtocolError.empty_batch() + return b''.join([b'[', middle, b']']) + + @classmethod + def encode_payload(cls, payload): + """Encode a Python object as JSON and convert it to bytes.""" + try: + return json.dumps(payload).encode() + except TypeError: + msg = f'JSON payload encoding error: {payload}' + raise ProtocolError(cls.INTERNAL_ERROR, msg) from None + + +class JSONRPCv1(JSONRPC): + """JSON RPC version 1.0.""" + + allow_batches = False + + @classmethod + def _message_id(cls, message, require_id): + # JSONv1 requires an ID always, but without constraint on its type + # No need to test for a dictionary here as we don't handle batches. 
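+        # (a JSON RPC v1 notification is conventionally sent with
+        # "id": null; the key is still present, so it passes this check)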
+ if 'id' not in message: + raise ProtocolError.invalid_request('request has no "id"') + return message['id'] + + @classmethod + def _request_args(cls, request): + args = request.get('params') + if not isinstance(args, list): + raise ProtocolError.invalid_args( + f'invalid request arguments: {args}') + return args + + @classmethod + def _best_effort_error(cls, error): + # Do our best to interpret the error + code = cls.ERROR_CODE_UNAVAILABLE + message = 'no error message provided' + if isinstance(error, str): + message = error + elif isinstance(error, int): + code = error + elif isinstance(error, dict): + if isinstance(error.get('message'), str): + message = error['message'] + if isinstance(error.get('code'), int): + code = error['code'] + + return RPCError(code, message) + + @classmethod + def response_value(cls, payload): + if 'result' not in payload or 'error' not in payload: + raise ProtocolError.invalid_request( + 'response must contain both "result" and "error"') + + result = payload['result'] + error = payload['error'] + if error is None: + return result # It seems None can be a valid result + if result is not None: + raise ProtocolError.invalid_request( + 'response has a "result" and an "error"') + + return cls._best_effort_error(error) + + @classmethod + def request_payload(cls, request, request_id): + """JSON v1 request (or notification) payload.""" + if isinstance(request.args, dict): + raise ProtocolError.invalid_args( + 'JSONRPCv1 does not support named arguments') + return { + 'method': request.method, + 'params': request.args, + 'id': request_id + } + + @classmethod + def response_payload(cls, result, request_id): + """JSON v1 response payload.""" + return { + 'result': result, + 'error': None, + 'id': request_id + } + + @classmethod + def error_payload(cls, error, request_id): + return { + 'result': None, + 'error': {'code': error.code, 'message': error.message}, + 'id': request_id + } + + +class JSONRPCv2(JSONRPC): + """JSON RPC version 2.0.""" + + @classmethod + def _message_id(cls, message, require_id): + if not isinstance(message, dict): + raise ProtocolError.invalid_request( + 'request object must be a dictionary') + if 'id' in message: + request_id = message['id'] + if not isinstance(request_id, (Number, str, type(None))): + raise ProtocolError.invalid_request( + f'invalid "id": {request_id}') + return request_id + else: + if require_id: + raise ProtocolError.invalid_request('request has no "id"') + return None + + @classmethod + def _validate_message(cls, message): + if message.get('jsonrpc') != '2.0': + raise ProtocolError.invalid_request('"jsonrpc" is not "2.0"') + + @classmethod + def _request_args(cls, request): + args = request.get('params', []) + if not isinstance(args, (dict, list)): + raise ProtocolError.invalid_args( + f'invalid request arguments: {args}') + return args + + @classmethod + def response_value(cls, payload): + if 'result' in payload: + if 'error' in payload: + raise ProtocolError.invalid_request( + 'response contains both "result" and "error"') + return payload['result'] + + if 'error' not in payload: + raise ProtocolError.invalid_request( + 'response contains neither "result" nor "error"') + + # Return an RPCError object + error = payload['error'] + if isinstance(error, dict): + code = error.get('code') + message = error.get('message') + if isinstance(code, int) and isinstance(message, str): + return RPCError(code, message) + + raise ProtocolError.invalid_request( + f'ill-formed response error object: {error}') + + @classmethod + def 
request_payload(cls, request, request_id): + """JSON v2 request (or notification) payload.""" + payload = { + 'jsonrpc': '2.0', + 'method': request.method, + } + # A notification? + if request_id is not None: + payload['id'] = request_id + # Preserve empty dicts as missing params is read as an array + if request.args or request.args == {}: + payload['params'] = request.args + return payload + + @classmethod + def response_payload(cls, result, request_id): + """JSON v2 response payload.""" + return { + 'jsonrpc': '2.0', + 'result': result, + 'id': request_id + } + + @classmethod + def error_payload(cls, error, request_id): + return { + 'jsonrpc': '2.0', + 'error': {'code': error.code, 'message': error.message}, + 'id': request_id + } + + +class JSONRPCLoose(JSONRPC): + """A relaxed version of JSON RPC.""" + + # Don't be so loose we accept any old message ID + _message_id = JSONRPCv2._message_id + _validate_message = JSONRPC._validate_message + _request_args = JSONRPCv2._request_args + # Outoing messages are JSONRPCv2 so we give the other side the + # best chance to assume / detect JSONRPCv2 as default protocol. + error_payload = JSONRPCv2.error_payload + request_payload = JSONRPCv2.request_payload + response_payload = JSONRPCv2.response_payload + + @classmethod + def response_value(cls, payload): + # Return result, unless it is None and there is an error + if payload.get('error') is not None: + if payload.get('result') is not None: + raise ProtocolError.invalid_request( + 'response contains both "result" and "error"') + return JSONRPCv1._best_effort_error(payload['error']) + + if 'result' not in payload: + raise ProtocolError.invalid_request( + 'response contains neither "result" nor "error"') + + # Can be None + return payload['result'] + + +class JSONRPCAutoDetect(JSONRPCv2): + + @classmethod + def message_to_item(cls, message): + return cls.detect_protocol(message), None + + @classmethod + def detect_protocol(cls, message): + """Attempt to detect the protocol from the message.""" + main = cls._message_to_payload(message) + + def protocol_for_payload(payload): + if not isinstance(payload, dict): + return JSONRPCLoose # Will error + # Obey an explicit "jsonrpc" + version = payload.get('jsonrpc') + if version == '2.0': + return JSONRPCv2 + if version == '1.0': + return JSONRPCv1 + + # Now to decide between JSONRPCLoose and JSONRPCv1 if possible + if 'result' in payload and 'error' in payload: + return JSONRPCv1 + return JSONRPCLoose + + if isinstance(main, list): + parts = {protocol_for_payload(payload) for payload in main} + # If all same protocol, return it + if len(parts) == 1: + return parts.pop() + # If strict protocol detected, return it, preferring JSONRPCv2. + # This means a batch of JSONRPCv1 will fail + for protocol in (JSONRPCv2, JSONRPCv1): + if protocol in parts: + return protocol + # Will error if no parts + return JSONRPCLoose + + return protocol_for_payload(main) + + +class JSONRPCConnection: + """Maintains state of a JSON RPC connection, in particular + encapsulating the handling of request IDs. + + protocol - the JSON RPC protocol to follow + max_response_size - responses over this size send an error response + instead. + """ + + _id_counter = itertools.count() + + def __init__(self, protocol): + self._protocol = protocol + # Sent Requests and Batches that have not received a response. 
+        # The key is its request ID; for a batch it is a sorted tuple
+        # of request IDs
+        self._requests: typing.Dict[typing.Union[int, typing.Tuple[int, ...]],
+                                    typing.Tuple[Request, Event]] = {}
+        # A public attribute intended to be settable dynamically
+        self.max_response_size = 0
+
+    def _oversized_response_message(self, request_id):
+        text = f'response too large (over {self.max_response_size:,d} bytes)'
+        error = RPCError.invalid_request(text)
+        return self._protocol.response_message(error, request_id)
+
+    def _receive_response(self, result, request_id):
+        if request_id not in self._requests:
+            if request_id is None and isinstance(result, RPCError):
+                message = f'diagnostic error received: {result}'
+            else:
+                message = f'response to unsent request (ID: {request_id})'
+            raise ProtocolError.invalid_request(message) from None
+        request, event = self._requests.pop(request_id)
+        event.result = result
+        event.set()
+        return []
+
+    def _receive_request_batch(self, payloads):
+        def item_send_result(request_id, result):
+            nonlocal size
+            part = protocol.response_message(result, request_id)
+            size += len(part) + 2
+            if size > self.max_response_size > 0:
+                part = self._oversized_response_message(request_id)
+            parts.append(part)
+            if len(parts) == count:
+                return protocol.batch_message_from_parts(parts)
+            return None
+
+        parts = []
+        items = []
+        size = 0
+        count = 0
+        protocol = self._protocol
+        for payload in payloads:
+            try:
+                item, request_id = protocol._process_request(payload)
+                items.append(item)
+                if isinstance(item, Request):
+                    count += 1
+                    item.send_result = partial(item_send_result, request_id)
+            except ProtocolError as error:
+                count += 1
+                parts.append(error.error_message)
+
+        if not items and parts:
+            protocol_error = ProtocolError(0, "")
+            protocol_error.error_message = protocol.batch_message_from_parts(parts)
+            raise protocol_error
+        return items
+
+    def _receive_response_batch(self, payloads):
+        request_ids = []
+        results = []
+        for payload in payloads:
+            # Let ProtocolError exceptions through
+            item, request_id = self._protocol._process_response(payload)
+            request_ids.append(request_id)
+            results.append(item.result)
+
+        ordered = sorted(zip(request_ids, results), key=lambda t: t[0])
+        ordered_ids, ordered_results = zip(*ordered)
+        if ordered_ids not in self._requests:
+            raise ProtocolError.invalid_request('response to unsent batch')
+        request_batch, event = self._requests.pop(ordered_ids)
+        event.result = ordered_results
+        event.set()
+        return []
+
+    def _send_result(self, request_id, result):
+        message = self._protocol.response_message(result, request_id)
+        if len(message) > self.max_response_size > 0:
+            message = self._oversized_response_message(request_id)
+        return message
+
+    def _event(self, request, request_id):
+        event = Event()
+        self._requests[request_id] = (request, event)
+        return event
+
+    #
+    # External API
+    #
+    def send_request(self, request: Request) -> typing.Tuple[bytes, Event]:
+        """Send a Request. Return a (message, event) pair.
+
+        The message is an unframed message to send over the network.
+        Wait on the event for the response, which will be in its
+        "result" attribute.
+
+        Raises: ProtocolError if the request violates the protocol
+        in some way.
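+
+        A usage sketch (framing and transport are up to the caller):
+
+            message, event = connection.send_request(Request('ping', []))
+            # ... transmit `message` over the network, then:
+            await event.wait()
+            result = event.result  # an Exception instance on failure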
+ """ + request_id = next(self._id_counter) + message = self._protocol.request_message(request, request_id) + return message, self._event(request, request_id) + + def send_notification(self, notification): + return self._protocol.notification_message(notification) + + def send_batch(self, batch): + ids = tuple(next(self._id_counter) + for request in batch if isinstance(request, Request)) + message = self._protocol.batch_message(batch, ids) + event = self._event(batch, ids) if ids else None + return message, event + + def receive_message(self, message): + """Call with an unframed message received from the network. + + Raises: ProtocolError if the message violates the protocol in + some way. However, if it happened in a response that can be + paired with a request, the ProtocolError is instead set in the + result attribute of the send_request() that caused the error. + """ + try: + item, request_id = self._protocol.message_to_item(message) + except ProtocolError as e: + if e.response_msg_id is not id: + return self._receive_response(e, e.response_msg_id) + raise + + if isinstance(item, Request): + item.send_result = partial(self._send_result, request_id) + return [item] + if isinstance(item, Notification): + return [item] + if isinstance(item, Response): + return self._receive_response(item.result, request_id) + if isinstance(item, list): + if all(isinstance(payload, dict) + and ('result' in payload or 'error' in payload) + for payload in item): + return self._receive_response_batch(item) + else: + return self._receive_request_batch(item) + else: + # Protocol auto-detection hack + assert issubclass(item, JSONRPC) + self._protocol = item + return self.receive_message(message) + + def raise_pending_requests(self, exception): + exception = exception or asyncio.TimeoutError() + for request, event in self._requests.values(): + event.result = exception + event.set() + self._requests.clear() + + def pending_requests(self): + """All sent requests that have not received a response.""" + return [request for request, event in self._requests.values()] diff --git a/scribe/hub/mempool.py b/scribe/hub/mempool.py new file mode 100644 index 0000000..a772a2c --- /dev/null +++ b/scribe/hub/mempool.py @@ -0,0 +1,200 @@ +import asyncio +import itertools +import attr +import typing +import logging +from collections import defaultdict +from prometheus_client import Histogram +from scribe import PROMETHEUS_NAMESPACE +from scribe.blockchain.transaction.deserializer import Deserializer + +if typing.TYPE_CHECKING: + from scribe.hub.session import SessionManager + from scribe.db import HubDB + + +@attr.s(slots=True) +class MemPoolTx: + prevouts = attr.ib() + # A pair is a (hashX, value) tuple + in_pairs = attr.ib() + out_pairs = attr.ib() + fee = attr.ib() + size = attr.ib() + raw_tx = attr.ib() + + +@attr.s(slots=True) +class MemPoolTxSummary: + hash = attr.ib() + fee = attr.ib() + has_unconfirmed_inputs = attr.ib() + + +NAMESPACE = f"{PROMETHEUS_NAMESPACE}_mempool" +HISTOGRAM_BUCKETS = ( + .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf') +) +mempool_process_time_metric = Histogram( + "processed_mempool", "Time to process mempool and notify touched addresses", + namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS +) + + +class MemPool: + def __init__(self, coin, db: 'HubDB', refresh_secs=1.0): + self.coin = coin + self._db = db + self.logger = logging.getLogger(__name__) + self.txs = {} + self.raw_mempool = {} + self.touched_hashXs: typing.DefaultDict[bytes, 
typing.Set[bytes]] = defaultdict(set) # None can be a key + self.refresh_secs = refresh_secs + self.mempool_process_time_metric = mempool_process_time_metric + self.session_manager: typing.Optional['SessionManager'] = None + + def refresh(self) -> typing.Set[bytes]: # returns list of new touched hashXs + prefix_db = self._db.prefix_db + new_mempool = {k.tx_hash: v.raw_tx for k, v in prefix_db.mempool_tx.iterate()} # TODO: make this more efficient + self.raw_mempool.clear() + self.raw_mempool.update(new_mempool) + + # hashXs = self.hashXs # hashX: [tx_hash, ...] + touched_hashXs = set() + + # Remove txs that aren't in mempool anymore + for tx_hash in set(self.txs).difference(self.raw_mempool.keys()): + tx = self.txs.pop(tx_hash) + tx_hashXs = {hashX for hashX, value in tx.in_pairs}.union({hashX for hashX, value in tx.out_pairs}) + for hashX in tx_hashXs: + if hashX in self.touched_hashXs and tx_hash in self.touched_hashXs[hashX]: + self.touched_hashXs[hashX].remove(tx_hash) + if not self.touched_hashXs[hashX]: + self.touched_hashXs.pop(hashX) + touched_hashXs.update(tx_hashXs) + + # Re-sync with the new set of hashes + tx_map = {} + for tx_hash, raw_tx in self.raw_mempool.items(): + if tx_hash in self.txs: + continue + tx, tx_size = Deserializer(raw_tx).read_tx_and_vsize() + # Convert the inputs and outputs into (hashX, value) pairs + # Drop generation-like inputs from MemPoolTx.prevouts + txin_pairs = tuple((txin.prev_hash, txin.prev_idx) + for txin in tx.inputs + if not txin.is_generation()) + txout_pairs = tuple((self.coin.hashX_from_txo(txout), txout.value) + for txout in tx.outputs if txout.pk_script) + + tx_map[tx_hash] = MemPoolTx(None, txin_pairs, txout_pairs, 0, tx_size, raw_tx) + + for tx_hash, tx in tx_map.items(): + prevouts = [] + # Look up the prevouts + for prev_hash, prev_index in tx.in_pairs: + if prev_hash in self.txs: # accepted mempool + utxo = self.txs[prev_hash].out_pairs[prev_index] + elif prev_hash in tx_map: # this set of changes + utxo = tx_map[prev_hash].out_pairs[prev_index] + else: # get it from the db + prev_tx_num = prefix_db.tx_num.get(prev_hash) + if not prev_tx_num: + continue + prev_tx_num = prev_tx_num.tx_num + hashX_val = prefix_db.hashX_utxo.get(prev_hash[:4], prev_tx_num, prev_index) + if not hashX_val: + continue + hashX = hashX_val.hashX + utxo_value = prefix_db.utxo.get(hashX, prev_tx_num, prev_index) + utxo = (hashX, utxo_value.amount) + prevouts.append(utxo) + + # Save the prevouts, compute the fee and accept the TX + tx.prevouts = tuple(prevouts) + # Avoid negative fees if dealing with generation-like transactions + # because some in_parts would be missing + tx.fee = max(0, (sum(v for _, v in tx.prevouts) - + sum(v for _, v in tx.out_pairs))) + self.txs[tx_hash] = tx + # print(f"added {tx_hash[::-1].hex()} reader to mempool") + + for hashX, value in itertools.chain(tx.prevouts, tx.out_pairs): + self.touched_hashXs[hashX].add(tx_hash) + touched_hashXs.add(hashX) + return touched_hashXs + + def transaction_summaries(self, hashX): + """Return a list of MemPoolTxSummary objects for the hashX.""" + result = [] + for tx_hash in self.touched_hashXs.get(hashX, ()): + if tx_hash not in self.txs: + continue # the tx hash for the touched address is an input that isn't in mempool anymore + tx = self.txs[tx_hash] + has_ui = any(hash in self.txs for hash, idx in tx.in_pairs) + result.append(MemPoolTxSummary(tx_hash, tx.fee, has_ui)) + return result + + def get_mempool_height(self, tx_hash: bytes) -> int: + # Height Progression + # -2: not broadcast + # -1: in 
mempool but has unconfirmed inputs + # 0: in mempool and all inputs confirmed + # +num: confirmed in a specific block (height) + if tx_hash not in self.txs: + return -2 + tx = self.txs[tx_hash] + unspent_inputs = any(hash in self.raw_mempool for hash, idx in tx.in_pairs) + if unspent_inputs: + return -1 + return 0 + + async def start(self, height, session_manager: 'SessionManager'): + self.session_manager = session_manager + await self._notify_sessions(height, set(), set()) + + async def on_mempool(self, touched, new_touched, height): + await self._notify_sessions(height, touched, new_touched) + + async def on_block(self, touched, height): + await self._notify_sessions(height, touched, set()) + + async def _notify_sessions(self, height, touched, new_touched): + """Notify sessions about height changes and touched addresses.""" + height_changed = height != self.session_manager.notified_height + if height_changed: + await self.session_manager._refresh_hsub_results(height) + + if not self.session_manager.sessions: + return + + if height_changed: + header_tasks = [ + session.send_notification('blockchain.headers.subscribe', (self.session_manager.hsub_results[session.subscribe_headers_raw], )) + for session in self.session_manager.sessions.values() if session.subscribe_headers + ] + if header_tasks: + self.logger.info(f'notify {len(header_tasks)} sessions of new header') + asyncio.create_task(asyncio.wait(header_tasks)) + for hashX in touched.intersection(self.session_manager.mempool_statuses.keys()): + self.session_manager.mempool_statuses.pop(hashX, None) + # self.bp._chain_executor + await asyncio.get_event_loop().run_in_executor( + self._db._executor, touched.intersection_update, self.session_manager.hashx_subscriptions_by_session.keys() + ) + + if touched or new_touched or (height_changed and self.session_manager.mempool_statuses): + notified_hashxs = 0 + session_hashxes_to_notify = defaultdict(list) + to_notify = touched if height_changed else new_touched + + for hashX in to_notify: + if hashX not in self.session_manager.hashx_subscriptions_by_session: + continue + for session_id in self.session_manager.hashx_subscriptions_by_session[hashX]: + session_hashxes_to_notify[session_id].append(hashX) + notified_hashxs += 1 + for session_id, hashXes in session_hashxes_to_notify.items(): + asyncio.create_task(self.session_manager.sessions[session_id].send_history_notifications(*hashXes)) + if session_hashxes_to_notify: + self.logger.info(f'notified {len(session_hashxes_to_notify)} sessions/{notified_hashxs:,d} touched addresses') diff --git a/scribe/hub/prometheus.py b/scribe/hub/prometheus.py new file mode 100644 index 0000000..3c09c49 --- /dev/null +++ b/scribe/hub/prometheus.py @@ -0,0 +1,68 @@ +import time +import logging +import asyncio +import asyncio.tasks +from aiohttp import web +from prometheus_client import generate_latest as prom_generate_latest +from prometheus_client import Counter, Histogram, Gauge + + +PROBES_IN_FLIGHT = Counter("probes_in_flight", "Number of loop probes in flight", namespace='asyncio') +PROBES_FINISHED = Counter("probes_finished", "Number of finished loop probes", namespace='asyncio') +PROBE_TIMES = Histogram("probe_times", "Loop probe times", namespace='asyncio') +TASK_COUNT = Gauge("running_tasks", "Number of running tasks", namespace='asyncio') + + +def get_loop_metrics(delay=1): + loop = asyncio.get_event_loop() + + def callback(started): + PROBE_TIMES.observe(time.perf_counter() - started - delay) + PROBES_FINISHED.inc() + + async def 
monitor_loop_responsiveness(): + while True: + now = time.perf_counter() + loop.call_later(delay, callback, now) + PROBES_IN_FLIGHT.inc() + TASK_COUNT.set(len(asyncio.tasks._all_tasks)) + await asyncio.sleep(delay) + + return loop.create_task(monitor_loop_responsiveness()) + + +class PrometheusServer: + def __init__(self, logger=None): + self.runner = None + self.logger = logger or logging.getLogger(__name__) + self._monitor_loop_task = None + + async def start(self, interface: str, port: int): + self.logger.info("start prometheus metrics") + prom_app = web.Application() + prom_app.router.add_get('/metrics', self.handle_metrics_get_request) + self.runner = web.AppRunner(prom_app) + await self.runner.setup() + + metrics_site = web.TCPSite(self.runner, interface, port, shutdown_timeout=.5) + await metrics_site.start() + self.logger.info( + 'prometheus metrics server listening on %s:%i', *metrics_site._server.sockets[0].getsockname()[:2] + ) + self._monitor_loop_task = get_loop_metrics() + + async def handle_metrics_get_request(self, request: web.Request): + try: + return web.Response( + text=prom_generate_latest().decode(), + content_type='text/plain; version=0.0.4' + ) + except Exception: + self.logger.exception('could not generate prometheus data') + raise + + async def stop(self): + if self._monitor_loop_task and not self._monitor_loop_task.done(): + self._monitor_loop_task.cancel() + self._monitor_loop_task = None + await self.runner.cleanup() diff --git a/scribe/hub/session.py b/scribe/hub/session.py new file mode 100644 index 0000000..dedb14a --- /dev/null +++ b/scribe/hub/session.py @@ -0,0 +1,1829 @@ +import os +import ssl +import math +import time +import codecs +import typing +import asyncio +import logging +import itertools +import collections +import inspect +from bisect import bisect_right +from asyncio import Event, sleep +from collections import defaultdict, namedtuple +from contextlib import suppress +from functools import partial, lru_cache +from elasticsearch import ConnectionTimeout +from prometheus_client import Counter, Info, Histogram, Gauge +from scribe.schema.result import Outputs +from scribe.base58 import Base58Error +from scribe.error import ResolveCensoredError, TooManyClaimSearchParametersError +from scribe import __version__, PROTOCOL_MIN, PROTOCOL_MAX, PROMETHEUS_NAMESPACE +from scribe.build_info import BUILD, COMMIT_HASH, DOCKER_TAG +from scribe.db import HubDB +from scribe.elasticsearch import SearchIndex +from scribe.common import sha256, hash_to_hex_str, hex_str_to_hash, HASHX_LEN, version_string, formatted_time +from scribe.common import protocol_version, RPCError, DaemonError, TaskGroup +from scribe.hub.jsonrpc import JSONRPCAutoDetect, JSONRPCConnection, JSONRPCv2, JSONRPC +from scribe.hub.common import BatchRequest, ProtocolError, Request, Batch, Notification +from scribe.hub.framer import NewlineFramer +if typing.TYPE_CHECKING: + from scribe.env import Env + from scribe.blockchain.daemon import LBCDaemon + from scribe.hub.mempool import MemPool + +BAD_REQUEST = 1 +DAEMON_ERROR = 2 + +log = logging.getLogger(__name__) + + + +SignatureInfo = namedtuple('SignatureInfo', 'min_args max_args ' + 'required_names other_names') + + +@lru_cache(256) +def signature_info(func): + params = inspect.signature(func).parameters + min_args = max_args = 0 + required_names = [] + other_names = [] + no_names = False + for p in params.values(): + if p.kind == p.POSITIONAL_OR_KEYWORD: + max_args += 1 + if p.default is p.empty: + min_args += 1 + required_names.append(p.name) 
+            else:
+                other_names.append(p.name)
+        elif p.kind == p.KEYWORD_ONLY:
+            other_names.append(p.name)
+        elif p.kind == p.VAR_POSITIONAL:
+            max_args = None
+        elif p.kind == p.VAR_KEYWORD:
+            other_names = any
+        elif p.kind == p.POSITIONAL_ONLY:
+            max_args += 1
+            if p.default is p.empty:
+                min_args += 1
+            no_names = True
+
+    if no_names:
+        other_names = None
+
+    return SignatureInfo(min_args, max_args, required_names, other_names)
+
+
+def handler_invocation(handler, request):
+    method, args = request.method, request.args
+    if handler is None:
+        raise RPCError(JSONRPC.METHOD_NOT_FOUND,
+                       f'unknown method "{method}"')
+
+    # We must test for too few and too many arguments.  How
+    # depends on whether the arguments were passed as a list or as
+    # a dictionary.
+    info = signature_info(handler)
+    if isinstance(args, (tuple, list)):
+        if len(args) < info.min_args:
+            s = '' if len(args) == 1 else 's'
+            raise RPCError.invalid_args(
+                f'{len(args)} argument{s} passed to method '
+                f'"{method}" but it requires {info.min_args}')
+        if info.max_args is not None and len(args) > info.max_args:
+            s = '' if len(args) == 1 else 's'
+            raise RPCError.invalid_args(
+                f'{len(args)} argument{s} passed to method '
+                f'"{method}" taking at most {info.max_args}')
+        return partial(handler, *args)
+
+    # Arguments passed by name
+    if info.other_names is None:
+        raise RPCError.invalid_args(f'method "{method}" cannot '
+                                    f'be called with named arguments')
+
+    missing = set(info.required_names).difference(args)
+    if missing:
+        s = '' if len(missing) == 1 else 's'
+        missing = ', '.join(sorted(f'"{name}"' for name in missing))
+        raise RPCError.invalid_args(f'method "{method}" requires '
+                                    f'parameter{s} {missing}')
+
+    if info.other_names is not any:
+        excess = set(args).difference(info.required_names)
+        excess = excess.difference(info.other_names)
+        if excess:
+            s = '' if len(excess) == 1 else 's'
+            excess = ', '.join(sorted(f'"{name}"' for name in excess))
+            raise RPCError.invalid_args(f'method "{method}" does not '
+                                        f'take parameter{s} {excess}')
+    return partial(handler, **args)
+
+
+def scripthash_to_hashX(scripthash: str) -> bytes:
+    try:
+        bin_hash = hex_str_to_hash(scripthash)
+        if len(bin_hash) == 32:
+            return bin_hash[:HASHX_LEN]
+    except Exception:
+        pass
+    raise RPCError(BAD_REQUEST, f'{scripthash} is not a valid script hash')
+
+
+def non_negative_integer(value) -> int:
+    """Return the param value if it is, or can be converted to, a
+    non-negative integer, otherwise raise an RPCError."""
+    try:
+        value = int(value)
+        if value >= 0:
+            return value
+    except (ValueError, TypeError):
+        pass
+    raise RPCError(BAD_REQUEST,
+                   f'{value} should be a non-negative integer')
+
+
+def assert_boolean(value) -> bool:
+    """Return the param value if it is a boolean, otherwise raise an RPCError."""
+    if value in (False, True):
+        return value
+    raise RPCError(BAD_REQUEST, f'{value} should be a boolean value')
+
+
+def assert_tx_hash(value: str) -> None:
+    """Raise an RPCError if the value is not a valid transaction
+    hash."""
+    try:
+        if len(bytes.fromhex(value)) == 32:
+            return
+    except Exception:
+        pass
+    raise RPCError(BAD_REQUEST, f'{value} should be a transaction hash')
+
+
+class Semaphores:
+    """Acquire a set of semaphores for the duration of an async with block."""
+
+    def __init__(self, semaphores):
+        self.semaphores = semaphores
+        self.acquired = []
+
+    async def __aenter__(self):
+        for semaphore in self.semaphores:
+            await semaphore.acquire()
+            self.acquired.append(semaphore)
+
+    async def __aexit__(self, exc_type, exc_value, traceback):
+        for semaphore in self.acquired:
+            semaphore.release()
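+
+# A usage sketch for Semaphores (illustrative; mirrors
+# SessionBase.semaphore() below):
+#
+#     async with Semaphores([session.group.semaphore]):
+#         result = await handler()  # runs while holding the group slot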
+ + +class SessionGroup: + + def __init__(self, gid: int): + self.gid = gid + # Concurrency per group + self.semaphore = asyncio.Semaphore(20) + + +NAMESPACE = f"{PROMETHEUS_NAMESPACE}_hub" +HISTOGRAM_BUCKETS = ( + .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf') +) + + +class SessionManager: + """Holds global state about all sessions.""" + + version_info_metric = Info( + 'build', 'Wallet server build info (e.g. version, commit hash)', namespace=NAMESPACE + ) + version_info_metric.info({ + 'build': BUILD, + "commit": COMMIT_HASH, + "docker_tag": DOCKER_TAG, + 'version': __version__, + "min_version": version_string(PROTOCOL_MIN), + "cpu_count": str(os.cpu_count()) + }) + session_count_metric = Gauge("session_count", "Number of connected client sessions", namespace=NAMESPACE, + labelnames=("version",)) + request_count_metric = Counter("requests_count", "Number of requests received", namespace=NAMESPACE, + labelnames=("method", "version")) + tx_request_count_metric = Counter("requested_transaction", "Number of transactions requested", namespace=NAMESPACE) + tx_replied_count_metric = Counter("replied_transaction", "Number of transactions responded", namespace=NAMESPACE) + urls_to_resolve_count_metric = Counter("urls_to_resolve", "Number of urls to resolve", namespace=NAMESPACE) + resolved_url_count_metric = Counter("resolved_url", "Number of resolved urls", namespace=NAMESPACE) + + interrupt_count_metric = Counter("interrupt", "Number of interrupted queries", namespace=NAMESPACE) + db_operational_error_metric = Counter( + "operational_error", "Number of queries that raised operational errors", namespace=NAMESPACE + ) + db_error_metric = Counter( + "internal_error", "Number of queries raising unexpected errors", namespace=NAMESPACE + ) + executor_time_metric = Histogram( + "executor_time", "SQLite executor times", namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS + ) + pending_query_metric = Gauge( + "pending_queries_count", "Number of pending and running sqlite queries", namespace=NAMESPACE + ) + + client_version_metric = Counter( + "clients", "Number of connections received per client version", + namespace=NAMESPACE, labelnames=("version",) + ) + address_history_metric = Histogram( + "address_history", "Time to fetch an address history", + namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS + ) + notifications_in_flight_metric = Gauge( + "notifications_in_flight", "Count of notifications in flight", + namespace=NAMESPACE + ) + notifications_sent_metric = Histogram( + "notifications_sent", "Time to send an address notification", + namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS + ) + + def __init__(self, env: 'Env', db: HubDB, mempool: 'MemPool', history_cache, resolve_cache, resolve_outputs_cache, + daemon: 'LBCDaemon', shutdown_event: asyncio.Event, + on_available_callback: typing.Callable[[], None], on_unavailable_callback: typing.Callable[[], None]): + env.max_send = max(350000, env.max_send) + self.env = env + self.db = db + self.on_available_callback = on_available_callback + self.on_unavailable_callback = on_unavailable_callback + self.daemon = daemon + self.mempool = mempool + self.shutdown_event = shutdown_event + self.logger = logging.getLogger(__name__) + self.servers: typing.Dict[str, asyncio.AbstractServer] = {} + self.sessions: typing.Dict[int, 'LBRYElectrumX'] = {} + self.hashx_subscriptions_by_session: typing.DefaultDict[str, typing.Set[int]] = defaultdict(set) + self.mempool_statuses = {} + self.cur_group = SessionGroup(0) + 
self.txs_sent = 0 + self.start_time = time.time() + self.history_cache = history_cache + self.resolve_cache = resolve_cache + self.resolve_outputs_cache = resolve_outputs_cache + self.notified_height: typing.Optional[int] = None + # Cache some idea of room to avoid recounting on each subscription + self.subs_room = 0 + + self.session_event = Event() + + # Search index + self.search_index = SearchIndex( + self.env.es_index_prefix, self.env.database_query_timeout, + elastic_host=env.elastic_host, elastic_port=env.elastic_port + ) + self.running = False + + async def _start_server(self, kind, *args, **kw_args): + loop = asyncio.get_event_loop() + + if kind == 'RPC': + protocol_class = LocalRPC + else: + protocol_class = LBRYElectrumX + protocol_factory = partial(protocol_class, self, self.db, + self.mempool, kind) + + host, port = args[:2] + try: + self.servers[kind] = await loop.create_server(protocol_factory, *args, **kw_args) + except OSError as e: # don't suppress CancelledError + self.logger.error(f'{kind} server failed to listen on {host}:' + f'{port:d} :{e!r}') + else: + self.logger.info(f'{kind} server listening on {host}:{port:d}') + + async def _start_external_servers(self): + """Start listening on TCP and SSL ports, but only if the respective + port was given in the environment. + """ + env = self.env + host = env.cs_host(for_rpc=False) + if env.tcp_port is not None: + await self._start_server('TCP', host, env.tcp_port) + if env.ssl_port is not None: + sslc = ssl.SSLContext(ssl.PROTOCOL_TLS) + sslc.load_cert_chain(env.ssl_certfile, keyfile=env.ssl_keyfile) + await self._start_server('SSL', host, env.ssl_port, ssl=sslc) + + async def _close_servers(self, kinds): + """Close the servers of the given kinds (TCP etc.).""" + if kinds: + self.logger.info('closing down {} listening servers' + .format(', '.join(kinds))) + for kind in kinds: + server = self.servers.pop(kind, None) + if server: + server.close() + await server.wait_closed() + + async def _manage_servers(self): + paused = False + max_sessions = self.env.max_sessions + low_watermark = int(max_sessions * 0.95) + while True: + await self.session_event.wait() + self.session_event.clear() + if not paused and len(self.sessions) >= max_sessions: + self.on_unavailable_callback() + self.logger.info(f'maximum sessions {max_sessions:,d} ' + f'reached, stopping new connections until ' + f'count drops to {low_watermark:,d}') + await self._close_servers(['TCP', 'SSL']) + paused = True + # Start listening for incoming connections if paused and + # session count has fallen + if paused and len(self.sessions) <= low_watermark: + self.on_available_callback() + self.logger.info('resuming listening for incoming connections') + await self._start_external_servers() + paused = False + + def _group_map(self): + group_map = defaultdict(list) + for session in self.sessions.values(): + group_map[session.group].append(session) + return group_map + + def _sub_count(self) -> int: + return sum(s.sub_count() for s in self.sessions.values()) + + def _lookup_session(self, session_id): + try: + session_id = int(session_id) + except Exception: + pass + else: + for session in self.sessions.values(): + if session.session_id == session_id: + return session + return None + + async def _for_each_session(self, session_ids, operation): + if not isinstance(session_ids, list): + raise RPCError(BAD_REQUEST, 'expected a list of session IDs') + + result = [] + for session_id in session_ids: + session = self._lookup_session(session_id) + if session: + result.append(await 
operation(session)) + else: + result.append(f'unknown session: {session_id}') + return result + + async def _clear_stale_sessions(self): + """Cut off sessions that haven't done anything for 10 minutes.""" + session_timeout = self.env.session_timeout + while True: + await sleep(session_timeout // 10) + stale_cutoff = time.perf_counter() - session_timeout + stale_sessions = [session for session in self.sessions.values() + if session.last_recv < stale_cutoff] + if stale_sessions: + text = ', '.join(str(session.session_id) + for session in stale_sessions) + self.logger.info(f'closing stale connections {text}') + # Give the sockets some time to close gracefully + if stale_sessions: + await asyncio.wait([ + session.close(force_after=session_timeout // 10) for session in stale_sessions + ]) + + # Consolidate small groups + group_map = self._group_map() + groups = [group for group, sessions in group_map.items() + if len(sessions) <= 5] # fixme: apply session cost here + if len(groups) > 1: + new_group = groups[-1] + for group in groups: + for session in group_map[group]: + session.group = new_group + + def _get_info(self): + """A summary of server state.""" + group_map = self._group_map() + method_counts = collections.defaultdict(int) + error_count = 0 + logged = 0 + paused = 0 + pending_requests = 0 + closing = 0 + + for s in self.sessions.values(): + error_count += s.errors + if s.log_me: + logged += 1 + if not s._can_send.is_set(): + paused += 1 + pending_requests += s.count_pending_items() + if s.is_closing(): + closing += 1 + for request, _ in s.connection._requests.values(): + method_counts[request.method] += 1 + return { + 'closing': closing, + 'daemon': self.daemon.logged_url(), + 'daemon_height': self.daemon.cached_height(), + 'db_height': self.db.db_height, + 'errors': error_count, + 'groups': len(group_map), + 'logged': logged, + 'paused': paused, + 'pid': os.getpid(), + 'peers': [], + 'requests': pending_requests, + 'method_counts': method_counts, + 'sessions': self.session_count(), + 'subs': self._sub_count(), + 'txs_sent': self.txs_sent, + 'uptime': formatted_time(time.time() - self.start_time), + 'version': __version__, + } + + def _group_data(self): + """Returned to the RPC 'groups' call.""" + result = [] + group_map = self._group_map() + for group, sessions in group_map.items(): + result.append([group.gid, + len(sessions), + sum(s.bw_charge for s in sessions), + sum(s.count_pending_items() for s in sessions), + sum(s.txs_sent for s in sessions), + sum(s.sub_count() for s in sessions), + sum(s.recv_count for s in sessions), + sum(s.recv_size for s in sessions), + sum(s.send_count for s in sessions), + sum(s.send_size for s in sessions), + ]) + return result + + async def _electrum_and_raw_headers(self, height): + raw_header = await self.raw_header(height) + electrum_header = self.env.coin.electrum_header(raw_header, height) + return electrum_header, raw_header + + async def _refresh_hsub_results(self, height): + """Refresh the cached header subscription responses to be for height, + and record that as notified_height. + """ + # Paranoia: a reorg could race and leave db_height lower + height = min(height, self.db.db_height) + electrum, raw = await self._electrum_and_raw_headers(height) + self.hsub_results = (electrum, {'hex': raw.hex(), 'height': height}) + self.notified_height = height + + # --- LocalRPC command handlers + + async def rpc_add_peer(self, real_name): + """Add a peer. 
+ + real_name: "bch.electrumx.cash t50001 s50002" for example + """ + await self._notify_peer(real_name) + return f"peer '{real_name}' added" + + async def rpc_disconnect(self, session_ids): + """Disconnect sessions. + + session_ids: array of session IDs + """ + async def close(session): + """Close the session's transport.""" + await session.close(force_after=2) + return f'disconnected {session.session_id}' + + return await self._for_each_session(session_ids, close) + + async def rpc_log(self, session_ids): + """Toggle logging of sessions. + + session_ids: array of session IDs + """ + async def toggle_logging(session): + """Toggle logging of the session.""" + session.toggle_logging() + return f'log {session.session_id}: {session.log_me}' + + return await self._for_each_session(session_ids, toggle_logging) + + async def rpc_daemon_url(self, daemon_url): + """Replace the daemon URL.""" + daemon_url = daemon_url or self.env.daemon_url + try: + self.daemon.set_url(daemon_url) + except Exception as e: + raise RPCError(BAD_REQUEST, f'an error occurred: {e!r}') + return f'now using daemon at {self.daemon.logged_url()}' + + async def rpc_stop(self): + """Shut down the server cleanly.""" + self.shutdown_event.set() + return 'stopping' + + async def rpc_getinfo(self): + """Return summary information about the server process.""" + return self._get_info() + + async def rpc_groups(self): + """Return statistics about the session groups.""" + return self._group_data() + + async def rpc_peers(self): + """Return a list of data about server peers.""" + return self.env.peer_hubs + + async def rpc_query(self, items, limit): + """Return a list of data about server peers.""" + coin = self.env.coin + db = self.db + lines = [] + + def arg_to_hashX(arg): + try: + script = bytes.fromhex(arg) + lines.append(f'Script: {arg}') + return coin.hashX_from_script(script) + except ValueError: + pass + + try: + hashX = coin.address_to_hashX(arg) + except Base58Error as e: + lines.append(e.args[0]) + return None + lines.append(f'Address: {arg}') + return hashX + + for arg in items: + hashX = arg_to_hashX(arg) + if not hashX: + continue + n = None + history = await db.limited_history(hashX, limit=limit) + for n, (tx_hash, height) in enumerate(history): + lines.append(f'History #{n:,d}: height {height:,d} ' + f'tx_hash {hash_to_hex_str(tx_hash)}') + if n is None: + lines.append('No history found') + n = None + utxos = await db.all_utxos(hashX) + for n, utxo in enumerate(utxos, start=1): + lines.append(f'UTXO #{n:,d}: tx_hash ' + f'{hash_to_hex_str(utxo.tx_hash)} ' + f'tx_pos {utxo.tx_pos:,d} height ' + f'{utxo.height:,d} value {utxo.value:,d}') + if n == limit: + break + if n is None: + lines.append('No UTXOs found') + + balance = sum(utxo.value for utxo in utxos) + lines.append(f'Balance: {coin.decimal_value(balance):,f} ' + f'{coin.SHORTNAME}') + + return lines + + # async def rpc_reorg(self, count): + # """Force a reorg of the given number of blocks. + # + # count: number of blocks to reorg + # """ + # count = non_negative_integer(count) + # if not self.bp.force_chain_reorg(count): + # raise RPCError(BAD_REQUEST, 'still catching up with daemon') + # return f'scheduled a reorg of {count:,d} blocks' + + # --- External Interface + + async def serve(self, mempool, server_listening_event): + """Start the RPC server if enabled. 
When the event is triggered, + start TCP and SSL servers.""" + try: + if self.env.rpc_port is not None: + await self._start_server('RPC', self.env.cs_host(for_rpc=True), + self.env.rpc_port) + self.logger.info(f'max session count: {self.env.max_sessions:,d}') + self.logger.info(f'session timeout: ' + f'{self.env.session_timeout:,d} seconds') + self.logger.info(f'max response size {self.env.max_send:,d} bytes') + if self.env.drop_client is not None: + self.logger.info(f'drop clients matching: {self.env.drop_client.pattern}') + # Start notifications; initialize hsub_results + await mempool.start(self.db.db_height, self) + await self.start_other() + await self._start_external_servers() + server_listening_event.set() + self.on_available_callback() + # Peer discovery should start after the external servers + # because we connect to ourself + await asyncio.wait([ + self._clear_stale_sessions(), + self._manage_servers() + ]) + except Exception as err: + if not isinstance(err, asyncio.CancelledError): + log.exception("hub server died") + raise err + finally: + await self._close_servers(list(self.servers.keys())) + log.info("disconnect %i sessions", len(self.sessions)) + if self.sessions: + await asyncio.wait([ + session.close(force_after=1) for session in self.sessions.values() + ]) + await self.stop_other() + + async def start_other(self): + self.running = True + + async def stop_other(self): + self.running = False + + def session_count(self) -> int: + """The number of connections that we've sent something to.""" + return len(self.sessions) + + async def daemon_request(self, method, *args): + """Catch a DaemonError and convert it to an RPCError.""" + try: + return await getattr(self.daemon, method)(*args) + except DaemonError as e: + raise RPCError(DAEMON_ERROR, f'daemon error: {e!r}') from None + + async def raw_header(self, height): + """Return the binary header at the given height.""" + try: + return await self.db.raw_header(height) + except IndexError: + raise RPCError(BAD_REQUEST, f'height {height:,d} ' + 'out of range') from None + + async def electrum_header(self, height): + """Return the deserialized header at the given height.""" + electrum_header, _ = await self._electrum_and_raw_headers(height) + return electrum_header + + async def broadcast_transaction(self, raw_tx): + hex_hash = await self.daemon.broadcast_transaction(raw_tx) + self.txs_sent += 1 + return hex_hash + + async def limited_history(self, hashX): + """A caching layer.""" + if hashX not in self.history_cache: + # History DoS limit. Each element of history is about 99 + # bytes when encoded as JSON. This limits resource usage + # on bloated history requests, and uses a smaller divisor + # so large requests are logged before refusing them. 
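+            # e.g. with the enforced minimum max_send of 350,000 bytes
+            # (see __init__ above), the limit works out to 3,608 entries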
+ limit = self.env.max_send // 97 + self.history_cache[hashX] = await self.db.limited_history(hashX, limit=limit) + return self.history_cache[hashX] + + def _notify_peer(self, peer): + notify_tasks = [ + session.send_notification('blockchain.peers.subscribe', [peer]) + for session in self.sessions.values() if session.subscribe_peers + ] + if notify_tasks: + self.logger.info(f'notify {len(notify_tasks)} sessions of new peers') + asyncio.create_task(asyncio.wait(notify_tasks)) + + def add_session(self, session): + self.sessions[id(session)] = session + self.session_event.set() + gid = int(session.start_time - self.start_time) // 900 + if self.cur_group.gid != gid: + self.cur_group = SessionGroup(gid) + return self.cur_group + + def remove_session(self, session): + """Remove a session from our sessions list if there.""" + session_id = id(session) + for hashX in session.hashX_subs: + sessions = self.hashx_subscriptions_by_session[hashX] + sessions.remove(session_id) + if not sessions: + self.hashx_subscriptions_by_session.pop(hashX) + self.sessions.pop(session_id) + self.session_event.set() + + +class SessionBase(asyncio.Protocol): + """Base class of ElectrumX JSON sessions. + + Each session runs its tasks in asynchronous parallelism with other + sessions. + """ + + MAX_CHUNK_SIZE = 40960 + session_counter = itertools.count() + request_handlers: typing.Dict[str, typing.Callable] = {} + version = '0.5.7' + + RESPONSE_TIMES = Histogram("response_time", "Response times", namespace=NAMESPACE, + labelnames=("method", "version"), buckets=HISTOGRAM_BUCKETS) + NOTIFICATION_COUNT = Counter("notification", "Number of notifications sent (for subscriptions)", + namespace=NAMESPACE, labelnames=("method", "version")) + REQUEST_ERRORS_COUNT = Counter( + "request_error", "Number of requests that returned errors", namespace=NAMESPACE, + labelnames=("method", "version") + ) + RESET_CONNECTIONS = Counter( + "reset_clients", "Number of reset connections by client version", + namespace=NAMESPACE, labelnames=("version",) + ) + max_errors = 10 + + def __init__(self, session_manager: SessionManager, db: 'HubDB', mempool: 'MemPool', kind: str): + connection = JSONRPCConnection(JSONRPCAutoDetect) + self.env = session_manager.env + self.framer = self.default_framer() + self.loop = asyncio.get_event_loop() + self.logger = logging.getLogger(self.__class__.__name__) + self.transport = None + # Set when a connection is made + self._address = None + self._proxy_address = None + # For logger.debug messages + self.verbosity = 0 + # Cleared when the send socket is full + self._can_send = Event() + self._can_send.set() + self._pm_task = None + self._task_group = TaskGroup(self.loop) + # Force-close a connection if a send doesn't succeed in this time + self.max_send_delay = 60 + # Statistics. The RPC object also keeps its own statistics. + self.start_time = time.perf_counter() + self.errors = 0 + self.send_count = 0 + self.send_size = 0 + self.last_send = self.start_time + self.recv_count = 0 + self.recv_size = 0 + self.last_recv = self.start_time + self.last_packet_received = self.start_time + self.connection = connection or self.default_connection() + self.client_version = 'unknown' + + self.logger = logging.getLogger(__name__) + self.session_manager = session_manager + self.db = db + self.mempool = mempool + self.kind = kind # 'RPC', 'TCP' etc. 
+ self.coin = self.env.coin + self.anon_logs = self.env.anon_logs + self.txs_sent = 0 + self.log_me = False + self.daemon_request = self.session_manager.daemon_request + # Hijack the connection so we can log messages + self._receive_message_orig = self.connection.receive_message + self.connection.receive_message = self.receive_message + + async def _limited_wait(self, secs): + try: + await asyncio.wait_for(self._can_send.wait(), secs) + except asyncio.TimeoutError: + self.abort() + raise asyncio.TimeoutError(f'task timed out after {secs}s') + + async def _send_message(self, message): + if not self._can_send.is_set(): + await self._limited_wait(self.max_send_delay) + if not self.is_closing(): + framed_message = self.framer.frame(message) + self.send_size += len(framed_message) + self.send_count += 1 + self.last_send = time.perf_counter() + if self.verbosity >= 4: + self.logger.debug(f'Sending framed message {framed_message}') + self.transport.write(framed_message) + + def _bump_errors(self): + self.errors += 1 + if self.errors >= self.max_errors: + # Don't await self.close() because that is self-cancelling + self._close() + + def _close(self): + if self.transport: + self.transport.close() + + def peer_address(self): + """Returns the peer's address (Python networking address), or None if + no connection or an error. + + This is the result of socket.getpeername() when the connection + was made. + """ + return self._address + + def is_closing(self): + """Return True if the connection is closing.""" + return not self.transport or self.transport.is_closing() + + def abort(self): + """Forcefully close the connection.""" + if self.transport: + self.transport.abort() + + # TODO: replace with synchronous_close + async def close(self, *, force_after=30): + """Close the connection and return when closed.""" + self._close() + if self._pm_task: + with suppress(asyncio.CancelledError): + await asyncio.wait([self._pm_task], timeout=force_after) + self.abort() + await self._pm_task + + def synchronous_close(self): + self._close() + if self._pm_task and not self._pm_task.done(): + self._pm_task.cancel() + + async def _receive_messages(self): + while not self.is_closing(): + try: + message = await self.framer.receive_message() + except MemoryError: + self.logger.warning('received oversized message from %s:%s, dropping connection', + self._address[0], self._address[1]) + self.RESET_CONNECTIONS.labels(version=self.client_version).inc() + self._close() + return + + self.last_recv = time.perf_counter() + self.recv_count += 1 + + try: + requests = self.connection.receive_message(message) + except ProtocolError as e: + self.logger.debug(f'{e}') + if e.error_message: + await self._send_message(e.error_message) + if e.code == JSONRPC.PARSE_ERROR: + self.max_errors = 0 + self._bump_errors() + else: + for request in requests: + await self._task_group.add(self._handle_request(request)) + + async def _handle_request(self, request): + start = time.perf_counter() + try: + result = await self.handle_request(request) + except (ProtocolError, RPCError) as e: + result = e + except asyncio.CancelledError: + raise + except Exception: + reqstr = str(request) + self.logger.exception(f'exception handling {reqstr[:16_000]}') + result = RPCError(JSONRPC.INTERNAL_ERROR, + 'internal server error') + if isinstance(request, Request): + message = request.send_result(result) + self.RESPONSE_TIMES.labels( + method=request.method, + version=self.client_version + ).observe(time.perf_counter() - start) + if message: + await 
self._send_message(message) + if isinstance(result, Exception): + self._bump_errors() + self.REQUEST_ERRORS_COUNT.labels( + method=request.method, + version=self.client_version + ).inc() + + # External API + def default_connection(self): + """Return a default connection if the user provides none.""" + return JSONRPCConnection(JSONRPCv2) + + async def send_request(self, method, args=()): + """Send an RPC request over the network.""" + if self.is_closing(): + raise asyncio.TimeoutError("Trying to send request on a recently dropped connection.") + message, event = self.connection.send_request(Request(method, args)) + await self._send_message(message) + await event.wait() + result = event.result + if isinstance(result, Exception): + raise result + return result + + async def send_notification(self, method, args=()) -> bool: + """Send an RPC notification over the network.""" + message = self.connection.send_notification(Notification(method, args)) + self.NOTIFICATION_COUNT.labels(method=method, version=self.client_version).inc() + try: + await self._send_message(message) + return True + except asyncio.TimeoutError: + self.logger.info("timeout sending address notification to %s", self.peer_address_str(for_log=True)) + self.abort() + return False + + async def send_notifications(self, notifications) -> bool: + """Send a batch of RPC notifications over the network.""" + message, _ = self.connection.send_batch(notifications) + try: + await self._send_message(message) + return True + except asyncio.TimeoutError: + self.logger.info("timeout sending address notification to %s", self.peer_address_str(for_log=True)) + self.abort() + return False + + def send_batch(self, raise_errors=False): + """Return a BatchRequest. Intended to be used like so: + + async with session.send_batch() as batch: + batch.add_request("method1") + batch.add_request("sum", (x, y)) + batch.add_notification("updated") + + for result in batch.results: + ... + + Note that in some circumstances exceptions can be raised; see + BatchRequest doc string.
+ """ + return BatchRequest(self, raise_errors) + + def data_received(self, framed_message): + """Called by asyncio when a message comes in.""" + self.last_packet_received = time.perf_counter() + if self.verbosity >= 4: + self.logger.debug(f'Received framed message {framed_message}') + self.recv_size += len(framed_message) + self.framer.received_bytes(framed_message) + + def pause_writing(self): + """Transport calls when the send buffer is full.""" + if not self.is_closing(): + self._can_send.clear() + self.transport.pause_reading() + + def resume_writing(self): + """Transport calls when the send buffer has room.""" + if not self._can_send.is_set(): + self._can_send.set() + self.transport.resume_reading() + + def default_framer(self): + return NewlineFramer(self.env.max_receive) + + def peer_address_str(self, *, for_log=True): + """Returns the peer's IP address and port as a human-readable + string, respecting anon logs if the output is for a log.""" + if for_log and self.anon_logs: + return 'xx.xx.xx.xx:xx' + if not self._address: + return 'unknown' + ip_addr_str, port = self._address[:2] + if ':' in ip_addr_str: + return f'[{ip_addr_str}]:{port}' + else: + return f'{ip_addr_str}:{port}' + + def receive_message(self, message): + if self.log_me: + self.logger.info(f'processing {message}') + return self._receive_message_orig(message) + + def toggle_logging(self): + self.log_me = not self.log_me + + def connection_made(self, transport): + """Handle an incoming client connection.""" + self.transport = transport + # This would throw if called on a closed SSL transport. Fixed + # in asyncio in Python 3.6.1 and 3.5.4 + peer_address = transport.get_extra_info('peername') + # If the Socks proxy was used then _address is already set to + # the remote address + if self._address: + self._proxy_address = peer_address + else: + self._address = peer_address + self._pm_task = self.loop.create_task(self._receive_messages()) + + self.session_id = next(self.session_counter) + context = {'conn_id': f'{self.session_id}'} + self.logger = logging.getLogger(__name__) #util.ConnectionLogger(self.logger, context) + self.group = self.session_manager.add_session(self) + self.session_manager.session_count_metric.labels(version=self.client_version).inc() + peer_addr_str = self.peer_address_str() + self.logger.info(f'{self.kind} {peer_addr_str}, ' + f'{self.session_manager.session_count():,d} total') + + def connection_lost(self, exc): + """Handle client disconnection.""" + self.connection.raise_pending_requests(exc) + self._address = None + self.transport = None + self._task_group.cancel() + if self._pm_task: + self._pm_task.cancel() + # Release waiting tasks + self._can_send.set() + + self.session_manager.remove_session(self) + self.session_manager.session_count_metric.labels(version=self.client_version).dec() + msg = '' + if not self._can_send.is_set(): + msg += ' whilst paused' + if self.send_size >= 1024*1024: + msg += ('. Sent {:,d} bytes in {:,d} messages' + .format(self.send_size, self.send_count)) + if msg: + msg = 'disconnected' + msg + self.logger.info(msg) + + def count_pending_items(self): + return len(self.connection.pending_requests()) + + def semaphore(self): + return Semaphores([self.group.semaphore]) + + def sub_count(self): + return 0 + + async def handle_request(self, request): + """Handle an incoming request. ElectrumX doesn't receive + notifications from client sessions. 
+ """ + self.session_manager.request_count_metric.labels(method=request.method, version=self.client_version).inc() + if isinstance(request, Request): + handler = self.request_handlers.get(request.method) + handler = partial(handler, self) + else: + handler = None + coro = handler_invocation(handler, request)() + return await coro + + +class LBRYElectrumX(SessionBase): + """A TCP server that handles incoming Electrum connections.""" + + PROTOCOL_MIN = PROTOCOL_MIN + PROTOCOL_MAX = PROTOCOL_MAX + max_errors = math.inf # don't disconnect people for errors! let them happen... + version = __version__ + cached_server_features = {} + + @classmethod + def initialize_request_handlers(cls): + cls.request_handlers.update({ + 'blockchain.block.get_chunk': cls.block_get_chunk, + 'blockchain.block.get_header': cls.block_get_header, + 'blockchain.estimatefee': cls.estimatefee, + 'blockchain.relayfee': cls.relayfee, + # 'blockchain.scripthash.get_balance': cls.scripthash_get_balance, + 'blockchain.scripthash.get_history': cls.scripthash_get_history, + 'blockchain.scripthash.get_mempool': cls.scripthash_get_mempool, + # 'blockchain.scripthash.listunspent': cls.scripthash_listunspent, + 'blockchain.scripthash.subscribe': cls.scripthash_subscribe, + 'blockchain.transaction.broadcast': cls.transaction_broadcast, + 'blockchain.transaction.get': cls.transaction_get, + 'blockchain.transaction.get_batch': cls.transaction_get_batch, + 'blockchain.transaction.info': cls.transaction_info, + 'blockchain.transaction.get_merkle': cls.transaction_merkle, + # 'server.add_peer': cls.add_peer, + 'server.banner': cls.banner, + 'server.payment_address': cls.payment_address, + 'server.donation_address': cls.donation_address, + 'server.features': cls.server_features_async, + 'server.peers.subscribe': cls.peers_subscribe, + 'server.version': cls.server_version, + 'blockchain.transaction.get_height': cls.transaction_get_height, + 'blockchain.claimtrie.search': cls.claimtrie_search, + 'blockchain.claimtrie.resolve': cls.claimtrie_resolve, + 'blockchain.claimtrie.getclaimbyid': cls.claimtrie_getclaimbyid, + # 'blockchain.claimtrie.getclaimsbyids': cls.claimtrie_getclaimsbyids, + 'blockchain.block.get_server_height': cls.get_server_height, + 'mempool.get_fee_histogram': cls.mempool_compact_histogram, + 'blockchain.block.headers': cls.block_headers, + 'server.ping': cls.ping, + 'blockchain.headers.subscribe': cls.headers_subscribe_False, + # 'blockchain.address.get_balance': cls.address_get_balance, + 'blockchain.address.get_history': cls.address_get_history, + 'blockchain.address.get_mempool': cls.address_get_mempool, + # 'blockchain.address.listunspent': cls.address_listunspent, + 'blockchain.address.subscribe': cls.address_subscribe, + 'blockchain.address.unsubscribe': cls.address_unsubscribe, + }) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if not LBRYElectrumX.request_handlers: + LBRYElectrumX.initialize_request_handlers() + if not LBRYElectrumX.cached_server_features: + LBRYElectrumX.set_server_features(self.env) + self.subscribe_headers = False + self.subscribe_headers_raw = False + self.subscribe_peers = False + self.connection.max_response_size = self.env.max_send + self.hashX_subs = {} + self.sv_seen = False + self.protocol_tuple = self.PROTOCOL_MIN + self.protocol_string = None + self.daemon = self.session_manager.daemon + self.db: HubDB = self.session_manager.db + + @classmethod + def protocol_min_max_strings(cls): + return [version_string(ver) + for ver in (cls.PROTOCOL_MIN, 
cls.PROTOCOL_MAX)] + + @classmethod + def set_server_features(cls, env): + """Return the server features dictionary.""" + min_str, max_str = cls.protocol_min_max_strings() + cls.cached_server_features.update({ + 'hosts': env.hosts_dict(), + 'pruning': None, + 'server_version': cls.version, + 'protocol_min': min_str, + 'protocol_max': max_str, + 'genesis_hash': env.coin.GENESIS_HASH, + 'description': env.description, + 'payment_address': env.payment_address, + 'donation_address': env.donation_address, + 'daily_fee': env.daily_fee, + 'hash_function': 'sha256', + 'trending_algorithm': 'fast_ar' + }) + + async def server_features_async(self): + return self.cached_server_features + + @classmethod + def server_version_args(cls): + """The arguments to a server.version RPC call to a peer.""" + return [cls.version, cls.protocol_min_max_strings()] + + def protocol_version_string(self): + return version_string(self.protocol_tuple) + + def sub_count(self): + return len(self.hashX_subs) + + async def get_hashX_status(self, hashX: bytes): + mempool_history = self.mempool.transaction_summaries(hashX) + history = ''.join(f'{hash_to_hex_str(tx_hash)}:' + f'{height:d}:' + for tx_hash, height in await self.session_manager.limited_history(hashX)) + history += ''.join(f'{hash_to_hex_str(tx.hash)}:' + f'{-tx.has_unconfirmed_inputs:d}:' + for tx in mempool_history) + if history: + status = sha256(history.encode()).hex() + else: + status = None + return history, status, len(mempool_history) > 0 + + async def send_history_notifications(self, *hashXes: typing.Iterable[bytes]): + notifications = [] + for hashX in hashXes: + alias = self.hashX_subs[hashX] + if len(alias) == 64: + method = 'blockchain.scripthash.subscribe' + else: + method = 'blockchain.address.subscribe' + start = time.perf_counter() + history, status, mempool_status = await self.get_hashX_status(hashX) + if mempool_status: + self.session_manager.mempool_statuses[hashX] = status + else: + self.session_manager.mempool_statuses.pop(hashX, None) + + self.session_manager.address_history_metric.observe(time.perf_counter() - start) + notifications.append((method, (alias, status))) + + start = time.perf_counter() + self.session_manager.notifications_in_flight_metric.inc() + for method, args in notifications: + self.NOTIFICATION_COUNT.labels(method=method, version=self.client_version).inc() + try: + await self.send_notifications( + Batch([Notification(method, (alias, status)) for (method, (alias, status)) in notifications]) + ) + self.session_manager.notifications_sent_metric.observe(time.perf_counter() - start) + finally: + self.session_manager.notifications_in_flight_metric.dec() + + # def get_metrics_or_placeholder_for_api(self, query_name): + # """ Do not hold on to a reference to the metrics + # returned by this method past an `await` or + # you may be working with a stale metrics object. 
+ # """ + # if self.env.track_metrics: + # # return self.session_manager.metrics.for_api(query_name) + # else: + # return APICallMetrics(query_name) + + + # async def run_and_cache_query(self, query_name, kwargs): + # start = time.perf_counter() + # if isinstance(kwargs, dict): + # kwargs['release_time'] = format_release_time(kwargs.get('release_time')) + # try: + # self.session_manager.pending_query_metric.inc() + # return await self.db.search_index.session_query(query_name, kwargs) + # except ConnectionTimeout: + # self.session_manager.interrupt_count_metric.inc() + # raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out') + # finally: + # self.session_manager.pending_query_metric.dec() + # self.session_manager.executor_time_metric.observe(time.perf_counter() - start) + + async def mempool_compact_histogram(self): + return [] #self.mempool.compact_fee_histogram() + + async def claimtrie_search(self, **kwargs): + start = time.perf_counter() + if 'release_time' in kwargs: + release_time = kwargs.pop('release_time') + release_times = release_time if isinstance(release_time, list) else [release_time] + try: + kwargs['release_time'] = [format_release_time(release_time) for release_time in release_times] + except ValueError: + pass + try: + self.session_manager.pending_query_metric.inc() + if 'channel' in kwargs: + channel_url = kwargs.pop('channel') + _, channel_claim, _, _ = await self.db.resolve(channel_url) + if not channel_claim or isinstance(channel_claim, (ResolveCensoredError, LookupError, ValueError)): + return Outputs.to_base64([], [], 0, None, None) + kwargs['channel_id'] = channel_claim.claim_hash.hex() + return await self.session_manager.search_index.cached_search(kwargs) + except ConnectionTimeout: + self.session_manager.interrupt_count_metric.inc() + raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out') + except TooManyClaimSearchParametersError as err: + await asyncio.sleep(2) + self.logger.warning("Got an invalid query from %s, for %s with more than %d elements.", + self.peer_address()[0], err.key, err.limit) + return RPCError(1, str(err)) + finally: + self.session_manager.pending_query_metric.dec() + self.session_manager.executor_time_metric.observe(time.perf_counter() - start) + + async def _cached_resolve_url(self, url): + if url not in self.session_manager.resolve_cache: + self.session_manager.resolve_cache[url] = await self.loop.run_in_executor(self.db._executor, self.db._resolve, url) + return self.session_manager.resolve_cache[url] + + async def claimtrie_resolve(self, *urls) -> str: + sorted_urls = tuple(sorted(urls)) + self.session_manager.urls_to_resolve_count_metric.inc(len(sorted_urls)) + try: + if sorted_urls in self.session_manager.resolve_outputs_cache: + return self.session_manager.resolve_outputs_cache[sorted_urls] + rows, extra = [], [] + for url in urls: + if url not in self.session_manager.resolve_cache: + self.session_manager.resolve_cache[url] = await self._cached_resolve_url(url) + stream, channel, repost, reposted_channel = self.session_manager.resolve_cache[url] + if isinstance(channel, ResolveCensoredError): + rows.append(channel) + extra.append(channel.censor_row) + elif isinstance(stream, ResolveCensoredError): + rows.append(stream) + extra.append(stream.censor_row) + elif channel and not stream: + rows.append(channel) + # print("resolved channel", channel.name.decode()) + if repost: + extra.append(repost) + if reposted_channel: + extra.append(reposted_channel) + elif stream: + # print("resolved stream", stream.name.decode()) + 
rows.append(stream) + if channel: + # print("and channel", channel.name.decode()) + extra.append(channel) + if repost: + extra.append(repost) + if reposted_channel: + extra.append(reposted_channel) + await asyncio.sleep(0) + self.session_manager.resolve_outputs_cache[sorted_urls] = result = await self.loop.run_in_executor( + None, Outputs.to_base64, rows, extra, 0, None, None + ) + return result + finally: + self.session_manager.resolved_url_count_metric.inc(len(sorted_urls)) + + async def get_server_height(self): + return self.db.db_height + + async def transaction_get_height(self, tx_hash): + self.assert_tx_hash(tx_hash) + + def get_height(): + v = self.db.prefix_db.tx_num.get(tx_hash) + if v: + return bisect_right(self.db.tx_counts, v.tx_num) + return self.mempool.get_mempool_height(tx_hash) + + return await asyncio.get_event_loop().run_in_executor(self.db._executor, get_height) + + async def claimtrie_getclaimbyid(self, claim_id): + rows = [] + extra = [] + stream = await self.db.fs_getclaimbyid(claim_id) + if not stream: + stream = LookupError(f"Could not find claim at {claim_id}") + rows.append(stream) + return Outputs.to_base64(rows, extra, 0, None, None) + + def assert_tx_hash(self, value): + '''Raise an RPCError if the value is not a valid transaction + hash.''' + try: + if len(bytes.fromhex(value)) == 32: + return + except Exception: + pass + raise RPCError(1, f'{value} should be a transaction hash') + + async def subscribe_headers_result(self): + """The result of a header subscription or notification.""" + return self.session_manager.hsub_results[self.subscribe_headers_raw] + + async def _headers_subscribe(self, raw): + """Subscribe to get headers of new blocks.""" + self.subscribe_headers_raw = assert_boolean(raw) + self.subscribe_headers = True + return await self.subscribe_headers_result() + + async def headers_subscribe(self): + """Subscribe to get raw headers of new blocks.""" + return await self._headers_subscribe(True) + + async def headers_subscribe_True(self, raw=True): + """Subscribe to get headers of new blocks.""" + return await self._headers_subscribe(raw) + + async def headers_subscribe_False(self, raw=False): + """Subscribe to get headers of new blocks.""" + return await self._headers_subscribe(raw) + + async def add_peer(self, features): + """Add a peer (but only if the peer resolves to the source).""" + return await self.peer_mgr.on_add_peer(features, self.peer_address()) + + async def peers_subscribe(self): + """Return the server peers as a list of (ip, host, details) tuples.""" + self.subscribe_peers = True + return self.env.peer_hubs + + async def address_status(self, hashX): + """Returns an address status. + + Status is a hex string, but must be None if there is no history. 
+ """ + # Note history is ordered and mempool unordered in electrum-server + # For mempool, height is -1 if it has unconfirmed inputs, otherwise 0 + _, status, has_mempool_history = await self.get_hashX_status(hashX) + if has_mempool_history: + self.session_manager.mempool_statuses[hashX] = status + else: + self.session_manager.mempool_statuses.pop(hashX, None) + return status + + # async def hashX_listunspent(self, hashX): + # """Return the list of UTXOs of a script hash, including mempool + # effects.""" + # utxos = await self.db.all_utxos(hashX) + # utxos = sorted(utxos) + # utxos.extend(await self.mempool.unordered_UTXOs(hashX)) + # spends = await self.mempool.potential_spends(hashX) + # + # return [{'tx_hash': hash_to_hex_str(utxo.tx_hash), + # 'tx_pos': utxo.tx_pos, + # 'height': utxo.height, 'value': utxo.value} + # for utxo in utxos + # if (utxo.tx_hash, utxo.tx_pos) not in spends] + + async def hashX_subscribe(self, hashX, alias): + self.hashX_subs[hashX] = alias + self.session_manager.hashx_subscriptions_by_session[hashX].add(id(self)) + return await self.address_status(hashX) + + async def hashX_unsubscribe(self, hashX, alias): + sessions = self.session_manager.hashx_subscriptions_by_session[hashX] + sessions.remove(id(self)) + if not sessions: + self.hashX_subs.pop(hashX, None) + + def address_to_hashX(self, address): + try: + return self.coin.address_to_hashX(address) + except Exception: + pass + raise RPCError(BAD_REQUEST, f'{address} is not a valid address') + + # async def address_get_balance(self, address): + # """Return the confirmed and unconfirmed balance of an address.""" + # hashX = self.address_to_hashX(address) + # return await self.get_balance(hashX) + + async def address_get_history(self, address): + """Return the confirmed and unconfirmed history of an address.""" + hashX = self.address_to_hashX(address) + return await self.confirmed_and_unconfirmed_history(hashX) + + async def address_get_mempool(self, address): + """Return the mempool transactions touching an address.""" + hashX = self.address_to_hashX(address) + return self.unconfirmed_history(hashX) + + # async def address_listunspent(self, address): + # """Return the list of UTXOs of an address.""" + # hashX = self.address_to_hashX(address) + # return await self.hashX_listunspent(hashX) + + async def address_subscribe(self, *addresses): + """Subscribe to an address. + + address: the address to subscribe to""" + if len(addresses) > 1000: + raise RPCError(BAD_REQUEST, f'too many addresses in subscription request: {len(addresses)}') + results = [] + for address in addresses: + results.append(await self.hashX_subscribe(self.address_to_hashX(address), address)) + await asyncio.sleep(0) + return results + + async def address_unsubscribe(self, address): + """Unsubscribe an address. 
+ + address: the address to unsubscribe""" + hashX = self.address_to_hashX(address) + return await self.hashX_unsubscribe(hashX, address) + + # async def get_balance(self, hashX): + # utxos = await self.db.all_utxos(hashX) + # confirmed = sum(utxo.value for utxo in utxos) + # unconfirmed = await self.mempool.balance_delta(hashX) + # return {'confirmed': confirmed, 'unconfirmed': unconfirmed} + + # async def scripthash_get_balance(self, scripthash): + # """Return the confirmed and unconfirmed balance of a scripthash.""" + # hashX = scripthash_to_hashX(scripthash) + # return await self.get_balance(hashX) + + def unconfirmed_history(self, hashX): + # Note unconfirmed history is unordered in electrum-server + # height is -1 if it has unconfirmed inputs, otherwise 0 + return [{'tx_hash': hash_to_hex_str(tx.hash), + 'height': -tx.has_unconfirmed_inputs, + 'fee': tx.fee} + for tx in self.mempool.transaction_summaries(hashX)] + + async def confirmed_and_unconfirmed_history(self, hashX): + # Note history is ordered but unconfirmed is unordered in e-s + history = await self.session_manager.limited_history(hashX) + conf = [{'tx_hash': hash_to_hex_str(tx_hash), 'height': height} + for tx_hash, height in history] + return conf + self.unconfirmed_history(hashX) + + async def scripthash_get_history(self, scripthash): + """Return the confirmed and unconfirmed history of a scripthash.""" + hashX = scripthash_to_hashX(scripthash) + return await self.confirmed_and_unconfirmed_history(hashX) + + async def scripthash_get_mempool(self, scripthash): + """Return the mempool transactions touching a scripthash.""" + hashX = scripthash_to_hashX(scripthash) + return self.unconfirmed_history(hashX) + + # async def scripthash_listunspent(self, scripthash): + # """Return the list of UTXOs of a scripthash.""" + # hashX = scripthash_to_hashX(scripthash) + # return await self.hashX_listunspent(hashX) + + async def scripthash_subscribe(self, scripthash): + """Subscribe to a script hash. + + scripthash: the SHA256 hash of the script to subscribe to""" + hashX = scripthash_to_hashX(scripthash) + return await self.hashX_subscribe(hashX, scripthash) + + async def _merkle_proof(self, cp_height, height): + max_height = self.db.db_height + if not height <= cp_height <= max_height: + raise RPCError(BAD_REQUEST, + f'require header height {height:,d} <= ' + f'cp_height {cp_height:,d} <= ' + f'chain height {max_height:,d}') + branch, root = await self.db.header_branch_and_root(cp_height + 1, height) + return { + 'branch': [hash_to_hex_str(elt) for elt in branch], + 'root': hash_to_hex_str(root), + } + + async def block_headers(self, start_height, count, cp_height=0, b64=False): + """Return count concatenated block headers as hex for the main chain; + starting at start_height. + + start_height and count must be non-negative integers. At most + MAX_CHUNK_SIZE headers will be returned. 
+ """ + start_height = non_negative_integer(start_height) + count = non_negative_integer(count) + cp_height = non_negative_integer(cp_height) + + max_size = self.MAX_CHUNK_SIZE + count = min(count, max_size) + headers, count = await self.db.read_headers(start_height, count) + + if b64: + headers = self.db.encode_headers(start_height, count, headers) + else: + headers = headers.hex() + result = { + 'base64' if b64 else 'hex': headers, + 'count': count, + 'max': max_size + } + if count and cp_height: + last_height = start_height + count - 1 + result.update(await self._merkle_proof(cp_height, last_height)) + return result + + async def block_get_chunk(self, index): + """Return a chunk of block headers as a hexadecimal string. + + index: the chunk index""" + index = non_negative_integer(index) + size = self.coin.CHUNK_SIZE + start_height = index * size + headers, _ = await self.db.read_headers(start_height, size) + return headers.hex() + + async def block_get_header(self, height): + """The deserialized header at a given height. + + height: the header's height""" + height = non_negative_integer(height) + return await self.session_manager.electrum_header(height) + + def is_tor(self): + """Try to detect if the connection is to a tor hidden service we are + running.""" + peername = self.peer_mgr.proxy_peername() + if not peername: + return False + peer_address = self.peer_address() + return peer_address and peer_address[0] == peername[0] + + async def replaced_banner(self, banner): + network_info = await self.daemon_request('getnetworkinfo') + ni_version = network_info['version'] + major, minor = divmod(ni_version, 1000000) + minor, revision = divmod(minor, 10000) + revision //= 100 + daemon_version = f'{major:d}.{minor:d}.{revision:d}' + for pair in [ + ('$SERVER_VERSION', self.version), + ('$DAEMON_VERSION', daemon_version), + ('$DAEMON_SUBVERSION', network_info['subversion']), + ('$PAYMENT_ADDRESS', self.env.payment_address), + ('$DONATION_ADDRESS', self.env.donation_address), + ]: + banner = banner.replace(*pair) + return banner + + async def payment_address(self): + """Return the payment address as a string, empty if there is none.""" + return self.env.payment_address + + async def donation_address(self): + """Return the donation address as a string, empty if there is none.""" + return self.env.donation_address + + async def banner(self): + """Return the server banner text.""" + banner = f'You are connected to an {self.version} server.' + banner_file = self.env.banner_file + if banner_file: + try: + with codecs.open(banner_file, 'r', 'utf-8') as f: + banner = f.read() + except Exception as e: + self.logger.error(f'reading banner file {banner_file}: {e!r}') + else: + banner = await self.replaced_banner(banner) + + return banner + + async def relayfee(self): + """The minimum fee a low-priority tx must pay in order to be accepted + to the daemon's memory pool.""" + return await self.daemon_request('relayfee') + + async def estimatefee(self, number): + """The estimated transaction fee per kilobyte to be paid for a + transaction to be included within a certain number of blocks. + + number: the number of blocks + """ + number = non_negative_integer(number) + return await self.daemon_request('estimatefee', number) + + async def ping(self): + """Serves as a connection keep-alive mechanism and for the client to + confirm the server is still responding. + """ + return None + + async def server_version(self, client_name='', client_version=None): + """Returns the server version as a string. 
+ + client_name: a string identifying the client + client_version: the protocol version spoken by the client + """ + if self.protocol_string is not None: + return self.version, self.protocol_string + if self.sv_seen and self.protocol_tuple >= (1, 4): + raise RPCError(BAD_REQUEST, 'server.version already sent') + self.sv_seen = True + + if client_name: + client_name = str(client_name) + if self.env.drop_client is not None and \ + self.env.drop_client.match(client_name): + self.close_after_send = True + raise RPCError(BAD_REQUEST, f'unsupported client: {client_name}') + if self.client_version != client_name[:17]: + self.session_manager.session_count_metric.labels(version=self.client_version).dec() + self.client_version = client_name[:17] + self.session_manager.session_count_metric.labels(version=self.client_version).inc() + self.session_manager.client_version_metric.labels(version=self.client_version).inc() + + # Find the highest common protocol version. Disconnect if + # that protocol version is unsupported. + ptuple, client_min = protocol_version(client_version, self.PROTOCOL_MIN, self.PROTOCOL_MAX) + if ptuple is None: + ptuple, client_min = protocol_version(client_version, (1, 1, 0), (1, 4, 0)) + if ptuple is None: + self.close_after_send = True + raise RPCError(BAD_REQUEST, f'unsupported protocol version: {client_version}') + + self.protocol_tuple = ptuple + self.protocol_string = version_string(ptuple) + return self.version, self.protocol_string + + async def transaction_broadcast(self, raw_tx): + """Broadcast a raw transaction to the network. + + raw_tx: the raw transaction as a hexadecimal string""" + # This returns errors as JSON RPC errors, as is natural + try: + hex_hash = await self.session_manager.broadcast_transaction(raw_tx) + self.txs_sent += 1 + # self.mempool.wakeup.set() + # await asyncio.sleep(0.5) + self.logger.info(f'sent tx: {hex_hash}') + return hex_hash + except DaemonError as e: + error, = e.args + message = error['message'] + self.logger.info(f'error sending transaction: {message}') + raise RPCError(BAD_REQUEST, 'the transaction was rejected by ' + f'network rules.\n\n{message}\n[{raw_tx}]') + + async def transaction_info(self, tx_hash: str): + return (await self.transaction_get_batch(tx_hash))[tx_hash] + + async def transaction_get_batch(self, *tx_hashes): + self.session_manager.tx_request_count_metric.inc(len(tx_hashes)) + if len(tx_hashes) > 100: + raise RPCError(BAD_REQUEST, f'too many tx hashes in request: {len(tx_hashes)}') + for tx_hash in tx_hashes: + assert_tx_hash(tx_hash) + batch_result = await self.db.get_transactions_and_merkles(tx_hashes) + needed_merkles = {} + + for tx_hash in tx_hashes: + if tx_hash in batch_result and batch_result[tx_hash][0]: + continue + tx_hash_bytes = bytes.fromhex(tx_hash)[::-1] + mempool_tx = self.mempool.txs.get(tx_hash_bytes, None) + if mempool_tx: + raw_tx, block_hash = mempool_tx.raw_tx.hex(), None + else: + tx_info = await self.daemon_request('getrawtransaction', tx_hash, 1) + raw_tx = tx_info['hex'] + block_hash = tx_info.get('blockhash') + if block_hash: + block = await self.daemon.deserialised_block(block_hash) + height = block['height'] + try: + pos = block['tx'].index(tx_hash) + except ValueError: + raise RPCError(BAD_REQUEST, f'tx hash {tx_hash} not in ' + f'block {block_hash} at height {height:,d}') + needed_merkles[tx_hash] = raw_tx, block['tx'], pos, height + else: + batch_result[tx_hash] = [raw_tx, {'block_height': -1}] + + if needed_merkles: + for tx_hash, (raw_tx, block_txs, pos, block_height) in
needed_merkles.items(): + batch_result[tx_hash] = raw_tx, { + 'merkle': self._get_merkle_branch(block_txs, pos), + 'pos': pos, + 'block_height': block_height + } + await asyncio.sleep(0) # heavy call, give other tasks a chance + self.session_manager.tx_replied_count_metric.inc(len(tx_hashes)) + return batch_result + + async def transaction_get(self, tx_hash, verbose=False): + """Return the serialized raw transaction given its hash + + tx_hash: the transaction hash as a hexadecimal string + verbose: passed on to the daemon + """ + assert_tx_hash(tx_hash) + if verbose not in (True, False): + raise RPCError(BAD_REQUEST, '"verbose" must be a boolean') + + return await self.daemon_request('getrawtransaction', tx_hash, int(verbose)) + + def _get_merkle_branch(self, tx_hashes, tx_pos): + """Return a merkle branch to a transaction. + + tx_hashes: ordered list of hex strings of tx hashes in a block + tx_pos: index of transaction in tx_hashes to create branch for + """ + hashes = [hex_str_to_hash(hash) for hash in tx_hashes] + branch, root = self.db.merkle.branch_and_root(hashes, tx_pos) + branch = [hash_to_hex_str(hash) for hash in branch] + return branch + + async def transaction_merkle(self, tx_hash, height): + """Return the merkle branch to a confirmed transaction given its hash + and height. + + tx_hash: the transaction hash as a hexadecimal string + height: the height of the block it is in + """ + assert_tx_hash(tx_hash) + result = await self.transaction_get_batch(tx_hash) + if tx_hash not in result or result[tx_hash][1]['block_height'] <= 0: + raise RPCError(BAD_REQUEST, f'tx hash {tx_hash} not in ' + f'block at height {height:,d}') + return result[tx_hash][1] + + +class LocalRPC(SessionBase): + """A local TCP RPC server session.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.client = 'RPC' + self.connection._max_response_size = 0 + + def protocol_version_string(self): + return 'RPC' + + +def get_from_possible_keys(dictionary, *keys): + for key in keys: + if key in dictionary: + return dictionary[key] + + +def format_release_time(release_time): + # round release time up to the next multiple of 360 so it caches better + # also set a default so we don't show claims in the future + def roundup_time(number, factor=360): + return int(1 + int(number / factor)) * factor + if isinstance(release_time, str) and len(release_time) > 0: + time_digits = ''.join(filter(str.isdigit, release_time)) + time_prefix = release_time[:-len(time_digits)] + return time_prefix + str(roundup_time(int(time_digits))) + elif isinstance(release_time, int): + return roundup_time(release_time) diff --git a/scribe/hub/udp.py b/scribe/hub/udp.py new file mode 100644 index 0000000..94cf337 --- /dev/null +++ b/scribe/hub/udp.py @@ -0,0 +1,240 @@ +import asyncio +import struct +from time import perf_counter +import logging +from typing import Optional, Tuple, NamedTuple +from scribe.schema.attrs import country_str_to_int, country_int_to_str +from scribe.common import LRUCache, is_valid_public_ipv4 +# from prometheus_client import Counter + + +log = logging.getLogger(__name__) +_MAGIC = 1446058291 # genesis blocktime (which is actually wrong) +# ping_count_metric = Counter("ping_count", "Number of pings received", namespace='wallet_server_status') +_PAD_BYTES = b'\x00' * 64 + + +PROTOCOL_VERSION = 1 + + +class SPVPing(NamedTuple): + magic: int + protocol_version: int + pad_bytes: bytes + + def encode(self): + return struct.pack(b'!lB64s', *self) + + @staticmethod + def make() -> bytes: + return SPVPing(_MAGIC,
PROTOCOL_VERSION, _PAD_BYTES).encode() + + @classmethod + def decode(cls, packet: bytes): + decoded = cls(*struct.unpack(b'!lB64s', packet[:69])) + if decoded.magic != _MAGIC: + raise ValueError("invalid magic bytes") + return decoded + + +PONG_ENCODING = b'!BBL32s4sH' + + +class SPVPong(NamedTuple): + protocol_version: int + flags: int + height: int + tip: bytes + source_address_raw: bytes + country: int + + def encode(self): + return struct.pack(PONG_ENCODING, *self) + + @staticmethod + def encode_address(address: str): + return bytes(int(b) for b in address.split(".")) + + @classmethod + def make(cls, flags: int, height: int, tip: bytes, source_address: str, country: str) -> bytes: + return SPVPong( + PROTOCOL_VERSION, flags, height, tip, + cls.encode_address(source_address), + country_str_to_int(country) + ).encode() + + @classmethod + def make_sans_source_address(cls, flags: int, height: int, tip: bytes, country: str) -> Tuple[bytes, bytes]: + pong = cls.make(flags, height, tip, '0.0.0.0', country) + return pong[:38], pong[42:] + + @classmethod + def decode(cls, packet: bytes): + return cls(*struct.unpack(PONG_ENCODING, packet[:44])) + + @property + def available(self) -> bool: + return (self.flags & 0b00000001) > 0 + + @property + def ip_address(self) -> str: + return ".".join(map(str, self.source_address_raw)) + + @property + def country_name(self): + return country_int_to_str(self.country) + + def __repr__(self) -> str: + return f"SPVPong(external_ip={self.ip_address}, version={self.protocol_version}, " \ + f"available={'True' if self.flags & 1 > 0 else 'False'}," \ + f" height={self.height}, tip={self.tip[::-1].hex()}, country={self.country_name})" + + +class SPVServerStatusProtocol(asyncio.DatagramProtocol): + + def __init__( + self, height: int, tip: bytes, country: str, + throttle_cache_size: int = 1024, throttle_reqs_per_sec: int = 10, + allow_localhost: bool = False, allow_lan: bool = False + ): + super().__init__() + self.transport: Optional[asyncio.transports.DatagramTransport] = None + self._height = height + self._tip = tip + self._flags = 0 + self._country = country + self._left_cache = self._right_cache = None + self.update_cached_response() + self._throttle = LRUCache(throttle_cache_size) + self._should_log = LRUCache(throttle_cache_size) + self._min_delay = 1 / throttle_reqs_per_sec + self._allow_localhost = allow_localhost + self._allow_lan = allow_lan + self.closed = asyncio.Event() + + def update_cached_response(self): + self._left_cache, self._right_cache = SPVPong.make_sans_source_address( + self._flags, max(0, self._height), self._tip, self._country + ) + + def set_unavailable(self): + self._flags &= 0b11111110 + self.update_cached_response() + + def set_available(self): + self._flags |= 0b00000001 + self.update_cached_response() + + def set_height(self, height: int, tip: bytes): + self._height, self._tip = height, tip + self.update_cached_response() + + def should_throttle(self, host: str): + now = perf_counter() + last_requested = self._throttle.get(host, default=0) + self._throttle[host] = now + if now - last_requested < self._min_delay: + log_cnt = self._should_log.get(host, default=0) + 1 + if log_cnt % 100 == 0: + log.warning("throttle spv status to %s", host) + self._should_log[host] = log_cnt + return True + return False + + def make_pong(self, host): + return self._left_cache + SPVPong.encode_address(host) + self._right_cache + + def datagram_received(self, data: bytes, addr: Tuple[str, int]): + if self.should_throttle(addr[0]): + return + try: + 
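+ # drop anything that isn't a well-formed 69 byte SPVPing: decode() + # raises on a bad magic value, struct.error covers short packets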
SPVPing.decode(data) + except (ValueError, struct.error, AttributeError, TypeError): + # log.exception("derp") + return + if addr[1] >= 1024 and is_valid_public_ipv4( + addr[0], allow_localhost=self._allow_localhost, allow_lan=self._allow_lan): + self.transport.sendto(self.make_pong(addr[0]), addr) + else: + log.warning("odd packet from %s:%i", addr[0], addr[1]) + # ping_count_metric.inc() + + def connection_made(self, transport) -> None: + self.transport = transport + self.closed.clear() + + def connection_lost(self, exc: Optional[Exception]) -> None: + self.transport = None + self.closed.set() + + async def close(self): + if self.transport: + self.transport.close() + await self.closed.wait() + + +class StatusServer: + def __init__(self): + self._protocol: Optional[SPVServerStatusProtocol] = None + + async def start(self, height: int, tip: bytes, country: str, interface: str, port: int, allow_lan: bool = False): + if self.is_running: + return + loop = asyncio.get_event_loop() + interface = interface if interface.lower() != 'localhost' else '127.0.0.1' + self._protocol = SPVServerStatusProtocol( + height, tip, country, allow_localhost=interface == '127.0.0.1', allow_lan=allow_lan + ) + await loop.create_datagram_endpoint(lambda: self._protocol, (interface, port)) + log.info("started udp status server on %s:%i", interface, port) + + async def stop(self): + if self.is_running: + await self._protocol.close() + self._protocol = None + + @property + def is_running(self): + return self._protocol is not None + + def set_unavailable(self): + if self.is_running: + self._protocol.set_unavailable() + + def set_available(self): + if self.is_running: + self._protocol.set_available() + + def set_height(self, height: int, tip: bytes): + if self.is_running: + self._protocol.set_height(height, tip) + + +class SPVStatusClientProtocol(asyncio.DatagramProtocol): + + def __init__(self, responses: asyncio.Queue): + super().__init__() + self.transport: Optional[asyncio.transports.DatagramTransport] = None + self.responses = responses + self._ping_packet = SPVPing.make() + + def datagram_received(self, data: bytes, addr: Tuple[str, int]): + try: + self.responses.put_nowait(((addr, perf_counter()), SPVPong.decode(data))) + except (ValueError, struct.error, AttributeError, TypeError, RuntimeError): + return + + def connection_made(self, transport) -> None: + self.transport = transport + + def connection_lost(self, exc: Optional[Exception]) -> None: + self.transport = None + log.info("closed udp spv server selection client") + + def ping(self, server: Tuple[str, int]): + self.transport.sendto(self._ping_packet, server) + + def close(self): + # log.info("close udp client") + if self.transport: + self.transport.close() diff --git a/scribe/readers/__init__.py b/scribe/readers/__init__.py new file mode 100644 index 0000000..0c47c7d --- /dev/null +++ b/scribe/readers/__init__.py @@ -0,0 +1,3 @@ +from scribe.readers.interface import BaseBlockchainReader +from scribe.readers.hub_server import BlockchainReaderServer +from scribe.readers.elastic_sync import ElasticWriter diff --git a/scribe/readers/elastic_sync.py b/scribe/readers/elastic_sync.py new file mode 100644 index 0000000..17afcec --- /dev/null +++ b/scribe/readers/elastic_sync.py @@ -0,0 +1,421 @@ +import os +import signal +import json +import typing +import struct +from collections import defaultdict +import asyncio +import logging +from decimal import Decimal +from elasticsearch import AsyncElasticsearch, ConnectionError, NotFoundError +from elasticsearch.helpers import
async_streaming_bulk +from prometheus_client import Gauge, Histogram + +from scribe.schema.result import Censor +from scribe import PROMETHEUS_NAMESPACE +from scribe.elasticsearch.notifier_protocol import ElasticNotifierProtocol +from scribe.elasticsearch.search import IndexVersionMismatch, expand_query +from scribe.elasticsearch.constants import ALL_FIELDS, INDEX_DEFAULT_SETTINGS +from scribe.elasticsearch.fast_ar_trending import FAST_AR_TRENDING_SCRIPT +from scribe.readers import BaseBlockchainReader +from scribe.db.revertable import RevertableOp +from scribe.db.common import TrendingNotification, DB_PREFIXES + + +log = logging.getLogger(__name__) + +NAMESPACE = f"{PROMETHEUS_NAMESPACE}_elastic_sync" +HISTOGRAM_BUCKETS = ( + .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf') +) + + +class ElasticWriter(BaseBlockchainReader): + VERSION = 1 + prometheus_namespace = "" + block_count_metric = Gauge( + "block_count", "Number of processed blocks", namespace=NAMESPACE + ) + block_update_time_metric = Histogram( + "block_time", "Block update times", namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS + ) + reorg_count_metric = Gauge( + "reorg_count", "Number of reorgs", namespace=NAMESPACE + ) + + def __init__(self, env): + super().__init__(env, 'lbry-elastic-writer', thread_workers=1, thread_prefix='lbry-elastic-writer') + # self._refresh_interval = 0.1 + self._task = None + self.index = self.env.es_index_prefix + 'claims' + self._elastic_host = env.elastic_host + self._elastic_port = env.elastic_port + self.sync_timeout = 1800 + self.sync_client = None + self._es_info_path = os.path.join(env.db_dir, 'es_info') + self._last_wrote_height = 0 + self._last_wrote_block_hash = None + + self._touched_claims = set() + self._deleted_claims = set() + + self._removed_during_undo = set() + + self._trending = defaultdict(list) + self._advanced = True + self.synchronized = asyncio.Event() + self._listeners: typing.List[ElasticNotifierProtocol] = [] + + async def run_es_notifier(self, synchronized: asyncio.Event): + server = await asyncio.get_event_loop().create_server( + lambda: ElasticNotifierProtocol(self._listeners), '127.0.0.1', self.env.elastic_notifier_port + ) + self.log.info("ES notifier server listening on TCP localhost:%i", self.env.elastic_notifier_port) + synchronized.set() + async with server: + await server.serve_forever() + + def notify_es_notification_listeners(self, height: int, block_hash: bytes): + for p in self._listeners: + p.send_height(height, block_hash) + self.log.info("notify listener %i", height) + + def _read_es_height(self): + info = {} + if os.path.exists(self._es_info_path): + with open(self._es_info_path, 'r') as f: + info.update(json.loads(f.read())) + self._last_wrote_height = int(info.get('height', 0)) + self._last_wrote_block_hash = info.get('block_hash', None) + + async def read_es_height(self): + await asyncio.get_event_loop().run_in_executor(self._executor, self._read_es_height) + + def write_es_height(self, height: int, block_hash: str): + with open(self._es_info_path, 'w') as f: + f.write(json.dumps({'height': height, 'block_hash': block_hash}, indent=2)) + self._last_wrote_height = height + self._last_wrote_block_hash = block_hash + + async def get_index_version(self) -> int: + try: + template = await self.sync_client.indices.get_template(self.index) + return template[self.index]['version'] + except NotFoundError: + return 0 + + async def set_index_version(self, version): + await 
self.sync_client.indices.put_template( + self.index, body={'version': version, 'index_patterns': ['ignored']}, ignore=400 + ) + + async def start_index(self) -> bool: + if self.sync_client: + return False + hosts = [{'host': self._elastic_host, 'port': self._elastic_port}] + self.sync_client = AsyncElasticsearch(hosts, timeout=self.sync_timeout) + while True: + try: + await self.sync_client.cluster.health(wait_for_status='yellow') + self.log.info("ES is ready to connect to") + break + except ConnectionError: + self.log.warning("Failed to connect to Elasticsearch. Waiting for it!") + await asyncio.sleep(1) + + index_version = await self.get_index_version() + + res = await self.sync_client.indices.create(self.index, INDEX_DEFAULT_SETTINGS, ignore=400) + acked = res.get('acknowledged', False) + + if acked: + await self.set_index_version(self.VERSION) + return True + elif index_version != self.VERSION: + self.log.error("es search index has an incompatible version: %s vs %s", index_version, self.VERSION) + raise IndexVersionMismatch(index_version, self.VERSION) + else: + await self.sync_client.indices.refresh(self.index) + return False + + async def stop_index(self): + if self.sync_client: + await self.sync_client.close() + self.sync_client = None + + async def delete_index(self): + if self.sync_client: + return await self.sync_client.indices.delete(self.index, ignore_unavailable=True) + + def update_filter_query(self, censor_type, blockdict, channels=False): + blockdict = {blocked.hex(): blocker.hex() for blocked, blocker in blockdict.items()} + if channels: + update = expand_query(channel_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}") + else: + update = expand_query(claim_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}") + key = 'channel_id' if channels else 'claim_id' + update['script'] = { + "source": f"ctx._source.censor_type={censor_type}; " + f"ctx._source.censoring_channel_id=params[ctx._source.{key}];", + "lang": "painless", + "params": blockdict + } + return update + + async def apply_filters(self, blocked_streams, blocked_channels, filtered_streams, filtered_channels): + if filtered_streams: + await self.sync_client.update_by_query( + self.index, body=self.update_filter_query(Censor.SEARCH, filtered_streams), slices=4) + await self.sync_client.indices.refresh(self.index) + if filtered_channels: + await self.sync_client.update_by_query( + self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels), slices=4) + await self.sync_client.indices.refresh(self.index) + await self.sync_client.update_by_query( + self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels, True), slices=4) + await self.sync_client.indices.refresh(self.index) + if blocked_streams: + await self.sync_client.update_by_query( + self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_streams), slices=4) + await self.sync_client.indices.refresh(self.index) + if blocked_channels: + await self.sync_client.update_by_query( + self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels), slices=4) + await self.sync_client.indices.refresh(self.index) + await self.sync_client.update_by_query( + self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels, True), slices=4) + await self.sync_client.indices.refresh(self.index) + + @staticmethod + def _upsert_claim_query(index, claim): + return { + 'doc': {key: value for key, value in claim.items() if key in ALL_FIELDS}, + '_id': claim['claim_id'], + '_index': index, + '_op_type': 
'update', + 'doc_as_upsert': True + } + + @staticmethod + def _delete_claim_query(index, claim_hash: bytes): + return { + '_index': index, + '_op_type': 'delete', + '_id': claim_hash.hex() + } + + @staticmethod + def _update_trending_query(index, claim_hash, notifications): + return { + '_id': claim_hash.hex(), + '_index': index, + '_op_type': 'update', + 'script': { + 'lang': 'painless', + 'source': FAST_AR_TRENDING_SCRIPT, + 'params': {'src': { + 'changes': [ + { + 'height': notification.height, + 'prev_amount': notification.prev_amount / 1E8, + 'new_amount': notification.new_amount / 1E8, + } for notification in notifications + ] + }} + }, + } + + async def _claim_producer(self): + for deleted in self._deleted_claims: + yield self._delete_claim_query(self.index, deleted) + for touched in self._touched_claims: + claim = self.db.claim_producer(touched) + if claim: + yield self._upsert_claim_query(self.index, claim) + for claim_hash, notifications in self._trending.items(): + yield self._update_trending_query(self.index, claim_hash, notifications) + + def advance(self, height: int): + super().advance(height) + + touched_or_deleted = self.db.prefix_db.touched_or_deleted.get(height) + for k, v in self.db.prefix_db.trending_notification.iterate((height,)): + self._trending[k.claim_hash].append(TrendingNotification(k.height, v.previous_amount, v.new_amount)) + if touched_or_deleted: + readded_after_reorg = self._removed_during_undo.intersection(touched_or_deleted.touched_claims) + self._deleted_claims.difference_update(readded_after_reorg) + self._touched_claims.update(touched_or_deleted.touched_claims) + self._deleted_claims.update(touched_or_deleted.deleted_claims) + self._touched_claims.difference_update(self._deleted_claims) + for to_del in touched_or_deleted.deleted_claims: + if to_del in self._trending: + self._trending.pop(to_del) + self._advanced = True + + def unwind(self): + self.db.tx_counts.pop() + reverted_block_hash = self.db.coin.header_hash(self.db.headers.pop()) + packed = self.db.prefix_db.undo.get(len(self.db.tx_counts), reverted_block_hash) + touched_or_deleted = None + claims_to_delete = [] + # find and apply the touched_or_deleted items in the undos for the reverted blocks + assert packed, f'missing undo information for block {len(self.db.tx_counts)}' + while packed: + op, packed = RevertableOp.unpack(packed) + if op.is_delete and op.key.startswith(DB_PREFIXES.touched_or_deleted.value): + assert touched_or_deleted is None, 'should only have one match' + touched_or_deleted = self.db.prefix_db.touched_or_deleted.unpack_value(op.value) + elif op.is_delete and op.key.startswith(DB_PREFIXES.claim_to_txo.value): + v = self.db.prefix_db.claim_to_txo.unpack_value(op.value) + if v.root_tx_num == v.tx_num and v.root_tx_num > self.db.tx_counts[-1]: + claims_to_delete.append(self.db.prefix_db.claim_to_txo.unpack_key(op.key).claim_hash) + if touched_or_deleted: + self._touched_claims.update(set(touched_or_deleted.deleted_claims).union( + touched_or_deleted.touched_claims.difference(set(claims_to_delete)))) + self._deleted_claims.update(claims_to_delete) + self._removed_during_undo.update(claims_to_delete) + self._advanced = True + self.log.warning("deleting %i claims and upserting %i claims after reorg", len(self._deleted_claims), len(self._touched_claims)) + + async def poll_for_changes(self): + await super().poll_for_changes() + cnt = 0 + success = 0 + if self._advanced: + if self._touched_claims or self._deleted_claims or self._trending: + async for ok, item in async_streaming_bulk( +
self.sync_client, self._claim_producer(), + raise_on_error=False): + cnt += 1 + if not ok: + self.log.warning("indexing failed for an item: %s", item) + else: + success += 1 + await self.sync_client.indices.refresh(self.index) + await self.db.reload_blocking_filtering_streams() + await self.apply_filters( + self.db.blocked_streams, self.db.blocked_channels, self.db.filtered_streams, + self.db.filtered_channels + ) + self.write_es_height(self.db.db_height, self.db.db_tip[::-1].hex()) + self.log.info("Indexing block %i done. %i/%i successful", self._last_wrote_height, success, cnt) + self._touched_claims.clear() + self._deleted_claims.clear() + self._removed_during_undo.clear() + self._trending.clear() + self._advanced = False + self.synchronized.set() + self.notify_es_notification_listeners(self._last_wrote_height, self.db.db_tip) + + @property + def last_synced_height(self) -> int: + return self._last_wrote_height + + async def start(self, reindex=False): + await super().start() + + def _start_cancellable(run, *args): + _flag = asyncio.Event() + self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag))) + return _flag.wait() + + self.db.open_db() + await self.db.initialize_caches() + await self.read_es_height() + await self.start_index() + self.last_state = self.db.read_db_state() + + await _start_cancellable(self.run_es_notifier) + + if reindex or self._last_wrote_height == 0 and self.db.db_height > 0: + if self._last_wrote_height == 0: + self.log.info("running initial ES indexing of rocksdb at block height %i", self.db.db_height) + else: + self.log.info("reindex (last wrote: %i, db height: %i)", self._last_wrote_height, self.db.db_height) + await self.reindex() + await _start_cancellable(self.refresh_blocks_forever) + + async def stop(self, delete_index=False): + async with self._lock: + while self.cancellable_tasks: + t = self.cancellable_tasks.pop() + if not t.done(): + t.cancel() + if delete_index: + await self.delete_index() + await self.stop_index() + self._executor.shutdown(wait=True) + self._executor = None + self.shutdown_event.set() + + def run(self, reindex=False): + loop = asyncio.get_event_loop() + loop.set_default_executor(self._executor) + + def __exit(): + raise SystemExit() + try: + loop.add_signal_handler(signal.SIGINT, __exit) + loop.add_signal_handler(signal.SIGTERM, __exit) + loop.run_until_complete(self.start(reindex=reindex)) + loop.run_until_complete(self.shutdown_event.wait()) + except (SystemExit, KeyboardInterrupt): + pass + finally: + loop.run_until_complete(self.stop()) + + async def reindex(self): + async with self._lock: + self.log.info("reindexing %i claims (estimate)", self.db.prefix_db.claim_to_txo.estimate_num_keys()) + await self.delete_index() + res = await self.sync_client.indices.create(self.index, INDEX_DEFAULT_SETTINGS, ignore=400) + acked = res.get('acknowledged', False) + if acked: + await self.set_index_version(self.VERSION) + await self.sync_client.indices.refresh(self.index) + self.write_es_height(0, self.env.coin.GENESIS_HASH) + await self._sync_all_claims() + await self.sync_client.indices.refresh(self.index) + self.write_es_height(self.db.db_height, self.db.db_tip[::-1].hex()) + self.notify_es_notification_listeners(self.db.db_height, self.db.db_tip) + self.log.info("finished reindexing") + + async def _sync_all_claims(self, batch_size=100000): + def load_historic_trending(): + notifications = self._trending + for k, v in self.db.prefix_db.trending_notification.iterate(): + 
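+ # group the stored trending notifications by claim hash so each claim + # gets a single script update alongside its reindexed document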
notifications[k.claim_hash].append(TrendingNotification(k.height, v.previous_amount, v.new_amount)) + + async def all_claims_producer(): + async for claim in self.db.all_claims_producer(batch_size=batch_size): + yield self._upsert_claim_query(self.index, claim) + claim_hash = bytes.fromhex(claim['claim_id']) + if claim_hash in self._trending: + yield self._update_trending_query(self.index, claim_hash, self._trending.pop(claim_hash)) + self._trending.clear() + + self.log.info("loading about %i historic trending updates", self.db.prefix_db.trending_notification.estimate_num_keys()) + await asyncio.get_event_loop().run_in_executor(self._executor, load_historic_trending) + self.log.info("loaded historic trending updates for %i claims", len(self._trending)) + + cnt = 0 + success = 0 + producer = all_claims_producer() + + finished = False + try: + async for ok, item in async_streaming_bulk(self.sync_client, producer, raise_on_error=False): + cnt += 1 + if not ok: + self.log.warning("indexing failed for an item: %s", item) + else: + success += 1 + if cnt % batch_size == 0: + self.log.info(f"indexed {success} claims") + finished = True + await self.sync_client.indices.refresh(self.index) + self.log.info("indexed %i/%i claims", success, cnt) + finally: + if not finished: + await producer.aclose() + self.shutdown_event.set() diff --git a/scribe/readers/hub_server.py b/scribe/readers/hub_server.py new file mode 100644 index 0000000..2d8938f --- /dev/null +++ b/scribe/readers/hub_server.py @@ -0,0 +1,162 @@ +import signal +import asyncio +import typing +from scribe import __version__ +from scribe.blockchain.daemon import LBCDaemon +from scribe.readers import BaseBlockchainReader +from scribe.elasticsearch import ElasticNotifierClientProtocol +from scribe.hub.session import SessionManager +from scribe.hub.mempool import MemPool +from scribe.hub.udp import StatusServer +from scribe.hub.prometheus import PrometheusServer + + +class BlockchainReaderServer(BaseBlockchainReader): + def __init__(self, env): + super().__init__(env, 'lbry-reader', thread_workers=max(1, env.max_query_workers), thread_prefix='hub-worker') + self.history_cache = {} + self.resolve_outputs_cache = {} + self.resolve_cache = {} + self.notifications_to_send = [] + self.mempool_notifications = set() + self.status_server = StatusServer() + self.daemon = LBCDaemon(env.coin, env.daemon_url) # only needed for broadcasting txs + self.prometheus_server: typing.Optional[PrometheusServer] = None + self.mempool = MemPool(self.env.coin, self.db) + self.session_manager = SessionManager( + env, self.db, self.mempool, self.history_cache, self.resolve_cache, + self.resolve_outputs_cache, self.daemon, + self.shutdown_event, + on_available_callback=self.status_server.set_available, + on_unavailable_callback=self.status_server.set_unavailable + ) + self.mempool.session_manager = self.session_manager + self.es_notifications = asyncio.Queue() + self.es_notification_client = ElasticNotifierClientProtocol(self.es_notifications) + self.synchronized = asyncio.Event() + self._es_height = None + self._es_block_hash = None + + def clear_caches(self): + self.history_cache.clear() + self.resolve_outputs_cache.clear() + self.resolve_cache.clear() + # self.clear_search_cache() + # self.mempool.notified_mempool_txs.clear() + + def clear_search_cache(self): + self.session_manager.search_index.clear_caches() + + def advance(self, height: int): + super().advance(height) + touched_hashXs = self.db.prefix_db.touched_hashX.get(height).touched_hashXs + 
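+ # queue the touched hashXs for this height; poll_for_changes() drains + # the queue and pushes notifications to subscribed sessions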
self.notifications_to_send.append((set(touched_hashXs), height)) + + def _detect_changes(self): + super()._detect_changes() + self.mempool_notifications.update(self.mempool.refresh()) + + async def poll_for_changes(self): + await super().poll_for_changes() + if self.db.fs_height <= 0: + return + self.status_server.set_height(self.db.fs_height, self.db.db_tip) + if self.notifications_to_send: + for (touched, height) in self.notifications_to_send: + await self.mempool.on_block(touched, height) + self.log.info("reader advanced to %i", height) + if self._es_height == self.db.db_height: + self.synchronized.set() + if self.mempool_notifications: + await self.mempool.on_mempool( + set(self.mempool.touched_hashXs), self.mempool_notifications, self.db.db_height + ) + self.mempool_notifications.clear() + self.notifications_to_send.clear() + + async def receive_es_notifications(self, synchronized: asyncio.Event): + await asyncio.get_event_loop().create_connection( + lambda: self.es_notification_client, '127.0.0.1', self.env.elastic_notifier_port + ) + synchronized.set() + try: + while True: + self._es_height, self._es_block_hash = await self.es_notifications.get() + self.clear_search_cache() + if self.last_state and self._es_block_hash == self.last_state.tip: + self.synchronized.set() + self.log.info("es and reader are in sync at block %i", self.last_state.height) + else: + self.log.info("es and reader are not yet in sync (block %s vs %s)", self._es_height, + self.db.db_height) + finally: + self.es_notification_client.close() + + async def start(self): + await super().start() + env = self.env + # min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings() + self.log.info(f'software version: {__version__}') + # self.log.info(f'supported protocol versions: {min_str}-{max_str}') + self.log.info(f'event loop policy: {env.loop_policy}') + self.log.info(f'reorg limit is {env.reorg_limit:,d} blocks') + await self.daemon.height() + + def _start_cancellable(run, *args): + _flag = asyncio.Event() + self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag))) + return _flag.wait() + + self.db.open_db() + await self.db.initialize_caches() + + self.last_state = self.db.read_db_state() + + await self.start_prometheus() + if self.env.udp_port and int(self.env.udp_port): + await self.status_server.start( + 0, bytes.fromhex(self.env.coin.GENESIS_HASH)[::-1], self.env.country, + self.env.host, self.env.udp_port, self.env.allow_lan_udp + ) + await _start_cancellable(self.receive_es_notifications) + await _start_cancellable(self.refresh_blocks_forever) + await self.session_manager.search_index.start() + await _start_cancellable(self.session_manager.serve, self.mempool) + + async def stop(self): + await self.status_server.stop() + async with self._lock: + while self.cancellable_tasks: + t = self.cancellable_tasks.pop() + if not t.done(): + t.cancel() + await self.session_manager.search_index.stop() + self.db.close() + if self.prometheus_server: + await self.prometheus_server.stop() + self.prometheus_server = None + await self.daemon.close() + self._executor.shutdown(wait=True) + self._executor = None + self.shutdown_event.set() + + def run(self): + loop = asyncio.get_event_loop() + loop.set_default_executor(self._executor) + + def __exit(): + raise SystemExit() + try: + loop.add_signal_handler(signal.SIGINT, __exit) + loop.add_signal_handler(signal.SIGTERM, __exit) + loop.run_until_complete(self.start()) + loop.run_until_complete(self.shutdown_event.wait()) + except (SystemExit, KeyboardInterrupt): + 
pass + finally: + loop.run_until_complete(self.stop()) + + async def start_prometheus(self): + if not self.prometheus_server and self.env.prometheus_port: + self.prometheus_server = PrometheusServer() + await self.prometheus_server.start("0.0.0.0", self.env.prometheus_port) diff --git a/scribe/readers/interface.py b/scribe/readers/interface.py new file mode 100644 index 0000000..bd095c9 --- /dev/null +++ b/scribe/readers/interface.py @@ -0,0 +1,119 @@ +import logging +import asyncio +import typing +from concurrent.futures.thread import ThreadPoolExecutor +from prometheus_client import Gauge, Histogram +from scribe import PROMETHEUS_NAMESPACE +from scribe.db.prefixes import DBState +from scribe.db import HubDB + +HISTOGRAM_BUCKETS = ( + .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf') +) + +NAMESPACE = f"{PROMETHEUS_NAMESPACE}_reader" + + +class BaseBlockchainReader: + block_count_metric = Gauge( + "block_count", "Number of processed blocks", namespace=NAMESPACE + ) + block_update_time_metric = Histogram( + "block_time", "Block update times", namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS + ) + reorg_count_metric = Gauge( + "reorg_count", "Number of reorgs", namespace=NAMESPACE + ) + + def __init__(self, env, secondary_name: str, thread_workers: int = 1, thread_prefix: str = 'blockchain-reader'): + self.env = env + self.log = logging.getLogger(__name__).getChild(self.__class__.__name__) + self.shutdown_event = asyncio.Event() + self.cancellable_tasks = [] + self._thread_workers = thread_workers + self._thread_prefix = thread_prefix + self._executor = ThreadPoolExecutor(thread_workers, thread_name_prefix=thread_prefix) + self.db = HubDB( + env.coin, env.db_dir, env.cache_MB, env.reorg_limit, env.cache_all_claim_txos, env.cache_all_tx_hashes, + secondary_name=secondary_name, max_open_files=-1, blocking_channel_ids=env.blocking_channel_ids, + filtering_channel_ids=env.filtering_channel_ids, executor=self._executor + ) + self.last_state: typing.Optional[DBState] = None + self._refresh_interval = 0.1 + self._lock = asyncio.Lock() + + def _detect_changes(self): + try: + self.db.prefix_db.try_catch_up_with_primary() + except: + self.log.exception('failed to update secondary db') + raise + state = self.db.prefix_db.db_state.get() + if not state or state.height <= 0: + return + if self.last_state and self.last_state.height > state.height: + self.log.warning("reorg detected, waiting until the writer has flushed the new blocks to advance") + return + last_height = 0 if not self.last_state else self.last_state.height + rewound = False + if self.last_state: + while True: + if self.db.headers[-1] == self.db.prefix_db.header.get(last_height, deserialize_value=False): + self.log.debug("connects to block %i", last_height) + break + else: + self.log.warning("disconnect block %i", last_height) + self.unwind() + rewound = True + last_height -= 1 + if rewound: + self.reorg_count_metric.inc() + self.db.read_db_state() + if not self.last_state or last_height < state.height: + for height in range(last_height + 1, state.height + 1): + self.log.info("advancing to %i", height) + self.advance(height) + self.clear_caches() + self.last_state = state + self.block_count_metric.set(self.last_state.height) + self.db.blocked_streams, self.db.blocked_channels = self.db.get_streams_and_channels_reposted_by_channel_hashes( + self.db.blocking_channel_hashes + ) + self.db.filtered_streams, self.db.filtered_channels = 
self.db.get_streams_and_channels_reposted_by_channel_hashes(
+            self.db.filtering_channel_hashes
+        )
+
+    async def poll_for_changes(self):
+        await asyncio.get_event_loop().run_in_executor(self._executor, self._detect_changes)
+
+    async def refresh_blocks_forever(self, synchronized: asyncio.Event):
+        while True:
+            try:
+                async with self._lock:
+                    await self.poll_for_changes()
+            except asyncio.CancelledError:
+                raise
+            except:
+                self.log.exception("blockchain reader main loop encountered an unexpected error")
+                raise
+            await asyncio.sleep(self._refresh_interval)
+            synchronized.set()
+
+    def clear_caches(self):
+        pass
+
+    def advance(self, height: int):
+        tx_count = self.db.prefix_db.tx_count.get(height).tx_count
+        assert tx_count not in self.db.tx_counts, f'boom {tx_count} in {len(self.db.tx_counts)} tx counts'
+        assert len(self.db.tx_counts) == height, f"{len(self.db.tx_counts)} != {height}"
+        self.db.tx_counts.append(tx_count)
+        self.db.headers.append(self.db.prefix_db.header.get(height, deserialize_value=False))
+
+    def unwind(self):
+        self.db.tx_counts.pop()
+        self.db.headers.pop()
+
+    async def start(self):
+        if not self._executor:
+            self._executor = ThreadPoolExecutor(self._thread_workers, thread_name_prefix=self._thread_prefix)
+            self.db._executor = self._executor
diff --git a/scribe/schema/Makefile b/scribe/schema/Makefile
new file mode 100644
index 0000000..917b2a8
--- /dev/null
+++ b/scribe/schema/Makefile
@@ -0,0 +1,5 @@
+build:
+	rm types/v2/* -rf
+	touch types/v2/__init__.py
+	cd types/v2/ && protoc --python_out=. -I ../../../../../types/v2/proto/ ../../../../../types/v2/proto/*.proto
+	sed -e 's/^import\ \(.*\)_pb2\ /from . import\ \1_pb2\ /g' -i types/v2/*.py
diff --git a/scribe/schema/README.md b/scribe/schema/README.md
new file mode 100644
index 0000000..12af8c7
--- /dev/null
+++ b/scribe/schema/README.md
@@ -0,0 +1,35 @@
+Schema
+=====
+
+These files are generated from the [types repo](https://github.com/lbryio/types). If you are modifying or adding a new type, make sure the types repo is cloned in the same root folder as the SDK repo, like:
+
+```
+repos/
+  - lbry-sdk/
+  - types/
+```
+
+Then [download protoc 3.2.0](https://github.com/protocolbuffers/protobuf/releases/tag/v3.2.0) and add it to your PATH. On Linux:
+
+```bash
+cd ~/.local/bin
+wget https://github.com/protocolbuffers/protobuf/releases/download/v3.2.0/protoc-3.2.0-linux-x86_64.zip
+unzip protoc-3.2.0-linux-x86_64.zip bin/protoc -d..
+```
+
+Finally, running `make` should regenerate everything in place.
+
+
+### Why protoc 3.2.0?
+Different/newer versions generate larger diffs, and we need to make sure the generated code is still correct. In theory we can just update to the latest version and it will all work, but it is good practice to check blockchain data and backward compatibility before bumping versions (if you do, please update this section!).
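+
+### What the `sed` rewrite does
+The generated modules live inside the `scribe.schema.types.v2` package, but `protoc` emits absolute imports, so the `sed` command in the Makefile rewrites them into package-relative imports. A minimal sketch of the effect (the module name here is only an example):
+
+```python
+# as emitted by protoc
+import result_pb2 as result__pb2
+
+# after the sed rewrite
+from . import result_pb2 as result__pb2
+```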
diff --git a/scribe/schema/__init__.py b/scribe/schema/__init__.py new file mode 100644 index 0000000..448923f --- /dev/null +++ b/scribe/schema/__init__.py @@ -0,0 +1 @@ +from .claim import Claim diff --git a/scribe/schema/attrs.py b/scribe/schema/attrs.py new file mode 100644 index 0000000..6ba471b --- /dev/null +++ b/scribe/schema/attrs.py @@ -0,0 +1,573 @@ +import json +import logging +import os.path +import hashlib +from typing import Tuple, List +from string import ascii_letters +from decimal import Decimal, ROUND_UP +from google.protobuf.json_format import MessageToDict + +from scribe.base58 import Base58, b58_encode +from scribe.error import MissingPublishedFileError, EmptyPublishedFileError + +from scribe.schema.mime_types import guess_media_type +from scribe.schema.base import Metadata, BaseMessageList +from scribe.schema.tags import clean_tags, normalize_tag +from scribe.schema.types.v2.claim_pb2 import ( + Fee as FeeMessage, + Location as LocationMessage, + Language as LanguageMessage +) + + +log = logging.getLogger(__name__) + + +CENT = 1000000 +COIN = 100*CENT + + +def calculate_sha384_file_hash(file_path): + sha384 = hashlib.sha384() + with open(file_path, 'rb') as f: + for chunk in iter(lambda: f.read(128 * sha384.block_size), b''): + sha384.update(chunk) + return sha384.digest() + + +def country_int_to_str(country: int) -> str: + r = LocationMessage.Country.Name(country) + return r[1:] if r.startswith('R') else r + + +def country_str_to_int(country: str) -> int: + if len(country) == 3: + country = 'R' + country + return LocationMessage.Country.Value(country) + + +class Dimmensional(Metadata): + + __slots__ = () + + @property + def width(self) -> int: + return self.message.width + + @width.setter + def width(self, width: int): + self.message.width = width + + @property + def height(self) -> int: + return self.message.height + + @height.setter + def height(self, height: int): + self.message.height = height + + @property + def dimensions(self) -> Tuple[int, int]: + return self.width, self.height + + @dimensions.setter + def dimensions(self, dimensions: Tuple[int, int]): + self.message.width, self.message.height = dimensions + + def _extract(self, file_metadata, field): + try: + setattr(self, field, file_metadata.getValues(field)[0]) + except: + log.exception(f'Could not extract {field} from file metadata.') + + def update(self, file_metadata=None, height=None, width=None): + if height is not None: + self.height = height + elif file_metadata: + self._extract(file_metadata, 'height') + + if width is not None: + self.width = width + elif file_metadata: + self._extract(file_metadata, 'width') + + +class Playable(Metadata): + + __slots__ = () + + @property + def duration(self) -> int: + return self.message.duration + + @duration.setter + def duration(self, duration: int): + self.message.duration = duration + + def update(self, file_metadata=None, duration=None): + if duration is not None: + self.duration = duration + elif file_metadata: + try: + self.duration = file_metadata.getValues('duration')[0].seconds + except: + log.exception('Could not extract duration from file metadata.') + + +class Image(Dimmensional): + + __slots__ = () + + +class Audio(Playable): + + __slots__ = () + + +class Video(Dimmensional, Playable): + + __slots__ = () + + def update(self, file_metadata=None, height=None, width=None, duration=None): + Dimmensional.update(self, file_metadata, height, width) + Playable.update(self, file_metadata, duration) + + +class Source(Metadata): + + __slots__ = () + + def 
update(self, file_path=None):
+        if file_path is not None:
+            self.name = os.path.basename(file_path)
+            self.media_type, stream_type = guess_media_type(file_path)
+            if not os.path.isfile(file_path):
+                raise MissingPublishedFileError(file_path)
+            self.size = os.path.getsize(file_path)
+            if self.size == 0:
+                raise EmptyPublishedFileError(file_path)
+            self.file_hash_bytes = calculate_sha384_file_hash(file_path)
+            return stream_type
+
+    @property
+    def name(self) -> str:
+        return self.message.name
+
+    @name.setter
+    def name(self, name: str):
+        self.message.name = name
+
+    @property
+    def size(self) -> int:
+        return self.message.size
+
+    @size.setter
+    def size(self, size: int):
+        self.message.size = size
+
+    @property
+    def media_type(self) -> str:
+        return self.message.media_type
+
+    @media_type.setter
+    def media_type(self, media_type: str):
+        self.message.media_type = media_type
+
+    @property
+    def file_hash(self) -> str:
+        return self.message.hash.hex()
+
+    @file_hash.setter
+    def file_hash(self, file_hash: str):
+        self.message.hash = bytes.fromhex(file_hash)
+
+    @property
+    def file_hash_bytes(self) -> bytes:
+        return self.message.hash
+
+    @file_hash_bytes.setter
+    def file_hash_bytes(self, file_hash_bytes: bytes):
+        self.message.hash = file_hash_bytes
+
+    @property
+    def sd_hash(self) -> str:
+        return self.message.sd_hash.hex()
+
+    @sd_hash.setter
+    def sd_hash(self, sd_hash: str):
+        self.message.sd_hash = bytes.fromhex(sd_hash)
+
+    @property
+    def sd_hash_bytes(self) -> bytes:
+        return self.message.sd_hash
+
+    @sd_hash_bytes.setter
+    def sd_hash_bytes(self, sd_hash: bytes):
+        self.message.sd_hash = sd_hash
+
+    @property
+    def bt_infohash(self) -> str:
+        return self.message.bt_infohash.hex()
+
+    @bt_infohash.setter
+    def bt_infohash(self, bt_infohash: str):
+        self.message.bt_infohash = bytes.fromhex(bt_infohash)
+
+    @property
+    def bt_infohash_bytes(self) -> bytes:
+        # raw bytes, mirroring sd_hash_bytes; the hex form is exposed via bt_infohash
+        return self.message.bt_infohash
+
+    @bt_infohash_bytes.setter
+    def bt_infohash_bytes(self, bt_infohash: bytes):
+        self.message.bt_infohash = bt_infohash
+
+    @property
+    def url(self) -> str:
+        return self.message.url
+
+    @url.setter
+    def url(self, url: str):
+        self.message.url = url
+
+
+class Fee(Metadata):
+
+    __slots__ = ()
+
+    def update(self, address: str = None, currency: str = None, amount=None):
+        if amount:
+            currency = (currency or self.currency or '').lower()
+            if not currency:
+                raise Exception('In order to set a fee amount, please specify a fee currency.')
+            if currency not in ('lbc', 'btc', 'usd'):
+                raise Exception(f'Missing or unknown currency provided: {currency}')
+            setattr(self, currency, Decimal(amount))
+        elif currency:
+            raise Exception('In order to set a fee currency, please specify a fee amount.')
+        if address:
+            if not self.currency:
+                raise Exception('In order to set a fee address, please specify a fee amount and currency.')
+            self.address = address
+
+    @property
+    def currency(self) -> str:
+        if self.message.currency:
+            return FeeMessage.Currency.Name(self.message.currency)
+
+    @property
+    def address(self) -> str:
+        if self.address_bytes:
+            return b58_encode(self.address_bytes)
+
+    @address.setter
+    def address(self, address: str):
+        self.address_bytes = Base58.decode(address)
+
+    @property
+    def address_bytes(self) -> bytes:
+        return self.message.address
+
+    @address_bytes.setter
+    def address_bytes(self, address: bytes):
+        self.message.address = address
+
+    @property
+    def amount(self) -> Decimal:
+        if self.currency == 'LBC':
+            return self.lbc
+        if self.currency == 'BTC':
+            return
self.btc
+        if self.currency == 'USD':
+            return self.usd
+
+    DEWIES = Decimal(COIN)
+
+    @property
+    def lbc(self) -> Decimal:
+        if self.message.currency != FeeMessage.LBC:
+            raise ValueError('LBC can only be returned for LBC fees.')
+        return Decimal(self.message.amount / self.DEWIES)
+
+    @lbc.setter
+    def lbc(self, amount: Decimal):
+        self.dewies = int(amount * self.DEWIES)
+
+    @property
+    def dewies(self) -> int:
+        if self.message.currency != FeeMessage.LBC:
+            raise ValueError('Dewies can only be returned for LBC fees.')
+        return self.message.amount
+
+    @dewies.setter
+    def dewies(self, amount: int):
+        self.message.amount = amount
+        self.message.currency = FeeMessage.LBC
+
+    SATOSHIES = Decimal(COIN)
+
+    @property
+    def btc(self) -> Decimal:
+        if self.message.currency != FeeMessage.BTC:
+            raise ValueError('BTC can only be returned for BTC fees.')
+        return Decimal(self.message.amount / self.SATOSHIES)
+
+    @btc.setter
+    def btc(self, amount: Decimal):
+        self.satoshis = int(amount * self.SATOSHIES)
+
+    @property
+    def satoshis(self) -> int:
+        if self.message.currency != FeeMessage.BTC:
+            raise ValueError('Satoshis can only be returned for BTC fees.')
+        return self.message.amount
+
+    @satoshis.setter
+    def satoshis(self, amount: int):
+        self.message.amount = amount
+        self.message.currency = FeeMessage.BTC
+
+    PENNIES = Decimal('100.0')
+    PENNY = Decimal('0.01')
+
+    @property
+    def usd(self) -> Decimal:
+        if self.message.currency != FeeMessage.USD:
+            raise ValueError('USD can only be returned for USD fees.')
+        return Decimal(self.message.amount / self.PENNIES)
+
+    @usd.setter
+    def usd(self, amount: Decimal):
+        self.pennies = int(amount.quantize(self.PENNY, ROUND_UP) * self.PENNIES)
+
+    @property
+    def pennies(self) -> int:
+        if self.message.currency != FeeMessage.USD:
+            raise ValueError('Pennies can only be returned for USD fees.')
+        return self.message.amount
+
+    @pennies.setter
+    def pennies(self, amount: int):
+        self.message.amount = amount
+        self.message.currency = FeeMessage.USD
+
+
+class ClaimReference(Metadata):
+
+    __slots__ = ()
+
+    @property
+    def claim_id(self) -> str:
+        return self.claim_hash[::-1].hex()
+
+    @claim_id.setter
+    def claim_id(self, claim_id: str):
+        self.claim_hash = bytes.fromhex(claim_id)[::-1]
+
+    @property
+    def claim_hash(self) -> bytes:
+        return self.message.claim_hash
+
+    @claim_hash.setter
+    def claim_hash(self, claim_hash: bytes):
+        self.message.claim_hash = claim_hash
+
+
+class ClaimList(BaseMessageList[ClaimReference]):
+
+    __slots__ = ()
+    item_class = ClaimReference
+
+    @property
+    def _message(self):
+        return self.message.claim_references
+
+    def append(self, value):
+        self.add().claim_id = value
+
+    @property
+    def ids(self) -> List[str]:
+        return [c.claim_id for c in self]
+
+
+class Language(Metadata):
+
+    __slots__ = ()
+
+    @property
+    def langtag(self) -> str:
+        langtag = []
+        if self.language:
+            langtag.append(self.language)
+        if self.script:
+            langtag.append(self.script)
+        if self.region:
+            langtag.append(self.region)
+        return '-'.join(langtag)
+
+    @langtag.setter
+    def langtag(self, langtag: str):
+        parts = langtag.split('-')
+        self.language = parts.pop(0)
+        if parts and len(parts[0]) == 4:
+            self.script = parts.pop(0)
+        if parts and len(parts[0]) == 2 and parts[0].isalpha():
+            self.region = parts.pop(0)
+        if parts and len(parts[0]) == 3 and parts[0].isdigit():
+            self.region = parts.pop(0)
+        assert not parts, f"Failed to parse language tag: {langtag}"
+
+    @property
+    def language(self) -> str:
+        if self.message.language:
+            return
LanguageMessage.Language.Name(self.message.language) + + @language.setter + def language(self, language: str): + self.message.language = LanguageMessage.Language.Value(language) + + @property + def script(self) -> str: + if self.message.script: + return LanguageMessage.Script.Name(self.message.script) + + @script.setter + def script(self, script: str): + self.message.script = LanguageMessage.Script.Value(script) + + @property + def region(self) -> str: + if self.message.region: + return country_int_to_str(self.message.region) + + @region.setter + def region(self, region: str): + self.message.region = country_str_to_int(region) + + +class LanguageList(BaseMessageList[Language]): + __slots__ = () + item_class = Language + + def append(self, value: str): + self.add().langtag = value + + +class Location(Metadata): + + __slots__ = () + + def from_value(self, value): + if isinstance(value, str) and value.startswith('{'): + value = json.loads(value) + + if isinstance(value, dict): + for key, val in value.items(): + setattr(self, key, val) + + elif isinstance(value, str): + parts = value.split(':') + if len(parts) > 2 or (parts[0] and parts[0][0] in ascii_letters): + country = parts and parts.pop(0) + if country: + self.country = country + state = parts and parts.pop(0) + if state: + self.state = state + city = parts and parts.pop(0) + if city: + self.city = city + code = parts and parts.pop(0) + if code: + self.code = code + latitude = parts and parts.pop(0) + if latitude: + self.latitude = latitude + longitude = parts and parts.pop(0) + if longitude: + self.longitude = longitude + + else: + raise ValueError(f'Could not parse country value: {value}') + + def to_dict(self): + d = MessageToDict(self.message) + if self.message.longitude: + d['longitude'] = self.longitude + if self.message.latitude: + d['latitude'] = self.latitude + return d + + @property + def country(self) -> str: + if self.message.country: + return LocationMessage.Country.Name(self.message.country) + + @country.setter + def country(self, country: str): + self.message.country = LocationMessage.Country.Value(country) + + @property + def state(self) -> str: + return self.message.state + + @state.setter + def state(self, state: str): + self.message.state = state + + @property + def city(self) -> str: + return self.message.city + + @city.setter + def city(self, city: str): + self.message.city = city + + @property + def code(self) -> str: + return self.message.code + + @code.setter + def code(self, code: str): + self.message.code = code + + GPS_PRECISION = Decimal('10000000') + + @property + def latitude(self) -> str: + if self.message.latitude: + return str(Decimal(self.message.latitude) / self.GPS_PRECISION) + + @latitude.setter + def latitude(self, latitude: str): + latitude = Decimal(latitude) + assert -90 <= latitude <= 90, "Latitude must be between -90 and 90 degrees." + self.message.latitude = int(latitude * self.GPS_PRECISION) + + @property + def longitude(self) -> str: + if self.message.longitude: + return str(Decimal(self.message.longitude) / self.GPS_PRECISION) + + @longitude.setter + def longitude(self, longitude: str): + longitude = Decimal(longitude) + assert -180 <= longitude <= 180, "Longitude must be between -180 and 180 degrees." 
+        self.message.longitude = int(longitude * self.GPS_PRECISION)
+
+
+class LocationList(BaseMessageList[Location]):
+    __slots__ = ()
+    item_class = Location
+
+    def append(self, value):
+        self.add().from_value(value)
+
+
+class TagList(BaseMessageList[str]):
+    __slots__ = ()
+    item_class = str
+
+    def append(self, tag: str):
+        tag = normalize_tag(tag)
+        if tag and tag not in self.message:
+            self.message.append(tag)
diff --git a/scribe/schema/base.py b/scribe/schema/base.py
new file mode 100644
index 0000000..6e9bab4
--- /dev/null
+++ b/scribe/schema/base.py
@@ -0,0 +1,124 @@
+from binascii import hexlify, unhexlify
+from typing import List, Iterator, TypeVar, Generic
+
+from google.protobuf.message import DecodeError
+from google.protobuf.json_format import MessageToDict
+
+
+class Signable:
+
+    __slots__ = (
+        'message', 'version', 'signature',
+        'signature_type', 'unsigned_payload', 'signing_channel_hash'
+    )
+
+    message_class = None
+
+    def __init__(self, message=None):
+        self.message = message or self.message_class()
+        self.version = 2
+        self.signature = None
+        self.signature_type = 'SECP256k1'
+        self.unsigned_payload = None
+        self.signing_channel_hash = None
+
+    def clear_signature(self):
+        self.signature = None
+        self.unsigned_payload = None
+        self.signing_channel_hash = None
+
+    @property
+    def signing_channel_id(self):
+        return hexlify(self.signing_channel_hash[::-1]).decode() if self.signing_channel_hash else None
+
+    @signing_channel_id.setter
+    def signing_channel_id(self, channel_id: str):
+        self.signing_channel_hash = unhexlify(channel_id)[::-1]
+
+    @property
+    def is_signed(self):
+        return self.signature is not None
+
+    def to_dict(self):
+        return MessageToDict(self.message)
+
+    def to_message_bytes(self) -> bytes:
+        return self.message.SerializeToString()
+
+    def to_bytes(self) -> bytes:
+        pieces = bytearray()
+        if self.is_signed:
+            pieces.append(1)
+            pieces.extend(self.signing_channel_hash)
+            pieces.extend(self.signature)
+        else:
+            pieces.append(0)
+        pieces.extend(self.to_message_bytes())
+        return bytes(pieces)
+
+    @classmethod
+    def from_bytes(cls, data: bytes):
+        signable = cls()
+        if data[0] == 0:
+            signable.message.ParseFromString(data[1:])
+        elif data[0] == 1:
+            signable.signing_channel_hash = data[1:21]
+            signable.signature = data[21:85]
+            signable.message.ParseFromString(data[85:])
+        else:
+            raise DecodeError('Could not determine message format version.')
+        return signable
+
+    def __len__(self):
+        return len(self.to_bytes())
+
+    def __bytes__(self):
+        return self.to_bytes()
+
+
+class Metadata:
+
+    __slots__ = 'message',
+
+    def __init__(self, message):
+        self.message = message
+
+
+I = TypeVar('I')
+
+
+class BaseMessageList(Metadata, Generic[I]):
+
+    __slots__ = ()
+
+    item_class = None
+
+    @property
+    def _message(self):
+        return self.message
+
+    def add(self) -> I:
+        return self.item_class(self._message.add())
+
+    def extend(self, values: List[str]):
+        for value in values:
+            self.append(value)
+
+    def append(self, value: str):
+        raise NotImplementedError
+
+    def __len__(self):
+        return len(self._message)
+
+    def __iter__(self) -> Iterator[I]:
+        for item in self._message:
+            yield self.item_class(item)
+
+    def __getitem__(self, item) -> I:
+        return self.item_class(self._message[item])
+
+    def __delitem__(self, key):
+        del self._message[key]
+
+    def __eq__(self, other) -> bool:
+        return self._message == other
diff --git a/scribe/schema/claim.py b/scribe/schema/claim.py
new file mode 100644
index 0000000..a4a46d8
--- /dev/null
+++ b/scribe/schema/claim.py
@@ -0,0 +1,422 @@
+import logging +from typing import List +from binascii import hexlify, unhexlify + +from asn1crypto.keys import PublicKeyInfo +from coincurve import PublicKey as cPublicKey + +from google.protobuf.json_format import MessageToDict +from google.protobuf.message import DecodeError +from hachoir.core.log import log as hachoir_log +from hachoir.parser import createParser as binary_file_parser +from hachoir.metadata import extractMetadata as binary_file_metadata + +from scribe.schema import compat +from scribe.schema.base import Signable +from scribe.schema.mime_types import guess_media_type, guess_stream_type +from scribe.schema.attrs import ( + Source, Playable, Dimmensional, Fee, Image, Video, Audio, + LanguageList, LocationList, ClaimList, ClaimReference, TagList +) +from scribe.schema.types.v2.claim_pb2 import Claim as ClaimMessage +from scribe.error import InputValueIsNoneError + + +hachoir_log.use_print = False +log = logging.getLogger(__name__) + + +class Claim(Signable): + + STREAM = 'stream' + CHANNEL = 'channel' + COLLECTION = 'collection' + REPOST = 'repost' + + __slots__ = () + + message_class = ClaimMessage + + @property + def claim_type(self) -> str: + return self.message.WhichOneof('type') + + def get_message(self, type_name): + message = getattr(self.message, type_name) + if self.claim_type is None: + message.SetInParent() + if self.claim_type != type_name: + raise ValueError(f'Claim is not a {type_name}.') + return message + + @property + def is_stream(self): + return self.claim_type == self.STREAM + + @property + def stream(self) -> 'Stream': + return Stream(self) + + @property + def is_channel(self): + return self.claim_type == self.CHANNEL + + @property + def channel(self) -> 'Channel': + return Channel(self) + + @property + def is_repost(self): + return self.claim_type == self.REPOST + + @property + def repost(self) -> 'Repost': + return Repost(self) + + @property + def is_collection(self): + return self.claim_type == self.COLLECTION + + @property + def collection(self) -> 'Collection': + return Collection(self) + + @classmethod + def from_bytes(cls, data: bytes) -> 'Claim': + try: + return super().from_bytes(data) + except DecodeError: + claim = cls() + if data[0] == ord('{'): + claim.version = 0 + compat.from_old_json_schema(claim, data) + elif data[0] not in (0, 1): + claim.version = 1 + compat.from_types_v1(claim, data) + else: + raise + return claim + + +class BaseClaim: + + __slots__ = 'claim', 'message' + + claim_type = None + object_fields = 'thumbnail', + repeat_fields = 'tags', 'languages', 'locations' + + def __init__(self, claim: Claim = None): + self.claim = claim or Claim() + self.message = self.claim.get_message(self.claim_type) + + def to_dict(self): + claim = MessageToDict(self.claim.message, preserving_proto_field_name=True) + claim.update(claim.pop(self.claim_type)) + if 'languages' in claim: + claim['languages'] = self.langtags + if 'locations' in claim: + claim['locations'] = [l.to_dict() for l in self.locations] + return claim + + def none_check(self, kwargs): + for key, value in kwargs.items(): + if value is None: + raise InputValueIsNoneError(key) + + def update(self, **kwargs): + self.none_check(kwargs) + + for key in list(kwargs): + for field in self.object_fields: + if key.startswith(f'{field}_'): + attr = getattr(self, field) + setattr(attr, key[len(f'{field}_'):], kwargs.pop(key)) + continue + + for l in self.repeat_fields: + field = getattr(self, l) + if kwargs.pop(f'clear_{l}', False): + del field[:] + items = kwargs.pop(l, None) + if items 
is not None: + if isinstance(items, str): + field.append(items) + elif isinstance(items, list): + field.extend(items) + else: + raise ValueError(f"Unknown {l} value: {items}") + + for key, value in kwargs.items(): + setattr(self, key, value) + + @property + def title(self) -> str: + return self.claim.message.title + + @title.setter + def title(self, title: str): + self.claim.message.title = title + + @property + def description(self) -> str: + return self.claim.message.description + + @description.setter + def description(self, description: str): + self.claim.message.description = description + + @property + def thumbnail(self) -> Source: + return Source(self.claim.message.thumbnail) + + @property + def tags(self) -> List[str]: + return TagList(self.claim.message.tags) + + @property + def languages(self) -> LanguageList: + return LanguageList(self.claim.message.languages) + + @property + def langtags(self) -> List[str]: + return [l.langtag for l in self.languages] + + @property + def locations(self) -> LocationList: + return LocationList(self.claim.message.locations) + + +class Stream(BaseClaim): + + __slots__ = () + + claim_type = Claim.STREAM + + object_fields = BaseClaim.object_fields + ('source',) + + def to_dict(self): + claim = super().to_dict() + if 'source' in claim: + if 'hash' in claim['source']: + claim['source']['hash'] = self.source.file_hash + if 'sd_hash' in claim['source']: + claim['source']['sd_hash'] = self.source.sd_hash + elif 'bt_infohash' in claim['source']: + claim['source']['bt_infohash'] = self.source.bt_infohash + if 'media_type' in claim['source']: + claim['stream_type'] = guess_stream_type(claim['source']['media_type']) + fee = claim.get('fee', {}) + if 'address' in fee: + fee['address'] = self.fee.address + if 'amount' in fee: + fee['amount'] = str(self.fee.amount) + return claim + + def update(self, file_path=None, height=None, width=None, duration=None, **kwargs): + + if kwargs.pop('clear_fee', False): + self.message.ClearField('fee') + else: + self.fee.update( + kwargs.pop('fee_address', None), + kwargs.pop('fee_currency', None), + kwargs.pop('fee_amount', None) + ) + + self.none_check(kwargs) + + if 'sd_hash' in kwargs: + self.source.sd_hash = kwargs.pop('sd_hash') + elif 'bt_infohash' in kwargs: + self.source.bt_infohash = kwargs.pop('bt_infohash') + if 'file_name' in kwargs: + self.source.name = kwargs.pop('file_name') + if 'file_hash' in kwargs: + self.source.file_hash = kwargs.pop('file_hash') + + stream_type = None + if file_path is not None: + stream_type = self.source.update(file_path=file_path) + elif self.source.name: + self.source.media_type, stream_type = guess_media_type(self.source.name) + elif self.source.media_type: + stream_type = guess_stream_type(self.source.media_type) + + if 'file_size' in kwargs: + self.source.size = kwargs.pop('file_size') + + if self.stream_type is not None and self.stream_type != stream_type: + self.message.ClearField(self.stream_type) + + if stream_type in ('image', 'video', 'audio'): + media = getattr(self, stream_type) + media_args = {'file_metadata': None} + if file_path is not None and not all((duration, width, height)): + try: + media_args['file_metadata'] = binary_file_metadata(binary_file_parser(file_path)) + except: + log.exception('Could not read file metadata.') + if isinstance(media, Playable): + media_args['duration'] = duration + if isinstance(media, Dimmensional): + media_args['height'] = height + media_args['width'] = width + media.update(**media_args) + + super().update(**kwargs) + + @property + def 
author(self) -> str: + return self.message.author + + @author.setter + def author(self, author: str): + self.message.author = author + + @property + def license(self) -> str: + return self.message.license + + @license.setter + def license(self, license: str): + self.message.license = license + + @property + def license_url(self) -> str: + return self.message.license_url + + @license_url.setter + def license_url(self, license_url: str): + self.message.license_url = license_url + + @property + def release_time(self) -> int: + return self.message.release_time + + @release_time.setter + def release_time(self, release_time: int): + self.message.release_time = release_time + + @property + def fee(self) -> Fee: + return Fee(self.message.fee) + + @property + def has_fee(self) -> bool: + return self.message.HasField('fee') + + @property + def has_source(self) -> bool: + return self.message.HasField('source') + + @property + def source(self) -> Source: + return Source(self.message.source) + + @property + def stream_type(self) -> str: + return self.message.WhichOneof('type') + + @property + def image(self) -> Image: + return Image(self.message.image) + + @property + def video(self) -> Video: + return Video(self.message.video) + + @property + def audio(self) -> Audio: + return Audio(self.message.audio) + + +class Channel(BaseClaim): + + __slots__ = () + + claim_type = Claim.CHANNEL + + object_fields = BaseClaim.object_fields + ('cover',) + repeat_fields = BaseClaim.repeat_fields + ('featured',) + + def to_dict(self): + claim = super().to_dict() + claim['public_key'] = self.public_key + if 'featured' in claim: + claim['featured'] = self.featured.ids + return claim + + @property + def public_key(self) -> str: + return hexlify(self.public_key_bytes).decode() + + @public_key.setter + def public_key(self, sd_public_key: str): + self.message.public_key = unhexlify(sd_public_key.encode()) + + @property + def public_key_bytes(self) -> bytes: + if len(self.message.public_key) == 33: + return self.message.public_key + public_key_info = PublicKeyInfo.load(self.message.public_key) + public_key = cPublicKey(public_key_info.native['public_key']) + return public_key.format(compressed=True) + + @public_key_bytes.setter + def public_key_bytes(self, public_key: bytes): + self.message.public_key = public_key + + @property + def email(self) -> str: + return self.message.email + + @email.setter + def email(self, email: str): + self.message.email = email + + @property + def website_url(self) -> str: + return self.message.website_url + + @website_url.setter + def website_url(self, website_url: str): + self.message.website_url = website_url + + @property + def cover(self) -> Source: + return Source(self.message.cover) + + @property + def featured(self) -> ClaimList: + return ClaimList(self.message.featured) + + +class Repost(BaseClaim): + + __slots__ = () + + claim_type = Claim.REPOST + + @property + def reference(self) -> ClaimReference: + return ClaimReference(self.message) + + +class Collection(BaseClaim): + + __slots__ = () + + claim_type = Claim.COLLECTION + + repeat_fields = BaseClaim.repeat_fields + ('claims',) + + def to_dict(self): + claim = super().to_dict() + if claim.pop('claim_references', None): + claim['claims'] = self.claims.ids + return claim + + @property + def claims(self) -> ClaimList: + return ClaimList(self.message) diff --git a/scribe/schema/compat.py b/scribe/schema/compat.py new file mode 100644 index 0000000..2dc99b0 --- /dev/null +++ b/scribe/schema/compat.py @@ -0,0 +1,93 @@ +import json +from 
decimal import Decimal + +from google.protobuf.message import DecodeError + +from scribe.schema.types.v1.legacy_claim_pb2 import Claim as OldClaimMessage +from scribe.schema.types.v1.certificate_pb2 import KeyType +from scribe.schema.types.v1.fee_pb2 import Fee as FeeMessage + + +def from_old_json_schema(claim, payload: bytes): + try: + value = json.loads(payload) + except: + raise DecodeError('Could not parse JSON.') + stream = claim.stream + stream.source.sd_hash = value['sources']['lbry_sd_hash'] + stream.source.media_type = ( + value.get('content_type', value.get('content-type')) or + 'application/octet-stream' + ) + stream.title = value.get('title', '') + stream.description = value.get('description', '') + if value.get('thumbnail', ''): + stream.thumbnail.url = value.get('thumbnail', '') + stream.author = value.get('author', '') + stream.license = value.get('license', '') + stream.license_url = value.get('license_url', '') + language = value.get('language', '') + if language: + if language.lower() == 'english': + language = 'en' + try: + stream.languages.append(language) + except: + pass + if value.get('nsfw', False): + stream.tags.append('mature') + if "fee" in value and isinstance(value['fee'], dict): + fee = value["fee"] + currency = list(fee.keys())[0] + if currency == 'LBC': + stream.fee.lbc = Decimal(fee[currency]['amount']) + elif currency == 'USD': + stream.fee.usd = Decimal(fee[currency]['amount']) + elif currency == 'BTC': + stream.fee.btc = Decimal(fee[currency]['amount']) + else: + raise DecodeError(f'Unknown currency: {currency}') + stream.fee.address = fee[currency]['address'] + return claim + + +def from_types_v1(claim, payload: bytes): + old = OldClaimMessage() + old.ParseFromString(payload) + if old.claimType == 2: + channel = claim.channel + channel.public_key_bytes = old.certificate.publicKey + else: + stream = claim.stream + stream.title = old.stream.metadata.title + stream.description = old.stream.metadata.description + stream.author = old.stream.metadata.author + stream.license = old.stream.metadata.license + stream.license_url = old.stream.metadata.licenseUrl + stream.thumbnail.url = old.stream.metadata.thumbnail + if old.stream.metadata.HasField('language'): + stream.languages.add().message.language = old.stream.metadata.language + stream.source.media_type = old.stream.source.contentType + stream.source.sd_hash_bytes = old.stream.source.source + if old.stream.metadata.nsfw: + stream.tags.append('mature') + if old.stream.metadata.HasField('fee'): + fee = old.stream.metadata.fee + stream.fee.address_bytes = fee.address + currency = FeeMessage.Currency.Name(fee.currency) + if currency == 'LBC': + stream.fee.lbc = Decimal(fee.amount) + elif currency == 'USD': + stream.fee.usd = Decimal(fee.amount) + elif currency == 'BTC': + stream.fee.btc = Decimal(fee.amount) + else: + raise DecodeError(f'Unsupported currency: {currency}') + if old.HasField('publisherSignature'): + sig = old.publisherSignature + claim.signature = sig.signature + claim.signature_type = KeyType.Name(sig.signatureType) + claim.signing_channel_hash = sig.certificateId[::-1] + old.ClearField("publisherSignature") + claim.unsigned_payload = old.SerializeToString() + return claim diff --git a/scribe/schema/mime_types.py b/scribe/schema/mime_types.py new file mode 100644 index 0000000..62505be --- /dev/null +++ b/scribe/schema/mime_types.py @@ -0,0 +1,214 @@ +import os +import filetype +import logging + +types_map = { + # http://www.iana.org/assignments/media-types + # Type mapping for automated metadata 
extraction (video, audio, image, document, binary, model) + '.a': ('application/octet-stream', 'binary'), + '.ai': ('application/postscript', 'image'), + '.aif': ('audio/x-aiff', 'audio'), + '.aifc': ('audio/x-aiff', 'audio'), + '.aiff': ('audio/x-aiff', 'audio'), + '.au': ('audio/basic', 'audio'), + '.avi': ('video/x-msvideo', 'video'), + '.bat': ('text/plain', 'document'), + '.bcpio': ('application/x-bcpio', 'binary'), + '.bin': ('application/octet-stream', 'binary'), + '.bmp': ('image/bmp', 'image'), + '.c': ('text/plain', 'document'), + '.cdf': ('application/x-netcdf', 'binary'), + '.cpio': ('application/x-cpio', 'binary'), + '.csh': ('application/x-csh', 'binary'), + '.css': ('text/css', 'document'), + '.csv': ('text/csv', 'document'), + '.dll': ('application/octet-stream', 'binary'), + '.doc': ('application/msword', 'document'), + '.dot': ('application/msword', 'document'), + '.dvi': ('application/x-dvi', 'binary'), + '.eml': ('message/rfc822', 'document'), + '.eps': ('application/postscript', 'document'), + '.epub': ('application/epub+zip', 'document'), + '.etx': ('text/x-setext', 'document'), + '.exe': ('application/octet-stream', 'binary'), + '.gif': ('image/gif', 'image'), + '.gtar': ('application/x-gtar', 'binary'), + '.h': ('text/plain', 'document'), + '.hdf': ('application/x-hdf', 'binary'), + '.htm': ('text/html', 'document'), + '.html': ('text/html', 'document'), + '.ico': ('image/vnd.microsoft.icon', 'image'), + '.ief': ('image/ief', 'image'), + '.iges': ('model/iges', 'model'), + '.jpe': ('image/jpeg', 'image'), + '.jpeg': ('image/jpeg', 'image'), + '.jpg': ('image/jpeg', 'image'), + '.js': ('application/javascript', 'document'), + '.json': ('application/json', 'document'), + '.ksh': ('text/plain', 'document'), + '.latex': ('application/x-latex', 'binary'), + '.m1v': ('video/mpeg', 'video'), + '.m3u': ('application/x-mpegurl', 'audio'), + '.m3u8': ('application/x-mpegurl', 'video'), + '.man': ('application/x-troff-man', 'document'), + '.markdown': ('text/markdown', 'document'), + '.md': ('text/markdown', 'document'), + '.me': ('application/x-troff-me', 'binary'), + '.mht': ('message/rfc822', 'document'), + '.mhtml': ('message/rfc822', 'document'), + '.mif': ('application/x-mif', 'binary'), + '.mov': ('video/quicktime', 'video'), + '.movie': ('video/x-sgi-movie', 'video'), + '.mp2': ('audio/mpeg', 'audio'), + '.mp3': ('audio/mpeg', 'audio'), + '.mp4': ('video/mp4', 'video'), + '.mpa': ('video/mpeg', 'video'), + '.mpd': ('application/dash+xml', 'video'), + '.mpe': ('video/mpeg', 'video'), + '.mpeg': ('video/mpeg', 'video'), + '.mpg': ('video/mpeg', 'video'), + '.ms': ('application/x-troff-ms', 'binary'), + '.m4s': ('video/iso.segment', 'binary'), + '.nc': ('application/x-netcdf', 'binary'), + '.nws': ('message/rfc822', 'document'), + '.o': ('application/octet-stream', 'binary'), + '.obj': ('application/octet-stream', 'model'), + '.oda': ('application/oda', 'binary'), + '.p12': ('application/x-pkcs12', 'binary'), + '.p7c': ('application/pkcs7-mime', 'binary'), + '.pbm': ('image/x-portable-bitmap', 'image'), + '.pdf': ('application/pdf', 'document'), + '.pfx': ('application/x-pkcs12', 'binary'), + '.pgm': ('image/x-portable-graymap', 'image'), + '.pl': ('text/plain', 'document'), + '.png': ('image/png', 'image'), + '.pnm': ('image/x-portable-anymap', 'image'), + '.pot': ('application/vnd.ms-powerpoint', 'document'), + '.ppa': ('application/vnd.ms-powerpoint', 'document'), + '.ppm': ('image/x-portable-pixmap', 'image'), + '.pps': ('application/vnd.ms-powerpoint', 'document'), + 
'.ppt': ('application/vnd.ms-powerpoint', 'document'), + '.ps': ('application/postscript', 'document'), + '.pwz': ('application/vnd.ms-powerpoint', 'document'), + '.py': ('text/x-python', 'document'), + '.pyc': ('application/x-python-code', 'binary'), + '.pyo': ('application/x-python-code', 'binary'), + '.qt': ('video/quicktime', 'video'), + '.ra': ('audio/x-pn-realaudio', 'audio'), + '.ram': ('application/x-pn-realaudio', 'audio'), + '.ras': ('image/x-cmu-raster', 'image'), + '.rdf': ('application/xml', 'binary'), + '.rgb': ('image/x-rgb', 'image'), + '.roff': ('application/x-troff', 'binary'), + '.rtx': ('text/richtext', 'document'), + '.sgm': ('text/x-sgml', 'document'), + '.sgml': ('text/x-sgml', 'document'), + '.sh': ('application/x-sh', 'document'), + '.shar': ('application/x-shar', 'binary'), + '.snd': ('audio/basic', 'audio'), + '.so': ('application/octet-stream', 'binary'), + '.src': ('application/x-wais-source', 'binary'), + '.stl': ('model/stl', 'model'), + '.sv4cpio': ('application/x-sv4cpio', 'binary'), + '.sv4crc': ('application/x-sv4crc', 'binary'), + '.svg': ('image/svg+xml', 'image'), + '.swf': ('application/x-shockwave-flash', 'binary'), + '.t': ('application/x-troff', 'binary'), + '.tar': ('application/x-tar', 'binary'), + '.tcl': ('application/x-tcl', 'binary'), + '.tex': ('application/x-tex', 'binary'), + '.texi': ('application/x-texinfo', 'binary'), + '.texinfo': ('application/x-texinfo', 'binary'), + '.tif': ('image/tiff', 'image'), + '.tiff': ('image/tiff', 'image'), + '.tr': ('application/x-troff', 'binary'), + '.ts': ('video/mp2t', 'video'), + '.tsv': ('text/tab-separated-values', 'document'), + '.txt': ('text/plain', 'document'), + '.ustar': ('application/x-ustar', 'binary'), + '.vcf': ('text/x-vcard', 'document'), + '.vtt': ('text/vtt', 'document'), + '.wav': ('audio/x-wav', 'audio'), + '.webm': ('video/webm', 'video'), + '.wiz': ('application/msword', 'document'), + '.wsdl': ('application/xml', 'document'), + '.xbm': ('image/x-xbitmap', 'image'), + '.xlb': ('application/vnd.ms-excel', 'document'), + '.xls': ('application/vnd.ms-excel', 'document'), + '.xml': ('text/xml', 'document'), + '.xpdl': ('application/xml', 'document'), + '.xpm': ('image/x-xpixmap', 'image'), + '.xsl': ('application/xml', 'document'), + '.xwd': ('image/x-xwindowdump', 'image'), + '.zip': ('application/zip', 'binary'), + + # These are non-standard types, commonly found in the wild. + '.cbr': ('application/vnd.comicbook-rar', 'document'), + '.cbz': ('application/vnd.comicbook+zip', 'document'), + '.flac': ('audio/flac', 'audio'), + '.lbry': ('application/x-ext-lbry', 'document'), + '.m4a': ('audio/mp4', 'audio'), + '.m4v': ('video/m4v', 'video'), + '.mid': ('audio/midi', 'audio'), + '.midi': ('audio/midi', 'audio'), + '.mkv': ('video/x-matroska', 'video'), + '.mobi': ('application/x-mobipocket-ebook', 'document'), + '.oga': ('audio/ogg', 'audio'), + '.ogv': ('video/ogg', 'video'), + '.ogg': ('video/ogg', 'video'), + '.pct': ('image/pict', 'image'), + '.pic': ('image/pict', 'image'), + '.pict': ('image/pict', 'image'), + '.prc': ('application/x-mobipocket-ebook', 'document'), + '.rtf': ('application/rtf', 'document'), + '.xul': ('text/xul', 'document'), + + # microsoft is special and has its own 'standard' + # https://docs.microsoft.com/en-us/windows/desktop/wmp/file-name-extensions + '.wmv': ('video/x-ms-wmv', 'video') +} + +# maps detected extensions to the possible analogs +# i.e. 
.cbz file is actually a .zip
+synonyms_map = {
+    '.zip': ['.cbz'],
+    '.rar': ['.cbr'],
+    '.ar': ['.a']
+}
+
+log = logging.getLogger(__name__)
+
+
+def guess_media_type(path):
+    _, ext = os.path.splitext(path)
+    extension = ext.strip().lower()
+
+    try:
+        kind = filetype.guess(path)
+        if kind:
+            real_extension = f".{kind.extension}"
+
+            if extension != real_extension:
+                if extension:
+                    log.warning(f"file extension does not match its contents: {path}, identified as {real_extension}")
+                else:
+                    log.debug(f"file {path} does not have an extension, identified by its contents as {real_extension}")
+
+                if extension not in synonyms_map.get(real_extension, []):
+                    extension = real_extension
+
+    except OSError:
+        pass
+
+    if extension[1:]:
+        if extension in types_map:
+            return types_map[extension]
+        return f'application/x-ext-{extension[1:]}', 'binary'
+    return 'application/octet-stream', 'binary'
+
+
+def guess_stream_type(media_type):
+    for media, stream in types_map.values():
+        if media == media_type:
+            return stream
+    return 'binary'
diff --git a/scribe/schema/purchase.py b/scribe/schema/purchase.py
new file mode 100644
index 0000000..22148e6
--- /dev/null
+++ b/scribe/schema/purchase.py
@@ -0,0 +1,47 @@
+from google.protobuf.message import DecodeError
+from google.protobuf.json_format import MessageToDict
+from scribe.schema.types.v2.purchase_pb2 import Purchase as PurchaseMessage
+from .attrs import ClaimReference
+
+
+class Purchase(ClaimReference):
+
+    START_BYTE = ord('P')
+
+    __slots__ = ()
+
+    def __init__(self, claim_id=None):
+        super().__init__(PurchaseMessage())
+        if claim_id is not None:
+            self.claim_id = claim_id
+
+    def to_dict(self):
+        return MessageToDict(self.message)
+
+    def to_message_bytes(self) -> bytes:
+        return self.message.SerializeToString()
+
+    def to_bytes(self) -> bytes:
+        pieces = bytearray()
+        pieces.append(self.START_BYTE)
+        pieces.extend(self.to_message_bytes())
+        return bytes(pieces)
+
+    @classmethod
+    def has_start_byte(cls, data: bytes):
+        return data and data[0] == cls.START_BYTE
+
+    @classmethod
+    def from_bytes(cls, data: bytes):
+        purchase = cls()
+        if purchase.has_start_byte(data):
+            purchase.message.ParseFromString(data[1:])
+        else:
+            raise DecodeError('Message does not start with correct byte.')
+        return purchase
+
+    def __len__(self):
+        return len(self.to_bytes())
+
+    def __bytes__(self):
+        return self.to_bytes()
diff --git a/scribe/schema/result.py b/scribe/schema/result.py
new file mode 100644
index 0000000..c361cfe
--- /dev/null
+++ b/scribe/schema/result.py
@@ -0,0 +1,258 @@
+import base64
+from typing import List, TYPE_CHECKING, Union, Optional, NamedTuple
+from binascii import hexlify
+from itertools import chain
+
+from scribe.error import ResolveCensoredError
+from scribe.schema.types.v2.result_pb2 import Outputs as OutputsMessage
+from scribe.schema.types.v2.result_pb2 import Error as ErrorMessage
+# if TYPE_CHECKING:
+#     from lbry_schema.schema.claim import ResolveResult
+
+INVALID = ErrorMessage.Code.Name(ErrorMessage.INVALID)
+NOT_FOUND = ErrorMessage.Code.Name(ErrorMessage.NOT_FOUND)
+BLOCKED = ErrorMessage.Code.Name(ErrorMessage.BLOCKED)
+
+
+def set_reference(reference, claim_hash, rows):
+    if claim_hash:
+        for txo in rows:
+            if claim_hash == txo.claim_hash:
+                reference.tx_hash = txo.tx_hash
+                reference.nout = txo.position
+                reference.height = txo.height
+                return
+
+
+class Censor:
+
+    NOT_CENSORED = 0
+    SEARCH = 1
+    RESOLVE = 2
+
+    __slots__ = 'censor_type', 'censored'
+
+    def __init__(self, censor_type):
+        self.censor_type =
censor_type
+        self.censored = {}
+
+    def is_censored(self, row):
+        return (row.get('censor_type') or self.NOT_CENSORED) >= self.censor_type
+
+    def apply(self, rows):
+        return [row for row in rows if not self.censor(row)]
+
+    def censor(self, row) -> Optional[bytes]:
+        if self.is_censored(row):
+            censoring_channel_hash = bytes.fromhex(row['censoring_channel_id'])[::-1]
+            self.censored.setdefault(censoring_channel_hash, set())
+            self.censored[censoring_channel_hash].add(row['tx_hash'])
+            return censoring_channel_hash
+        return None
+
+    def to_message(self, outputs: OutputsMessage, extra_txo_rows: dict):
+        for censoring_channel_hash, count in self.censored.items():
+            blocked = outputs.blocked.add()
+            blocked.count = len(count)
+            set_reference(blocked.channel, censoring_channel_hash, extra_txo_rows)
+            outputs.blocked_total += len(count)
+
+
+class ResolveResult(NamedTuple):
+    name: str
+    normalized_name: str
+    claim_hash: bytes
+    tx_num: int
+    position: int
+    tx_hash: bytes
+    height: int
+    amount: int
+    short_url: str
+    is_controlling: bool
+    canonical_url: str
+    creation_height: int
+    activation_height: int
+    expiration_height: int
+    effective_amount: int
+    support_amount: int
+    reposted: int
+    last_takeover_height: Optional[int]
+    claims_in_channel: Optional[int]
+    channel_hash: Optional[bytes]
+    reposted_claim_hash: Optional[bytes]
+    signature_valid: Optional[bool]
+
+
+class Outputs:
+
+    __slots__ = 'txos', 'extra_txos', 'txs', 'offset', 'total', 'blocked', 'blocked_total'
+
+    def __init__(self, txos: List, extra_txos: List, txs: set,
+                 offset: int, total: int, blocked: List, blocked_total: int):
+        self.txos = txos
+        self.txs = txs
+        self.extra_txos = extra_txos
+        self.offset = offset
+        self.total = total
+        self.blocked = blocked
+        self.blocked_total = blocked_total
+
+    def inflate(self, txs):
+        tx_map = {tx.hash: tx for tx in txs}
+        # hydrate the extra (referenced) txos first so that channel/repost
+        # references resolved below point at fully populated outputs
+        for txo_message in self.extra_txos:
+            self.message_to_txo(txo_message, tx_map)
+        txos = [self.message_to_txo(txo_message, tx_map) for txo_message in self.txos]
+        return txos, self.inflate_blocked(tx_map)
+
+    def inflate_blocked(self, tx_map):
+        return {
+            "total": self.blocked_total,
+            "channels": [{
+                'channel': self.message_to_txo(blocked.channel, tx_map),
+                'blocked': blocked.count
+            } for blocked in self.blocked]
+        }
+
+    def message_to_txo(self, txo_message, tx_map):
+        if txo_message.WhichOneof('meta') == 'error':
+            error = {
+                'error': {
+                    'name': txo_message.error.Code.Name(txo_message.error.code),
+                    'text': txo_message.error.text,
+                }
+            }
+            if error['error']['name'] == BLOCKED:
+                error['error']['censor'] = self.message_to_txo(
+                    txo_message.error.blocked.channel, tx_map
+                )
+            return error
+
+        tx = tx_map.get(txo_message.tx_hash)
+        if not tx:
+            return
+        txo = tx.outputs[txo_message.nout]
+        if txo_message.WhichOneof('meta') == 'claim':
+            claim = txo_message.claim
+            txo.meta = {
+                'short_url': f'lbry://{claim.short_url}',
+                'canonical_url': f'lbry://{claim.canonical_url or claim.short_url}',
+                'reposted': claim.reposted,
+                'is_controlling': claim.is_controlling,
+                'take_over_height': claim.take_over_height,
+                'creation_height': claim.creation_height,
+                'activation_height': claim.activation_height,
+                'expiration_height': claim.expiration_height,
+                'effective_amount': claim.effective_amount,
+                'support_amount': claim.support_amount,
+                # 'trending_group': claim.trending_group,
+                # 'trending_mixed': claim.trending_mixed,
+                # 'trending_local': claim.trending_local,
+                # 'trending_global': claim.trending_global,
+            }
+            if claim.HasField('channel'):
+                txo.channel = tx_map[claim.channel.tx_hash].outputs[claim.channel.nout]
+            if claim.HasField('repost'):
+                txo.reposted_claim = tx_map[claim.repost.tx_hash].outputs[claim.repost.nout]
+            try:
+                if txo.claim.is_channel:
+                    txo.meta['claims_in_channel'] = claim.claims_in_channel
+            except Exception:
+                # the claim blob may be absent or fail to decode; in that case
+                # 'claims_in_channel' is simply left out of the metadata
+                pass
+        return txo
+
+    @classmethod
+    def from_base64(cls, data: str) -> 'Outputs':
+        return cls.from_bytes(base64.b64decode(data))
+
+    @classmethod
+    def from_bytes(cls, data: bytes) -> 'Outputs':
+        outputs = OutputsMessage()
+        outputs.ParseFromString(data)
+        txs = set()
+        for txo_message in chain(outputs.txos, outputs.extra_txos):
+            if txo_message.WhichOneof('meta') == 'error':
+                continue
+            txs.add((hexlify(txo_message.tx_hash[::-1]).decode(), txo_message.height))
+        return cls(
+            outputs.txos, outputs.extra_txos, txs,
+            outputs.offset, outputs.total,
+            outputs.blocked, outputs.blocked_total
+        )
+
+    @classmethod
+    def from_grpc(cls, outputs: OutputsMessage) -> 'Outputs':
+        txs = set()
+        for txo_message in chain(outputs.txos, outputs.extra_txos):
+            if txo_message.WhichOneof('meta') == 'error':
+                continue
+            txs.add((hexlify(txo_message.tx_hash[::-1]).decode(), txo_message.height))
+        return cls(
+            outputs.txos, outputs.extra_txos, txs,
+            outputs.offset, outputs.total,
+            outputs.blocked, outputs.blocked_total
+        )
+
+    @classmethod
+    def to_base64(cls, txo_rows, extra_txo_rows, offset=0, total=None, blocked=None) -> str:
+        return base64.b64encode(cls.to_bytes(txo_rows, extra_txo_rows, offset, total, blocked)).decode()
+
+    @classmethod
+    def to_bytes(cls, txo_rows, extra_txo_rows, offset=0, total=None, blocked: Censor = None) -> bytes:
+        page = OutputsMessage()
+        page.offset = offset
+        if total is not None:
+            page.total = total
+        if blocked is not None:
+            blocked.to_message(page, extra_txo_rows)
+        for row in extra_txo_rows:
+            txo_message: 'OutputsMessage' = page.extra_txos.add()
+            if not isinstance(row, Exception):
+                if row.channel_hash:
+                    set_reference(txo_message.claim.channel, row.channel_hash, extra_txo_rows)
+                if row.reposted_claim_hash:
+                    set_reference(txo_message.claim.repost, row.reposted_claim_hash, extra_txo_rows)
+            cls.encode_txo(txo_message, row)
+
+        for row in txo_rows:
+            # cls.row_to_message(row, page.txos.add(), extra_txo_rows)
+            txo_message: 'OutputsMessage' = page.txos.add()
+            cls.encode_txo(txo_message, row)
+            if not isinstance(row, Exception):
+                if row.channel_hash:
+                    set_reference(txo_message.claim.channel, row.channel_hash, extra_txo_rows)
+                if row.reposted_claim_hash:
+                    set_reference(txo_message.claim.repost, row.reposted_claim_hash, extra_txo_rows)
+            elif isinstance(row, ResolveCensoredError):
+                set_reference(txo_message.error.blocked.channel, row.censor_id, extra_txo_rows)
+        return page.SerializeToString()
+
+    @classmethod
+    def encode_txo(cls, txo_message, resolve_result: Union[ResolveResult, Exception]):
+        if isinstance(resolve_result, Exception):
+            txo_message.error.text = resolve_result.args[0]
+            if isinstance(resolve_result, ValueError):
+                txo_message.error.code = ErrorMessage.INVALID
+            elif isinstance(resolve_result, LookupError):
+                txo_message.error.code = ErrorMessage.NOT_FOUND
+            elif isinstance(resolve_result, ResolveCensoredError):
+                txo_message.error.code = ErrorMessage.BLOCKED
+            return
+        txo_message.tx_hash = resolve_result.tx_hash
+        txo_message.nout = resolve_result.position
+        txo_message.height = resolve_result.height
+        txo_message.claim.short_url = resolve_result.short_url
+        txo_message.claim.reposted = resolve_result.reposted
+        txo_message.claim.is_controlling = resolve_result.is_controlling
+        txo_message.claim.creation_height = resolve_result.creation_height
+        txo_message.claim.activation_height = resolve_result.activation_height
+        txo_message.claim.expiration_height = resolve_result.expiration_height
+        txo_message.claim.effective_amount = resolve_result.effective_amount
+        txo_message.claim.support_amount = resolve_result.support_amount
+
+        if resolve_result.canonical_url is not None:
+            txo_message.claim.canonical_url = resolve_result.canonical_url
+        if resolve_result.last_takeover_height is not None:
+            txo_message.claim.take_over_height = resolve_result.last_takeover_height
+        if resolve_result.claims_in_channel is not None:
+            txo_message.claim.claims_in_channel = resolve_result.claims_in_channel
diff --git a/scribe/schema/support.py b/scribe/schema/support.py
new file mode 100644
index 0000000..35f60f6
--- /dev/null
+++ b/scribe/schema/support.py
@@ -0,0 +1,23 @@
+from scribe.schema.base import Signable
+from scribe.schema.types.v2.support_pb2 import Support as SupportMessage
+
+
+class Support(Signable):
+    __slots__ = ()
+    message_class = SupportMessage
+
+    @property
+    def emoji(self) -> str:
+        return self.message.emoji
+
+    @emoji.setter
+    def emoji(self, emoji: str):
+        self.message.emoji = emoji
+
+    @property
+    def comment(self) -> str:
+        return self.message.comment
+
+    @comment.setter
+    def comment(self, comment: str):
+        self.message.comment = comment
diff --git a/scribe/schema/tags.py b/scribe/schema/tags.py
new file mode 100644
index 0000000..2d3cf79
--- /dev/null
+++ b/scribe/schema/tags.py
@@ -0,0 +1,13 @@
+from typing import List
+import re
+
+MULTI_SPACE_RE = re.compile(r"\s{2,}")
+WEIRD_CHARS_RE = re.compile(r"[#!~]")
+
+
+def normalize_tag(tag: str):
+    return MULTI_SPACE_RE.sub(' ', WEIRD_CHARS_RE.sub(' ', tag.lower().replace("'", ""))).strip()
+
+
+def clean_tags(tags: List[str]):
+    return [tag for tag in {normalize_tag(tag) for tag in tags} if tag]
diff --git a/scribe/schema/types/__init__.py b/scribe/schema/types/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scribe/schema/types/v1/__init__.py b/scribe/schema/types/v1/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/scribe/schema/types/v1/certificate_pb2.py b/scribe/schema/types/v1/certificate_pb2.py
new file mode 100644
index 0000000..d42df90
--- /dev/null
+++ b/scribe/schema/types/v1/certificate_pb2.py
@@ -0,0 +1,146 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
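+#
+# Usage sketch (an illustration added here, not compiler output; it assumes the
+# stock google.protobuf runtime, and the publicKey bytes are a placeholder):
+#
+#     from scribe.schema.types.v1.certificate_pb2 import Certificate, SECP256k1
+#     cert = Certificate(version=1, keyType=SECP256k1, publicKey=b'...')
+#     data = cert.SerializeToString()
+#     assert Certificate.FromString(data) == cert
+#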
+# source: certificate.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='certificate.proto', + package='legacy_pb', + syntax='proto2', + serialized_options=None, + serialized_pb=_b('\n\x11\x63\x65rtificate.proto\x12\tlegacy_pb\"\xa2\x01\n\x0b\x43\x65rtificate\x12/\n\x07version\x18\x01 \x02(\x0e\x32\x1e.legacy_pb.Certificate.Version\x12#\n\x07keyType\x18\x02 \x02(\x0e\x32\x12.legacy_pb.KeyType\x12\x11\n\tpublicKey\x18\x04 \x02(\x0c\"*\n\x07Version\x12\x13\n\x0fUNKNOWN_VERSION\x10\x00\x12\n\n\x06_0_0_1\x10\x01*Q\n\x07KeyType\x12\x1b\n\x17UNKNOWN_PUBLIC_KEY_TYPE\x10\x00\x12\x0c\n\x08NIST256p\x10\x01\x12\x0c\n\x08NIST384p\x10\x02\x12\r\n\tSECP256k1\x10\x03') +) + +_KEYTYPE = _descriptor.EnumDescriptor( + name='KeyType', + full_name='legacy_pb.KeyType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_PUBLIC_KEY_TYPE', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NIST256p', index=1, number=1, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NIST384p', index=2, number=2, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SECP256k1', index=3, number=3, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=197, + serialized_end=278, +) +_sym_db.RegisterEnumDescriptor(_KEYTYPE) + +KeyType = enum_type_wrapper.EnumTypeWrapper(_KEYTYPE) +UNKNOWN_PUBLIC_KEY_TYPE = 0 +NIST256p = 1 +NIST384p = 2 +SECP256k1 = 3 + + +_CERTIFICATE_VERSION = _descriptor.EnumDescriptor( + name='Version', + full_name='legacy_pb.Certificate.Version', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_VERSION', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='_0_0_1', index=1, number=1, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=153, + serialized_end=195, +) +_sym_db.RegisterEnumDescriptor(_CERTIFICATE_VERSION) + + +_CERTIFICATE = _descriptor.Descriptor( + name='Certificate', + full_name='legacy_pb.Certificate', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='legacy_pb.Certificate.version', index=0, + number=1, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='keyType', full_name='legacy_pb.Certificate.keyType', index=1, + number=2, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='publicKey', full_name='legacy_pb.Certificate.publicKey', index=2, + number=4, type=12, cpp_type=9, 
label=2, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _CERTIFICATE_VERSION, + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=33, + serialized_end=195, +) + +_CERTIFICATE.fields_by_name['version'].enum_type = _CERTIFICATE_VERSION +_CERTIFICATE.fields_by_name['keyType'].enum_type = _KEYTYPE +_CERTIFICATE_VERSION.containing_type = _CERTIFICATE +DESCRIPTOR.message_types_by_name['Certificate'] = _CERTIFICATE +DESCRIPTOR.enum_types_by_name['KeyType'] = _KEYTYPE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Certificate = _reflection.GeneratedProtocolMessageType('Certificate', (_message.Message,), dict( + DESCRIPTOR = _CERTIFICATE, + __module__ = 'certificate_pb2' + # @@protoc_insertion_point(class_scope:legacy_pb.Certificate) + )) +_sym_db.RegisterMessage(Certificate) + + +# @@protoc_insertion_point(module_scope) diff --git a/scribe/schema/types/v1/fee_pb2.py b/scribe/schema/types/v1/fee_pb2.py new file mode 100644 index 0000000..aa43011 --- /dev/null +++ b/scribe/schema/types/v1/fee_pb2.py @@ -0,0 +1,148 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: fee.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='fee.proto', + package='legacy_pb', + syntax='proto2', + serialized_options=None, + serialized_pb=_b('\n\tfee.proto\x12\tlegacy_pb\"\xe3\x01\n\x03\x46\x65\x65\x12\'\n\x07version\x18\x01 \x02(\x0e\x32\x16.legacy_pb.Fee.Version\x12)\n\x08\x63urrency\x18\x02 \x02(\x0e\x32\x17.legacy_pb.Fee.Currency\x12\x0f\n\x07\x61\x64\x64ress\x18\x03 \x02(\x0c\x12\x0e\n\x06\x61mount\x18\x04 \x02(\x02\"*\n\x07Version\x12\x13\n\x0fUNKNOWN_VERSION\x10\x00\x12\n\n\x06_0_0_1\x10\x01\";\n\x08\x43urrency\x12\x14\n\x10UNKNOWN_CURRENCY\x10\x00\x12\x07\n\x03LBC\x10\x01\x12\x07\n\x03\x42TC\x10\x02\x12\x07\n\x03USD\x10\x03') +) + + + +_FEE_VERSION = _descriptor.EnumDescriptor( + name='Version', + full_name='legacy_pb.Fee.Version', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_VERSION', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='_0_0_1', index=1, number=1, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=149, + serialized_end=191, +) +_sym_db.RegisterEnumDescriptor(_FEE_VERSION) + +_FEE_CURRENCY = _descriptor.EnumDescriptor( + name='Currency', + full_name='legacy_pb.Fee.Currency', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_CURRENCY', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LBC', index=1, number=1, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BTC', index=2, number=2, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='USD', index=3, number=3, + 
serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=193, + serialized_end=252, +) +_sym_db.RegisterEnumDescriptor(_FEE_CURRENCY) + + +_FEE = _descriptor.Descriptor( + name='Fee', + full_name='legacy_pb.Fee', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='legacy_pb.Fee.version', index=0, + number=1, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='currency', full_name='legacy_pb.Fee.currency', index=1, + number=2, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='address', full_name='legacy_pb.Fee.address', index=2, + number=3, type=12, cpp_type=9, label=2, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='amount', full_name='legacy_pb.Fee.amount', index=3, + number=4, type=2, cpp_type=6, label=2, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FEE_VERSION, + _FEE_CURRENCY, + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=25, + serialized_end=252, +) + +_FEE.fields_by_name['version'].enum_type = _FEE_VERSION +_FEE.fields_by_name['currency'].enum_type = _FEE_CURRENCY +_FEE_VERSION.containing_type = _FEE +_FEE_CURRENCY.containing_type = _FEE +DESCRIPTOR.message_types_by_name['Fee'] = _FEE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Fee = _reflection.GeneratedProtocolMessageType('Fee', (_message.Message,), dict( + DESCRIPTOR = _FEE, + __module__ = 'fee_pb2' + # @@protoc_insertion_point(class_scope:legacy_pb.Fee) + )) +_sym_db.RegisterMessage(Fee) + + +# @@protoc_insertion_point(module_scope) diff --git a/scribe/schema/types/v1/legacy_claim_pb2.py b/scribe/schema/types/v1/legacy_claim_pb2.py new file mode 100644 index 0000000..7e37f25 --- /dev/null +++ b/scribe/schema/types/v1/legacy_claim_pb2.py @@ -0,0 +1,158 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: legacy_claim.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import stream_pb2 as stream__pb2 +from . import certificate_pb2 as certificate__pb2 +from . 
import signature_pb2 as signature__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='legacy_claim.proto', + package='legacy_pb', + syntax='proto2', + serialized_options=None, + serialized_pb=_b('\n\x12legacy_claim.proto\x12\tlegacy_pb\x1a\x0cstream.proto\x1a\x11\x63\x65rtificate.proto\x1a\x0fsignature.proto\"\xd9\x02\n\x05\x43laim\x12)\n\x07version\x18\x01 \x02(\x0e\x32\x18.legacy_pb.Claim.Version\x12-\n\tclaimType\x18\x02 \x02(\x0e\x32\x1a.legacy_pb.Claim.ClaimType\x12!\n\x06stream\x18\x03 \x01(\x0b\x32\x11.legacy_pb.Stream\x12+\n\x0b\x63\x65rtificate\x18\x04 \x01(\x0b\x32\x16.legacy_pb.Certificate\x12\x30\n\x12publisherSignature\x18\x05 \x01(\x0b\x32\x14.legacy_pb.Signature\"*\n\x07Version\x12\x13\n\x0fUNKNOWN_VERSION\x10\x00\x12\n\n\x06_0_0_1\x10\x01\"H\n\tClaimType\x12\x16\n\x12UNKNOWN_CLAIM_TYPE\x10\x00\x12\x0e\n\nstreamType\x10\x01\x12\x13\n\x0f\x63\x65rtificateType\x10\x02') + , + dependencies=[stream__pb2.DESCRIPTOR,certificate__pb2.DESCRIPTOR,signature__pb2.DESCRIPTOR,]) + + + +_CLAIM_VERSION = _descriptor.EnumDescriptor( + name='Version', + full_name='legacy_pb.Claim.Version', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_VERSION', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='_0_0_1', index=1, number=1, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=313, + serialized_end=355, +) +_sym_db.RegisterEnumDescriptor(_CLAIM_VERSION) + +_CLAIM_CLAIMTYPE = _descriptor.EnumDescriptor( + name='ClaimType', + full_name='legacy_pb.Claim.ClaimType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_CLAIM_TYPE', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='streamType', index=1, number=1, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='certificateType', index=2, number=2, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=357, + serialized_end=429, +) +_sym_db.RegisterEnumDescriptor(_CLAIM_CLAIMTYPE) + + +_CLAIM = _descriptor.Descriptor( + name='Claim', + full_name='legacy_pb.Claim', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='legacy_pb.Claim.version', index=0, + number=1, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='claimType', full_name='legacy_pb.Claim.claimType', index=1, + number=2, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='stream', full_name='legacy_pb.Claim.stream', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='certificate', full_name='legacy_pb.Claim.certificate', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, 
default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='publisherSignature', full_name='legacy_pb.Claim.publisherSignature', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _CLAIM_VERSION, + _CLAIM_CLAIMTYPE, + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=84, + serialized_end=429, +) + +_CLAIM.fields_by_name['version'].enum_type = _CLAIM_VERSION +_CLAIM.fields_by_name['claimType'].enum_type = _CLAIM_CLAIMTYPE +_CLAIM.fields_by_name['stream'].message_type = stream__pb2._STREAM +_CLAIM.fields_by_name['certificate'].message_type = certificate__pb2._CERTIFICATE +_CLAIM.fields_by_name['publisherSignature'].message_type = signature__pb2._SIGNATURE +_CLAIM_VERSION.containing_type = _CLAIM +_CLAIM_CLAIMTYPE.containing_type = _CLAIM +DESCRIPTOR.message_types_by_name['Claim'] = _CLAIM +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Claim = _reflection.GeneratedProtocolMessageType('Claim', (_message.Message,), dict( + DESCRIPTOR = _CLAIM, + __module__ = 'legacy_claim_pb2' + # @@protoc_insertion_point(class_scope:legacy_pb.Claim) + )) +_sym_db.RegisterMessage(Claim) + + +# @@protoc_insertion_point(module_scope) diff --git a/scribe/schema/types/v1/metadata_pb2.py b/scribe/schema/types/v1/metadata_pb2.py new file mode 100644 index 0000000..631f206 --- /dev/null +++ b/scribe/schema/types/v1/metadata_pb2.py @@ -0,0 +1,936 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: metadata.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import fee_pb2 as fee__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='metadata.proto', + package='legacy_pb', + syntax='proto2', + serialized_options=None, + serialized_pb=_b('\n\x0emetadata.proto\x12\tlegacy_pb\x1a\tfee.proto\"\xfc\x0e\n\x08Metadata\x12,\n\x07version\x18\x01 \x02(\x0e\x32\x1b.legacy_pb.Metadata.Version\x12.\n\x08language\x18\x02 \x02(\x0e\x32\x1c.legacy_pb.Metadata.Language\x12\r\n\x05title\x18\x03 \x02(\t\x12\x13\n\x0b\x64\x65scription\x18\x04 \x02(\t\x12\x0e\n\x06\x61uthor\x18\x05 \x02(\t\x12\x0f\n\x07license\x18\x06 \x02(\t\x12\x0c\n\x04nsfw\x18\x07 \x02(\x08\x12\x1b\n\x03\x66\x65\x65\x18\x08 \x01(\x0b\x32\x0e.legacy_pb.Fee\x12\x11\n\tthumbnail\x18\t \x01(\t\x12\x0f\n\x07preview\x18\n \x01(\t\x12\x12\n\nlicenseUrl\x18\x0b \x01(\t\"N\n\x07Version\x12\x13\n\x0fUNKNOWN_VERSION\x10\x00\x12\n\n\x06_0_0_1\x10\x01\x12\n\n\x06_0_0_2\x10\x02\x12\n\n\x06_0_0_3\x10\x03\x12\n\n\x06_0_1_0\x10\x04\"\x99\x0c\n\x08Language\x12\x14\n\x10UNKNOWN_LANGUAGE\x10\x00\x12\x06\n\x02\x65n\x10\x01\x12\x06\n\x02\x61\x61\x10\x02\x12\x06\n\x02\x61\x62\x10\x03\x12\x06\n\x02\x61\x65\x10\x04\x12\x06\n\x02\x61\x66\x10\x05\x12\x06\n\x02\x61k\x10\x06\x12\x06\n\x02\x61m\x10\x07\x12\x06\n\x02\x61n\x10\x08\x12\x06\n\x02\x61r\x10\t\x12\x06\n\x02\x61s\x10\n\x12\x06\n\x02\x61v\x10\x0b\x12\x06\n\x02\x61y\x10\x0c\x12\x06\n\x02\x61z\x10\r\x12\x06\n\x02\x62\x61\x10\x0e\x12\x06\n\x02\x62\x65\x10\x0f\x12\x06\n\x02\x62g\x10\x10\x12\x06\n\x02\x62h\x10\x11\x12\x06\n\x02\x62i\x10\x12\x12\x06\n\x02\x62m\x10\x13\x12\x06\n\x02\x62n\x10\x14\x12\x06\n\x02\x62o\x10\x15\x12\x06\n\x02\x62r\x10\x16\x12\x06\n\x02\x62s\x10\x17\x12\x06\n\x02\x63\x61\x10\x18\x12\x06\n\x02\x63\x65\x10\x19\x12\x06\n\x02\x63h\x10\x1a\x12\x06\n\x02\x63o\x10\x1b\x12\x06\n\x02\x63r\x10\x1c\x12\x06\n\x02\x63s\x10\x1d\x12\x06\n\x02\x63u\x10\x1e\x12\x06\n\x02\x63v\x10\x1f\x12\x06\n\x02\x63y\x10 
\x12\x06\n\x02\x64\x61\x10!\x12\x06\n\x02\x64\x65\x10\"\x12\x06\n\x02\x64v\x10#\x12\x06\n\x02\x64z\x10$\x12\x06\n\x02\x65\x65\x10%\x12\x06\n\x02\x65l\x10&\x12\x06\n\x02\x65o\x10\'\x12\x06\n\x02\x65s\x10(\x12\x06\n\x02\x65t\x10)\x12\x06\n\x02\x65u\x10*\x12\x06\n\x02\x66\x61\x10+\x12\x06\n\x02\x66\x66\x10,\x12\x06\n\x02\x66i\x10-\x12\x06\n\x02\x66j\x10.\x12\x06\n\x02\x66o\x10/\x12\x06\n\x02\x66r\x10\x30\x12\x06\n\x02\x66y\x10\x31\x12\x06\n\x02ga\x10\x32\x12\x06\n\x02gd\x10\x33\x12\x06\n\x02gl\x10\x34\x12\x06\n\x02gn\x10\x35\x12\x06\n\x02gu\x10\x36\x12\x06\n\x02gv\x10\x37\x12\x06\n\x02ha\x10\x38\x12\x06\n\x02he\x10\x39\x12\x06\n\x02hi\x10:\x12\x06\n\x02ho\x10;\x12\x06\n\x02hr\x10<\x12\x06\n\x02ht\x10=\x12\x06\n\x02hu\x10>\x12\x06\n\x02hy\x10?\x12\x06\n\x02hz\x10@\x12\x06\n\x02ia\x10\x41\x12\x06\n\x02id\x10\x42\x12\x06\n\x02ie\x10\x43\x12\x06\n\x02ig\x10\x44\x12\x06\n\x02ii\x10\x45\x12\x06\n\x02ik\x10\x46\x12\x06\n\x02io\x10G\x12\x06\n\x02is\x10H\x12\x06\n\x02it\x10I\x12\x06\n\x02iu\x10J\x12\x06\n\x02ja\x10K\x12\x06\n\x02jv\x10L\x12\x06\n\x02ka\x10M\x12\x06\n\x02kg\x10N\x12\x06\n\x02ki\x10O\x12\x06\n\x02kj\x10P\x12\x06\n\x02kk\x10Q\x12\x06\n\x02kl\x10R\x12\x06\n\x02km\x10S\x12\x06\n\x02kn\x10T\x12\x06\n\x02ko\x10U\x12\x06\n\x02kr\x10V\x12\x06\n\x02ks\x10W\x12\x06\n\x02ku\x10X\x12\x06\n\x02kv\x10Y\x12\x06\n\x02kw\x10Z\x12\x06\n\x02ky\x10[\x12\x06\n\x02la\x10\\\x12\x06\n\x02lb\x10]\x12\x06\n\x02lg\x10^\x12\x06\n\x02li\x10_\x12\x06\n\x02ln\x10`\x12\x06\n\x02lo\x10\x61\x12\x06\n\x02lt\x10\x62\x12\x06\n\x02lu\x10\x63\x12\x06\n\x02lv\x10\x64\x12\x06\n\x02mg\x10\x65\x12\x06\n\x02mh\x10\x66\x12\x06\n\x02mi\x10g\x12\x06\n\x02mk\x10h\x12\x06\n\x02ml\x10i\x12\x06\n\x02mn\x10j\x12\x06\n\x02mr\x10k\x12\x06\n\x02ms\x10l\x12\x06\n\x02mt\x10m\x12\x06\n\x02my\x10n\x12\x06\n\x02na\x10o\x12\x06\n\x02nb\x10p\x12\x06\n\x02nd\x10q\x12\x06\n\x02ne\x10r\x12\x06\n\x02ng\x10s\x12\x06\n\x02nl\x10t\x12\x06\n\x02nn\x10u\x12\x06\n\x02no\x10v\x12\x06\n\x02nr\x10w\x12\x06\n\x02nv\x10x\x12\x06\n\x02ny\x10y\x12\x06\n\x02oc\x10z\x12\x06\n\x02oj\x10{\x12\x06\n\x02om\x10|\x12\x06\n\x02or\x10}\x12\x06\n\x02os\x10~\x12\x06\n\x02pa\x10\x7f\x12\x07\n\x02pi\x10\x80\x01\x12\x07\n\x02pl\x10\x81\x01\x12\x07\n\x02ps\x10\x82\x01\x12\x07\n\x02pt\x10\x83\x01\x12\x07\n\x02qu\x10\x84\x01\x12\x07\n\x02rm\x10\x85\x01\x12\x07\n\x02rn\x10\x86\x01\x12\x07\n\x02ro\x10\x87\x01\x12\x07\n\x02ru\x10\x88\x01\x12\x07\n\x02rw\x10\x89\x01\x12\x07\n\x02sa\x10\x8a\x01\x12\x07\n\x02sc\x10\x8b\x01\x12\x07\n\x02sd\x10\x8c\x01\x12\x07\n\x02se\x10\x8d\x01\x12\x07\n\x02sg\x10\x8e\x01\x12\x07\n\x02si\x10\x8f\x01\x12\x07\n\x02sk\x10\x90\x01\x12\x07\n\x02sl\x10\x91\x01\x12\x07\n\x02sm\x10\x92\x01\x12\x07\n\x02sn\x10\x93\x01\x12\x07\n\x02so\x10\x94\x01\x12\x07\n\x02sq\x10\x95\x01\x12\x07\n\x02sr\x10\x96\x01\x12\x07\n\x02ss\x10\x97\x01\x12\x07\n\x02st\x10\x98\x01\x12\x07\n\x02su\x10\x99\x01\x12\x07\n\x02sv\x10\x9a\x01\x12\x07\n\x02sw\x10\x9b\x01\x12\x07\n\x02ta\x10\x9c\x01\x12\x07\n\x02te\x10\x9d\x01\x12\x07\n\x02tg\x10\x9e\x01\x12\x07\n\x02th\x10\x9f\x01\x12\x07\n\x02ti\x10\xa0\x01\x12\x07\n\x02tk\x10\xa1\x01\x12\x07\n\x02tl\x10\xa2\x01\x12\x07\n\x02tn\x10\xa3\x01\x12\x07\n\x02to\x10\xa4\x01\x12\x07\n\x02tr\x10\xa5\x01\x12\x07\n\x02ts\x10\xa6\x01\x12\x07\n\x02tt\x10\xa7\x01\x12\x07\n\x02tw\x10\xa8\x01\x12\x07\n\x02ty\x10\xa9\x01\x12\x07\n\x02ug\x10\xaa\x01\x12\x07\n\x02uk\x10\xab\x01\x12\x07\n\x02ur\x10\xac\x01\x12\x07\n\x02uz\x10\xad\x01\x12\x07\n\x02ve\x10\xae\x01\x12\x07\n\x02vi\x10\xaf\x01\x12\x07\n\x02vo\x10\xb0\x01\x12\x07\n\x02wa\x10\xb1\x01\x12\x07\n\x02wo\x10\x
b2\x01\x12\x07\n\x02xh\x10\xb3\x01\x12\x07\n\x02yi\x10\xb4\x01\x12\x07\n\x02yo\x10\xb5\x01\x12\x07\n\x02za\x10\xb6\x01\x12\x07\n\x02zh\x10\xb7\x01\x12\x07\n\x02zu\x10\xb8\x01') + , + dependencies=[fee__pb2.DESCRIPTOR,]) + + + +_METADATA_VERSION = _descriptor.EnumDescriptor( + name='Version', + full_name='legacy_pb.Metadata.Version', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_VERSION', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='_0_0_1', index=1, number=1, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='_0_0_2', index=2, number=2, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='_0_0_3', index=3, number=3, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='_0_1_0', index=4, number=4, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=315, + serialized_end=393, +) +_sym_db.RegisterEnumDescriptor(_METADATA_VERSION) + +_METADATA_LANGUAGE = _descriptor.EnumDescriptor( + name='Language', + full_name='legacy_pb.Metadata.Language', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_LANGUAGE', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='en', index=1, number=1, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='aa', index=2, number=2, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ab', index=3, number=3, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ae', index=4, number=4, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='af', index=5, number=5, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ak', index=6, number=6, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='am', index=7, number=7, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='an', index=8, number=8, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ar', index=9, number=9, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='as', index=10, number=10, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='av', index=11, number=11, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ay', index=12, number=12, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='az', index=13, number=13, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ba', index=14, number=14, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='be', index=15, number=15, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='bg', index=16, number=16, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='bh', index=17, number=17, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='bi', index=18, number=18, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='bm', index=19, number=19, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='bn', index=20, number=20, + 
serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='bo', index=21, number=21, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='br', index=22, number=22, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='bs', index=23, number=23, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ca', index=24, number=24, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ce', index=25, number=25, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ch', index=26, number=26, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='co', index=27, number=27, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='cr', index=28, number=28, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='cs', index=29, number=29, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='cu', index=30, number=30, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='cv', index=31, number=31, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='cy', index=32, number=32, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='da', index=33, number=33, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='de', index=34, number=34, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='dv', index=35, number=35, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='dz', index=36, number=36, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ee', index=37, number=37, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='el', index=38, number=38, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='eo', index=39, number=39, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='es', index=40, number=40, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='et', index=41, number=41, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='eu', index=42, number=42, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='fa', index=43, number=43, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ff', index=44, number=44, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='fi', index=45, number=45, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='fj', index=46, number=46, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='fo', index=47, number=47, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='fr', index=48, number=48, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='fy', index=49, number=49, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ga', index=50, number=50, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='gd', index=51, number=51, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='gl', index=52, number=52, + serialized_options=None, + 
type=None), + _descriptor.EnumValueDescriptor( + name='gn', index=53, number=53, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='gu', index=54, number=54, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='gv', index=55, number=55, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ha', index=56, number=56, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='he', index=57, number=57, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='hi', index=58, number=58, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ho', index=59, number=59, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='hr', index=60, number=60, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ht', index=61, number=61, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='hu', index=62, number=62, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='hy', index=63, number=63, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='hz', index=64, number=64, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ia', index=65, number=65, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='id', index=66, number=66, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ie', index=67, number=67, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ig', index=68, number=68, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ii', index=69, number=69, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ik', index=70, number=70, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='io', index=71, number=71, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='is', index=72, number=72, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='it', index=73, number=73, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='iu', index=74, number=74, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ja', index=75, number=75, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='jv', index=76, number=76, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ka', index=77, number=77, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kg', index=78, number=78, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ki', index=79, number=79, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kj', index=80, number=80, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kk', index=81, number=81, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kl', index=82, number=82, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='km', index=83, number=83, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kn', index=84, number=84, + serialized_options=None, + type=None), + 
_descriptor.EnumValueDescriptor( + name='ko', index=85, number=85, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kr', index=86, number=86, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ks', index=87, number=87, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ku', index=88, number=88, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kv', index=89, number=89, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kw', index=90, number=90, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ky', index=91, number=91, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='la', index=92, number=92, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='lb', index=93, number=93, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='lg', index=94, number=94, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='li', index=95, number=95, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ln', index=96, number=96, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='lo', index=97, number=97, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='lt', index=98, number=98, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='lu', index=99, number=99, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='lv', index=100, number=100, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='mg', index=101, number=101, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='mh', index=102, number=102, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='mi', index=103, number=103, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='mk', index=104, number=104, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ml', index=105, number=105, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='mn', index=106, number=106, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='mr', index=107, number=107, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ms', index=108, number=108, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='mt', index=109, number=109, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='my', index=110, number=110, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='na', index=111, number=111, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='nb', index=112, number=112, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='nd', index=113, number=113, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ne', index=114, number=114, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ng', index=115, number=115, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='nl', index=116, number=116, + serialized_options=None, + type=None), + 
_descriptor.EnumValueDescriptor( + name='nn', index=117, number=117, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='no', index=118, number=118, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='nr', index=119, number=119, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='nv', index=120, number=120, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ny', index=121, number=121, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='oc', index=122, number=122, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='oj', index=123, number=123, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='om', index=124, number=124, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='or', index=125, number=125, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='os', index=126, number=126, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='pa', index=127, number=127, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='pi', index=128, number=128, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='pl', index=129, number=129, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ps', index=130, number=130, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='pt', index=131, number=131, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='qu', index=132, number=132, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='rm', index=133, number=133, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='rn', index=134, number=134, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ro', index=135, number=135, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ru', index=136, number=136, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='rw', index=137, number=137, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sa', index=138, number=138, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sc', index=139, number=139, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sd', index=140, number=140, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='se', index=141, number=141, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sg', index=142, number=142, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='si', index=143, number=143, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sk', index=144, number=144, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sl', index=145, number=145, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sm', index=146, number=146, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sn', index=147, number=147, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='so', index=148, number=148, + 
serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sq', index=149, number=149, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sr', index=150, number=150, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ss', index=151, number=151, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='st', index=152, number=152, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='su', index=153, number=153, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sv', index=154, number=154, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sw', index=155, number=155, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ta', index=156, number=156, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='te', index=157, number=157, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='tg', index=158, number=158, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='th', index=159, number=159, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ti', index=160, number=160, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='tk', index=161, number=161, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='tl', index=162, number=162, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='tn', index=163, number=163, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='to', index=164, number=164, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='tr', index=165, number=165, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ts', index=166, number=166, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='tt', index=167, number=167, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='tw', index=168, number=168, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ty', index=169, number=169, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ug', index=170, number=170, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='uk', index=171, number=171, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ur', index=172, number=172, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='uz', index=173, number=173, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ve', index=174, number=174, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='vi', index=175, number=175, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='vo', index=176, number=176, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='wa', index=177, number=177, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='wo', index=178, number=178, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='xh', index=179, number=179, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + 
name='yi', index=180, number=180, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='yo', index=181, number=181, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='za', index=182, number=182, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='zh', index=183, number=183, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='zu', index=184, number=184, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=396, + serialized_end=1957, +) +_sym_db.RegisterEnumDescriptor(_METADATA_LANGUAGE) + + +_METADATA = _descriptor.Descriptor( + name='Metadata', + full_name='legacy_pb.Metadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='legacy_pb.Metadata.version', index=0, + number=1, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='language', full_name='legacy_pb.Metadata.language', index=1, + number=2, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='title', full_name='legacy_pb.Metadata.title', index=2, + number=3, type=9, cpp_type=9, label=2, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='description', full_name='legacy_pb.Metadata.description', index=3, + number=4, type=9, cpp_type=9, label=2, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='author', full_name='legacy_pb.Metadata.author', index=4, + number=5, type=9, cpp_type=9, label=2, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='license', full_name='legacy_pb.Metadata.license', index=5, + number=6, type=9, cpp_type=9, label=2, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='nsfw', full_name='legacy_pb.Metadata.nsfw', index=6, + number=7, type=8, cpp_type=7, label=2, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='fee', full_name='legacy_pb.Metadata.fee', index=7, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='thumbnail', full_name='legacy_pb.Metadata.thumbnail', index=8, + number=9, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='preview', full_name='legacy_pb.Metadata.preview', index=9, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='licenseUrl', full_name='legacy_pb.Metadata.licenseUrl', index=10, + number=11, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _METADATA_VERSION, + _METADATA_LANGUAGE, + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=41, + serialized_end=1957, +) + +_METADATA.fields_by_name['version'].enum_type = _METADATA_VERSION +_METADATA.fields_by_name['language'].enum_type = _METADATA_LANGUAGE +_METADATA.fields_by_name['fee'].message_type = fee__pb2._FEE +_METADATA_VERSION.containing_type = _METADATA +_METADATA_LANGUAGE.containing_type = _METADATA +DESCRIPTOR.message_types_by_name['Metadata'] = _METADATA +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Metadata = _reflection.GeneratedProtocolMessageType('Metadata', (_message.Message,), dict( + DESCRIPTOR = _METADATA, + __module__ = 'metadata_pb2' + # @@protoc_insertion_point(class_scope:legacy_pb.Metadata) + )) +_sym_db.RegisterMessage(Metadata) + + +# @@protoc_insertion_point(module_scope) diff --git a/scribe/schema/types/v1/signature_pb2.py b/scribe/schema/types/v1/signature_pb2.py new file mode 100644 index 0000000..8e1663f --- /dev/null +++ b/scribe/schema/types/v1/signature_pb2.py @@ -0,0 +1,118 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: signature.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import certificate_pb2 as certificate__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='signature.proto', + package='legacy_pb', + syntax='proto2', + serialized_options=None, + serialized_pb=_b('\n\x0fsignature.proto\x12\tlegacy_pb\x1a\x11\x63\x65rtificate.proto\"\xbb\x01\n\tSignature\x12-\n\x07version\x18\x01 \x02(\x0e\x32\x1c.legacy_pb.Signature.Version\x12)\n\rsignatureType\x18\x02 \x02(\x0e\x32\x12.legacy_pb.KeyType\x12\x11\n\tsignature\x18\x03 \x02(\x0c\x12\x15\n\rcertificateId\x18\x04 \x02(\x0c\"*\n\x07Version\x12\x13\n\x0fUNKNOWN_VERSION\x10\x00\x12\n\n\x06_0_0_1\x10\x01') + , + dependencies=[certificate__pb2.DESCRIPTOR,]) + + + +_SIGNATURE_VERSION = _descriptor.EnumDescriptor( + name='Version', + full_name='legacy_pb.Signature.Version', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_VERSION', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='_0_0_1', index=1, number=1, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=195, + serialized_end=237, +) +_sym_db.RegisterEnumDescriptor(_SIGNATURE_VERSION) + + +_SIGNATURE = _descriptor.Descriptor( + name='Signature', + full_name='legacy_pb.Signature', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='legacy_pb.Signature.version', index=0, + number=1, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='signatureType', full_name='legacy_pb.Signature.signatureType', index=1, + number=2, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='signature', full_name='legacy_pb.Signature.signature', index=2, + number=3, type=12, cpp_type=9, label=2, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='certificateId', full_name='legacy_pb.Signature.certificateId', index=3, + number=4, type=12, cpp_type=9, label=2, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _SIGNATURE_VERSION, + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=50, + serialized_end=237, +) + +_SIGNATURE.fields_by_name['version'].enum_type = _SIGNATURE_VERSION +_SIGNATURE.fields_by_name['signatureType'].enum_type = certificate__pb2._KEYTYPE +_SIGNATURE_VERSION.containing_type = _SIGNATURE +DESCRIPTOR.message_types_by_name['Signature'] = _SIGNATURE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Signature = _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), dict( + DESCRIPTOR = _SIGNATURE, + __module__ = 'signature_pb2' + # @@protoc_insertion_point(class_scope:legacy_pb.Signature) + )) +_sym_db.RegisterMessage(Signature) + + +# 
@@protoc_insertion_point(module_scope) diff --git a/scribe/schema/types/v1/source_pb2.py b/scribe/schema/types/v1/source_pb2.py new file mode 100644 index 0000000..7c3bf69 --- /dev/null +++ b/scribe/schema/types/v1/source_pb2.py @@ -0,0 +1,140 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: source.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='source.proto', + package='legacy_pb', + syntax='proto2', + serialized_options=None, + serialized_pb=_b('\n\x0csource.proto\x12\tlegacy_pb\"\xf2\x01\n\x06Source\x12*\n\x07version\x18\x01 \x02(\x0e\x32\x19.legacy_pb.Source.Version\x12\x31\n\nsourceType\x18\x02 \x02(\x0e\x32\x1d.legacy_pb.Source.SourceTypes\x12\x0e\n\x06source\x18\x03 \x02(\x0c\x12\x13\n\x0b\x63ontentType\x18\x04 \x02(\t\"*\n\x07Version\x12\x13\n\x0fUNKNOWN_VERSION\x10\x00\x12\n\n\x06_0_0_1\x10\x01\"8\n\x0bSourceTypes\x12\x17\n\x13UNKNOWN_SOURCE_TYPE\x10\x00\x12\x10\n\x0clbry_sd_hash\x10\x01') +) + + + +_SOURCE_VERSION = _descriptor.EnumDescriptor( + name='Version', + full_name='legacy_pb.Source.Version', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_VERSION', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='_0_0_1', index=1, number=1, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=170, + serialized_end=212, +) +_sym_db.RegisterEnumDescriptor(_SOURCE_VERSION) + +_SOURCE_SOURCETYPES = _descriptor.EnumDescriptor( + name='SourceTypes', + full_name='legacy_pb.Source.SourceTypes', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_SOURCE_TYPE', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='lbry_sd_hash', index=1, number=1, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=214, + serialized_end=270, +) +_sym_db.RegisterEnumDescriptor(_SOURCE_SOURCETYPES) + + +_SOURCE = _descriptor.Descriptor( + name='Source', + full_name='legacy_pb.Source', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='legacy_pb.Source.version', index=0, + number=1, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='sourceType', full_name='legacy_pb.Source.sourceType', index=1, + number=2, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='source', full_name='legacy_pb.Source.source', index=2, + number=3, type=12, cpp_type=9, label=2, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='contentType', full_name='legacy_pb.Source.contentType', index=3, + number=4, type=9, cpp_type=9, label=2, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _SOURCE_VERSION, + _SOURCE_SOURCETYPES, + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28, + serialized_end=270, +) + +_SOURCE.fields_by_name['version'].enum_type = _SOURCE_VERSION +_SOURCE.fields_by_name['sourceType'].enum_type = _SOURCE_SOURCETYPES +_SOURCE_VERSION.containing_type = _SOURCE +_SOURCE_SOURCETYPES.containing_type = _SOURCE +DESCRIPTOR.message_types_by_name['Source'] = _SOURCE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Source = _reflection.GeneratedProtocolMessageType('Source', (_message.Message,), dict( + DESCRIPTOR = _SOURCE, + __module__ = 'source_pb2' + # @@protoc_insertion_point(class_scope:legacy_pb.Source) + )) +_sym_db.RegisterMessage(Source) + + +# @@protoc_insertion_point(module_scope) diff --git a/scribe/schema/types/v1/stream_pb2.py b/scribe/schema/types/v1/stream_pb2.py new file mode 100644 index 0000000..ec96d0b --- /dev/null +++ b/scribe/schema/types/v1/stream_pb2.py @@ -0,0 +1,113 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: stream.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import metadata_pb2 as metadata__pb2 +from . 
import source_pb2 as source__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='stream.proto', + package='legacy_pb', + syntax='proto2', + serialized_options=None, + serialized_pb=_b('\n\x0cstream.proto\x12\tlegacy_pb\x1a\x0emetadata.proto\x1a\x0csource.proto\"\xaa\x01\n\x06Stream\x12*\n\x07version\x18\x01 \x02(\x0e\x32\x19.legacy_pb.Stream.Version\x12%\n\x08metadata\x18\x02 \x02(\x0b\x32\x13.legacy_pb.Metadata\x12!\n\x06source\x18\x03 \x02(\x0b\x32\x11.legacy_pb.Source\"*\n\x07Version\x12\x13\n\x0fUNKNOWN_VERSION\x10\x00\x12\n\n\x06_0_0_1\x10\x01') + , + dependencies=[metadata__pb2.DESCRIPTOR,source__pb2.DESCRIPTOR,]) + + + +_STREAM_VERSION = _descriptor.EnumDescriptor( + name='Version', + full_name='legacy_pb.Stream.Version', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_VERSION', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='_0_0_1', index=1, number=1, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=186, + serialized_end=228, +) +_sym_db.RegisterEnumDescriptor(_STREAM_VERSION) + + +_STREAM = _descriptor.Descriptor( + name='Stream', + full_name='legacy_pb.Stream', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='legacy_pb.Stream.version', index=0, + number=1, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='metadata', full_name='legacy_pb.Stream.metadata', index=1, + number=2, type=11, cpp_type=10, label=2, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='source', full_name='legacy_pb.Stream.source', index=2, + number=3, type=11, cpp_type=10, label=2, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _STREAM_VERSION, + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=58, + serialized_end=228, +) + +_STREAM.fields_by_name['version'].enum_type = _STREAM_VERSION +_STREAM.fields_by_name['metadata'].message_type = metadata__pb2._METADATA +_STREAM.fields_by_name['source'].message_type = source__pb2._SOURCE +_STREAM_VERSION.containing_type = _STREAM +DESCRIPTOR.message_types_by_name['Stream'] = _STREAM +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Stream = _reflection.GeneratedProtocolMessageType('Stream', (_message.Message,), dict( + DESCRIPTOR = _STREAM, + __module__ = 'stream_pb2' + # @@protoc_insertion_point(class_scope:legacy_pb.Stream) + )) +_sym_db.RegisterMessage(Stream) + + +# @@protoc_insertion_point(module_scope) diff --git a/scribe/schema/types/v2/__init__.py b/scribe/schema/types/v2/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/scribe/schema/types/v2/claim_pb2.py b/scribe/schema/types/v2/claim_pb2.py new file mode 100644 index 0000000..d63f739 --- /dev/null +++ b/scribe/schema/types/v2/claim_pb2.py @@ -0,0 +1,4692 @@ +# 
Generated by the protocol buffer compiler. DO NOT EDIT! +# source: claim.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='claim.proto', + package='pb', + syntax='proto3', + serialized_pb=_b('\n\x0b\x63laim.proto\x12\x02pb\"\xab\x02\n\x05\x43laim\x12\x1c\n\x06stream\x18\x01 \x01(\x0b\x32\n.pb.StreamH\x00\x12\x1e\n\x07\x63hannel\x18\x02 \x01(\x0b\x32\x0b.pb.ChannelH\x00\x12#\n\ncollection\x18\x03 \x01(\x0b\x32\r.pb.ClaimListH\x00\x12$\n\x06repost\x18\x04 \x01(\x0b\x32\x12.pb.ClaimReferenceH\x00\x12\r\n\x05title\x18\x08 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\t \x01(\t\x12\x1d\n\tthumbnail\x18\n \x01(\x0b\x32\n.pb.Source\x12\x0c\n\x04tags\x18\x0b \x03(\t\x12\x1f\n\tlanguages\x18\x0c \x03(\x0b\x32\x0c.pb.Language\x12\x1f\n\tlocations\x18\r \x03(\x0b\x32\x0c.pb.LocationB\x06\n\x04type\"\x84\x02\n\x06Stream\x12\x1a\n\x06source\x18\x01 \x01(\x0b\x32\n.pb.Source\x12\x0e\n\x06\x61uthor\x18\x02 \x01(\t\x12\x0f\n\x07license\x18\x03 \x01(\t\x12\x13\n\x0blicense_url\x18\x04 \x01(\t\x12\x14\n\x0crelease_time\x18\x05 \x01(\x03\x12\x14\n\x03\x66\x65\x65\x18\x06 \x01(\x0b\x32\x07.pb.Fee\x12\x1a\n\x05image\x18\n \x01(\x0b\x32\t.pb.ImageH\x00\x12\x1a\n\x05video\x18\x0b \x01(\x0b\x32\t.pb.VideoH\x00\x12\x1a\n\x05\x61udio\x18\x0c \x01(\x0b\x32\t.pb.AudioH\x00\x12 \n\x08software\x18\r \x01(\x0b\x32\x0c.pb.SoftwareH\x00\x42\x06\n\x04type\"}\n\x07\x43hannel\x12\x12\n\npublic_key\x18\x01 \x01(\x0c\x12\r\n\x05\x65mail\x18\x02 \x01(\t\x12\x13\n\x0bwebsite_url\x18\x03 \x01(\t\x12\x19\n\x05\x63over\x18\x04 \x01(\x0b\x32\n.pb.Source\x12\x1f\n\x08\x66\x65\x61tured\x18\x05 \x01(\x0b\x32\r.pb.ClaimList\"$\n\x0e\x43laimReference\x12\x12\n\nclaim_hash\x18\x01 \x01(\x0c\"\x90\x01\n\tClaimList\x12)\n\tlist_type\x18\x01 \x01(\x0e\x32\x16.pb.ClaimList.ListType\x12,\n\x10\x63laim_references\x18\x02 \x03(\x0b\x32\x12.pb.ClaimReference\"*\n\x08ListType\x12\x0e\n\nCOLLECTION\x10\x00\x12\x0e\n\nDERIVATION\x10\x02\"y\n\x06Source\x12\x0c\n\x04hash\x18\x01 \x01(\x0c\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04size\x18\x03 \x01(\x04\x12\x12\n\nmedia_type\x18\x04 \x01(\t\x12\x0b\n\x03url\x18\x05 \x01(\t\x12\x0f\n\x07sd_hash\x18\x06 \x01(\x0c\x12\x13\n\x0b\x62t_infohash\x18\x07 \x01(\x0c\"\x87\x01\n\x03\x46\x65\x65\x12\"\n\x08\x63urrency\x18\x01 \x01(\x0e\x32\x10.pb.Fee.Currency\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0c\x12\x0e\n\x06\x61mount\x18\x03 \x01(\x04\";\n\x08\x43urrency\x12\x14\n\x10UNKNOWN_CURRENCY\x10\x00\x12\x07\n\x03LBC\x10\x01\x12\x07\n\x03\x42TC\x10\x02\x12\x07\n\x03USD\x10\x03\"&\n\x05Image\x12\r\n\x05width\x18\x01 \x01(\r\x12\x0e\n\x06height\x18\x02 \x01(\r\"R\n\x05Video\x12\r\n\x05width\x18\x01 \x01(\r\x12\x0e\n\x06height\x18\x02 \x01(\r\x12\x10\n\x08\x64uration\x18\x03 \x01(\r\x12\x18\n\x05\x61udio\x18\x0f \x01(\x0b\x32\t.pb.Audio\"\x19\n\x05\x41udio\x12\x10\n\x08\x64uration\x18\x01 \x01(\r\"l\n\x08Software\x12\n\n\x02os\x18\x01 
\x01(\t\"T\n\x02OS\x12\x0e\n\nUNKNOWN_OS\x10\x00\x12\x07\n\x03\x41NY\x10\x01\x12\t\n\x05LINUX\x10\x02\x12\x0b\n\x07WINDOWS\x10\x03\x12\x07\n\x03MAC\x10\x04\x12\x0b\n\x07\x41NDROID\x10\x05\x12\x07\n\x03IOS\x10\x06\"\xc7\x1d\n\x08Language\x12\'\n\x08language\x18\x01 \x01(\x0e\x32\x15.pb.Language.Language\x12#\n\x06script\x18\x02 \x01(\x0e\x32\x13.pb.Language.Script\x12$\n\x06region\x18\x03 \x01(\x0e\x32\x14.pb.Location.Country\"\x99\x0c\n\x08Language\x12\x14\n\x10UNKNOWN_LANGUAGE\x10\x00\x12\x06\n\x02\x65n\x10\x01\x12\x06\n\x02\x61\x61\x10\x02\x12\x06\n\x02\x61\x62\x10\x03\x12\x06\n\x02\x61\x65\x10\x04\x12\x06\n\x02\x61\x66\x10\x05\x12\x06\n\x02\x61k\x10\x06\x12\x06\n\x02\x61m\x10\x07\x12\x06\n\x02\x61n\x10\x08\x12\x06\n\x02\x61r\x10\t\x12\x06\n\x02\x61s\x10\n\x12\x06\n\x02\x61v\x10\x0b\x12\x06\n\x02\x61y\x10\x0c\x12\x06\n\x02\x61z\x10\r\x12\x06\n\x02\x62\x61\x10\x0e\x12\x06\n\x02\x62\x65\x10\x0f\x12\x06\n\x02\x62g\x10\x10\x12\x06\n\x02\x62h\x10\x11\x12\x06\n\x02\x62i\x10\x12\x12\x06\n\x02\x62m\x10\x13\x12\x06\n\x02\x62n\x10\x14\x12\x06\n\x02\x62o\x10\x15\x12\x06\n\x02\x62r\x10\x16\x12\x06\n\x02\x62s\x10\x17\x12\x06\n\x02\x63\x61\x10\x18\x12\x06\n\x02\x63\x65\x10\x19\x12\x06\n\x02\x63h\x10\x1a\x12\x06\n\x02\x63o\x10\x1b\x12\x06\n\x02\x63r\x10\x1c\x12\x06\n\x02\x63s\x10\x1d\x12\x06\n\x02\x63u\x10\x1e\x12\x06\n\x02\x63v\x10\x1f\x12\x06\n\x02\x63y\x10 \x12\x06\n\x02\x64\x61\x10!\x12\x06\n\x02\x64\x65\x10\"\x12\x06\n\x02\x64v\x10#\x12\x06\n\x02\x64z\x10$\x12\x06\n\x02\x65\x65\x10%\x12\x06\n\x02\x65l\x10&\x12\x06\n\x02\x65o\x10\'\x12\x06\n\x02\x65s\x10(\x12\x06\n\x02\x65t\x10)\x12\x06\n\x02\x65u\x10*\x12\x06\n\x02\x66\x61\x10+\x12\x06\n\x02\x66\x66\x10,\x12\x06\n\x02\x66i\x10-\x12\x06\n\x02\x66j\x10.\x12\x06\n\x02\x66o\x10/\x12\x06\n\x02\x66r\x10\x30\x12\x06\n\x02\x66y\x10\x31\x12\x06\n\x02ga\x10\x32\x12\x06\n\x02gd\x10\x33\x12\x06\n\x02gl\x10\x34\x12\x06\n\x02gn\x10\x35\x12\x06\n\x02gu\x10\x36\x12\x06\n\x02gv\x10\x37\x12\x06\n\x02ha\x10\x38\x12\x06\n\x02he\x10\x39\x12\x06\n\x02hi\x10:\x12\x06\n\x02ho\x10;\x12\x06\n\x02hr\x10<\x12\x06\n\x02ht\x10=\x12\x06\n\x02hu\x10>\x12\x06\n\x02hy\x10?\x12\x06\n\x02hz\x10@\x12\x06\n\x02ia\x10\x41\x12\x06\n\x02id\x10\x42\x12\x06\n\x02ie\x10\x43\x12\x06\n\x02ig\x10\x44\x12\x06\n\x02ii\x10\x45\x12\x06\n\x02ik\x10\x46\x12\x06\n\x02io\x10G\x12\x06\n\x02is\x10H\x12\x06\n\x02it\x10I\x12\x06\n\x02iu\x10J\x12\x06\n\x02ja\x10K\x12\x06\n\x02jv\x10L\x12\x06\n\x02ka\x10M\x12\x06\n\x02kg\x10N\x12\x06\n\x02ki\x10O\x12\x06\n\x02kj\x10P\x12\x06\n\x02kk\x10Q\x12\x06\n\x02kl\x10R\x12\x06\n\x02km\x10S\x12\x06\n\x02kn\x10T\x12\x06\n\x02ko\x10U\x12\x06\n\x02kr\x10V\x12\x06\n\x02ks\x10W\x12\x06\n\x02ku\x10X\x12\x06\n\x02kv\x10Y\x12\x06\n\x02kw\x10Z\x12\x06\n\x02ky\x10[\x12\x06\n\x02la\x10\\\x12\x06\n\x02lb\x10]\x12\x06\n\x02lg\x10^\x12\x06\n\x02li\x10_\x12\x06\n\x02ln\x10`\x12\x06\n\x02lo\x10\x61\x12\x06\n\x02lt\x10\x62\x12\x06\n\x02lu\x10\x63\x12\x06\n\x02lv\x10\x64\x12\x06\n\x02mg\x10\x65\x12\x06\n\x02mh\x10\x66\x12\x06\n\x02mi\x10g\x12\x06\n\x02mk\x10h\x12\x06\n\x02ml\x10i\x12\x06\n\x02mn\x10j\x12\x06\n\x02mr\x10k\x12\x06\n\x02ms\x10l\x12\x06\n\x02mt\x10m\x12\x06\n\x02my\x10n\x12\x06\n\x02na\x10o\x12\x06\n\x02nb\x10p\x12\x06\n\x02nd\x10q\x12\x06\n\x02ne\x10r\x12\x06\n\x02ng\x10s\x12\x06\n\x02nl\x10t\x12\x06\n\x02nn\x10u\x12\x06\n\x02no\x10v\x12\x06\n\x02nr\x10w\x12\x06\n\x02nv\x10x\x12\x06\n\x02ny\x10y\x12\x06\n\x02oc\x10z\x12\x06\n\x02oj\x10{\x12\x06\n\x02om\x10|\x12\x06\n\x02or\x10}\x12\x06\n\x02os\x10~\x12\x06\n\x02pa\x10\x7f\x12\x07\n\x02pi\x10\x80\x01\x12\x07\n\x02pl\x10\x81\
x01\x12\x07\n\x02ps\x10\x82\x01\x12\x07\n\x02pt\x10\x83\x01\x12\x07\n\x02qu\x10\x84\x01\x12\x07\n\x02rm\x10\x85\x01\x12\x07\n\x02rn\x10\x86\x01\x12\x07\n\x02ro\x10\x87\x01\x12\x07\n\x02ru\x10\x88\x01\x12\x07\n\x02rw\x10\x89\x01\x12\x07\n\x02sa\x10\x8a\x01\x12\x07\n\x02sc\x10\x8b\x01\x12\x07\n\x02sd\x10\x8c\x01\x12\x07\n\x02se\x10\x8d\x01\x12\x07\n\x02sg\x10\x8e\x01\x12\x07\n\x02si\x10\x8f\x01\x12\x07\n\x02sk\x10\x90\x01\x12\x07\n\x02sl\x10\x91\x01\x12\x07\n\x02sm\x10\x92\x01\x12\x07\n\x02sn\x10\x93\x01\x12\x07\n\x02so\x10\x94\x01\x12\x07\n\x02sq\x10\x95\x01\x12\x07\n\x02sr\x10\x96\x01\x12\x07\n\x02ss\x10\x97\x01\x12\x07\n\x02st\x10\x98\x01\x12\x07\n\x02su\x10\x99\x01\x12\x07\n\x02sv\x10\x9a\x01\x12\x07\n\x02sw\x10\x9b\x01\x12\x07\n\x02ta\x10\x9c\x01\x12\x07\n\x02te\x10\x9d\x01\x12\x07\n\x02tg\x10\x9e\x01\x12\x07\n\x02th\x10\x9f\x01\x12\x07\n\x02ti\x10\xa0\x01\x12\x07\n\x02tk\x10\xa1\x01\x12\x07\n\x02tl\x10\xa2\x01\x12\x07\n\x02tn\x10\xa3\x01\x12\x07\n\x02to\x10\xa4\x01\x12\x07\n\x02tr\x10\xa5\x01\x12\x07\n\x02ts\x10\xa6\x01\x12\x07\n\x02tt\x10\xa7\x01\x12\x07\n\x02tw\x10\xa8\x01\x12\x07\n\x02ty\x10\xa9\x01\x12\x07\n\x02ug\x10\xaa\x01\x12\x07\n\x02uk\x10\xab\x01\x12\x07\n\x02ur\x10\xac\x01\x12\x07\n\x02uz\x10\xad\x01\x12\x07\n\x02ve\x10\xae\x01\x12\x07\n\x02vi\x10\xaf\x01\x12\x07\n\x02vo\x10\xb0\x01\x12\x07\n\x02wa\x10\xb1\x01\x12\x07\n\x02wo\x10\xb2\x01\x12\x07\n\x02xh\x10\xb3\x01\x12\x07\n\x02yi\x10\xb4\x01\x12\x07\n\x02yo\x10\xb5\x01\x12\x07\n\x02za\x10\xb6\x01\x12\x07\n\x02zh\x10\xb7\x01\x12\x07\n\x02zu\x10\xb8\x01\"\xaa\x10\n\x06Script\x12\x12\n\x0eUNKNOWN_SCRIPT\x10\x00\x12\x08\n\x04\x41\x64lm\x10\x01\x12\x08\n\x04\x41\x66\x61k\x10\x02\x12\x08\n\x04\x41ghb\x10\x03\x12\x08\n\x04\x41hom\x10\x04\x12\x08\n\x04\x41rab\x10\x05\x12\x08\n\x04\x41ran\x10\x06\x12\x08\n\x04\x41rmi\x10\x07\x12\x08\n\x04\x41rmn\x10\x08\x12\x08\n\x04\x41vst\x10\t\x12\x08\n\x04\x42\x61li\x10\n\x12\x08\n\x04\x42\x61mu\x10\x0b\x12\x08\n\x04\x42\x61ss\x10\x0c\x12\x08\n\x04\x42\x61tk\x10\r\x12\x08\n\x04\x42\x65ng\x10\x0e\x12\x08\n\x04\x42hks\x10\x0f\x12\x08\n\x04\x42lis\x10\x10\x12\x08\n\x04\x42opo\x10\x11\x12\x08\n\x04\x42rah\x10\x12\x12\x08\n\x04\x42rai\x10\x13\x12\x08\n\x04\x42ugi\x10\x14\x12\x08\n\x04\x42uhd\x10\x15\x12\x08\n\x04\x43\x61km\x10\x16\x12\x08\n\x04\x43\x61ns\x10\x17\x12\x08\n\x04\x43\x61ri\x10\x18\x12\x08\n\x04\x43ham\x10\x19\x12\x08\n\x04\x43her\x10\x1a\x12\x08\n\x04\x43irt\x10\x1b\x12\x08\n\x04\x43opt\x10\x1c\x12\x08\n\x04\x43pmn\x10\x1d\x12\x08\n\x04\x43prt\x10\x1e\x12\x08\n\x04\x43yrl\x10\x1f\x12\x08\n\x04\x43yrs\x10 
\x12\x08\n\x04\x44\x65va\x10!\x12\x08\n\x04\x44ogr\x10\"\x12\x08\n\x04\x44srt\x10#\x12\x08\n\x04\x44upl\x10$\x12\x08\n\x04\x45gyd\x10%\x12\x08\n\x04\x45gyh\x10&\x12\x08\n\x04\x45gyp\x10\'\x12\x08\n\x04\x45lba\x10(\x12\x08\n\x04\x45lym\x10)\x12\x08\n\x04\x45thi\x10*\x12\x08\n\x04Geok\x10+\x12\x08\n\x04Geor\x10,\x12\x08\n\x04Glag\x10-\x12\x08\n\x04Gong\x10.\x12\x08\n\x04Gonm\x10/\x12\x08\n\x04Goth\x10\x30\x12\x08\n\x04Gran\x10\x31\x12\x08\n\x04Grek\x10\x32\x12\x08\n\x04Gujr\x10\x33\x12\x08\n\x04Guru\x10\x34\x12\x08\n\x04Hanb\x10\x35\x12\x08\n\x04Hang\x10\x36\x12\x08\n\x04Hani\x10\x37\x12\x08\n\x04Hano\x10\x38\x12\x08\n\x04Hans\x10\x39\x12\x08\n\x04Hant\x10:\x12\x08\n\x04Hatr\x10;\x12\x08\n\x04Hebr\x10<\x12\x08\n\x04Hira\x10=\x12\x08\n\x04Hluw\x10>\x12\x08\n\x04Hmng\x10?\x12\x08\n\x04Hmnp\x10@\x12\x08\n\x04Hrkt\x10\x41\x12\x08\n\x04Hung\x10\x42\x12\x08\n\x04Inds\x10\x43\x12\x08\n\x04Ital\x10\x44\x12\x08\n\x04Jamo\x10\x45\x12\x08\n\x04Java\x10\x46\x12\x08\n\x04Jpan\x10G\x12\x08\n\x04Jurc\x10H\x12\x08\n\x04Kali\x10I\x12\x08\n\x04Kana\x10J\x12\x08\n\x04Khar\x10K\x12\x08\n\x04Khmr\x10L\x12\x08\n\x04Khoj\x10M\x12\x08\n\x04Kitl\x10N\x12\x08\n\x04Kits\x10O\x12\x08\n\x04Knda\x10P\x12\x08\n\x04Kore\x10Q\x12\x08\n\x04Kpel\x10R\x12\x08\n\x04Kthi\x10S\x12\x08\n\x04Lana\x10T\x12\x08\n\x04Laoo\x10U\x12\x08\n\x04Latf\x10V\x12\x08\n\x04Latg\x10W\x12\x08\n\x04Latn\x10X\x12\x08\n\x04Leke\x10Y\x12\x08\n\x04Lepc\x10Z\x12\x08\n\x04Limb\x10[\x12\x08\n\x04Lina\x10\\\x12\x08\n\x04Linb\x10]\x12\x08\n\x04Lisu\x10^\x12\x08\n\x04Loma\x10_\x12\x08\n\x04Lyci\x10`\x12\x08\n\x04Lydi\x10\x61\x12\x08\n\x04Mahj\x10\x62\x12\x08\n\x04Maka\x10\x63\x12\x08\n\x04Mand\x10\x64\x12\x08\n\x04Mani\x10\x65\x12\x08\n\x04Marc\x10\x66\x12\x08\n\x04Maya\x10g\x12\x08\n\x04Medf\x10h\x12\x08\n\x04Mend\x10i\x12\x08\n\x04Merc\x10j\x12\x08\n\x04Mero\x10k\x12\x08\n\x04Mlym\x10l\x12\x08\n\x04Modi\x10m\x12\x08\n\x04Mong\x10n\x12\x08\n\x04Moon\x10o\x12\x08\n\x04Mroo\x10p\x12\x08\n\x04Mtei\x10q\x12\x08\n\x04Mult\x10r\x12\x08\n\x04Mymr\x10s\x12\x08\n\x04Nand\x10t\x12\x08\n\x04Narb\x10u\x12\x08\n\x04Nbat\x10v\x12\x08\n\x04Newa\x10w\x12\x08\n\x04Nkdb\x10x\x12\x08\n\x04Nkgb\x10y\x12\x08\n\x04Nkoo\x10z\x12\x08\n\x04Nshu\x10{\x12\x08\n\x04Ogam\x10|\x12\x08\n\x04Olck\x10}\x12\x08\n\x04Orkh\x10~\x12\x08\n\x04Orya\x10\x7f\x12\t\n\x04Osge\x10\x80\x01\x12\t\n\x04Osma\x10\x81\x01\x12\t\n\x04Palm\x10\x82\x01\x12\t\n\x04Pauc\x10\x83\x01\x12\t\n\x04Perm\x10\x84\x01\x12\t\n\x04Phag\x10\x85\x01\x12\t\n\x04Phli\x10\x86\x01\x12\t\n\x04Phlp\x10\x87\x01\x12\t\n\x04Phlv\x10\x88\x01\x12\t\n\x04Phnx\x10\x89\x01\x12\t\n\x04Plrd\x10\x8a\x01\x12\t\n\x04Piqd\x10\x8b\x01\x12\t\n\x04Prti\x10\x8c\x01\x12\t\n\x04Qaaa\x10\x8d\x01\x12\t\n\x04Qabx\x10\x8e\x01\x12\t\n\x04Rjng\x10\x8f\x01\x12\t\n\x04Rohg\x10\x90\x01\x12\t\n\x04Roro\x10\x91\x01\x12\t\n\x04Runr\x10\x92\x01\x12\t\n\x04Samr\x10\x93\x01\x12\t\n\x04Sara\x10\x94\x01\x12\t\n\x04Sarb\x10\x95\x01\x12\t\n\x04Saur\x10\x96\x01\x12\t\n\x04Sgnw\x10\x97\x01\x12\t\n\x04Shaw\x10\x98\x01\x12\t\n\x04Shrd\x10\x99\x01\x12\t\n\x04Shui\x10\x9a\x01\x12\t\n\x04Sidd\x10\x9b\x01\x12\t\n\x04Sind\x10\x9c\x01\x12\t\n\x04Sinh\x10\x9d\x01\x12\t\n\x04Sogd\x10\x9e\x01\x12\t\n\x04Sogo\x10\x9f\x01\x12\t\n\x04Sora\x10\xa0\x01\x12\t\n\x04Soyo\x10\xa1\x01\x12\t\n\x04Sund\x10\xa2\x01\x12\t\n\x04Sylo\x10\xa3\x01\x12\t\n\x04Syrc\x10\xa4\x01\x12\t\n\x04Syre\x10\xa5\x01\x12\t\n\x04Syrj\x10\xa6\x01\x12\t\n\x04Syrn\x10\xa7\x01\x12\t\n\x04Tagb\x10\xa8\x01\x12\t\n\x04Takr\x10\xa9\x01\x12\t\n\x04Tale\x10\xaa\x01\x12\t\n\x04Talu\x10\xab\x01\x12\t\n\x04Taml\x10\xac\x01\x12\
t\n\x04Tang\x10\xad\x01\x12\t\n\x04Tavt\x10\xae\x01\x12\t\n\x04Telu\x10\xaf\x01\x12\t\n\x04Teng\x10\xb0\x01\x12\t\n\x04Tfng\x10\xb1\x01\x12\t\n\x04Tglg\x10\xb2\x01\x12\t\n\x04Thaa\x10\xb3\x01\x12\t\n\x04Thai\x10\xb4\x01\x12\t\n\x04Tibt\x10\xb5\x01\x12\t\n\x04Tirh\x10\xb6\x01\x12\t\n\x04Ugar\x10\xb7\x01\x12\t\n\x04Vaii\x10\xb8\x01\x12\t\n\x04Visp\x10\xb9\x01\x12\t\n\x04Wara\x10\xba\x01\x12\t\n\x04Wcho\x10\xbb\x01\x12\t\n\x04Wole\x10\xbc\x01\x12\t\n\x04Xpeo\x10\xbd\x01\x12\t\n\x04Xsux\x10\xbe\x01\x12\t\n\x04Yiii\x10\xbf\x01\x12\t\n\x04Zanb\x10\xc0\x01\x12\t\n\x04Zinh\x10\xc1\x01\x12\t\n\x04Zmth\x10\xc2\x01\x12\t\n\x04Zsye\x10\xc3\x01\x12\t\n\x04Zsym\x10\xc4\x01\x12\t\n\x04Zxxx\x10\xc5\x01\x12\t\n\x04Zyyy\x10\xc6\x01\x12\t\n\x04Zzzz\x10\xc7\x01\"\xec)\n\x08Location\x12%\n\x07\x63ountry\x18\x01 \x01(\x0e\x32\x14.pb.Location.Country\x12\r\n\x05state\x18\x02 \x01(\t\x12\x0c\n\x04\x63ity\x18\x03 \x01(\t\x12\x0c\n\x04\x63ode\x18\x04 \x01(\t\x12\x10\n\x08latitude\x18\x05 \x01(\x11\x12\x11\n\tlongitude\x18\x06 \x01(\x11\"\xe8(\n\x07\x43ountry\x12\x13\n\x0fUNKNOWN_COUNTRY\x10\x00\x12\x06\n\x02\x41\x46\x10\x01\x12\x06\n\x02\x41X\x10\x02\x12\x06\n\x02\x41L\x10\x03\x12\x06\n\x02\x44Z\x10\x04\x12\x06\n\x02\x41S\x10\x05\x12\x06\n\x02\x41\x44\x10\x06\x12\x06\n\x02\x41O\x10\x07\x12\x06\n\x02\x41I\x10\x08\x12\x06\n\x02\x41Q\x10\t\x12\x06\n\x02\x41G\x10\n\x12\x06\n\x02\x41R\x10\x0b\x12\x06\n\x02\x41M\x10\x0c\x12\x06\n\x02\x41W\x10\r\x12\x06\n\x02\x41U\x10\x0e\x12\x06\n\x02\x41T\x10\x0f\x12\x06\n\x02\x41Z\x10\x10\x12\x06\n\x02\x42S\x10\x11\x12\x06\n\x02\x42H\x10\x12\x12\x06\n\x02\x42\x44\x10\x13\x12\x06\n\x02\x42\x42\x10\x14\x12\x06\n\x02\x42Y\x10\x15\x12\x06\n\x02\x42\x45\x10\x16\x12\x06\n\x02\x42Z\x10\x17\x12\x06\n\x02\x42J\x10\x18\x12\x06\n\x02\x42M\x10\x19\x12\x06\n\x02\x42T\x10\x1a\x12\x06\n\x02\x42O\x10\x1b\x12\x06\n\x02\x42Q\x10\x1c\x12\x06\n\x02\x42\x41\x10\x1d\x12\x06\n\x02\x42W\x10\x1e\x12\x06\n\x02\x42V\x10\x1f\x12\x06\n\x02\x42R\x10 
\x12\x06\n\x02IO\x10!\x12\x06\n\x02\x42N\x10\"\x12\x06\n\x02\x42G\x10#\x12\x06\n\x02\x42\x46\x10$\x12\x06\n\x02\x42I\x10%\x12\x06\n\x02KH\x10&\x12\x06\n\x02\x43M\x10\'\x12\x06\n\x02\x43\x41\x10(\x12\x06\n\x02\x43V\x10)\x12\x06\n\x02KY\x10*\x12\x06\n\x02\x43\x46\x10+\x12\x06\n\x02TD\x10,\x12\x06\n\x02\x43L\x10-\x12\x06\n\x02\x43N\x10.\x12\x06\n\x02\x43X\x10/\x12\x06\n\x02\x43\x43\x10\x30\x12\x06\n\x02\x43O\x10\x31\x12\x06\n\x02KM\x10\x32\x12\x06\n\x02\x43G\x10\x33\x12\x06\n\x02\x43\x44\x10\x34\x12\x06\n\x02\x43K\x10\x35\x12\x06\n\x02\x43R\x10\x36\x12\x06\n\x02\x43I\x10\x37\x12\x06\n\x02HR\x10\x38\x12\x06\n\x02\x43U\x10\x39\x12\x06\n\x02\x43W\x10:\x12\x06\n\x02\x43Y\x10;\x12\x06\n\x02\x43Z\x10<\x12\x06\n\x02\x44K\x10=\x12\x06\n\x02\x44J\x10>\x12\x06\n\x02\x44M\x10?\x12\x06\n\x02\x44O\x10@\x12\x06\n\x02\x45\x43\x10\x41\x12\x06\n\x02\x45G\x10\x42\x12\x06\n\x02SV\x10\x43\x12\x06\n\x02GQ\x10\x44\x12\x06\n\x02\x45R\x10\x45\x12\x06\n\x02\x45\x45\x10\x46\x12\x06\n\x02\x45T\x10G\x12\x06\n\x02\x46K\x10H\x12\x06\n\x02\x46O\x10I\x12\x06\n\x02\x46J\x10J\x12\x06\n\x02\x46I\x10K\x12\x06\n\x02\x46R\x10L\x12\x06\n\x02GF\x10M\x12\x06\n\x02PF\x10N\x12\x06\n\x02TF\x10O\x12\x06\n\x02GA\x10P\x12\x06\n\x02GM\x10Q\x12\x06\n\x02GE\x10R\x12\x06\n\x02\x44\x45\x10S\x12\x06\n\x02GH\x10T\x12\x06\n\x02GI\x10U\x12\x06\n\x02GR\x10V\x12\x06\n\x02GL\x10W\x12\x06\n\x02GD\x10X\x12\x06\n\x02GP\x10Y\x12\x06\n\x02GU\x10Z\x12\x06\n\x02GT\x10[\x12\x06\n\x02GG\x10\\\x12\x06\n\x02GN\x10]\x12\x06\n\x02GW\x10^\x12\x06\n\x02GY\x10_\x12\x06\n\x02HT\x10`\x12\x06\n\x02HM\x10\x61\x12\x06\n\x02VA\x10\x62\x12\x06\n\x02HN\x10\x63\x12\x06\n\x02HK\x10\x64\x12\x06\n\x02HU\x10\x65\x12\x06\n\x02IS\x10\x66\x12\x06\n\x02IN\x10g\x12\x06\n\x02ID\x10h\x12\x06\n\x02IR\x10i\x12\x06\n\x02IQ\x10j\x12\x06\n\x02IE\x10k\x12\x06\n\x02IM\x10l\x12\x06\n\x02IL\x10m\x12\x06\n\x02IT\x10n\x12\x06\n\x02JM\x10o\x12\x06\n\x02JP\x10p\x12\x06\n\x02JE\x10q\x12\x06\n\x02JO\x10r\x12\x06\n\x02KZ\x10s\x12\x06\n\x02KE\x10t\x12\x06\n\x02KI\x10u\x12\x06\n\x02KP\x10v\x12\x06\n\x02KR\x10w\x12\x06\n\x02KW\x10x\x12\x06\n\x02KG\x10y\x12\x06\n\x02LA\x10z\x12\x06\n\x02LV\x10{\x12\x06\n\x02LB\x10|\x12\x06\n\x02LS\x10}\x12\x06\n\x02LR\x10~\x12\x06\n\x02LY\x10\x7f\x12\x07\n\x02LI\x10\x80\x01\x12\x07\n\x02LT\x10\x81\x01\x12\x07\n\x02LU\x10\x82\x01\x12\x07\n\x02MO\x10\x83\x01\x12\x07\n\x02MK\x10\x84\x01\x12\x07\n\x02MG\x10\x85\x01\x12\x07\n\x02MW\x10\x86\x01\x12\x07\n\x02MY\x10\x87\x01\x12\x07\n\x02MV\x10\x88\x01\x12\x07\n\x02ML\x10\x89\x01\x12\x07\n\x02MT\x10\x8a\x01\x12\x07\n\x02MH\x10\x8b\x01\x12\x07\n\x02MQ\x10\x8c\x01\x12\x07\n\x02MR\x10\x8d\x01\x12\x07\n\x02MU\x10\x8e\x01\x12\x07\n\x02YT\x10\x8f\x01\x12\x07\n\x02MX\x10\x90\x01\x12\x07\n\x02\x46M\x10\x91\x01\x12\x07\n\x02MD\x10\x92\x01\x12\x07\n\x02MC\x10\x93\x01\x12\x07\n\x02MN\x10\x94\x01\x12\x07\n\x02ME\x10\x95\x01\x12\x07\n\x02MS\x10\x96\x01\x12\x07\n\x02MA\x10\x97\x01\x12\x07\n\x02MZ\x10\x98\x01\x12\x07\n\x02MM\x10\x99\x01\x12\x07\n\x02NA\x10\x9a\x01\x12\x07\n\x02NR\x10\x9b\x01\x12\x07\n\x02NP\x10\x9c\x01\x12\x07\n\x02NL\x10\x9d\x01\x12\x07\n\x02NC\x10\x9e\x01\x12\x07\n\x02NZ\x10\x9f\x01\x12\x07\n\x02NI\x10\xa0\x01\x12\x07\n\x02NE\x10\xa1\x01\x12\x07\n\x02NG\x10\xa2\x01\x12\x07\n\x02NU\x10\xa3\x01\x12\x07\n\x02NF\x10\xa4\x01\x12\x07\n\x02MP\x10\xa5\x01\x12\x07\n\x02NO\x10\xa6\x01\x12\x07\n\x02OM\x10\xa7\x01\x12\x07\n\x02PK\x10\xa8\x01\x12\x07\n\x02PW\x10\xa9\x01\x12\x07\n\x02PS\x10\xaa\x01\x12\x07\n\x02PA\x10\xab\x01\x12\x07\n\x02PG\x10\xac\x01\x12\x07\n\x02PY\x10\xad\x01\x12\x07\n\x02PE\x10\xae\x01\x12\x07\n\x02PH\x10\xaf\x01\x12\x
07\n\x02PN\x10\xb0\x01\x12\x07\n\x02PL\x10\xb1\x01\x12\x07\n\x02PT\x10\xb2\x01\x12\x07\n\x02PR\x10\xb3\x01\x12\x07\n\x02QA\x10\xb4\x01\x12\x07\n\x02RE\x10\xb5\x01\x12\x07\n\x02RO\x10\xb6\x01\x12\x07\n\x02RU\x10\xb7\x01\x12\x07\n\x02RW\x10\xb8\x01\x12\x07\n\x02\x42L\x10\xb9\x01\x12\x07\n\x02SH\x10\xba\x01\x12\x07\n\x02KN\x10\xbb\x01\x12\x07\n\x02LC\x10\xbc\x01\x12\x07\n\x02MF\x10\xbd\x01\x12\x07\n\x02PM\x10\xbe\x01\x12\x07\n\x02VC\x10\xbf\x01\x12\x07\n\x02WS\x10\xc0\x01\x12\x07\n\x02SM\x10\xc1\x01\x12\x07\n\x02ST\x10\xc2\x01\x12\x07\n\x02SA\x10\xc3\x01\x12\x07\n\x02SN\x10\xc4\x01\x12\x07\n\x02RS\x10\xc5\x01\x12\x07\n\x02SC\x10\xc6\x01\x12\x07\n\x02SL\x10\xc7\x01\x12\x07\n\x02SG\x10\xc8\x01\x12\x07\n\x02SX\x10\xc9\x01\x12\x07\n\x02SK\x10\xca\x01\x12\x07\n\x02SI\x10\xcb\x01\x12\x07\n\x02SB\x10\xcc\x01\x12\x07\n\x02SO\x10\xcd\x01\x12\x07\n\x02ZA\x10\xce\x01\x12\x07\n\x02GS\x10\xcf\x01\x12\x07\n\x02SS\x10\xd0\x01\x12\x07\n\x02\x45S\x10\xd1\x01\x12\x07\n\x02LK\x10\xd2\x01\x12\x07\n\x02SD\x10\xd3\x01\x12\x07\n\x02SR\x10\xd4\x01\x12\x07\n\x02SJ\x10\xd5\x01\x12\x07\n\x02SZ\x10\xd6\x01\x12\x07\n\x02SE\x10\xd7\x01\x12\x07\n\x02\x43H\x10\xd8\x01\x12\x07\n\x02SY\x10\xd9\x01\x12\x07\n\x02TW\x10\xda\x01\x12\x07\n\x02TJ\x10\xdb\x01\x12\x07\n\x02TZ\x10\xdc\x01\x12\x07\n\x02TH\x10\xdd\x01\x12\x07\n\x02TL\x10\xde\x01\x12\x07\n\x02TG\x10\xdf\x01\x12\x07\n\x02TK\x10\xe0\x01\x12\x07\n\x02TO\x10\xe1\x01\x12\x07\n\x02TT\x10\xe2\x01\x12\x07\n\x02TN\x10\xe3\x01\x12\x07\n\x02TR\x10\xe4\x01\x12\x07\n\x02TM\x10\xe5\x01\x12\x07\n\x02TC\x10\xe6\x01\x12\x07\n\x02TV\x10\xe7\x01\x12\x07\n\x02UG\x10\xe8\x01\x12\x07\n\x02UA\x10\xe9\x01\x12\x07\n\x02\x41\x45\x10\xea\x01\x12\x07\n\x02GB\x10\xeb\x01\x12\x07\n\x02US\x10\xec\x01\x12\x07\n\x02UM\x10\xed\x01\x12\x07\n\x02UY\x10\xee\x01\x12\x07\n\x02UZ\x10\xef\x01\x12\x07\n\x02VU\x10\xf0\x01\x12\x07\n\x02VE\x10\xf1\x01\x12\x07\n\x02VN\x10\xf2\x01\x12\x07\n\x02VG\x10\xf3\x01\x12\x07\n\x02VI\x10\xf4\x01\x12\x07\n\x02WF\x10\xf5\x01\x12\x07\n\x02\x45H\x10\xf6\x01\x12\x07\n\x02YE\x10\xf7\x01\x12\x07\n\x02ZM\x10\xf8\x01\x12\x07\n\x02ZW\x10\xf9\x01\x12\t\n\x04R001\x10\xfa\x01\x12\t\n\x04R002\x10\xfb\x01\x12\t\n\x04R015\x10\xfc\x01\x12\t\n\x04R012\x10\xfd\x01\x12\t\n\x04R818\x10\xfe\x01\x12\t\n\x04R434\x10\xff\x01\x12\t\n\x04R504\x10\x80\x02\x12\t\n\x04R729\x10\x81\x02\x12\t\n\x04R788\x10\x82\x02\x12\t\n\x04R732\x10\x83\x02\x12\t\n\x04R202\x10\x84\x02\x12\t\n\x04R014\x10\x85\x02\x12\t\n\x04R086\x10\x86\x02\x12\t\n\x04R108\x10\x87\x02\x12\t\n\x04R174\x10\x88\x02\x12\t\n\x04R262\x10\x89\x02\x12\t\n\x04R232\x10\x8a\x02\x12\t\n\x04R231\x10\x8b\x02\x12\t\n\x04R260\x10\x8c\x02\x12\t\n\x04R404\x10\x8d\x02\x12\t\n\x04R450\x10\x8e\x02\x12\t\n\x04R454\x10\x8f\x02\x12\t\n\x04R480\x10\x90\x02\x12\t\n\x04R175\x10\x91\x02\x12\t\n\x04R508\x10\x92\x02\x12\t\n\x04R638\x10\x93\x02\x12\t\n\x04R646\x10\x94\x02\x12\t\n\x04R690\x10\x95\x02\x12\t\n\x04R706\x10\x96\x02\x12\t\n\x04R728\x10\x97\x02\x12\t\n\x04R800\x10\x98\x02\x12\t\n\x04R834\x10\x99\x02\x12\t\n\x04R894\x10\x9a\x02\x12\t\n\x04R716\x10\x9b\x02\x12\t\n\x04R017\x10\x9c\x02\x12\t\n\x04R024\x10\x9d\x02\x12\t\n\x04R120\x10\x9e\x02\x12\t\n\x04R140\x10\x9f\x02\x12\t\n\x04R148\x10\xa0\x02\x12\t\n\x04R178\x10\xa1\x02\x12\t\n\x04R180\x10\xa2\x02\x12\t\n\x04R226\x10\xa3\x02\x12\t\n\x04R266\x10\xa4\x02\x12\t\n\x04R678\x10\xa5\x02\x12\t\n\x04R018\x10\xa6\x02\x12\t\n\x04R072\x10\xa7\x02\x12\t\n\x04R748\x10\xa8\x02\x12\t\n\x04R426\x10\xa9\x02\x12\t\n\x04R516\x10\xaa\x02\x12\t\n\x04R710\x10\xab\x02\x12\t\n\x04R011\x10\xac\x02\x12\t\n\x04R204\x10\xad\x02\x12\t\n\x04R85
4\x10\xae\x02\x12\t\n\x04R132\x10\xaf\x02\x12\t\n\x04R384\x10\xb0\x02\x12\t\n\x04R270\x10\xb1\x02\x12\t\n\x04R288\x10\xb2\x02\x12\t\n\x04R324\x10\xb3\x02\x12\t\n\x04R624\x10\xb4\x02\x12\t\n\x04R430\x10\xb5\x02\x12\t\n\x04R466\x10\xb6\x02\x12\t\n\x04R478\x10\xb7\x02\x12\t\n\x04R562\x10\xb8\x02\x12\t\n\x04R566\x10\xb9\x02\x12\t\n\x04R654\x10\xba\x02\x12\t\n\x04R686\x10\xbb\x02\x12\t\n\x04R694\x10\xbc\x02\x12\t\n\x04R768\x10\xbd\x02\x12\t\n\x04R019\x10\xbe\x02\x12\t\n\x04R419\x10\xbf\x02\x12\t\n\x04R029\x10\xc0\x02\x12\t\n\x04R660\x10\xc1\x02\x12\t\n\x04R028\x10\xc2\x02\x12\t\n\x04R533\x10\xc3\x02\x12\t\n\x04R044\x10\xc4\x02\x12\t\n\x04R052\x10\xc5\x02\x12\t\n\x04R535\x10\xc6\x02\x12\t\n\x04R092\x10\xc7\x02\x12\t\n\x04R136\x10\xc8\x02\x12\t\n\x04R192\x10\xc9\x02\x12\t\n\x04R531\x10\xca\x02\x12\t\n\x04R212\x10\xcb\x02\x12\t\n\x04R214\x10\xcc\x02\x12\t\n\x04R308\x10\xcd\x02\x12\t\n\x04R312\x10\xce\x02\x12\t\n\x04R332\x10\xcf\x02\x12\t\n\x04R388\x10\xd0\x02\x12\t\n\x04R474\x10\xd1\x02\x12\t\n\x04R500\x10\xd2\x02\x12\t\n\x04R630\x10\xd3\x02\x12\t\n\x04R652\x10\xd4\x02\x12\t\n\x04R659\x10\xd5\x02\x12\t\n\x04R662\x10\xd6\x02\x12\t\n\x04R663\x10\xd7\x02\x12\t\n\x04R670\x10\xd8\x02\x12\t\n\x04R534\x10\xd9\x02\x12\t\n\x04R780\x10\xda\x02\x12\t\n\x04R796\x10\xdb\x02\x12\t\n\x04R850\x10\xdc\x02\x12\t\n\x04R013\x10\xdd\x02\x12\t\n\x04R084\x10\xde\x02\x12\t\n\x04R188\x10\xdf\x02\x12\t\n\x04R222\x10\xe0\x02\x12\t\n\x04R320\x10\xe1\x02\x12\t\n\x04R340\x10\xe2\x02\x12\t\n\x04R484\x10\xe3\x02\x12\t\n\x04R558\x10\xe4\x02\x12\t\n\x04R591\x10\xe5\x02\x12\t\n\x04R005\x10\xe6\x02\x12\t\n\x04R032\x10\xe7\x02\x12\t\n\x04R068\x10\xe8\x02\x12\t\n\x04R074\x10\xe9\x02\x12\t\n\x04R076\x10\xea\x02\x12\t\n\x04R152\x10\xeb\x02\x12\t\n\x04R170\x10\xec\x02\x12\t\n\x04R218\x10\xed\x02\x12\t\n\x04R238\x10\xee\x02\x12\t\n\x04R254\x10\xef\x02\x12\t\n\x04R328\x10\xf0\x02\x12\t\n\x04R600\x10\xf1\x02\x12\t\n\x04R604\x10\xf2\x02\x12\t\n\x04R239\x10\xf3\x02\x12\t\n\x04R740\x10\xf4\x02\x12\t\n\x04R858\x10\xf5\x02\x12\t\n\x04R862\x10\xf6\x02\x12\t\n\x04R021\x10\xf7\x02\x12\t\n\x04R060\x10\xf8\x02\x12\t\n\x04R124\x10\xf9\x02\x12\t\n\x04R304\x10\xfa\x02\x12\t\n\x04R666\x10\xfb\x02\x12\t\n\x04R840\x10\xfc\x02\x12\t\n\x04R010\x10\xfd\x02\x12\t\n\x04R142\x10\xfe\x02\x12\t\n\x04R143\x10\xff\x02\x12\t\n\x04R398\x10\x80\x03\x12\t\n\x04R417\x10\x81\x03\x12\t\n\x04R762\x10\x82\x03\x12\t\n\x04R795\x10\x83\x03\x12\t\n\x04R860\x10\x84\x03\x12\t\n\x04R030\x10\x85\x03\x12\t\n\x04R156\x10\x86\x03\x12\t\n\x04R344\x10\x87\x03\x12\t\n\x04R446\x10\x88\x03\x12\t\n\x04R408\x10\x89\x03\x12\t\n\x04R392\x10\x8a\x03\x12\t\n\x04R496\x10\x8b\x03\x12\t\n\x04R410\x10\x8c\x03\x12\t\n\x04R035\x10\x8d\x03\x12\t\n\x04R096\x10\x8e\x03\x12\t\n\x04R116\x10\x8f\x03\x12\t\n\x04R360\x10\x90\x03\x12\t\n\x04R418\x10\x91\x03\x12\t\n\x04R458\x10\x92\x03\x12\t\n\x04R104\x10\x93\x03\x12\t\n\x04R608\x10\x94\x03\x12\t\n\x04R702\x10\x95\x03\x12\t\n\x04R764\x10\x96\x03\x12\t\n\x04R626\x10\x97\x03\x12\t\n\x04R704\x10\x98\x03\x12\t\n\x04R034\x10\x99\x03\x12\t\n\x04R004\x10\x9a\x03\x12\t\n\x04R050\x10\x9b\x03\x12\t\n\x04R064\x10\x9c\x03\x12\t\n\x04R356\x10\x9d\x03\x12\t\n\x04R364\x10\x9e\x03\x12\t\n\x04R462\x10\x9f\x03\x12\t\n\x04R524\x10\xa0\x03\x12\t\n\x04R586\x10\xa1\x03\x12\t\n\x04R144\x10\xa2\x03\x12\t\n\x04R145\x10\xa3\x03\x12\t\n\x04R051\x10\xa4\x03\x12\t\n\x04R031\x10\xa5\x03\x12\t\n\x04R048\x10\xa6\x03\x12\t\n\x04R196\x10\xa7\x03\x12\t\n\x04R268\x10\xa8\x03\x12\t\n\x04R368\x10\xa9\x03\x12\t\n\x04R376\x10\xaa\x03\x12\t\n\x04R400\x10\xab\x03\x12\t\n\x04R414\x10\xac\x03\x12\t\n\x04R4
22\x10\xad\x03\x12\t\n\x04R512\x10\xae\x03\x12\t\n\x04R634\x10\xaf\x03\x12\t\n\x04R682\x10\xb0\x03\x12\t\n\x04R275\x10\xb1\x03\x12\t\n\x04R760\x10\xb2\x03\x12\t\n\x04R792\x10\xb3\x03\x12\t\n\x04R784\x10\xb4\x03\x12\t\n\x04R887\x10\xb5\x03\x12\t\n\x04R150\x10\xb6\x03\x12\t\n\x04R151\x10\xb7\x03\x12\t\n\x04R112\x10\xb8\x03\x12\t\n\x04R100\x10\xb9\x03\x12\t\n\x04R203\x10\xba\x03\x12\t\n\x04R348\x10\xbb\x03\x12\t\n\x04R616\x10\xbc\x03\x12\t\n\x04R498\x10\xbd\x03\x12\t\n\x04R642\x10\xbe\x03\x12\t\n\x04R643\x10\xbf\x03\x12\t\n\x04R703\x10\xc0\x03\x12\t\n\x04R804\x10\xc1\x03\x12\t\n\x04R154\x10\xc2\x03\x12\t\n\x04R248\x10\xc3\x03\x12\t\n\x04R830\x10\xc4\x03\x12\t\n\x04R831\x10\xc5\x03\x12\t\n\x04R832\x10\xc6\x03\x12\t\n\x04R680\x10\xc7\x03\x12\t\n\x04R208\x10\xc8\x03\x12\t\n\x04R233\x10\xc9\x03\x12\t\n\x04R234\x10\xca\x03\x12\t\n\x04R246\x10\xcb\x03\x12\t\n\x04R352\x10\xcc\x03\x12\t\n\x04R372\x10\xcd\x03\x12\t\n\x04R833\x10\xce\x03\x12\t\n\x04R428\x10\xcf\x03\x12\t\n\x04R440\x10\xd0\x03\x12\t\n\x04R578\x10\xd1\x03\x12\t\n\x04R744\x10\xd2\x03\x12\t\n\x04R752\x10\xd3\x03\x12\t\n\x04R826\x10\xd4\x03\x12\t\n\x04R039\x10\xd5\x03\x12\t\n\x04R008\x10\xd6\x03\x12\t\n\x04R020\x10\xd7\x03\x12\t\n\x04R070\x10\xd8\x03\x12\t\n\x04R191\x10\xd9\x03\x12\t\n\x04R292\x10\xda\x03\x12\t\n\x04R300\x10\xdb\x03\x12\t\n\x04R336\x10\xdc\x03\x12\t\n\x04R380\x10\xdd\x03\x12\t\n\x04R470\x10\xde\x03\x12\t\n\x04R499\x10\xdf\x03\x12\t\n\x04R807\x10\xe0\x03\x12\t\n\x04R620\x10\xe1\x03\x12\t\n\x04R674\x10\xe2\x03\x12\t\n\x04R688\x10\xe3\x03\x12\t\n\x04R705\x10\xe4\x03\x12\t\n\x04R724\x10\xe5\x03\x12\t\n\x04R155\x10\xe6\x03\x12\t\n\x04R040\x10\xe7\x03\x12\t\n\x04R056\x10\xe8\x03\x12\t\n\x04R250\x10\xe9\x03\x12\t\n\x04R276\x10\xea\x03\x12\t\n\x04R438\x10\xeb\x03\x12\t\n\x04R442\x10\xec\x03\x12\t\n\x04R492\x10\xed\x03\x12\t\n\x04R528\x10\xee\x03\x12\t\n\x04R756\x10\xef\x03\x12\t\n\x04R009\x10\xf0\x03\x12\t\n\x04R053\x10\xf1\x03\x12\t\n\x04R036\x10\xf2\x03\x12\t\n\x04R162\x10\xf3\x03\x12\t\n\x04R166\x10\xf4\x03\x12\t\n\x04R334\x10\xf5\x03\x12\t\n\x04R554\x10\xf6\x03\x12\t\n\x04R574\x10\xf7\x03\x12\t\n\x04R054\x10\xf8\x03\x12\t\n\x04R242\x10\xf9\x03\x12\t\n\x04R540\x10\xfa\x03\x12\t\n\x04R598\x10\xfb\x03\x12\t\n\x04R090\x10\xfc\x03\x12\t\n\x04R548\x10\xfd\x03\x12\t\n\x04R057\x10\xfe\x03\x12\t\n\x04R316\x10\xff\x03\x12\t\n\x04R296\x10\x80\x04\x12\t\n\x04R584\x10\x81\x04\x12\t\n\x04R583\x10\x82\x04\x12\t\n\x04R520\x10\x83\x04\x12\t\n\x04R580\x10\x84\x04\x12\t\n\x04R585\x10\x85\x04\x12\t\n\x04R581\x10\x86\x04\x12\t\n\x04R061\x10\x87\x04\x12\t\n\x04R016\x10\x88\x04\x12\t\n\x04R184\x10\x89\x04\x12\t\n\x04R258\x10\x8a\x04\x12\t\n\x04R570\x10\x8b\x04\x12\t\n\x04R612\x10\x8c\x04\x12\t\n\x04R882\x10\x8d\x04\x12\t\n\x04R772\x10\x8e\x04\x12\t\n\x04R776\x10\x8f\x04\x12\t\n\x04R798\x10\x90\x04\x12\t\n\x04R876\x10\x91\x04\x62\x06proto3') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + +_CLAIMLIST_LISTTYPE = _descriptor.EnumDescriptor( + name='ListType', + full_name='pb.ClaimList.ListType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='COLLECTION', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DERIVATION', index=1, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=852, + serialized_end=894, +) +_sym_db.RegisterEnumDescriptor(_CLAIMLIST_LISTTYPE) + +_FEE_CURRENCY = _descriptor.EnumDescriptor( + name='Currency', + full_name='pb.Fee.Currency', + filename=None, + file=DESCRIPTOR, + values=[ + 
_descriptor.EnumValueDescriptor( + name='UNKNOWN_CURRENCY', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LBC', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BTC', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='USD', index=3, number=3, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1096, + serialized_end=1155, +) +_sym_db.RegisterEnumDescriptor(_FEE_CURRENCY) + +_SOFTWARE_OS = _descriptor.EnumDescriptor( + name='OS', + full_name='pb.Software.OS', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_OS', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ANY', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LINUX', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='WINDOWS', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MAC', index=4, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ANDROID', index=5, number=5, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='IOS', index=6, number=6, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1332, + serialized_end=1416, +) +_sym_db.RegisterEnumDescriptor(_SOFTWARE_OS) + +_LANGUAGE_LANGUAGE = _descriptor.EnumDescriptor( + name='Language', + full_name='pb.Language.Language', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_LANGUAGE', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='en', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='aa', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ab', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ae', index=4, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='af', index=5, number=5, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ak', index=6, number=6, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='am', index=7, number=7, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='an', index=8, number=8, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ar', index=9, number=9, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='as', index=10, number=10, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='av', index=11, number=11, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ay', index=12, number=12, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='az', index=13, number=13, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ba', index=14, number=14, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='be', index=15, number=15, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='bg', index=16, number=16, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='bh', index=17, number=17, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='bi', index=18, number=18, + options=None, + 
type=None), + _descriptor.EnumValueDescriptor( + name='bm', index=19, number=19, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='bn', index=20, number=20, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='bo', index=21, number=21, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='br', index=22, number=22, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='bs', index=23, number=23, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ca', index=24, number=24, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ce', index=25, number=25, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ch', index=26, number=26, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='co', index=27, number=27, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='cr', index=28, number=28, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='cs', index=29, number=29, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='cu', index=30, number=30, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='cv', index=31, number=31, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='cy', index=32, number=32, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='da', index=33, number=33, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='de', index=34, number=34, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='dv', index=35, number=35, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='dz', index=36, number=36, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ee', index=37, number=37, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='el', index=38, number=38, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='eo', index=39, number=39, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='es', index=40, number=40, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='et', index=41, number=41, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='eu', index=42, number=42, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='fa', index=43, number=43, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ff', index=44, number=44, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='fi', index=45, number=45, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='fj', index=46, number=46, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='fo', index=47, number=47, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='fr', index=48, number=48, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='fy', index=49, number=49, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ga', index=50, number=50, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='gd', index=51, number=51, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='gl', index=52, number=52, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='gn', index=53, number=53, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='gu', index=54, number=54, + 
options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='gv', index=55, number=55, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ha', index=56, number=56, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='he', index=57, number=57, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='hi', index=58, number=58, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ho', index=59, number=59, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='hr', index=60, number=60, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ht', index=61, number=61, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='hu', index=62, number=62, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='hy', index=63, number=63, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='hz', index=64, number=64, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ia', index=65, number=65, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='id', index=66, number=66, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ie', index=67, number=67, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ig', index=68, number=68, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ii', index=69, number=69, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ik', index=70, number=70, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='io', index=71, number=71, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='is', index=72, number=72, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='it', index=73, number=73, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='iu', index=74, number=74, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ja', index=75, number=75, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='jv', index=76, number=76, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ka', index=77, number=77, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kg', index=78, number=78, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ki', index=79, number=79, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kj', index=80, number=80, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kk', index=81, number=81, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kl', index=82, number=82, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='km', index=83, number=83, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kn', index=84, number=84, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ko', index=85, number=85, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kr', index=86, number=86, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ks', index=87, number=87, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ku', index=88, number=88, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kv', index=89, number=89, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='kw', index=90, 
number=90, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ky', index=91, number=91, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='la', index=92, number=92, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='lb', index=93, number=93, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='lg', index=94, number=94, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='li', index=95, number=95, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ln', index=96, number=96, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='lo', index=97, number=97, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='lt', index=98, number=98, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='lu', index=99, number=99, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='lv', index=100, number=100, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='mg', index=101, number=101, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='mh', index=102, number=102, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='mi', index=103, number=103, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='mk', index=104, number=104, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ml', index=105, number=105, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='mn', index=106, number=106, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='mr', index=107, number=107, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ms', index=108, number=108, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='mt', index=109, number=109, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='my', index=110, number=110, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='na', index=111, number=111, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='nb', index=112, number=112, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='nd', index=113, number=113, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ne', index=114, number=114, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ng', index=115, number=115, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='nl', index=116, number=116, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='nn', index=117, number=117, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='no', index=118, number=118, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='nr', index=119, number=119, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='nv', index=120, number=120, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ny', index=121, number=121, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='oc', index=122, number=122, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='oj', index=123, number=123, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='om', index=124, number=124, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='or', index=125, number=125, + options=None, + 
type=None), + _descriptor.EnumValueDescriptor( + name='os', index=126, number=126, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='pa', index=127, number=127, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='pi', index=128, number=128, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='pl', index=129, number=129, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ps', index=130, number=130, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='pt', index=131, number=131, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='qu', index=132, number=132, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='rm', index=133, number=133, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='rn', index=134, number=134, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ro', index=135, number=135, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ru', index=136, number=136, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='rw', index=137, number=137, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sa', index=138, number=138, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sc', index=139, number=139, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sd', index=140, number=140, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='se', index=141, number=141, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sg', index=142, number=142, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='si', index=143, number=143, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sk', index=144, number=144, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sl', index=145, number=145, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sm', index=146, number=146, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sn', index=147, number=147, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='so', index=148, number=148, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sq', index=149, number=149, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sr', index=150, number=150, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ss', index=151, number=151, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='st', index=152, number=152, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='su', index=153, number=153, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sv', index=154, number=154, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='sw', index=155, number=155, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ta', index=156, number=156, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='te', index=157, number=157, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='tg', index=158, number=158, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='th', index=159, number=159, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ti', index=160, number=160, + options=None, + type=None), + 
_descriptor.EnumValueDescriptor( + name='tk', index=161, number=161, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='tl', index=162, number=162, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='tn', index=163, number=163, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='to', index=164, number=164, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='tr', index=165, number=165, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ts', index=166, number=166, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='tt', index=167, number=167, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='tw', index=168, number=168, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ty', index=169, number=169, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ug', index=170, number=170, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='uk', index=171, number=171, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ur', index=172, number=172, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='uz', index=173, number=173, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ve', index=174, number=174, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='vi', index=175, number=175, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='vo', index=176, number=176, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='wa', index=177, number=177, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='wo', index=178, number=178, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='xh', index=179, number=179, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='yi', index=180, number=180, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='yo', index=181, number=181, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='za', index=182, number=182, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='zh', index=183, number=183, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='zu', index=184, number=184, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1548, + serialized_end=3109, +) +_sym_db.RegisterEnumDescriptor(_LANGUAGE_LANGUAGE) + +_LANGUAGE_SCRIPT = _descriptor.EnumDescriptor( + name='Script', + full_name='pb.Language.Script', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_SCRIPT', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Adlm', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Afak', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Aghb', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Ahom', index=4, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Arab', index=5, number=5, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Aran', index=6, number=6, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Armi', index=7, number=7, + options=None, + type=None), + _descriptor.EnumValueDescriptor( 
+ name='Armn', index=8, number=8, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Avst', index=9, number=9, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Bali', index=10, number=10, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Bamu', index=11, number=11, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Bass', index=12, number=12, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Batk', index=13, number=13, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Beng', index=14, number=14, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Bhks', index=15, number=15, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Blis', index=16, number=16, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Bopo', index=17, number=17, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Brah', index=18, number=18, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Brai', index=19, number=19, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Bugi', index=20, number=20, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Buhd', index=21, number=21, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Cakm', index=22, number=22, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Cans', index=23, number=23, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Cari', index=24, number=24, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Cham', index=25, number=25, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Cher', index=26, number=26, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Cirt', index=27, number=27, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Copt', index=28, number=28, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Cpmn', index=29, number=29, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Cprt', index=30, number=30, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Cyrl', index=31, number=31, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Cyrs', index=32, number=32, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Deva', index=33, number=33, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Dogr', index=34, number=34, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Dsrt', index=35, number=35, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Dupl', index=36, number=36, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Egyd', index=37, number=37, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Egyh', index=38, number=38, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Egyp', index=39, number=39, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Elba', index=40, number=40, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Elym', index=41, number=41, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Ethi', index=42, number=42, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Geok', 
index=43, number=43, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Geor', index=44, number=44, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Glag', index=45, number=45, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Gong', index=46, number=46, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Gonm', index=47, number=47, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Goth', index=48, number=48, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Gran', index=49, number=49, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Grek', index=50, number=50, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Gujr', index=51, number=51, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Guru', index=52, number=52, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Hanb', index=53, number=53, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Hang', index=54, number=54, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Hani', index=55, number=55, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Hano', index=56, number=56, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Hans', index=57, number=57, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Hant', index=58, number=58, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Hatr', index=59, number=59, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Hebr', index=60, number=60, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Hira', index=61, number=61, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Hluw', index=62, number=62, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Hmng', index=63, number=63, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Hmnp', index=64, number=64, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Hrkt', index=65, number=65, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Hung', index=66, number=66, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Inds', index=67, number=67, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Ital', index=68, number=68, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Jamo', index=69, number=69, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Java', index=70, number=70, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Jpan', index=71, number=71, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Jurc', index=72, number=72, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Kali', index=73, number=73, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Kana', index=74, number=74, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Khar', index=75, number=75, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Khmr', index=76, number=76, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Khoj', index=77, number=77, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Kitl', index=78, 
number=78, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Kits', index=79, number=79, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Knda', index=80, number=80, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Kore', index=81, number=81, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Kpel', index=82, number=82, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Kthi', index=83, number=83, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Lana', index=84, number=84, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Laoo', index=85, number=85, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Latf', index=86, number=86, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Latg', index=87, number=87, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Latn', index=88, number=88, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Leke', index=89, number=89, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Lepc', index=90, number=90, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Limb', index=91, number=91, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Lina', index=92, number=92, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Linb', index=93, number=93, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Lisu', index=94, number=94, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Loma', index=95, number=95, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Lyci', index=96, number=96, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Lydi', index=97, number=97, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Mahj', index=98, number=98, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Maka', index=99, number=99, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Mand', index=100, number=100, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Mani', index=101, number=101, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Marc', index=102, number=102, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Maya', index=103, number=103, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Medf', index=104, number=104, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Mend', index=105, number=105, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Merc', index=106, number=106, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Mero', index=107, number=107, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Mlym', index=108, number=108, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Modi', index=109, number=109, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Mong', index=110, number=110, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Moon', index=111, number=111, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Mroo', index=112, number=112, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Mtei', 
index=113, number=113, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Mult', index=114, number=114, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Mymr', index=115, number=115, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Nand', index=116, number=116, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Narb', index=117, number=117, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Nbat', index=118, number=118, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Newa', index=119, number=119, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Nkdb', index=120, number=120, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Nkgb', index=121, number=121, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Nkoo', index=122, number=122, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Nshu', index=123, number=123, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Ogam', index=124, number=124, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Olck', index=125, number=125, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Orkh', index=126, number=126, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Orya', index=127, number=127, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Osge', index=128, number=128, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Osma', index=129, number=129, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Palm', index=130, number=130, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Pauc', index=131, number=131, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Perm', index=132, number=132, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Phag', index=133, number=133, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Phli', index=134, number=134, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Phlp', index=135, number=135, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Phlv', index=136, number=136, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Phnx', index=137, number=137, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Plrd', index=138, number=138, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Piqd', index=139, number=139, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Prti', index=140, number=140, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Qaaa', index=141, number=141, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Qabx', index=142, number=142, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Rjng', index=143, number=143, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Rohg', index=144, number=144, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Roro', index=145, number=145, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Runr', index=146, number=146, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Samr', index=147, number=147, + options=None, + type=None), 
+ _descriptor.EnumValueDescriptor( + name='Sara', index=148, number=148, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Sarb', index=149, number=149, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Saur', index=150, number=150, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Sgnw', index=151, number=151, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Shaw', index=152, number=152, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Shrd', index=153, number=153, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Shui', index=154, number=154, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Sidd', index=155, number=155, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Sind', index=156, number=156, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Sinh', index=157, number=157, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Sogd', index=158, number=158, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Sogo', index=159, number=159, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Sora', index=160, number=160, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Soyo', index=161, number=161, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Sund', index=162, number=162, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Sylo', index=163, number=163, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Syrc', index=164, number=164, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Syre', index=165, number=165, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Syrj', index=166, number=166, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Syrn', index=167, number=167, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Tagb', index=168, number=168, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Takr', index=169, number=169, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Tale', index=170, number=170, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Talu', index=171, number=171, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Taml', index=172, number=172, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Tang', index=173, number=173, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Tavt', index=174, number=174, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Telu', index=175, number=175, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Teng', index=176, number=176, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Tfng', index=177, number=177, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Tglg', index=178, number=178, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Thaa', index=179, number=179, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Thai', index=180, number=180, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Tibt', index=181, number=181, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Tirh', 
index=182, number=182, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Ugar', index=183, number=183, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Vaii', index=184, number=184, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Visp', index=185, number=185, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Wara', index=186, number=186, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Wcho', index=187, number=187, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Wole', index=188, number=188, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Xpeo', index=189, number=189, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Xsux', index=190, number=190, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Yiii', index=191, number=191, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Zanb', index=192, number=192, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Zinh', index=193, number=193, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Zmth', index=194, number=194, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Zsye', index=195, number=195, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Zsym', index=196, number=196, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Zxxx', index=197, number=197, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Zyyy', index=198, number=198, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='Zzzz', index=199, number=199, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=3112, + serialized_end=5202, +) +_sym_db.RegisterEnumDescriptor(_LANGUAGE_SCRIPT) + +_LOCATION_COUNTRY = _descriptor.EnumDescriptor( + name='Country', + full_name='pb.Location.Country', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_COUNTRY', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AF', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AX', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AL', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DZ', index=4, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AS', index=5, number=5, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AD', index=6, number=6, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AO', index=7, number=7, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AI', index=8, number=8, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AQ', index=9, number=9, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AG', index=10, number=10, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AR', index=11, number=11, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AM', index=12, number=12, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AW', index=13, number=13, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AU', index=14, number=14, + 
options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AT', index=15, number=15, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AZ', index=16, number=16, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BS', index=17, number=17, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BH', index=18, number=18, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BD', index=19, number=19, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BB', index=20, number=20, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BY', index=21, number=21, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BE', index=22, number=22, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BZ', index=23, number=23, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BJ', index=24, number=24, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BM', index=25, number=25, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BT', index=26, number=26, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BO', index=27, number=27, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BQ', index=28, number=28, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BA', index=29, number=29, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BW', index=30, number=30, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BV', index=31, number=31, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BR', index=32, number=32, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='IO', index=33, number=33, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BN', index=34, number=34, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BG', index=35, number=35, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BF', index=36, number=36, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BI', index=37, number=37, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='KH', index=38, number=38, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CM', index=39, number=39, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CA', index=40, number=40, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CV', index=41, number=41, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='KY', index=42, number=42, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CF', index=43, number=43, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TD', index=44, number=44, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CL', index=45, number=45, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CN', index=46, number=46, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CX', index=47, number=47, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CC', index=48, number=48, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CO', index=49, number=49, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='KM', index=50, 
number=50, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CG', index=51, number=51, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CD', index=52, number=52, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CK', index=53, number=53, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CR', index=54, number=54, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CI', index=55, number=55, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HR', index=56, number=56, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CU', index=57, number=57, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CW', index=58, number=58, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CY', index=59, number=59, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CZ', index=60, number=60, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DK', index=61, number=61, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DJ', index=62, number=62, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DM', index=63, number=63, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DO', index=64, number=64, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='EC', index=65, number=65, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='EG', index=66, number=66, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SV', index=67, number=67, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GQ', index=68, number=68, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ER', index=69, number=69, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='EE', index=70, number=70, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ET', index=71, number=71, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FK', index=72, number=72, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FO', index=73, number=73, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FJ', index=74, number=74, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FI', index=75, number=75, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FR', index=76, number=76, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GF', index=77, number=77, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PF', index=78, number=78, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TF', index=79, number=79, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GA', index=80, number=80, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GM', index=81, number=81, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GE', index=82, number=82, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DE', index=83, number=83, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GH', index=84, number=84, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GI', index=85, number=85, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GR', 
index=86, number=86, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GL', index=87, number=87, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GD', index=88, number=88, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GP', index=89, number=89, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GU', index=90, number=90, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GT', index=91, number=91, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GG', index=92, number=92, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GN', index=93, number=93, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GW', index=94, number=94, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GY', index=95, number=95, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HT', index=96, number=96, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HM', index=97, number=97, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='VA', index=98, number=98, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HN', index=99, number=99, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HK', index=100, number=100, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HU', index=101, number=101, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='IS', index=102, number=102, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='IN', index=103, number=103, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ID', index=104, number=104, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='IR', index=105, number=105, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='IQ', index=106, number=106, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='IE', index=107, number=107, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='IM', index=108, number=108, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='IL', index=109, number=109, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='IT', index=110, number=110, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='JM', index=111, number=111, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='JP', index=112, number=112, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='JE', index=113, number=113, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='JO', index=114, number=114, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='KZ', index=115, number=115, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='KE', index=116, number=116, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='KI', index=117, number=117, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='KP', index=118, number=118, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='KR', index=119, number=119, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='KW', index=120, number=120, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='KG', index=121, number=121, + options=None, + 
type=None), + _descriptor.EnumValueDescriptor( + name='LA', index=122, number=122, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LV', index=123, number=123, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LB', index=124, number=124, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LS', index=125, number=125, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LR', index=126, number=126, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LY', index=127, number=127, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LI', index=128, number=128, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LT', index=129, number=129, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LU', index=130, number=130, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MO', index=131, number=131, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MK', index=132, number=132, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MG', index=133, number=133, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MW', index=134, number=134, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MY', index=135, number=135, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MV', index=136, number=136, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ML', index=137, number=137, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MT', index=138, number=138, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MH', index=139, number=139, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MQ', index=140, number=140, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MR', index=141, number=141, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MU', index=142, number=142, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='YT', index=143, number=143, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MX', index=144, number=144, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FM', index=145, number=145, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MD', index=146, number=146, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MC', index=147, number=147, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MN', index=148, number=148, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ME', index=149, number=149, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MS', index=150, number=150, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MA', index=151, number=151, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MZ', index=152, number=152, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MM', index=153, number=153, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NA', index=154, number=154, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NR', index=155, number=155, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NP', index=156, number=156, + options=None, + type=None), + 
_descriptor.EnumValueDescriptor( + name='NL', index=157, number=157, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NC', index=158, number=158, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NZ', index=159, number=159, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NI', index=160, number=160, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NE', index=161, number=161, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NG', index=162, number=162, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NU', index=163, number=163, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NF', index=164, number=164, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MP', index=165, number=165, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NO', index=166, number=166, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='OM', index=167, number=167, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PK', index=168, number=168, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PW', index=169, number=169, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PS', index=170, number=170, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PA', index=171, number=171, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PG', index=172, number=172, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PY', index=173, number=173, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PE', index=174, number=174, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PH', index=175, number=175, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PN', index=176, number=176, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PL', index=177, number=177, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PT', index=178, number=178, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PR', index=179, number=179, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='QA', index=180, number=180, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RE', index=181, number=181, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RO', index=182, number=182, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RU', index=183, number=183, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RW', index=184, number=184, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BL', index=185, number=185, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SH', index=186, number=186, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='KN', index=187, number=187, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LC', index=188, number=188, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MF', index=189, number=189, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PM', index=190, number=190, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='VC', index=191, number=191, + options=None, + type=None), + 
_descriptor.EnumValueDescriptor( + name='WS', index=192, number=192, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SM', index=193, number=193, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ST', index=194, number=194, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SA', index=195, number=195, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SN', index=196, number=196, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RS', index=197, number=197, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SC', index=198, number=198, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SL', index=199, number=199, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SG', index=200, number=200, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SX', index=201, number=201, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SK', index=202, number=202, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SI', index=203, number=203, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SB', index=204, number=204, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SO', index=205, number=205, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ZA', index=206, number=206, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GS', index=207, number=207, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SS', index=208, number=208, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ES', index=209, number=209, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='LK', index=210, number=210, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SD', index=211, number=211, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SR', index=212, number=212, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SJ', index=213, number=213, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SZ', index=214, number=214, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SE', index=215, number=215, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CH', index=216, number=216, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SY', index=217, number=217, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TW', index=218, number=218, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TJ', index=219, number=219, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TZ', index=220, number=220, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TH', index=221, number=221, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TL', index=222, number=222, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TG', index=223, number=223, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TK', index=224, number=224, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TO', index=225, number=225, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TT', index=226, number=226, + options=None, + type=None), + 
_descriptor.EnumValueDescriptor( + name='TN', index=227, number=227, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TR', index=228, number=228, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TM', index=229, number=229, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TC', index=230, number=230, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TV', index=231, number=231, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UG', index=232, number=232, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UA', index=233, number=233, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='AE', index=234, number=234, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GB', index=235, number=235, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='US', index=236, number=236, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UM', index=237, number=237, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UY', index=238, number=238, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UZ', index=239, number=239, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='VU', index=240, number=240, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='VE', index=241, number=241, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='VN', index=242, number=242, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='VG', index=243, number=243, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='VI', index=244, number=244, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='WF', index=245, number=245, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='EH', index=246, number=246, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='YE', index=247, number=247, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ZM', index=248, number=248, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ZW', index=249, number=249, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R001', index=250, number=250, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R002', index=251, number=251, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R015', index=252, number=252, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R012', index=253, number=253, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R818', index=254, number=254, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R434', index=255, number=255, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R504', index=256, number=256, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R729', index=257, number=257, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R788', index=258, number=258, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R732', index=259, number=259, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R202', index=260, number=260, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R014', index=261, number=261, + options=None, + 
type=None), + _descriptor.EnumValueDescriptor( + name='R086', index=262, number=262, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R108', index=263, number=263, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R174', index=264, number=264, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R262', index=265, number=265, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R232', index=266, number=266, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R231', index=267, number=267, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R260', index=268, number=268, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R404', index=269, number=269, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R450', index=270, number=270, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R454', index=271, number=271, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R480', index=272, number=272, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R175', index=273, number=273, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R508', index=274, number=274, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R638', index=275, number=275, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R646', index=276, number=276, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R690', index=277, number=277, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R706', index=278, number=278, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R728', index=279, number=279, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R800', index=280, number=280, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R834', index=281, number=281, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R894', index=282, number=282, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R716', index=283, number=283, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R017', index=284, number=284, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R024', index=285, number=285, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R120', index=286, number=286, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R140', index=287, number=287, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R148', index=288, number=288, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R178', index=289, number=289, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R180', index=290, number=290, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R226', index=291, number=291, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R266', index=292, number=292, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R678', index=293, number=293, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R018', index=294, number=294, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R072', index=295, number=295, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + 
name='R748', index=296, number=296, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R426', index=297, number=297, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R516', index=298, number=298, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R710', index=299, number=299, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R011', index=300, number=300, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R204', index=301, number=301, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R854', index=302, number=302, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R132', index=303, number=303, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R384', index=304, number=304, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R270', index=305, number=305, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R288', index=306, number=306, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R324', index=307, number=307, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R624', index=308, number=308, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R430', index=309, number=309, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R466', index=310, number=310, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R478', index=311, number=311, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R562', index=312, number=312, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R566', index=313, number=313, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R654', index=314, number=314, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R686', index=315, number=315, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R694', index=316, number=316, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R768', index=317, number=317, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R019', index=318, number=318, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R419', index=319, number=319, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R029', index=320, number=320, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R660', index=321, number=321, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R028', index=322, number=322, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R533', index=323, number=323, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R044', index=324, number=324, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R052', index=325, number=325, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R535', index=326, number=326, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R092', index=327, number=327, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R136', index=328, number=328, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R192', index=329, number=329, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R531', index=330, number=330, + options=None, 
+ type=None), + _descriptor.EnumValueDescriptor( + name='R212', index=331, number=331, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R214', index=332, number=332, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R308', index=333, number=333, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R312', index=334, number=334, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R332', index=335, number=335, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R388', index=336, number=336, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R474', index=337, number=337, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R500', index=338, number=338, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R630', index=339, number=339, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R652', index=340, number=340, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R659', index=341, number=341, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R662', index=342, number=342, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R663', index=343, number=343, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R670', index=344, number=344, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R534', index=345, number=345, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R780', index=346, number=346, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R796', index=347, number=347, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R850', index=348, number=348, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R013', index=349, number=349, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R084', index=350, number=350, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R188', index=351, number=351, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R222', index=352, number=352, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R320', index=353, number=353, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R340', index=354, number=354, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R484', index=355, number=355, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R558', index=356, number=356, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R591', index=357, number=357, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R005', index=358, number=358, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R032', index=359, number=359, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R068', index=360, number=360, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R074', index=361, number=361, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R076', index=362, number=362, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R152', index=363, number=363, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R170', index=364, number=364, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + 
name='R218', index=365, number=365, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R238', index=366, number=366, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R254', index=367, number=367, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R328', index=368, number=368, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R600', index=369, number=369, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R604', index=370, number=370, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R239', index=371, number=371, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R740', index=372, number=372, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R858', index=373, number=373, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R862', index=374, number=374, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R021', index=375, number=375, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R060', index=376, number=376, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R124', index=377, number=377, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R304', index=378, number=378, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R666', index=379, number=379, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R840', index=380, number=380, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R010', index=381, number=381, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R142', index=382, number=382, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R143', index=383, number=383, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R398', index=384, number=384, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R417', index=385, number=385, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R762', index=386, number=386, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R795', index=387, number=387, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R860', index=388, number=388, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R030', index=389, number=389, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R156', index=390, number=390, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R344', index=391, number=391, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R446', index=392, number=392, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R408', index=393, number=393, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R392', index=394, number=394, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R496', index=395, number=395, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R410', index=396, number=396, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R035', index=397, number=397, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R096', index=398, number=398, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R116', index=399, number=399, + options=None, 
+ type=None), + _descriptor.EnumValueDescriptor( + name='R360', index=400, number=400, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R418', index=401, number=401, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R458', index=402, number=402, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R104', index=403, number=403, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R608', index=404, number=404, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R702', index=405, number=405, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R764', index=406, number=406, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R626', index=407, number=407, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R704', index=408, number=408, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R034', index=409, number=409, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R004', index=410, number=410, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R050', index=411, number=411, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R064', index=412, number=412, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R356', index=413, number=413, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R364', index=414, number=414, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R462', index=415, number=415, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R524', index=416, number=416, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R586', index=417, number=417, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R144', index=418, number=418, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R145', index=419, number=419, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R051', index=420, number=420, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R031', index=421, number=421, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R048', index=422, number=422, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R196', index=423, number=423, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R268', index=424, number=424, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R368', index=425, number=425, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R376', index=426, number=426, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R400', index=427, number=427, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R414', index=428, number=428, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R422', index=429, number=429, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R512', index=430, number=430, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R634', index=431, number=431, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R682', index=432, number=432, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R275', index=433, number=433, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + 
name='R760', index=434, number=434, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R792', index=435, number=435, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R784', index=436, number=436, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R887', index=437, number=437, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R150', index=438, number=438, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R151', index=439, number=439, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R112', index=440, number=440, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R100', index=441, number=441, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R203', index=442, number=442, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R348', index=443, number=443, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R616', index=444, number=444, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R498', index=445, number=445, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R642', index=446, number=446, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R643', index=447, number=447, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R703', index=448, number=448, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R804', index=449, number=449, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R154', index=450, number=450, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R248', index=451, number=451, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R830', index=452, number=452, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R831', index=453, number=453, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R832', index=454, number=454, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R680', index=455, number=455, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R208', index=456, number=456, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R233', index=457, number=457, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R234', index=458, number=458, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R246', index=459, number=459, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R352', index=460, number=460, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R372', index=461, number=461, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R833', index=462, number=462, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R428', index=463, number=463, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R440', index=464, number=464, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R578', index=465, number=465, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R744', index=466, number=466, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R752', index=467, number=467, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R826', index=468, number=468, + options=None, 
+ type=None), + _descriptor.EnumValueDescriptor( + name='R039', index=469, number=469, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R008', index=470, number=470, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R020', index=471, number=471, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R070', index=472, number=472, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R191', index=473, number=473, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R292', index=474, number=474, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R300', index=475, number=475, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R336', index=476, number=476, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R380', index=477, number=477, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R470', index=478, number=478, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R499', index=479, number=479, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R807', index=480, number=480, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R620', index=481, number=481, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R674', index=482, number=482, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R688', index=483, number=483, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R705', index=484, number=484, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R724', index=485, number=485, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R155', index=486, number=486, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R040', index=487, number=487, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R056', index=488, number=488, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R250', index=489, number=489, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R276', index=490, number=490, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R438', index=491, number=491, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R442', index=492, number=492, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R492', index=493, number=493, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R528', index=494, number=494, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R756', index=495, number=495, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R009', index=496, number=496, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R053', index=497, number=497, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R036', index=498, number=498, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R162', index=499, number=499, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R166', index=500, number=500, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R334', index=501, number=501, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R554', index=502, number=502, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + 
name='R574', index=503, number=503, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R054', index=504, number=504, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R242', index=505, number=505, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R540', index=506, number=506, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R598', index=507, number=507, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R090', index=508, number=508, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R548', index=509, number=509, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R057', index=510, number=510, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R316', index=511, number=511, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R296', index=512, number=512, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R584', index=513, number=513, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R583', index=514, number=514, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R520', index=515, number=515, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R580', index=516, number=516, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R585', index=517, number=517, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R581', index=518, number=518, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R061', index=519, number=519, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R016', index=520, number=520, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R184', index=521, number=521, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R258', index=522, number=522, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R570', index=523, number=523, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R612', index=524, number=524, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R882', index=525, number=525, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R772', index=526, number=526, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R776', index=527, number=527, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R798', index=528, number=528, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='R876', index=529, number=529, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=5337, + serialized_end=10561, +) +_sym_db.RegisterEnumDescriptor(_LOCATION_COUNTRY) + + +_CLAIM = _descriptor.Descriptor( + name='Claim', + full_name='pb.Claim', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='stream', full_name='pb.Claim.stream', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='channel', full_name='pb.Claim.channel', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='collection', full_name='pb.Claim.collection', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='repost', full_name='pb.Claim.repost', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='title', full_name='pb.Claim.title', index=4, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='description', full_name='pb.Claim.description', index=5, + number=9, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='thumbnail', full_name='pb.Claim.thumbnail', index=6, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tags', full_name='pb.Claim.tags', index=7, + number=11, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='languages', full_name='pb.Claim.languages', index=8, + number=12, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='locations', full_name='pb.Claim.locations', index=9, + number=13, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='type', full_name='pb.Claim.type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=20, + serialized_end=319, +) + + +_STREAM = _descriptor.Descriptor( + name='Stream', + full_name='pb.Stream', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='source', full_name='pb.Stream.source', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='author', full_name='pb.Stream.author', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='license', full_name='pb.Stream.license', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='license_url', full_name='pb.Stream.license_url', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='release_time', full_name='pb.Stream.release_time', index=4, + number=5, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='fee', full_name='pb.Stream.fee', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='image', full_name='pb.Stream.image', index=6, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='video', full_name='pb.Stream.video', index=7, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='audio', full_name='pb.Stream.audio', index=8, + number=12, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='software', full_name='pb.Stream.software', index=9, + number=13, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='type', full_name='pb.Stream.type', + index=0, containing_type=None, fields=[]), + ], + serialized_start=322, + serialized_end=582, +) + + +_CHANNEL = _descriptor.Descriptor( + name='Channel', + full_name='pb.Channel', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='public_key', full_name='pb.Channel.public_key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='email', full_name='pb.Channel.email', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='website_url', full_name='pb.Channel.website_url', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cover', full_name='pb.Channel.cover', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='featured', full_name='pb.Channel.featured', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=584, + serialized_end=709, +) + + +_CLAIMREFERENCE = _descriptor.Descriptor( + name='ClaimReference', + full_name='pb.ClaimReference', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='claim_hash', full_name='pb.ClaimReference.claim_hash', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=711, + serialized_end=747, +) + + +_CLAIMLIST = _descriptor.Descriptor( + name='ClaimList', + full_name='pb.ClaimList', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='list_type', full_name='pb.ClaimList.list_type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='claim_references', full_name='pb.ClaimList.claim_references', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _CLAIMLIST_LISTTYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=750, + serialized_end=894, +) + + +_SOURCE = _descriptor.Descriptor( + name='Source', + full_name='pb.Source', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='hash', full_name='pb.Source.hash', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='name', full_name='pb.Source.name', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='size', full_name='pb.Source.size', index=2, + number=3, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='media_type', full_name='pb.Source.media_type', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='url', full_name='pb.Source.url', index=4, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sd_hash', full_name='pb.Source.sd_hash', index=5, + number=6, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bt_infohash', full_name='pb.Source.bt_infohash', index=6, + number=7, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=896, + serialized_end=1017, +) + + +_FEE = _descriptor.Descriptor( + name='Fee', + full_name='pb.Fee', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='currency', full_name='pb.Fee.currency', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='address', full_name='pb.Fee.address', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='amount', full_name='pb.Fee.amount', index=2, + number=3, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _FEE_CURRENCY, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1020, + serialized_end=1155, +) + + +_IMAGE = _descriptor.Descriptor( + name='Image', + full_name='pb.Image', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='width', full_name='pb.Image.width', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='height', full_name='pb.Image.height', index=1, + number=2, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1157, + serialized_end=1195, +) + + +_VIDEO = _descriptor.Descriptor( + name='Video', + full_name='pb.Video', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='width', full_name='pb.Video.width', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='height', full_name='pb.Video.height', index=1, + number=2, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='duration', full_name='pb.Video.duration', index=2, + number=3, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='audio', full_name='pb.Video.audio', index=3, + number=15, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1197, + serialized_end=1279, +) + + +_AUDIO = _descriptor.Descriptor( + name='Audio', + full_name='pb.Audio', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='duration', full_name='pb.Audio.duration', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1281, + serialized_end=1306, +) + + +_SOFTWARE = _descriptor.Descriptor( + name='Software', + full_name='pb.Software', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='os', full_name='pb.Software.os', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _SOFTWARE_OS, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1308, + serialized_end=1416, +) + + +_LANGUAGE = _descriptor.Descriptor( + name='Language', + full_name='pb.Language', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='language', full_name='pb.Language.language', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='script', full_name='pb.Language.script', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='region', full_name='pb.Language.region', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _LANGUAGE_LANGUAGE, + _LANGUAGE_SCRIPT, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1419, + serialized_end=5202, +) + + +_LOCATION = _descriptor.Descriptor( + name='Location', + full_name='pb.Location', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='country', full_name='pb.Location.country', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='state', full_name='pb.Location.state', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='city', full_name='pb.Location.city', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='code', full_name='pb.Location.code', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='latitude', full_name='pb.Location.latitude', index=4, + number=5, type=17, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='longitude', full_name='pb.Location.longitude', index=5, + number=6, type=17, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _LOCATION_COUNTRY, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5205, + serialized_end=10561, +) + +_CLAIM.fields_by_name['stream'].message_type = _STREAM +_CLAIM.fields_by_name['channel'].message_type = _CHANNEL +_CLAIM.fields_by_name['collection'].message_type = _CLAIMLIST 
+_CLAIM.fields_by_name['repost'].message_type = _CLAIMREFERENCE +_CLAIM.fields_by_name['thumbnail'].message_type = _SOURCE +_CLAIM.fields_by_name['languages'].message_type = _LANGUAGE +_CLAIM.fields_by_name['locations'].message_type = _LOCATION +_CLAIM.oneofs_by_name['type'].fields.append( + _CLAIM.fields_by_name['stream']) +_CLAIM.fields_by_name['stream'].containing_oneof = _CLAIM.oneofs_by_name['type'] +_CLAIM.oneofs_by_name['type'].fields.append( + _CLAIM.fields_by_name['channel']) +_CLAIM.fields_by_name['channel'].containing_oneof = _CLAIM.oneofs_by_name['type'] +_CLAIM.oneofs_by_name['type'].fields.append( + _CLAIM.fields_by_name['collection']) +_CLAIM.fields_by_name['collection'].containing_oneof = _CLAIM.oneofs_by_name['type'] +_CLAIM.oneofs_by_name['type'].fields.append( + _CLAIM.fields_by_name['repost']) +_CLAIM.fields_by_name['repost'].containing_oneof = _CLAIM.oneofs_by_name['type'] +_STREAM.fields_by_name['source'].message_type = _SOURCE +_STREAM.fields_by_name['fee'].message_type = _FEE +_STREAM.fields_by_name['image'].message_type = _IMAGE +_STREAM.fields_by_name['video'].message_type = _VIDEO +_STREAM.fields_by_name['audio'].message_type = _AUDIO +_STREAM.fields_by_name['software'].message_type = _SOFTWARE +_STREAM.oneofs_by_name['type'].fields.append( + _STREAM.fields_by_name['image']) +_STREAM.fields_by_name['image'].containing_oneof = _STREAM.oneofs_by_name['type'] +_STREAM.oneofs_by_name['type'].fields.append( + _STREAM.fields_by_name['video']) +_STREAM.fields_by_name['video'].containing_oneof = _STREAM.oneofs_by_name['type'] +_STREAM.oneofs_by_name['type'].fields.append( + _STREAM.fields_by_name['audio']) +_STREAM.fields_by_name['audio'].containing_oneof = _STREAM.oneofs_by_name['type'] +_STREAM.oneofs_by_name['type'].fields.append( + _STREAM.fields_by_name['software']) +_STREAM.fields_by_name['software'].containing_oneof = _STREAM.oneofs_by_name['type'] +_CHANNEL.fields_by_name['cover'].message_type = _SOURCE +_CHANNEL.fields_by_name['featured'].message_type = _CLAIMLIST +_CLAIMLIST.fields_by_name['list_type'].enum_type = _CLAIMLIST_LISTTYPE +_CLAIMLIST.fields_by_name['claim_references'].message_type = _CLAIMREFERENCE +_CLAIMLIST_LISTTYPE.containing_type = _CLAIMLIST +_FEE.fields_by_name['currency'].enum_type = _FEE_CURRENCY +_FEE_CURRENCY.containing_type = _FEE +_VIDEO.fields_by_name['audio'].message_type = _AUDIO +_SOFTWARE_OS.containing_type = _SOFTWARE +_LANGUAGE.fields_by_name['language'].enum_type = _LANGUAGE_LANGUAGE +_LANGUAGE.fields_by_name['script'].enum_type = _LANGUAGE_SCRIPT +_LANGUAGE.fields_by_name['region'].enum_type = _LOCATION_COUNTRY +_LANGUAGE_LANGUAGE.containing_type = _LANGUAGE +_LANGUAGE_SCRIPT.containing_type = _LANGUAGE +_LOCATION.fields_by_name['country'].enum_type = _LOCATION_COUNTRY +_LOCATION_COUNTRY.containing_type = _LOCATION +DESCRIPTOR.message_types_by_name['Claim'] = _CLAIM +DESCRIPTOR.message_types_by_name['Stream'] = _STREAM +DESCRIPTOR.message_types_by_name['Channel'] = _CHANNEL +DESCRIPTOR.message_types_by_name['ClaimReference'] = _CLAIMREFERENCE +DESCRIPTOR.message_types_by_name['ClaimList'] = _CLAIMLIST +DESCRIPTOR.message_types_by_name['Source'] = _SOURCE +DESCRIPTOR.message_types_by_name['Fee'] = _FEE +DESCRIPTOR.message_types_by_name['Image'] = _IMAGE +DESCRIPTOR.message_types_by_name['Video'] = _VIDEO +DESCRIPTOR.message_types_by_name['Audio'] = _AUDIO +DESCRIPTOR.message_types_by_name['Software'] = _SOFTWARE +DESCRIPTOR.message_types_by_name['Language'] = _LANGUAGE +DESCRIPTOR.message_types_by_name['Location'] = _LOCATION + 
+Claim = _reflection.GeneratedProtocolMessageType('Claim', (_message.Message,), dict( + DESCRIPTOR = _CLAIM, + __module__ = 'claim_pb2' + # @@protoc_insertion_point(class_scope:pb.Claim) + )) +_sym_db.RegisterMessage(Claim) + +Stream = _reflection.GeneratedProtocolMessageType('Stream', (_message.Message,), dict( + DESCRIPTOR = _STREAM, + __module__ = 'claim_pb2' + # @@protoc_insertion_point(class_scope:pb.Stream) + )) +_sym_db.RegisterMessage(Stream) + +Channel = _reflection.GeneratedProtocolMessageType('Channel', (_message.Message,), dict( + DESCRIPTOR = _CHANNEL, + __module__ = 'claim_pb2' + # @@protoc_insertion_point(class_scope:pb.Channel) + )) +_sym_db.RegisterMessage(Channel) + +ClaimReference = _reflection.GeneratedProtocolMessageType('ClaimReference', (_message.Message,), dict( + DESCRIPTOR = _CLAIMREFERENCE, + __module__ = 'claim_pb2' + # @@protoc_insertion_point(class_scope:pb.ClaimReference) + )) +_sym_db.RegisterMessage(ClaimReference) + +ClaimList = _reflection.GeneratedProtocolMessageType('ClaimList', (_message.Message,), dict( + DESCRIPTOR = _CLAIMLIST, + __module__ = 'claim_pb2' + # @@protoc_insertion_point(class_scope:pb.ClaimList) + )) +_sym_db.RegisterMessage(ClaimList) + +Source = _reflection.GeneratedProtocolMessageType('Source', (_message.Message,), dict( + DESCRIPTOR = _SOURCE, + __module__ = 'claim_pb2' + # @@protoc_insertion_point(class_scope:pb.Source) + )) +_sym_db.RegisterMessage(Source) + +Fee = _reflection.GeneratedProtocolMessageType('Fee', (_message.Message,), dict( + DESCRIPTOR = _FEE, + __module__ = 'claim_pb2' + # @@protoc_insertion_point(class_scope:pb.Fee) + )) +_sym_db.RegisterMessage(Fee) + +Image = _reflection.GeneratedProtocolMessageType('Image', (_message.Message,), dict( + DESCRIPTOR = _IMAGE, + __module__ = 'claim_pb2' + # @@protoc_insertion_point(class_scope:pb.Image) + )) +_sym_db.RegisterMessage(Image) + +Video = _reflection.GeneratedProtocolMessageType('Video', (_message.Message,), dict( + DESCRIPTOR = _VIDEO, + __module__ = 'claim_pb2' + # @@protoc_insertion_point(class_scope:pb.Video) + )) +_sym_db.RegisterMessage(Video) + +Audio = _reflection.GeneratedProtocolMessageType('Audio', (_message.Message,), dict( + DESCRIPTOR = _AUDIO, + __module__ = 'claim_pb2' + # @@protoc_insertion_point(class_scope:pb.Audio) + )) +_sym_db.RegisterMessage(Audio) + +Software = _reflection.GeneratedProtocolMessageType('Software', (_message.Message,), dict( + DESCRIPTOR = _SOFTWARE, + __module__ = 'claim_pb2' + # @@protoc_insertion_point(class_scope:pb.Software) + )) +_sym_db.RegisterMessage(Software) + +Language = _reflection.GeneratedProtocolMessageType('Language', (_message.Message,), dict( + DESCRIPTOR = _LANGUAGE, + __module__ = 'claim_pb2' + # @@protoc_insertion_point(class_scope:pb.Language) + )) +_sym_db.RegisterMessage(Language) + +Location = _reflection.GeneratedProtocolMessageType('Location', (_message.Message,), dict( + DESCRIPTOR = _LOCATION, + __module__ = 'claim_pb2' + # @@protoc_insertion_point(class_scope:pb.Location) + )) +_sym_db.RegisterMessage(Location) + + +# @@protoc_insertion_point(module_scope) diff --git a/scribe/schema/types/v2/hub_pb2.py b/scribe/schema/types/v2/hub_pb2.py new file mode 100644 index 0000000..1cc0f47 --- /dev/null +++ b/scribe/schema/types/v2/hub_pb2.py @@ -0,0 +1,960 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: hub.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import result_pb2 as result__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='hub.proto', + package='pb', + syntax='proto3', + serialized_options=b'Z$github.com/lbryio/hub/protobuf/go/pb', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n\thub.proto\x12\x02pb\x1a\x0cresult.proto\"\x0e\n\x0c\x45mptyMessage\".\n\rServerMessage\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\t\"N\n\x0cHelloMessage\x12\x0c\n\x04port\x18\x01 \x01(\t\x12\x0c\n\x04host\x18\x02 \x01(\t\x12\"\n\x07servers\x18\x03 \x03(\x0b\x32\x11.pb.ServerMessage\"0\n\x0fInvertibleField\x12\x0e\n\x06invert\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x03(\t\"\x1c\n\x0bStringValue\x12\r\n\x05value\x18\x01 \x01(\t\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"j\n\nRangeField\x12\x1d\n\x02op\x18\x01 \x01(\x0e\x32\x11.pb.RangeField.Op\x12\r\n\x05value\x18\x02 \x03(\x05\".\n\x02Op\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03LTE\x10\x01\x12\x07\n\x03GTE\x10\x02\x12\x06\n\x02LT\x10\x03\x12\x06\n\x02GT\x10\x04\"\x8e\x0c\n\rSearchRequest\x12%\n\x08\x63laim_id\x18\x01 \x01(\x0b\x32\x13.pb.InvertibleField\x12\'\n\nchannel_id\x18\x02 \x01(\x0b\x32\x13.pb.InvertibleField\x12\x0c\n\x04text\x18\x03 \x01(\t\x12\r\n\x05limit\x18\x04 \x01(\x05\x12\x10\n\x08order_by\x18\x05 \x03(\t\x12\x0e\n\x06offset\x18\x06 \x01(\r\x12\x16\n\x0eis_controlling\x18\x07 \x01(\x08\x12\x1d\n\x15last_take_over_height\x18\x08 \x01(\t\x12\x12\n\nclaim_name\x18\t \x01(\t\x12\x17\n\x0fnormalized_name\x18\n \x01(\t\x12#\n\x0btx_position\x18\x0b \x03(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06\x61mount\x18\x0c \x03(\x0b\x32\x0e.pb.RangeField\x12!\n\ttimestamp\x18\r \x03(\x0b\x32\x0e.pb.RangeField\x12*\n\x12\x63reation_timestamp\x18\x0e \x03(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06height\x18\x0f \x03(\x0b\x32\x0e.pb.RangeField\x12\'\n\x0f\x63reation_height\x18\x10 \x03(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x61\x63tivation_height\x18\x11 \x03(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x65xpiration_height\x18\x12 \x03(\x0b\x32\x0e.pb.RangeField\x12$\n\x0crelease_time\x18\x13 \x03(\x0b\x32\x0e.pb.RangeField\x12\x11\n\tshort_url\x18\x14 \x01(\t\x12\x15\n\rcanonical_url\x18\x15 \x01(\t\x12\r\n\x05title\x18\x16 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x17 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x18 \x01(\t\x12\x12\n\nclaim_type\x18\x19 \x03(\t\x12$\n\x0crepost_count\x18\x1a \x03(\x0b\x32\x0e.pb.RangeField\x12\x13\n\x0bstream_type\x18\x1b \x03(\t\x12\x12\n\nmedia_type\x18\x1c \x03(\t\x12\"\n\nfee_amount\x18\x1d \x03(\x0b\x32\x0e.pb.RangeField\x12\x14\n\x0c\x66\x65\x65_currency\x18\x1e \x01(\t\x12 \n\x08\x64uration\x18\x1f \x03(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11reposted_claim_id\x18 \x01(\t\x12#\n\x0b\x63\x65nsor_type\x18! 
\x03(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11\x63laims_in_channel\x18\" \x01(\t\x12)\n\x12is_signature_valid\x18$ \x01(\x0b\x32\r.pb.BoolValue\x12(\n\x10\x65\x66\x66\x65\x63tive_amount\x18% \x03(\x0b\x32\x0e.pb.RangeField\x12&\n\x0esupport_amount\x18& \x03(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_score\x18\' \x03(\x0b\x32\x0e.pb.RangeField\x12\r\n\x05tx_id\x18+ \x01(\t\x12 \n\x07tx_nout\x18, \x01(\x0b\x32\x0f.pb.UInt32Value\x12\x11\n\tsignature\x18- \x01(\t\x12\x18\n\x10signature_digest\x18. \x01(\t\x12\x18\n\x10public_key_bytes\x18/ \x01(\t\x12\x15\n\rpublic_key_id\x18\x30 \x01(\t\x12\x10\n\x08\x61ny_tags\x18\x31 \x03(\t\x12\x10\n\x08\x61ll_tags\x18\x32 \x03(\t\x12\x10\n\x08not_tags\x18\x33 \x03(\t\x12\x1d\n\x15has_channel_signature\x18\x34 \x01(\x08\x12!\n\nhas_source\x18\x35 \x01(\x0b\x32\r.pb.BoolValue\x12 \n\x18limit_claims_per_channel\x18\x36 \x01(\x05\x12\x15\n\rany_languages\x18\x37 \x03(\t\x12\x15\n\rall_languages\x18\x38 \x03(\t\x12\x19\n\x11remove_duplicates\x18\x39 \x01(\x08\x12\x11\n\tno_totals\x18: \x01(\x08\x12\x0f\n\x07sd_hash\x18; \x01(\t2\x88\x03\n\x03Hub\x12*\n\x06Search\x12\x11.pb.SearchRequest\x1a\x0b.pb.Outputs\"\x00\x12+\n\x04Ping\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12-\n\x05Hello\x12\x10.pb.HelloMessage\x1a\x10.pb.HelloMessage\"\x00\x12/\n\x07\x41\x64\x64Peer\x12\x11.pb.ServerMessage\x1a\x0f.pb.StringValue\"\x00\x12\x35\n\rPeerSubscribe\x12\x11.pb.ServerMessage\x1a\x0f.pb.StringValue\"\x00\x12.\n\x07Version\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12/\n\x08\x46\x65\x61tures\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12\x30\n\tBroadcast\x12\x10.pb.EmptyMessage\x1a\x0f.pb.UInt32Value\"\x00\x42&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3' + , + dependencies=[result__pb2.DESCRIPTOR,]) + + + +_RANGEFIELD_OP = _descriptor.EnumDescriptor( + name='Op', + full_name='pb.RangeField.Op', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='EQ', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LTE', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='GTE', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='LT', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='GT', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=373, + serialized_end=419, +) +_sym_db.RegisterEnumDescriptor(_RANGEFIELD_OP) + + +_EMPTYMESSAGE = _descriptor.Descriptor( + name='EmptyMessage', + full_name='pb.EmptyMessage', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=31, + serialized_end=45, +) + + +_SERVERMESSAGE = _descriptor.Descriptor( + name='ServerMessage', + full_name='pb.ServerMessage', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + 
fields=[ + _descriptor.FieldDescriptor( + name='address', full_name='pb.ServerMessage.address', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='port', full_name='pb.ServerMessage.port', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=47, + serialized_end=93, +) + + +_HELLOMESSAGE = _descriptor.Descriptor( + name='HelloMessage', + full_name='pb.HelloMessage', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='port', full_name='pb.HelloMessage.port', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='host', full_name='pb.HelloMessage.host', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='servers', full_name='pb.HelloMessage.servers', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=95, + serialized_end=173, +) + + +_INVERTIBLEFIELD = _descriptor.Descriptor( + name='InvertibleField', + full_name='pb.InvertibleField', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='invert', full_name='pb.InvertibleField.invert', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='pb.InvertibleField.value', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=175, + serialized_end=223, +) + + +_STRINGVALUE = _descriptor.Descriptor( + name='StringValue', + full_name='pb.StringValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='pb.StringValue.value', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=225, + serialized_end=253, +) + + +_BOOLVALUE = _descriptor.Descriptor( + name='BoolValue', + full_name='pb.BoolValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='pb.BoolValue.value', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=255, + serialized_end=281, +) + + +_UINT32VALUE = _descriptor.Descriptor( + name='UInt32Value', + full_name='pb.UInt32Value', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='pb.UInt32Value.value', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=283, + serialized_end=311, +) + + +_RANGEFIELD = _descriptor.Descriptor( + name='RangeField', + full_name='pb.RangeField', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='op', full_name='pb.RangeField.op', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='pb.RangeField.value', index=1, + number=2, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _RANGEFIELD_OP, + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=313, + serialized_end=419, +) + + +_SEARCHREQUEST = _descriptor.Descriptor( + name='SearchRequest', + full_name='pb.SearchRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='claim_id', full_name='pb.SearchRequest.claim_id', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='channel_id', full_name='pb.SearchRequest.channel_id', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='text', full_name='pb.SearchRequest.text', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='limit', full_name='pb.SearchRequest.limit', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='order_by', full_name='pb.SearchRequest.order_by', index=4, + number=5, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='offset', full_name='pb.SearchRequest.offset', index=5, + number=6, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='is_controlling', full_name='pb.SearchRequest.is_controlling', index=6, + number=7, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='last_take_over_height', full_name='pb.SearchRequest.last_take_over_height', index=7, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='claim_name', full_name='pb.SearchRequest.claim_name', index=8, + number=9, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='normalized_name', full_name='pb.SearchRequest.normalized_name', index=9, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='tx_position', full_name='pb.SearchRequest.tx_position', index=10, + number=11, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='amount', full_name='pb.SearchRequest.amount', index=11, + number=12, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='timestamp', full_name='pb.SearchRequest.timestamp', index=12, + number=13, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='creation_timestamp', full_name='pb.SearchRequest.creation_timestamp', index=13, + number=14, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='height', full_name='pb.SearchRequest.height', index=14, + number=15, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='creation_height', full_name='pb.SearchRequest.creation_height', index=15, + number=16, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='activation_height', full_name='pb.SearchRequest.activation_height', index=16, + number=17, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='expiration_height', full_name='pb.SearchRequest.expiration_height', index=17, + number=18, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='release_time', full_name='pb.SearchRequest.release_time', index=18, + number=19, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='short_url', full_name='pb.SearchRequest.short_url', index=19, + number=20, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='canonical_url', full_name='pb.SearchRequest.canonical_url', index=20, + number=21, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='title', full_name='pb.SearchRequest.title', index=21, + number=22, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='author', full_name='pb.SearchRequest.author', index=22, + number=23, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='description', full_name='pb.SearchRequest.description', index=23, + number=24, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='claim_type', full_name='pb.SearchRequest.claim_type', index=24, + number=25, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='repost_count', full_name='pb.SearchRequest.repost_count', index=25, + number=26, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='stream_type', full_name='pb.SearchRequest.stream_type', index=26, + number=27, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='media_type', full_name='pb.SearchRequest.media_type', index=27, + number=28, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='fee_amount', full_name='pb.SearchRequest.fee_amount', index=28, + number=29, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='fee_currency', full_name='pb.SearchRequest.fee_currency', index=29, + number=30, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='duration', full_name='pb.SearchRequest.duration', index=30, + number=31, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reposted_claim_id', full_name='pb.SearchRequest.reposted_claim_id', index=31, + number=32, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='censor_type', full_name='pb.SearchRequest.censor_type', index=32, + number=33, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='claims_in_channel', full_name='pb.SearchRequest.claims_in_channel', index=33, + number=34, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='is_signature_valid', full_name='pb.SearchRequest.is_signature_valid', index=34, + number=36, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='effective_amount', full_name='pb.SearchRequest.effective_amount', index=35, + number=37, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='support_amount', full_name='pb.SearchRequest.support_amount', index=36, + number=38, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='trending_score', full_name='pb.SearchRequest.trending_score', index=37, + number=39, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='tx_id', full_name='pb.SearchRequest.tx_id', index=38, + number=43, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='tx_nout', full_name='pb.SearchRequest.tx_nout', index=39, + number=44, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='signature', full_name='pb.SearchRequest.signature', index=40, + number=45, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='signature_digest', full_name='pb.SearchRequest.signature_digest', index=41, + number=46, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='public_key_bytes', full_name='pb.SearchRequest.public_key_bytes', index=42, + number=47, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='public_key_id', full_name='pb.SearchRequest.public_key_id', index=43, + number=48, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='any_tags', full_name='pb.SearchRequest.any_tags', index=44, + number=49, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='all_tags', full_name='pb.SearchRequest.all_tags', index=45, + number=50, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='not_tags', full_name='pb.SearchRequest.not_tags', index=46, + number=51, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='has_channel_signature', full_name='pb.SearchRequest.has_channel_signature', index=47, + number=52, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='has_source', full_name='pb.SearchRequest.has_source', index=48, + number=53, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='limit_claims_per_channel', full_name='pb.SearchRequest.limit_claims_per_channel', index=49, + number=54, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='any_languages', full_name='pb.SearchRequest.any_languages', index=50, + number=55, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='all_languages', full_name='pb.SearchRequest.all_languages', index=51, + number=56, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='remove_duplicates', full_name='pb.SearchRequest.remove_duplicates', index=52, + number=57, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='no_totals', full_name='pb.SearchRequest.no_totals', index=53, + number=58, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='sd_hash', full_name='pb.SearchRequest.sd_hash', index=54, + number=59, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=422, + serialized_end=1972, +) + +_HELLOMESSAGE.fields_by_name['servers'].message_type = _SERVERMESSAGE +_RANGEFIELD.fields_by_name['op'].enum_type = _RANGEFIELD_OP +_RANGEFIELD_OP.containing_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['claim_id'].message_type = _INVERTIBLEFIELD +_SEARCHREQUEST.fields_by_name['channel_id'].message_type = _INVERTIBLEFIELD +_SEARCHREQUEST.fields_by_name['tx_position'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['amount'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['timestamp'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['creation_timestamp'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['height'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['creation_height'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['activation_height'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['expiration_height'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['release_time'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['repost_count'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['fee_amount'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['duration'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['censor_type'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['is_signature_valid'].message_type = _BOOLVALUE +_SEARCHREQUEST.fields_by_name['effective_amount'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['support_amount'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['trending_score'].message_type = _RANGEFIELD +_SEARCHREQUEST.fields_by_name['tx_nout'].message_type = _UINT32VALUE +_SEARCHREQUEST.fields_by_name['has_source'].message_type = _BOOLVALUE +DESCRIPTOR.message_types_by_name['EmptyMessage'] = _EMPTYMESSAGE +DESCRIPTOR.message_types_by_name['ServerMessage'] = _SERVERMESSAGE +DESCRIPTOR.message_types_by_name['HelloMessage'] = _HELLOMESSAGE +DESCRIPTOR.message_types_by_name['InvertibleField'] = _INVERTIBLEFIELD +DESCRIPTOR.message_types_by_name['StringValue'] = _STRINGVALUE +DESCRIPTOR.message_types_by_name['BoolValue'] = _BOOLVALUE +DESCRIPTOR.message_types_by_name['UInt32Value'] = _UINT32VALUE +DESCRIPTOR.message_types_by_name['RangeField'] = _RANGEFIELD +DESCRIPTOR.message_types_by_name['SearchRequest'] = _SEARCHREQUEST +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +EmptyMessage = 
_reflection.GeneratedProtocolMessageType('EmptyMessage', (_message.Message,), { + 'DESCRIPTOR' : _EMPTYMESSAGE, + '__module__' : 'hub_pb2' + # @@protoc_insertion_point(class_scope:pb.EmptyMessage) + }) +_sym_db.RegisterMessage(EmptyMessage) + +ServerMessage = _reflection.GeneratedProtocolMessageType('ServerMessage', (_message.Message,), { + 'DESCRIPTOR' : _SERVERMESSAGE, + '__module__' : 'hub_pb2' + # @@protoc_insertion_point(class_scope:pb.ServerMessage) + }) +_sym_db.RegisterMessage(ServerMessage) + +HelloMessage = _reflection.GeneratedProtocolMessageType('HelloMessage', (_message.Message,), { + 'DESCRIPTOR' : _HELLOMESSAGE, + '__module__' : 'hub_pb2' + # @@protoc_insertion_point(class_scope:pb.HelloMessage) + }) +_sym_db.RegisterMessage(HelloMessage) + +InvertibleField = _reflection.GeneratedProtocolMessageType('InvertibleField', (_message.Message,), { + 'DESCRIPTOR' : _INVERTIBLEFIELD, + '__module__' : 'hub_pb2' + # @@protoc_insertion_point(class_scope:pb.InvertibleField) + }) +_sym_db.RegisterMessage(InvertibleField) + +StringValue = _reflection.GeneratedProtocolMessageType('StringValue', (_message.Message,), { + 'DESCRIPTOR' : _STRINGVALUE, + '__module__' : 'hub_pb2' + # @@protoc_insertion_point(class_scope:pb.StringValue) + }) +_sym_db.RegisterMessage(StringValue) + +BoolValue = _reflection.GeneratedProtocolMessageType('BoolValue', (_message.Message,), { + 'DESCRIPTOR' : _BOOLVALUE, + '__module__' : 'hub_pb2' + # @@protoc_insertion_point(class_scope:pb.BoolValue) + }) +_sym_db.RegisterMessage(BoolValue) + +UInt32Value = _reflection.GeneratedProtocolMessageType('UInt32Value', (_message.Message,), { + 'DESCRIPTOR' : _UINT32VALUE, + '__module__' : 'hub_pb2' + # @@protoc_insertion_point(class_scope:pb.UInt32Value) + }) +_sym_db.RegisterMessage(UInt32Value) + +RangeField = _reflection.GeneratedProtocolMessageType('RangeField', (_message.Message,), { + 'DESCRIPTOR' : _RANGEFIELD, + '__module__' : 'hub_pb2' + # @@protoc_insertion_point(class_scope:pb.RangeField) + }) +_sym_db.RegisterMessage(RangeField) + +SearchRequest = _reflection.GeneratedProtocolMessageType('SearchRequest', (_message.Message,), { + 'DESCRIPTOR' : _SEARCHREQUEST, + '__module__' : 'hub_pb2' + # @@protoc_insertion_point(class_scope:pb.SearchRequest) + }) +_sym_db.RegisterMessage(SearchRequest) + + +DESCRIPTOR._options = None + +_HUB = _descriptor.ServiceDescriptor( + name='Hub', + full_name='pb.Hub', + file=DESCRIPTOR, + index=0, + serialized_options=None, + create_key=_descriptor._internal_create_key, + serialized_start=1975, + serialized_end=2367, + methods=[ + _descriptor.MethodDescriptor( + name='Search', + full_name='pb.Hub.Search', + index=0, + containing_service=None, + input_type=_SEARCHREQUEST, + output_type=result__pb2._OUTPUTS, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='Ping', + full_name='pb.Hub.Ping', + index=1, + containing_service=None, + input_type=_EMPTYMESSAGE, + output_type=_STRINGVALUE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='Hello', + full_name='pb.Hub.Hello', + index=2, + containing_service=None, + input_type=_HELLOMESSAGE, + output_type=_HELLOMESSAGE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='AddPeer', + full_name='pb.Hub.AddPeer', + index=3, + containing_service=None, + input_type=_SERVERMESSAGE, + output_type=_STRINGVALUE, + serialized_options=None, + 
create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='PeerSubscribe', + full_name='pb.Hub.PeerSubscribe', + index=4, + containing_service=None, + input_type=_SERVERMESSAGE, + output_type=_STRINGVALUE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='Version', + full_name='pb.Hub.Version', + index=5, + containing_service=None, + input_type=_EMPTYMESSAGE, + output_type=_STRINGVALUE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='Features', + full_name='pb.Hub.Features', + index=6, + containing_service=None, + input_type=_EMPTYMESSAGE, + output_type=_STRINGVALUE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='Broadcast', + full_name='pb.Hub.Broadcast', + index=7, + containing_service=None, + input_type=_EMPTYMESSAGE, + output_type=_UINT32VALUE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), +]) +_sym_db.RegisterServiceDescriptor(_HUB) + +DESCRIPTOR.services_by_name['Hub'] = _HUB + +# @@protoc_insertion_point(module_scope) diff --git a/scribe/schema/types/v2/hub_pb2_grpc.py b/scribe/schema/types/v2/hub_pb2_grpc.py new file mode 100644 index 0000000..16fdafe --- /dev/null +++ b/scribe/schema/types/v2/hub_pb2_grpc.py @@ -0,0 +1,298 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from . import hub_pb2 as hub__pb2 +from . import result_pb2 as result__pb2 + + +class HubStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.Search = channel.unary_unary( + '/pb.Hub/Search', + request_serializer=hub__pb2.SearchRequest.SerializeToString, + response_deserializer=result__pb2.Outputs.FromString, + ) + self.Ping = channel.unary_unary( + '/pb.Hub/Ping', + request_serializer=hub__pb2.EmptyMessage.SerializeToString, + response_deserializer=hub__pb2.StringValue.FromString, + ) + self.Hello = channel.unary_unary( + '/pb.Hub/Hello', + request_serializer=hub__pb2.HelloMessage.SerializeToString, + response_deserializer=hub__pb2.HelloMessage.FromString, + ) + self.AddPeer = channel.unary_unary( + '/pb.Hub/AddPeer', + request_serializer=hub__pb2.ServerMessage.SerializeToString, + response_deserializer=hub__pb2.StringValue.FromString, + ) + self.PeerSubscribe = channel.unary_unary( + '/pb.Hub/PeerSubscribe', + request_serializer=hub__pb2.ServerMessage.SerializeToString, + response_deserializer=hub__pb2.StringValue.FromString, + ) + self.Version = channel.unary_unary( + '/pb.Hub/Version', + request_serializer=hub__pb2.EmptyMessage.SerializeToString, + response_deserializer=hub__pb2.StringValue.FromString, + ) + self.Features = channel.unary_unary( + '/pb.Hub/Features', + request_serializer=hub__pb2.EmptyMessage.SerializeToString, + response_deserializer=hub__pb2.StringValue.FromString, + ) + self.Broadcast = channel.unary_unary( + '/pb.Hub/Broadcast', + request_serializer=hub__pb2.EmptyMessage.SerializeToString, + response_deserializer=hub__pb2.UInt32Value.FromString, + ) + + +class HubServicer(object): + """Missing associated documentation comment in .proto file.""" + + def Search(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Ping(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Hello(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AddPeer(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PeerSubscribe(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Version(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Features(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Broadcast(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + 
context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_HubServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Search': grpc.unary_unary_rpc_method_handler( + servicer.Search, + request_deserializer=hub__pb2.SearchRequest.FromString, + response_serializer=result__pb2.Outputs.SerializeToString, + ), + 'Ping': grpc.unary_unary_rpc_method_handler( + servicer.Ping, + request_deserializer=hub__pb2.EmptyMessage.FromString, + response_serializer=hub__pb2.StringValue.SerializeToString, + ), + 'Hello': grpc.unary_unary_rpc_method_handler( + servicer.Hello, + request_deserializer=hub__pb2.HelloMessage.FromString, + response_serializer=hub__pb2.HelloMessage.SerializeToString, + ), + 'AddPeer': grpc.unary_unary_rpc_method_handler( + servicer.AddPeer, + request_deserializer=hub__pb2.ServerMessage.FromString, + response_serializer=hub__pb2.StringValue.SerializeToString, + ), + 'PeerSubscribe': grpc.unary_unary_rpc_method_handler( + servicer.PeerSubscribe, + request_deserializer=hub__pb2.ServerMessage.FromString, + response_serializer=hub__pb2.StringValue.SerializeToString, + ), + 'Version': grpc.unary_unary_rpc_method_handler( + servicer.Version, + request_deserializer=hub__pb2.EmptyMessage.FromString, + response_serializer=hub__pb2.StringValue.SerializeToString, + ), + 'Features': grpc.unary_unary_rpc_method_handler( + servicer.Features, + request_deserializer=hub__pb2.EmptyMessage.FromString, + response_serializer=hub__pb2.StringValue.SerializeToString, + ), + 'Broadcast': grpc.unary_unary_rpc_method_handler( + servicer.Broadcast, + request_deserializer=hub__pb2.EmptyMessage.FromString, + response_serializer=hub__pb2.UInt32Value.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'pb.Hub', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. 
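+# Note: the Hub class below provides generated one-shot helpers that route every
+# call through grpc.experimental.unary_unary against a bare target string. For
+# repeated requests, HubStub over a long-lived channel is typically cheaper; a
+# minimal sketch (assuming a hub reachable at 'localhost:50051', a hypothetical
+# address):
+#
+#   channel = grpc.insecure_channel('localhost:50051')
+#   stub = HubStub(channel)
+#   version = stub.Version(hub__pb2.EmptyMessage())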
+class Hub(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def Search(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/pb.Hub/Search', + hub__pb2.SearchRequest.SerializeToString, + result__pb2.Outputs.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Ping(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/pb.Hub/Ping', + hub__pb2.EmptyMessage.SerializeToString, + hub__pb2.StringValue.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Hello(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/pb.Hub/Hello', + hub__pb2.HelloMessage.SerializeToString, + hub__pb2.HelloMessage.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def AddPeer(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/pb.Hub/AddPeer', + hub__pb2.ServerMessage.SerializeToString, + hub__pb2.StringValue.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PeerSubscribe(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/pb.Hub/PeerSubscribe', + hub__pb2.ServerMessage.SerializeToString, + hub__pb2.StringValue.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Version(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/pb.Hub/Version', + hub__pb2.EmptyMessage.SerializeToString, + hub__pb2.StringValue.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Features(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/pb.Hub/Features', + hub__pb2.EmptyMessage.SerializeToString, + hub__pb2.StringValue.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Broadcast(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + 
insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/pb.Hub/Broadcast', + hub__pb2.EmptyMessage.SerializeToString, + hub__pb2.UInt32Value.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/scribe/schema/types/v2/purchase_pb2.py b/scribe/schema/types/v2/purchase_pb2.py new file mode 100644 index 0000000..a2f02e5 --- /dev/null +++ b/scribe/schema/types/v2/purchase_pb2.py @@ -0,0 +1,69 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: purchase.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='purchase.proto', + package='pb', + syntax='proto3', + serialized_pb=_b('\n\x0epurchase.proto\x12\x02pb\"\x1e\n\x08Purchase\x12\x12\n\nclaim_hash\x18\x01 \x01(\x0c\x62\x06proto3') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_PURCHASE = _descriptor.Descriptor( + name='Purchase', + full_name='pb.Purchase', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='claim_hash', full_name='pb.Purchase.claim_hash', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=22, + serialized_end=52, +) + +DESCRIPTOR.message_types_by_name['Purchase'] = _PURCHASE + +Purchase = _reflection.GeneratedProtocolMessageType('Purchase', (_message.Message,), dict( + DESCRIPTOR = _PURCHASE, + __module__ = 'purchase_pb2' + # @@protoc_insertion_point(class_scope:pb.Purchase) + )) +_sym_db.RegisterMessage(Purchase) + + +# @@protoc_insertion_point(module_scope) diff --git a/scribe/schema/types/v2/result_pb2.py b/scribe/schema/types/v2/result_pb2.py new file mode 100644 index 0000000..be36eef --- /dev/null +++ b/scribe/schema/types/v2/result_pb2.py @@ -0,0 +1,464 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: result.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='result.proto', + package='pb', + syntax='proto3', + serialized_options=b'Z$github.com/lbryio/hub/protobuf/go/pb', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n\x0cresult.proto\x12\x02pb\"\x97\x01\n\x07Outputs\x12\x18\n\x04txos\x18\x01 \x03(\x0b\x32\n.pb.Output\x12\x1e\n\nextra_txos\x18\x02 \x03(\x0b\x32\n.pb.Output\x12\r\n\x05total\x18\x03 \x01(\r\x12\x0e\n\x06offset\x18\x04 \x01(\r\x12\x1c\n\x07\x62locked\x18\x05 \x03(\x0b\x32\x0b.pb.Blocked\x12\x15\n\rblocked_total\x18\x06 \x01(\r\"{\n\x06Output\x12\x0f\n\x07tx_hash\x18\x01 \x01(\x0c\x12\x0c\n\x04nout\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x1e\n\x05\x63laim\x18\x07 \x01(\x0b\x32\r.pb.ClaimMetaH\x00\x12\x1a\n\x05\x65rror\x18\x0f \x01(\x0b\x32\t.pb.ErrorH\x00\x42\x06\n\x04meta\"\xe6\x02\n\tClaimMeta\x12\x1b\n\x07\x63hannel\x18\x01 \x01(\x0b\x32\n.pb.Output\x12\x1a\n\x06repost\x18\x02 \x01(\x0b\x32\n.pb.Output\x12\x11\n\tshort_url\x18\x03 \x01(\t\x12\x15\n\rcanonical_url\x18\x04 \x01(\t\x12\x16\n\x0eis_controlling\x18\x05 \x01(\x08\x12\x18\n\x10take_over_height\x18\x06 \x01(\r\x12\x17\n\x0f\x63reation_height\x18\x07 \x01(\r\x12\x19\n\x11\x61\x63tivation_height\x18\x08 \x01(\r\x12\x19\n\x11\x65xpiration_height\x18\t \x01(\r\x12\x19\n\x11\x63laims_in_channel\x18\n \x01(\r\x12\x10\n\x08reposted\x18\x0b \x01(\r\x12\x18\n\x10\x65\x66\x66\x65\x63tive_amount\x18\x14 \x01(\x04\x12\x16\n\x0esupport_amount\x18\x15 \x01(\x04\x12\x16\n\x0etrending_score\x18\x16 \x01(\x01\"\x94\x01\n\x05\x45rror\x12\x1c\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x0e.pb.Error.Code\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x1c\n\x07\x62locked\x18\x03 \x01(\x0b\x32\x0b.pb.Blocked\"A\n\x04\x43ode\x12\x10\n\x0cUNKNOWN_CODE\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07\x42LOCKED\x10\x03\"5\n\x07\x42locked\x12\r\n\x05\x63ount\x18\x01 \x01(\r\x12\x1b\n\x07\x63hannel\x18\x02 \x01(\x0b\x32\n.pb.OutputB&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3' +) + + + +_ERROR_CODE = _descriptor.EnumDescriptor( + name='Code', + full_name='pb.Error.Code', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='UNKNOWN_CODE', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='NOT_FOUND', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='INVALID', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='BLOCKED', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=744, + serialized_end=809, +) +_sym_db.RegisterEnumDescriptor(_ERROR_CODE) + + +_OUTPUTS = _descriptor.Descriptor( + name='Outputs', + full_name='pb.Outputs', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, 
+ fields=[ + _descriptor.FieldDescriptor( + name='txos', full_name='pb.Outputs.txos', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extra_txos', full_name='pb.Outputs.extra_txos', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='total', full_name='pb.Outputs.total', index=2, + number=3, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='offset', full_name='pb.Outputs.offset', index=3, + number=4, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='blocked', full_name='pb.Outputs.blocked', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='blocked_total', full_name='pb.Outputs.blocked_total', index=5, + number=6, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21, + serialized_end=172, +) + + +_OUTPUT = _descriptor.Descriptor( + name='Output', + full_name='pb.Output', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='tx_hash', full_name='pb.Output.tx_hash', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='nout', full_name='pb.Output.nout', index=1, + number=2, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='height', full_name='pb.Output.height', index=2, + number=3, type=13, cpp_type=3, label=1, + has_default_value=False, 
default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='claim', full_name='pb.Output.claim', index=3, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='error', full_name='pb.Output.error', index=4, + number=15, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='meta', full_name='pb.Output.meta', + index=0, containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[]), + ], + serialized_start=174, + serialized_end=297, +) + + +_CLAIMMETA = _descriptor.Descriptor( + name='ClaimMeta', + full_name='pb.ClaimMeta', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='channel', full_name='pb.ClaimMeta.channel', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='repost', full_name='pb.ClaimMeta.repost', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='short_url', full_name='pb.ClaimMeta.short_url', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='canonical_url', full_name='pb.ClaimMeta.canonical_url', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='is_controlling', full_name='pb.ClaimMeta.is_controlling', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='take_over_height', 
full_name='pb.ClaimMeta.take_over_height', index=5, + number=6, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='creation_height', full_name='pb.ClaimMeta.creation_height', index=6, + number=7, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='activation_height', full_name='pb.ClaimMeta.activation_height', index=7, + number=8, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='expiration_height', full_name='pb.ClaimMeta.expiration_height', index=8, + number=9, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='claims_in_channel', full_name='pb.ClaimMeta.claims_in_channel', index=9, + number=10, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='reposted', full_name='pb.ClaimMeta.reposted', index=10, + number=11, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='effective_amount', full_name='pb.ClaimMeta.effective_amount', index=11, + number=20, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='support_amount', full_name='pb.ClaimMeta.support_amount', index=12, + number=21, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='trending_score', full_name='pb.ClaimMeta.trending_score', index=13, + number=22, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + 
syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=300, + serialized_end=658, +) + + +_ERROR = _descriptor.Descriptor( + name='Error', + full_name='pb.Error', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='pb.Error.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='text', full_name='pb.Error.text', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='blocked', full_name='pb.Error.blocked', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _ERROR_CODE, + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=661, + serialized_end=809, +) + + +_BLOCKED = _descriptor.Descriptor( + name='Blocked', + full_name='pb.Blocked', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='count', full_name='pb.Blocked.count', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='channel', full_name='pb.Blocked.channel', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=811, + serialized_end=864, +) + +_OUTPUTS.fields_by_name['txos'].message_type = _OUTPUT +_OUTPUTS.fields_by_name['extra_txos'].message_type = _OUTPUT +_OUTPUTS.fields_by_name['blocked'].message_type = _BLOCKED +_OUTPUT.fields_by_name['claim'].message_type = _CLAIMMETA +_OUTPUT.fields_by_name['error'].message_type = _ERROR +_OUTPUT.oneofs_by_name['meta'].fields.append( + _OUTPUT.fields_by_name['claim']) +_OUTPUT.fields_by_name['claim'].containing_oneof = _OUTPUT.oneofs_by_name['meta'] +_OUTPUT.oneofs_by_name['meta'].fields.append( + _OUTPUT.fields_by_name['error']) +_OUTPUT.fields_by_name['error'].containing_oneof = _OUTPUT.oneofs_by_name['meta'] +_CLAIMMETA.fields_by_name['channel'].message_type = _OUTPUT +_CLAIMMETA.fields_by_name['repost'].message_type = _OUTPUT 
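+# Wiring note for the statements above: Outputs.txos/extra_txos are repeated
+# Output messages, an Output carries either 'claim' or 'error' through the
+# 'meta' oneof (setting one clears the other), and ClaimMeta.channel/repost are
+# themselves Outputs, which is how a resolve result nests a claim's channel or
+# reposted claim inline.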
+_ERROR.fields_by_name['code'].enum_type = _ERROR_CODE +_ERROR.fields_by_name['blocked'].message_type = _BLOCKED +_ERROR_CODE.containing_type = _ERROR +_BLOCKED.fields_by_name['channel'].message_type = _OUTPUT +DESCRIPTOR.message_types_by_name['Outputs'] = _OUTPUTS +DESCRIPTOR.message_types_by_name['Output'] = _OUTPUT +DESCRIPTOR.message_types_by_name['ClaimMeta'] = _CLAIMMETA +DESCRIPTOR.message_types_by_name['Error'] = _ERROR +DESCRIPTOR.message_types_by_name['Blocked'] = _BLOCKED +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Outputs = _reflection.GeneratedProtocolMessageType('Outputs', (_message.Message,), { + 'DESCRIPTOR' : _OUTPUTS, + '__module__' : 'result_pb2' + # @@protoc_insertion_point(class_scope:pb.Outputs) + }) +_sym_db.RegisterMessage(Outputs) + +Output = _reflection.GeneratedProtocolMessageType('Output', (_message.Message,), { + 'DESCRIPTOR' : _OUTPUT, + '__module__' : 'result_pb2' + # @@protoc_insertion_point(class_scope:pb.Output) + }) +_sym_db.RegisterMessage(Output) + +ClaimMeta = _reflection.GeneratedProtocolMessageType('ClaimMeta', (_message.Message,), { + 'DESCRIPTOR' : _CLAIMMETA, + '__module__' : 'result_pb2' + # @@protoc_insertion_point(class_scope:pb.ClaimMeta) + }) +_sym_db.RegisterMessage(ClaimMeta) + +Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), { + 'DESCRIPTOR' : _ERROR, + '__module__' : 'result_pb2' + # @@protoc_insertion_point(class_scope:pb.Error) + }) +_sym_db.RegisterMessage(Error) + +Blocked = _reflection.GeneratedProtocolMessageType('Blocked', (_message.Message,), { + 'DESCRIPTOR' : _BLOCKED, + '__module__' : 'result_pb2' + # @@protoc_insertion_point(class_scope:pb.Blocked) + }) +_sym_db.RegisterMessage(Blocked) + + +DESCRIPTOR._options = None +# @@protoc_insertion_point(module_scope) diff --git a/scribe/schema/types/v2/result_pb2_grpc.py b/scribe/schema/types/v2/result_pb2_grpc.py new file mode 100644 index 0000000..2daafff --- /dev/null +++ b/scribe/schema/types/v2/result_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/scribe/schema/types/v2/support_pb2.py b/scribe/schema/types/v2/support_pb2.py new file mode 100644 index 0000000..2e3fdfc --- /dev/null +++ b/scribe/schema/types/v2/support_pb2.py @@ -0,0 +1,76 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: support.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='support.proto', + package='pb', + syntax='proto3', + serialized_pb=_b('\n\rsupport.proto\x12\x02pb\")\n\x07Support\x12\r\n\x05\x65moji\x18\x01 \x01(\t\x12\x0f\n\x07\x63omment\x18\x02 \x01(\tb\x06proto3') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_SUPPORT = _descriptor.Descriptor( + name='Support', + full_name='pb.Support', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='emoji', full_name='pb.Support.emoji', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='comment', full_name='pb.Support.comment', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=21, + serialized_end=62, +) + +DESCRIPTOR.message_types_by_name['Support'] = _SUPPORT + +Support = _reflection.GeneratedProtocolMessageType('Support', (_message.Message,), dict( + DESCRIPTOR = _SUPPORT, + __module__ = 'support_pb2' + # @@protoc_insertion_point(class_scope:pb.Support) + )) +_sym_db.RegisterMessage(Support) + + +# @@protoc_insertion_point(module_scope) diff --git a/scribe/schema/url.py b/scribe/schema/url.py new file mode 100644 index 0000000..a1081b1 --- /dev/null +++ b/scribe/schema/url.py @@ -0,0 +1,130 @@ +import re +import unicodedata +from typing import NamedTuple, Tuple + + +def _create_url_regex(): + # see https://spec.lbry.com/ and test_url.py + invalid_names_regex = \ + r"[^=&#:$@%?;\"/\\<>%{}|^~`\[\]" \ + r"\u0000-\u0020\uD800-\uDFFF\uFFFE-\uFFFF]+" + + def _named(name, regex): + return "(?P<" + name + ">" + regex + ")" + + def _group(regex): + return "(?:" + regex + ")" + + def _oneof(*choices): + return _group('|'.join(choices)) + + def _claim(name, prefix=""): + return _group( + _named(name+"_name", prefix + invalid_names_regex) + + _oneof( + _group('[:#]' + _named(name+"_claim_id", "[0-9a-f]{1,40}")), + _group(r'\$' + _named(name+"_amount_order", '[1-9][0-9]*')) + ) + '?' + ) + + return ( + '^' + + _named("scheme", "lbry://") + '?' 
+ + _oneof( + _group(_claim("channel_with_stream", "@") + "/" + _claim("stream_in_channel")), + _claim("channel", "@"), + _claim("stream") + ) + + '$' + ) + + +URL_REGEX = _create_url_regex() + + +def normalize_name(name): + return unicodedata.normalize('NFD', name).casefold() + + +class PathSegment(NamedTuple): + name: str + claim_id: str = None + amount_order: int = None + + @property + def normalized(self): + return normalize_name(self.name) + + @property + def is_shortid(self): + return self.claim_id is not None and len(self.claim_id) < 40 + + @property + def is_fullid(self): + return self.claim_id is not None and len(self.claim_id) == 40 + + def to_dict(self): + q = {'name': self.name} + if self.claim_id is not None: + q['claim_id'] = self.claim_id + if self.amount_order is not None: + q['amount_order'] = self.amount_order + return q + + def __str__(self): + if self.claim_id is not None: + return f"{self.name}:{self.claim_id}" + elif self.amount_order is not None: + return f"{self.name}${self.amount_order}" + return self.name + + +class URL(NamedTuple): + stream: PathSegment + channel: PathSegment + + @property + def has_channel(self): + return self.channel is not None + + @property + def has_stream(self): + return self.stream is not None + + @property + def has_stream_in_channel(self): + return self.has_channel and self.has_stream + + @property + def parts(self) -> Tuple: + if self.has_stream_in_channel: + return self.channel, self.stream + if self.has_channel: + return self.channel, + return self.stream, + + def __str__(self): + return f"lbry://{'/'.join(str(p) for p in self.parts)}" + + @classmethod + def parse(cls, url): + match = re.match(URL_REGEX, url) + + if match is None: + raise ValueError('Invalid LBRY URL') + + segments = {} + parts = match.groupdict() + for segment in ('channel', 'stream', 'channel_with_stream', 'stream_in_channel'): + if parts[f'{segment}_name'] is not None: + segments[segment] = PathSegment( + parts[f'{segment}_name'], + parts[f'{segment}_claim_id'], + parts[f'{segment}_amount_order'] + ) + + if 'channel_with_stream' in segments: + segments['channel'] = segments['channel_with_stream'] + segments['stream'] = segments['stream_in_channel'] + + return cls(segments.get('stream', None), segments.get('channel', None)) diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..857e80e --- /dev/null +++ b/setup.py @@ -0,0 +1,69 @@ +import os +from scribe import __name__, __version__ +from setuptools import setup, find_packages + +BASE = os.path.dirname(__file__) +with open(os.path.join(BASE, 'README.md'), encoding='utf-8') as fh: + long_description = fh.read() + + +setup( + name=__name__, + version=__version__, + author="LBRY Inc.", + author_email="hello@lbry.com", + url="https://lbry.com", + description="A decentralized media library and marketplace", + long_description=long_description, + long_description_content_type="text/markdown", + keywords="lbry protocol electrum spv", + license='MIT', + python_requires='>=3.7', + packages=find_packages(exclude=('tests',)), + zip_safe=False, + entry_points={ + 'console_scripts': [ + 'scribe=scribe.cli:run_writer_forever', + 'scribe-hub=scribe.cli:run_server_forever', + 'scribe-elastic-sync=scribe.cli:run_es_sync_forever', + ], + }, + install_requires=[ + 'aiohttp==3.5.4', + 'certifi>=2018.11.29', + # 'colorama==0.3.7', + # 'distro==1.4.0', + 'base58==1.0.0', + 'cffi==1.13.2', + 'cryptography==2.5', + 'protobuf==3.17.2', + 'msgpack==0.6.1', + 'prometheus_client==0.7.1', + 'ecdsa==0.13.3', + 'pyyaml==5.3.1', + 
'coincurve==15.0.0', +        'pbkdf2==1.3', +        'attrs==18.2.0', +        'elasticsearch==7.10.1', +        'lbry-rocksdb==0.8.2', +        'uvloop' +    ], +    extras_require={ +        'lint': ['pylint==2.10.0'], +        'test': ['coverage'], +    }, +    classifiers=[ +        'Framework :: AsyncIO', +        'Intended Audience :: Developers', +        'Intended Audience :: System Administrators', +        'License :: OSI Approved :: MIT License', +        'Programming Language :: Python :: 3', +        'Operating System :: OS Independent', +        'Topic :: Internet', +        'Topic :: Software Development :: Testing', +        'Topic :: Software Development :: Libraries :: Python Modules', +        'Topic :: System :: Distributed Computing', +        'Topic :: Utilities', +    ], +) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_resolve_command.py b/tests/test_resolve_command.py new file mode 100644 index 0000000..6a266df --- /dev/null +++ b/tests/test_resolve_command.py @@ -0,0 +1,1798 @@ +import asyncio +import json +import hashlib +import sys +from bisect import bisect_right +from binascii import hexlify, unhexlify +from collections import defaultdict +from typing import NamedTuple, List +from lbry.testcase import CommandTestCase +from lbry.wallet.transaction import Transaction, Output +from scribe.schema.compat import OldClaimMessage +from lbry.crypto.hash import sha256 +from lbry.crypto.base58 import Base58 + + +class ClaimStateValue(NamedTuple): +    claim_id: str +    activation_height: int +    active_in_lbrycrd: bool + + +class BaseResolveTestCase(CommandTestCase): + +    def assertMatchESClaim(self, claim_from_es, claim_from_db): +        self.assertEqual(claim_from_es['claim_hash'][::-1].hex(), claim_from_db.claim_hash.hex()) +        self.assertEqual(claim_from_es['claim_id'], claim_from_db.claim_hash.hex()) +        self.assertEqual(claim_from_es['activation_height'], claim_from_db.activation_height, f"es height: {claim_from_es['activation_height']}, rocksdb height: {claim_from_db.activation_height}") +        self.assertEqual(claim_from_es['last_take_over_height'], claim_from_db.last_takeover_height) +        self.assertEqual(claim_from_es['tx_id'], claim_from_db.tx_hash[::-1].hex()) +        self.assertEqual(claim_from_es['tx_nout'], claim_from_db.position) +        self.assertEqual(claim_from_es['amount'], claim_from_db.amount) +        self.assertEqual(claim_from_es['effective_amount'], claim_from_db.effective_amount) + +    def assertMatchDBClaim(self, expected, claim): +        self.assertEqual(expected['claimid'], claim.claim_hash.hex()) +        self.assertEqual(expected['validatheight'], claim.activation_height) +        self.assertEqual(expected['lasttakeoverheight'], claim.last_takeover_height) +        self.assertEqual(expected['txid'], claim.tx_hash[::-1].hex()) +        self.assertEqual(expected['n'], claim.position) +        self.assertEqual(expected['amount'], claim.amount) +        self.assertEqual(expected['effectiveamount'], claim.effective_amount) + +    async def assertResolvesToClaimId(self, name, claim_id): +        other = await self.resolve(name) +        if claim_id is None: +            self.assertIn('error', other) +            self.assertEqual(other['error']['name'], 'NOT_FOUND') +            claims_from_es = (await self.conductor.spv_node.server.session_manager.search_index.search(name=name))[0] +            claims_from_es = [c['claim_hash'][::-1].hex() for c in claims_from_es] +            self.assertNotIn(claim_id, claims_from_es) +        else: +            claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(claim_id=claim_id) +            self.assertEqual(claim_id, other['claim_id']) +            self.assertEqual(claim_id,
claim_from_es[0][0]['claim_hash'][::-1].hex()) + +    async def assertNoClaimForName(self, name: str): +        lbrycrd_winning = json.loads(await self.blockchain._cli_cmnd('getclaimsforname', name)) +        stream, channel, _, _ = await self.conductor.spv_node.server.db.resolve(name) +        if 'claims' in lbrycrd_winning and lbrycrd_winning['claims'] is not None: +            self.assertEqual(len(lbrycrd_winning['claims']), 0) +        if stream is not None: +            self.assertIsInstance(stream, LookupError) +        else: +            self.assertIsInstance(channel, LookupError) +        claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(name=name) +        self.assertListEqual([], claim_from_es[0]) + +    async def assertNoClaim(self, name: str, claim_id: str): +        expected = json.loads(await self.blockchain._cli_cmnd('getclaimsfornamebyid', name, '["' + claim_id + '"]')) +        if 'claims' in expected and expected['claims'] is not None: +            # ensure that if we do have the matching claim, it is not active +            self.assertEqual(expected['claims'][0]['effectiveamount'], 0) + +        claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(claim_id=claim_id) +        self.assertListEqual([], claim_from_es[0]) +        claim = await self.conductor.spv_node.server.db.fs_getclaimbyid(claim_id) +        self.assertIsNone(claim) + +    async def assertMatchWinningClaim(self, name): +        expected = json.loads(await self.blockchain._cli_cmnd('getclaimsfornamebybid', name, "[0]")) +        stream, channel, _, _ = await self.conductor.spv_node.server.db.resolve(name) +        claim = stream if stream else channel +        expected['claims'][0]['lasttakeoverheight'] = expected['lasttakeoverheight'] +        await self._assertMatchClaim(expected['claims'][0], claim) +        return claim + +    async def _assertMatchClaim(self, expected, claim): +        self.assertMatchDBClaim(expected, claim) +        claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search( +            claim_id=claim.claim_hash.hex() +        ) +        self.assertEqual(len(claim_from_es[0]), 1) +        self.assertMatchESClaim(claim_from_es[0][0], claim) +        self._check_supports(claim.claim_hash.hex(), expected.get('supports', []), +                             claim_from_es[0][0]['support_amount']) + +    async def assertMatchClaim(self, name, claim_id, is_active_in_lbrycrd=True): +        claim = await self.conductor.spv_node.server.db.fs_getclaimbyid(claim_id) +        claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search( +            claim_id=claim.claim_hash.hex() +        ) +        self.assertEqual(len(claim_from_es[0]), 1) +        self.assertEqual(claim_from_es[0][0]['claim_hash'][::-1].hex(), claim.claim_hash.hex()) +        self.assertMatchESClaim(claim_from_es[0][0], claim) + +        expected = json.loads(await self.blockchain._cli_cmnd('getclaimsfornamebyid', name, '["' + claim_id + '"]')) +        if is_active_in_lbrycrd: +            if not expected: +                self.assertIsNone(claim) +                return +            expected['claims'][0]['lasttakeoverheight'] = expected['lasttakeoverheight'] +            self.assertMatchDBClaim(expected['claims'][0], claim) +            self._check_supports(claim.claim_hash.hex(), expected['claims'][0].get('supports', []), +                                 claim_from_es[0][0]['support_amount']) +        else: +            if 'claims' in expected and expected['claims'] is not None: +                # ensure that if we do have the matching claim, it is not active +                self.assertEqual(expected['claims'][0]['effectiveamount'], 0) +            return claim + +    async def assertMatchClaimIsWinning(self, name, claim_id): +        self.assertEqual(claim_id, (await self.assertMatchWinningClaim(name)).claim_hash.hex()) +        await self.assertMatchClaimsForName(name) + +    def _check_supports(self,
claim_id, lbrycrd_supports, es_support_amount): + total_lbrycrd_amount = 0.0 + total_es_amount = 0.0 + active_es_amount = 0.0 + db = self.conductor.spv_node.server.db + es_supports = db.get_supports(bytes.fromhex(claim_id)) + + # we're only concerned about active supports here, and they should match + self.assertTrue(len(es_supports) >= len(lbrycrd_supports)) + + for i, (tx_num, position, amount) in enumerate(es_supports): + total_es_amount += amount + valid_height = db.get_activation(tx_num, position, is_support=True) + if valid_height > db.db_height: + continue + active_es_amount += amount + txid = db.prefix_db.tx_hash.get(tx_num, deserialize_value=False)[::-1].hex() + support = next(filter(lambda s: s['txid'] == txid and s['n'] == position, lbrycrd_supports)) + total_lbrycrd_amount += support['amount'] + self.assertEqual(support['height'], bisect_right(db.tx_counts, tx_num)) + self.assertEqual(support['validatheight'], valid_height) + + self.assertEqual(total_es_amount, es_support_amount) + self.assertEqual(active_es_amount, total_lbrycrd_amount) + + async def assertMatchClaimsForName(self, name): + expected = json.loads(await self.blockchain._cli_cmnd('getclaimsforname', name, "", "true")) + db = self.conductor.spv_node.server.db + + for c in expected['claims']: + c['lasttakeoverheight'] = expected['lasttakeoverheight'] + claim_id = c['claimid'] + claim_hash = bytes.fromhex(claim_id) + claim = db._fs_get_claim_by_hash(claim_hash) + self.assertMatchDBClaim(c, claim) + + claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search( + claim_id=claim_id + ) + self.assertEqual(len(claim_from_es[0]), 1) + self.assertEqual(claim_from_es[0][0]['claim_hash'][::-1].hex(), claim_id) + self.assertMatchESClaim(claim_from_es[0][0], claim) + self._check_supports(claim_id, c.get('supports', []), + claim_from_es[0][0]['support_amount']) + + async def assertNameState(self, height: int, name: str, winning_claim_id: str, last_takeover_height: int, + non_winning_claims: List[ClaimStateValue]): + self.assertEqual(height, self.conductor.spv_node.server.db.db_height) + await self.assertMatchClaimIsWinning(name, winning_claim_id) + for non_winning in non_winning_claims: + claim = await self.assertMatchClaim( + name, non_winning.claim_id, is_active_in_lbrycrd=non_winning.active_in_lbrycrd + ) + self.assertEqual(non_winning.activation_height, claim.activation_height) + self.assertEqual(last_takeover_height, claim.last_takeover_height) + + +class ResolveCommand(BaseResolveTestCase): + async def test_colliding_short_id(self): + prefixes = defaultdict(list) + + colliding_claim_ids = [] + first_claims_one_char_shortid = {} + + while True: + chan = self.get_claim_id( + await self.channel_create('@abc', '0.01', allow_duplicate_name=True) + ) + if chan[:1] not in first_claims_one_char_shortid: + first_claims_one_char_shortid[chan[:1]] = chan + prefixes[chan[:2]].append(chan) + if len(prefixes[chan[:2]]) > 1: + colliding_claim_ids.extend(prefixes[chan[:2]]) + break + first_claim = first_claims_one_char_shortid[colliding_claim_ids[0][:1]] + await self.assertResolvesToClaimId( + f'@abc#{colliding_claim_ids[0][:1]}', first_claim + ) + collision_depth = 0 + for c1, c2 in zip(colliding_claim_ids[0], colliding_claim_ids[1]): + if c1 == c2: + collision_depth += 1 + else: + break + await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[0][:2]}', colliding_claim_ids[0]) + await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[0][:7]}', colliding_claim_ids[0]) + await 
self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[0][:17]}', colliding_claim_ids[0]) + await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[0]}', colliding_claim_ids[0]) + await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[1][:collision_depth + 1]}', colliding_claim_ids[1]) + await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[1][:7]}', colliding_claim_ids[1]) + await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[1][:17]}', colliding_claim_ids[1]) + await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[1]}', colliding_claim_ids[1]) + + async def test_abandon_channel_and_claims_in_same_tx(self): + channel_id = self.get_claim_id( + await self.channel_create('@abc', '0.01') + ) + await self.stream_create('foo', '0.01', channel_id=channel_id) + await self.channel_update(channel_id, bid='0.001') + foo2_id = self.get_claim_id(await self.stream_create('foo2', '0.01', channel_id=channel_id)) + await self.stream_update(foo2_id, bid='0.0001', channel_id=channel_id, confirm=False) + tx = await self.stream_create('foo3', '0.01', channel_id=channel_id, confirm=False, return_tx=True) + await self.ledger.wait(tx) + + # db = self.conductor.spv_node.server.bp.db + # claims = list(db.all_claims_producer()) + # print("claims", claims) + await self.daemon.jsonrpc_txo_spend(blocking=True) + await self.generate(1) + await self.assertNoClaimForName('@abc') + await self.assertNoClaimForName('foo') + await self.assertNoClaimForName('foo2') + await self.assertNoClaimForName('foo3') + + async def test_resolve_response(self): + channel_id = self.get_claim_id( + await self.channel_create('@abc', '0.01') + ) + + # resolving a channel @abc + response = await self.resolve('lbry://@abc') + self.assertEqual(response['name'], '@abc') + self.assertEqual(response['value_type'], 'channel') + self.assertEqual(response['meta']['claims_in_channel'], 0) + + await self.stream_create('foo', '0.01', channel_id=channel_id) + await self.stream_create('foo2', '0.01', channel_id=channel_id) + + # resolving a channel @abc with some claims in it + response['confirmations'] += 2 + response['meta']['claims_in_channel'] = 2 + self.assertEqual(response, await self.resolve('lbry://@abc')) + + # resolving claim foo within channel @abc + claim = await self.resolve('lbry://@abc/foo') + self.assertEqual(claim['name'], 'foo') + self.assertEqual(claim['value_type'], 'stream') + self.assertEqual(claim['signing_channel']['name'], '@abc') + self.assertTrue(claim['is_channel_signature_valid']) + self.assertEqual( + claim['timestamp'], + self.ledger.headers.estimated_timestamp(claim['height']) + ) + self.assertEqual( + claim['signing_channel']['timestamp'], + self.ledger.headers.estimated_timestamp(claim['signing_channel']['height']) + ) + + # resolving claim foo by itself + self.assertEqual(claim, await self.resolve('lbry://foo')) + # resolving from the given permanent url + self.assertEqual(claim, await self.resolve(claim['permanent_url'])) + + # resolving multiple at once + response = await self.out(self.daemon.jsonrpc_resolve(['lbry://foo', 'lbry://foo2'])) + self.assertSetEqual({'lbry://foo', 'lbry://foo2'}, set(response)) + claim = response['lbry://foo2'] + self.assertEqual(claim['name'], 'foo2') + self.assertEqual(claim['value_type'], 'stream') + self.assertEqual(claim['signing_channel']['name'], '@abc') + self.assertTrue(claim['is_channel_signature_valid']) + + # resolve has correct confirmations + tx_details = await self.blockchain.get_raw_transaction(claim['txid']) + 
self.assertEqual(claim['confirmations'], json.loads(tx_details)['confirmations']) + +        # FIXME: claimname/updateclaim is gone. #3480 wip, unblock #3479 +        # resolve handles invalid data +        # await self.blockchain_claim_name("gibberish", hexlify(b"{'invalid':'json'}").decode(), "0.1") +        # await self.generate(1) +        # response = await self.out(self.daemon.jsonrpc_resolve("lbry://gibberish")) +        # self.assertSetEqual({'lbry://gibberish'}, set(response)) +        # claim = response['lbry://gibberish'] +        # self.assertEqual(claim['name'], 'gibberish') +        # self.assertNotIn('value', claim) + +        # resolve retries +        await self.conductor.spv_node.stop() +        resolve_task = asyncio.create_task(self.resolve('foo')) +        await self.conductor.spv_node.start(self.conductor.lbcwallet_node) +        self.assertIsNotNone((await resolve_task)['claim_id']) + +    async def test_winning_by_effective_amount(self): +        # first one remains winner unless something else changes +        claim_id1 = self.get_claim_id( +            await self.channel_create('@foo', allow_duplicate_name=True)) +        await self.assertResolvesToClaimId('@foo', claim_id1) +        claim_id2 = self.get_claim_id( +            await self.channel_create('@foo', allow_duplicate_name=True)) +        await self.assertResolvesToClaimId('@foo', claim_id1) +        claim_id3 = self.get_claim_id( +            await self.channel_create('@foo', allow_duplicate_name=True)) +        await self.assertResolvesToClaimId('@foo', claim_id1) +        # supports change the winner +        await self.support_create(claim_id3, '0.09') +        await self.assertResolvesToClaimId('@foo', claim_id3) +        await self.support_create(claim_id2, '0.19') +        await self.assertResolvesToClaimId('@foo', claim_id2) +        await self.support_create(claim_id1, '0.29') +        await self.assertResolvesToClaimId('@foo', claim_id1) + +        await self.support_abandon(claim_id1) +        await self.assertResolvesToClaimId('@foo', claim_id2) + +    async def test_advanced_resolve(self): +        claim_id1 = self.get_claim_id( +            await self.stream_create('foo', '0.7', allow_duplicate_name=True)) +        await self.assertResolvesToClaimId('foo$1', claim_id1) +        claim_id2 = self.get_claim_id( +            await self.stream_create('foo', '0.8', allow_duplicate_name=True)) +        await self.assertResolvesToClaimId('foo$1', claim_id2) +        await self.assertResolvesToClaimId('foo$2', claim_id1) +        claim_id3 = self.get_claim_id( +            await self.stream_create('foo', '0.9', allow_duplicate_name=True)) +        # plain winning claim +        await self.assertResolvesToClaimId('foo', claim_id3) + +        # amount order resolution +        await self.assertResolvesToClaimId('foo$1', claim_id3) +        await self.assertResolvesToClaimId('foo$2', claim_id2) +        await self.assertResolvesToClaimId('foo$3', claim_id1) +        await self.assertResolvesToClaimId('foo$4', None) + +    # async def test_partial_claim_id_resolve(self): +    #     # add some noise +    #     await self.channel_create('@abc', '0.1', allow_duplicate_name=True) +    #     await self.channel_create('@abc', '0.2', allow_duplicate_name=True) +    #     await self.channel_create('@abc', '1.0', allow_duplicate_name=True) +    # +    #     channel_id = self.get_claim_id(await self.channel_create('@abc', '1.1', allow_duplicate_name=True)) +    #     await self.assertResolvesToClaimId(f'@abc', channel_id) +    #     await self.assertResolvesToClaimId(f'@abc#{channel_id[:10]}', channel_id) +    #     await self.assertResolvesToClaimId(f'@abc#{channel_id}', channel_id) +    # +    #     channel = await self.claim_get(channel_id) +    #     await self.assertResolvesToClaimId(channel['short_url'], channel_id) +    #     await self.assertResolvesToClaimId(channel['canonical_url'], channel_id) +    #     await
self.assertResolvesToClaimId(channel['permanent_url'], channel_id) +    # +    #     # add some noise +    #     await self.stream_create('foo', '0.1', allow_duplicate_name=True, channel_id=channel['claim_id']) +    #     await self.stream_create('foo', '0.2', allow_duplicate_name=True, channel_id=channel['claim_id']) +    #     await self.stream_create('foo', '0.3', allow_duplicate_name=True, channel_id=channel['claim_id']) +    # +    #     claim_id1 = self.get_claim_id( +    #         await self.stream_create('foo', '0.7', allow_duplicate_name=True, channel_id=channel['claim_id'])) +    #     claim1 = await self.claim_get(claim_id=claim_id1) +    # +    #     await self.assertResolvesToClaimId('foo', claim_id1) +    #     await self.assertResolvesToClaimId('@abc/foo', claim_id1) +    #     await self.assertResolvesToClaimId(claim1['short_url'], claim_id1) +    #     await self.assertResolvesToClaimId(claim1['canonical_url'], claim_id1) +    #     await self.assertResolvesToClaimId(claim1['permanent_url'], claim_id1) +    # +    #     claim_id2 = self.get_claim_id( +    #         await self.stream_create('foo', '0.8', allow_duplicate_name=True, channel_id=channel['claim_id'])) +    #     claim2 = await self.claim_get(claim_id=claim_id2) +    #     await self.assertResolvesToClaimId('foo', claim_id2) +    #     await self.assertResolvesToClaimId('@abc/foo', claim_id2) +    #     await self.assertResolvesToClaimId(claim2['short_url'], claim_id2) +    #     await self.assertResolvesToClaimId(claim2['canonical_url'], claim_id2) +    #     await self.assertResolvesToClaimId(claim2['permanent_url'], claim_id2) + +    async def test_abandoned_channel_with_signed_claims(self): +        channel = (await self.channel_create('@abc', '1.0'))['outputs'][0] +        orphan_claim = await self.stream_create('on-channel-claim', '0.0001', channel_id=channel['claim_id']) +        abandoned_channel_id = channel['claim_id'] +        await self.channel_abandon(txid=channel['txid'], nout=0) +        channel = (await self.channel_create('@abc', '1.0'))['outputs'][0] +        orphan_claim_id = self.get_claim_id(orphan_claim) + +        # Original channel doesn't exist anymore, so the signature is invalid. For invalid signatures, resolution is +        # only possible outside a channel +        self.assertEqual( +            {'error': { +                'name': 'NOT_FOUND', +                'text': 'Could not find claim at "lbry://@abc/on-channel-claim".', +            }}, +            await self.resolve('lbry://@abc/on-channel-claim') +        ) +        response = await self.resolve('lbry://on-channel-claim') +        self.assertFalse(response['is_channel_signature_valid']) +        self.assertEqual({'channel_id': abandoned_channel_id}, response['signing_channel']) +        direct_uri = 'lbry://on-channel-claim#' + orphan_claim_id +        response = await self.resolve(direct_uri) +        self.assertFalse(response['is_channel_signature_valid']) +        self.assertEqual({'channel_id': abandoned_channel_id}, response['signing_channel']) +        await self.stream_abandon(claim_id=orphan_claim_id) + +        uri = 'lbry://@abc/on-channel-claim' +        # now, claim something on this channel (it will update the invalid claim, but we save and forcefully restore) +        valid_claim = await self.stream_create('on-channel-claim', '0.00000001', channel_id=channel['claim_id']) +        # resolves normally +        response = await self.resolve(uri) +        self.assertTrue(response['is_channel_signature_valid']) + +        # oops! claimed a valid conflict! (this happens in the wild, mostly by accident or race condition) +        await self.stream_create( +            'on-channel-claim', '0.00000001', channel_id=channel['claim_id'], allow_duplicate_name=True +        ) + +        # it still resolves!
but to the older claim + response = await self.resolve(uri) + self.assertTrue(response['is_channel_signature_valid']) + self.assertEqual(response['txid'], valid_claim['txid']) + claims = [await self.resolve('on-channel-claim'), await self.resolve('on-channel-claim$2')] + self.assertEqual(2, len(claims)) + self.assertEqual( + {channel['claim_id']}, {claim['signing_channel']['claim_id'] for claim in claims} + ) + + async def test_normalization_resolution(self): + + one = 'ΣίσυφοςfiÆ' + two = 'ΣΊΣΥΦΟσFIæ' + + c1 = await self.stream_create(one, '0.1') + c2 = await self.stream_create(two, '0.2') + + loser_id = self.get_claim_id(c1) + winner_id = self.get_claim_id(c2) + + # winning_one = await self.check_lbrycrd_winning(one) + await self.assertMatchClaimIsWinning(two, winner_id) + + claim1 = await self.resolve(f'lbry://{one}') + claim2 = await self.resolve(f'lbry://{two}') + claim3 = await self.resolve(f'lbry://{one}:{winner_id[:5]}') + claim4 = await self.resolve(f'lbry://{two}:{winner_id[:5]}') + + claim5 = await self.resolve(f'lbry://{one}:{loser_id[:5]}') + claim6 = await self.resolve(f'lbry://{two}:{loser_id[:5]}') + + self.assertEqual(winner_id, claim1['claim_id']) + self.assertEqual(winner_id, claim2['claim_id']) + self.assertEqual(winner_id, claim3['claim_id']) + self.assertEqual(winner_id, claim4['claim_id']) + + self.assertEqual(two, claim1['name']) + self.assertEqual(two, claim2['name']) + self.assertEqual(two, claim3['name']) + self.assertEqual(two, claim4['name']) + + self.assertEqual(loser_id, claim5['claim_id']) + self.assertEqual(loser_id, claim6['claim_id']) + self.assertEqual(one, claim5['name']) + self.assertEqual(one, claim6['name']) + + async def test_resolve_old_claim(self): + channel = await self.daemon.jsonrpc_channel_create('@olds', '1.0', blocking=True) + await self.confirm_tx(channel.id) + address = channel.outputs[0].get_address(self.account.ledger) + claim = generate_signed_legacy(address, channel.outputs[0]) + tx = await Transaction.claim_create('example', claim.SerializeToString(), 1, address, [self.account], self.account) + await tx.sign([self.account]) + await self.broadcast_and_confirm(tx) + + response = await self.resolve('@olds/example') + self.assertTrue('is_channel_signature_valid' in response, str(response)) + self.assertTrue(response['is_channel_signature_valid']) + + claim.publisherSignature.signature = bytes(reversed(claim.publisherSignature.signature)) + tx = await Transaction.claim_create( + 'bad_example', claim.SerializeToString(), 1, address, [self.account], self.account + ) + await tx.sign([self.account]) + await self.broadcast_and_confirm(tx) + + response = await self.resolve('bad_example') + self.assertFalse(response['is_channel_signature_valid']) + self.assertEqual( + {'error': { + 'name': 'NOT_FOUND', + 'text': 'Could not find claim at "@olds/bad_example".', + }}, + await self.resolve('@olds/bad_example') + ) + + async def test_resolve_with_includes(self): + wallet2 = await self.daemon.jsonrpc_wallet_create('wallet2', create_account=True) + address2 = await self.daemon.jsonrpc_address_unused(wallet_id=wallet2.id) + + await self.wallet_send('1.0', address2) + + stream = await self.stream_create( + 'priced', '0.1', wallet_id=wallet2.id, + fee_amount='0.5', fee_currency='LBC', fee_address=address2 + ) + stream_id = self.get_claim_id(stream) + + resolve = await self.resolve('priced') + self.assertNotIn('is_my_output', resolve) + self.assertNotIn('purchase_receipt', resolve) + self.assertNotIn('sent_supports', resolve) + self.assertNotIn('sent_tips', 
resolve) + self.assertNotIn('received_tips', resolve) + + # is_my_output + resolve = await self.resolve('priced', include_is_my_output=True) + self.assertFalse(resolve['is_my_output']) + resolve = await self.resolve('priced', wallet_id=wallet2.id, include_is_my_output=True) + self.assertTrue(resolve['is_my_output']) + + # purchase receipt + resolve = await self.resolve('priced', include_purchase_receipt=True) + self.assertNotIn('purchase_receipt', resolve) + await self.purchase_create(stream_id) + resolve = await self.resolve('priced', include_purchase_receipt=True) + self.assertEqual('0.5', resolve['purchase_receipt']['amount']) + + # my supports and my tips + resolve = await self.resolve( + 'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True + ) + self.assertEqual('0.0', resolve['sent_supports']) + self.assertEqual('0.0', resolve['sent_tips']) + self.assertEqual('0.0', resolve['received_tips']) + await self.support_create(stream_id, '0.3') + await self.support_create(stream_id, '0.2') + await self.support_create(stream_id, '0.4', tip=True) + await self.support_create(stream_id, '0.5', tip=True) + resolve = await self.resolve( + 'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True + ) + self.assertEqual('0.5', resolve['sent_supports']) + self.assertEqual('0.9', resolve['sent_tips']) + self.assertEqual('0.0', resolve['received_tips']) + + resolve = await self.resolve( + 'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True, + wallet_id=wallet2.id + ) + self.assertEqual('0.0', resolve['sent_supports']) + self.assertEqual('0.0', resolve['sent_tips']) + self.assertEqual('0.9', resolve['received_tips']) + self.assertEqual('1.4', resolve['meta']['support_amount']) + + # make sure nothing is leaked between wallets through cached tx/txos + resolve = await self.resolve('priced') + self.assertNotIn('is_my_output', resolve) + self.assertNotIn('purchase_receipt', resolve) + self.assertNotIn('sent_supports', resolve) + self.assertNotIn('sent_tips', resolve) + self.assertNotIn('received_tips', resolve) + + +class ResolveClaimTakeovers(BaseResolveTestCase): + async def test_channel_invalidation(self): + channel_id = (await self.channel_create('@test', '0.1'))['outputs'][0]['claim_id'] + channel_id2 = (await self.channel_create('@other', '0.1'))['outputs'][0]['claim_id'] + + async def make_claim(name, amount, channel_id=None): + return ( + await self.stream_create(name, amount, channel_id=channel_id) + )['outputs'][0]['claim_id'] + + unsigned_then_signed = await make_claim('unsigned_then_signed', '0.1') + unsigned_then_updated_then_signed = await make_claim('unsigned_then_updated_then_signed', '0.1') + signed_then_unsigned = await make_claim( + 'signed_then_unsigned', '0.01', channel_id=channel_id + ) + signed_then_signed_different_chan = await make_claim( + 'signed_then_signed_different_chan', '0.01', channel_id=channel_id + ) + + self.assertIn("error", await self.resolve('@test/unsigned_then_signed')) + await self.assertMatchClaimIsWinning('unsigned_then_signed', unsigned_then_signed) + self.assertIn("error", await self.resolve('@test/unsigned_then_updated_then_signed')) + await self.assertMatchClaimIsWinning('unsigned_then_updated_then_signed', unsigned_then_updated_then_signed) + self.assertDictEqual( + await self.resolve('@test/signed_then_unsigned'), await self.resolve('signed_then_unsigned') + ) + await self.assertMatchClaimIsWinning('signed_then_unsigned', signed_then_unsigned) + # sign 
'unsigned_then_signed' and update it + await self.ledger.wait(await self.daemon.jsonrpc_stream_update( + unsigned_then_signed, '0.09', channel_id=channel_id)) + + await self.ledger.wait(await self.daemon.jsonrpc_stream_update(unsigned_then_updated_then_signed, '0.09')) + await self.ledger.wait(await self.daemon.jsonrpc_stream_update( + unsigned_then_updated_then_signed, '0.09', channel_id=channel_id)) + + await self.ledger.wait(await self.daemon.jsonrpc_stream_update( + signed_then_unsigned, '0.09', clear_channel=True)) + + await self.ledger.wait(await self.daemon.jsonrpc_stream_update( + signed_then_signed_different_chan, '0.09', channel_id=channel_id2)) + + await self.daemon.jsonrpc_txo_spend(type='channel', claim_id=channel_id) + + signed3 = await make_claim('signed3', '0.01', channel_id=channel_id) + signed4 = await make_claim('signed4', '0.01', channel_id=channel_id2) + + self.assertIn("error", await self.resolve('@test')) + self.assertIn("error", await self.resolve('@test/signed1')) + self.assertIn("error", await self.resolve('@test/unsigned_then_updated_then_signed')) + self.assertIn("error", await self.resolve('@test/unsigned_then_signed')) + self.assertIn("error", await self.resolve('@test/signed3')) + self.assertIn("error", await self.resolve('@test/signed4')) + + await self.assertMatchClaimIsWinning('signed_then_unsigned', signed_then_unsigned) + await self.assertMatchClaimIsWinning('unsigned_then_signed', unsigned_then_signed) + await self.assertMatchClaimIsWinning('unsigned_then_updated_then_signed', unsigned_then_updated_then_signed) + await self.assertMatchClaimIsWinning('signed_then_signed_different_chan', signed_then_signed_different_chan) + await self.assertMatchClaimIsWinning('signed3', signed3) + await self.assertMatchClaimIsWinning('signed4', signed4) + + self.assertDictEqual(await self.resolve('@other/signed_then_signed_different_chan'), + await self.resolve('signed_then_signed_different_chan')) + self.assertDictEqual(await self.resolve('@other/signed4'), + await self.resolve('signed4')) + + async def _test_activation_delay(self): + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + # a claim of higher amount made now will have a takeover delay of 10 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + # sanity check + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(9) + # not yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + # the new claim should have activated + await self.assertMatchClaimIsWinning(name, second_claim_id) + return first_claim_id, second_claim_id + + async def test_activation_delay(self): + await self._test_activation_delay() + + async def test_activation_delay_then_abandon_then_reclaim(self): + name = 'derp' + first_claim_id, second_claim_id = await self._test_activation_delay() + await self.daemon.jsonrpc_txo_spend(type='stream', claim_id=first_claim_id) + await self.daemon.jsonrpc_txo_spend(type='stream', claim_id=second_claim_id) + await self.generate(1) + await self.assertNoClaimForName(name) + await self._test_activation_delay() + + async def create_stream_claim(self, amount: str, name='derp') -> str: + return (await 
self.stream_create(name, amount, allow_duplicate_name=True))['outputs'][0]['claim_id'] + +    async def test_delay_takeover_with_update(self): +        name = 'derp' +        first_claim_id = await self.create_stream_claim('0.2', name) +        await self.assertMatchClaimIsWinning(name, first_claim_id) +        await self.generate(320) +        second_claim_id = await self.create_stream_claim('0.1', name) +        third_claim_id = await self.create_stream_claim('0.1', name) +        await self.generate(8) +        await self.assertNameState( +            height=537, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, +            non_winning_claims=[ +                ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=False), +                ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) +            ] +        ) + +        await self.generate(1) +        await self.assertNameState( +            height=538, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, +            non_winning_claims=[ +                ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), +                ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) +            ] +        ) + +        await self.generate(1) +        await self.assertNameState( +            height=539, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, +            non_winning_claims=[ +                ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), +                ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=True) +            ] +        ) + +        await self.daemon.jsonrpc_stream_update(third_claim_id, '0.21') +        await self.generate(1) +        await self.assertNameState( +            height=540, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, +            non_winning_claims=[ +                ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), +                ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) +            ] +        ) + +        await self.generate(9) +        await self.assertNameState( +            height=549, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, +            non_winning_claims=[ +                ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), +                ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) +            ] +        ) + +        await self.generate(1) +        await self.assertNameState( +            height=550, name=name, winning_claim_id=third_claim_id, last_takeover_height=550, +            non_winning_claims=[ +                ClaimStateValue(first_claim_id, activation_height=207, active_in_lbrycrd=True), +                ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True) +            ] +        ) + +    async def test_delay_takeover_with_update_then_update_to_lower_before_takeover(self): +        name = 'derp' +        first_claim_id = await self.create_stream_claim('0.2', name) +        await self.assertMatchClaimIsWinning(name, first_claim_id) +        await self.generate(320) +        second_claim_id = await self.create_stream_claim('0.1', name) +        third_claim_id = await self.create_stream_claim('0.1', name) +        await self.generate(8) +        await
self.assertNameState( + height=537, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=False), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=538, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=539, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=True) + ] + ) + + await self.daemon.jsonrpc_stream_update(third_claim_id, '0.21') + await self.generate(1) + await self.assertNameState( + height=540, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.generate(8) + await self.assertNameState( + height=548, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.daemon.jsonrpc_stream_update(third_claim_id, '0.09') + + await self.generate(1) + await self.assertNameState( + height=549, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=559, active_in_lbrycrd=False) + ] + ) + await self.generate(10) + await self.assertNameState( + height=559, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=559, active_in_lbrycrd=True) + ] + ) + + async def test_delay_takeover_with_update_then_update_to_lower_on_takeover(self): + name = 'derp' + first_claim_id = await self.create_stream_claim('0.2', name) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + second_claim_id = await self.create_stream_claim('0.1', name) + third_claim_id = await self.create_stream_claim('0.1', name) + await self.generate(8) + await self.assertNameState( + height=537, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=False), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=538, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await 
self.assertNameState( + height=539, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=True) + ] + ) + + await self.daemon.jsonrpc_stream_update(third_claim_id, '0.21') + await self.generate(1) + await self.assertNameState( + height=540, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.generate(8) + await self.assertNameState( + height=548, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=549, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.daemon.jsonrpc_stream_update(third_claim_id, '0.09') + await self.generate(1) + await self.assertNameState( + height=550, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=560, active_in_lbrycrd=False) + ] + ) + await self.generate(10) + await self.assertNameState( + height=560, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=560, active_in_lbrycrd=True) + ] + ) + + async def test_delay_takeover_with_update_then_update_to_lower_after_takeover(self): + name = 'derp' + first_claim_id = await self.create_stream_claim('0.2', name) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + second_claim_id = await self.create_stream_claim('0.1', name) + third_claim_id = await self.create_stream_claim('0.1', name) + await self.generate(8) + await self.assertNameState( + height=537, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=False), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) + ] + ) + await self.generate(1) + await self.assertNameState( + height=538, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=539, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=True) + ] + ) + + await 
self.daemon.jsonrpc_stream_update(third_claim_id, '0.21') + await self.generate(1) + await self.assertNameState( + height=540, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.generate(8) + await self.assertNameState( + height=548, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=549, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=550, name=name, winning_claim_id=third_claim_id, last_takeover_height=550, + non_winning_claims=[ + ClaimStateValue(first_claim_id, activation_height=207, active_in_lbrycrd=True), + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True) + ] + ) + + await self.daemon.jsonrpc_stream_update(third_claim_id, '0.09') + await self.generate(1) + await self.assertNameState( + height=551, name=name, winning_claim_id=first_claim_id, last_takeover_height=551, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=551, active_in_lbrycrd=True) + ] + ) + + async def test_resolve_signed_claims_with_fees(self): + channel_name = '@abc' + channel_id = self.get_claim_id( + await self.channel_create(channel_name, '0.01') + ) + self.assertEqual(channel_id, (await self.assertMatchWinningClaim(channel_name)).claim_hash.hex()) + stream_name = 'foo' + stream_with_no_fee = self.get_claim_id( + await self.stream_create(stream_name, '0.01', channel_id=channel_id) + ) + stream_with_fee = self.get_claim_id( + await self.stream_create('with_a_fee', '0.01', channel_id=channel_id, fee_amount='1', fee_currency='LBC') + ) + greater_than_or_equal_to_zero = [ + claim['claim_id'] for claim in ( + await self.conductor.spv_node.server.session_manager.search_index.search( + channel_id=channel_id, fee_amount=">=0" + ))[0] + ] + self.assertEqual(2, len(greater_than_or_equal_to_zero)) + self.assertSetEqual(set(greater_than_or_equal_to_zero), {stream_with_no_fee, stream_with_fee}) + greater_than_zero = [ + claim['claim_id'] for claim in ( + await self.conductor.spv_node.server.session_manager.search_index.search( + channel_id=channel_id, fee_amount=">0" + ))[0] + ] + self.assertEqual(1, len(greater_than_zero)) + self.assertSetEqual(set(greater_than_zero), {stream_with_fee}) + equal_to_zero = [ + claim['claim_id'] for claim in ( + await self.conductor.spv_node.server.session_manager.search_index.search( + channel_id=channel_id, fee_amount="<=0" + ))[0] + ] + self.assertEqual(1, len(equal_to_zero)) + self.assertSetEqual(set(equal_to_zero), {stream_with_no_fee}) + + async def test_spec_example(self): + # https://spec.lbry.com/#claim-activation-example + # this test has adjusted block heights from the example because it uses the regtest chain instead of mainnet + # on regtest, claims expire much 
faster, so we can't do the ~1000 block delay in the spec example exactly + + name = 'test' + await self.generate(494) + address = (await self.account.receiving.get_addresses(True))[0] + await self.send_to_address_and_wait(address, 400.0) + await self.account.ledger.on_address.first + await self.generate(100) + self.assertEqual(800, self.conductor.spv_node.server.db.db_height) + + # Block 801: Claim A for 10 LBC is accepted. + # It is the first claim, so it immediately becomes active and controlling. + # State: A(10) is controlling + claim_id_A = (await self.stream_create(name, '10.0', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, claim_id_A) + + # Block 1121: Claim B for 20 LBC is accepted. + # Its activation height is 1121 + min(4032, floor((1121-801) / 32)) = 1121 + 10 = 1131. + # State: A(10) is controlling, B(20) is accepted. + await self.generate(32 * 10 - 1) + self.assertEqual(1120, self.conductor.spv_node.server.db.db_height) + claim_id_B = (await self.stream_create(name, '20.0', allow_duplicate_name=True))['outputs'][0]['claim_id'] + claim_B, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_B}") + self.assertEqual(1121, self.conductor.spv_node.server.db.db_height) + self.assertEqual(1131, claim_B.activation_height) + await self.assertMatchClaimIsWinning(name, claim_id_A) + + # Block 1122: Support X for 14 LBC for claim A is accepted. + # Since it is a support for the controlling claim, it activates immediately. + # State: A(10+14) is controlling, B(20) is accepted. + await self.support_create(claim_id_A, bid='14.0') + self.assertEqual(1122, self.conductor.spv_node.server.db.db_height) + await self.assertMatchClaimIsWinning(name, claim_id_A) + + # Block 1123: Claim C for 50 LBC is accepted. + # The activation height is 1123 + min(4032, floor((1123-801) / 32)) = 1123 + 10 = 1133. + # State: A(10+14) is controlling, B(20) is accepted, C(50) is accepted. + claim_id_C = (await self.stream_create(name, '50.0', allow_duplicate_name=True))['outputs'][0]['claim_id'] + self.assertEqual(1123, self.conductor.spv_node.server.db.db_height) + claim_C, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_C}") + self.assertEqual(1133, claim_C.activation_height) + await self.assertMatchClaimIsWinning(name, claim_id_A) + + await self.generate(7) + self.assertEqual(1130, self.conductor.spv_node.server.db.db_height) + await self.assertMatchClaimIsWinning(name, claim_id_A) + await self.generate(1) + + # Block 1131: Claim B activates. It has 20 LBC, while claim A has 24 LBC (10 original + 14 from support X). There is no takeover, and claim A remains controlling. + # State: A(10+14) is controlling, B(20) is active, C(50) is accepted. + self.assertEqual(1131, self.conductor.spv_node.server.db.db_height) + await self.assertMatchClaimIsWinning(name, claim_id_A) + + # Block 1132: Claim D for 300 LBC is accepted. The activation height is 1132 + min(4032, floor((1132-801) / 32)) = 1132 + 10 = 1142. + # State: A(10+14) is controlling, B(20) is active, C(50) is accepted, D(300) is accepted. 
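+        # [editor's note] a minimal sketch (not part of the original test) of the
+        # activation delay rule that the block comments above keep applying, per
+        # https://spec.lbry.com/: delay = min(4032, floor((height - last_takeover) / 32))
+        def expected_activation(accepted_height: int, last_takeover_height: int) -> int:
+            # hypothetical helper, for illustration only
+            return accepted_height + min(4032, (accepted_height - last_takeover_height) // 32)
+        assert expected_activation(1132, 801) == 1142  # matches claim D's activation below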
+ claim_id_D = (await self.stream_create(name, '300.0', allow_duplicate_name=True))['outputs'][0]['claim_id'] + self.assertEqual(1132, self.conductor.spv_node.server.db.db_height) + claim_D, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_D}") + self.assertEqual(False, claim_D.is_controlling) + self.assertEqual(801, claim_D.last_takeover_height) + self.assertEqual(1142, claim_D.activation_height) + await self.assertMatchClaimIsWinning(name, claim_id_A) + + # Block 1133: Claim C activates. It has 50 LBC, while claim A has 24 LBC, so a takeover is initiated. The takeover height for this name is set to 1133, and therefore the activation delay for all the claims becomes min(4032, floor((1133-1133) / 32)) = 0. All the claims become active. The totals for each claim are recalculated, and claim D becomes controlling because it has the highest total. + # State: A(10+14) is active, B(20) is active, C(50) is active, D(300) is controlling + await self.generate(1) + self.assertEqual(1133, self.conductor.spv_node.server.db.db_height) + claim_D, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_D}") + self.assertEqual(True, claim_D.is_controlling) + self.assertEqual(1133, claim_D.last_takeover_height) + self.assertEqual(1133, claim_D.activation_height) + await self.assertMatchClaimIsWinning(name, claim_id_D) + + async def test_early_takeover(self): + name = 'derp' + # block 207 + first_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + + await self.generate(96) + # block 304, activates at 307 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + # block 305, activates at 308 (but gets triggered early by the takeover by the second claim) + third_claim_id = (await self.stream_create(name, '0.3', allow_duplicate_name=True))['outputs'][0]['claim_id'] + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + + async def test_early_takeover_zero_delay(self): + name = 'derp' + # block 207 + first_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + + await self.generate(96) + # block 304, activates at 307 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, first_claim_id) + # on block 307 make a third claim with a yet higher amount, it takes over with no delay because the + # second claim activates and begins the takeover on this block + third_claim_id = (await self.stream_create(name, '0.3', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, third_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + + async def 
test_early_takeover_from_support_zero_delay(self): + name = 'derp' + # block 207 + first_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + + await self.generate(96) + # block 304, activates at 307 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, first_claim_id) + third_claim_id = (await self.stream_create(name, '0.19', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + tx = await self.daemon.jsonrpc_support_create(third_claim_id, '0.1') + await self.ledger.wait(tx) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + + async def test_early_takeover_from_support_and_claim_zero_delay(self): + name = 'derp' + # block 207 + first_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + + await self.generate(96) + # block 304, activates at 307 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + + file_path = self.create_upload_file(data=b'hi!') + tx = await self.daemon.jsonrpc_stream_create(name, '0.19', file_path=file_path, allow_duplicate_name=True) + await self.ledger.wait(tx) + third_claim_id = tx.outputs[0].claim_id + + wallet = self.daemon.wallet_manager.get_wallet_or_default(None) + funding_accounts = wallet.get_accounts_or_all(None) + amount = self.daemon.get_dewies_or_error("amount", '0.1') + account = wallet.get_account_or_default(None) + claim_address = await account.receiving.get_or_create_usable_address() + tx = await Transaction.support( + 'derp', third_claim_id, amount, claim_address, funding_accounts, funding_accounts[0], None + ) + await tx.sign(funding_accounts) + await self.daemon.broadcast_or_release(tx, True) + await self.ledger.wait(tx) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + + async def test_early_takeover_abandoned_controlling_support(self): + name = 'derp' + # block 207 + first_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0][ + 'claim_id'] + tx = await self.daemon.jsonrpc_support_create(first_claim_id, '0.2') + await self.ledger.wait(tx) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(96) + # block 304, activates at 307 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0][ + 'claim_id'] + # block 305, activates at 308 (but gets triggered early by the takeover by the second claim) + third_claim_id = (await self.stream_create(name, '0.3', allow_duplicate_name=True))['outputs'][0][ + 'claim_id'] + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await 
self.assertMatchClaimIsWinning(name, first_claim_id) + await self.daemon.jsonrpc_txo_spend(type='support', txid=tx.id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + + async def test_block_takeover_with_delay_1_support(self): + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + self.assertEqual(first_claim_id, (await self.assertMatchWinningClaim(name)).claim_hash.hex()) + await self.generate(320) + # a claim of higher amount made now will have a takeover delay of 10 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + # sanity check + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + for _ in range(8): + await self.generate(1) + await self.assertMatchClaimIsWinning(name, first_claim_id) + # prevent the takeover by adding a support one block before the takeover happens + await self.support_create(first_claim_id, bid='1.0') + await self.assertMatchClaimIsWinning(name, first_claim_id) + # one more block until activation + await self.generate(1) + await self.assertMatchClaimIsWinning(name, first_claim_id) + + async def test_block_takeover_with_delay_0_support(self): + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + # a claim of higher amount made now will have a takeover delay of 10 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + # sanity check + await self.assertMatchClaimIsWinning(name, first_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(9) + await self.assertMatchClaimIsWinning(name, first_claim_id) + # prevent the takeover by adding a support on the same block the takeover would happen + await self.support_create(first_claim_id, bid='1.0') + await self.assertMatchClaimIsWinning(name, first_claim_id) + + async def _test_almost_prevent_takeover(self, name: str, blocks: int = 9): + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + # a claim of higher amount made now will have a takeover delay of 10 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + # sanity check + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(blocks) + await self.assertMatchClaimIsWinning(name, first_claim_id) + # prevent the takeover by adding a support on the same block the takeover would happen + tx = await self.daemon.jsonrpc_support_create(first_claim_id, '1.0') + await self.ledger.wait(tx) + return first_claim_id, second_claim_id, tx + + async def test_almost_prevent_takeover_remove_support_same_block_supported(self): + name = 'derp' + first_claim_id, second_claim_id, tx = await self._test_almost_prevent_takeover(name, 9) + await self.daemon.jsonrpc_txo_spend(type='support', txid=tx.id) + 
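+ # A note on the "takeover delay of 10" used throughout these tests: per the activation rule
+ # spelled out in the comments above, delay = min(4032, (claim_height - last_takeover_height) // 32).
+ # A minimal illustrative sketch (the helper name is ours, not part of the codebase):
+ #
+ #     def activation_delay(claim_height: int, last_takeover_height: int) -> int:
+ #         return min(4032, (claim_height - last_takeover_height) // 32)
+ #
+ # e.g. a claim made at height 528 against a takeover from height 207 gets min(4032, 321 // 32) == 10.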
await self.generate(1) + await self.assertMatchClaimIsWinning(name, second_claim_id) + + async def test_almost_prevent_takeover_remove_support_one_block_after_supported(self): + name = 'derp' + first_claim_id, second_claim_id, tx = await self._test_almost_prevent_takeover(name, 8) + await self.generate(1) + await self.daemon.jsonrpc_txo_spend(type='support', txid=tx.id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, second_claim_id) + + async def test_abandon_before_takeover(self): + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + # a claim of higher amount made now will have a takeover delay of 10 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + # sanity check + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(8) + await self.assertMatchClaimIsWinning(name, first_claim_id) + # abandon the winning claim + await self.daemon.jsonrpc_txo_spend(type='stream', claim_id=first_claim_id) + await self.generate(1) + # the takeover and activation should happen a block earlier than they would have absent the abandon + await self.assertMatchClaimIsWinning(name, second_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, second_claim_id) + + async def test_abandon_before_takeover_no_delay_update(self): # TODO: fix race condition line 506 + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + # block 527 + # a claim of higher amount made now will have a takeover delay of 10 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + # block 528 + # sanity check + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.assertMatchClaimsForName(name) + await self.generate(8) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.assertMatchClaimsForName(name) + # abandon the winning claim + await self.daemon.jsonrpc_txo_spend(type='stream', claim_id=first_claim_id) + await self.daemon.jsonrpc_stream_update(second_claim_id, '0.1') + await self.generate(1) + + # the takeover and activation should happen a block earlier than they would have absent the abandon + await self.assertMatchClaimIsWinning(name, second_claim_id) + await self.assertMatchClaimsForName(name) + await self.generate(1) + # await self.ledger.on_header.where(lambda e: e.height == 537) + await self.assertMatchClaimIsWinning(name, second_claim_id) + await self.assertMatchClaimsForName(name) + + async def test_abandon_controlling_support_before_pending_takeover(self): + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + controlling_support_tx = await self.daemon.jsonrpc_support_create(first_claim_id, '0.9') + await self.ledger.wait(controlling_support_tx) + self.assertEqual(first_claim_id, (await self.assertMatchWinningClaim(name)).claim_hash.hex()) + await self.generate(321) + + second_claim_id = (await 
self.stream_create(name, '0.9', allow_duplicate_name=True))['outputs'][0]['claim_id'] + + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(8) + await self.assertMatchClaimIsWinning(name, first_claim_id) + # abandon the support that gives the winning claim the highest stake + tx = await self.daemon.jsonrpc_txo_spend(type='support', txid=controlling_support_tx.id, blocking=True) + await self.generate(1) + await self.assertNameState(538, name, first_claim_id, last_takeover_height=207, non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=539, active_in_lbrycrd=False) + ]) + await self.generate(1) + await self.assertNameState(539, name, second_claim_id, last_takeover_height=539, non_winning_claims=[ + ClaimStateValue(first_claim_id, activation_height=207, active_in_lbrycrd=True) + ]) + + async def test_remove_controlling_support(self): + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.2'))['outputs'][0]['claim_id'] + first_support_tx = await self.daemon.jsonrpc_support_create(first_claim_id, '0.9') + await self.ledger.wait(first_support_tx) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) # give the first claim long enough for a 10-block takeover delay + await self.assertNameState(527, name, first_claim_id, last_takeover_height=207, non_winning_claims=[]) + + # make a second claim which will take over the name + second_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertNameState(528, name, first_claim_id, last_takeover_height=207, non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=False) + ]) + + second_claim_support_tx = await self.daemon.jsonrpc_support_create(second_claim_id, '1.5') + await self.ledger.wait(second_claim_support_tx) + await self.generate(1) # neither the second claim nor its support has activated yet + await self.assertNameState(529, name, first_claim_id, last_takeover_height=207, non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=False) + ]) + await self.generate(9) # claim activates, but is not yet winning + await self.assertNameState(538, name, first_claim_id, last_takeover_height=207, non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True) + ]) + await self.generate(1) # support activates, takeover happens + await self.assertNameState(539, name, second_claim_id, last_takeover_height=539, non_winning_claims=[ + ClaimStateValue(first_claim_id, activation_height=207, active_in_lbrycrd=True) + ]) + + await self.daemon.jsonrpc_txo_spend(type='support', claim_id=second_claim_id, blocking=True) + await self.generate(1) # the support is abandoned, so the takeover reverts to the first claim + await self.assertNameState(540, name, first_claim_id, last_takeover_height=540, non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True) + ]) + + async def test_claim_expiration(self): + name = 'derp' + # starts at height 206 + vanishing_claim = (await self.stream_create('vanish', '0.1'))['outputs'][0]['claim_id'] + + await self.generate(493) + # in block 701 and 702 + first_claim_id = (await self.stream_create(name, '0.3'))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning('vanish', vanishing_claim) + await 
self.generate(100) # block 801, expiration fork happened + await self.assertNoClaimForName('vanish') + # second claim is in block 802 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(498) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, second_claim_id) + await self.generate(100) + await self.assertMatchClaimIsWinning(name, second_claim_id) + await self.generate(1) + await self.assertNoClaimForName(name) + + async def _test_add_non_winning_already_claimed(self): + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + self.assertEqual(first_claim_id, (await self.assertMatchWinningClaim(name)).claim_hash.hex()) + await self.generate(32) + + second_claim_id = (await self.stream_create(name, '0.01', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertNoClaim(name, second_claim_id) + self.assertEqual( + len((await self.conductor.spv_node.server.session_manager.search_index.search(claim_name=name))[0]), 1 + ) + await self.generate(1) + await self.assertMatchClaim(name, second_claim_id) + self.assertEqual( + len((await self.conductor.spv_node.server.session_manager.search_index.search(claim_name=name))[0]), 2 + ) + + async def test_abandon_controlling_same_block_as_new_claim(self): + name = 'derp' + + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + await self.generate(64) + await self.assertNameState(271, name, first_claim_id, last_takeover_height=207, non_winning_claims=[]) + + await self.daemon.jsonrpc_txo_spend(type='stream', claim_id=first_claim_id) + second_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertNameState(272, name, second_claim_id, last_takeover_height=272, non_winning_claims=[]) + + async def test_trending(self): + async def get_trending_score(claim_id): + return (await self.conductor.spv_node.server.session_manager.search_index.search( + claim_id=claim_id + ))[0][0]['trending_score'] + + claim_id1 = (await self.stream_create('derp', '1.0'))['outputs'][0]['claim_id'] + COIN = int(1E8) + + self.assertEqual(self.conductor.spv_node.writer.height, 207) + self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put( + (208, bytes.fromhex(claim_id1)), (0, 10 * COIN) + ) + await self.generate(1) + self.assertEqual(self.conductor.spv_node.writer.height, 208) + + self.assertEqual(1.7090807854206793, await get_trending_score(claim_id1)) + self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put( + (209, bytes.fromhex(claim_id1)), (10 * COIN, 100 * COIN) + ) + await self.generate(1) + self.assertEqual(self.conductor.spv_node.writer.height, 209) + self.assertEqual(2.2437974397778886, await get_trending_score(claim_id1)) + self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put( + (309, bytes.fromhex(claim_id1)), (100 * COIN, 1000000 * COIN) + ) + await self.generate(100) + self.assertEqual(self.conductor.spv_node.writer.height, 309) + self.assertEqual(5.157053472135866, await get_trending_score(claim_id1)) + + self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put( + (409, bytes.fromhex(claim_id1)), (1000000 * COIN, 1 * COIN) + ) + + await self.generate(99) + 
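+ # each trending notification staged above is keyed by (height, claim_hash) and carries a
+ # (previous_amount, new_amount) pair in dewies (1 LBC == 1E8 dewies); a staged notification
+ # only affects the score once its block is processed, which the next asserts demonstrate:
+ # at height 408 the score is unchanged, and at 409 the staged drop to 1 LBC takes effect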
self.assertEqual(self.conductor.spv_node.writer.height, 408) + self.assertEqual(5.157053472135866, await get_trending_score(claim_id1)) + + await self.generate(1) + self.assertEqual(self.conductor.spv_node.writer.height, 409) + + self.assertEqual(-3.4256156592205627, await get_trending_score(claim_id1)) + search_results = (await self.conductor.spv_node.server.session_manager.search_index.search(claim_name="derp"))[0] + self.assertEqual(1, len(search_results)) + self.assertListEqual([claim_id1], [c['claim_id'] for c in search_results]) + + +class ResolveAfterReorg(BaseResolveTestCase): + async def reorg(self, start): + blocks = self.ledger.headers.height - start + self.blockchain.block_expected = start - 1 + + prepare = self.ledger.on_header.where(self.blockchain.is_expected_block) + self.conductor.spv_node.server.synchronized.clear() + + # go back to start + await self.blockchain.invalidate_block((await self.ledger.headers.hash(start)).decode()) + # go to previous + 1 + await self.blockchain.generate(blocks + 2) + + await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate + await self.conductor.spv_node.server.synchronized.wait() + # await asyncio.wait_for(self.on_header(self.blockchain.block_expected), 30.0) + + async def assertBlockHash(self, height): + reader_db = self.conductor.spv_node.server.db + block_hash = await self.blockchain.get_block_hash(height) + + self.assertEqual(block_hash, (await self.ledger.headers.hash(height)).decode()) + self.assertEqual(block_hash, (await reader_db.fs_block_hashes(height, 1))[0][::-1].hex()) + txids = [ + tx_hash[::-1].hex() for tx_hash in reader_db.get_block_txs(height) + ] + txs = await reader_db.get_transactions_and_merkles(txids) + block_txs = (await self.conductor.spv_node.server.daemon.deserialised_block(block_hash))['tx'] + self.assertSetEqual(set(block_txs), set(txs.keys()), msg='leveldb/lbrycrd is missing transactions') + self.assertListEqual(block_txs, list(txs.keys()), msg='leveldb/lbrycrd transactions are out of order') + + async def test_reorg(self): + self.assertEqual(self.ledger.headers.height, 206) + + channel_name = '@abc' + channel_id = self.get_claim_id( + await self.channel_create(channel_name, '0.01') + ) + + await self.assertNameState( + height=207, name='@abc', winning_claim_id=channel_id, last_takeover_height=207, + non_winning_claims=[] + ) + + await self.reorg(206) + + await self.assertNameState( + height=208, name='@abc', winning_claim_id=channel_id, last_takeover_height=207, + non_winning_claims=[] + ) + + # await self.assertNoClaimForName(channel_name) + # self.assertNotIn('error', await self.resolve(channel_name)) + + stream_name = 'foo' + stream_id = self.get_claim_id( + await self.stream_create(stream_name, '0.01', channel_id=channel_id) + ) + + await self.assertNameState( + height=209, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209, + non_winning_claims=[] + ) + await self.reorg(206) + await self.assertNameState( + height=210, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209, + non_winning_claims=[] + ) + + await self.support_create(stream_id, '0.01') + + await self.assertNameState( + height=211, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209, + non_winning_claims=[] + ) + await self.reorg(206) + # self.assertNotIn('error', await self.resolve(stream_name)) + await self.assertNameState( + height=212, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209, + non_winning_claims=[] + ) + + 
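+ # the remaining steps abandon the stream and then the channel, reorging after each abandon
+ # to verify that failed resolution survives a reorg just as winning resolution did above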
await self.stream_abandon(stream_id) + self.assertNotIn('error', await self.resolve(channel_name)) + self.assertIn('error', await self.resolve(stream_name)) + self.assertEqual(channel_id, (await self.assertMatchWinningClaim(channel_name)).claim_hash.hex()) + await self.assertNoClaimForName(stream_name) + # TODO: check @abc/foo too + + await self.reorg(206) + self.assertNotIn('error', await self.resolve(channel_name)) + self.assertIn('error', await self.resolve(stream_name)) + self.assertEqual(channel_id, (await self.assertMatchWinningClaim(channel_name)).claim_hash.hex()) + await self.assertNoClaimForName(stream_name) + + await self.channel_abandon(channel_id) + self.assertIn('error', await self.resolve(channel_name)) + self.assertIn('error', await self.resolve(stream_name)) + await self.reorg(206) + self.assertIn('error', await self.resolve(channel_name)) + self.assertIn('error', await self.resolve(stream_name)) + + async def test_reorg_change_claim_height(self): + # sanity check + result = await self.resolve('hovercraft') # TODO: do these for claim_search and resolve both + self.assertIn('error', result) + + still_valid = await self.daemon.jsonrpc_stream_create( + 'still-valid', '1.0', file_path=self.create_upload_file(data=b'hi!') + ) + await self.ledger.wait(still_valid) + await self.generate(1) + # create a claim and verify it's returned by claim_search + self.assertEqual(self.ledger.headers.height, 207) + await self.assertBlockHash(207) + + broadcast_tx = await self.daemon.jsonrpc_stream_create( + 'hovercraft', '1.0', file_path=self.create_upload_file(data=b'hi!') + ) + await self.ledger.wait(broadcast_tx) + await self.support_create(still_valid.outputs[0].claim_id, '0.01') + + await self.ledger.wait(broadcast_tx, self.blockchain.block_expected) + self.assertEqual(self.ledger.headers.height, 208) + await self.assertBlockHash(208) + + claim = await self.resolve('hovercraft') + self.assertEqual(claim['txid'], broadcast_tx.id) + self.assertEqual(claim['height'], 208) + + # check that our tx is in block 208 as returned by lbrycrdd + invalidated_block_hash = (await self.ledger.headers.hash(208)).decode() + block_207 = await self.blockchain.get_block(invalidated_block_hash) + self.assertIn(claim['txid'], block_207['tx']) + self.assertEqual(208, claim['height']) + + # reorg the last block dropping our claim tx + await self.blockchain.invalidate_block(invalidated_block_hash) + await self.conductor.clear_mempool() + await self.blockchain.generate(2) + + # wait for the client to catch up and verify the reorg + await asyncio.wait_for(self.on_header(209), 3.0) + await self.assertBlockHash(207) + await self.assertBlockHash(208) + await self.assertBlockHash(209) + + # verify the claim was dropped from block 208 as returned by lbrycrdd + reorg_block_hash = await self.blockchain.get_block_hash(208) + self.assertNotEqual(invalidated_block_hash, reorg_block_hash) + block_207 = await self.blockchain.get_block(reorg_block_hash) + self.assertNotIn(claim['txid'], block_207['tx']) + + client_reorg_block_hash = (await self.ledger.headers.hash(208)).decode() + self.assertEqual(client_reorg_block_hash, reorg_block_hash) + + # verify the dropped claim is no longer returned by claim search + self.assertDictEqual( + {'error': {'name': 'NOT_FOUND', 'text': 'Could not find claim at "hovercraft".'}}, + await self.resolve('hovercraft') + ) + + # verify the claim published a block earlier wasn't also reverted + self.assertEqual(207, (await self.resolve('still-valid'))['height']) + + # broadcast the claim in a 
different block + new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode()) + self.assertEqual(broadcast_tx.id, new_txid) + await self.blockchain.generate(1) + + # wait for the client to catch up + await asyncio.wait_for(self.on_header(210), 3.0) + + # verify the claim is in the new block and that it is returned by claim_search + republished = await self.resolve('hovercraft') + self.assertEqual(210, republished['height']) + self.assertEqual(claim['claim_id'], republished['claim_id']) + + # this should still be unchanged + self.assertEqual(207, (await self.resolve('still-valid'))['height']) + + async def test_reorg_drop_claim(self): + # sanity check + result = await self.resolve('hovercraft') # TODO: do these for claim_search and resolve both + self.assertIn('error', result) + + still_valid = await self.daemon.jsonrpc_stream_create( + 'still-valid', '1.0', file_path=self.create_upload_file(data=b'hi!') + ) + await self.ledger.wait(still_valid) + await self.generate(1) + + # create a claim and verify it's returned by claim_search + self.assertEqual(self.ledger.headers.height, 207) + await self.assertBlockHash(207) + + broadcast_tx = await self.daemon.jsonrpc_stream_create( + 'hovercraft', '1.0', file_path=self.create_upload_file(data=b'hi!') + ) + await self.ledger.wait(broadcast_tx) + await self.generate(1) + await self.ledger.wait(broadcast_tx, self.blockchain.block_expected) + self.assertEqual(self.ledger.headers.height, 208) + await self.assertBlockHash(208) + + claim = await self.resolve('hovercraft') + self.assertEqual(claim['txid'], broadcast_tx.id) + self.assertEqual(claim['height'], 208) + + # check that our tx is in block 208 as returned by lbrycrdd + invalidated_block_hash = (await self.ledger.headers.hash(208)).decode() + block_207 = await self.blockchain.get_block(invalidated_block_hash) + self.assertIn(claim['txid'], block_207['tx']) + self.assertEqual(208, claim['height']) + + # reorg the last block dropping our claim tx + await self.blockchain.invalidate_block(invalidated_block_hash) + await self.conductor.clear_mempool() + await self.blockchain.generate(2) + + # wait for the client to catch up and verify the reorg + await asyncio.wait_for(self.on_header(209), 30.0) + await self.assertBlockHash(207) + await self.assertBlockHash(208) + await self.assertBlockHash(209) + + # verify the claim was dropped from block 208 as returned by lbrycrdd + reorg_block_hash = await self.blockchain.get_block_hash(208) + self.assertNotEqual(invalidated_block_hash, reorg_block_hash) + block_207 = await self.blockchain.get_block(reorg_block_hash) + self.assertNotIn(claim['txid'], block_207['tx']) + + client_reorg_block_hash = (await self.ledger.headers.hash(208)).decode() + self.assertEqual(client_reorg_block_hash, reorg_block_hash) + + # verify the dropped claim is no longer returned by claim search + self.assertDictEqual( + {'error': {'name': 'NOT_FOUND', 'text': 'Could not find claim at "hovercraft".'}}, + await self.resolve('hovercraft') + ) + + # verify the claim published a block earlier wasn't also reverted + self.assertEqual(207, (await self.resolve('still-valid'))['height']) + + # broadcast the claim in a different block + new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode()) + self.assertEqual(broadcast_tx.id, new_txid) + await self.blockchain.generate(1) + + # wait for the client to catch up + await asyncio.wait_for(self.on_header(210), 1.0) + + # verify the claim is in the new block and that it is returned by claim_search 
+ republished = await self.resolve('hovercraft') + self.assertEqual(210, republished['height']) + self.assertEqual(claim['claim_id'], republished['claim_id']) + + # this should still be unchanged + self.assertEqual(207, (await self.resolve('still-valid'))['height']) + + +def generate_signed_legacy(address: bytes, output: Output): + decoded_address = Base58.decode(address) + claim = OldClaimMessage() + claim.ParseFromString(unhexlify( + '080110011aee04080112a604080410011a2b4865726520617265203520526561736f6e73204920e29da4e' + 'fb88f204e657874636c6f7564207c20544c4722920346696e64206f7574206d6f72652061626f7574204e' + '657874636c6f75643a2068747470733a2f2f6e657874636c6f75642e636f6d2f0a0a596f752063616e206' + '6696e64206d65206f6e20746865736520736f6369616c733a0a202a20466f72756d733a2068747470733a' + '2f2f666f72756d2e6865617679656c656d656e742e696f2f0a202a20506f64636173743a2068747470733' + 'a2f2f6f6666746f706963616c2e6e65740a202a2050617472656f6e3a2068747470733a2f2f7061747265' + '6f6e2e636f6d2f7468656c696e757867616d65720a202a204d657263683a2068747470733a2f2f7465657' + '37072696e672e636f6d2f73746f7265732f6f6666696369616c2d6c696e75782d67616d65720a202a2054' + '77697463683a2068747470733a2f2f7477697463682e74762f786f6e64616b0a202a20547769747465723' + 'a2068747470733a2f2f747769747465722e636f6d2f7468656c696e757867616d65720a0a2e2e2e0a6874' + '7470733a2f2f7777772e796f75747562652e636f6d2f77617463683f763d4672546442434f535f66632a0' + 'f546865204c696e75782047616d6572321c436f7079726967687465642028636f6e746163742061757468' + '6f722938004a2968747470733a2f2f6265726b2e6e696e6a612f7468756d626e61696c732f46725464424' + '34f535f666352005a001a41080110011a30040e8ac6e89c061f982528c23ad33829fd7146435bf7a4cc22' + 'f0bff70c4fe0b91fd36da9a375e3e1c171db825bf5d1f32209766964656f2f6d70342a5c080110031a406' + '2b2dd4c45e364030fbfad1a6fefff695ebf20ea33a5381b947753e2a0ca359989a5cc7d15e5392a0d354c' + '0b68498382b2701b22c03beb8dcb91089031b871e72214feb61536c007cdf4faeeaab4876cb397feaf6b51' + )) + claim.ClearField("publisherSignature") + digest = sha256(b''.join([ + decoded_address, + claim.SerializeToString(), + output.claim_hash[::-1] + ])) + signature = output.private_key.sign_compact(digest) + claim.publisherSignature.version = 1 + claim.publisherSignature.signatureType = 1 + claim.publisherSignature.signature = signature + claim.publisherSignature.certificateId = output.claim_hash[::-1] + return claim diff --git a/tests/test_revertable.py b/tests/test_revertable.py new file mode 100644 index 0000000..37cbc59 --- /dev/null +++ b/tests/test_revertable.py @@ -0,0 +1,237 @@ +import unittest +import tempfile +import shutil +from scribe.db.revertable import RevertableOpStack, RevertableDelete, RevertablePut, OpStackIntegrity +from scribe.db.prefixes import ClaimToTXOPrefixRow, PrefixDB + + +class TestRevertableOpStack(unittest.TestCase): + def setUp(self): + self.fake_db = {} + self.stack = RevertableOpStack(self.fake_db.get) + + def tearDown(self) -> None: + self.stack.clear() + self.fake_db.clear() + + def process_stack(self): + for op in self.stack: + if op.is_put: + self.fake_db[op.key] = op.value + else: + self.fake_db.pop(op.key) + self.stack.clear() + + def update(self, key1: bytes, value1: bytes, key2: bytes, value2: bytes): + self.stack.append_op(RevertableDelete(key1, value1)) + self.stack.append_op(RevertablePut(key2, value2)) + + def test_simplify(self): + key1 = ClaimToTXOPrefixRow.pack_key(b'\x01' * 20) + key2 = ClaimToTXOPrefixRow.pack_key(b'\x02' * 20) + key3 = ClaimToTXOPrefixRow.pack_key(b'\x03' * 20) + key4 = 
ClaimToTXOPrefixRow.pack_key(b'\x04' * 20) + + val1 = ClaimToTXOPrefixRow.pack_value(1, 0, 1, 0, 1, False, 'derp') + val2 = ClaimToTXOPrefixRow.pack_value(1, 0, 1, 0, 1, False, 'oops') + val3 = ClaimToTXOPrefixRow.pack_value(1, 0, 1, 0, 1, False, 'other') + + # check that we can't delete a non existent value + with self.assertRaises(OpStackIntegrity): + self.stack.append_op(RevertableDelete(key1, val1)) + + self.stack.append_op(RevertablePut(key1, val1)) + self.assertEqual(1, len(self.stack)) + self.stack.append_op(RevertableDelete(key1, val1)) + self.assertEqual(0, len(self.stack)) + + self.stack.append_op(RevertablePut(key1, val1)) + self.assertEqual(1, len(self.stack)) + # try to delete the wrong value + with self.assertRaises(OpStackIntegrity): + self.stack.append_op(RevertableDelete(key2, val2)) + + self.stack.append_op(RevertableDelete(key1, val1)) + self.assertEqual(0, len(self.stack)) + self.stack.append_op(RevertablePut(key2, val3)) + self.assertEqual(1, len(self.stack)) + + self.process_stack() + + self.assertDictEqual({key2: val3}, self.fake_db) + + # check that we can't put on top of the existing stored value + with self.assertRaises(OpStackIntegrity): + self.stack.append_op(RevertablePut(key2, val1)) + + self.assertEqual(0, len(self.stack)) + self.stack.append_op(RevertableDelete(key2, val3)) + self.assertEqual(1, len(self.stack)) + self.stack.append_op(RevertablePut(key2, val3)) + self.assertEqual(0, len(self.stack)) + + self.update(key2, val3, key2, val1) + self.assertEqual(2, len(self.stack)) + + self.process_stack() + self.assertDictEqual({key2: val1}, self.fake_db) + + self.update(key2, val1, key2, val2) + self.assertEqual(2, len(self.stack)) + self.update(key2, val2, key2, val3) + self.update(key2, val3, key2, val2) + self.update(key2, val2, key2, val3) + self.update(key2, val3, key2, val2) + with self.assertRaises(OpStackIntegrity): + self.update(key2, val3, key2, val2) + self.update(key2, val2, key2, val3) + self.assertEqual(2, len(self.stack)) + self.stack.append_op(RevertableDelete(key2, val3)) + self.process_stack() + self.assertDictEqual({}, self.fake_db) + + self.stack.append_op(RevertablePut(key2, val3)) + self.process_stack() + with self.assertRaises(OpStackIntegrity): + self.update(key2, val2, key2, val2) + self.update(key2, val3, key2, val2) + self.assertDictEqual({key2: val3}, self.fake_db) + undo = self.stack.get_undo_ops() + self.process_stack() + self.assertDictEqual({key2: val2}, self.fake_db) + self.stack.apply_packed_undo_ops(undo) + self.process_stack() + self.assertDictEqual({key2: val3}, self.fake_db) + + +class TestRevertablePrefixDB(unittest.TestCase): + def setUp(self): + self.tmp_dir = tempfile.mkdtemp() + self.db = PrefixDB(self.tmp_dir, cache_mb=1, max_open_files=32) + + def tearDown(self) -> None: + self.db.close() + shutil.rmtree(self.tmp_dir) + + def test_rollback(self): + name = 'derp' + claim_hash1 = 20 * b'\x00' + claim_hash2 = 20 * b'\x01' + claim_hash3 = 20 * b'\x02' + + takeover_height = 10000000 + + self.assertIsNone(self.db.claim_takeover.get(name)) + self.db.claim_takeover.stage_put((name,), (claim_hash1, takeover_height)) + self.assertIsNone(self.db.claim_takeover.get(name)) + self.assertEqual(10000000, self.db.claim_takeover.get_pending(name).height) + + self.db.commit(10000000, b'\x00' * 32) + self.assertEqual(10000000, self.db.claim_takeover.get(name).height) + + self.db.claim_takeover.stage_delete((name,), (claim_hash1, takeover_height)) + self.db.claim_takeover.stage_put((name,), (claim_hash2, takeover_height + 1)) + 
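+ # staging a put and then a delete of the same (key, value) cancels out, so the commit below
+ # stores no takeover row for this name; commit(height, block_hash) also persists the undo
+ # information that rollback(height, block_hash) replays further down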
self.db.claim_takeover.stage_delete((name,), (claim_hash2, takeover_height + 1)) + self.db.commit(10000001, b'\x01' * 32) + self.assertIsNone(self.db.claim_takeover.get(name)) + self.db.claim_takeover.stage_put((name,), (claim_hash3, takeover_height + 2)) + self.db.commit(10000002, b'\x02' * 32) + self.assertEqual(10000002, self.db.claim_takeover.get(name).height) + + self.db.claim_takeover.stage_delete((name,), (claim_hash3, takeover_height + 2)) + self.db.claim_takeover.stage_put((name,), (claim_hash2, takeover_height + 3)) + self.db.commit(10000003, b'\x03' * 32) + self.assertEqual(10000003, self.db.claim_takeover.get(name).height) + + self.db.rollback(10000003, b'\x03' * 32) + self.assertEqual(10000002, self.db.claim_takeover.get(name).height) + self.db.rollback(10000002, b'\x02' * 32) + self.assertIsNone(self.db.claim_takeover.get(name)) + self.db.rollback(10000001, b'\x01' * 32) + self.assertEqual(10000000, self.db.claim_takeover.get(name).height) + self.db.rollback(10000000, b'\x00' * 32) + self.assertIsNone(self.db.claim_takeover.get(name)) + + def test_hub_db_iterator(self): + name = 'derp' + claim_hash0 = 20 * b'\x00' + claim_hash1 = 20 * b'\x01' + claim_hash2 = 20 * b'\x02' + claim_hash3 = 20 * b'\x03' + overflow_value = 0xffffffff + self.db.claim_expiration.stage_put((99, 999, 0), (claim_hash0, name)) + self.db.claim_expiration.stage_put((100, 1000, 0), (claim_hash1, name)) + self.db.claim_expiration.stage_put((100, 1001, 0), (claim_hash2, name)) + self.db.claim_expiration.stage_put((101, 1002, 0), (claim_hash3, name)) + self.db.claim_expiration.stage_put((overflow_value - 1, 1003, 0), (claim_hash3, name)) + self.db.claim_expiration.stage_put((overflow_value, 1004, 0), (claim_hash3, name)) + self.db.tx_num.stage_put((b'\x00' * 32,), (101,)) + self.db.claim_takeover.stage_put((name,), (claim_hash3, 101)) + self.db.db_state.stage_put((), (b'n?\xcf\x12\x99\xd4\xec]y\xc3\xa4\xc9\x1dbJJ\xcf\x9e.\x17=\x95\xa1\xa0POgvihuV', 0, 1, b'VuhivgOP\xa0\xa1\x95=\x17.\x9e\xcfJJb\x1d\xc9\xa4\xc3y]\xec\xd4\x99\x12\xcf?n', 1, 0, 1, 7, 1, -1, -1, 0)) + self.db.unsafe_commit() + + state = self.db.db_state.get() + self.assertEqual(b'n?\xcf\x12\x99\xd4\xec]y\xc3\xa4\xc9\x1dbJJ\xcf\x9e.\x17=\x95\xa1\xa0POgvihuV', state.genesis) + + self.assertListEqual( + [], list(self.db.claim_expiration.iterate(prefix=(98,))) + ) + self.assertListEqual( + list(self.db.claim_expiration.iterate(start=(98,), stop=(99,))), + list(self.db.claim_expiration.iterate(prefix=(98,))) + ) + self.assertListEqual( + list(self.db.claim_expiration.iterate(start=(99,), stop=(100,))), + list(self.db.claim_expiration.iterate(prefix=(99,))) + ) + self.assertListEqual( + [ + ((99, 999, 0), (claim_hash0, name)), + ], list(self.db.claim_expiration.iterate(prefix=(99,))) + ) + self.assertListEqual( + [ + ((100, 1000, 0), (claim_hash1, name)), + ((100, 1001, 0), (claim_hash2, name)) + ], list(self.db.claim_expiration.iterate(prefix=(100,))) + ) + self.assertListEqual( + list(self.db.claim_expiration.iterate(start=(100,), stop=(101,))), + list(self.db.claim_expiration.iterate(prefix=(100,))) + ) + self.assertListEqual( + [ + ((overflow_value - 1, 1003, 0), (claim_hash3, name)) + ], list(self.db.claim_expiration.iterate(prefix=(overflow_value - 1,))) + ) + self.assertListEqual( + [ + ((overflow_value, 1004, 0), (claim_hash3, name)) + ], list(self.db.claim_expiration.iterate(prefix=(overflow_value,))) + ) + + def test_hub_db_iterator_start_stop(self): + tx_num = 101 + for x in range(255): + claim_hash = 20 * chr(x).encode() + 
self.db.active_amount.stage_put((claim_hash, 1, 200, tx_num, 1), (100000,)) + self.db.active_amount.stage_put((claim_hash, 1, 201, tx_num + 1, 1), (200000,)) + self.db.active_amount.stage_put((claim_hash, 1, 202, tx_num + 2, 1), (300000,)) + tx_num += 3 + self.db.unsafe_commit() + + def get_active_amount_as_of_height(claim_hash: bytes, height: int) -> int: + for v in self.db.active_amount.iterate( + start=(claim_hash, 1, 0), stop=(claim_hash, 1, height + 1), + include_key=False, reverse=True): + return v.amount + return 0 + + for x in range(255): + claim_hash = 20 * chr(x).encode() + self.assertEqual(300000, get_active_amount_as_of_height(claim_hash, 300)) + self.assertEqual(300000, get_active_amount_as_of_height(claim_hash, 203)) + self.assertEqual(300000, get_active_amount_as_of_height(claim_hash, 202)) + self.assertEqual(200000, get_active_amount_as_of_height(claim_hash, 201)) + self.assertEqual(100000, get_active_amount_as_of_height(claim_hash, 200)) + self.assertEqual(0, get_active_amount_as_of_height(claim_hash, 199)) diff --git a/tests/testcase.py b/tests/testcase.py new file mode 100644 index 0000000..f9d3865 --- /dev/null +++ b/tests/testcase.py @@ -0,0 +1,748 @@ +import os +import sys +import json +import shutil +import logging +import tempfile +import functools +import asyncio +from asyncio.runners import _cancel_all_tasks # type: ignore +import unittest +from unittest.case import _Outcome +from typing import Optional +from time import time, perf_counter +from binascii import unhexlify +from functools import partial + +from lbry.wallet import WalletManager, Wallet, Ledger, Account, Transaction +from lbry.conf import Config +from lbry.wallet.util import satoshis_to_coins +from lbry.wallet.dewies import lbc_to_dewies +from lbry.wallet.orchstr8 import Conductor +from lbry.wallet.orchstr8.node import LBCWalletNode, WalletNode, HubNode +from scribe.schema.claim import Claim + +from lbry.extras.daemon.daemon import Daemon, jsonrpc_dumps_pretty +from lbry.extras.daemon.components import Component, WalletComponent +from lbry.extras.daemon.components import ( + DHT_COMPONENT, + HASH_ANNOUNCER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT, + UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, LIBTORRENT_COMPONENT +) +from lbry.extras.daemon.componentmanager import ComponentManager +from lbry.extras.daemon.exchange_rate_manager import ( + ExchangeRateManager, ExchangeRate, BittrexBTCFeed, BittrexUSDFeed +) +from lbry.extras.daemon.storage import SQLiteStorage +from lbry.blob.blob_manager import BlobManager +from lbry.stream.reflector.server import ReflectorServer +from lbry.blob_exchange.server import BlobServer + + +class ColorHandler(logging.StreamHandler): + + level_color = { + logging.DEBUG: "black", + logging.INFO: "light_gray", + logging.WARNING: "yellow", + logging.ERROR: "red" + } + + color_code = dict( + black=30, + red=31, + green=32, + yellow=33, + blue=34, + magenta=35, + cyan=36, + white=37, + light_gray='0;37', + dark_gray='1;30' + ) + + def emit(self, record): + try: + msg = self.format(record) + color_name = self.level_color.get(record.levelno, "black") + color_code = self.color_code[color_name] + stream = self.stream + stream.write(f'\x1b[{color_code}m{msg}\x1b[0m') + stream.write(self.terminator) + self.flush() + except Exception: + self.handleError(record) + + +HANDLER = ColorHandler(sys.stdout) +HANDLER.setFormatter( + logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +) +logging.getLogger().addHandler(HANDLER) + + +class 
AsyncioTestCase(unittest.TestCase): + # Implementation inspired by discussion: + # https://bugs.python.org/issue32972 + + LOOP_SLOW_CALLBACK_DURATION = 0.2 + TIMEOUT = 120.0 + + maxDiff = None + + async def asyncSetUp(self): # pylint: disable=C0103 + pass + + async def asyncTearDown(self): # pylint: disable=C0103 + pass + + def run(self, result=None): # pylint: disable=R0915 + orig_result = result + if result is None: + result = self.defaultTestResult() + startTestRun = getattr(result, 'startTestRun', None) # pylint: disable=C0103 + if startTestRun is not None: + startTestRun() + + result.startTest(self) + + testMethod = getattr(self, self._testMethodName) # pylint: disable=C0103 + if (getattr(self.__class__, "__unittest_skip__", False) or + getattr(testMethod, "__unittest_skip__", False)): + # If the class or method was skipped. + try: + skip_why = (getattr(self.__class__, '__unittest_skip_why__', '') + or getattr(testMethod, '__unittest_skip_why__', '')) + self._addSkip(result, self, skip_why) + finally: + result.stopTest(self) + return + expecting_failure_method = getattr(testMethod, + "__unittest_expecting_failure__", False) + expecting_failure_class = getattr(self, + "__unittest_expecting_failure__", False) + expecting_failure = expecting_failure_class or expecting_failure_method + outcome = _Outcome(result) + + self.loop = asyncio.new_event_loop() # pylint: disable=W0201 + asyncio.set_event_loop(self.loop) + self.loop.set_debug(True) + self.loop.slow_callback_duration = self.LOOP_SLOW_CALLBACK_DURATION + + try: + self._outcome = outcome + + with outcome.testPartExecutor(self): + self.setUp() + self.add_timeout() + self.loop.run_until_complete(self.asyncSetUp()) + if outcome.success: + outcome.expecting_failure = expecting_failure + with outcome.testPartExecutor(self, isTest=True): + maybe_coroutine = testMethod() + if asyncio.iscoroutine(maybe_coroutine): + self.add_timeout() + self.loop.run_until_complete(maybe_coroutine) + outcome.expecting_failure = False + with outcome.testPartExecutor(self): + self.add_timeout() + self.loop.run_until_complete(self.asyncTearDown()) + self.tearDown() + + self.doAsyncCleanups() + + try: + _cancel_all_tasks(self.loop) + self.loop.run_until_complete(self.loop.shutdown_asyncgens()) + finally: + asyncio.set_event_loop(None) + self.loop.close() + + for test, reason in outcome.skipped: + self._addSkip(result, test, reason) + self._feedErrorsToResult(result, outcome.errors) + if outcome.success: + if expecting_failure: + if outcome.expectedFailure: + self._addExpectedFailure(result, outcome.expectedFailure) + else: + self._addUnexpectedSuccess(result) + else: + result.addSuccess(self) + return result + finally: + result.stopTest(self) + if orig_result is None: + stopTestRun = getattr(result, 'stopTestRun', None) # pylint: disable=C0103 + if stopTestRun is not None: + stopTestRun() # pylint: disable=E1102 + + # explicitly break reference cycles: + # outcome.errors -> frame -> outcome -> outcome.errors + # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure + outcome.errors.clear() + outcome.expectedFailure = None + + # clear the outcome, no more needed + self._outcome = None + + def doAsyncCleanups(self): # pylint: disable=C0103 + outcome = self._outcome or _Outcome() + while self._cleanups: + function, args, kwargs = self._cleanups.pop() + with outcome.testPartExecutor(self): + maybe_coroutine = function(*args, **kwargs) + if asyncio.iscoroutine(maybe_coroutine): + self.add_timeout() + self.loop.run_until_complete(maybe_coroutine) + + 
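+ # the timeout machinery: add_timeout() schedules cancel() TIMEOUT seconds out on the test's
+ # event loop, and cancel() prints a stack trace for, then cancels, every task still pending,
+ # so a hung test fails loudly instead of blocking the whole run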
def cancel(self): + for task in asyncio.all_tasks(self.loop): + if not task.done(): + task.print_stack() + task.cancel() + + def add_timeout(self): + if self.TIMEOUT: + self.loop.call_later(self.TIMEOUT, self.cancel) + + +class AdvanceTimeTestCase(AsyncioTestCase): + + async def asyncSetUp(self): + self._time = 0 # pylint: disable=W0201 + self.loop.time = functools.wraps(self.loop.time)(lambda: self._time) + await super().asyncSetUp() + + async def advance(self, seconds): + while self.loop._ready: + await asyncio.sleep(0) + self._time += seconds + await asyncio.sleep(0) + while self.loop._ready: + await asyncio.sleep(0) + + +class IntegrationTestCase(AsyncioTestCase): + + SEED = None + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.conductor: Optional[Conductor] = None + self.blockchain: Optional[LBCWalletNode] = None + self.hub: Optional[HubNode] = None + self.wallet_node: Optional[WalletNode] = None + self.manager: Optional[WalletManager] = None + self.ledger: Optional[Ledger] = None + self.wallet: Optional[Wallet] = None + self.account: Optional[Account] = None + + async def asyncSetUp(self): + from time import perf_counter + start = perf_counter() + self.conductor = Conductor(seed=self.SEED) + + await self.conductor.start_lbcd() + self.addCleanup(self.conductor.stop_lbcd) + print(f"{perf_counter() - start}s to start lbcd") + start = perf_counter() + await self.conductor.start_lbcwallet() + self.addCleanup(self.conductor.stop_lbcwallet) + print(f"{perf_counter() - start}s to start lbcwallet") + start = perf_counter() + await self.conductor.start_spv() + self.addCleanup(self.conductor.stop_spv) + print(f"{perf_counter() - start}s to start spv") + start = perf_counter() + await self.conductor.start_wallet() + self.addCleanup(self.conductor.stop_wallet) + print(f"{perf_counter() - start}s to start wallet") + start = perf_counter() + await self.conductor.start_hub() + self.addCleanup(self.conductor.stop_hub) + print(f"{perf_counter() - start}s to start go hub") + + self.blockchain = self.conductor.lbcwallet_node + self.hub = self.conductor.hub_node + self.wallet_node = self.conductor.wallet_node + self.manager = self.wallet_node.manager + self.ledger = self.wallet_node.ledger + self.wallet = self.wallet_node.wallet + self.account = self.wallet_node.wallet.default_account + + async def assertBalance(self, account, expected_balance: str): # pylint: disable=C0103 + balance = await account.get_balance() + self.assertEqual(satoshis_to_coins(balance), expected_balance) + + def broadcast(self, tx): + return self.ledger.broadcast(tx) + + async def broadcast_and_confirm(self, tx, ledger=None): + ledger = ledger or self.ledger + notifications = asyncio.create_task(ledger.wait(tx)) + await ledger.broadcast(tx) + await notifications + await self.generate_and_wait(1, [tx.id], ledger) + + async def on_header(self, height): + if self.ledger.headers.height < height: + await self.ledger.on_header.where( + lambda e: e.height == height + ) + return True + + async def send_to_address_and_wait(self, address, amount, blocks_to_generate=0, ledger=None): + tx_watch = [] + txid = None + done = False + watcher = (ledger or self.ledger).on_transaction.where( + lambda e: e.tx.id == txid or done or tx_watch.append(e.tx.id) + ) + + txid = await self.blockchain.send_to_address(address, amount) + done = txid in tx_watch + await watcher + + await self.generate_and_wait(blocks_to_generate, [txid], ledger) + return txid + + async def generate_and_wait(self, blocks_to_generate, txids, 
ledger=None): + if blocks_to_generate > 0: + watcher = (ledger or self.ledger).on_transaction.where( + lambda e: ((e.tx.id in txids and txids.remove(e.tx.id)), len(txids) <= 0)[-1] # multi-statement lambda + ) + self.conductor.spv_node.server.synchronized.clear() + await self.blockchain.generate(blocks_to_generate) + height = self.blockchain.block_expected + await watcher + while True: + await self.conductor.spv_node.server.synchronized.wait() + self.conductor.spv_node.server.synchronized.clear() + if self.conductor.spv_node.server.db.db_height >= height: + break + + def on_address_update(self, address): + return self.ledger.on_transaction.where( + lambda e: e.address == address + ) + + def on_transaction_address(self, tx, address): + return self.ledger.on_transaction.where( + lambda e: e.tx.id == tx.id and e.address == address + ) + + async def generate(self, blocks): + """ Ask lbrycrd to generate some blocks and wait until ledger has them. """ + prepare = self.ledger.on_header.where(self.blockchain.is_expected_block) + height = self.blockchain.block_expected + self.conductor.spv_node.server.synchronized.clear() + await self.blockchain.generate(blocks) + await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate + while True: + await self.conductor.spv_node.server.synchronized.wait() + self.conductor.spv_node.server.synchronized.clear() + if self.conductor.spv_node.server.db.db_height >= height: + break + + +class FakeExchangeRateManager(ExchangeRateManager): + + def __init__(self, market_feeds, rates): # pylint: disable=super-init-not-called + self.market_feeds = market_feeds + for feed in self.market_feeds: + feed.last_check = time() + feed.rate = ExchangeRate(feed.market, rates[feed.market], time()) + + def start(self): + pass + + def stop(self): + pass + + +def get_fake_exchange_rate_manager(rates=None): + return FakeExchangeRateManager( + [BittrexBTCFeed(), BittrexUSDFeed()], + rates or {'BTCLBC': 3.0, 'USDLBC': 2.0} + ) + + +class ExchangeRateManagerComponent(Component): + component_name = EXCHANGE_RATE_MANAGER_COMPONENT + + def __init__(self, component_manager, rates=None): + super().__init__(component_manager) + self.exchange_rate_manager = get_fake_exchange_rate_manager(rates) + + @property + def component(self) -> ExchangeRateManager: + return self.exchange_rate_manager + + async def start(self): + self.exchange_rate_manager.start() + + async def stop(self): + self.exchange_rate_manager.stop() + + +class CommandTestCase(IntegrationTestCase): + + VERBOSITY = logging.WARN + blob_lru_cache_size = 0 + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.daemon = None + self.daemons = [] + self.server_config = None + self.server_storage = None + self.extra_wallet_nodes = [] + self.extra_wallet_node_port = 5280 + self.server_blob_manager = None + self.server = None + self.reflector = None + self.skip_libtorrent = True + + async def asyncSetUp(self): + start = perf_counter() + + logging.getLogger('lbry.blob_exchange').setLevel(self.VERBOSITY) + logging.getLogger('lbry.daemon').setLevel(self.VERBOSITY) + logging.getLogger('lbry.stream').setLevel(self.VERBOSITY) + logging.getLogger('lbry.wallet').setLevel(self.VERBOSITY) + + await super().asyncSetUp() + print("first setup", perf_counter() - start) + start = perf_counter() + + self.daemon = await self.add_daemon(self.wallet_node) + + await self.account.ensure_address_gap() + address = (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0] + 
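+ # fund the default account: send 10 LBC to its first usable receiving address and generate
+ # 6 blocks so the deposit is confirmed before any test tries to spend it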
await self.send_to_address_and_wait(address, 10, 6) + print("sent to address and waited", perf_counter() - start) + + server_tmp_dir = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, server_tmp_dir) + self.server_config = Config( + data_dir=server_tmp_dir, + wallet_dir=server_tmp_dir, + save_files=True, + download_dir=server_tmp_dir + ) + self.server_config.transaction_cache_size = 10000 + self.server_storage = SQLiteStorage(self.server_config, ':memory:') + await self.server_storage.open() + + self.server_blob_manager = BlobManager(self.loop, server_tmp_dir, self.server_storage, self.server_config) + self.server = BlobServer(self.loop, self.server_blob_manager, 'bQEaw42GXsgCAGio1nxFncJSyRmnztSCjP') + self.server.start_server(5567, '127.0.0.1') + await self.server.started_listening.wait() + + self.reflector = ReflectorServer(self.server_blob_manager) + self.reflector.start_server(5566, '127.0.0.1') + await self.reflector.started_listening.wait() + self.addCleanup(self.reflector.stop_server) + + async def asyncTearDown(self): + await super().asyncTearDown() + for wallet_node in self.extra_wallet_nodes: + await wallet_node.stop(cleanup=True) + for daemon in self.daemons: + daemon.component_manager.get_component('wallet')._running = False + await daemon.stop() + + async def add_daemon(self, wallet_node=None, seed=None): + start_wallet_node = False + if wallet_node is None: + wallet_node = WalletNode( + self.wallet_node.manager_class, + self.wallet_node.ledger_class, + port=self.extra_wallet_node_port + ) + self.extra_wallet_node_port += 1 + start_wallet_node = True + + upload_dir = os.path.join(wallet_node.data_path, 'uploads') + os.mkdir(upload_dir) + + conf = Config( + # needed during instantiation to access known_hubs path + data_dir=wallet_node.data_path, + wallet_dir=wallet_node.data_path, + save_files=True, + download_dir=wallet_node.data_path + ) + conf.upload_dir = upload_dir # not a real conf setting + conf.share_usage_data = False + conf.use_upnp = False + conf.reflect_streams = True + conf.blockchain_name = 'lbrycrd_regtest' + conf.lbryum_servers = [(self.conductor.spv_node.hostname, self.conductor.spv_node.port)] + conf.reflector_servers = [('127.0.0.1', 5566)] + conf.fixed_peers = [('127.0.0.1', 5567)] + conf.known_dht_nodes = [] + conf.blob_lru_cache_size = self.blob_lru_cache_size + conf.transaction_cache_size = 10000 + conf.components_to_skip = [ + DHT_COMPONENT, UPNP_COMPONENT, HASH_ANNOUNCER_COMPONENT, + PEER_PROTOCOL_SERVER_COMPONENT + ] + if self.skip_libtorrent: + conf.components_to_skip.append(LIBTORRENT_COMPONENT) + + if start_wallet_node: + await wallet_node.start(self.conductor.spv_node, seed=seed, config=conf) + self.extra_wallet_nodes.append(wallet_node) + else: + wallet_node.manager.config = conf + wallet_node.manager.ledger.config['known_hubs'] = conf.known_hubs + + def wallet_maker(component_manager): + wallet_component = WalletComponent(component_manager) + wallet_component.wallet_manager = wallet_node.manager + wallet_component._running = True + return wallet_component + + daemon = Daemon(conf, ComponentManager( + conf, skip_components=conf.components_to_skip, wallet=wallet_maker, + exchange_rate_manager=partial(ExchangeRateManagerComponent, rates={ + 'BTCLBC': 1.0, 'USDLBC': 2.0 + }) + )) + await daemon.initialize() + self.daemons.append(daemon) + wallet_node.manager.old_db = daemon.storage + return daemon + + async def confirm_tx(self, txid, ledger=None): + """ Wait for tx to be in mempool, then generate a block, wait for tx to be in a block. 
""" + # await (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid) + on_tx = (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid) + await asyncio.wait([self.generate(1), on_tx], timeout=5) + + # # actually, if it's in the mempool or in the block we're fine + # await self.generate_and_wait(1, [txid], ledger=ledger) + # return txid + + return txid + + async def on_transaction_dict(self, tx): + await self.ledger.wait(Transaction(unhexlify(tx['hex']))) + + @staticmethod + def get_all_addresses(tx): + addresses = set() + for txi in tx['inputs']: + addresses.add(txi['address']) + for txo in tx['outputs']: + addresses.add(txo['address']) + return list(addresses) + + async def blockchain_claim_name(self, name: str, value: str, amount: str, confirm=True): + txid = await self.blockchain._cli_cmnd('claimname', name, value, amount) + if confirm: + await self.generate(1) + return txid + + async def blockchain_update_name(self, txid: str, value: str, amount: str, confirm=True): + txid = await self.blockchain._cli_cmnd('updateclaim', txid, value, amount) + if confirm: + await self.generate(1) + return txid + + async def out(self, awaitable): + """ Serializes lbrynet API results to JSON then loads and returns it as dictionary. """ + return json.loads(jsonrpc_dumps_pretty(await awaitable, ledger=self.ledger))['result'] + + def sout(self, value): + """ Synchronous version of `out` method. """ + return json.loads(jsonrpc_dumps_pretty(value, ledger=self.ledger))['result'] + + async def confirm_and_render(self, awaitable, confirm, return_tx=False) -> Transaction: + tx = await awaitable + if confirm: + await self.ledger.wait(tx) + await self.generate(1) + await self.ledger.wait(tx, self.blockchain.block_expected) + if not return_tx: + return self.sout(tx) + return tx + + async def create_nondeterministic_channel(self, name, price, pubkey_bytes, daemon=None, blocking=False): + account = (daemon or self.daemon).wallet_manager.default_account + claim_address = await account.receiving.get_or_create_usable_address() + claim = Claim() + claim.channel.public_key_bytes = pubkey_bytes + tx = await Transaction.claim_create( + name, claim, lbc_to_dewies(price), + claim_address, [self.account], self.account + ) + await tx.sign([self.account]) + await (daemon or self.daemon).broadcast_or_release(tx, blocking) + return self.sout(tx) + + def create_upload_file(self, data, prefix=None, suffix=None): + file_path = tempfile.mktemp(prefix=prefix or "tmp", suffix=suffix or "", dir=self.daemon.conf.upload_dir) + with open(file_path, 'w+b') as file: + file.write(data) + file.flush() + return file.name + + async def stream_create( + self, name='hovercraft', bid='1.0', file_path=None, + data=b'hi!', confirm=True, prefix=None, suffix=None, return_tx=False, **kwargs): + if file_path is None and data is not None: + file_path = self.create_upload_file(data=data, prefix=prefix, suffix=suffix) + return await self.confirm_and_render( + self.daemon.jsonrpc_stream_create(name, bid, file_path=file_path, **kwargs), confirm, return_tx + ) + + async def stream_update( + self, claim_id, data=None, prefix=None, suffix=None, confirm=True, return_tx=False, **kwargs): + if data is not None: + file_path = self.create_upload_file(data=data, prefix=prefix, suffix=suffix) + return await self.confirm_and_render( + self.daemon.jsonrpc_stream_update(claim_id, file_path=file_path, **kwargs), confirm, return_tx + ) + return await self.confirm_and_render( + self.daemon.jsonrpc_stream_update(claim_id, **kwargs), confirm + ) + 
+ async def stream_repost(self, claim_id, name='repost', bid='1.0', confirm=True, **kwargs): + return await self.confirm_and_render( + self.daemon.jsonrpc_stream_repost(claim_id=claim_id, name=name, bid=bid, **kwargs), confirm + ) + + async def stream_abandon(self, *args, confirm=True, **kwargs): + if 'blocking' not in kwargs: + kwargs['blocking'] = False + return await self.confirm_and_render( + self.daemon.jsonrpc_stream_abandon(*args, **kwargs), confirm + ) + + async def purchase_create(self, *args, confirm=True, **kwargs): + return await self.confirm_and_render( + self.daemon.jsonrpc_purchase_create(*args, **kwargs), confirm + ) + + async def publish(self, name, *args, confirm=True, **kwargs): + return await self.confirm_and_render( + self.daemon.jsonrpc_publish(name, *args, **kwargs), confirm + ) + + async def channel_create(self, name='@arena', bid='1.0', confirm=True, **kwargs): + return await self.confirm_and_render( + self.daemon.jsonrpc_channel_create(name, bid, **kwargs), confirm + ) + + async def channel_update(self, claim_id, confirm=True, **kwargs): + return await self.confirm_and_render( + self.daemon.jsonrpc_channel_update(claim_id, **kwargs), confirm + ) + + async def channel_abandon(self, *args, confirm=True, **kwargs): + if 'blocking' not in kwargs: + kwargs['blocking'] = False + return await self.confirm_and_render( + self.daemon.jsonrpc_channel_abandon(*args, **kwargs), confirm + ) + + async def collection_create( + self, name='firstcollection', bid='1.0', confirm=True, **kwargs): + return await self.confirm_and_render( + self.daemon.jsonrpc_collection_create(name, bid, **kwargs), confirm + ) + + async def collection_update( + self, claim_id, confirm=True, **kwargs): + return await self.confirm_and_render( + self.daemon.jsonrpc_collection_update(claim_id, **kwargs), confirm + ) + + async def collection_abandon(self, *args, confirm=True, **kwargs): + if 'blocking' not in kwargs: + kwargs['blocking'] = False + return await self.confirm_and_render( + self.daemon.jsonrpc_stream_abandon(*args, **kwargs), confirm + ) + + async def support_create(self, claim_id, bid='1.0', confirm=True, **kwargs): + return await self.confirm_and_render( + self.daemon.jsonrpc_support_create(claim_id, bid, **kwargs), confirm + ) + + async def support_abandon(self, *args, confirm=True, **kwargs): + if 'blocking' not in kwargs: + kwargs['blocking'] = False + return await self.confirm_and_render( + self.daemon.jsonrpc_support_abandon(*args, **kwargs), confirm + ) + + async def account_send(self, *args, confirm=True, **kwargs): + return await self.confirm_and_render( + self.daemon.jsonrpc_account_send(*args, **kwargs), confirm + ) + + async def wallet_send(self, *args, confirm=True, **kwargs): + return await self.confirm_and_render( + self.daemon.jsonrpc_wallet_send(*args, **kwargs), confirm + ) + + async def txo_spend(self, *args, confirm=True, **kwargs): + txs = await self.daemon.jsonrpc_txo_spend(*args, **kwargs) + if confirm: + await asyncio.wait([self.ledger.wait(tx) for tx in txs]) + await self.generate(1) + await asyncio.wait([self.ledger.wait(tx, self.blockchain.block_expected) for tx in txs]) + return self.sout(txs) + + async def blob_clean(self): + return await self.out(self.daemon.jsonrpc_blob_clean()) + + async def status(self): + return await self.out(self.daemon.jsonrpc_status()) + + async def resolve(self, uri, **kwargs): + return (await self.out(self.daemon.jsonrpc_resolve(uri, **kwargs)))[uri] + + async def claim_search(self, **kwargs): + return (await 
self.out(self.daemon.jsonrpc_claim_search(**kwargs)))['items'] + + async def get_claim_by_claim_id(self, claim_id): + return await self.out(self.ledger.get_claim_by_claim_id(claim_id)) + + async def file_list(self, *args, **kwargs): + return (await self.out(self.daemon.jsonrpc_file_list(*args, **kwargs)))['items'] + + async def txo_list(self, *args, **kwargs): + return (await self.out(self.daemon.jsonrpc_txo_list(*args, **kwargs)))['items'] + + async def txo_sum(self, *args, **kwargs): + return await self.out(self.daemon.jsonrpc_txo_sum(*args, **kwargs)) + + async def txo_plot(self, *args, **kwargs): + return await self.out(self.daemon.jsonrpc_txo_plot(*args, **kwargs)) + + async def claim_list(self, *args, **kwargs): + return (await self.out(self.daemon.jsonrpc_claim_list(*args, **kwargs)))['items'] + + async def stream_list(self, *args, **kwargs): + return (await self.out(self.daemon.jsonrpc_stream_list(*args, **kwargs)))['items'] + + async def channel_list(self, *args, **kwargs): + return (await self.out(self.daemon.jsonrpc_channel_list(*args, **kwargs)))['items'] + + async def transaction_list(self, *args, **kwargs): + return (await self.out(self.daemon.jsonrpc_transaction_list(*args, **kwargs)))['items'] + + async def blob_list(self, *args, **kwargs): + return (await self.out(self.daemon.jsonrpc_blob_list(*args, **kwargs)))['items'] + + @staticmethod + def get_claim_id(tx): + return tx['outputs'][0]['claim_id'] + + def assertItemCount(self, result, count): # pylint: disable=invalid-name + self.assertEqual(count, result['total_items'])