Merge branch 'autogen_docs'

* autogen_docs:
  Consolidated all 3 scripts into 1, better error messages
  ImportError checking and kill process after completion
  Review and script fixes
  Added tests for removing short args in cli
  Added scripts to autogenerate docs and api from docstring

commit 59d56acf09 by Alex Grintsvayg, 2018-03-05 18:19:06 -05:00
38 changed files with 6279 additions and 2164 deletions

CHANGELOG.md

@@ -13,7 +13,7 @@ at anytime.
*
### Fixed
*
* fixed the inconsistencies in API and CLI docstrings
*
### Deprecated
@@ -23,13 +23,12 @@ at anytime.
### Changed
*
*
### Added
*
* scripts to autogenerate documentation
*
### Removed
*
* short (single-dashed) arguments for `lbrynet-cli`
*
@@ -105,7 +104,6 @@ at anytime.
* old storage classes used by the file manager, wallet, and blob manager
* old `.db` database files from the data directory
## [0.18.0] - 2017-11-08
### Fixed
* Fixed amount of close nodes to add to list in case of extension to neighbouring k-buckets

docs/404.html (new file, 315 lines)

@@ -0,0 +1,315 @@
<!DOCTYPE html>
<html lang="en" class="no-js">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width,initial-scale=1">
<meta http-equiv="x-ua-compatible" content="ie=edge">
<meta name="lang:clipboard.copy" content="Copy to clipboard">
<meta name="lang:clipboard.copied" content="Copied to clipboard">
<meta name="lang:search.language" content="en">
<meta name="lang:search.pipeline.stopwords" content="True">
<meta name="lang:search.pipeline.trimmer" content="True">
<meta name="lang:search.result.none" content="No matching documents">
<meta name="lang:search.result.one" content="1 matching document">
<meta name="lang:search.result.other" content="# matching documents">
<meta name="lang:search.tokenizer" content="[\s\-]+">
<link rel="shortcut icon" href="/assets/images/favicon.png">
<meta name="generator" content="mkdocs-0.17.2, mkdocs-material-2.6.6">
<title>LBRY</title>
<link rel="stylesheet" href="/assets/stylesheets/application.78aab2dc.css">
<script src="/assets/javascripts/modernizr.1aa3b519.js"></script>
<link href="https://fonts.gstatic.com" rel="preconnect" crossorigin>
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,400i,700|Roboto+Mono">
<style>body,input{font-family:"Roboto","Helvetica Neue",Helvetica,Arial,sans-serif}code,kbd,pre{font-family:"Roboto Mono","Courier New",Courier,monospace}</style>
<link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons">
</head>
<body dir="ltr">
<svg class="md-svg">
<defs>
<svg xmlns="http://www.w3.org/2000/svg" width="416" height="448"
viewBox="0 0 416 448" id="github">
<path fill="currentColor" d="M160 304q0 10-3.125 20.5t-10.75 19-18.125
8.5-18.125-8.5-10.75-19-3.125-20.5 3.125-20.5 10.75-19 18.125-8.5
18.125 8.5 10.75 19 3.125 20.5zM320 304q0 10-3.125 20.5t-10.75
19-18.125 8.5-18.125-8.5-10.75-19-3.125-20.5 3.125-20.5 10.75-19
18.125-8.5 18.125 8.5 10.75 19 3.125 20.5zM360
304q0-30-17.25-51t-46.75-21q-10.25 0-48.75 5.25-17.75 2.75-39.25
2.75t-39.25-2.75q-38-5.25-48.75-5.25-29.5 0-46.75 21t-17.25 51q0 22 8
38.375t20.25 25.75 30.5 15 35 7.375 37.25 1.75h42q20.5 0
37.25-1.75t35-7.375 30.5-15 20.25-25.75 8-38.375zM416 260q0 51.75-15.25
82.75-9.5 19.25-26.375 33.25t-35.25 21.5-42.5 11.875-42.875 5.5-41.75
1.125q-19.5 0-35.5-0.75t-36.875-3.125-38.125-7.5-34.25-12.875-30.25-20.25-21.5-28.75q-15.5-30.75-15.5-82.75
0-59.25 34-99-6.75-20.5-6.75-42.5 0-29 12.75-54.5 27 0 47.5 9.875t47.25
30.875q36.75-8.75 77.25-8.75 37 0 70 8 26.25-20.5
46.75-30.25t47.25-9.75q12.75 25.5 12.75 54.5 0 21.75-6.75 42 34 40 34
99.5z" />
</svg>
</defs>
</svg>
<input class="md-toggle" data-md-toggle="drawer" type="checkbox" id="drawer">
<input class="md-toggle" data-md-toggle="search" type="checkbox" id="search">
<label class="md-overlay" data-md-component="overlay" for="drawer"></label>
<header class="md-header" data-md-component="header">
<nav class="md-header-nav md-grid">
<div class="md-flex">
<div class="md-flex__cell md-flex__cell--shrink">
<a href="/" title="LBRY" class="md-header-nav__button md-logo">
<i class="md-icon"></i>
</a>
</div>
<div class="md-flex__cell md-flex__cell--shrink">
<label class="md-icon md-icon--menu md-header-nav__button" for="drawer"></label>
</div>
<div class="md-flex__cell md-flex__cell--stretch">
<div class="md-flex__ellipsis md-header-nav__title" data-md-component="title">
<span class="md-header-nav__topic">
LBRY
</span>
<span class="md-header-nav__topic">
</span>
</div>
</div>
<div class="md-flex__cell md-flex__cell--shrink">
<label class="md-icon md-icon--search md-header-nav__button" for="search"></label>
<div class="md-search" data-md-component="search" role="dialog">
<label class="md-search__overlay" for="search"></label>
<div class="md-search__inner" role="search">
<form class="md-search__form" name="search">
<input type="text" class="md-search__input" name="query" placeholder="Search" autocapitalize="off" autocorrect="off" autocomplete="off" spellcheck="false" data-md-component="query" data-md-state="active">
<label class="md-icon md-search__icon" for="search"></label>
<button type="reset" class="md-icon md-search__icon" data-md-component="reset" tabindex="-1">
&#xE5CD;
</button>
</form>
<div class="md-search__output">
<div class="md-search__scrollwrap" data-md-scrollfix>
<div class="md-search-result" data-md-component="result">
<div class="md-search-result__meta">
Type to start searching
</div>
<ol class="md-search-result__list"></ol>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="md-flex__cell md-flex__cell--shrink">
<div class="md-header-nav__source">
<a href="https://github.com/lbryio/lbry/" title="Go to repository" class="md-source" data-md-source="github">
<div class="md-source__icon">
<svg viewBox="0 0 24 24" width="24" height="24">
<use xlink:href="#github" width="24" height="24"></use>
</svg>
</div>
<div class="md-source__repository">
GitHub
</div>
</a>
</div>
</div>
</div>
</nav>
</header>
<div class="md-container">
<main class="md-main">
<div class="md-main__inner md-grid" data-md-component="container">
<div class="md-sidebar md-sidebar--primary" data-md-component="navigation">
<div class="md-sidebar__scrollwrap">
<div class="md-sidebar__inner">
<nav class="md-nav md-nav--primary" data-md-level="0">
<label class="md-nav__title md-nav__title--site" for="drawer">
<span class="md-nav__button md-logo">
<i class="md-icon"></i>
</span>
LBRY
</label>
<div class="md-nav__source">
<a href="https://github.com/lbryio/lbry/" title="Go to repository" class="md-source" data-md-source="github">
<div class="md-source__icon">
<svg viewBox="0 0 24 24" width="24" height="24">
<use xlink:href="#github" width="24" height="24"></use>
</svg>
</div>
<div class="md-source__repository">
GitHub
</div>
</a>
</div>
<ul class="md-nav__list" data-md-scrollfix>
<li class="md-nav__item">
<a href="/" title="API" class="md-nav__link">
API
</a>
</li>
<li class="md-nav__item">
<a href="/cli/" title="CLI" class="md-nav__link">
CLI
</a>
</li>
</ul>
</nav>
</div>
</div>
</div>
<div class="md-content">
<article class="md-content__inner md-typeset">
<h1>404 - Not found</h1>
</article>
</div>
</div>
</main>
<footer class="md-footer">
<div class="md-footer-meta md-typeset">
<div class="md-footer-meta__inner md-grid">
<div class="md-footer-copyright">
powered by
<a href="http://www.mkdocs.org">MkDocs</a>
and
<a href="https://squidfunk.github.io/mkdocs-material/">
Material for MkDocs</a>
</div>
<div class="md-footer-social">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<a href="https://github.com/lbryio" class="md-footer-social__link fa fa-github"></a>
<a href="https://twitter.com/lbryio" class="md-footer-social__link fa fa-twitter"></a>
<a href="https://facebook.com/lbryio" class="md-footer-social__link fa fa-facebook"></a>
</div>
</div>
</div>
</footer>
</div>
<script src="/assets/javascripts/application.02434462.js"></script>
<script>app.initialize({version:"0.17.2",url:{base:""}})</script>
<script>!function(e,a,t,n,o,c,i){e.GoogleAnalyticsObject=o,e.ga=e.ga||function(){(e.ga.q=e.ga.q||[]).push(arguments)},e.ga.l=1*new Date,c=a.createElement(t),i=a.getElementsByTagName(t)[0],c.async=1,c.src="https://www.google-analytics.com/analytics.js",i.parentNode.insertBefore(c,i)}(window,document,"script",0,"ga"),ga("create","UA-60403362-1","auto"),ga("set","anonymizeIp",!0),ga("send","pageview");var links=document.getElementsByTagName("a");if(Array.prototype.map.call(links,function(e){e.host!=document.location.host&&e.addEventListener("click",function(){var a=e.getAttribute("data-md-action")||"follow";ga("send","event","outbound",a,e.href)})}),document.forms.search){var query=document.forms.search.query;query.addEventListener("blur",function(){if(this.value){var e=document.location.pathname;ga("send","pageview",e+"?q="+this.value)}})}</script>
</body>
</html>

Binary file not shown (new binary image, 521 B).


@@ -0,0 +1,20 @@
<svg xmlns="http://www.w3.org/2000/svg" width="352" height="448"
viewBox="0 0 352 448" id="bitbucket">
<path fill="currentColor" d="M203.75 214.75q2 15.75-12.625 25.25t-27.875
1.5q-9.75-4.25-13.375-14.5t-0.125-20.5 13-14.5q9-4.5 18.125-3t16 8.875
6.875 16.875zM231.5 209.5q-3.5-26.75-28.25-41t-49.25-3.25q-15.75
7-25.125 22.125t-8.625 32.375q1 22.75 19.375 38.75t41.375 14q22.75-2
38-21t12.5-42zM291.25
74q-5-6.75-14-11.125t-14.5-5.5-17.75-3.125q-72.75-11.75-141.5 0.5-10.75
1.75-16.5 3t-13.75 5.5-12.5 10.75q7.5 7 19 11.375t18.375 5.5 21.875
2.875q57 7.25 112 0.25 15.75-2 22.375-3t18.125-5.375 18.75-11.625zM305.5
332.75q-2 6.5-3.875 19.125t-3.5 21-7.125 17.5-14.5 14.125q-21.5
12-47.375 17.875t-50.5 5.5-50.375-4.625q-11.5-2-20.375-4.5t-19.125-6.75-18.25-10.875-13-15.375q-6.25-24-14.25-73l1.5-4
4.5-2.25q55.75 37 126.625 37t126.875-37q5.25 1.5 6 5.75t-1.25 11.25-2
9.25zM350.75 92.5q-6.5 41.75-27.75 163.75-1.25 7.5-6.75 14t-10.875
10-13.625 7.75q-63 31.5-152.5
22-62-6.75-98.5-34.75-3.75-3-6.375-6.625t-4.25-8.75-2.25-8.5-1.5-9.875-1.375-8.75q-2.25-12.5-6.625-37.5t-7-40.375-5.875-36.875-5.5-39.5q0.75-6.5
4.375-12.125t7.875-9.375 11.25-7.5 11.5-5.625 12-4.625q31.25-11.5
78.25-16 94.75-9.25 169 12.5 38.75 11.5 53.75 30.5 4 5 4.125
12.75t-1.375 13.5z" />
</svg>



@@ -0,0 +1,18 @@
<svg xmlns="http://www.w3.org/2000/svg" width="416" height="448"
viewBox="0 0 416 448" id="github">
<path fill="currentColor" d="M160 304q0 10-3.125 20.5t-10.75 19-18.125
8.5-18.125-8.5-10.75-19-3.125-20.5 3.125-20.5 10.75-19 18.125-8.5
18.125 8.5 10.75 19 3.125 20.5zM320 304q0 10-3.125 20.5t-10.75
19-18.125 8.5-18.125-8.5-10.75-19-3.125-20.5 3.125-20.5 10.75-19
18.125-8.5 18.125 8.5 10.75 19 3.125 20.5zM360
304q0-30-17.25-51t-46.75-21q-10.25 0-48.75 5.25-17.75 2.75-39.25
2.75t-39.25-2.75q-38-5.25-48.75-5.25-29.5 0-46.75 21t-17.25 51q0 22 8
38.375t20.25 25.75 30.5 15 35 7.375 37.25 1.75h42q20.5 0
37.25-1.75t35-7.375 30.5-15 20.25-25.75 8-38.375zM416 260q0 51.75-15.25
82.75-9.5 19.25-26.375 33.25t-35.25 21.5-42.5 11.875-42.875 5.5-41.75
1.125q-19.5 0-35.5-0.75t-36.875-3.125-38.125-7.5-34.25-12.875-30.25-20.25-21.5-28.75q-15.5-30.75-15.5-82.75
0-59.25 34-99-6.75-20.5-6.75-42.5 0-29 12.75-54.5 27 0 47.5 9.875t47.25
30.875q36.75-8.75 77.25-8.75 37 0 70 8 26.25-20.5
46.75-30.25t47.25-9.75q12.75 25.5 12.75 54.5 0 21.75-6.75 42 34 40 34
99.5z" />
</svg>



@@ -0,0 +1,38 @@
<svg xmlns="http://www.w3.org/2000/svg" width="500" height="500"
viewBox="0 0 500 500" id="gitlab">
<g transform="translate(156.197863, 1.160267)">
<path fill="currentColor"
d="M93.667,473.347L93.667,473.347l90.684-279.097H2.983L93.667,
473.347L93.667,473.347z" />
</g>
<g transform="translate(28.531199, 1.160800)" opacity="0.7">
<path fill="currentColor"
d="M221.333,473.345L130.649,194.25H3.557L221.333,473.345L221.333,
473.345z" />
</g>
<g transform="translate(0.088533, 0.255867)" opacity="0.5">
<path fill="currentColor"
d="M32,195.155L32,195.155L4.441,279.97c-2.513,7.735,0.24,16.21,6.821,
20.99l238.514,173.29 L32,195.155L32,195.155z" />
</g>
<g transform="translate(29.421866, 280.255593)">
<path fill="currentColor"
d="M2.667-84.844h127.092L75.14-252.942c-2.811-8.649-15.047-8.649-17.856,
0L2.667-84.844 L2.667-84.844z" />
</g>
<g transform="translate(247.197860, 1.160800)" opacity="0.7">
<path fill="currentColor"
d="M2.667,473.345L93.351,194.25h127.092L2.667,473.345L2.667,
473.345z" />
</g>
<g transform="translate(246.307061, 0.255867)" opacity="0.5">
<path fill="currentColor"
d="M221.334,195.155L221.334,195.155l27.559,84.815c2.514,7.735-0.24,
16.21-6.821,20.99 L3.557,474.25L221.334,195.155L221.334,195.155z" />
</g>
<g transform="translate(336.973725, 280.255593)">
<path fill="currentColor"
d="M130.667-84.844H3.575l54.618-168.098c2.811-8.649,15.047-8.649,
17.856,0L130.667-84.844 L130.667-84.844z" />
</g>
</svg>


File diff suppressed because one or more lines are too long


@@ -0,0 +1 @@
!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r,i,n;e.da=function(){this.pipeline.reset(),this.pipeline.add(e.da.trimmer,e.da.stopWordFilter,e.da.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.da.stemmer))},e.da.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA--",e.da.trimmer=e.trimmerSupport.generateTrimmer(e.da.wordCharacters),e.Pipeline.registerFunction(e.da.trimmer,"trimmer-da"),e.da.stemmer=(r=e.stemmerSupport.Among,i=e.stemmerSupport.SnowballProgram,n=new function(){var e,n,t,s=[new r("hed",-1,1),new r("ethed",0,1),new r("ered",-1,1),new r("e",-1,1),new r("erede",3,1),new r("ende",3,1),new r("erende",5,1),new r("ene",3,1),new r("erne",3,1),new r("ere",3,1),new r("en",-1,1),new r("heden",10,1),new r("eren",10,1),new r("er",-1,1),new r("heder",13,1),new r("erer",13,1),new r("s",-1,2),new r("heds",16,1),new r("es",16,1),new r("endes",18,1),new r("erendes",19,1),new r("enes",18,1),new r("ernes",18,1),new r("eres",18,1),new r("ens",16,1),new r("hedens",24,1),new r("erens",24,1),new r("ers",16,1),new r("ets",16,1),new r("erets",28,1),new r("et",-1,1),new r("eret",30,1)],o=[new r("gd",-1,-1),new r("dt",-1,-1),new r("gt",-1,-1),new r("kt",-1,-1)],a=[new r("ig",-1,1),new r("lig",0,1),new r("elig",1,1),new r("els",-1,1),new r("løst",-1,2)],d=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128],u=[239,254,42,3,0,0,0,0,0,0,0,0,0,0,0,0,16],c=new i;function l(){var e,r=c.limit-c.cursor;c.cursor>=n&&(e=c.limit_backward,c.limit_backward=n,c.ket=c.cursor,c.find_among_b(o,4)?(c.bra=c.cursor,c.limit_backward=e,c.cursor=c.limit-r,c.cursor>c.limit_backward&&(c.cursor--,c.bra=c.cursor,c.slice_del())):c.limit_backward=e)}this.setCurrent=function(e){c.setCurrent(e)},this.getCurrent=function(){return c.getCurrent()},this.stem=function(){var r,i=c.cursor;return function(){var r,i=c.cursor+3;if(n=c.limit,0<=i&&i<=c.limit){for(e=i;;){if(r=c.cursor,c.in_grouping(d,97,248)){c.cursor=r;break}if(c.cursor=r,r>=c.limit)return;c.cursor++}for(;!c.out_grouping(d,97,248);){if(c.cursor>=c.limit)return;c.cursor++}(n=c.cursor)<e&&(n=e)}}(),c.limit_backward=i,c.cursor=c.limit,function(){var e,r;if(c.cursor>=n&&(r=c.limit_backward,c.limit_backward=n,c.ket=c.cursor,e=c.find_among_b(s,32),c.limit_backward=r,e))switch(c.bra=c.cursor,e){case 1:c.slice_del();break;case 2:c.in_grouping_b(u,97,229)&&c.slice_del()}}(),c.cursor=c.limit,l(),c.cursor=c.limit,function(){var e,r,i,t=c.limit-c.cursor;if(c.ket=c.cursor,c.eq_s_b(2,"st")&&(c.bra=c.cursor,c.eq_s_b(2,"ig")&&c.slice_del()),c.cursor=c.limit-t,c.cursor>=n&&(r=c.limit_backward,c.limit_backward=n,c.ket=c.cursor,e=c.find_among_b(a,5),c.limit_backward=r,e))switch(c.bra=c.cursor,e){case 1:c.slice_del(),i=c.limit-c.cursor,l(),c.cursor=c.limit-i;break;case 2:c.slice_from("løs")}}(),c.cursor=c.limit,c.cursor>=n&&(r=c.limit_backward,c.limit_backward=n,c.ket=c.cursor,c.out_grouping_b(d,97,248)?(c.bra=c.cursor,t=c.slice_to(t),c.limit_backward=r,c.eq_v_b(t)&&c.slice_del()):c.limit_backward=r),!0}},function(e){return"function"==typeof e.update?e.update(function(e){return 
n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}),e.Pipeline.registerFunction(e.da.stemmer,"stemmer-da"),e.da.stopWordFilter=e.generateStopWordFilter("ad af alle alt anden at blev blive bliver da de dem den denne der deres det dette dig din disse dog du efter eller en end er et for fra ham han hans har havde have hende hendes her hos hun hvad hvis hvor i ikke ind jeg jer jo kunne man mange med meget men mig min mine mit mod ned noget nogle nu når og også om op os over på selv sig sin sine sit skal skulle som sådan thi til ud under var vi vil ville vor være været".split(" ")),e.Pipeline.registerFunction(e.da.stopWordFilter,"stopWordFilter-da")}});

7 file diffs suppressed because one or more lines are too long.


@@ -0,0 +1 @@
!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r="2"==e.version[0];e.jp=function(){this.pipeline.reset(),this.pipeline.add(e.jp.stopWordFilter,e.jp.stemmer),r?this.tokenizer=e.jp.tokenizer:(e.tokenizer&&(e.tokenizer=e.jp.tokenizer),this.tokenizerFn&&(this.tokenizerFn=e.jp.tokenizer))};var t=new e.TinySegmenter;e.jp.tokenizer=function(n){if(!arguments.length||null==n||null==n)return[];if(Array.isArray(n))return n.map(function(t){return r?new e.Token(t.toLowerCase()):t.toLowerCase()});for(var i=n.toString().toLowerCase().replace(/^\s+/,""),o=i.length-1;o>=0;o--)if(/\S/.test(i.charAt(o))){i=i.substring(0,o+1);break}return t.segment(i).filter(function(e){return!!e}).map(function(t){return r?new e.Token(t):t})},e.jp.stemmer=function(e){return e},e.Pipeline.registerFunction(e.jp.stemmer,"stemmer-jp"),e.jp.wordCharacters="一二三四五六七八九十百千万億兆一-龠々〆ヵヶぁ-んァ-ヴーア-ン゙a-zA-Z--0-9-",e.jp.stopWordFilter=function(t){if(-1===e.jp.stopWordFilter.stopWords.indexOf(r?t.toString():t))return t},e.jp.stopWordFilter=e.generateStopWordFilter("これ それ あれ この その あの ここ そこ あそこ こちら どこ だれ なに なん 何 私 貴方 貴方方 我々 私達 あの人 あのかた 彼女 彼 です あります おります います は が の に を で え から まで より も どの と し それで しかし".split(" ")),e.Pipeline.registerFunction(e.jp.stopWordFilter,"stopWordFilter-jp")}});


@@ -0,0 +1 @@
!function(e,i){"function"==typeof define&&define.amd?define(i):"object"==typeof exports?module.exports=i():i()(e.lunr)}(this,function(){return function(e){e.multiLanguage=function(){for(var i=Array.prototype.slice.call(arguments),t=i.join("-"),r="",n=[],s=[],p=0;p<i.length;++p)"en"==i[p]?(r+="\\w",n.unshift(e.stopWordFilter),n.push(e.stemmer),s.push(e.stemmer)):(r+=e[i[p]].wordCharacters,n.unshift(e[i[p]].stopWordFilter),n.push(e[i[p]].stemmer),s.push(e[i[p]].stemmer));var o=e.trimmerSupport.generateTrimmer(r);return e.Pipeline.registerFunction(o,"lunr-multi-trimmer-"+t),n.unshift(o),function(){this.pipeline.reset(),this.pipeline.add.apply(this.pipeline,n),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add.apply(this.searchPipeline,s))}}}});


@@ -0,0 +1 @@
!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r,n,i;e.no=function(){this.pipeline.reset(),this.pipeline.add(e.no.trimmer,e.no.stopWordFilter,e.no.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.no.stemmer))},e.no.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA--",e.no.trimmer=e.trimmerSupport.generateTrimmer(e.no.wordCharacters),e.Pipeline.registerFunction(e.no.trimmer,"trimmer-no"),e.no.stemmer=(r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,i=new function(){var e,i,t=[new r("a",-1,1),new r("e",-1,1),new r("ede",1,1),new r("ande",1,1),new r("ende",1,1),new r("ane",1,1),new r("ene",1,1),new r("hetene",6,1),new r("erte",1,3),new r("en",-1,1),new r("heten",9,1),new r("ar",-1,1),new r("er",-1,1),new r("heter",12,1),new r("s",-1,2),new r("as",14,1),new r("es",14,1),new r("edes",16,1),new r("endes",16,1),new r("enes",16,1),new r("hetenes",19,1),new r("ens",14,1),new r("hetens",21,1),new r("ers",14,1),new r("ets",14,1),new r("et",-1,1),new r("het",25,1),new r("ert",-1,3),new r("ast",-1,1)],o=[new r("dt",-1,-1),new r("vt",-1,-1)],s=[new r("leg",-1,1),new r("eleg",0,1),new r("ig",-1,1),new r("eig",2,1),new r("lig",2,1),new r("elig",4,1),new r("els",-1,1),new r("lov",-1,1),new r("elov",7,1),new r("slov",7,1),new r("hetslov",9,1)],a=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128],m=[119,125,149,1],l=new n;this.setCurrent=function(e){l.setCurrent(e)},this.getCurrent=function(){return l.getCurrent()},this.stem=function(){var r,n,u,d,c=l.cursor;return function(){var r,n=l.cursor+3;if(i=l.limit,0<=n||n<=l.limit){for(e=n;;){if(r=l.cursor,l.in_grouping(a,97,248)){l.cursor=r;break}if(r>=l.limit)return;l.cursor=r+1}for(;!l.out_grouping(a,97,248);){if(l.cursor>=l.limit)return;l.cursor++}(i=l.cursor)<e&&(i=e)}}(),l.limit_backward=c,l.cursor=l.limit,function(){var e,r,n;if(l.cursor>=i&&(r=l.limit_backward,l.limit_backward=i,l.ket=l.cursor,e=l.find_among_b(t,29),l.limit_backward=r,e))switch(l.bra=l.cursor,e){case 1:l.slice_del();break;case 2:n=l.limit-l.cursor,l.in_grouping_b(m,98,122)?l.slice_del():(l.cursor=l.limit-n,l.eq_s_b(1,"k")&&l.out_grouping_b(a,97,248)&&l.slice_del());break;case 3:l.slice_from("er")}}(),l.cursor=l.limit,n=l.limit-l.cursor,l.cursor>=i&&(r=l.limit_backward,l.limit_backward=i,l.ket=l.cursor,l.find_among_b(o,2)?(l.bra=l.cursor,l.limit_backward=r,l.cursor=l.limit-n,l.cursor>l.limit_backward&&(l.cursor--,l.bra=l.cursor,l.slice_del())):l.limit_backward=r),l.cursor=l.limit,l.cursor>=i&&(d=l.limit_backward,l.limit_backward=i,l.ket=l.cursor,(u=l.find_among_b(s,11))?(l.bra=l.cursor,l.limit_backward=d,1==u&&l.slice_del()):l.limit_backward=d),!0}},function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}),e.Pipeline.registerFunction(e.no.stemmer,"stemmer-no"),e.no.stopWordFilter=e.generateStopWordFilter("alle at av bare begge ble blei bli blir blitt både båe da de deg dei deim deira deires dem den denne der dere deres det dette di din disse ditt du dykk dykkar då eg ein eit eitt eller elles en enn er et ett etter for fordi fra før ha hadde han 
hans har hennar henne hennes her hjå ho hoe honom hoss hossen hun hva hvem hver hvilke hvilken hvis hvor hvordan hvorfor i ikke ikkje ikkje ingen ingi inkje inn inni ja jeg kan kom korleis korso kun kunne kva kvar kvarhelst kven kvi kvifor man mange me med medan meg meget mellom men mi min mine mitt mot mykje ned no noe noen noka noko nokon nokor nokre nå når og også om opp oss over på samme seg selv si si sia sidan siden sin sine sitt sjøl skal skulle slik so som som somme somt så sånn til um upp ut uten var vart varte ved vere verte vi vil ville vore vors vort vår være være vært å".split(" ")),e.Pipeline.registerFunction(e.no.stopWordFilter,"stopWordFilter-no")}});

3 file diffs suppressed because one or more lines are too long.


@@ -0,0 +1 @@
!function(r,t){"function"==typeof define&&define.amd?define(t):"object"==typeof exports?module.exports=t():t()(r.lunr)}(this,function(){return function(r){r.stemmerSupport={Among:function(r,t,i,s){if(this.toCharArray=function(r){for(var t=r.length,i=new Array(t),s=0;s<t;s++)i[s]=r.charCodeAt(s);return i},!r&&""!=r||!t&&0!=t||!i)throw"Bad Among initialisation: s:"+r+", substring_i: "+t+", result: "+i;this.s_size=r.length,this.s=this.toCharArray(r),this.substring_i=t,this.result=i,this.method=s},SnowballProgram:function(){var r;return{bra:0,ket:0,limit:0,cursor:0,limit_backward:0,setCurrent:function(t){r=t,this.cursor=0,this.limit=t.length,this.limit_backward=0,this.bra=this.cursor,this.ket=this.limit},getCurrent:function(){var t=r;return r=null,t},in_grouping:function(t,i,s){if(this.cursor<this.limit){var e=r.charCodeAt(this.cursor);if(e<=s&&e>=i&&t[(e-=i)>>3]&1<<(7&e))return this.cursor++,!0}return!1},in_grouping_b:function(t,i,s){if(this.cursor>this.limit_backward){var e=r.charCodeAt(this.cursor-1);if(e<=s&&e>=i&&t[(e-=i)>>3]&1<<(7&e))return this.cursor--,!0}return!1},out_grouping:function(t,i,s){if(this.cursor<this.limit){var e=r.charCodeAt(this.cursor);if(e>s||e<i)return this.cursor++,!0;if(!(t[(e-=i)>>3]&1<<(7&e)))return this.cursor++,!0}return!1},out_grouping_b:function(t,i,s){if(this.cursor>this.limit_backward){var e=r.charCodeAt(this.cursor-1);if(e>s||e<i)return this.cursor--,!0;if(!(t[(e-=i)>>3]&1<<(7&e)))return this.cursor--,!0}return!1},eq_s:function(t,i){if(this.limit-this.cursor<t)return!1;for(var s=0;s<t;s++)if(r.charCodeAt(this.cursor+s)!=i.charCodeAt(s))return!1;return this.cursor+=t,!0},eq_s_b:function(t,i){if(this.cursor-this.limit_backward<t)return!1;for(var s=0;s<t;s++)if(r.charCodeAt(this.cursor-t+s)!=i.charCodeAt(s))return!1;return this.cursor-=t,!0},find_among:function(t,i){for(var s=0,e=i,n=this.cursor,u=this.limit,o=0,h=0,c=!1;;){for(var a=s+(e-s>>1),f=0,l=o<h?o:h,_=t[a],m=l;m<_.s_size;m++){if(n+l==u){f=-1;break}if(f=r.charCodeAt(n+l)-_.s[m])break;l++}if(f<0?(e=a,h=l):(s=a,o=l),e-s<=1){if(s>0||e==s||c)break;c=!0}}for(;;){if(o>=(_=t[s]).s_size){if(this.cursor=n+_.s_size,!_.method)return _.result;var b=_.method();if(this.cursor=n+_.s_size,b)return _.result}if((s=_.substring_i)<0)return 0}},find_among_b:function(t,i){for(var s=0,e=i,n=this.cursor,u=this.limit_backward,o=0,h=0,c=!1;;){for(var a=s+(e-s>>1),f=0,l=o<h?o:h,_=(m=t[a]).s_size-1-l;_>=0;_--){if(n-l==u){f=-1;break}if(f=r.charCodeAt(n-1-l)-m.s[_])break;l++}if(f<0?(e=a,h=l):(s=a,o=l),e-s<=1){if(s>0||e==s||c)break;c=!0}}for(;;){var m;if(o>=(m=t[s]).s_size){if(this.cursor=n-m.s_size,!m.method)return m.result;var b=m.method();if(this.cursor=n-m.s_size,b)return m.result}if((s=m.substring_i)<0)return 0}},replace_s:function(t,i,s){var e=s.length-(i-t),n=r.substring(0,t),u=r.substring(i);return r=n+s+u,this.limit+=e,this.cursor>=i?this.cursor+=e:this.cursor>t&&(this.cursor=t),e},slice_check:function(){if(this.bra<0||this.bra>this.ket||this.ket>this.limit||this.limit>r.length)throw"faulty slice operation"},slice_from:function(r){this.slice_check(),this.replace_s(this.bra,this.ket,r)},slice_del:function(){this.slice_from("")},insert:function(r,t,i){var s=this.replace_s(r,t,i);r<=this.bra&&(this.bra+=s),r<=this.ket&&(this.ket+=s)},slice_to:function(){return this.slice_check(),r.substring(this.bra,this.ket)},eq_v_b:function(r){return this.eq_s_b(r.length,r)}}}},r.trimmerSupport={generateTrimmer:function(r){var t=new RegExp("^[^"+r+"]+"),i=new RegExp("[^"+r+"]+$");return function(r){return"function"==typeof 
r.update?r.update(function(r){return r.replace(t,"").replace(i,"")}):r.replace(t,"").replace(i,"")}}}}});


@@ -0,0 +1 @@
!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r,n,t;e.sv=function(){this.pipeline.reset(),this.pipeline.add(e.sv.trimmer,e.sv.stopWordFilter,e.sv.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.sv.stemmer))},e.sv.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA--",e.sv.trimmer=e.trimmerSupport.generateTrimmer(e.sv.wordCharacters),e.Pipeline.registerFunction(e.sv.trimmer,"trimmer-sv"),e.sv.stemmer=(r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,t=new function(){var e,t,i=[new r("a",-1,1),new r("arna",0,1),new r("erna",0,1),new r("heterna",2,1),new r("orna",0,1),new r("ad",-1,1),new r("e",-1,1),new r("ade",6,1),new r("ande",6,1),new r("arne",6,1),new r("are",6,1),new r("aste",6,1),new r("en",-1,1),new r("anden",12,1),new r("aren",12,1),new r("heten",12,1),new r("ern",-1,1),new r("ar",-1,1),new r("er",-1,1),new r("heter",18,1),new r("or",-1,1),new r("s",-1,2),new r("as",21,1),new r("arnas",22,1),new r("ernas",22,1),new r("ornas",22,1),new r("es",21,1),new r("ades",26,1),new r("andes",26,1),new r("ens",21,1),new r("arens",29,1),new r("hetens",29,1),new r("erns",21,1),new r("at",-1,1),new r("andet",-1,1),new r("het",-1,1),new r("ast",-1,1)],s=[new r("dd",-1,-1),new r("gd",-1,-1),new r("nn",-1,-1),new r("dt",-1,-1),new r("gt",-1,-1),new r("kt",-1,-1),new r("tt",-1,-1)],a=[new r("ig",-1,1),new r("lig",0,1),new r("els",-1,1),new r("fullt",-1,3),new r("löst",-1,2)],o=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,24,0,32],u=[119,127,149],m=new n;this.setCurrent=function(e){m.setCurrent(e)},this.getCurrent=function(){return m.getCurrent()},this.stem=function(){var r,n=m.cursor;return function(){var r,n=m.cursor+3;if(t=m.limit,0<=n||n<=m.limit){for(e=n;;){if(r=m.cursor,m.in_grouping(o,97,246)){m.cursor=r;break}if(m.cursor=r,m.cursor>=m.limit)return;m.cursor++}for(;!m.out_grouping(o,97,246);){if(m.cursor>=m.limit)return;m.cursor++}(t=m.cursor)<e&&(t=e)}}(),m.limit_backward=n,m.cursor=m.limit,function(){var e,r=m.limit_backward;if(m.cursor>=t&&(m.limit_backward=t,m.cursor=m.limit,m.ket=m.cursor,e=m.find_among_b(i,37),m.limit_backward=r,e))switch(m.bra=m.cursor,e){case 1:m.slice_del();break;case 2:m.in_grouping_b(u,98,121)&&m.slice_del()}}(),m.cursor=m.limit,r=m.limit_backward,m.cursor>=t&&(m.limit_backward=t,m.cursor=m.limit,m.find_among_b(s,7)&&(m.cursor=m.limit,m.ket=m.cursor,m.cursor>m.limit_backward&&(m.bra=--m.cursor,m.slice_del())),m.limit_backward=r),m.cursor=m.limit,function(){var e,r;if(m.cursor>=t){if(r=m.limit_backward,m.limit_backward=t,m.cursor=m.limit,m.ket=m.cursor,e=m.find_among_b(a,5))switch(m.bra=m.cursor,e){case 1:m.slice_del();break;case 2:m.slice_from("lös");break;case 3:m.slice_from("full")}m.limit_backward=r}}(),!0}},function(e){return"function"==typeof e.update?e.update(function(e){return t.setCurrent(e),t.stem(),t.getCurrent()}):(t.setCurrent(e),t.stem(),t.getCurrent())}),e.Pipeline.registerFunction(e.sv.stemmer,"stemmer-sv"),e.sv.stopWordFilter=e.generateStopWordFilter("alla allt att av blev bli blir blivit de dem den denna deras dess dessa det detta dig din dina ditt du där då efter ej eller en er era ert ett från för ha hade han 
hans har henne hennes hon honom hur här i icke ingen inom inte jag ju kan kunde man med mellan men mig min mina mitt mot mycket ni nu när någon något några och om oss på samma sedan sig sin sina sitta själv skulle som så sådan sådana sådant till under upp ut utan vad var vara varför varit varje vars vart vem vi vid vilka vilkas vilken vilket vår våra vårt än är åt över".split(" ")),e.Pipeline.registerFunction(e.sv.stopWordFilter,"stopWordFilter-sv")}});

5 file diffs suppressed because one or more lines are too long.

File diff suppressed because it is too large.

docs/cli/index.html (new file, 2530 lines)

File diff suppressed because it is too large.

docs/index.html (new file, 2262 lines)

File diff suppressed because it is too large.


@@ -1,759 +0,0 @@
# LBRY JSON-RPC API Documentation
## blob_announce_all
```text
Announce all blobs to the DHT
Args:
None
Returns:
(str) Success/fail message
```
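
These methods are invoked over the daemon's HTTP JSON-RPC interface. Below is a minimal sketch of a call, assuming a locally running daemon; the endpoint URL (`http://localhost:5279/lbryapi`) is an assumption and may differ between releases.

```python
import requests

def lbry_call(method, **params):
    """POST a JSON-RPC request to the local daemon and return its result.

    The endpoint URL is an assumption; adjust it to match your release.
    """
    response = requests.post(
        "http://localhost:5279/lbryapi",
        json={"method": method, "params": params},
    )
    response.raise_for_status()
    return response.json()["result"]

# For example, announce all blobs to the DHT:
print(lbry_call("blob_announce_all"))
```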
## blob_delete
```text
Delete a blob
Args:
'blob_hash': (str) hash of blob to get
Returns:
(str) Success/fail message
```
## blob_get
```text
Download and return a blob
Args:
'blob_hash': (str) blob hash of blob to get
'timeout'(optional): (int) timeout in number of seconds
'encoding'(optional): (str) by default no attempt at decoding is made,
can be set to one of the following decoders:
'json'
'payment_rate_manager'(optional): if not given the default payment rate manager
will be used. supported alternative rate managers:
'only-free'
Returns
(str) Success/Fail message or (dict) decoded data
```
## blob_list
```text
Returns blob hashes. If not given filters, returns all blobs known by the blob manager
Args:
'uri' (optional): (str) filter by blobs in stream for winning claim
'stream_hash' (optional): (str) filter by blobs in given stream hash
'sd_hash' (optional): (str) filter by blobs in given sd hash
'needed' (optional): (bool) only return needed blobs
'finished' (optional): (bool) only return finished blobs
'page_size' (optional): (int) limit number of results returned
'page' (optional): (int) filter to page x of [page_size] results
Returns:
(list) List of blob hashes
```
## blob_reflect_all
```text
Reflects all saved blobs
Args:
None
Returns:
(bool) true if successful
```
## block_show
```text
Get contents of a block
Args:
'blockhash': (str) hash of the block to look up
Returns:
(dict) Requested block
```
## channel_list_mine
```text
Get my channels
Returns:
(list) ClaimDict
```
## channel_new
```text
Generate a publisher key and create a new certificate claim
Args:
'channel_name': (str) '@' prefixed name
'amount': (float) amount to claim name
Returns:
(dict) Dictionary containing result of the claim
{
'tx' : (str) hex encoded transaction
'txid' : (str) txid of resulting claim
'nout' : (int) nout of the resulting claim
'fee' : (float) fee paid for the claim transaction
'claim_id' : (str) claim ID of the resulting claim
}
```
## claim_abandon
```text
Abandon a name and reclaim credits from the claim
Args:
'claim_id': (str) claim_id of claim
Return:
(dict) Dictionary containing result of the claim
{
txid : (str) txid of resulting transaction
fee : (float) fee paid for the transaction
}
```
## claim_list
```text
Get claims for a name
Args:
'name': (str) search for claims on this name
Returns
(dict) State of claims assigned for the name
{
'claims': (list) list of claims for the name
[
{
'amount': (float) amount assigned to the claim
'effective_amount': (float) total amount assigned to the claim,
including supports
'claim_id': (str) claim ID of the claim
'height': (int) height of block containing the claim
'txid': (str) txid of the claim
'nout': (int) nout of the claim
'supports': (list) a list of supports attached to the claim
'value': (str) the value of the claim
},
]
'supports_without_claims': (list) supports without any claims attached to them
'last_takeover_height': (int) the height of last takeover for the name
}
```
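
For example, listing the claim state for a name with the `lbry_call` helper sketched earlier (the name is illustrative):

```python
state = lbry_call("claim_list", name="some-name")
for claim in state["claims"]:
    print(claim["claim_id"], claim["effective_amount"])
print("last takeover height:", state["last_takeover_height"])
```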
## claim_list_mine
```text
List my name claims
Args:
None
Returns
(list) List of name claims owned by user
[
{
'address': (str) address that owns the claim
'amount': (float) amount assigned to the claim
'blocks_to_expiration': (int) number of blocks until it expires
'category': (str) "claim", "update" , or "support"
'claim_id': (str) claim ID of the claim
'confirmations': (int) number of blocks of confirmations for the claim
'expiration_height': (int) the block height which the claim will expire
'expired': (bool) true if expired, false otherwise
'height': (int) height of the block containing the claim
'is_spent': (bool) true if claim is abandoned, false otherwise
'name': (str) name of the claim
'txid': (str) txid of the claim
'nout': (int) nout of the claim
'value': (str) value of the claim
},
]
```
## claim_new_support
```text
Support a name claim
Args:
'name': (str) Name of claim
'claim_id': (str) claim ID of claim to support
'amount': (float) amount to support by
Return:
(dict) Dictionary containing result of the claim
{
txid : (str) txid of resulting support claim
nout : (int) nout of the resulting support claim
fee : (float) fee paid for the transaction
}
```
## claim_show
```text
Resolve claim info from a LBRY name
Args:
'name': (str) name to look up, do not include lbry:// prefix
'txid'(optional): (str) if specified, look for claim with this txid
'nout'(optional): (int) if specified, look for claim with this nout
'claim_id'(optional): (str) if specified, look for claim with this claim_id
Returns:
(dict) Dictionary containing claim info, (bool) false if claim is not
resolvable
{
'txid': (str) txid of claim
'nout': (int) nout of claim
'amount': (float) amount of claim
'value': (str) value of claim
'height' : (int) height of claim takeover
'claim_id': (str) claim ID of claim
'supports': (list) list of supports associated with claim
}
```
## commands
```text
Return a list of available commands
Returns:
(list) list of available commands
```
## daemon_stop
```text
Stop lbrynet-daemon
Returns:
(string) Shutdown message
```
## descriptor_get
```text
Download and return a sd blob
Args:
'sd_hash': (str) hash of sd blob
'timeout'(optional): (int) timeout in number of seconds
'payment_rate_manager'(optional): (str) if not given the default payment rate manager
will be used. supported alternative rate managers:
only-free
Returns
(str) Success/Fail message or (dict) decoded data
```
## file_delete
```text
Delete a lbry file
Args:
'name' (optional): (str) delete file by lbry name,
'sd_hash' (optional): (str) delete file by sd hash,
'file_name' (optional): (str) delete file by the name in the downloads folder,
'stream_hash' (optional): (str) delete file by stream hash,
'claim_id' (optional): (str) delete file by claim ID,
'outpoint' (optional): (str) delete file by claim outpoint,
'rowid': (optional): (int) delete file by rowid in the file manager
'delete_target_file' (optional): (bool) delete file from downloads folder,
defaults to true if false only the blobs and
db entries will be deleted
Returns:
(bool) true if deletion was successful
```
## file_list
```text
List files limited by optional filters
Args:
'name' (optional): (str) filter files by lbry name,
'sd_hash' (optional): (str) filter files by sd hash,
'file_name' (optional): (str) filter files by the name in the downloads folder,
'stream_hash' (optional): (str) filter files by stream hash,
'claim_id' (optional): (str) filter files by claim id,
'outpoint' (optional): (str) filter files by claim outpoint,
'rowid' (optional): (int) filter files by internal row id,
'full_status': (optional): (bool) if true populate the 'message' and 'size' fields
Returns:
(list) List of files
[
{
'completed': (bool) true if download is completed,
'file_name': (str) name of file,
'download_directory': (str) download directory,
'points_paid': (float) credit paid to download file,
'stopped': (bool) true if download is stopped,
'stream_hash': (str) stream hash of file,
'stream_name': (str) stream name ,
'suggested_file_name': (str) suggested file name,
'sd_hash': (str) sd hash of file,
'name': (str) name claim attached to file
'outpoint': (str) claim outpoint attached to file
'claim_id': (str) claim ID attached to file,
'download_path': (str) download path of file,
'mime_type': (str) mime type of file,
'key': (str) key attached to file,
'total_bytes': (int) file size in bytes, None if full_status is false
'written_bytes': (int) written size in bytes
'message': (str), None if full_status is false
'metadata': (dict) Metadata dictionary
},
]
```
## file_set_status
```text
Start or stop downloading a file
Args:
'status': (str) "start" or "stop"
'name' (optional): (str) start file by lbry name,
'sd_hash' (optional): (str) start file by the hash in the name claim,
'file_name' (optional): (str) start file by its name in the downloads folder,
Returns:
(str) Confirmation message
```
## get
```text
Download stream from a LBRY name.
Args:
'uri': (str) lbry uri to download
'file_name'(optional): (str) a user specified name for the downloaded file
'timeout'(optional): (int) download timeout in number of seconds
'download_directory'(optional): (str) path to directory where file will be saved
Returns:
(dict) Dictionary containing information about the stream
{
'completed': (bool) true if download is completed,
'file_name': (str) name of file,
'download_directory': (str) download directory,
'points_paid': (float) credit paid to download file,
'stopped': (bool) true if download is stopped,
'stream_hash': (str) stream hash of file,
'stream_name': (str) stream name,
'suggested_file_name': (str) suggested file name,
'sd_hash': (str) sd hash of file,
'name': (str) name claim attached to file
'outpoint': (str) claim outpoint attached to file
'claim_id': (str) claim ID attached to file,
'download_path': (str) download path of file,
'mime_type': (str) mime type of file,
'key': (str) key attached to file,
'total_bytes': (int) file size in bytes, None if full_status is false
'written_bytes': (int) written size in bytes
'message': (str), None if full_status is false
'metadata': (dict) Metadata dictionary
}
```
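
A sketch of downloading a stream with the `lbry_call` helper from above; the URI, file name, and timeout are illustrative values:

```python
result = lbry_call("get", uri="some-uri", file_name="video.mp4", timeout=60)
print(result["completed"], result["download_path"])
```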
## get_availability
```text
Get stream availability for lbry uri
Args:
'uri' : (str) lbry uri
'sd_timeout' (optional): (int) sd blob download timeout
'peer_timeout' (optional): (int) how long to look for peers
Returns:
(float) Peers per blob / total blobs
```
## help
```text
Return a useful message for an API command
Args:
'command'(optional): (str) command to retrieve documentation for
Returns:
(str) if given a command, returns documentation about that command
otherwise returns general help message
```
## peer_list
```text
Get peers for blob hash
Args:
'blob_hash': (str) blob hash
'timeout'(optional): (int) peer search timeout in seconds
Returns:
(list) List of contacts
```
## publish
```text
Make a new name claim and publish associated data to lbrynet,
update over existing claim if user already has a claim for name.
Fields required in the final Metadata are:
'title'
'description'
'author'
'language'
'license',
'nsfw'
Metadata can be set by either using the metadata argument or by setting individual arguments
fee, title, description, author, language, license, license_url, thumbnail, preview, nsfw,
or sources. Individual arguments will overwrite the fields specified in metadata argument.
Args:
'name': (str) name to be claimed
'bid': (float) amount of credits to commit in this claim,
'metadata'(optional): (dict) Metadata to associate with the claim.
'file_path'(optional): (str) path to file to be associated with name. If provided,
a lbry stream of this file will be used in 'sources'.
If no path is given but a metadata dict is provided, the source
from the given metadata will be used.
'fee'(optional): (dict) Dictionary representing key fee to download content:
{currency_symbol: {'amount': float, 'address': str, optional}}
supported currencies: LBC, USD, BTC
If an address is not provided a new one will be automatically
generated. Default fee is zero.
'title'(optional): (str) title of the file
'description'(optional): (str) description of the file
'author'(optional): (str) author of the file
'language'(optional): (str), language code
'license'(optional): (str) license for the file
'license_url'(optional): (str) URL to license
'thumbnail'(optional): (str) thumbnail URL for the file
'preview'(optional): (str) preview URL for the file
'nsfw'(optional): (bool) True if not safe for work
'sources'(optional): (dict){'lbry_sd_hash':sd_hash} specifies sd hash of file
'channel_name' (optional): (str) name of the publisher channel
Returns:
(dict) Dictionary containing result of the claim
{
'tx' : (str) hex encoded transaction
'txid' : (str) txid of resulting claim
'nout' : (int) nout of the resulting claim
'fee' : (float) fee paid for the claim transaction
'claim_id' : (str) claim ID of the resulting claim
}
```
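
A sketch of publishing a file using individual metadata arguments (all values illustrative); per the note above, these arguments overwrite matching fields of any `metadata` dict:

```python
claim = lbry_call(
    "publish",
    name="my-first-stream",          # name to be claimed
    bid=1.0,                         # credits committed to the claim
    file_path="/path/to/video.mp4",  # becomes the lbry stream in 'sources'
    title="My First Stream",
    description="A short demo upload",
    author="me",
    language="en",
    license="Public Domain",
    nsfw=False,
)
print(claim["claim_id"], claim["txid"], claim["fee"])
```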
## reflect
```text
Reflect a stream
Args:
'sd_hash': (str) sd_hash of lbry file
Returns:
(bool) true if successful
```
## report_bug
```text
Report a bug to slack
Args:
'message': (str) message to send
Returns:
(bool) true if successful
```
## resolve
```text
Resolve a LBRY URI
Args:
'uri': (str) uri to download
Returns:
None if nothing can be resolved, otherwise:
If uri resolves to a channel or a claim in a channel:
'certificate': {
'address': (str) claim address,
'amount': (float) claim amount,
'effective_amount': (float) claim amount including supports,
'claim_id': (str) claim id,
'claim_sequence': (int) claim sequence number,
'decoded_claim': (bool) whether or not the claim value was decoded,
'height': (int) claim height,
'depth': (int) claim depth,
'has_signature': (bool) included if decoded_claim
'name': (str) claim name,
'supports': (list) list of supports [{'txid': txid,
'nout': nout,
'amount': amount}],
'txid': (str) claim txid,
'nout': (str) claim nout,
'signature_is_valid': (bool), included if has_signature,
'value': ClaimDict if decoded, otherwise hex string
}
If uri resolves to a channel:
'claims_in_channel': [
{
'address': (str) claim address,
'amount': (float) claim amount,
'effective_amount': (float) claim amount including supports,
'claim_id': (str) claim id,
'claim_sequence': (int) claim sequence number,
'decoded_claim': (bool) whether or not the claim value was decoded,
'height': (int) claim height,
'depth': (int) claim depth,
'has_signature': (bool) included if decoded_claim
'name': (str) claim name,
'supports': (list) list of supports [{'txid': txid,
'nout': nout,
'amount': amount}],
'txid': (str) claim txid,
'nout': (str) claim nout,
'signature_is_valid': (bool), included if has_signature,
'value': ClaimDict if decoded, otherwise hex string
}
]
If uri resolves to a claim:
'claim': {
'address': (str) claim address,
'amount': (float) claim amount,
'effective_amount': (float) claim amount including supports,
'claim_id': (str) claim id,
'claim_sequence': (int) claim sequence number,
'decoded_claim': (bool) whether or not the claim value was decoded,
'height': (int) claim height,
'depth': (int) claim depth,
'has_signature': (bool) included if decoded_claim
'name': (str) claim name,
'channel_name': (str) channel name if claim is in a channel
'supports': (list) list of supports [{'txid': txid,
'nout': nout,
'amount': amount}]
'txid': (str) claim txid,
'nout': (str) claim nout,
'signature_is_valid': (bool), included if has_signature,
'value': ClaimDict if decoded, otherwise hex string
}
}
```
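
A sketch of resolving a URI and unpacking the nested result, assuming the flat result shape documented here (later releases key results by URI); the URI is illustrative:

```python
result = lbry_call("resolve", uri="lbry://some-uri")
if result is None:
    print("nothing resolved")
elif "claim" in result:
    claim = result["claim"]
    print(claim["claim_id"], claim["amount"], claim["height"])
elif "claims_in_channel" in result:
    for claim in result["claims_in_channel"]:
        print(claim["name"], claim["effective_amount"])
```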
## resolve_name
```text
Resolve stream info from a LBRY name
Args:
'name': (str) name to look up, do not include lbry:// prefix
Returns:
(dict) Metadata dictionary from name claim, None if the name is not
resolvable
```
## send_amount_to_address
```text
Send credits to an address
Args:
'amount': (float) the amount to send
'address': (str) the address of the recipient in base58
Returns:
(bool) true if payment successfully scheduled
```
## settings_get
```text
Get daemon settings
Returns:
(dict) Dictionary of daemon settings
See ADJUSTABLE_SETTINGS in lbrynet/conf.py for full list of settings
```
## settings_set
```text
Set daemon settings
Args:
'run_on_startup': (bool) currently not supported
'data_rate': (float) data rate,
'max_key_fee': (float) maximum key fee,
'disable_max_key_fee': (bool) true to disable max_key_fee check,
'download_directory': (str) path of where files are downloaded,
'peer_port': (int) port through which daemon should connect,
'max_upload': (float), currently not supported
'max_download': (float), currently not supported
'download_timeout': (int) download timeout in seconds
'search_timeout': (float) search timeout in seconds
'cache_time': (int) cache timeout in seconds
Returns:
(dict) Updated dictionary of daemon settings
```
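
For example, adjusting a couple of settings with the `lbry_call` helper from above; the call returns the updated settings dict:

```python
updated = lbry_call(
    "settings_set",
    download_directory="/home/user/Downloads",  # illustrative path
    download_timeout=180,
)
print(updated["download_timeout"])
```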
## status
```text
Return daemon status
Args:
'session_status' (optional): (bool) true to return session status,
default is false
Returns:
(dict) Daemon status dictionary
```
## stream_cost_estimate
```text
Get estimated cost for a lbry stream
Args:
'name': (str) lbry name
'size' (optional): (int) stream size, in bytes. if provided an sd blob
won't be downloaded.
Returns:
(float) Estimated cost in lbry credits, returns None if uri is not
resolvable
```
## transaction_list
```text
List transactions belonging to wallet
Args:
None
Returns:
(list) List of transactions
```
## transaction_show
```text
Get a decoded transaction from a txid
Args:
'txid': (str) txid of transaction
Returns:
(dict) JSON formatted transaction
```
## version
```text
Get lbry version information
Args:
None
Returns:
(dict) Dictionary of lbry version information
{
'build': (str) build type (e.g. "dev", "rc", "release"),
'ip': (str) remote ip, if available,
'lbrynet_version': (str) lbrynet_version,
'lbryum_version': (str) lbryum_version,
'lbryschema_version': (str) lbryschema_version,
'os_release': (str) os release string
'os_system': (str) os name
'platform': (str) platform string
'processor': (str) processor type,
'python_version': (str) python version,
}
```
## wallet_balance
```text
Return the balance of the wallet
Args:
'address' (optional): If address is provided only that balance will be given
'include_unconfirmed' (optional): If set, unconfirmed balance will be included;
this only takes effect when address is also provided.
Returns:
(float) amount of lbry credits in wallet
```
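
For example (using the `lbry_call` helper from above): the total balance, then the balance of one address including unconfirmed funds. The address is a hypothetical placeholder, and `include_unconfirmed` only takes effect when an address is given:

```python
# Total confirmed wallet balance:
print(lbry_call("wallet_balance"))

# Balance of a single address, unconfirmed included ("b..." stands in for a
# real base58 address):
print(lbry_call("wallet_balance", address="b...", include_unconfirmed=True))
```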
## wallet_is_address_mine
```text
Checks if an address is associated with the current wallet.
Args:
'address': (str) address to check in base58
Returns:
(bool) true, if address is associated with current wallet
```
## wallet_list
```text
List wallet addresses
Args:
None
Returns:
List of wallet addresses
```
## wallet_new_address
```text
Generate a new wallet address
Args:
None
Returns:
(str) New wallet address in base58
```
## wallet_public_key
```text
Get public key from wallet address
Args:
'address': (str) wallet address in base58
Returns:
(list) list of public keys associated with address.
Could contain more than one public key if multisig.
```
## wallet_unused_address
```text
Return an address containing no balance; a new address will be created
if there is none.
Args:
None
Returns:
(str) Unused wallet address in base58
```

File diff suppressed because one or more lines are too long


@@ -997,17 +997,16 @@ class Daemon(AuthJSONRPCServer):
############################################################################
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(session_status="-s", dht_status="-d")
def jsonrpc_status(self, session_status=False, dht_status=False):
"""
Get daemon status
Usage:
status [-s] [-d]
status [--session_status] [--dht_status]
Options:
-s : include session status in results
-d : include dht network and peer status
--session_status : (bool) include session status in results
--dht_status : (bool) include dht network and peer status
Returns:
(dict) lbrynet-daemon status
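
The usage blocks in these docstrings are docopt-style, which is why dropping the `-s`/`-d` aliases from the pattern is enough to remove the short arguments. A minimal sketch of how the long-only pattern parses, assuming the `docopt` library:

```python
from docopt import docopt

usage = """
Usage:
    status [--session_status] [--dht_status]
"""

# Only the long flags parse now; passing "-s" would raise a usage error.
args = docopt(usage, argv=["--session_status"])
print(args)  # {'--session_status': True, '--dht_status': False}
```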
@@ -1107,6 +1106,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
version
Options:
None
Returns:
(dict) Dictionary of lbry version information
{
@@ -1135,6 +1137,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
report_bug (<message> | --message=<message>)
Options:
--message=<message> : (str) Description of the bug
Returns:
(bool) true if successful
"""
@@ -1155,6 +1160,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
settings_get
Options:
None
Returns:
(dict) Dictionary of daemon settings
See ADJUSTABLE_SETTINGS in lbrynet/conf.py for full list of settings
@@ -1184,29 +1192,27 @@ class Daemon(AuthJSONRPCServer):
[--auto_renew_claim_height_delta=<auto_renew_claim_height_delta>]
Options:
--download_directory=<download_directory> : (str)
--data_rate=<data_rate> : (float), 0.0001
--download_timeout=<download_timeout> : (int), 180
--peer_port=<peer_port> : (int), 3333
--max_key_fee=<max_key_fee> : (dict) maximum key fee for downloads,
in the format: {
"currency": <currency_symbol>,
"amount": <amount>
}. In the CLI, it must be an escaped
JSON string
Supported currency symbols:
LBC
BTC
USD
--disable_max_key_fee=<disable_max_key_fee> : (bool), False
--use_upnp=<use_upnp> : (bool), True
--run_reflector_server=<run_reflector_server> : (bool), False
--cache_time=<cache_time> : (int), 150
--reflect_uploads=<reflect_uploads> : (bool), True
--share_usage_data=<share_usage_data> : (bool), True
--peer_search_timeout=<peer_search_timeout> : (int), 3
--sd_download_timeout=<sd_download_timeout> : (int), 3
--auto_renew_claim_height_delta=<auto_renew_claim_height_delta> : (int), 0
--download_directory=<download_directory> : (str) path of download directory
--data_rate=<data_rate> : (float) 0.0001
--download_timeout=<download_timeout> : (int) 180
--peer_port=<peer_port> : (int) 3333
--max_key_fee=<max_key_fee> : (dict) maximum key fee for downloads,
in the format:
{
'currency': <currency_symbol>,
'amount': <amount>
}.
In the CLI, it must be an escaped JSON string
Supported currency symbols: LBC, USD, BTC
--disable_max_key_fee=<disable_max_key_fee> : (bool) False
--use_upnp=<use_upnp> : (bool) True
--run_reflector_server=<run_reflector_server> : (bool) False
--cache_time=<cache_time> : (int) 150
--reflect_uploads=<reflect_uploads> : (bool) True
--share_usage_data=<share_usage_data> : (bool) True
--peer_search_timeout=<peer_search_timeout> : (int) 3
--sd_download_timeout=<sd_download_timeout> : (int) 3
--auto_renew_claim_height_delta=<auto_renew_claim_height_delta> : (int) 0
claims set to expire within this many blocks will be
automatically renewed after startup (if set to 0, renews
will not be made automatically)
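
Because `--max_key_fee` takes a dict, it must be serialized when passed on the command line. A sketch of both forms, reusing the hypothetical `lbry_call` helper from the API documentation above; the CLI line in the comment is illustrative:

```python
import json

max_key_fee = {"currency": "LBC", "amount": 50.0}

# Over the JSON-RPC API the fee is passed as a plain dict:
updated = lbry_call("settings_set", max_key_fee=max_key_fee)

# On the CLI the same value must be an escaped JSON string, e.g.:
#   lbrynet-cli settings_set --max_key_fee='{"currency": "LBC", "amount": 50.0}'
print(json.dumps(max_key_fee))
```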
@@ -1227,7 +1233,10 @@ class Daemon(AuthJSONRPCServer):
help [<command> | --command=<command>]
Options:
<command>, --command=<command> : command to retrieve documentation for
--command=<command> : (str) command to retrieve documentation for
Returns:
(str) Help message
"""
if command is None:
@@ -1256,22 +1265,25 @@ class Daemon(AuthJSONRPCServer):
Usage:
commands
Options:
None
Returns:
(list) list of available commands
"""
return self._render_response(sorted([command for command in self.callable_methods.keys()]))
@AuthJSONRPCServer.flags(include_unconfirmed='-u')
def jsonrpc_wallet_balance(self, address=None, include_unconfirmed=False):
"""
Return the balance of the wallet
Usage:
wallet_balance [<address> | --address=<address>] [-u]
wallet_balance [<address> | --address=<address>] [--include_unconfirmed]
Options:
<address> : If provided only the balance for this address will be given
-u : Include unconfirmed
--address=<address> : (str) If provided only the balance for this
address will be given
--include_unconfirmed : (bool) Include unconfirmed
Returns:
(float) amount of lbry credits in wallet
@@ -1288,7 +1300,10 @@ class Daemon(AuthJSONRPCServer):
Unlock an encrypted wallet
Usage:
wallet_unlock (<password>)
wallet_unlock (<password> | --password=<password>)
Options:
--password=<password> : (str) password for unlocking wallet
Returns:
(bool) true if wallet is unlocked, otherwise false
@@ -1312,6 +1327,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
wallet_decrypt
Options:
None
Returns:
(bool) true if wallet is decrypted, otherwise false
"""
@@ -1327,7 +1345,10 @@ class Daemon(AuthJSONRPCServer):
the password
Usage:
wallet_encrypt (<new_password>)
wallet_encrypt (<new_password> | --new_password=<new_password>)
Options:
--new_password=<new_password> : (str) password string to be used for encrypting wallet
Returns:
(bool) true if wallet is decrypted, otherwise false
@@ -1345,6 +1366,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
daemon_stop
Options:
None
Returns:
(string) Shutdown message
"""
@@ -1355,7 +1379,6 @@ class Daemon(AuthJSONRPCServer):
defer.returnValue(response)
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(full_status='-f')
def jsonrpc_file_list(self, **kwargs):
"""
List files limited by optional filters
@@ -1364,22 +1387,23 @@ class Daemon(AuthJSONRPCServer):
file_list [--sd_hash=<sd_hash>] [--file_name=<file_name>] [--stream_hash=<stream_hash>]
[--rowid=<rowid>] [--claim_id=<claim_id>] [--outpoint=<outpoint>] [--txid=<txid>] [--nout=<nout>]
[--channel_claim_id=<channel_claim_id>] [--channel_name=<channel_name>]
[--claim_name=<claim_name>] [-f]
[--claim_name=<claim_name>] [--full_status]
Options:
--sd_hash=<sd_hash> : get file with matching sd hash
--file_name=<file_name> : get file with matching file name in the
--sd_hash=<sd_hash> : (str) get file with matching sd hash
--file_name=<file_name> : (str) get file with matching file name in the
downloads folder
--stream_hash=<stream_hash> : get file with matching stream hash
--rowid=<rowid> : get file with matching row id
--claim_id=<claim_id> : get file with matching claim id
--outpoint=<outpoint> : get file with matching claim outpoint
--txid=<txid> : get file with matching claim txid
--nout=<nout> : get file with matching claim nout
--channel_claim_id=<channel_claim_id> : get file with matching channel claim id
--channel_name=<channel_name> : get file with matching channel name
--claim_name=<claim_name> : get file with matching claim name
-f : full status, populate the 'message' and 'size' fields
--stream_hash=<stream_hash> : (str) get file with matching stream hash
--rowid=<rowid> : (int) get file with matching row id
--claim_id=<claim_id> : (str) get file with matching claim id
--outpoint=<outpoint> : (str) get file with matching claim outpoint
--txid=<txid> : (str) get file with matching claim txid
--nout=<nout> : (int) get file with matching claim nout
--channel_claim_id=<channel_claim_id> : (str) get file with matching channel claim id
--channel_name=<channel_name> : (str) get file with matching channel name
--claim_name=<claim_name> : (str) get file with matching claim name
--full_status : (bool) full status, populate the
'message' and 'size' fields
Returns:
(list) List of files
@ -1420,16 +1444,16 @@ class Daemon(AuthJSONRPCServer):
defer.returnValue(response)
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(force='-f')
def jsonrpc_resolve_name(self, name, force=False):
"""
Resolve stream info from a LBRY name
Usage:
resolve_name <name> [-f]
resolve_name (<name> | --name=<name>) [--force]
Options:
-f : force refresh and do not check cache
--name=<name> : (str) the name to resolve
--force : (bool) force refresh and do not check cache
Returns:
(dict) Metadata dictionary from name claim, None if the name is not
@ -1454,11 +1478,11 @@ class Daemon(AuthJSONRPCServer):
[<claim_id> | --claim_id=<claim_id>]
Options:
<txid>, --txid=<txid> : look for claim with this txid, nout must
also be specified
<nout>, --nout=<nout> : look for claim with this nout, txid must
also be specified
<claim_id>, --claim_id=<claim_id> : look for claim with this claim id
--txid=<txid> : (str) look for claim with this txid, nout must
also be specified
--nout=<nout> : (int) look for claim with this nout, txid must
also be specified
--claim_id=<claim_id> : (str) look for claim with this claim id
Returns:
(dict) Dictionary containing claim info as below,
@ -1492,16 +1516,17 @@ class Daemon(AuthJSONRPCServer):
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(force='-f')
def jsonrpc_resolve(self, force=False, uri=None, uris=[]):
"""
Resolve given LBRY URIs
Usage:
resolve [-f] (<uri> | --uri=<uri>) [<uris>...]
resolve [--force] (<uri> | --uri=<uri>) [<uris>...]
Options:
-f : force refresh and ignore cache
--force : (bool) force refresh and ignore cache
--uri=<uri> : (str) uri to resolve
--uris=<uris> : (list) uris to resolve
Returns:
(dict) Dictionary of results, keyed by uri
@ -1591,8 +1616,9 @@ class Daemon(AuthJSONRPCServer):
Options:
<file_name> : specified name for the downloaded file
<timeout> : download timeout in number of seconds
--uri=<uri> : (str) uri of the content to download
--file_name=<file_name> : (str) specified name for the downloaded file
--timeout=<timeout> : (int) download timeout in number of seconds
Returns:
(dict) Dictionary containing information about the stream
@ -1675,15 +1701,16 @@ class Daemon(AuthJSONRPCServer):
Start or stop downloading a file
Usage:
file_set_status <status> [--sd_hash=<sd_hash>] [--file_name=<file_name>]
[--stream_hash=<stream_hash>] [--rowid=<rowid>]
file_set_status (<status> | --status=<status>) [--sd_hash=<sd_hash>]
[--file_name=<file_name>] [--stream_hash=<stream_hash>] [--rowid=<rowid>]
Options:
--sd_hash=<sd_hash> : set status of file with matching sd hash
--file_name=<file_name> : set status of file with matching file name in the
--status=<status> : (str) one of "start" or "stop"
--sd_hash=<sd_hash> : (str) set status of file with matching sd hash
--file_name=<file_name> : (str) set status of file with matching file name in the
downloads folder
--stream_hash=<stream_hash> : set status of file with matching stream hash
--rowid=<rowid> : set status of file with matching row id
--stream_hash=<stream_hash> : (str) set status of file with matching stream hash
--rowid=<rowid> : (int) set status of file with matching row id
Returns:
(str) Confirmation message
@ -1710,33 +1737,32 @@ class Daemon(AuthJSONRPCServer):
@AuthJSONRPCServer.auth_required
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(delete_from_download_dir='-f', delete_all='--delete_all')
def jsonrpc_file_delete(self, delete_from_download_dir=False, delete_all=False, **kwargs):
"""
Delete a LBRY file
Usage:
file_delete [-f] [--delete_all] [--sd_hash=<sd_hash>] [--file_name=<file_name>]
file_delete [--delete_from_download_dir] [--delete_all] [--sd_hash=<sd_hash>] [--file_name=<file_name>]
[--stream_hash=<stream_hash>] [--rowid=<rowid>] [--claim_id=<claim_id>] [--txid=<txid>]
[--nout=<nout>] [--claim_name=<claim_name>] [--channel_claim_id=<channel_claim_id>]
[--channel_name=<channel_name>]
Options:
-f, --delete_from_download_dir : delete file from download directory,
--delete_from_download_dir : (bool) delete file from download directory,
instead of just deleting blobs
--delete_all : if there are multiple matching files,
--delete_all : (bool) if there are multiple matching files,
allow the deletion of multiple files.
Otherwise do not delete anything.
--sd_hash=<sd_hash> : delete by file sd hash
--file_name<file_name> : delete by file name in downloads folder
--stream_hash=<stream_hash> : delete by file stream hash
--rowid=<rowid> : delete by file row id
--claim_id=<claim_id> : delete by file claim id
--txid=<txid> : delete by file claim txid
--nout=<nout> : delete by file claim nout
--claim_name=<claim_name> : delete by file claim name
--channel_claim_id=<channel_claim_id> : delete by file channel claim id
--channel_name=<channel_name> : delete by file channel claim name
--sd_hash=<sd_hash> : (str) delete by file sd hash
--file_name=<file_name> : (str) delete by file name in downloads folder
--stream_hash=<stream_hash> : (str) delete by file stream hash
--rowid=<rowid> : (int) delete by file row id
--claim_id=<claim_id> : (str) delete by file claim id
--txid=<txid> : (str) delete by file claim txid
--nout=<nout> : (int) delete by file claim nout
--claim_name=<claim_name> : (str) delete by file claim name
--channel_claim_id=<channel_claim_id> : (str) delete by file channel claim id
--channel_name=<channel_name> : (str) delete by file channel claim name
Returns:
(bool) true if deletion was successful
@ -1776,10 +1802,11 @@ class Daemon(AuthJSONRPCServer):
Get estimated cost for a lbry stream
Usage:
stream_cost_estimate <uri> [<size> | --size=<size>]
stream_cost_estimate (<uri> | --uri=<uri>) [<size> | --size=<size>]
Options:
<size>, --size=<size> : stream size in bytes. if provided an sd blob won't be
--uri=<uri> : (str) uri to use
--size=<size> : (float) stream size in bytes. If provided, an sd blob won't be
downloaded.
Returns:
@ -1799,6 +1826,10 @@ class Daemon(AuthJSONRPCServer):
channel_new (<channel_name> | --channel_name=<channel_name>)
(<amount> | --amount=<amount>)
Options:
--channel_name=<channel_name> : (str) name of the channel prefixed with '@'
--amount=<amount> : (float) bid amount on the channel
Returns:
(dict) Dictionary containing result of the claim
{
@ -1838,6 +1869,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
channel_list
Options:
None
Returns:
(list) ClaimDict, includes 'is_mine' field to indicate if the certificate claim
is in the wallet.
@ -1856,6 +1890,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
channel_list_mine
Options:
None
Returns:
(list) ClaimDict
"""
@ -1871,6 +1908,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
channel_export (<claim_id> | --claim_id=<claim_id>)
Options:
--claim_id=<claim_id> : (str) Claim ID to export information about
Returns:
(str) Serialized certificate information
"""
@ -1888,6 +1928,9 @@ class Daemon(AuthJSONRPCServer):
channel_import (<serialized_certificate_info> |
--serialized_certificate_info=<serialized_certificate_info>)
Options:
--serialized_certificate_info=<serialized_certificate_info> : (str) certificate info
Returns:
(dict) Result dictionary
"""
@ -1928,13 +1971,15 @@ class Daemon(AuthJSONRPCServer):
[--claim_address=<claim_address>] [--change_address=<change_address>]
Options:
--metadata=<metadata> : ClaimDict to associate with the claim.
--file_path=<file_path> : path to file to be associated with name. If provided,
--name=<name> : (str) name of the content
--bid=<bid> : (float) amount to back the claim
--metadata=<metadata> : (dict) ClaimDict to associate with the claim.
--file_path=<file_path> : (str) path to file to be associated with name. If provided,
a lbry stream of this file will be used in 'sources'.
If no path is given but a sources dict is provided,
it will be used. If neither are provided, an
error is raised.
--fee=<fee> : Dictionary representing key fee to download content:
--fee=<fee> : (dict) Dictionary representing key fee to download content:
{
'currency': currency_symbol,
'amount': float,
@ -1943,22 +1988,22 @@ class Daemon(AuthJSONRPCServer):
supported currencies: LBC, USD, BTC
If an address is not provided a new one will be
automatically generated. Default fee is zero.
--title=<title> : title of the publication
--description=<description> : description of the publication
--author=<author> : author of the publication
--language=<language> : language of the publication
--license=<license> : publication license
--license_url=<license_url> : publication license url
--thumbnail=<thumbnail> : thumbnail url
--preview=<preview> : preview url
--nsfw=<nsfw> : title of the publication
--sources=<sources> : {'lbry_sd_hash':sd_hash} specifies sd hash of file
--channel_name=<channel_name> : name of the publisher channel name in the wallet
--channel_id=<channel_id> : claim id of the publisher channel, does not check
--title=<title> : (str) title of the publication
--description=<description> : (str) description of the publication
--author=<author> : (str) author of the publication
--language=<language> : (str) language of the publication
--license=<license> : (str) publication license
--license_url=<license_url> : (str) publication license url
--thumbnail=<thumbnail> : (str) thumbnail url
--preview=<preview> : (str) preview url
--nsfw=<nsfw> : (bool) whether the content is not safe for work
--sources=<sources> : (str) {'lbry_sd_hash': sd_hash} specifies sd hash of file
--channel_name=<channel_name> : (str) name of the publisher channel in the wallet
--channel_id=<channel_id> : (str) claim id of the publisher channel, does not check
for channel claim being in the wallet. This allows
publishing to a channel where only the certificate
private key is in the wallet.
--claim_address=<claim_address> : address where the claim is sent to, if not specified
--claim_address=<claim_address> : (str) address where the claim is sent to, if not specified
a new address will automatically be created
Returns:
@ -2106,7 +2151,12 @@ class Daemon(AuthJSONRPCServer):
claim_abandon [<claim_id> | --claim_id=<claim_id>]
[<txid> | --txid=<txid>] [<nout> | --nout=<nout>]
Return:
Options:
--claim_id=<claim_id> : (str) claim_id of the claim to abandon
--txid=<txid> : (str) txid of the claim to abandon
--nout=<nout> : (int) nout of the claim to abandon
Returns:
(dict) Dictionary containing result of the claim
{
txid : (str) txid of resulting transaction
@ -2134,7 +2184,12 @@ class Daemon(AuthJSONRPCServer):
claim_new_support (<name> | --name=<name>) (<claim_id> | --claim_id=<claim_id>)
(<amount> | --amount=<amount>)
Return:
Options:
--name=<name> : (str) name of the claim to support
--claim_id=<claim_id> : (str) claim_id of the claim to support
--amount=<amount> : (float) amount of support
Returns:
(dict) Dictionary containing result of the claim
{
txid : (str) txid of resulting support claim
@ -2156,7 +2211,11 @@ class Daemon(AuthJSONRPCServer):
Usage:
claim_renew (<outpoint> | --outpoint=<outpoint>) | (<height> | --height=<height>)
Return:
Options:
--outpoint=<outpoint> : (str) outpoint of the claim to renew
--height=<height> : (int) update claims expiring before or at this block height
Returns:
(dict) Dictionary where key is the original claim's outpoint and
value is the result of the renewal
{
@ -2198,8 +2257,21 @@ class Daemon(AuthJSONRPCServer):
[<amount> | --amount=<amount>]
Options:
<amount> : Amount of credits to claim name for, defaults to the current amount
on the claim
--claim_id=<claim_id> : (str) claim_id to send
--address=<address> : (str) address to send the claim to
--amount=<amount> : (float) Amount of credits to claim name for, defaults to the current amount
on the claim
Returns:
(dict) Dictionary containing result of the claim
{
'tx' : (str) hex encoded transaction
'txid' : (str) txid of resulting claim
'nout' : (int) nout of the resulting claim
'fee' : (float) fee paid for the claim transaction
'claim_id' : (str) claim ID of the resulting claim
}
"""
result = yield self.session.wallet.send_claim_to_address(claim_id, address, amount)
response = yield self._render_response(result)
@ -2214,7 +2286,10 @@ class Daemon(AuthJSONRPCServer):
Usage:
claim_list_mine
Returns
Options:
None
Returns:
(list) List of name claims owned by user
[
{
@ -2249,7 +2324,10 @@ class Daemon(AuthJSONRPCServer):
Usage:
claim_list (<name> | --name=<name>)
Returns
Options:
--name=<name> : (str) name of the claim to list info about
Returns:
(dict) State of claims assigned for the name
{
'claims': (list) list of claims for the name
@ -2286,9 +2364,11 @@ class Daemon(AuthJSONRPCServer):
[--page_size=<page_size>]
Options:
--page=<page> : which page of results to return where page 1 is the first
page, defaults to no pages
--page_size=<page_size> : number of results in a page, default of 10
--uri=<uri> : (str) uri of the channel
--uris=<uris> : (list) uris of the channel
--page=<page> : (int) which page of results to return where page 1 is the first
page, defaults to no pages
--page_size=<page_size> : (int) number of results in a page, default of 10
Returns:
{
@ -2371,6 +2451,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
transaction_list
Options:
None
Returns:
(list) List of transactions
@ -2429,6 +2512,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
transaction_show (<txid> | --txid=<txid>)
Options:
--txid=<txid> : (str) txid of the transaction
Returns:
(dict) JSON formatted transaction
"""
@ -2445,6 +2531,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
wallet_is_address_mine (<address> | --address=<address>)
Options:
--address=<address> : (str) address to check
Returns:
(bool) true, if address is associated with current wallet
"""
@ -2461,6 +2550,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
wallet_public_key (<address> | --address=<address>)
Options:
--address=<address> : (str) address for which to get the public key
Returns:
(list) list of public keys associated with address.
Could contain more than one public key if multisig.
@ -2479,6 +2571,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
wallet_list
Options:
None
Returns:
(list) List of wallet addresses
"""
@ -2495,6 +2590,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
wallet_new_address
Options:
None
Returns:
(str) New wallet address in base58
"""
@ -2517,6 +2615,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
wallet_unused_address
Options:
None
Returns:
(str) Unused wallet address in base58
"""
@ -2540,6 +2641,10 @@ class Daemon(AuthJSONRPCServer):
Usage:
send_amount_to_address (<amount> | --amount=<amount>) (<address> | --address=<address>)
Options:
--amount=<amount> : (float) amount to send
--address=<address> : (str) address to send credits to
Returns:
(bool) true if payment successfully scheduled
"""
@ -2568,7 +2673,12 @@ class Daemon(AuthJSONRPCServer):
wallet_send (<amount> | --amount=<amount>)
((<address> | --address=<address>) | (<claim_id> | --claim_id=<claim_id>))
Return:
Options:
--amount=<amount> : (float) amount of credit to send
--address=<address> : (str) address to send credits to
--claim_id=<claim_id> : (str) claim_id of the claim to send a tip to
Returns:
If sending to an address:
(bool) true if payment successfully scheduled
@ -2612,6 +2722,11 @@ class Daemon(AuthJSONRPCServer):
(<num_addresses> | --num_addresses=<num_addresses>)
(<amount> | --amount=<amount>)
Options:
--no_broadcast : (bool) whether to broadcast or not
--num_addresses=<num_addresses> : (int) number of addresses to create
--amount=<amount> : (float) initial amount in each address
Returns:
(dict) the resulting transaction
"""
@ -2635,6 +2750,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
utxo_list
Options:
None
Returns:
(list) List of unspent transaction outputs (UTXOs)
[
@ -2671,8 +2789,8 @@ class Daemon(AuthJSONRPCServer):
block_show (<blockhash> | --blockhash=<blockhash>) | (<height> | --height=<height>)
Options:
<blockhash>, --blockhash=<blockhash> : hash of the block to look up
<height>, --height=<height> : height of the block to look up
--blockhash=<blockhash> : (str) hash of the block to look up
--height=<height> : (int) height of the block to look up
Returns:
(dict) Requested block
@ -2701,17 +2819,18 @@ class Daemon(AuthJSONRPCServer):
[--encoding=<encoding>] [--payment_rate_manager=<payment_rate_manager>]
Options:
--timeout=<timeout> : timeout in number of seconds
--encoding=<encoding> : by default no attempt at decoding is made,
can be set to one of the
--blob_hash=<blob_hash> : (str) blob hash of the blob to get
--timeout=<timeout> : (int) timeout in number of seconds
--encoding=<encoding> : (str) by default no attempt at decoding
is made, can be set to one of the
following decoders:
'json'
--payment_rate_manager=<payment_rate_manager> : if not given the default payment rate
--payment_rate_manager=<payment_rate_manager> : (str) if not given the default payment rate
manager will be used.
supported alternative rate managers:
'only-free'
Returns
Returns:
(str) Success/Fail message or (dict) decoded data
"""
@ -2742,6 +2861,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
blob_delete (<blob_hash> | --blob_hash=<blob_hash>)
Options:
--blob_hash=<blob_hash> : (str) blob hash of the blob to delete
Returns:
(str) Success/fail message
"""
@ -2766,7 +2888,8 @@ class Daemon(AuthJSONRPCServer):
peer_list (<blob_hash> | --blob_hash=<blob_hash>) [<timeout> | --timeout=<timeout>]
Options:
<timeout>, --timeout=<timeout> : peer search timeout in seconds
--blob_hash=<blob_hash> : (str) find available peers for this blob hash
--timeout=<timeout> : (int) peer search timeout in seconds
Returns:
(list) List of contacts
@ -2780,24 +2903,23 @@ class Daemon(AuthJSONRPCServer):
return d
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(announce_all="-a")
def jsonrpc_blob_announce(self, announce_all=None, blob_hash=None,
stream_hash=None, sd_hash=None):
"""
Announce blobs to the DHT
Usage:
blob_announce [-a] [<blob_hash> | --blob_hash=<blob_hash>]
blob_announce [--announce_all] [<blob_hash> | --blob_hash=<blob_hash>]
[<stream_hash> | --stream_hash=<stream_hash>]
[<sd_hash> | --sd_hash=<sd_hash>]
Options:
-a : announce all the blobs possessed by user
<blob_hash>, --blob_hash=<blob_hash> : announce a blob, specified by blob_hash
<stream_hash>, --stream_hash=<stream_hash> : announce all blobs associated with
stream_hash
<sd_hash>, --sd_hash=<sd_hash> : announce all blobs associated with
sd_hash and the sd_hash itself
--announce_all=<announce_all> : (bool) announce all the blobs possessed by user
--blob_hash=<blob_hash> : (str) announce a blob, specified by blob_hash
--stream_hash=<stream_hash> : (str) announce all blobs associated with
stream_hash
--sd_hash=<sd_hash> : (str) announce all blobs associated with
sd_hash and the sd_hash itself
Returns:
(bool) true if successful
@ -2830,6 +2952,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
blob_announce_all
Options:
None
Returns:
(str) Success/fail message
"""
@ -2846,12 +2971,12 @@ class Daemon(AuthJSONRPCServer):
[--reflector=<reflector>]
Options:
--sd_hash=<sd_hash> : get file with matching sd hash
--file_name=<file_name> : get file with matching file name in the
--sd_hash=<sd_hash> : (str) get file with matching sd hash
--file_name=<file_name> : (str) get file with matching file name in the
downloads folder
--stream_hash=<stream_hash> : get file with matching stream hash
--rowid=<rowid> : get file with matching row id
--reflector=<reflector> : reflector server, ip address or url
--stream_hash=<stream_hash> : (str) get file with matching stream hash
--rowid=<rowid> : (int) get file with matching row id
--reflector=<reflector> : (str) reflector server, ip address or url
by default choose a server from the config
Returns:
@ -2871,25 +2996,26 @@ class Daemon(AuthJSONRPCServer):
defer.returnValue(results)
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(needed="-n", finished="-f")
def jsonrpc_blob_list(self, uri=None, stream_hash=None, sd_hash=None, needed=None,
finished=None, page_size=None, page=None):
"""
Returns blob hashes. If not given filters, returns all blobs known by the blob manager
Usage:
blob_list [-n] [-f] [<uri> | --uri=<uri>] [<stream_hash> | --stream_hash=<stream_hash>]
[<sd_hash> | --sd_hash=<sd_hash>] [<page_size> | --page_size=<page_size>]
blob_list [--needed] [--finished] [<uri> | --uri=<uri>]
[<stream_hash> | --stream_hash=<stream_hash>]
[<sd_hash> | --sd_hash=<sd_hash>]
[<page_size> | --page_size=<page_size>]
[<page> | --page=<page>]
Options:
-n : only return needed blobs
-f : only return finished blobs
<uri>, --uri=<uri> : filter blobs by stream in a uri
<stream_hash>, --stream_hash=<stream_hash> : filter blobs by stream hash
<sd_hash>, --sd_hash=<sd_hash> : filter blobs by sd hash
<page_size>, --page_size=<page_size> : results page size
<page>, --page=<page> : page of results to return
--needed : (bool) only return needed blobs
--finished : (bool) only return finished blobs
--uri=<uri> : (str) filter blobs by stream in a uri
--stream_hash=<stream_hash> : (str) filter blobs by stream hash
--sd_hash=<sd_hash> : (str) filter blobs by sd hash
--page_size=<page_size> : (int) results page size
--page=<page> : (int) page of results to return
Returns:
(list) List of blob hashes
@ -2935,6 +3061,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
blob_reflect_all
Options:
None
Returns:
(bool) true if successful
"""
@ -2951,6 +3080,9 @@ class Daemon(AuthJSONRPCServer):
Usage:
routing_table_get
Options:
None
Returns:
(dict) dictionary containing routing and contact information
{
@ -3028,8 +3160,10 @@ class Daemon(AuthJSONRPCServer):
[<blob_timeout> | --blob_timeout=<blob_timeout>]
Options:
<search_timeout> : how long to search for peers for the blob in the dht
<blob_timeout> : how long to try downloading from a peer
--blob_hash=<blob_hash> : (str) check availability for this blob hash
--search_timeout=<search_timeout> : (int) how long to search for peers for the blob
in the dht
--blob_timeout=<blob_timeout> : (int) how long to try downloading from a peer
Returns:
(dict) {
@ -3051,8 +3185,9 @@ class Daemon(AuthJSONRPCServer):
[<peer_timeout> | --peer_timeout=<peer_timeout>]
Options:
<sd_timeout>, --sd_timeout=<sd_timeout> : sd blob download timeout
<peer_timeout>, --peer_timeout=<peer_timeout> : how long to look for peers
--uri=<uri> : (str) check availability for this uri
--sd_timeout=<sd_timeout> : (int) sd blob download timeout
--peer_timeout=<peer_timeout> : (int) how long to look for peers
Returns:
(float) Peers per blob / total blobs
@ -3066,12 +3201,15 @@ class Daemon(AuthJSONRPCServer):
Get stream availability for lbry uri
Usage:
stream_availability (<uri>) [<search_timeout> | --search_timeout=<search_timeout>]
stream_availability (<uri> | --uri=<uri>)
[<search_timeout> | --search_timeout=<search_timeout>]
[<blob_timeout> | --blob_timeout=<blob_timeout>]
Options:
<search_timeout> : how long to search for peers for the blob in the dht
<blob_timeout> : how long to try downloading from a peer
--uri=<uri> : (str) check availability for this uri
--search_timeout=<search_timeout> : (int) how long to search for peers for the blob
in the dht
--blob_timeout=<blob_timeout> : (int) how long to try downloading from a peer
Returns:
(dict) {
@ -3160,21 +3298,22 @@ class Daemon(AuthJSONRPCServer):
defer.returnValue(response)
@defer.inlineCallbacks
@AuthJSONRPCServer.flags(a_arg='-a', b_arg='-b')
def jsonrpc_cli_test_command(self, pos_arg, pos_args=[], pos_arg2=None, pos_arg3=None,
a_arg=False, b_arg=False):
"""
This command is only for testing the CLI argument parsing
Usage:
cli_test_command [-a] [-b] (<pos_arg> | --pos_arg=<pos_arg>)
cli_test_command [--a_arg] [--b_arg] (<pos_arg> | --pos_arg=<pos_arg>)
[<pos_args>...] [--pos_arg2=<pos_arg2>]
[--pos_arg3=<pos_arg3>]
Options:
-a, --a_arg : a arg
-b, --b_arg : b arg
<pos_arg2>, --pos_arg2=<pos_arg2> : pos arg 2
<pos_arg3>, --pos_arg3=<pos_arg3> : pos arg 3
--a_arg : a arg
--b_arg : b arg
--pos_arg=<pos_arg> : pos arg
--pos_args=<pos_args> : pos args
--pos_arg2=<pos_arg2> : pos arg 2
--pos_arg3=<pos_arg3> : pos arg 3
Returns:
pos args
"""


@ -52,6 +52,7 @@ class TestIntegration(unittest.TestCase):
def tearDownClass(cls):
shell_command(['lbrynet-cli', 'daemon_stop'])
def test_cli(self):
help_out,err = lbrynet_cli(['help'])
self.assertTrue(help_out)
@ -66,6 +67,7 @@ class TestIntegration(unittest.TestCase):
out = json.loads(out)
self.assertTrue(out['is_running'])
def test_cli_docopts(self):
out,err = lbrynet_cli(['cli_test_command'])
self.assertEqual('',out)
@ -92,18 +94,23 @@ class TestIntegration(unittest.TestCase):
# TODO: variable length arguments don't have guess_type() on them
self.assertEqual([1,['2','3'],None,None,False,False], out)
out,err = lbrynet_cli(['cli_test_command','1','-a'])
out = json.loads(out)
self.assertEqual([1,[],None,None,True,False], out)
out,err = lbrynet_cli(['cli_test_command','1','--a_arg'])
out = json.loads(out)
self.assertEqual([1,[],None,None,True,False], out)
out,err = lbrynet_cli(['cli_test_command','1','-a','-b'])
out,err = lbrynet_cli(['cli_test_command','1','--a_arg', '--b_arg'])
out = json.loads(out)
self.assertEqual([1,[],None,None,True,True], out)
def test_cli_docopts_with_short_args(self):
out,err = lbrynet_cli(['cli_test_command','1','-a'])
self.assertRaises(ValueError, json.loads, out)
out,err = lbrynet_cli(['cli_test_command','1','-a','-b'])
self.assertRaises(ValueError, json.loads, out)
def test_status(self):
out = lbrynet.status()
self.assertTrue(out['is_running'])


@ -3,8 +3,9 @@ repo_url: https://github.com/lbryio/lbry
pages:
- "API": index.md
- "CLI": cli.md
theme: material
site_dir: docs_build
theme: material
site_dir: docs
docs_dir: docs_build
google_analytics:
- 'UA-60403362-1'
- 'auto'


@ -1,68 +0,0 @@
# -*- coding: utf-8 -*-
# Generate docs: python gen_api_docs.py
# See docs: pip install mkdocs; mkdocs serve
# Push docs: mkdocs gh-deploy
import inspect
import os.path as op
import re
import sys
from lbrynet.daemon.Daemon import Daemon
def _name(obj):
if hasattr(obj, '__name__'):
return obj.__name__
elif inspect.isdatadescriptor(obj):
return obj.fget.__name__
def _anchor(name):
anchor = name.lower().replace(' ', '-')
anchor = re.sub(r'[^\w\- ]', '', anchor)
return anchor
_docstring_header_pattern = re.compile(r'^([^\n]+)\n[\-\=]{3,}$', flags=re.MULTILINE)
_docstring_parameters_pattern = re.compile(r'^([^ \n]+) \: ([^\n]+)$', flags=re.MULTILINE)
def _replace_docstring_header(paragraph):
"""Process NumPy-like function docstrings."""
# Replace Markdown headers in docstrings with light headers in bold.
paragraph = re.sub(_docstring_header_pattern, r'*\1*', paragraph)
paragraph = re.sub(_docstring_parameters_pattern, r'\n* `\1` (\2)\n', paragraph)
return paragraph
def _doc(obj):
docstr = (inspect.getdoc(obj) or '').strip()
return _replace_docstring_header(docstr)
def _link(name, anchor=None):
return "[{name}](#{anchor})".format(name=name, anchor=anchor or _anchor(name))
def main():
curdir = op.dirname(op.realpath(__file__))
cli_doc_path = op.realpath(op.join(curdir, '..', 'docs', 'cli.md'))
# toc = ''
doc = ''
# Table of contents
for method_name in sorted(Daemon.callable_methods.keys()):
method = Daemon.callable_methods[method_name]
# toc += '* ' + _link(method_name, _anchor(method_name)) + "\n"
doc += '## ' + method_name + "\n\n```text\n" + _doc(method) + "\n```\n\n"
text = "# LBRY Command Line Documentation\n\n" + doc
with open(cli_doc_path, 'w+') as f:
f.write(text)
if __name__ == '__main__':
sys.exit(main())

scripts/gen_docs.py (new file)

@ -0,0 +1,197 @@
# -*- coding: utf-8 -*-
# Generate docs: python gen_docs.py
# See docs: pip install mkdocs; mkdocs serve
# Build docs: mkdocs build
import re
import inspect
import subprocess
import os
from lbrynet.daemon.Daemon import Daemon
try:
import mkdocs
except ImportError:
raise ImportError("mkdocs is not installed")
try:
from tabulate import tabulate
except ImportError:
raise ImportError("tabulate is not installed")
INDENT = " "
REQD_CMD_REGEX = r"\(.*?=<(?P<reqd>.*?)>\)"
OPT_CMD_REGEX = r"\[.*?=<(?P<opt>.*?)>\]"
CMD_REGEX = r"--.*?(?P<cmd>.*?)[=,\s,<]"
DOCS_DIR = "docs_build"
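# Illustration (not part of the original script) of what these patterns
# capture from a docstring usage line:
#   re.findall(REQD_CMD_REGEX, "resolve (<uri> | --uri=<uri>) [--force]")
#   -> ['uri']      (required args are wrapped in parentheses)
#   re.findall(OPT_CMD_REGEX, "file_list [--sd_hash=<sd_hash>]")
#   -> ['sd_hash']  (optional args are wrapped in brackets)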
def _cli_tabulate_options(_options_docstr, method):
_option_list = []
for line in _options_docstr.splitlines():
if line.strip().startswith("--"):
# separates command name and description
parts = line.split(":", 1)
# separates command type(in brackets) and description
new_parts = parts[1].lstrip().split(" ", 1)
else:
parts = [line]
# len will be 2 when there's cmd name and description
if len(parts) == 2:
_option_list.append([parts[0], ":", new_parts[0], new_parts[1]])
# len will be 1 when there's continuation of multiline description in the next line
# check `blob_announce`'s `stream_hash` command
elif len(parts) == 1:
_option_list.append([None, None, None, parts[0]])
else:
print "Error: Ill formatted doc string for {}".format(method)
print "Error causing line: {}".format(line)
# tabulate to make the options look pretty
_options_docstr_no_indent = tabulate(_option_list, missingval="", tablefmt="plain")
# Indent the options properly
_options_docstr = ""
for line in _options_docstr_no_indent.splitlines():
_options_docstr += INDENT + line + '\n'
return _options_docstr
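# Example of the alignment this produces (illustrative):
#   tabulate([["--force", ":", "(bool)", "force refresh"],
#             [None, None, None, "continued description"]],
#            missingval="", tablefmt="plain")
# pads every column to a common width, so option names, colons, types
# and wrapped description lines stay vertically aligned.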
def _api_tabulate_options(_options_docstr, method, reqd_matches, opt_matches):
_option_list = []
for line in _options_docstr.splitlines():
if line.strip().startswith("--"):
# separates command name and description
parts = line.split(":", 1)
# checks whether the command is optional or required
# and removes the cli type formatting, converting it to
# api style formatting
match = re.findall(CMD_REGEX, parts[0])
if match[0] not in reqd_matches:
parts[0] = "'" + match[0] + "'"
else:
parts[0] = "'" + match[0] + "' (required)"
# separates command type(in brackets) and description
new_parts = parts[1].lstrip().split(" ", 1)
else:
parts = [line]
# len will be 2 when there's cmd name and description
if len(parts) == 2:
_option_list.append([parts[0], ":", new_parts[0], new_parts[1]])
# len will be 1 when there's continuation of multiline description in the next line
# check `blob_announce`'s `stream_hash` command
elif len(parts) == 1:
_option_list.append([None, None, None, parts[0]])
else:
print "Error: Ill formatted doc string for {}".format(method)
print "Error causing line: {}".format(line)
# tabulate to make the options look pretty
_options_docstr_no_indent = tabulate(_option_list, missingval="", tablefmt="plain")
# tabulate to make the options look pretty
_options_docstr = ""
for line in _options_docstr_no_indent.splitlines():
_options_docstr += INDENT + line + '\n'
return _options_docstr
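# Illustrative trace of the renaming above:
#   re.findall(CMD_REGEX, "--uri=<uri>") -> ['uri']
# so a required option line "--uri=<uri> : (str) uri to resolve"
# becomes "'uri' (required) : (str) uri to resolve" in the API docs,
# while an optional one is rendered as a bare "'uri'".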
def _cli_doc(obj):
docstr = (inspect.getdoc(obj) or '').strip()
try:
_usage_docstr, _docstr_after_options = docstr.split("Options:", 1)
_options_docstr, _returns_docstr = _docstr_after_options.split("Returns:", 1)
except ValueError:
print "Error: Ill formatted doc string for {}".format(obj)
print "Please ensure that the docstring has all the three headings i.e. \"Usage:\""
print "\"Options:\" and \"Returns:\" exactly as specified, including the colon"
return "Error!"
try:
_options_docstr = _cli_tabulate_options(_options_docstr.strip(), obj)
except Exception as e:
print "Please make sure that the individual options are properly formatted"
print "It should be strictly of the format:"
print "--command_name=<command_name> : (type) desc"
print e.message
docstr = _usage_docstr + \
"\nOptions:\n" + \
_options_docstr + \
"\nReturns:" + \
_returns_docstr
return docstr
def _api_doc(obj):
docstr = (inspect.getdoc(obj) or '').strip()
try:
_desc, _docstr_after_desc = docstr.split("Usage:", 1)
_usage_docstr, _docstr_after_options = _docstr_after_desc.split("Options:", 1)
_options_docstr, _returns_docstr = _docstr_after_options.split("Returns:", 1)
except ValueError:
print "Error: Ill formatted doc string for {}".format(obj)
print "Please ensure that the docstring has all the three headings i.e. \"Usage:\""
print "\"Options:\" and \"Returns:\" exactly as specified, including the colon"
return "Error!"
opt_matches = re.findall(OPT_CMD_REGEX, _usage_docstr)
reqd_matches = re.findall(REQD_CMD_REGEX, _usage_docstr)
try:
_options_docstr = _api_tabulate_options(_options_docstr.strip(), obj, reqd_matches, opt_matches)
except Exception as e:
print "Please make sure that the individual options are properly formatted"
print "It should be strictly of the format:"
print "--command_name=<command_name> : (type) desc"
print e.message
docstr = _desc + \
"Args:\n" + \
_options_docstr + \
"\nReturns:" + \
_returns_docstr
return docstr
def main():
curdir = os.path.dirname(os.path.realpath(__file__))
api_doc_path = os.path.realpath(os.path.join(curdir, '..', DOCS_DIR, 'index.md'))
cli_doc_path = os.path.realpath(os.path.join(curdir, '..', DOCS_DIR, 'cli.md'))
_api_docs = ''
_cli_docs = ''
for method_name in sorted(Daemon.callable_methods.keys()):
method = Daemon.callable_methods[method_name]
_api_docs += '## ' + method_name + "\n\n```text\n" + _api_doc(method) + "\n```\n\n"
_cli_docs += '## ' + method_name + "\n\n```text\n" + _cli_doc(method) + "\n```\n\n"
_api_docs = "# LBRY JSON-RPC API Documentation\n\n" + _api_docs
with open(api_doc_path, 'w+') as f:
f.write(_api_docs)
_cli_docs = "# LBRY JSON-RPC API Documentation\n\n" + _cli_docs
with open(cli_doc_path, 'w+') as f:
f.write(_cli_docs)
if __name__ == '__main__':
cwd = os.path.dirname(os.path.realpath(__file__))
cwd = os.path.realpath(os.path.join(cwd, ".."))
directory = os.path.join(cwd, "docs_build")
if not os.path.exists(directory):
os.makedirs(directory)
main()  # generate the markdown before building the site
proc = subprocess.Popen("exec mkdocs build", cwd=cwd, shell=True)
# let mkdocs finish building before exiting
proc.wait()
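# Typical invocation, assuming lbrynet and its dependencies are importable:
#   python scripts/gen_docs.py
# main() writes docs_build/index.md and docs_build/cli.md, then mkdocs
# renders docs_build/ into the docs/ site directory per mkdocs.yml.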