forked from LBRYCommunity/lbry-sdk
Merge pull request #3036 from lbryio/fwss_fix_parser
fix doc generator for docopt edge cases
Commit e1c33dccab
4 changed files with 462 additions and 505 deletions
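The edge cases being fixed: docopt treats any options-section line whose first token starts with "--" as a new option definition, so if a generated description wraps onto a continuation line beginning with "--" (or textwrap splits a long "--option=<value>" token mid-word), the generated help text misparses. Below is a rough standalone sketch of the two guards this PR adds to generate_options; LINE_WIDTH, left and the description text are made-up stand-ins for the generator's real values.

import textwrap

LINE_WIDTH = 90                           # assumed column limit, standing in for the parser's constant
left = "    --locations=<locations>  "    # assumed option column prefix built by generate_options

text = (
    'locations relevant to the stream, consisting of 2 letter "country" code and a "state", '
    '"city" and a postal "code", for example to provide only the city: ...--locations="::Manchester"'
)

# break_long_words=False stops textwrap from splitting a token such as
# --locations="::Manchester" in the middle of the word
wrapped = textwrap.wrap(text, LINE_WIDTH - len(left), break_long_words=False)

for line in wrapped[1:]:
    # mirror of the new guard: a continuation line starting with "--" would be
    # read by docopt as a brand-new option, so fail while generating the docs
    if line.strip().startswith('--'):
        raise Exception(f"wrapped line would break docopt: {line.strip()}")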
@@ -154,14 +154,14 @@ def claim_kwargs(
 # "COUNTRY:STATE:CITY:CODE:LATITUDE:LONGITUDE"
 # making sure to include colon for blank values, for
 # example to provide only the city:
-# ... --locations="::Manchester"
+# ...--locations="::Manchester"
 # with all values set:
-# ... --locations="US:NH:Manchester:03101:42.990605:-71.460989"
+# ...--locations="US:NH:Manchester:03101:42.990605:-71.460989"
 # optionally, you can just pass the "LATITUDE:LONGITUDE":
-# ... --locations="42.990605:-71.460989"
+# ...--locations="42.990605:-71.460989"
 # finally, you can also pass JSON string of dictionary
 # on the command line as you would via JSON RPC
-# ... --locations="{'country': 'US', 'state': 'NH'}"
+# ...--locations="{'country': 'US', 'state': 'NH'}"
 account_id: str = None, # account to hold the claim
 claim_address: str = None, # specific address where the claim is held, if not specified
 # it will be determined automatically from the account
@@ -293,10 +293,10 @@ def signed_filter_kwargs(
 # see --channel_id if you need to filter by
 # multiple channels at the same time,
 # includes results with invalid signatures,
-# use in conjunction with --valid_channel_signature
+# use in conjunction with "--valid_channel_signature"
 channel_id: StrOrList = None, # signed by any of these channels including invalid signatures,
 # implies --has_channel_signature,
-# use in conjunction with --valid_channel_signature
+# use in conjunction with "--valid_channel_signature"
 not_channel_id: StrOrList = None, # exclude everything signed by any of these channels
 has_channel_signature=False, # results with a channel signature (valid or invalid)
 valid_channel_signature=False, # results with a valid channel signature or no signature,
@@ -352,13 +352,13 @@ def txo_filter_kwargs(
 is_not_spent=False, # only show not spent txos
 is_my_input_or_output=False, # txos which have your inputs or your outputs,
 # if using this flag the other related flags
-# are ignored (--is_my_output, --is_my_input, etc)
+# are ignored. ("--is_my_output", "--is_my_input", etc)
 is_my_output=False, # show outputs controlled by you
 is_not_my_output=False, # show outputs not controlled by you
 is_my_input=False, # show outputs created by you
 is_not_my_input=False, # show outputs not created by you
 exclude_internal_transfers=False, # excludes any outputs that are exactly this combination:
-# "--is_my_input --is_my_output --type=other"
+# "--is_my_input" + "--is_my_output" + "--type=other"
 # this allows to exclude "change" payments, this
 # flag can be used in combination with any of the other flags
 account_id: StrOrList = None, # id(s) of the account(s) to query
@@ -614,7 +614,7 @@ class API:
 get <uri> [<file_name> | --file_name=<file_name>]
 [<download_directory> | --download_directory=<download_directory>]
 [<timeout> | --timeout=<timeout>]
-[--save_file=<save_file>] [--wallet_id=<wallet_id>]
+[--save_file] [--wallet_id=<wallet_id>]

 """
 return await self.service.get(
@@ -1548,7 +1548,7 @@ class API:
 is_spent=False, # shows previous claim updates and abandons
 resolve=False, # resolves each claim to provide additional metadata
 include_received_tips=False, # calculate the amount of tips recieved for claim outputs
-**claim_filter_and_signed_filter_and_stream_filter_and_pagination_kwargs
+**claim_filter_and_stream_filter_and_pagination_kwargs
 ) -> Paginated[Output]: # streams and channels in wallet
 """
 List my stream and channel claims.
@@ -1603,7 +1603,7 @@ class API:
 # 'support_amount', 'trending_group', 'trending_mixed', 'trending_local',
 # 'trending_global', 'activation_height'
 protobuf=False, # protobuf encoded result
-**claim_filter_and_signed_filter_and_stream_filter_and_pagination_kwargs
+**claim_filter_and_stream_filter_and_pagination_kwargs
 ) -> Paginated[Output]: # search results
 """
 Search for stream and channel claims on the blockchain.
@@ -1629,7 +1629,7 @@ class API:

 """
 claim_filter_dict, kwargs = pop_kwargs('claim_filter', claim_filter_kwargs(
-**claim_filter_and_signed_filter_and_stream_filter_and_pagination_kwargs
+**claim_filter_and_stream_filter_and_pagination_kwargs
 ))
 pagination, kwargs = pop_kwargs('pagination', pagination_kwargs(**kwargs))
 wallet = self.wallets.get_or_default(wallet_id)
@@ -2756,7 +2756,7 @@ class API:
 List my transaction outputs.

 Usage:
-txo list [--include_received_tips] [--resolve] [--order_by]
+txo list [--include_received_tips] [--resolve] [--order_by=<order_by>]
 {kwargs}

 """
@@ -2789,7 +2789,11 @@ class API:
 self,
 batch_size=500, # number of txos to spend per transactions
 include_full_tx=False, # include entire tx in output and not just the txid
-**txo_filter_and_tx_kwargs
+change_account_id: str = None, # account to send excess change (LBC)
+fund_account_id: StrOrList = None, # accounts to fund the transaction
+preview=False, # do not broadcast the transaction
+no_wait=False, # do not wait for mempool confirmation
+**txo_filter_kwargs
 ) -> List[Transaction]:
 """
 Spend transaction outputs, batching into multiple transactions as necessary.
@@ -2820,7 +2824,7 @@ class API:
 return txs
 return [{'txid': tx.id} for tx in txs]

-async def txo_sum(self, **txo_filter_and_tx_kwargs) -> int: # sum of filtered outputs
+async def txo_sum(self, **txo_filter_kwargs) -> int: # sum of filtered outputs
 """
 Sum of transaction outputs.
@@ -2839,9 +2843,9 @@ class API:
 self,
 days_back=0, # number of days back from today
 # (not compatible with --start_day, --days_after, --end_day)
-start_day: str = None, # start on specific date (YYYY-MM-DD) (instead of --days_back)
-days_after: int = None, # end number of days after --start_day (instead of --end_day)
-end_day: str = None, # end on specific date (YYYY-MM-DD) (instead of --days_after)
+start_day: str = None, # start on specific date (format: YYYY-MM-DD) (instead of --days_back)
+days_after: int = None, # end number of days after --start_day (instead of using --end_day)
+end_day: str = None, # end on specific date (format: YYYY-MM-DD) (instead of --days_after)
 **txo_filter_and_pagination_kwargs
 ) -> List:
 """
File diff suppressed because one or more lines are too long
@@ -146,6 +146,7 @@ def parse_method(method, expanders: dict) -> dict:
 'returns': None
 }
 src = inspect.getsource(method)
+known_names = set()
 for tokens in produce_argument_tokens(src):
 if tokens[0].string == '**':
 tokens.pop(0)
@@ -156,12 +157,21 @@ def parse_method(method, expanders: dict) -> dict:
 for expander_name in expander_names.split('_and_'):
 if expander_name not in expanders:
 raise Exception(f"Expander '{expander_name}' not found, used by {d['name']}.")
-d['arguments'].extend(expanders[expander_name])
-d['kwargs'].extend(expanders[expander_name])
+for expanded in expanders[expander_name]:
+if expanded['name'] in known_names:
+raise Exception(
+f"Duplicate argument '{expanded['name']}' in '{d['name']}'. "
+f"Expander '{expander_name}' is attempting to add an argument which is "
+f"already defined in the '{d['name']}' command (possibly by another expander)."
+)
+d['arguments'].append(expanded)
+d['kwargs'].append(expanded)
+known_names.add(expanded['name'])
 else:
 arg = parse_argument(tokens, d['name'])
 if arg:
 d['arguments'].append(arg)
+known_names.add(arg['name'])
 d['returns'] = parse_return(produce_return_tokens(src))
 return d

@@ -207,9 +217,16 @@ def generate_options(method, indent) -> List[str]:
 if 'default' in arg:
 if arg['type'] != 'bool':
 text += f" [default: {arg['default']}]"
-wrapped = textwrap.wrap(text, LINE_WIDTH-len(left))
+wrapped = textwrap.wrap(text, LINE_WIDTH-len(left), break_long_words=False)
 lines = [f"{left}{wrapped.pop(0)}"]
 for line in wrapped:
+if line.strip().startswith('--'):
+raise Exception(
+f"Word wrapping the description for argument '{arg['name']}' in method "
+f"'{method['method'].__name__}' resulted in a line which starts with '--' and this will "
+f"break docopt. Try wrapping the '--' in quotes. Instead of --foo do \"--foo\". "
+f"Line which caused this issue is:\n{line.strip()}"
+)
 lines.append(f"{' '*len(left)} {line}")
 options.extend(lines)
 return options
@@ -1,6 +1,9 @@
-from unittest import TestCase
+from unittest import TestCase, mock
 from textwrap import dedent
-from lbry.service.api import Paginated, Wallet
+
+from docopt import docopt, DocoptExit
+
+from lbry.service.api import API, Paginated, Wallet, expander
 from lbry.service.parser import (
 parse_method, get_expanders, get_api_definitions,
 generate_options
@@ -57,9 +60,55 @@ class FakeAPI:
 """

+
+@expander
+def test_kwargs(
+somevalue=1
+):
+pass
+
+
+@expander
+def another_test_kwargs(
+somevalue=1,
+bad_description=3, # using linebreaks makes docopt very very --angry
+):
+pass
+
+
+class CommandWithRepeatedArgs(FakeAPI):
+def thing_bad(self, **test_and_another_test_kwargs) -> Wallet:
+"""bad thing"""
+
+
+class CommandWithDoubleDashAtLineStart(FakeAPI):
+def thing_bad(self, **another_test_kwargs):
+"""bad thing"""
+
+
 class TestParser(TestCase):
 maxDiff = None

+def test_parse_does_not_allow_duplicate_arguments(self):
+with self.assertRaises(Exception) as exc:
+parse_method(CommandWithRepeatedArgs.thing_bad, get_expanders())
+self.assertEqual(
+exc.exception.args[0],
+"Duplicate argument 'somevalue' in 'thing_bad'. "
+"Expander 'another_test' is attempting to add an argument which is already defined "
+"in the 'thing_bad' command (possibly by another expander)."
+)
+
+def test_parse_does_not_allow_two_dashes_at_start_of_line(self):
+with self.assertRaises(Exception) as exc:
+get_api_definitions(CommandWithDoubleDashAtLineStart)
+self.assertEqual(
+exc.exception.args[0],
+"Word wrapping the description for argument 'bad_description' in method 'thing_bad' "
+"resulted in a line which starts with '--' and this will break docopt. Try wrapping "
+"the '--' in quotes. Instead of --foo do \"--foo\". Line which caused this issue is:"
+"\n--angry [default: 3]"
+)
+
 def test_parse_method(self):
 expanders = get_expanders()
 self.assertEqual(
@@ -148,6 +197,14 @@ class TestParser(TestCase):
 class TestGenerator(TestCase):
 maxDiff = None

+def test_generated_api_works_in_docopt(self):
+from lbry.service.metadata import interface
+for command in interface["commands"].values():
+with mock.patch('sys.exit') as exit:
+with self.assertRaises(DocoptExit):
+docopt(command["help"], ["--help"])
+self.assertTrue(exit.called)
+
 def test_generate_options(self):
 expanders = get_expanders()
 self.assertEqual(
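For reference, the validation pattern the new TestGenerator test relies on can also be run standalone against the generated metadata. This is only an illustrative sketch of the same check, not additional code from the PR; it assumes the interface["commands"][...]["help"] layout the test above uses.

from unittest import mock

from docopt import docopt, DocoptExit

from lbry.service.metadata import interface  # generated command metadata, as used by the test


def check_generated_help():
    # every generated help text must be a docopt document that parses cleanly;
    # "--help" normally prints usage and calls sys.exit, so patch sys.exit and
    # expect DocoptExit rather than an options-section parse failure
    for name, command in interface["commands"].items():
        with mock.patch('sys.exit'):
            try:
                docopt(command["help"], ["--help"])
            except DocoptExit:
                print(f"{name}: ok")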