Unify paginated endpoints logic
parent c61fae05d6
commit bd1df09098
3 changed files with 62 additions and 39 deletions
@@ -111,6 +111,7 @@ CONNECTION_MESSAGES = {

 SHORT_ID_LEN = 20
 MAX_UPDATE_FEE_ESTIMATE = 0.3
+DEFAULT_PAGE_SIZE = 50


 def encode_pagination_doc(items):
@@ -124,20 +125,31 @@ def encode_pagination_doc(items):


 async def maybe_paginate(get_records: Callable, get_record_count: Callable,
-                         page: Optional[int], page_size: Optional[int], **constraints):
-    if None not in (page, page_size):
+                         page: Optional[int], page_size: Optional[int],
+                         no_totals: Optional[bool] = None, **constraints):
+    if page is None and page_size is None:
+        return await get_records(**constraints)
+    if no_totals is not None:
+        constraints["no_totals"] = no_totals
+    if page is None:
+        page = 1
+    if page_size is None or page_size > DEFAULT_PAGE_SIZE:
+        page_size = DEFAULT_PAGE_SIZE
     constraints.update({
         "offset": page_size * (page - 1),
         "limit": page_size
     })
-    total_items = await get_record_count(**constraints)
-    return {
+    result = {
         "items": await get_records(**constraints),
-        "total_pages": int((total_items + (page_size - 1)) / page_size),
-        "total_items": total_items,
         "page": page, "page_size": page_size
     }
-    return await get_records(**constraints)
+    if not no_totals:
+        total_items = await get_record_count(**constraints)
+        result.update({
+            "total_pages": (total_items + (page_size - 1)) // page_size,
+            "total_items": total_items,
+        })
+    return result


 def sort_claim_results(claims):
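The unified helper centralizes three behaviors that the endpoints previously handled inconsistently: pass-through when no paging arguments are given, clamping page_size to DEFAULT_PAGE_SIZE, and skipping the count query entirely when no_totals is set. A minimal standalone sketch of how it behaves; the helper body is copied from the hunk above, while RECORDS, the get_records/get_record_count stubs, and the asserted values are illustrative, not from the source:

import asyncio
from typing import Callable, Optional

DEFAULT_PAGE_SIZE = 50

async def maybe_paginate(get_records: Callable, get_record_count: Callable,
                         page: Optional[int], page_size: Optional[int],
                         no_totals: Optional[bool] = None, **constraints):
    if page is None and page_size is None:
        return await get_records(**constraints)
    if no_totals is not None:
        constraints["no_totals"] = no_totals
    if page is None:
        page = 1
    if page_size is None or page_size > DEFAULT_PAGE_SIZE:
        page_size = DEFAULT_PAGE_SIZE
    constraints.update({
        "offset": page_size * (page - 1),
        "limit": page_size
    })
    result = {
        "items": await get_records(**constraints),
        "page": page, "page_size": page_size
    }
    if not no_totals:
        total_items = await get_record_count(**constraints)
        result.update({
            "total_pages": (total_items + (page_size - 1)) // page_size,
            "total_items": total_items,
        })
    return result

RECORDS = list(range(113))  # hypothetical backing store

async def get_records(**constraints):
    # honor the offset/limit the helper injected into constraints
    offset = constraints.get("offset", 0)
    limit = constraints.get("limit", len(RECORDS))
    return RECORDS[offset:offset + limit]

async def get_record_count(**constraints):
    return len(RECORDS)  # a real count query would likewise ignore offset/limit

async def main():
    # page 3 of size 10 -> offset 20, and 113 items make ceil(113/10) == 12 pages
    page = await maybe_paginate(get_records, get_record_count, 3, 10)
    assert page["items"] == RECORDS[20:30]
    assert page["total_pages"] == 12 and page["total_items"] == 113
    # no_totals skips the count query and omits the totals keys
    lean = await maybe_paginate(get_records, get_record_count, 1, 10, no_totals=True)
    assert "total_pages" not in lean and "total_items" not in lean
    # no paging arguments at all: raw, unpaginated records
    assert await maybe_paginate(get_records, get_record_count, None, None) == RECORDS

asyncio.run(main())

Note the `//` floor division for total_pages: on non-negative integers, (n + d - 1) // d is ceiling division, so it replaces the old int((n + d - 1) / d) float round trip with the same result.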
@@ -1943,7 +1955,7 @@ class Daemon(metaclass=JSONRPCServerType):
         return maybe_paginate(claims, claim_count, page, page_size)

     @requires(WALLET_COMPONENT)
-    async def jsonrpc_claim_search(self, **kwargs):
+    def jsonrpc_claim_search(self, page=None, page_size=None, no_totals=None, **kwargs):
         """
         Search for stream and channel claims on the blockchain.

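The signature change also drops `async`: the method can return the coroutine produced by maybe_paginate directly, since the JSON-RPC dispatcher presumably awaits whatever the handler returns. A tiny illustration of that pattern; the names here are illustrative, not from the source:

import asyncio

async def fetch():
    return {"items": []}

def handler():       # plain def...
    return fetch()   # ...returning a coroutine for the caller to await

assert asyncio.run(handler()) == {"items": []}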
@@ -2068,18 +2080,19 @@ class Daemon(metaclass=JSONRPCServerType):

         Returns: {Paginated[Output]}
         """
+        async def claims(**kwargs):
+            return (await self.ledger.claim_search(**kwargs))[0]
+        async def claim_count(**kwargs):
+            return (await self.ledger.claim_search(**kwargs))[2]
+
         if kwargs.pop('valid_channel_signature', False):
             kwargs['signature_valid'] = 1
         if kwargs.pop('invalid_channel_signature', False):
             kwargs['signature_valid'] = 0
-        page_num, page_size = abs(kwargs.pop('page', 1)), min(abs(kwargs.pop('page_size', 10)), 50)
-        kwargs.update({'offset': page_size * (page_num - 1), 'limit': page_size})
-        txos, offset, total = await self.ledger.claim_search(**kwargs)
-        result = {"items": txos, "page": page_num, "page_size": page_size}
-        if not kwargs.pop('no_totals', False):
-            result['total_pages'] = int((total + (page_size - 1)) / page_size)
-            result['total_items'] = total
-        return result
+        return maybe_paginate(
+            claims, claim_count,
+            page, page_size, no_totals, **kwargs
+        )

     CHANNEL_DOC = """
     Create, update, abandon and list your channel claims.
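With the hand-rolled pagination gone, claim_search responses follow the shared Paginated[Output] envelope built by maybe_paginate. A sketch of the shape; the field values are illustrative, not from the source:

response = {
    "items": ["<Output>", "..."],  # at most page_size txos
    "page": 2,
    "page_size": 6,
    "total_pages": 17,             # omitted when no_totals=True
    "total_items": 101,            # omitted when no_totals=True
}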
@@ -276,7 +276,7 @@ class CommandTestCase(IntegrationTestCase):
         return await self.out(self.daemon.jsonrpc_resolve(uri))

     async def claim_search(self, **kwargs):
-        return (await self.out(self.daemon.jsonrpc_claim_search(**kwargs)))['items']
+        return await self.out(self.daemon.jsonrpc_claim_search(**kwargs))

     @staticmethod
     def get_claim_id(tx):
@@ -7,6 +7,7 @@ from urllib.request import urlopen

 from torba.client.errors import InsufficientFundsError

+from lbry.extras.daemon.Daemon import DEFAULT_PAGE_SIZE
 from lbry.testcase import CommandTestCase
 from lbry.wallet.transaction import Transaction

@@ -42,20 +43,20 @@ class ClaimSearchCommand(ClaimTestCase):
         self.channel = await self.channel_create('@abc', '1.0')
         self.channel_id = self.get_claim_id(self.channel)

-    async def create_lots_of_streams(self):
+    async def create_lots_of_streams(self, claims=4, blocks=3):
         tx = await self.daemon.jsonrpc_account_fund(None, None, '0.001', outputs=100, broadcast=True)
         await self.confirm_tx(tx.id)
-        # 4 claims per block, 3 blocks. Sorted by height (descending) then claim name (ascending).
+        # 4 claims per block, 3 blocks (by default). Sorted by height (descending) then claim name (ascending).
         self.streams = []
-        for j in range(3):
+        for j in range(blocks):
             same_height_claims = []
-            for k in range(3):
+            for k in range(claims - 1):
                 claim_tx = await self.stream_create(
                     f'c{j}-{k}', '0.000001', channel_id=self.channel_id, confirm=False)
                 same_height_claims.append(claim_tx['outputs'][0]['name'])
                 await self.on_transaction_dict(claim_tx)
             claim_tx = await self.stream_create(
-                f'c{j}-4', '0.000001', channel_id=self.channel_id, confirm=True)
+                f'c{j}-{claims - 1}', '0.000001', channel_id=self.channel_id, confirm=True)
             same_height_claims.append(claim_tx['outputs'][0]['name'])
             self.streams = same_height_claims + self.streams

@@ -138,28 +139,37 @@ class ClaimSearchCommand(ClaimTestCase):

     async def test_pagination(self):
         await self.create_channel()
-        await self.create_lots_of_streams()
+        await self.create_lots_of_streams(10, 10)

         page = await self.claim_search(page_size=20, channel='@abc', order_by=['height', '^name'])
-        page_claim_ids = [item['name'] for item in page]
-        self.assertEqual(page_claim_ids, self.streams)
+        page_claim_ids = [item['name'] for item in page['items']]
+        self.assertEqual(page_claim_ids, self.streams[:20])

         page = await self.claim_search(page_size=6, channel='@abc', order_by=['height', '^name'])
-        page_claim_ids = [item['name'] for item in page]
+        page_claim_ids = [item['name'] for item in page['items']]
         self.assertEqual(page_claim_ids, self.streams[:6])

         page = await self.claim_search(page=2, page_size=6, channel='@abc', order_by=['height', '^name'])
-        page_claim_ids = [item['name'] for item in page]
-        self.assertEqual(page_claim_ids, self.streams[6:])
+        page_claim_ids = [item['name'] for item in page['items']]
+        self.assertEqual(page_claim_ids, self.streams[6:(2 * 6)])
+
+        page = await self.claim_search(page=1, channel='@abc', order_by=['height', '^name'])
+        page_claim_ids = [item['name'] for item in page['items']]
+        self.assertEqual(page_claim_ids, self.streams[:DEFAULT_PAGE_SIZE])
+
+        page = await self.claim_search(page=2, channel='@abc', order_by=['height', '^name'])
+        page_claim_ids = [item['name'] for item in page['items']]
+        self.assertEqual(page_claim_ids, self.streams[DEFAULT_PAGE_SIZE:(2 * DEFAULT_PAGE_SIZE)])

-        out_of_bounds = await self.claim_search(page=2, page_size=20, channel='@abc')
-        self.assertEqual(out_of_bounds, [])
+        out_of_bounds = await self.claim_search(page=20, page_size=20, channel='@abc')
+        self.assertEqual(out_of_bounds['items'], [])

-        results = await self.daemon.jsonrpc_claim_search()
-        self.assertEqual(results['total_pages'], 2)
-        self.assertEqual(results['total_items'], 13)
+        total_claims = 10 * 10 + 1
+        results = await self.claim_search(page=1)
+        self.assertEqual(results['total_pages'], (total_claims + DEFAULT_PAGE_SIZE - 1) // DEFAULT_PAGE_SIZE)
+        self.assertEqual(results['total_items'], total_claims)

-        results = await self.daemon.jsonrpc_claim_search(no_totals=True)
+        results = await self.claim_search(page=1, no_totals=True)
         self.assertNotIn('total_pages', results)
         self.assertNotIn('total_items', results)

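The totals assertions work out as follows: create_lots_of_streams(10, 10) publishes 10 claims per block across 10 blocks, and the extra +1 presumably accounts for the @abc channel claim itself, so an unfiltered search pages out at DEFAULT_PAGE_SIZE. A quick check of the arithmetic the test relies on:

DEFAULT_PAGE_SIZE = 50
total_claims = 10 * 10 + 1  # 100 streams + the channel claim
assert total_claims == 101
assert (total_claims + DEFAULT_PAGE_SIZE - 1) // DEFAULT_PAGE_SIZE == 3  # ceil(101/50)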