2021-08-30 18:30:38 +02:00
|
|
|
import time
|
2021-01-17 09:50:49 +01:00
|
|
|
import asyncio
|
|
|
|
import struct
|
2021-03-24 09:35:31 +01:00
|
|
|
from binascii import unhexlify
|
2021-04-23 05:50:35 +02:00
|
|
|
from collections import Counter, deque
|
2021-01-19 08:37:31 +01:00
|
|
|
from decimal import Decimal
|
|
|
|
from operator import itemgetter
|
2021-03-24 09:35:31 +01:00
|
|
|
from typing import Optional, List, Iterable, Union
|
2021-01-17 09:50:49 +01:00
|
|
|
|
2021-02-12 01:45:41 +01:00
|
|
|
from elasticsearch import AsyncElasticsearch, NotFoundError, ConnectionError
|
2021-02-23 00:47:56 +01:00
|
|
|
from elasticsearch.helpers import async_streaming_bulk
|
2021-08-06 17:55:33 +02:00
|
|
|
from lbry.error import ResolveCensoredError, TooManyClaimSearchParametersError
|
2021-01-20 05:20:50 +01:00
|
|
|
from lbry.schema.result import Outputs, Censor
|
2021-01-19 08:37:31 +01:00
|
|
|
from lbry.schema.tags import clean_tags
|
2021-01-20 07:19:21 +01:00
|
|
|
from lbry.schema.url import URL, normalize_name
|
2021-03-02 23:58:54 +01:00
|
|
|
from lbry.utils import LRUCache
|
2021-01-19 08:37:31 +01:00
|
|
|
from lbry.wallet.server.db.common import CLAIM_TYPES, STREAM_TYPES
|
2021-03-15 20:08:04 +01:00
|
|
|
from lbry.wallet.server.db.elasticsearch.constants import INDEX_DEFAULT_SETTINGS, REPLACEMENTS, FIELDS, TEXT_FIELDS, \
|
2021-07-20 23:09:39 +02:00
|
|
|
RANGE_FIELDS, ALL_FIELDS
|
2021-02-12 01:45:41 +01:00
|
|
|
from lbry.wallet.server.util import class_logger
|
2021-05-28 20:10:35 +02:00
|
|
|
from lbry.wallet.server.db.common import ResolveResult
|
2021-01-17 09:50:49 +01:00
|
|
|
|
|
|
|
|
2021-03-11 05:41:55 +01:00
|
|
|
class ChannelResolution(str):
    """String subclass marking a URL segment that resolved as a channel."""

    @classmethod
    def lookup_error(cls, url):
        """Build the LookupError raised when *url*'s channel cannot be found."""
        message = f'Could not find channel in "{url}".'
        return LookupError(message)
|
2021-03-11 05:41:55 +01:00
|
|
|
|
|
|
|
|
|
|
|
class StreamResolution(str):
    """String subclass marking a URL segment that resolved as a stream claim."""

    @classmethod
    def lookup_error(cls, url):
        """Build the LookupError raised when no claim exists at *url*."""
        message = f'Could not find claim at "{url}".'
        return LookupError(message)
|
2021-03-11 05:41:55 +01:00
|
|
|
|
|
|
|
|
2021-05-07 18:42:52 +02:00
|
|
|
class IndexVersionMismatch(Exception):
    """Raised when the ES index template version differs from SearchIndex.VERSION.

    Fix: the original never called ``super().__init__``, so ``str(exc)`` and
    tracebacks showed an empty message; pass a descriptive message through while
    keeping the two attributes callers read.
    """

    def __init__(self, got_version, expected_version):
        super().__init__(f"got version {got_version}, expected {expected_version}")
        # Version found in the live index template.
        self.got_version = got_version
        # Version this code was written against (SearchIndex.VERSION).
        self.expected_version = expected_version
|
|
|
|
|
|
|
|
|
2021-01-19 08:37:31 +01:00
|
|
|
class SearchIndex:
    """Asynchronous Elasticsearch front-end for the claims index.

    Owns two clients: a short-timeout one for searches and a long-timeout one
    for bulk synchronization, plus LRU caches for claims and search results.
    """

    # Bumped when the index schema changes incompatibly; compared against the
    # version stored in the index template by start()/set_index_version().
    VERSION = 1

    def __init__(self, index_prefix: str, search_timeout=3.0, elastic_host='localhost', elastic_port=9200):
        self.search_timeout = search_timeout
        self.sync_timeout = 600  # wont hit that 99% of the time, but can hit on a fresh import
        # Clients are created lazily in start(); None until then.
        self.search_client: Optional[AsyncElasticsearch] = None
        self.sync_client: Optional[AsyncElasticsearch] = None
        # Full index name, e.g. '<prefix>claims'.
        self.index = index_prefix + 'claims'
        self.logger = class_logger(__name__, self.__class__.__name__)
        # claim_id -> expanded claim document cache.
        self.claim_cache = LRUCache(2 ** 15)
        # serialized-kwargs -> ResultCacheItem cache for search results.
        self.search_cache = LRUCache(2 ** 17)
        self._elastic_host = elastic_host
        self._elastic_port = elastic_port
|
2021-01-17 09:50:49 +01:00
|
|
|
|
2021-05-07 18:42:52 +02:00
|
|
|
    async def get_index_version(self) -> int:
        """Return the schema version stored in the index template, or 0 when none exists."""
        try:
            template = await self.sync_client.indices.get_template(self.index)
            return template[self.index]['version']
        except NotFoundError:
            # No template yet: treat as a brand-new / pre-versioning index.
            return 0
|
|
|
|
|
|
|
|
    async def set_index_version(self, version):
        """Persist *version* in an index template so later starts can detect mismatches.

        The 'ignored' pattern means the template never applies to real indices;
        it exists purely as version storage. ignore=400 tolerates re-puts.
        """
        await self.sync_client.indices.put_template(
            self.index, body={'version': version, 'index_patterns': ['ignored']}, ignore=400
        )
|
|
|
|
|
2021-05-12 05:21:03 +02:00
|
|
|
    async def start(self) -> bool:
        """Connect to Elasticsearch and ensure the claims index exists.

        Returns True when this call created the index, False when it already
        existed (including when start() was already called on this instance).
        Raises IndexVersionMismatch when an existing index has a different
        schema version.
        """
        if self.sync_client:
            # Already started; idempotent no-op.
            return False
        hosts = [{'host': self._elastic_host, 'port': self._elastic_port}]
        self.sync_client = AsyncElasticsearch(hosts, timeout=self.sync_timeout)
        self.search_client = AsyncElasticsearch(hosts, timeout=self.search_timeout)
        # Block until the cluster is reachable and at least 'yellow'.
        while True:
            try:
                await self.sync_client.cluster.health(wait_for_status='yellow')
                break
            except ConnectionError:
                self.logger.warning("Failed to connect to Elasticsearch. Waiting for it!")
                await asyncio.sleep(1)

        # ignore=400: "already exists" is not an error; acked tells us apart.
        res = await self.sync_client.indices.create(self.index, INDEX_DEFAULT_SETTINGS, ignore=400)
        acked = res.get('acknowledged', False)
        if acked:
            # Fresh index: stamp it with the current schema version.
            await self.set_index_version(self.VERSION)
            return acked
        # Pre-existing index: verify it matches the version we expect.
        index_version = await self.get_index_version()
        if index_version != self.VERSION:
            self.logger.error("es search index has an incompatible version: %s vs %s", index_version, self.VERSION)
            raise IndexVersionMismatch(index_version, self.VERSION)
        await self.sync_client.indices.refresh(self.index)
        return acked
|
2021-01-17 09:50:49 +01:00
|
|
|
|
2022-02-01 19:43:01 +01:00
|
|
|
async def stop(self):
|
|
|
|
clients = [c for c in (self.sync_client, self.search_client) if c is not None]
|
2021-03-24 09:35:31 +01:00
|
|
|
self.sync_client, self.search_client = None, None
|
2022-02-01 19:43:01 +01:00
|
|
|
if clients:
|
|
|
|
await asyncio.gather(*(client.close() for client in clients))
|
2021-01-19 08:37:31 +01:00
|
|
|
|
|
|
|
    def delete_index(self):
        """Delete the claims index; returns the client coroutine for the caller to await."""
        return self.sync_client.indices.delete(self.index, ignore_unavailable=True)
|
2021-01-19 08:37:31 +01:00
|
|
|
|
2021-03-05 07:16:40 +01:00
|
|
|
    async def _consume_claim_producer(self, claim_producer):
        """Translate (op, doc) pairs from *claim_producer* into bulk-helper actions.

        'delete' ops yield a delete action (doc is the claim id); anything else
        yields an upsert restricted to the fields the index actually maps.
        Progress is logged every 100 claims.
        """
        count = 0
        async for op, doc in claim_producer:
            if op == 'delete':
                yield {
                    '_index': self.index,
                    '_op_type': 'delete',
                    '_id': doc
                }
            else:
                yield {
                    # Drop keys the index schema does not know about.
                    'doc': {key: value for key, value in doc.items() if key in ALL_FIELDS},
                    '_id': doc['claim_id'],
                    '_index': self.index,
                    '_op_type': 'update',
                    'doc_as_upsert': True
                }
            count += 1
            if count % 100 == 0:
                self.logger.info("Indexing in progress, %d claims.", count)
        if count:
            self.logger.info("Indexing done for %d claims.", count)
        else:
            # Nothing to do: keep quiet at info level.
            self.logger.debug("Indexing done for %d claims.", count)
|
2021-02-22 20:42:43 +01:00
|
|
|
|
2021-03-05 07:16:40 +01:00
|
|
|
    async def claim_consumer(self, claim_producer):
        """Stream claims from *claim_producer* into ES via the bulk helper, then refresh.

        Failures are logged and skipped (raise_on_error=False); successful item
        ids are collected in *touched* (currently only used locally).
        """
        touched = set()
        async for ok, item in async_streaming_bulk(self.sync_client, self._consume_claim_producer(claim_producer),
                                                   raise_on_error=False):
            if not ok:
                self.logger.warning("indexing failed for an item: %s", item)
            else:
                # Bulk responses are {op_type: {...}}; popitem unwraps the single entry.
                item = item.popitem()[1]
                touched.add(item['_id'])
        # Make the new documents visible to searches immediately.
        await self.sync_client.indices.refresh(self.index)
        self.logger.debug("Indexing done.")
|
2021-01-19 08:37:31 +01:00
|
|
|
|
2021-03-24 09:35:31 +01:00
|
|
|
    def update_filter_query(self, censor_type, blockdict, channels=False):
        """Build an update-by-query body that stamps censorship info on claims.

        *blockdict* maps censored claim/channel hashes (bytes) to the censoring
        channel hash (bytes). With channels=True the query matches every claim
        in the blocked channels rather than the claims themselves. Only claims
        whose current censor_type is below *censor_type* are touched, so a
        stronger censor level is never downgraded.
        """
        blockdict = {blocked.hex(): blocker.hex() for blocked, blocker in blockdict.items()}
        if channels:
            update = expand_query(channel_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
        else:
            update = expand_query(claim_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
        key = 'channel_id' if channels else 'claim_id'
        update['script'] = {
            # Look up the censoring channel for each matched doc from params.
            "source": f"ctx._source.censor_type={censor_type}; "
                      f"ctx._source.censoring_channel_id=params[ctx._source.{key}];",
            "lang": "painless",
            "params": blockdict
        }
        return update
|
|
|
|
|
2021-09-03 06:33:40 +02:00
|
|
|
    async def update_trending_score(self, params):
        """Apply trending-score spikes to claims via a server-side Painless script.

        *params* maps claim_id -> iterable of update objects with .height,
        .prev_amount and .new_amount attributes (amounts in dewies; converted
        to LBC below). The math (squash/softenLBC/spike*) runs entirely inside
        Elasticsearch so the score accumulates atomically per document.
        """
        update_trending_score_script = """
        double softenLBC(double lbc) { return (Math.pow(lbc, 1.0 / 3.0)); }

        double logsumexp(double x, double y)
        {
            double top;
            if(x > y)
                top = x;
            else
                top = y;
            double result = top + Math.log(Math.exp(x-top) + Math.exp(y-top));
            return(result);
        }

        double logdiffexp(double big, double small)
        {
            return big + Math.log(1.0 - Math.exp(small - big));
        }

        double squash(double x)
        {
            if(x < 0.0)
                return -Math.log(1.0 - x);
            else
                return Math.log(x + 1.0);
        }

        double unsquash(double x)
        {
            if(x < 0.0)
                return 1.0 - Math.exp(-x);
            else
                return Math.exp(x) - 1.0;
        }

        double log_to_squash(double x)
        {
            return logsumexp(x, 0.0);
        }

        double squash_to_log(double x)
        {
            //assert x > 0.0;
            return logdiffexp(x, 0.0);
        }

        double squashed_add(double x, double y)
        {
            // squash(unsquash(x) + unsquash(y)) but avoiding overflow.
            // Cases where the signs are the same
            if (x < 0.0 && y < 0.0)
                return -logsumexp(-x, logdiffexp(-y, 0.0));
            if (x >= 0.0 && y >= 0.0)
                return logsumexp(x, logdiffexp(y, 0.0));
            // Where the signs differ
            if (x >= 0.0 && y < 0.0)
                if (Math.abs(x) >= Math.abs(y))
                    return logsumexp(0.0, logdiffexp(x, -y));
                else
                    return -logsumexp(0.0, logdiffexp(-y, x));
            if (x < 0.0 && y >= 0.0)
            {
                // Addition is commutative, hooray for new math
                return squashed_add(y, x);
            }
            return 0.0;
        }

        double squashed_multiply(double x, double y)
        {
            // squash(unsquash(x)*unsquash(y)) but avoiding overflow.
            int sign;
            if(x*y >= 0.0)
                sign = 1;
            else
                sign = -1;
            return sign*logsumexp(squash_to_log(Math.abs(x))
                        + squash_to_log(Math.abs(y)), 0.0);
        }

        // Squashed inflated units
        double inflateUnits(int height) {
            double timescale = 576.0; // Half life of 400 = e-folding time of a day
                                      // by coincidence, so may as well go with it
            return log_to_squash(height / timescale);
        }

        double spikePower(double newAmount) {
            if (newAmount < 50.0) {
                return(0.5);
            } else if (newAmount < 85.0) {
                return(newAmount / 100.0);
            } else {
                return(0.85);
            }
        }

        double spikeMass(double oldAmount, double newAmount) {
            double softenedChange = softenLBC(Math.abs(newAmount - oldAmount));
            double changeInSoftened = Math.abs(softenLBC(newAmount) - softenLBC(oldAmount));
            double power = spikePower(newAmount);
            if (oldAmount > newAmount) {
                -1.0 * Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power)
            } else {
                Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power)
            }
        }
        for (i in params.src.changes) {
            double units = inflateUnits(i.height);
            if (ctx._source.trending_score == null) {
                ctx._source.trending_score = 0.0;
            }
            double bigSpike = squashed_multiply(units, squash(spikeMass(i.prev_amount, i.new_amount)));
            ctx._source.trending_score = squashed_add(ctx._source.trending_score, bigSpike);
        }
        """
        start = time.perf_counter()

        def producer():
            # One scripted update per claim, carrying all of its changes at once.
            for claim_id, claim_updates in params.items():
                yield {
                    '_id': claim_id,
                    '_index': self.index,
                    '_op_type': 'update',
                    'script': {
                        'lang': 'painless',
                        'source': update_trending_score_script,
                        'params': {'src': {
                            'changes': [
                                {
                                    'height': p.height,
                                    # Convert from dewies (1E8 per LBC) to LBC.
                                    'prev_amount': p.prev_amount / 1E8,
                                    'new_amount': p.new_amount / 1E8,
                                } for p in claim_updates
                            ]
                        }}
                    },
                }
        if not params:
            return
        async for ok, item in async_streaming_bulk(self.sync_client, producer(), raise_on_error=False):
            if not ok:
                self.logger.warning("updating trending failed for an item: %s", item)
        await self.sync_client.indices.refresh(self.index)
        self.logger.info("updated trending scores in %ims", int((time.perf_counter() - start) * 1000))
|
2021-08-30 18:16:07 +02:00
|
|
|
|
2021-01-29 07:41:53 +01:00
|
|
|
    async def apply_filters(self, blocked_streams, blocked_channels, filtered_streams, filtered_channels):
        """Stamp censorship metadata for the given block/filter lists.

        Each argument maps censored hashes (bytes) to the censoring channel
        hash (bytes). Censor.SEARCH entries are hidden from search results;
        Censor.RESOLVE entries are also blocked from resolve. For channel
        maps, a second update_by_query with channels=True additionally tags
        every claim signed by the censored channel. The index is refreshed
        after each pass so subsequent passes and searches see the updates.
        """
        if filtered_streams:
            await self.sync_client.update_by_query(
                self.index, body=self.update_filter_query(Censor.SEARCH, filtered_streams), slices=4)
            await self.sync_client.indices.refresh(self.index)
        if filtered_channels:
            await self.sync_client.update_by_query(
                self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels), slices=4)
            await self.sync_client.indices.refresh(self.index)
            # channels=True: also censor the claims inside these channels.
            await self.sync_client.update_by_query(
                self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels, True), slices=4)
            await self.sync_client.indices.refresh(self.index)
        if blocked_streams:
            await self.sync_client.update_by_query(
                self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_streams), slices=4)
            await self.sync_client.indices.refresh(self.index)
        if blocked_channels:
            await self.sync_client.update_by_query(
                self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels), slices=4)
            await self.sync_client.indices.refresh(self.index)
            await self.sync_client.update_by_query(
                self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels, True), slices=4)
            await self.sync_client.indices.refresh(self.index)
        # Cached search results may now include newly-censored claims.
        self.clear_caches()
|
2021-05-12 02:38:05 +02:00
|
|
|
|
|
|
|
def clear_caches(self):
|
2021-03-05 09:39:36 +01:00
|
|
|
self.search_cache.clear()
|
2021-03-11 07:19:15 +01:00
|
|
|
self.claim_cache.clear()
|
2021-01-29 07:41:53 +01:00
|
|
|
|
2021-05-27 19:35:41 +02:00
|
|
|
async def cached_search(self, kwargs):
|
2021-01-30 03:38:15 +01:00
|
|
|
total_referenced = []
|
2021-05-27 19:35:41 +02:00
|
|
|
cache_item = ResultCacheItem.from_cache(str(kwargs), self.search_cache)
|
|
|
|
if cache_item.result is not None:
|
|
|
|
return cache_item.result
|
|
|
|
async with cache_item.lock:
|
|
|
|
if cache_item.result:
|
2021-03-12 19:44:30 +01:00
|
|
|
return cache_item.result
|
2021-05-27 19:35:41 +02:00
|
|
|
censor = Censor(Censor.SEARCH)
|
|
|
|
if kwargs.get('no_totals'):
|
|
|
|
response, offset, total = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
|
|
|
|
else:
|
|
|
|
response, offset, total = await self.search(**kwargs)
|
|
|
|
censor.apply(response)
|
|
|
|
total_referenced.extend(response)
|
2021-05-28 20:10:35 +02:00
|
|
|
|
2021-05-27 19:35:41 +02:00
|
|
|
if censor.censored:
|
|
|
|
response, _, _ = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
|
2021-02-01 20:02:34 +01:00
|
|
|
total_referenced.extend(response)
|
2021-05-28 20:10:35 +02:00
|
|
|
response = [
|
|
|
|
ResolveResult(
|
2021-09-15 19:17:26 +02:00
|
|
|
name=r['claim_name'],
|
|
|
|
normalized_name=r['normalized_name'],
|
2021-05-28 20:10:35 +02:00
|
|
|
claim_hash=r['claim_hash'],
|
|
|
|
tx_num=r['tx_num'],
|
|
|
|
position=r['tx_nout'],
|
|
|
|
tx_hash=r['tx_hash'],
|
|
|
|
height=r['height'],
|
|
|
|
amount=r['amount'],
|
|
|
|
short_url=r['short_url'],
|
|
|
|
is_controlling=r['is_controlling'],
|
|
|
|
canonical_url=r['canonical_url'],
|
|
|
|
creation_height=r['creation_height'],
|
|
|
|
activation_height=r['activation_height'],
|
|
|
|
expiration_height=r['expiration_height'],
|
|
|
|
effective_amount=r['effective_amount'],
|
|
|
|
support_amount=r['support_amount'],
|
|
|
|
last_takeover_height=r['last_take_over_height'],
|
|
|
|
claims_in_channel=r['claims_in_channel'],
|
|
|
|
channel_hash=r['channel_hash'],
|
2021-06-02 17:00:27 +02:00
|
|
|
reposted_claim_hash=r['reposted_claim_hash'],
|
2021-06-18 03:20:57 +02:00
|
|
|
reposted=r['reposted'],
|
|
|
|
signature_valid=r['signature_valid']
|
2021-05-28 20:10:35 +02:00
|
|
|
) for r in response
|
|
|
|
]
|
|
|
|
extra = [
|
|
|
|
ResolveResult(
|
2021-09-15 19:17:26 +02:00
|
|
|
name=r['claim_name'],
|
|
|
|
normalized_name=r['normalized_name'],
|
2021-05-28 20:10:35 +02:00
|
|
|
claim_hash=r['claim_hash'],
|
|
|
|
tx_num=r['tx_num'],
|
|
|
|
position=r['tx_nout'],
|
|
|
|
tx_hash=r['tx_hash'],
|
|
|
|
height=r['height'],
|
|
|
|
amount=r['amount'],
|
|
|
|
short_url=r['short_url'],
|
|
|
|
is_controlling=r['is_controlling'],
|
|
|
|
canonical_url=r['canonical_url'],
|
|
|
|
creation_height=r['creation_height'],
|
|
|
|
activation_height=r['activation_height'],
|
|
|
|
expiration_height=r['expiration_height'],
|
|
|
|
effective_amount=r['effective_amount'],
|
|
|
|
support_amount=r['support_amount'],
|
|
|
|
last_takeover_height=r['last_take_over_height'],
|
|
|
|
claims_in_channel=r['claims_in_channel'],
|
|
|
|
channel_hash=r['channel_hash'],
|
2021-06-02 17:00:27 +02:00
|
|
|
reposted_claim_hash=r['reposted_claim_hash'],
|
2021-06-18 03:20:57 +02:00
|
|
|
reposted=r['reposted'],
|
|
|
|
signature_valid=r['signature_valid']
|
2021-05-28 20:10:35 +02:00
|
|
|
) for r in await self._get_referenced_rows(total_referenced)
|
|
|
|
]
|
2021-05-27 19:35:41 +02:00
|
|
|
result = Outputs.to_base64(
|
2021-05-28 20:10:35 +02:00
|
|
|
response, extra, offset, total, censor
|
2021-05-27 19:35:41 +02:00
|
|
|
)
|
|
|
|
cache_item.result = result
|
|
|
|
return result
|
|
|
|
|
2021-03-02 03:23:38 +01:00
|
|
|
    async def get_many(self, *claim_ids):
        """Fetch claim documents by id, serving from claim_cache where possible.

        Returns a lazy iterator in claim_ids order; ids still missing after the
        cache is populated (not found in ES) are silently skipped.
        """
        await self.populate_claim_cache(*claim_ids)
        return filter(None, map(self.claim_cache.get, claim_ids))
|
|
|
|
|
|
|
|
    async def populate_claim_cache(self, *claim_ids):
        """Bulk-fetch any of *claim_ids* not already in claim_cache via ES mget."""
        missing = [claim_id for claim_id in claim_ids if self.claim_cache.get(claim_id) is None]
        if missing:
            results = await self.search_client.mget(
                index=self.index, body={"ids": missing}
            )
            # Only cache docs ES actually found; expand_result inflates hashes etc.
            for result in expand_result(filter(lambda doc: doc['found'], results["docs"])):
                self.claim_cache.set(result['claim_id'], result)
|
|
|
|
|
|
|
|
|
2021-01-19 08:37:31 +01:00
|
|
|
    async def search(self, **kwargs):
        """Execute a claim search; returns (rows, offset, total).

        A missing index/document surfaces as an empty result rather than an
        exception.
        """
        try:
            return await self.search_ahead(**kwargs)
        except NotFoundError:
            return [], 0, 0
        # return expand_result(result['hits']), 0, result.get('total', {}).get('value', 0)
|
2021-04-14 17:16:49 +02:00
|
|
|
|
|
|
|
    async def search_ahead(self, **kwargs):
        # 'limit_claims_per_channel' case. Fetch 1000 results, reorder, slice, inflate and return
        per_channel_per_page = kwargs.pop('limit_claims_per_channel', 0) or 0
        remove_duplicates = kwargs.pop('remove_duplicates', False)
        page_size = kwargs.pop('limit', 10)
        offset = kwargs.pop('offset', 0)
        # Over-fetch so pagination/reordering can be done locally and cached.
        kwargs['limit'] = 1000
        cache_item = ResultCacheItem.from_cache(f"ahead{per_channel_per_page}{kwargs}", self.search_cache)
        if cache_item.result is not None:
            reordered_hits = cache_item.result
        else:
            async with cache_item.lock:
                # Double-check after acquiring the lock.
                if cache_item.result:
                    reordered_hits = cache_item.result
                else:
                    query = expand_query(**kwargs)
                    # Only fetch the fields needed for reordering/deduplication.
                    search_hits = deque((await self.search_client.search(
                        query, index=self.index, track_total_hits=False,
                        _source_includes=['_id', 'channel_id', 'reposted_claim_id', 'creation_height']
                    ))['hits']['hits'])
                    if remove_duplicates:
                        search_hits = self.__remove_duplicates(search_hits)
                    if per_channel_per_page > 0:
                        reordered_hits = self.__search_ahead(search_hits, page_size, per_channel_per_page)
                    else:
                        reordered_hits = [(hit['_id'], hit['_source']['channel_id']) for hit in search_hits]
                    cache_item.result = reordered_hits
        # Inflate only the requested page from the cached (claim_id, channel_id) list.
        result = list(await self.get_many(*(claim_id for claim_id, _ in reordered_hits[offset:(offset + page_size)])))
        return result, 0, len(reordered_hits)
|
2021-04-23 05:50:35 +02:00
|
|
|
|
2021-05-20 06:20:25 +02:00
|
|
|
def __remove_duplicates(self, search_hits: deque) -> deque:
|
2021-05-19 08:05:51 +02:00
|
|
|
known_ids = {} # claim_id -> (creation_height, hit_id), where hit_id is either reposted claim id or original
|
|
|
|
dropped = set()
|
|
|
|
for hit in search_hits:
|
|
|
|
hit_height, hit_id = hit['_source']['creation_height'], hit['_source']['reposted_claim_id'] or hit['_id']
|
|
|
|
if hit_id not in known_ids:
|
|
|
|
known_ids[hit_id] = (hit_height, hit['_id'])
|
|
|
|
else:
|
|
|
|
previous_height, previous_id = known_ids[hit_id]
|
|
|
|
if hit_height < previous_height:
|
|
|
|
known_ids[hit_id] = (hit_height, hit['_id'])
|
|
|
|
dropped.add(previous_id)
|
|
|
|
else:
|
|
|
|
dropped.add(hit['_id'])
|
2021-05-20 06:20:25 +02:00
|
|
|
return deque(hit for hit in search_hits if hit['_id'] not in dropped)
|
2021-05-19 08:05:51 +02:00
|
|
|
|
|
|
|
    def __search_ahead(self, search_hits: list, page_size: int, per_channel_per_page: int):
        """Reorder hits so each page carries at most per_channel_per_page hits per channel.

        Hits pushed off a page are queued and retried at the start of each
        following page (quotas reset per page). Returns a list of
        (claim_id, channel_id) tuples. Note: *search_hits* is consumed as a
        deque despite the list annotation.
        """
        reordered_hits = []
        channel_counters = Counter()
        next_page_hits_maybe_check_later = deque()
        while search_hits or next_page_hits_maybe_check_later:
            if reordered_hits and len(reordered_hits) % page_size == 0:
                # Page boundary: per-channel quotas reset for the new page.
                channel_counters.clear()
            elif not reordered_hits:
                pass
            else:
                break  # means last page was incomplete and we are left with bad replacements
            # First, retry hits deferred from earlier pages.
            for _ in range(len(next_page_hits_maybe_check_later)):
                claim_id, channel_id = next_page_hits_maybe_check_later.popleft()
                if per_channel_per_page > 0 and channel_counters[channel_id] < per_channel_per_page:
                    reordered_hits.append((claim_id, channel_id))
                    channel_counters[channel_id] += 1
                else:
                    # Still over quota: push back for a later page.
                    next_page_hits_maybe_check_later.append((claim_id, channel_id))
            # Then fill the rest of the page from the raw hit stream.
            while search_hits:
                hit = search_hits.popleft()
                hit_id, hit_channel_id = hit['_id'], hit['_source']['channel_id']
                if hit_channel_id is None or per_channel_per_page <= 0:
                    # Unsigned claims (or disabled quota) bypass the limit.
                    reordered_hits.append((hit_id, hit_channel_id))
                elif channel_counters[hit_channel_id] < per_channel_per_page:
                    reordered_hits.append((hit_id, hit_channel_id))
                    channel_counters[hit_channel_id] += 1
                    if len(reordered_hits) % page_size == 0:
                        break
                else:
                    next_page_hits_maybe_check_later.append((hit_id, hit_channel_id))
        return reordered_hits
|
2021-01-19 08:37:31 +01:00
|
|
|
|
|
|
|
    async def _get_referenced_rows(self, txo_rows: List[dict]):
        """Fetch claims referenced by *txo_rows*: reposts, channels, censoring channels.

        A second round-trip resolves the channels of the first batch (e.g. the
        channel of a reposted claim), giving two levels of references in total.
        """
        txo_rows = [row for row in txo_rows if isinstance(row, dict)]
        referenced_ids = set(filter(None, map(itemgetter('reposted_claim_id'), txo_rows)))
        referenced_ids |= set(filter(None, (row['channel_id'] for row in txo_rows)))
        referenced_ids |= set(filter(None, (row['censoring_channel_id'] for row in txo_rows)))

        referenced_txos = []
        if referenced_ids:
            referenced_txos.extend(await self.get_many(*referenced_ids))
            # Second level: channels of the claims we just fetched.
            referenced_ids = set(filter(None, (row['channel_id'] for row in referenced_txos)))

        if referenced_ids:
            referenced_txos.extend(await self.get_many(*referenced_ids))

        return referenced_txos
|
2021-01-17 09:50:49 +01:00
|
|
|
|
|
|
|
|
2021-01-19 08:37:31 +01:00
|
|
|
def expand_query(**kwargs):
    """Translate claim-search keyword arguments into an Elasticsearch query body.

    Handles field constraints (terms, ranges with <, >, <=, >= prefixes,
    partial-id prefixes), tag/language/channel filters, signature and
    has_source logic, free-text search, ordering, paging and the
    limit_claims_per_channel collapse.

    Fix: the original contained a second, unreachable
    ``elif key == 'any_languages'`` branch (dead code shadowed by the first);
    it has been removed — behavior is unchanged.

    Raises TooManyClaimSearchParametersError when a list constraint exceeds
    2048 entries.
    """
    if "amount_order" in kwargs:
        # amount_order=N means: the N-th claim ordered by effective_amount.
        kwargs["limit"] = 1
        kwargs["order_by"] = "effective_amount"
        kwargs["offset"] = int(kwargs["amount_order"]) - 1
    if 'name' in kwargs:
        kwargs['name'] = normalize_name(kwargs.pop('name'))
    if kwargs.get('is_controlling') is False:
        # False means "don't care", not "must be non-controlling".
        kwargs.pop('is_controlling')
    query = {'must': [], 'must_not': []}
    collapse = None
    if 'fee_currency' in kwargs and kwargs['fee_currency'] is not None:
        kwargs['fee_currency'] = kwargs['fee_currency'].upper()
    for key, value in kwargs.items():
        key = key.replace('claim.', '')
        many = key.endswith('__in') or isinstance(value, list)
        if many and len(value) > 2048:
            raise TooManyClaimSearchParametersError(key, 2048)
        if many:
            key = key.replace('__in', '')
            value = list(filter(None, value))
        if value is None or isinstance(value, list) and len(value) == 0:
            continue
        key = REPLACEMENTS.get(key, key)
        if key in FIELDS:
            partial_id = False
            if key == 'claim_type':
                if isinstance(value, str):
                    value = CLAIM_TYPES[value]
                else:
                    value = [CLAIM_TYPES[claim_type] for claim_type in value]
            elif key == 'stream_type':
                value = [STREAM_TYPES[value]] if isinstance(value, str) else list(map(STREAM_TYPES.get, value))
            if key == '_id':
                # _id values arrive as raw bytes; ES stores reversed hex.
                if isinstance(value, Iterable):
                    value = [item[::-1].hex() for item in value]
                else:
                    value = value[::-1].hex()
            if not many and key in ('_id', 'claim_id', 'sd_hash') and len(value) < 20:
                # Shorter than a full id: match as a prefix.
                partial_id = True
            if key in ('signature_valid', 'has_source'):
                continue  # handled later
            if key in TEXT_FIELDS:
                key += '.keyword'
            ops = {'<=': 'lte', '>=': 'gte', '<': 'lt', '>': 'gt'}
            if partial_id:
                query['must'].append({"prefix": {key: value}})
            elif key in RANGE_FIELDS and isinstance(value, str) and value[0] in ops:
                operator_length = 2 if value[:2] in ops else 1
                operator, value = value[:operator_length], value[operator_length:]
                if key == 'fee_amount':
                    # fee_amount is indexed in millis.
                    value = str(Decimal(value)*1000)
                query['must'].append({"range": {key: {ops[operator]: value}}})
            elif key in RANGE_FIELDS and isinstance(value, list) and all(v[0] in ops for v in value):
                # Multiple range constraints on one field combine into one range clause.
                range_constraints = []
                for v in value:
                    operator_length = 2 if v[:2] in ops else 1
                    operator, stripped_op_v = v[:operator_length], v[operator_length:]
                    if key == 'fee_amount':
                        stripped_op_v = str(Decimal(stripped_op_v)*1000)
                    range_constraints.append((operator, stripped_op_v))
                query['must'].append({"range": {key: {ops[operator]: v for operator, v in range_constraints}}})
            elif many:
                query['must'].append({"terms": {key: value}})
            else:
                if key == 'fee_amount':
                    value = str(Decimal(value)*1000)
                query['must'].append({"term": {key: {"value": value}}})
        elif key == 'not_channel_ids':
            for channel_id in value:
                # Exclude both claims in the channel and the channel claim itself.
                query['must_not'].append({"term": {'channel_id.keyword': channel_id}})
                query['must_not'].append({"term": {'_id': channel_id}})
        elif key == 'channel_ids':
            query['must'].append({"terms": {'channel_id.keyword': value}})
        elif key == 'claim_ids':
            query['must'].append({"terms": {'claim_id.keyword': value}})
        elif key == 'media_types':
            query['must'].append({"terms": {'media_type.keyword': value}})
        elif key == 'any_languages':
            query['must'].append({"terms": {'languages': clean_tags(value)}})
        elif key == 'all_languages':
            query['must'].extend([{"term": {'languages': tag}} for tag in value])
        elif key == 'any_tags':
            query['must'].append({"terms": {'tags.keyword': clean_tags(value)}})
        elif key == 'all_tags':
            query['must'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
        elif key == 'not_tags':
            query['must_not'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
        elif key == 'not_claim_id':
            query['must_not'].extend([{"term": {'claim_id.keyword': cid}} for cid in value])
        elif key == 'limit_claims_per_channel':
            collapse = ('channel_id.keyword', value)
    if kwargs.get('has_channel_signature'):
        query['must'].append({"exists": {"field": "signature"}})
        if 'signature_valid' in kwargs:
            query['must'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
    elif 'signature_valid' in kwargs:
        # Either unsigned, or signed with the requested validity.
        query.setdefault('should', [])
        query["minimum_should_match"] = 1
        query['should'].append({"bool": {"must_not": {"exists": {"field": "signature"}}}})
        query['should'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
    if 'has_source' in kwargs:
        query.setdefault('should', [])
        query["minimum_should_match"] = 1
        is_stream_or_repost = {"terms": {"claim_type": [CLAIM_TYPES['stream'], CLAIM_TYPES['repost']]}}
        # has_source only makes sense for streams/reposts; everything else passes.
        query['should'].append(
            {"bool": {"must": [{"match": {"has_source": kwargs['has_source']}}, is_stream_or_repost]}})
        query['should'].append({"bool": {"must_not": [is_stream_or_repost]}})
        query['should'].append({"bool": {"must": [{"term": {"reposted_claim_type": CLAIM_TYPES['channel']}}]}})
    if kwargs.get('text'):
        query['must'].append(
            {"simple_query_string":
                 {"query": kwargs["text"], "fields": [
                     "claim_name^4", "channel_name^8", "title^1", "description^.5", "author^1", "tags^.5"
                 ]}})
    query = {
        # description/title can be large and are never needed by callers.
        "_source": {"excludes": ["description", "title"]},
        'query': {'bool': query},
        "sort": [],
    }
    if "limit" in kwargs:
        query["size"] = kwargs["limit"]
    if 'offset' in kwargs:
        query["from"] = kwargs["offset"]
    if 'order_by' in kwargs:
        if isinstance(kwargs["order_by"], str):
            kwargs["order_by"] = [kwargs["order_by"]]
        for value in kwargs['order_by']:
            if 'trending_group' in value:
                # fixme: trending_mixed is 0 for all records on variable decay, making sort slow.
                continue
            is_asc = value.startswith('^')
            value = value[1:] if is_asc else value
            value = REPLACEMENTS.get(value, value)
            if value in TEXT_FIELDS:
                value += '.keyword'
            query['sort'].append({value: "asc" if is_asc else "desc"})
    if collapse:
        query["collapse"] = {
            "field": collapse[0],
            "inner_hits": {
                "name": collapse[0],
                "size": collapse[1],
                "sort": query["sort"]
            }
        }
    return query
|
|
|
|
|
|
|
|
|
|
|
|
def expand_result(results):
    """Convert raw ES hit dicts into claim dicts with binary hash fields.

    Collapse queries nest their grouped hits under ``inner_hits``; whenever
    any hit carries inner hits, those nested hits replace the top-level ones
    and are expanded recursively instead.
    """
    collapsed_hits = []
    flattened = []
    for hit in results:
        nested = hit.get("inner_hits")
        if nested:
            # A collapsed hit: gather the grouped hits, skip the wrapper.
            for group in nested.values():
                collapsed_hits.extend(group["hits"]["hits"])
            continue
        claim = hit['_source']
        # ES stores ids as hex strings; convert to little-endian binary hashes.
        claim['claim_hash'] = unhexlify(claim['claim_id'])[::-1]
        reposted_id = claim['reposted_claim_id']
        claim['reposted_claim_hash'] = unhexlify(reposted_id)[::-1] if reposted_id else None
        channel_id = claim['channel_id']
        claim['channel_hash'] = unhexlify(channel_id)[::-1] if channel_id else None
        tx_hash = unhexlify(claim['tx_id'])[::-1]
        # txo_hash = tx_hash + little-endian uint32 output index.
        claim['txo_hash'] = tx_hash + struct.pack('<I', claim['tx_nout'])
        claim['tx_hash'] = tx_hash
        # Rename index fields to the names the rest of the server expects.
        claim['reposted'] = claim.pop('repost_count')
        claim['signature_valid'] = claim.pop('is_signature_valid')
        flattened.append(claim)
    if collapsed_hits:
        # Collapsed queries only: expand the gathered inner hits instead.
        return expand_result(collapsed_hits)
    return flattened
|
2021-03-05 09:39:36 +01:00
|
|
|
|
|
|
|
|
|
|
|
class ResultCacheItem:
    """Holder for one cached search result plus the asyncio primitives used
    to let concurrent requests for the same key wait on a single fetch."""

    __slots__ = '_result', 'lock', 'has_result'

    def __init__(self):
        # Set once a (non-None) result has been stored.
        self.has_result = asyncio.Event()
        # Serializes the first-fetcher critical section.
        self.lock = asyncio.Lock()
        self._result = None

    @property
    def result(self) -> str:
        """The cached serialized result, or None if not produced yet."""
        return self._result

    @result.setter
    def result(self, result: str):
        self._result = result
        if result is not None:
            # Wake any coroutines waiting on has_result.
            self.has_result.set()

    @classmethod
    def from_cache(cls, cache_key, cache):
        """Return the cache entry for ``cache_key``, creating it if absent."""
        item = cache.get(cache_key)
        if item is None:
            item = cache[cache_key] = cls()
        return item
|