forked from LBRYCommunity/lbry-sdk
non blocking claim producer
parent 72e45b5cb1
commit ff960fda0e
3 changed files with 34 additions and 9 deletions
@@ -46,7 +46,7 @@ class Env:
         self.trending_whale_half_life = math.log2(0.1 ** (1 / (3 + self.integer('TRENDING_WHALE_DECAY_RATE', 24)))) + 1
         self.trending_whale_threshold = float(self.integer('TRENDING_WHALE_THRESHOLD', 10000)) * 1E8

-        self.max_query_workers = self.integer('MAX_QUERY_WORKERS', None)
+        self.max_query_workers = self.integer('MAX_QUERY_WORKERS', 4)
         self.individual_tag_indexes = self.boolean('INDIVIDUAL_TAG_INDEXES', True)
         self.track_metrics = self.boolean('TRACK_METRICS', False)
         self.websocket_host = self.default('WEBSOCKET_HOST', self.host)

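The default for MAX_QUERY_WORKERS changes from None to 4, so env.max_query_workers always holds a concrete integer that the server hunk further down can hand to its ThreadPoolExecutor. As a rough sketch of how the setting presumably flows through, assuming Env.integer reads an environment variable and falls back to the given default (the standalone integer helper below is illustrative, not code from this commit):

import os
from concurrent.futures import ThreadPoolExecutor


def integer(name, default):
    # Assumed behaviour of Env.integer(): read an environment variable, fall back to the default.
    value = os.environ.get(name)
    return int(value) if value is not None else default


max_query_workers = integer('MAX_QUERY_WORKERS', 4)  # was None before this commit
executor = ThreadPoolExecutor(max_query_workers)      # ThreadPoolExecutor(None) would instead size itself from the CPU count
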
@@ -742,22 +742,47 @@ class LevelDB:

     async def claims_producer(self, claim_hashes: Set[bytes]):
         batch = []
-        for claim_hash in claim_hashes:
+        results = []
+
+        loop = asyncio.get_event_loop()
+
+        def produce_claim(claim_hash):
             if claim_hash not in self.claim_to_txo:
                 self.logger.warning("can't sync non existent claim to ES: %s", claim_hash.hex())
-                continue
+                return
             name = self.claim_to_txo[claim_hash].normalized_name
             if not self.prefix_db.claim_takeover.get(name):
                 self.logger.warning("can't sync non existent claim to ES: %s", claim_hash.hex())
-                continue
-            claim = self._fs_get_claim_by_hash(claim_hash)
+                return
+            claim_txo = self.claim_to_txo.get(claim_hash)
+            if not claim_txo:
+                return
+            activation = self.get_activation(claim_txo.tx_num, claim_txo.position)
+            claim = self._prepare_resolve_result(
+                claim_txo.tx_num, claim_txo.position, claim_hash, claim_txo.name, claim_txo.root_tx_num,
+                claim_txo.root_position, activation, claim_txo.channel_signature_is_valid
+            )
             if claim:
                 batch.append(claim)
-        batch.sort(key=lambda x: x.tx_hash)
-        for claim in batch:
+
+        def get_metadata(claim):
             meta = self._prepare_claim_metadata(claim.claim_hash, claim)
             if meta:
-                yield meta
+                results.append(meta)
+
+        if claim_hashes:
+            await asyncio.wait(
+                [loop.run_in_executor(None, produce_claim, claim_hash) for claim_hash in claim_hashes]
+            )
+            batch.sort(key=lambda x: x.tx_hash)
+
+        if batch:
+            await asyncio.wait(
+                [loop.run_in_executor(None, get_metadata, claim) for claim in batch]
+            )
+        for meta in results:
+            yield meta
+
         batch.clear()

     def get_activated_at_height(self, height: int) -> DefaultDict[PendingActivationValue, List[PendingActivationKey]]:

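The rewritten claims_producer no longer performs the database reads inline in the async generator. Instead, produce_claim and get_metadata are plain synchronous functions that are fanned out to the event loop's default executor with loop.run_in_executor and gathered with asyncio.wait, so the loop stays responsive while the LevelDB reads happen on worker threads; the results are collected into shared lists and yielded afterwards. A self-contained sketch of that pattern, with load_record and prepare_metadata standing in for the commit's produce_claim and get_metadata (illustrative names only, not the project's API):

import asyncio
import time


def load_record(key, batch):
    # Stand-in for the blocking produce_claim(): a synchronous read that appends
    # its result to a shared list from a worker thread (list.append is atomic).
    time.sleep(0.01)  # simulate key-value store latency
    batch.append(key)


def prepare_metadata(key, results):
    # Stand-in for the blocking get_metadata() second pass.
    results.append({"key": key, "meta": f"metadata-{key}"})


async def records_producer(keys):
    loop = asyncio.get_event_loop()
    batch, results = [], []

    if keys:  # asyncio.wait() raises ValueError on an empty collection, hence the guard
        await asyncio.wait([loop.run_in_executor(None, load_record, k, batch) for k in keys])
        batch.sort()

    if batch:
        await asyncio.wait([loop.run_in_executor(None, prepare_metadata, k, results) for k in batch])

    for meta in results:
        yield meta


async def main():
    async for meta in records_producer([3, 1, 2]):
        print(meta)


asyncio.run(main())

The if claim_hashes: and if batch: checks in the commit play the same role as the guards above, since asyncio.wait rejects an empty set of awaitables, and the batch is only sorted once every produce_claim future has completed.
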
@@ -69,7 +69,7 @@ class Server:

     def run(self):
         loop = asyncio.get_event_loop()
-        executor = ThreadPoolExecutor(4)
+        executor = ThreadPoolExecutor(self.env.max_query_workers)
         loop.set_default_executor(executor)

         def __exit():

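Because claims_producer passes None as the executor argument to run_in_executor, its work runs on whatever default executor the loop has installed; sizing that pool from env.max_query_workers here is what connects the first hunk to the second. A minimal sketch of that relationship, using an illustrative run() rather than the project's Server class:

import asyncio
from concurrent.futures import ThreadPoolExecutor


def run(max_query_workers=4):
    loop = asyncio.new_event_loop()
    executor = ThreadPoolExecutor(max_query_workers)
    loop.set_default_executor(executor)  # run_in_executor(None, ...) now uses this bounded pool

    async def query():
        # Any blocking call scheduled with executor=None competes for max_query_workers threads.
        return await loop.run_in_executor(None, sum, range(1_000_000))

    try:
        print(loop.run_until_complete(query()))
    finally:
        loop.close()


run()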