threadpools for block processor and es sync reader

Jack Robison 2021-10-14 15:40:20 -04:00 committed by Victor Shyba
parent a4ad1bb0a9
commit eeaf9a72e2
4 changed files with 43 additions and 71 deletions

View file

@@ -3,6 +3,7 @@ import asyncio
 import typing
 from bisect import bisect_right
 from struct import pack, unpack
+from concurrent.futures.thread import ThreadPoolExecutor
 from typing import Optional, List, Tuple, Set, DefaultDict, Dict, NamedTuple
 from prometheus_client import Gauge, Histogram
 from collections import defaultdict
@@ -203,6 +204,8 @@ class BlockProcessor:
         self.env = env
         self.db = db
         self.daemon = daemon
+        self._chain_executor = ThreadPoolExecutor(1, thread_name_prefix='block-processor')
+        self._sync_reader_executor = ThreadPoolExecutor(1, thread_name_prefix='hub-es-sync')
         self.mempool = MemPool(env.coin, daemon, db, self.state_lock)
         self.shutdown_event = shutdown_event
         self.coin = env.coin
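Note: both pools are created with a single worker on purpose. A ThreadPoolExecutor(1) runs its jobs strictly in submission order, so the chain executor doubles as a lock around blocking chain-state work while keeping it off the event loop. A minimal, self-contained sketch of that property (names are illustrative, not from this commit):

    import asyncio
    from concurrent.futures.thread import ThreadPoolExecutor

    # A one-thread pool executes submitted jobs one at a time, FIFO,
    # so it serializes the blocking work handed to it.
    chain_executor = ThreadPoolExecutor(1, thread_name_prefix='block-processor')

    def blocking_write(n):
        print(f'chain write {n}')  # stand-in for a blocking DB operation

    async def main():
        loop = asyncio.get_event_loop()
        # Scheduled concurrently, but executed strictly in order.
        await asyncio.gather(*(
            loop.run_in_executor(chain_executor, blocking_write, n) for n in range(3)
        ))

    asyncio.run(main())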
@@ -299,7 +302,11 @@ class BlockProcessor:
         for claim_hash in self.removed_claims_to_send_es:
             yield 'delete', claim_hash.hex()
-        async for claim in self.db.claims_producer(self.touched_claims_to_send_es):
+        to_update = await asyncio.get_event_loop().run_in_executor(
+            self._sync_reader_executor, self.db.claims_producer, self.touched_claims_to_send_es
+        )
+        for claim in to_update:
             yield 'update', claim

     async def run_in_thread_with_lock(self, func, *args):
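Note: the ES sync path changes shape here: instead of iterating an async producer, the whole batch is read on the dedicated 'hub-es-sync' thread in one executor hop, and the generator then yields from an in-memory list. A rough sketch of the pattern, with read_claims standing in for the real claims_producer:

    import asyncio
    from concurrent.futures.thread import ThreadPoolExecutor

    sync_reader = ThreadPoolExecutor(1, thread_name_prefix='hub-es-sync')

    def read_claims(touched):
        # stand-in for LevelDB.claims_producer: a plain blocking read
        return [f'claim-{t}' for t in touched]

    async def claim_updates(touched):
        # One hop to the reader thread for the whole batch; yielding
        # afterwards never blocks the event loop.
        to_update = await asyncio.get_event_loop().run_in_executor(
            sync_reader, read_claims, touched
        )
        for claim in to_update:
            yield 'update', claim

    async def main():
        async for op, claim in claim_updates(['a', 'b']):
            print(op, claim)

    asyncio.run(main())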
@@ -310,13 +317,12 @@ class BlockProcessor:
         # consistent and not being updated elsewhere.
         async def run_in_thread_locked():
             async with self.state_lock:
-                return await asyncio.get_event_loop().run_in_executor(None, func, *args)
+                return await asyncio.get_event_loop().run_in_executor(self._chain_executor, func, *args)
         return await asyncio.shield(run_in_thread_locked())

-    @staticmethod
-    async def run_in_thread(func, *args):
+    async def run_in_thread(self, func, *args):
         async def run_in_thread():
-            return await asyncio.get_event_loop().run_in_executor(None, func, *args)
+            return await asyncio.get_event_loop().run_in_executor(self._chain_executor, func, *args)
         return await asyncio.shield(run_in_thread())

     async def check_and_advance_blocks(self, raw_blocks):
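Note: the functional change in these two helpers is the executor argument (None, i.e. the loop default, becomes the dedicated chain executor, and run_in_thread loses @staticmethod to reach self); the asyncio.shield() wrapper is unchanged. A condensed sketch of why the shield matters here:

    import asyncio
    from concurrent.futures.thread import ThreadPoolExecutor

    executor = ThreadPoolExecutor(1, thread_name_prefix='block-processor')

    async def run_in_thread(func, *args):
        async def _run():
            return await asyncio.get_event_loop().run_in_executor(executor, func, *args)
        # shield(): cancelling the awaiting caller does not abandon a
        # job already running on the chain thread mid-operation.
        return await asyncio.shield(_run())

    print(asyncio.run(run_in_thread(sum, [1, 2, 3])))  # 6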
@@ -1746,5 +1752,6 @@ class BlockProcessor:
         self.status_server.stop()
         # Shut down block processing
         self.logger.info('closing the DB for a clean shutdown...')
+        self._sync_reader_executor.shutdown(wait=True)
+        self._chain_executor.shutdown(wait=True)
         self.db.close()
-        # self.executor.shutdown(wait=True)
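Note on ordering: both pools are drained before the DB is closed. A trivial sketch (FakeDB is a placeholder):

    from concurrent.futures.thread import ThreadPoolExecutor

    class FakeDB:
        def close(self):
            print('db closed')

    reader = ThreadPoolExecutor(1, thread_name_prefix='hub-es-sync')
    chain = ThreadPoolExecutor(1, thread_name_prefix='block-processor')

    # shutdown(wait=True) blocks until in-flight jobs finish, so no
    # worker thread can still touch the database when close() runs.
    reader.shutdown(wait=True)
    chain.shutdown(wait=True)
    FakeDB().close()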

View file

@@ -696,14 +696,11 @@ class LevelDB:
                 yield meta
                 batch.clear()

-    async def claims_producer(self, claim_hashes: Set[bytes]):
-        loop = asyncio.get_event_loop()
-
-        def produce_claims(claims):
-            batch = []
-            _results = []
-            for claim_hash in claims:
-                if claim_hash not in self.claim_to_txo:
-                    self.logger.warning("can't sync non existent claim to ES: %s", claim_hash.hex())
-                    continue
+    def claims_producer(self, claim_hashes: Set[bytes]):
+        batch = []
+        results = []
+        for claim_hash in claim_hashes:
+            if claim_hash not in self.claim_to_txo:
+                self.logger.warning("can't sync non existent claim to ES: %s", claim_hash.hex())
+                continue
@@ -727,14 +724,8 @@ class LevelDB:
-            for claim in batch:
-                _meta = self._prepare_claim_metadata(claim.claim_hash, claim)
-                if _meta:
-                    _results.append(_meta)
-            return _results
-
-        if claim_hashes:
-            results = await loop.run_in_executor(None, produce_claims, claim_hashes)
-            for meta in results:
-                yield meta
+        for claim in batch:
+            _meta = self._prepare_claim_metadata(claim.claim_hash, claim)
+            if _meta:
+                results.append(_meta)
+        return results

     def get_activated_at_height(self, height: int) -> DefaultDict[PendingActivationValue, List[PendingActivationKey]]:
         activated = defaultdict(list)
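Note: claims_producer is no longer an async generator managing its own executor hop; it is now a plain blocking function that returns a list, and the caller (BlockProcessor above) chooses which thread pool runs it. A stripped-down sketch of the new shape, with the metadata lookup faked:

    def claims_producer(claim_hashes):
        # Plain blocking function: no event loop, no executor of its own.
        results = []
        for claim_hash in claim_hashes:
            meta = {'claim_hash': claim_hash}  # stand-in for _prepare_claim_metadata
            if meta:
                results.append(meta)
        return results

    print(claims_producer(['ab', 'cd']))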

View file

@@ -69,7 +69,7 @@ class Server:
     def run(self):
         loop = asyncio.get_event_loop()
-        executor = ThreadPoolExecutor(self.env.max_query_workers)
+        executor = ThreadPoolExecutor(self.env.max_query_workers, thread_name_prefix='hub-worker')
         loop.set_default_executor(executor)

         def __exit():
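Note: the only change is the thread_name_prefix, but it makes every run_in_executor(None, ...) call in the process land on identifiable 'hub-worker' threads, which is handy in thread dumps and profilers. A quick self-contained check:

    import asyncio
    import threading
    from concurrent.futures.thread import ThreadPoolExecutor

    async def main():
        loop = asyncio.get_event_loop()
        loop.set_default_executor(ThreadPoolExecutor(4, thread_name_prefix='hub-worker'))
        # executor=None now resolves to the named default pool:
        print(await loop.run_in_executor(None, lambda: threading.current_thread().name))
        # -> e.g. 'hub-worker_0'

    asyncio.run(main())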

View file

@@ -633,7 +633,7 @@ class SessionManager:
                 self.mempool_statuses.pop(hashX, None)

         await asyncio.get_event_loop().run_in_executor(
-            None, touched.intersection_update, self.hashx_subscriptions_by_session.keys()
+            self.bp._chain_executor, touched.intersection_update, self.hashx_subscriptions_by_session.keys()
         )

         if touched or new_touched or (height_changed and self.mempool_statuses):
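Note: pointing this call at bp._chain_executor instead of the loop default runs the set update on the block processor's single worker thread, so it is serialized with any chain work already queued there. A minimal sketch of the call shape:

    import asyncio
    from concurrent.futures.thread import ThreadPoolExecutor

    chain_executor = ThreadPoolExecutor(1, thread_name_prefix='block-processor')

    async def filter_touched(touched, subscriptions):
        # intersection_update mutates `touched` in place on the worker thread
        await asyncio.get_event_loop().run_in_executor(
            chain_executor, touched.intersection_update, subscriptions
        )
        return touched

    print(asyncio.run(filter_touched({1, 2, 3}, {2, 3})))  # {2, 3}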
@@ -775,10 +775,9 @@ class LBRYSessionManager(SessionManager):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.query_executor = None
         self.websocket = None
         # self.metrics = ServerLoadData()
-        self.metrics_loop = None
+        # self.metrics_loop = None
         self.running = False
         if self.env.websocket_host is not None and self.env.websocket_port is not None:
             self.websocket = AdminWebSocket(self)
@@ -795,12 +794,6 @@ class LBRYSessionManager(SessionManager):
     async def start_other(self):
         self.running = True
-        if self.env.max_query_workers is not None and self.env.max_query_workers == 0:
-            self.query_executor = ThreadPoolExecutor(max_workers=1)
-        else:
-            self.query_executor = ProcessPoolExecutor(
-                max_workers=self.env.max_query_workers or max(os.cpu_count(), 4)
-            )
         if self.websocket is not None:
             await self.websocket.start()
@@ -808,7 +801,6 @@ class LBRYSessionManager(SessionManager):
         self.running = False
         if self.websocket is not None:
             await self.websocket.stop()
-        self.query_executor.shutdown()


 class LBRYElectrumX(SessionBase):
@@ -971,24 +963,6 @@ class LBRYElectrumX(SessionBase):
     # else:
     #     return APICallMetrics(query_name)

-    async def run_in_executor(self, query_name, func, kwargs):
-        start = time.perf_counter()
-        try:
-            self.session_mgr.pending_query_metric.inc()
-            result = await asyncio.get_running_loop().run_in_executor(
-                self.session_mgr.query_executor, func, kwargs
-            )
-        except asyncio.CancelledError:
-            raise
-        except Exception:
-            log.exception("dear devs, please handle this exception better")
-            self.session_mgr.db_error_metric.inc()
-            raise RPCError(JSONRPC.INTERNAL_ERROR, 'unknown server error')
-        else:
-            return base64.b64encode(result).decode()
-        finally:
-            self.session_mgr.pending_query_metric.dec()
-            self.session_mgr.executor_time_metric.observe(time.perf_counter() - start)

     # async def run_and_cache_query(self, query_name, kwargs):
     #     start = time.perf_counter()