Avoid re-adding the same hash when the tracker is busy with too many files

This commit is contained in:
Victor Shyba 2022-03-08 17:32:35 -03:00
parent 28fdd62945
commit 61c99abcf1

View file

@ -131,6 +131,7 @@ class TrackerClient:
self.announce_port = announce_port
self.servers = servers
self.results = {}  # we can't probe the server before the interval, so we keep the result here until it expires
self.tasks = {}
async def start(self):
self.transport, _ = await asyncio.get_running_loop().create_datagram_endpoint(
@ -145,7 +146,10 @@ class TrackerClient:
self.EVENT_CONTROLLER.close()
def on_hash(self, info_hash):
    """Start an async peer lookup for *info_hash*, unless one is already in flight.

    Deduplicates requests: while a ``get_peer_list`` task for this hash is
    still running, repeated calls are no-ops, so a busy tracker client does
    not pile up redundant lookups for the same hash.
    """
    if info_hash in self.tasks:
        # A lookup for this hash is already running; don't schedule another.
        return
    task = asyncio.ensure_future(self.get_peer_list(info_hash))
    # Drop the bookkeeping entry once the lookup finishes, succeed or fail,
    # so a later on_hash() for the same hash can start a fresh lookup.
    task.add_done_callback(lambda *_: self.tasks.pop(info_hash, None))
    self.tasks[info_hash] = task
async def get_peer_list(self, info_hash, stopped=False):
found = []