remove dht locks
parent a121110743
commit 43ac928f0b

2 changed files with 38 additions and 43 deletions
@@ -64,7 +64,7 @@ class Node:
         # ping the set of peers; upon success/failure the routing table and last replied/failed time will be updated
         to_ping = [peer for peer in set(total_peers) if self.protocol.peer_manager.peer_is_good(peer) is not True]
         if to_ping:
-            await self.protocol.ping_queue.enqueue_maybe_ping(*to_ping, delay=0)
+            self.protocol.ping_queue.enqueue_maybe_ping(*to_ping, delay=0)
         fut = asyncio.Future(loop=self.loop)
         self.loop.call_later(constants.refresh_interval, fut.set_result, None)
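The unchanged context above also shows the interruptible-sleep idiom the node uses between refresh rounds: instead of asyncio.sleep(), a bare Future is resolved by a timer, so anything holding a reference to the future could wake the loop early. A minimal standalone sketch of that idiom (the function and argument names are mine, not the project's):

import asyncio

async def wait_refresh_interval(loop: asyncio.AbstractEventLoop, interval: float):
    # A timer resolves the future after `interval`; other code holding `fut`
    # could resolve it sooner to force an immediate refresh.
    fut = loop.create_future()
    loop.call_later(interval, lambda: fut.done() or fut.set_result(None))
    await fut

async def main():
    loop = asyncio.get_running_loop()
    await wait_refresh_interval(loop, 0.1)

asyncio.run(main())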
@@ -192,16 +192,14 @@ class PingQueue:
         self._process_task: asyncio.Task = None
         self._next_task: asyncio.Future = None
         self._next_timer: asyncio.TimerHandle = None
-        self._lock = asyncio.Lock()
         self._running = False

     @property
     def running(self):
         return self._running

-    async def enqueue_maybe_ping(self, *peers: 'KademliaPeer', delay: typing.Optional[float] = None):
+    def enqueue_maybe_ping(self, *peers: 'KademliaPeer', delay: typing.Optional[float] = None):
         delay = constants.check_refresh_interval if delay is None else delay
-        async with self._lock:
-            for peer in peers:
-                if delay and peer not in self._enqueued_contacts:
-                    self._pending_contacts[peer] = self._loop.time() + delay
+        for peer in peers:
+            if delay and peer not in self._enqueued_contacts:
+                self._pending_contacts[peer] = self._loop.time() + delay
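Dropping PingQueue._lock is sound because the rewritten enqueue_maybe_ping contains no await: asyncio runs tasks on a single thread and can only switch between them at await points, so the dict updates run to completion atomically with respect to the processing loop. A self-contained demonstration of that scheduling guarantee (toy names, not project code):

import asyncio

state = {"pending": 0}

def enqueue(n: int):
    # Purely synchronous: no other task can run until this returns.
    state["pending"] += n

async def task(n: int):
    enqueue(n)              # atomic w.r.t. other tasks: no await inside
    await asyncio.sleep(0)  # a context switch can only happen here

async def main():
    await asyncio.gather(*(task(i) for i in range(5)))
    assert state["pending"] == sum(range(5))

asyncio.run(main())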
@@ -223,7 +221,6 @@ class PingQueue:
         while True:
             tasks = []

-            async with self._lock:
-                if self._enqueued_contacts or self._pending_contacts:
-                    now = self._loop.time()
-                    scheduled = [k for k, d in self._pending_contacts.items() if now >= d]
+            if self._enqueued_contacts or self._pending_contacts:
+                now = self._loop.time()
+                scheduled = [k for k, d in self._pending_contacts.items() if now >= d]
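With the lock gone, the processing loop reads _pending_contacts directly on each pass and pulls out every peer whose deadline has expired. A toy model of that selection step (the peer names and the deletion afterwards are illustrative assumptions):

import time

pending = {"peer_a": time.monotonic() - 1, "peer_b": time.monotonic() + 30}

now = time.monotonic()
scheduled = [peer for peer, deadline in pending.items() if now >= deadline]
for peer in scheduled:
    del pending[peer]  # assumed follow-up: promote the peer out of pending

print(scheduled)  # ['peer_a']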
@@ -282,7 +279,6 @@ class KademliaProtocol(DatagramProtocol):
         self.data_store = DictDataStore(self.loop, self.peer_manager)
         self.ping_queue = PingQueue(self.loop, self)
         self.node_rpc = KademliaRPC(self, self.loop, self.peer_port)
-        self.lock = asyncio.Lock(loop=self.loop)
         self.rpc_timeout = rpc_timeout
         self._split_lock = asyncio.Lock(loop=self.loop)
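Note that only the coarse self.lock is deleted; _split_lock survives. Presumably that is because the critical section it guards awaits partway through, and any await yields to the event loop, letting other tasks interleave. A sketch of the kind of section that still needs a real lock (the names here are illustrative, not the project's):

import asyncio

split_lock = asyncio.Lock()

async def split_bucket():
    async with split_lock:
        # This await yields control; without the lock another task could
        # mutate the routing table while the split is half done.
        await asyncio.sleep(0)

asyncio.run(split_bucket())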
@@ -424,7 +420,7 @@ class KademliaProtocol(DatagramProtocol):
         # will be added to our routing table if successful
         is_good = self.peer_manager.peer_is_good(peer)
         if is_good is None:
-            await self.ping_queue.enqueue_maybe_ping(peer)
+            self.ping_queue.enqueue_maybe_ping(peer)
         elif is_good is True:
             await self.add_peer(peer)
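peer_is_good() is effectively tri-state here: None means no verdict yet (queue the peer for a ping), True means known-good (add it to the routing table), and False means known-bad (leave it alone). A toy version of that triage (the history table and peer names are invented for the demo):

from typing import Optional

history = {"good_peer": True, "bad_peer": False}

def peer_is_good(peer: str) -> Optional[bool]:
    return history.get(peer)  # None when we have no record of the peer

for peer in ("good_peer", "bad_peer", "new_peer"):
    verdict = peer_is_good(peer)
    if verdict is None:
        print(peer, "-> enqueue for ping")    # new_peer takes this branch
    elif verdict:
        print(peer, "-> add to routing table")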
@@ -553,7 +549,6 @@ class KademliaProtocol(DatagramProtocol):
             if message.rpc_id in self.sent_messages:
                 self.sent_messages.pop(message.rpc_id)

-        async with self.lock:
-            if isinstance(message, RequestDatagram):
-                response_fut = self.loop.create_future()
-                response_fut.add_done_callback(pop_from_sent_messages)
+        if isinstance(message, RequestDatagram):
+            response_fut = self.loop.create_future()
+            response_fut.add_done_callback(pop_from_sent_messages)
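The surviving lines show the cleanup idiom the lock used to wrap: the response future's done callback evicts the request from sent_messages whether the RPC succeeds, fails, or is cancelled. A runnable sketch of that pattern (sent_messages and rpc_id here are stand-ins, not the protocol's real state):

import asyncio

async def main():
    loop = asyncio.get_running_loop()
    sent_messages = {}
    rpc_id = b"\x01\x02"

    def pop_from_sent_messages(_fut: asyncio.Future):
        sent_messages.pop(rpc_id, None)

    response_fut = loop.create_future()
    response_fut.add_done_callback(pop_from_sent_messages)
    sent_messages[rpc_id] = response_fut

    response_fut.set_result("pong")  # done callbacks run via call_soon
    await asyncio.sleep(0)           # yield once so the callback fires
    assert rpc_id not in sent_messages

asyncio.run(main())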