diff --git a/lbry/blob/blob_file.py b/lbry/blob/blob_file.py
index 62ae64ca5..9145cd30b 100644
--- a/lbry/blob/blob_file.py
+++ b/lbry/blob/blob_file.py
@@ -87,8 +87,8 @@ class AbstractBlob:
         self.blob_completed_callback = blob_completed_callback
         self.blob_directory = blob_directory
         self.writers: typing.Dict[typing.Tuple[typing.Optional[str], typing.Optional[int]], HashBlobWriter] = {}
-        self.verified: asyncio.Event = asyncio.Event(loop=self.loop)
-        self.writing: asyncio.Event = asyncio.Event(loop=self.loop)
+        self.verified: asyncio.Event = asyncio.Event()
+        self.writing: asyncio.Event = asyncio.Event()
         self.readers: typing.List[typing.BinaryIO] = []
         self.added_on = added_on or time.time()
         self.is_mine = is_mine
@@ -222,7 +222,7 @@ class AbstractBlob:
                         peer_port: typing.Optional[int] = None) -> HashBlobWriter:
         if (peer_address, peer_port) in self.writers and not self.writers[(peer_address, peer_port)].closed():
             raise OSError(f"attempted to download blob twice from {peer_address}:{peer_port}")
-        fut = asyncio.Future(loop=self.loop)
+        fut = asyncio.Future()
         writer = HashBlobWriter(self.blob_hash, self.get_length, fut)
         self.writers[(peer_address, peer_port)] = writer
diff --git a/lbry/blob_exchange/client.py b/lbry/blob_exchange/client.py
index 61920c5b7..d3a680b3e 100644
--- a/lbry/blob_exchange/client.py
+++ b/lbry/blob_exchange/client.py
@@ -32,7 +32,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         self.buf = b''

         # this is here to handle the race when the downloader is closed right as response_fut gets a result
-        self.closed = asyncio.Event(loop=self.loop)
+        self.closed = asyncio.Event()

     def data_received(self, data: bytes):
         if self.connection_manager:
@@ -111,7 +111,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
             self.transport.write(msg)
             if self.connection_manager:
                 self.connection_manager.sent_data(f"{self.peer_address}:{self.peer_port}", len(msg))
-            response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout, loop=self.loop)
+            response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout)
             availability_response = response.get_availability_response()
             price_response = response.get_price_response()
             blob_response = response.get_blob_response()
@@ -151,7 +151,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
                   f" timeout in {self.peer_timeout}"
             log.debug(msg)
             msg = f"downloaded {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}"
-            await asyncio.wait_for(self.writer.finished, self.peer_timeout, loop=self.loop)
+            await asyncio.wait_for(self.writer.finished, self.peer_timeout)
             # wait for the io to finish
             await self.blob.verified.wait()
             log.info("%s at %fMB/s", msg,
@@ -187,7 +187,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         try:
             self._blob_bytes_received = 0
             self.blob, self.writer = blob, blob.get_blob_writer(self.peer_address, self.peer_port)
-            self._response_fut = asyncio.Future(loop=self.loop)
+            self._response_fut = asyncio.Future()
             return await self._download_blob()
         except OSError:
             # i'm not sure how to fix this race condition - jack
@@ -244,7 +244,7 @@ async def request_blob(loop: asyncio.AbstractEventLoop, blob: Optional['Abstract
     try:
         if not connected_protocol:
             await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port),
-                                   peer_connect_timeout, loop=loop)
+                                   peer_connect_timeout)
             connected_protocol = protocol
         if blob is None or blob.get_is_verified() or not blob.is_writeable():
             # blob is None happens when we are just opening a connection
diff --git a/lbry/blob_exchange/downloader.py b/lbry/blob_exchange/downloader.py
index 9363bd8ca..6ba62c402 100644
--- a/lbry/blob_exchange/downloader.py
+++ b/lbry/blob_exchange/downloader.py
@@ -30,7 +30,7 @@ class BlobDownloader:
         self.failures: typing.Dict['KademliaPeer', int] = {}
         self.connection_failures: typing.Set['KademliaPeer'] = set()
         self.connections: typing.Dict['KademliaPeer', 'BlobExchangeClientProtocol'] = {}
-        self.is_running = asyncio.Event(loop=self.loop)
+        self.is_running = asyncio.Event()

     def should_race_continue(self, blob: 'AbstractBlob'):
         max_probes = self.config.max_connections_per_download * (1 if self.connections else 10)
@@ -65,7 +65,7 @@ class BlobDownloader:

     async def new_peer_or_finished(self):
         active_tasks = list(self.active_connections.values()) + [asyncio.sleep(1)]
-        await asyncio.wait(active_tasks, loop=self.loop, return_when='FIRST_COMPLETED')
+        await asyncio.wait(active_tasks, return_when='FIRST_COMPLETED')

     def cleanup_active(self):
         if not self.active_connections and not self.connections:
@@ -126,7 +126,7 @@ class BlobDownloader:

 async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node',
                         blob_hash: str) -> 'AbstractBlob':
-    search_queue = asyncio.Queue(loop=loop, maxsize=config.max_connections_per_download)
+    search_queue = asyncio.Queue(maxsize=config.max_connections_per_download)
     search_queue.put_nowait(blob_hash)
     peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue)
     fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers)
diff --git a/lbry/blob_exchange/server.py b/lbry/blob_exchange/server.py
index 8ee9212ff..4009f22a4 100644
--- a/lbry/blob_exchange/server.py
+++ b/lbry/blob_exchange/server.py
@@ -25,19 +25,19 @@ class BlobServerProtocol(asyncio.Protocol):
         self.idle_timeout = idle_timeout
         self.transfer_timeout = transfer_timeout
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event(loop=self.loop)
+        self.started_listening = asyncio.Event()
         self.buf = b''
         self.transport: typing.Optional[asyncio.Transport] = None
         self.lbrycrd_address = lbrycrd_address
         self.peer_address_and_port: typing.Optional[str] = None
-        self.started_transfer = asyncio.Event(loop=self.loop)
-        self.transfer_finished = asyncio.Event(loop=self.loop)
+        self.started_transfer = asyncio.Event()
+        self.transfer_finished = asyncio.Event()
         self.close_on_idle_task: typing.Optional[asyncio.Task] = None

     async def close_on_idle(self):
         while self.transport:
             try:
-                await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout, loop=self.loop)
+                await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout)
             except asyncio.TimeoutError:
                 log.debug("closing idle connection from %s", self.peer_address_and_port)
                 return self.close()
@@ -101,7 +101,7 @@ class BlobServerProtocol(asyncio.Protocol):
             log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
             self.started_transfer.set()
             try:
-                sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout, loop=self.loop)
+                sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout)
                 if sent and sent > 0:
                     self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent)
                     log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port)
@@ -157,7 +157,7 @@ class BlobServer:
         self.loop = loop
         self.blob_manager = blob_manager
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event(loop=self.loop)
+        self.started_listening = asyncio.Event()
         self.lbrycrd_address = lbrycrd_address
         self.idle_timeout = idle_timeout
         self.transfer_timeout = transfer_timeout
diff --git a/lbry/connection_manager.py b/lbry/connection_manager.py
index 57be52694..88e0055c0 100644
--- a/lbry/connection_manager.py
+++ b/lbry/connection_manager.py
@@ -67,7 +67,7 @@ class ConnectionManager:
         while True:
             last = time.perf_counter()
-            await asyncio.sleep(0.1, loop=self.loop)
+            await asyncio.sleep(0.1)
             self._status['incoming_bps'].clear()
             self._status['outgoing_bps'].clear()
             now = time.perf_counter()
diff --git a/lbry/dht/blob_announcer.py b/lbry/dht/blob_announcer.py
index 9629e06b6..f1194e8c5 100644
--- a/lbry/dht/blob_announcer.py
+++ b/lbry/dht/blob_announcer.py
@@ -50,7 +50,7 @@ class BlobAnnouncer:
         while batch_size:
             if not self.node.joined.is_set():
                 await self.node.joined.wait()
-            await asyncio.sleep(60, loop=self.loop)
+            await asyncio.sleep(60)
             if not self.node.protocol.routing_table.get_peers():
                 log.warning("No peers in DHT, announce round skipped")
                 continue
@@ -59,7 +59,7 @@ class BlobAnnouncer:
             log.debug("announcer task wake up, %d blobs to announce", len(self.announce_queue))
             while len(self.announce_queue) > 0:
                 log.info("%i blobs to announce", len(self.announce_queue))
-                await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)], loop=self.loop)
+                await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)])
             announced = list(filter(None, self.announced))
             if announced:
                 await self.storage.update_last_announced_blobs(announced)
diff --git a/lbry/dht/node.py b/lbry/dht/node.py
index 74270c404..ef86d7302 100644
--- a/lbry/dht/node.py
+++ b/lbry/dht/node.py
@@ -37,7 +37,7 @@ class Node:
         self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout,
                                          split_buckets_under_index, is_bootstrap_node)
         self.listening_port: asyncio.DatagramTransport = None
-        self.joined = asyncio.Event(loop=self.loop)
+        self.joined = asyncio.Event()
         self._join_task: asyncio.Task = None
         self._refresh_task: asyncio.Task = None
         self._storage = storage
@@ -79,7 +79,7 @@ class Node:
             else:
                 if force_once:
                     break
-                fut = asyncio.Future(loop=self.loop)
+                fut = asyncio.Future()
                 self.loop.call_later(constants.REFRESH_INTERVAL // 4, fut.set_result, None)
                 await fut
                 continue
@@ -93,7 +93,7 @@ class Node:
             if force_once:
                 break

-            fut = asyncio.Future(loop=self.loop)
+            fut = asyncio.Future()
             self.loop.call_later(constants.REFRESH_INTERVAL, fut.set_result, None)
             await fut

@@ -108,7 +108,7 @@ class Node:
         for peer in peers:
             log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
         stored_to_tup = await asyncio.gather(
-            *(self.protocol.store_to_peer(hash_value, peer) for peer in peers), loop=self.loop
+            *(self.protocol.store_to_peer(hash_value, peer) for peer in peers)
         )
         stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
         if stored_to:
@@ -182,14 +182,14 @@ class Node:
                     for address, udp_port in known_node_urls or []
                 ]))
             except socket.gaierror:
-                await asyncio.sleep(30, loop=self.loop)
+                await asyncio.sleep(30)
                 continue

             self.protocol.peer_manager.reset()
             self.protocol.ping_queue.enqueue_maybe_ping(*seed_peers, delay=0.0)
             await self.peer_search(self.protocol.node_id, shortlist=seed_peers, count=32)

-            await asyncio.sleep(1, loop=self.loop)
+            await asyncio.sleep(1)

     def start(self, interface: str, known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
         self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls))
@@ -271,7 +271,7 @@ class Node:
     def accumulate_peers(self, search_queue: asyncio.Queue,
                          peer_queue: typing.Optional[asyncio.Queue] = None
                          ) -> typing.Tuple[asyncio.Queue, asyncio.Task]:
-        queue = peer_queue or asyncio.Queue(loop=self.loop)
+        queue = peer_queue or asyncio.Queue()
         return queue, self.loop.create_task(self._accumulate_peers_for_value(search_queue, queue))
diff --git a/lbry/dht/protocol/iterative_find.py b/lbry/dht/protocol/iterative_find.py
index b9678ea1e..6af8cdb9a 100644
--- a/lbry/dht/protocol/iterative_find.py
+++ b/lbry/dht/protocol/iterative_find.py
@@ -83,7 +83,7 @@ class IterativeFinder(AsyncIterator):
         self.contacted: typing.Set['KademliaPeer'] = set()
         self.distance = Distance(key)

-        self.iteration_queue = asyncio.Queue(loop=self.loop)
+        self.iteration_queue = asyncio.Queue()

         self.running_probes: typing.Dict['KademliaPeer', asyncio.Task] = {}
         self.iteration_count = 0
diff --git a/lbry/dht/protocol/protocol.py b/lbry/dht/protocol/protocol.py
index 89563c89b..79a2acf3c 100644
--- a/lbry/dht/protocol/protocol.py
+++ b/lbry/dht/protocol/protocol.py
@@ -253,7 +253,7 @@ class PingQueue:
                     del self._pending_contacts[peer]
                     self.maybe_ping(peer)
                     break
-            await asyncio.sleep(1, loop=self._loop)
+            await asyncio.sleep(1)

     def start(self):
         assert not self._running
@@ -319,10 +319,10 @@ class KademliaProtocol(DatagramProtocol):
         self.ping_queue = PingQueue(self.loop, self)
         self.node_rpc = KademliaRPC(self, self.loop, self.peer_port)
         self.rpc_timeout = rpc_timeout
-        self._split_lock = asyncio.Lock(loop=self.loop)
+        self._split_lock = asyncio.Lock()
         self._to_remove: typing.Set['KademliaPeer'] = set()
         self._to_add: typing.Set['KademliaPeer'] = set()
-        self._wakeup_routing_task = asyncio.Event(loop=self.loop)
+        self._wakeup_routing_task = asyncio.Event()
         self.maintaing_routing_task: typing.Optional[asyncio.Task] = None

     @functools.lru_cache(128)
@@ -385,7 +385,7 @@ class KademliaProtocol(DatagramProtocol):
             while self._to_add:
                 async with self._split_lock:
                     await self._add_peer(self._to_add.pop())
-            await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1, loop=self.loop), loop=self.loop)
+            await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1))
             self._wakeup_routing_task.clear()

     def _handle_rpc(self, sender_contact: 'KademliaPeer', message: RequestDatagram):
diff --git a/lbry/extras/daemon/componentmanager.py b/lbry/extras/daemon/componentmanager.py
index f9f7903ae..82b2d6db3 100644
--- a/lbry/extras/daemon/componentmanager.py
+++ b/lbry/extras/daemon/componentmanager.py
@@ -42,7 +42,7 @@ class ComponentManager:
         self.analytics_manager = analytics_manager
         self.component_classes = {}
         self.components = set()
-        self.started = asyncio.Event(loop=self.loop)
+        self.started = asyncio.Event()
         self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop())

         for component_name, component_class in self.default_component_classes.items():
diff --git a/lbry/extras/daemon/components.py b/lbry/extras/daemon/components.py
index 33deccee9..1e5e6a445 100644
--- a/lbry/extras/daemon/components.py
+++ b/lbry/extras/daemon/components.py
@@ -551,7 +551,7 @@ class UPnPComponent(Component):
         while True:
             if now:
                 await self._maintain_redirects()
-            await asyncio.sleep(360, loop=self.component_manager.loop)
+            await asyncio.sleep(360)

     async def _maintain_redirects(self):
         # setup the gateway if necessary
@@ -673,7 +673,7 @@ class UPnPComponent(Component):
             log.info("Removing upnp redirects: %s", self.upnp_redirects)
             await asyncio.wait([
                 self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
-            ], loop=self.component_manager.loop)
+            ])
         if self._maintain_redirects_task and not self._maintain_redirects_task.done():
             self._maintain_redirects_task.cancel()
diff --git a/lbry/file/file_manager.py b/lbry/file/file_manager.py
index 6b398d975..67c1630d7 100644
--- a/lbry/file/file_manager.py
+++ b/lbry/file/file_manager.py
@@ -240,8 +240,7 @@ class FileManager:
                         claim_info = await self.storage.get_content_claim_for_torrent(stream.identifier)
                         stream.set_claim(claim_info, claim)
                     if save_file:
-                        await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download),
-                                               loop=self.loop)
+                        await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download))
                     return stream
                 except asyncio.TimeoutError:
                     error = DownloadDataTimeoutError(stream.sd_hash)
diff --git a/lbry/file/source.py b/lbry/file/source.py
index 0cded2f6c..ba5bb311f 100644
--- a/lbry/file/source.py
+++ b/lbry/file/source.py
@@ -47,10 +47,10 @@ class ManagedDownloadSource:
         self.analytics_manager = analytics_manager
         self.downloader = None

-        self.saving = asyncio.Event(loop=self.loop)
-        self.finished_writing = asyncio.Event(loop=self.loop)
-        self.started_writing = asyncio.Event(loop=self.loop)
-        self.finished_write_attempt = asyncio.Event(loop=self.loop)
+        self.saving = asyncio.Event()
+        self.finished_writing = asyncio.Event()
+        self.started_writing = asyncio.Event()
+        self.finished_write_attempt = asyncio.Event()

     # @classmethod
     # async def create(cls, loop: asyncio.AbstractEventLoop, config: 'Config', file_path: str,
diff --git a/lbry/file/source_manager.py b/lbry/file/source_manager.py
index 356dcb74d..72c1709dd 100644
--- a/lbry/file/source_manager.py
+++ b/lbry/file/source_manager.py
@@ -54,7 +54,7 @@ class SourceManager:
         self.storage = storage
         self.analytics_manager = analytics_manager
         self._sources: typing.Dict[str, ManagedDownloadSource] = {}
-        self.started = asyncio.Event(loop=self.loop)
+        self.started = asyncio.Event()

     def add(self, source: ManagedDownloadSource):
         self._sources[source.identifier] = source
diff --git a/lbry/stream/downloader.py b/lbry/stream/downloader.py
index 1f78979b7..39e24b37e 100644
--- a/lbry/stream/downloader.py
+++ b/lbry/stream/downloader.py
@@ -27,8 +27,8 @@ class StreamDownloader:
         self.config = config
         self.blob_manager = blob_manager
         self.sd_hash = sd_hash
-        self.search_queue = asyncio.Queue(loop=loop)  # blob hashes to feed into the iterative finder
-        self.peer_queue = asyncio.Queue(loop=loop)  # new peers to try
+        self.search_queue = asyncio.Queue()  # blob hashes to feed into the iterative finder
+        self.peer_queue = asyncio.Queue()  # new peers to try
         self.blob_downloader = BlobDownloader(self.loop, self.config, self.blob_manager, self.peer_queue)
         self.descriptor: typing.Optional[StreamDescriptor] = descriptor
         self.node: typing.Optional['Node'] = None
@@ -72,7 +72,7 @@ class StreamDownloader:
             now = self.loop.time()
             sd_blob = await asyncio.wait_for(
                 self.blob_downloader.download_blob(self.sd_hash, connection_id),
-                self.config.blob_download_timeout, loop=self.loop
+                self.config.blob_download_timeout
             )
             log.info("downloaded sd blob %s", self.sd_hash)
             self.time_to_descriptor = self.loop.time() - now
@@ -111,7 +111,7 @@ class StreamDownloader:
             raise ValueError(f"blob {blob_info.blob_hash} is not part of stream with sd hash {self.sd_hash}")
         blob = await asyncio.wait_for(
             self.blob_downloader.download_blob(blob_info.blob_hash, blob_info.length, connection_id),
-            self.config.blob_download_timeout * 10, loop=self.loop
+            self.config.blob_download_timeout * 10
         )
         return blob
diff --git a/lbry/stream/managed_stream.py b/lbry/stream/managed_stream.py
index b548af7c8..a6be77ce4 100644
--- a/lbry/stream/managed_stream.py
+++ b/lbry/stream/managed_stream.py
@@ -60,9 +60,9 @@ class ManagedStream(ManagedDownloadSource):
         self.file_output_task: typing.Optional[asyncio.Task] = None
         self.delayed_stop_task: typing.Optional[asyncio.Task] = None
         self.streaming_responses: typing.List[typing.Tuple[Request, StreamResponse]] = []
-        self.fully_reflected = asyncio.Event(loop=self.loop)
-        self.streaming = asyncio.Event(loop=self.loop)
-        self._running = asyncio.Event(loop=self.loop)
+        self.fully_reflected = asyncio.Event()
+        self.streaming = asyncio.Event()
+        self._running = asyncio.Event()

     @property
     def sd_hash(self) -> str:
@@ -161,7 +161,7 @@ class ManagedStream(ManagedDownloadSource):
         log.info("start downloader for stream (sd hash: %s)", self.sd_hash)
         self._running.set()
         try:
-            await asyncio.wait_for(self.downloader.start(), timeout, loop=self.loop)
+            await asyncio.wait_for(self.downloader.start(), timeout)
         except asyncio.TimeoutError:
             self._running.clear()
             raise DownloadSDTimeoutError(self.sd_hash)
@@ -321,7 +321,7 @@ class ManagedStream(ManagedDownloadSource):
         await self.update_status(ManagedStream.STATUS_RUNNING)
         self.file_output_task = self.loop.create_task(self._save_file(self.full_path))
         try:
-            await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout, loop=self.loop)
+            await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout)
         except asyncio.TimeoutError:
             log.warning("timeout starting to write data for lbry://%s#%s", self.claim_name, self.claim_id)
             self.stop_tasks()
@@ -401,7 +401,7 @@ class ManagedStream(ManagedDownloadSource):
                               self.sd_hash[:6])
                     await self.stop()
                     return
-            await asyncio.sleep(1, loop=self.loop)
+            await asyncio.sleep(1)

     def _prepare_range_response_headers(self, get_range: str) -> typing.Tuple[typing.Dict[str, str], int, int, int]:
         if '=' in get_range:
diff --git a/lbry/stream/reflector/server.py b/lbry/stream/reflector/server.py
index aa41f7bc7..cc221b885 100644
--- a/lbry/stream/reflector/server.py
+++ b/lbry/stream/reflector/server.py
@@ -21,7 +21,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
         self.loop = asyncio.get_event_loop()
         self.blob_manager = blob_manager
         self.server_task: asyncio.Task = None
-        self.started_listening = asyncio.Event(loop=self.loop)
+        self.started_listening = asyncio.Event()
         self.buf = b''
         self.transport: asyncio.StreamWriter = None
         self.writer: typing.Optional['HashBlobWriter'] = None
@@ -29,9 +29,9 @@ class ReflectorServerProtocol(asyncio.Protocol):
         self.descriptor: typing.Optional['StreamDescriptor'] = None
         self.sd_blob: typing.Optional['BlobFile'] = None
         self.received = []
-        self.incoming = incoming_event or asyncio.Event(loop=self.loop)
-        self.not_incoming = not_incoming_event or asyncio.Event(loop=self.loop)
-        self.stop_event = stop_event or asyncio.Event(loop=self.loop)
+        self.incoming = incoming_event or asyncio.Event()
+        self.not_incoming = not_incoming_event or asyncio.Event()
+        self.stop_event = stop_event or asyncio.Event()
         self.chunk_size = response_chunk_size
         self.wait_for_stop_task: typing.Optional[asyncio.Task] = None
         self.partial_event = partial_event
@@ -94,7 +94,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
             self.incoming.set()
             self.send_response({"send_sd_blob": True})
             try:
-                await asyncio.wait_for(self.sd_blob.verified.wait(), 30, loop=self.loop)
+                await asyncio.wait_for(self.sd_blob.verified.wait(), 30)
                 self.descriptor = await StreamDescriptor.from_stream_descriptor_blob(
                     self.loop, self.blob_manager.blob_dir, self.sd_blob
                 )
@@ -140,7 +140,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
             self.incoming.set()
             self.send_response({"send_blob": True})
             try:
-                await asyncio.wait_for(blob.verified.wait(), 30, loop=self.loop)
+                await asyncio.wait_for(blob.verified.wait(), 30)
                 self.send_response({"received_blob": True})
             except asyncio.TimeoutError:
                 self.send_response({"received_blob": False})
@@ -162,10 +162,10 @@ class ReflectorServer:
         self.loop = asyncio.get_event_loop()
         self.blob_manager = blob_manager
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event(loop=self.loop)
-        self.stopped_listening = asyncio.Event(loop=self.loop)
-        self.incoming_event = incoming_event or asyncio.Event(loop=self.loop)
-        self.not_incoming_event = not_incoming_event or asyncio.Event(loop=self.loop)
+        self.started_listening = asyncio.Event()
+        self.stopped_listening = asyncio.Event()
+        self.incoming_event = incoming_event or asyncio.Event()
+        self.not_incoming_event = not_incoming_event or asyncio.Event()
         self.response_chunk_size = response_chunk_size
         self.stop_event = stop_event
         self.partial_needs = partial_needs  # for testing cases where it doesn't know what it wants
diff --git a/lbry/stream/stream_manager.py b/lbry/stream/stream_manager.py
index 6b4f705ff..7ecf7e442 100644
--- a/lbry/stream/stream_manager.py
+++ b/lbry/stream/stream_manager.py
@@ -54,7 +54,7 @@ class StreamManager(SourceManager):
         self.re_reflect_task: Optional[asyncio.Task] = None
         self.update_stream_finished_futs: typing.List[asyncio.Future] = []
         self.running_reflector_uploads: typing.Dict[str, asyncio.Task] = {}
-        self.started = asyncio.Event(loop=self.loop)
+        self.started = asyncio.Event()

     @property
     def streams(self):
@@ -150,7 +150,7 @@ class StreamManager(SourceManager):
                     file_info['added_on'], file_info['fully_reflected']
                 )))
         if add_stream_tasks:
-            await asyncio.gather(*add_stream_tasks, loop=self.loop)
+            await asyncio.gather(*add_stream_tasks)
         log.info("Started stream manager with %i files", len(self._sources))
         if not self.node:
             log.info("no DHT node given, resuming downloads trusting that we can contact reflector")
@@ -159,7 +159,6 @@ class StreamManager(SourceManager):
             self.resume_saving_task = asyncio.ensure_future(asyncio.gather(
                 *(self._sources[sd_hash].save_file(file_name, download_directory)
                   for (file_name, download_directory, sd_hash) in to_resume_saving),
-                loop=self.loop
             ))

     async def reflect_streams(self):
@@ -186,14 +185,14 @@ class StreamManager(SourceManager):
                         batch.append(self.reflect_stream(stream))
                     if len(batch) >= self.config.concurrent_reflector_uploads:
                         log.debug("waiting for batch of %s reflecting streams", len(batch))
-                        await asyncio.gather(*batch, loop=self.loop)
+                        await asyncio.gather(*batch)
                         log.debug("done processing %s streams", len(batch))
                         batch = []
                 if batch:
                     log.debug("waiting for batch of %s reflecting streams", len(batch))
-                    await asyncio.gather(*batch, loop=self.loop)
+                    await asyncio.gather(*batch)
                     log.debug("done processing %s streams", len(batch))
-            await asyncio.sleep(300, loop=self.loop)
+            await asyncio.sleep(300)

     async def start(self):
         await super().start()
diff --git a/lbry/torrent/session.py b/lbry/torrent/session.py
index 887dc70da..d1b118e8c 100644
--- a/lbry/torrent/session.py
+++ b/lbry/torrent/session.py
@@ -22,9 +22,9 @@ class TorrentHandle:
         self._loop = loop
         self._executor = executor
         self._handle: libtorrent.torrent_handle = handle
-        self.started = asyncio.Event(loop=loop)
-        self.finished = asyncio.Event(loop=loop)
-        self.metadata_completed = asyncio.Event(loop=loop)
+        self.started = asyncio.Event()
+        self.finished = asyncio.Event()
+        self.metadata_completed = asyncio.Event()
         self.size = 0
         self.total_wanted_done = 0
         self.name = ''
@@ -87,7 +87,7 @@ class TorrentHandle:
             self._show_status()
             if self.finished.is_set():
                 break
-            await asyncio.sleep(0.1, loop=self._loop)
+            await asyncio.sleep(0.1)

     async def pause(self):
         await self._loop.run_in_executor(
@@ -150,7 +150,7 @@ class TorrentSession:
             await self._loop.run_in_executor(
                 self._executor, self._pop_alerts
             )
-            await asyncio.sleep(1, loop=self._loop)
+            await asyncio.sleep(1)

     async def pause(self):
         await self._loop.run_in_executor(
diff --git a/lbry/torrent/torrent.py b/lbry/torrent/torrent.py
index 04a8544c7..66a14d15e 100644
--- a/lbry/torrent/torrent.py
+++ b/lbry/torrent/torrent.py
@@ -36,7 +36,7 @@ class Torrent:
     def __init__(self, loop, handle):
         self._loop = loop
         self._handle = handle
-        self.finished = asyncio.Event(loop=loop)
+        self.finished = asyncio.Event()

     def _threaded_update_status(self):
         status = self._handle.status()
@@ -58,7 +58,7 @@ class Torrent:
                 log.info("finished downloading torrent!")
                 await self.pause()
                 break
-            await asyncio.sleep(1, loop=self._loop)
+            await asyncio.sleep(1)

     async def pause(self):
         log.info("pause torrent")
diff --git a/lbry/utils.py b/lbry/utils.py
index 08b445e1f..a28b1c6dd 100644
--- a/lbry/utils.py
+++ b/lbry/utils.py
@@ -450,8 +450,8 @@ def is_running_from_bundle():


 class LockWithMetrics(asyncio.Lock):
-    def __init__(self, acquire_metric, held_time_metric, loop=None):
-        super().__init__(loop=loop)
+    def __init__(self, acquire_metric, held_time_metric):
+        super().__init__()
         self._acquire_metric = acquire_metric
         self._lock_held_time_metric = held_time_metric
         self._lock_acquired_time = None
diff --git a/lbry/wallet/rpc/session.py b/lbry/wallet/rpc/session.py
index 762bb21cd..7b3d7ecde 100644
--- a/lbry/wallet/rpc/session.py
+++ b/lbry/wallet/rpc/session.py
@@ -395,8 +395,8 @@ class RPCSession(SessionBase):
         namespace=NAMESPACE, labelnames=("version",)
     )

-    def __init__(self, *, framer=None, loop=None, connection=None):
-        super().__init__(framer=framer, loop=loop)
+    def __init__(self, *, framer=None, connection=None):
+        super().__init__(framer=framer)
         self.connection = connection or self.default_connection()
         self.client_version = 'unknown'
diff --git a/tests/dht_mocks.py b/tests/dht_mocks.py
index 4bebcfaf1..cf303215c 100644
--- a/tests/dht_mocks.py
+++ b/tests/dht_mocks.py
@@ -51,7 +51,8 @@ def mock_network_loop(loop: asyncio.AbstractEventLoop,
             return rx.datagram_received(data, from_addr)

     protocol = proto_lam()
-    transport = asyncio.DatagramTransport(extra={'socket': mock_sock})
+    transport = mock.Mock(spec=asyncio.DatagramTransport)
+    transport.get_extra_info = lambda k: {'socket': mock_sock}[k]
     transport.is_closing = lambda: False
     transport.close = lambda: mock_sock.close()
     mock_sock.sendto = sendto
diff --git a/tests/integration/datanetwork/test_file_commands.py b/tests/integration/datanetwork/test_file_commands.py
index f0185d2b8..99023e20c 100644
--- a/tests/integration/datanetwork/test_file_commands.py
+++ b/tests/integration/datanetwork/test_file_commands.py
@@ -368,7 +368,7 @@ class FileCommands(CommandTestCase):
         self.assertNotIn('error', resp)
         self.assertTrue(os.path.isfile(path))
         self.daemon.file_manager.stop()
-        await asyncio.sleep(0.01, loop=self.loop)  # FIXME: this sleep should not be needed
+        await asyncio.sleep(0.01)  # FIXME: this sleep should not be needed
         self.assertFalse(os.path.isfile(path))

     async def test_incomplete_downloads_retry(self):
diff --git a/tests/integration/datanetwork/test_streaming.py b/tests/integration/datanetwork/test_streaming.py
index 7a4efa7d2..4a61f08cc 100644
--- a/tests/integration/datanetwork/test_streaming.py
+++ b/tests/integration/datanetwork/test_streaming.py
@@ -414,6 +414,6 @@ class RangeRequestsLRUCache(CommandTestCase):

         # running with cache size 0 gets through without errors without
         # this since the server doesn't stop immediately
-        await asyncio.sleep(1, loop=self.loop)
+        await asyncio.sleep(1)

         await self._request_stream()
diff --git a/tests/unit/blob/test_blob_file.py b/tests/unit/blob/test_blob_file.py
index ba7923002..ff632adb5 100644
--- a/tests/unit/blob/test_blob_file.py
+++ b/tests/unit/blob/test_blob_file.py
@@ -36,7 +36,7 @@ class TestBlob(AsyncioTestCase):
         writer.write(self.blob_bytes)
         await blob.verified.wait()
         self.assertTrue(blob.get_is_verified())
-        await asyncio.sleep(0, loop=self.loop)  # wait for the db save task
+        await asyncio.sleep(0)  # wait for the db save task
         return blob

     async def _test_close_writers_on_finished(self, blob_class=AbstractBlob, blob_directory=None):
@@ -48,7 +48,7 @@ class TestBlob(AsyncioTestCase):
         with self.assertRaises(InvalidDataError):
             writers[1].write(self.blob_bytes * 2)
             await writers[1].finished
-        await asyncio.sleep(0, loop=self.loop)
+        await asyncio.sleep(0)
         self.assertEqual(4, len(blob.writers))

         # write the blob
@@ -208,7 +208,7 @@ class TestBlob(AsyncioTestCase):
         async def read_blob_buffer():
             with reader as read_handle:
                 self.assertEqual(1, len(blob.readers))
-                await asyncio.sleep(2, loop=self.loop)
+                await asyncio.sleep(2)
                 self.assertEqual(0, len(blob.readers))
                 return read_handle.read()
diff --git a/tests/unit/blob_exchange/test_transfer_blob.py b/tests/unit/blob_exchange/test_transfer_blob.py
index 5943c3874..786793064 100644
--- a/tests/unit/blob_exchange/test_transfer_blob.py
+++ b/tests/unit/blob_exchange/test_transfer_blob.py
@@ -183,7 +183,7 @@ class TestBlobExchange(BlobExchangeTestBase):
                 writer.write(mock_blob_bytes)
             return self.loop.create_task(_inner())

-        await asyncio.gather(write_task(writer1), write_task(writer2), loop=self.loop)
+        await asyncio.gather(write_task(writer1), write_task(writer2))

         self.assertDictEqual({1: mock_blob_bytes, 2: mock_blob_bytes}, results)
         self.assertEqual(1, write_called_count)
@@ -239,7 +239,8 @@ class TestBlobExchange(BlobExchangeTestBase):
     async def test_server_chunked_request(self):
         blob_hash = "7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed"
         server_protocol = BlobServerProtocol(self.loop, self.server_blob_manager, self.server.lbrycrd_address)
-        transport = asyncio.Transport(extra={'peername': ('ip', 90)})
+        transport = mock.Mock(spec=asyncio.Transport)
+        transport.get_extra_info = lambda k: {'peername': ('ip', 90)}[k]
         received_data = BytesIO()
         transport.is_closing = lambda: received_data.closed
         transport.write = received_data.write
@@ -269,7 +270,7 @@ class TestBlobExchange(BlobExchangeTestBase):
         client_blob.delete()

         # wait for less than the idle timeout
-        await asyncio.sleep(0.5, loop=self.loop)
+        await asyncio.sleep(0.5)

         # download the blob again
         downloaded, protocol2 = await request_blob(self.loop, client_blob, self.server_from_client.address,
@@ -283,10 +284,10 @@ class TestBlobExchange(BlobExchangeTestBase):
         client_blob.delete()

         # check that the connection times out from the server side
-        await asyncio.sleep(0.9, loop=self.loop)
+        await asyncio.sleep(0.9)
         self.assertFalse(protocol.transport.is_closing())
         self.assertIsNotNone(protocol.transport._sock)
-        await asyncio.sleep(0.1, loop=self.loop)
+        await asyncio.sleep(0.1)
         self.assertIsNone(protocol.transport)

     def test_max_request_size(self):
@@ -322,7 +323,7 @@ class TestBlobExchange(BlobExchangeTestBase):
         server_blob = self.server_blob_manager.get_blob(blob_hash)

         async def sendfile(writer):
-            await asyncio.sleep(2, loop=self.loop)
+            await asyncio.sleep(2)
             return 0

         server_blob.sendfile = sendfile
@@ -346,7 +347,7 @@ class TestBlobExchange(BlobExchangeTestBase):
         def _mock_accumulate_peers(q1, q2=None):
             async def _task():
                 pass
-            q2 = q2 or asyncio.Queue(loop=self.loop)
+            q2 = q2 or asyncio.Queue()
             return q2, self.loop.create_task(_task())

         mock_node.accumulate_peers = _mock_accumulate_peers
diff --git a/tests/unit/core/test_utils.py b/tests/unit/core/test_utils.py
index 1246e3995..9774dc0d3 100644
--- a/tests/unit/core/test_utils.py
+++ b/tests/unit/core/test_utils.py
@@ -72,14 +72,14 @@ class CacheConcurrentDecoratorTests(AsyncioTestCase):
     @utils.cache_concurrent
     async def foo(self, arg1, arg2=None, delay=1):
         self.called.append((arg1, arg2, delay))
-        await asyncio.sleep(delay, loop=self.loop)
+        await asyncio.sleep(delay)
         self.counter += 1
         self.finished.append((arg1, arg2, delay))
         return object()

     async def test_gather_duplicates(self):
         result = await asyncio.gather(
-            self.loop.create_task(self.foo(1)), self.loop.create_task(self.foo(1)), loop=self.loop
+            self.loop.create_task(self.foo(1)), self.loop.create_task(self.foo(1))
         )
         self.assertEqual(1, len(self.called))
         self.assertEqual(1, len(self.finished))
@@ -93,7 +93,7 @@ class CacheConcurrentDecoratorTests(AsyncioTestCase):

         with self.assertRaises(asyncio.CancelledError):
             await asyncio.gather(
-                t1, self.loop.create_task(self.foo(1)), loop=self.loop
+                t1, self.loop.create_task(self.foo(1))
             )
         self.assertEqual(1, len(self.called))
         self.assertEqual(0, len(self.finished))
diff --git a/tests/unit/dht/test_blob_announcer.py b/tests/unit/dht/test_blob_announcer.py
index be445aae7..4fb08b927 100644
--- a/tests/unit/dht/test_blob_announcer.py
+++ b/tests/unit/dht/test_blob_announcer.py
@@ -128,7 +128,7 @@ class TestBlobAnnouncer(AsyncioTestCase):
             await self.chain_peer(constants.generate_id(current + 4), '1.2.3.13')
             last = await self.chain_peer(constants.generate_id(current + 5), '1.2.3.14')

-            search_q, peer_q = asyncio.Queue(loop=self.loop), asyncio.Queue(loop=self.loop)
+            search_q, peer_q = asyncio.Queue(), asyncio.Queue()
             search_q.put_nowait(blob1)

             _, task = last.accumulate_peers(search_q, peer_q)
diff --git a/tests/unit/lbrynet_daemon/test_Daemon.py b/tests/unit/lbrynet_daemon/test_Daemon.py
index 6cbd57b04..37fe2e753 100644
--- a/tests/unit/lbrynet_daemon/test_Daemon.py
+++ b/tests/unit/lbrynet_daemon/test_Daemon.py
@@ -2,7 +2,6 @@ import unittest
 from unittest import mock
 import json

-from lbry.conf import Config
 from lbry.extras.daemon.storage import SQLiteStorage
 from lbry.extras.daemon.componentmanager import ComponentManager
 from lbry.extras.daemon.components import DATABASE_COMPONENT, DHT_COMPONENT, WALLET_COMPONENT
@@ -11,6 +10,7 @@ from lbry.extras.daemon.components import UPNP_COMPONENT, BLOB_COMPONENT
 from lbry.extras.daemon.components import PEER_PROTOCOL_SERVER_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT
 from lbry.extras.daemon.daemon import Daemon as LBRYDaemon
 from lbry.wallet import WalletManager, Wallet
+from lbry.conf import Config

 from tests import test_utils
 # from tests.mocks import mock_conf_settings, FakeNetwork, FakeFileManager
diff --git a/tests/unit/stream/test_managed_stream.py b/tests/unit/stream/test_managed_stream.py
index 3f488708f..387fbef77 100644
--- a/tests/unit/stream/test_managed_stream.py
+++ b/tests/unit/stream/test_managed_stream.py
@@ -109,9 +109,9 @@ class TestManagedStream(BlobExchangeTestBase):
         await self._test_transfer_stream(10, stop_when_done=False)
         self.assertEqual(self.stream.status, "finished")
         self.assertTrue(self.stream._running.is_set())
-        await asyncio.sleep(0.5, loop=self.loop)
+        await asyncio.sleep(0.5)
         self.assertTrue(self.stream._running.is_set())
-        await asyncio.sleep(2, loop=self.loop)
+        await asyncio.sleep(2)
         self.assertEqual(self.stream.status, "finished")
         self.assertFalse(self.stream._running.is_set())
diff --git a/tests/unit/stream/test_reflector.py b/tests/unit/stream/test_reflector.py
index 205de99bd..ed7494f5b 100644
--- a/tests/unit/stream/test_reflector.py
+++ b/tests/unit/stream/test_reflector.py
@@ -86,13 +86,13 @@ class TestReflector(AsyncioTestCase):
         self.assertListEqual(sent, [])

     async def test_reflect_stream(self):
-        return await asyncio.wait_for(self._test_reflect_stream(response_chunk_size=50), 3, loop=self.loop)
+        return await asyncio.wait_for(self._test_reflect_stream(response_chunk_size=50), 3)

     async def test_reflect_stream_but_reflector_changes_its_mind(self):
-        return await asyncio.wait_for(self._test_reflect_stream(partial_needs=True), 3, loop=self.loop)
+        return await asyncio.wait_for(self._test_reflect_stream(partial_needs=True), 3)

     async def test_reflect_stream_small_response_chunks(self):
-        return await asyncio.wait_for(self._test_reflect_stream(response_chunk_size=30), 3, loop=self.loop)
+        return await asyncio.wait_for(self._test_reflect_stream(response_chunk_size=30), 3)

     async def test_announces(self):
         to_announce = await self.storage.get_blobs_to_announce()
diff --git a/tests/unit/stream/test_stream_manager.py b/tests/unit/stream/test_stream_manager.py
index 76e2a5c04..ba6d8dbc8 100644
--- a/tests/unit/stream/test_stream_manager.py
+++ b/tests/unit/stream/test_stream_manager.py
@@ -174,7 +174,7 @@ class TestStreamManager(BlobExchangeTestBase):
             await self.file_manager.download_from_uri(self.uri, self.exchange_rate_manager)
         else:
             await self.file_manager.download_from_uri(self.uri, self.exchange_rate_manager)
-            await asyncio.sleep(0, loop=self.loop)
+            await asyncio.sleep(0)
         self.assertTrue(checked_analytics_event)

     async def test_time_to_first_bytes(self):
@@ -317,7 +317,7 @@ class TestStreamManager(BlobExchangeTestBase):
         stream.downloader.node = self.stream_manager.node
         await stream.save_file()
         await stream.finished_writing.wait()
-        await asyncio.sleep(0, loop=self.loop)
+        await asyncio.sleep(0)
         self.assertTrue(stream.finished)
         self.assertFalse(stream.running)
         self.assertTrue(os.path.isfile(os.path.join(self.client_dir, "test_file")))
@@ -355,7 +355,7 @@ class TestStreamManager(BlobExchangeTestBase):
         self.stream_manager.analytics_manager._post = check_post

         await self._test_download_error_on_start(expected_error, timeout)
-        await asyncio.sleep(0, loop=self.loop)
+        await asyncio.sleep(0)
         self.assertListEqual([expected_error.__name__], received)

     async def test_insufficient_funds(self):
@@ -448,7 +448,7 @@ class TestStreamManager(BlobExchangeTestBase):
         self.assertDictEqual(self.stream_manager.streams, {})
         stream = await self.file_manager.download_from_uri(self.uri, self.exchange_rate_manager)
         await stream.finished_writing.wait()
-        await asyncio.sleep(0, loop=self.loop)
+        await asyncio.sleep(0)
         self.stream_manager.stop()
         self.client_blob_manager.stop()
         # partial removal, only sd blob is missing.
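
The pattern applied throughout this patch: the loop argument accepted by asyncio primitives and helpers (Event, Lock, Queue, Future, sleep, wait, wait_for, gather) was deprecated in Python 3.8 and removed in Python 3.10, so these objects now bind to the running event loop on first use instead of taking one explicitly. A minimal before/after sketch, assuming Python 3.10+; the Example class and names below are illustrative only, not taken from the codebase:

    import asyncio

    class Example:
        def __init__(self):
            # old style: asyncio.Event(loop=self.loop) -- raises TypeError on Python 3.10+
            self.ready = asyncio.Event()  # binds to the running loop when first awaited

        async def wait_ready(self, timeout: float):
            # old style: await asyncio.wait_for(self.ready.wait(), timeout, loop=self.loop)
            await asyncio.wait_for(self.ready.wait(), timeout)

    async def main():
        example = Example()
        example.ready.set()
        await example.wait_ready(1.0)

    if __name__ == "__main__":
        asyncio.run(main())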