forked from LBRYCommunity/lbry-sdk
upgrade pylint and fix lint errors
This commit is contained in: parent 69ea65835d, commit dc1d9e1c84
17 changed files with 39 additions and 35 deletions
Makefile (4 changed lines)
@@ -4,8 +4,8 @@ install:
 	pip install -e .
 
 tools:
-	pip install mypy==0.701 pylint==2.4.4
-	pip install coverage astroid pylint
+	pip install pylint==2.10.0
+	pip install coverage
 
 lint:
 	pylint --rcfile=setup.cfg lbry
@@ -203,7 +203,7 @@ class MaxKeyFee(Setting[dict]):
         )
         parser.add_argument(
             self.no_cli_name,
-            help=f"Disable maximum key fee check.",
+            help="Disable maximum key fee check.",
             dest=self.name,
             const=None,
             action="store_const",
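Note: the dropped f prefix here (and in the similar string changes below) addresses pylint's f-string-without-interpolation warning: an f-string with no placeholders is just a plain string. A minimal sketch of the rule, with made-up strings rather than the repo's code:

    name = "key fee"
    flagged = f"Disable maximum key fee check."   # f-string-without-interpolation: no {...} placeholders
    plain = "Disable maximum key fee check."      # same value, no warning
    justified = f"Disable maximum {name} check."  # interpolation actually used, so the f prefix stays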
@@ -556,7 +556,7 @@ class KademliaProtocol(DatagramProtocol):
                 address[0], address[1], OLD_PROTOCOL_ERRORS[error_datagram.response]
             )
 
-    def datagram_received(self, datagram: bytes, address: typing.Tuple[str, int]) -> None:  # pylint: disable=arguments-differ
+    def datagram_received(self, datagram: bytes, address: typing.Tuple[str, int]) -> None:  # pylint: disable=arguments-renamed
         try:
             message = decode_datagram(datagram)
         except (ValueError, TypeError, DecodeError):
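Note: the disable comment changes because newer pylint releases report an override that keeps the same number of arguments but renames one as arguments-renamed instead of the broader arguments-differ; asyncio's DatagramProtocol names the first parameter data, while this override calls it datagram. A toy sketch of the distinction, using hypothetical classes:

    class Base:
        def handle(self, data):
            pass

    class Renamed(Base):
        def handle(self, datagram):     # same arity, renamed parameter -> arguments-renamed
            pass

    class Differ(Base):
        def handle(self, data, extra):  # different signature -> arguments-differ
            pass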
@@ -50,7 +50,7 @@ class KBucket:
             self.peers.append(peer)
             return True
         else:
-            for i in range(len(self.peers)):
+            for i, _ in enumerate(self.peers):
                 local_peer = self.peers[i]
                 if local_peer.node_id == peer.node_id:
                     self.peers.remove(local_peer)
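Note: the range(len(...)) loop is rewritten with enumerate to satisfy pylint's consider-using-enumerate check; the index is still needed, so the unused element is bound to _. A simplified sketch with stand-in data, not the actual KBucket logic:

    peers = ['a', 'b', 'c']

    for i in range(len(peers)):        # flagged: consider-using-enumerate
        print(i, peers[i])

    for i, peer in enumerate(peers):   # preferred: index and element in one step
        print(i, peer)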
@@ -181,7 +181,7 @@ def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDa
 def make_compact_ip(address: str) -> bytearray:
     compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), address.split('.'), bytearray())
     if len(compact_ip) != 4:
-        raise ValueError(f"invalid IPv4 length")
+        raise ValueError("invalid IPv4 length")
     return compact_ip
 
 
@@ -190,7 +190,7 @@ def make_compact_address(node_id: bytes, address: str, port: int) -> bytearray:
     if not 0 < port < 65536:
         raise ValueError(f'Invalid port: {port}')
     if len(node_id) != constants.HASH_BITS // 8:
-        raise ValueError(f"invalid node node_id length")
+        raise ValueError("invalid node node_id length")
     return compact_ip + port.to_bytes(2, 'big') + node_id
 
 
@@ -201,5 +201,5 @@ def decode_compact_address(compact_address: bytes) -> typing.Tuple[bytes, str, i
     if not 0 < port < 65536:
         raise ValueError(f'Invalid port: {port}')
     if len(node_id) != constants.HASH_BITS // 8:
-        raise ValueError(f"invalid node node_id length")
+        raise ValueError("invalid node node_id length")
     return node_id, address, port
@@ -63,7 +63,7 @@ class ErrorClass:
     @staticmethod
     def get_fields(args):
         if len(args) > 1:
-            return f''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:])
+            return ''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:])
         return ''
 
     @staticmethod
@@ -101,7 +101,7 @@ class ArgumentParser(argparse.ArgumentParser):
         self._optionals.title = 'Options'
         if group_name is None:
             self.epilog = (
-                f"Run 'lbrynet COMMAND --help' for more information on a command or group."
+                "Run 'lbrynet COMMAND --help' for more information on a command or group."
             )
         else:
             self.epilog = (
@@ -1701,9 +1701,9 @@ class Daemon(metaclass=JSONRPCServerType):
             'change': {'gap': change_gap, 'maximum_uses_per_address': change_max_uses},
             'receiving': {'gap': receiving_gap, 'maximum_uses_per_address': receiving_max_uses},
         }
-        for chain_name in address_changes:
+        for chain_name, changes in address_changes.items():
             chain = getattr(account, chain_name)
-            for attr, value in address_changes[chain_name].items():
+            for attr, value in changes.items():
                 if value is not None:
                     setattr(chain, attr, value)
                     change_made = True
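Note: iterating the dict with .items() removes the repeated address_changes[chain_name] lookups and matches what pylint's consider-using-dict-items suggests. A sketch with placeholder data rather than the daemon's real settings:

    address_changes = {'change': {'gap': 5}, 'receiving': {'gap': 10}}

    # Old shape: loop over keys, then look the value up again inside the loop
    for chain_name in address_changes:
        for attr, value in address_changes[chain_name].items():
            print(chain_name, attr, value)

    # New shape: unpack key and value together
    for chain_name, changes in address_changes.items():
        for attr, value in changes.items():
            print(chain_name, attr, value)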
@@ -2240,15 +2240,15 @@ class Daemon(metaclass=JSONRPCServerType):
             txo = await self.ledger.get_claim_by_claim_id(accounts, claim_id, include_purchase_receipt=True)
             if not isinstance(txo, Output) or not txo.is_claim:
                 # TODO: use error from lbry.error
-                raise Exception(f"Could not find claim with claim_id '{claim_id}'. ")
+                raise Exception(f"Could not find claim with claim_id '{claim_id}'.")
         elif url:
             txo = (await self.ledger.resolve(accounts, [url], include_purchase_receipt=True))[url]
             if not isinstance(txo, Output) or not txo.is_claim:
                 # TODO: use error from lbry.error
-                raise Exception(f"Could not find claim with url '{url}'. ")
+                raise Exception(f"Could not find claim with url '{url}'.")
         else:
             # TODO: use error from lbry.error
-            raise Exception(f"Missing argument claim_id or url. ")
+            raise Exception("Missing argument claim_id or url.")
         if not allow_duplicate_purchase and txo.purchase_receipt:
             # TODO: use error from lbry.error
             raise Exception(
@@ -4091,15 +4091,15 @@ class Daemon(metaclass=JSONRPCServerType):
             txo = await self.ledger.get_claim_by_claim_id(wallet.accounts, claim_id)
             if not isinstance(txo, Output) or not txo.is_claim:
                 # TODO: use error from lbry.error
-                raise Exception(f"Could not find collection with claim_id '{claim_id}'. ")
+                raise Exception(f"Could not find collection with claim_id '{claim_id}'.")
         elif url:
             txo = (await self.ledger.resolve(wallet.accounts, [url]))[url]
             if not isinstance(txo, Output) or not txo.is_claim:
                 # TODO: use error from lbry.error
-                raise Exception(f"Could not find collection with url '{url}'. ")
+                raise Exception(f"Could not find collection with url '{url}'.")
         else:
             # TODO: use error from lbry.error
-            raise Exception(f"Missing argument claim_id or url. ")
+            raise Exception("Missing argument claim_id or url.")
 
         page_num, page_size = abs(page), min(abs(page_size), 50)
         items = await self.ledger.resolve_collection(txo, page_size * (page_num - 1), page_size)
@@ -5072,7 +5072,7 @@ class Daemon(metaclass=JSONRPCServerType):
             'buckets': {}
         }
 
-        for i in range(len(self.dht_node.protocol.routing_table.buckets)):
+        for i, _ in enumerate(self.dht_node.protocol.routing_table.buckets):
             result['buckets'][i] = []
             for peer in self.dht_node.protocol.routing_table.buckets[i].peers:
                 host = {
@@ -123,7 +123,7 @@ class JSONResponseEncoder(JSONEncoder):
         self.ledger = ledger
         self.include_protobuf = include_protobuf
 
-    def default(self, obj):  # pylint: disable=method-hidden,arguments-differ,too-many-return-statements
+    def default(self, obj):  # pylint: disable=method-hidden,arguments-renamed,too-many-return-statements
         if isinstance(obj, Account):
             return self.encode_account(obj)
         if isinstance(obj, Wallet):
@@ -617,7 +617,7 @@ class SQLiteStorage(SQLiteMixin):
             ).fetchall()
             download_dir = binascii.hexlify(self.conf.download_dir.encode()).decode()
             transaction.executemany(
-                f"update file set download_directory=? where stream_hash=?",
+                "update file set download_directory=? where stream_hash=?",
                 ((download_dir, stream_hash) for stream_hash in stream_hashes)
             ).fetchall()
         await self.db.run_with_foreign_keys_disabled(_recover)
@@ -861,6 +861,6 @@ class SQLiteStorage(SQLiteMixin):
             transaction.execute('delete from peer').fetchall()
             transaction.executemany(
                 'insert into peer(node_id, address, udp_port, tcp_port) values (?, ?, ?, ?)',
-                tuple([(binascii.hexlify(p.node_id), p.address, p.udp_port, p.tcp_port) for p in peers])
+                ((binascii.hexlify(p.node_id), p.address, p.udp_port, p.tcp_port) for p in peers)
             ).fetchall()
         return await self.db.run(_save_kademlia_peers)
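Note: sqlite3's executemany accepts any iterable of parameter rows, so the intermediate list wrapped in tuple(...) can be replaced by a plain generator expression. A self-contained sketch with stand-in rows, not the real Kademlia peer objects:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("create table peer (node_id text, address text, udp_port int, tcp_port int)")
    rows = [("aa", "1.2.3.4", 4444, 3333), ("bb", "5.6.7.8", 4445, 3334)]

    # A generator expression streams rows into executemany without building a throwaway tuple first.
    conn.executemany(
        "insert into peer(node_id, address, udp_port, tcp_port) values (?, ?, ?, ?)",
        ((node_id, address, udp, tcp) for node_id, address, udp, tcp in rows),
    )
    conn.commit()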
@@ -69,8 +69,8 @@ class VideoFileAnalyzer:
             version = str(e)
         if code != 0 or not version.startswith("ffmpeg"):
             log.warning("Unable to run ffmpeg, but it was requested. Code: %d; Message: %s", code, version)
-            raise FileNotFoundError(f"Unable to locate or run ffmpeg or ffprobe. Please install FFmpeg "
-                                    f"and ensure that it is callable via PATH or conf.ffmpeg_path")
+            raise FileNotFoundError("Unable to locate or run ffmpeg or ffprobe. Please install FFmpeg "
+                                    "and ensure that it is callable via PATH or conf.ffmpeg_path")
         log.debug("Using %s at %s", version.splitlines()[0].split(" Copyright")[0], self._which_ffmpeg)
         return version
 
@@ -254,7 +254,7 @@ class ManagedStream(ManagedDownloadSource):
         self.finished_writing.clear()
         self.started_writing.clear()
         try:
-            open(output_path, 'wb').close()
+            open(output_path, 'wb').close()  # pylint: disable=consider-using-with
             async for blob_info, decrypted in self._aiter_read_stream(connection_id=self.SAVING_ID):
                 log.info("write blob %i/%i", blob_info.blob_num + 1, len(self.descriptor.blobs) - 1)
                 await self.loop.run_in_executor(None, self._write_decrypted_blob, output_path, decrypted)
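Note: newer pylint releases flag resources opened without a with block as consider-using-with. Here the open(...).close() only pre-creates and truncates the output file, so the commit silences the check rather than restructuring the code. A sketch of the two equivalent spellings, using a throwaway path:

    import os
    import tempfile

    output_path = os.path.join(tempfile.gettempdir(), "example.bin")

    open(output_path, 'wb').close()  # pylint: disable=consider-using-with  (what the diff keeps)

    with open(output_path, 'wb'):    # with-block form that would satisfy the check
        pass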
@@ -155,7 +155,7 @@ def async_timed_cache(duration: int):
         async def _inner(*args, **kwargs) -> typing.Any:
             loop = asyncio.get_running_loop()
             time_now = loop.time()
-            key = tuple([args, tuple([tuple([k, kwargs[k]]) for k in kwargs])])
+            key = (args, tuple(kwargs.items()))
             if key in cache and (time_now - cache[key][1] < duration):
                 return cache[key][0]
             to_cache = await func(*args, **kwargs)
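Note: the rewritten key builds the same hashable (args, kwargs-items) pair as before, just without the nested list building; the same change is applied to the two caching helpers below. A quick sketch with made-up arguments showing the two forms are equal:

    args = (1, 2)
    kwargs = {'a': 3, 'b': 4}

    old_key = tuple([args, tuple([tuple([k, kwargs[k]]) for k in kwargs])])
    new_key = (args, tuple(kwargs.items()))

    assert old_key == new_key == ((1, 2), (('a', 3), ('b', 4)))
    assert hash(old_key) == hash(new_key)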
@@ -173,7 +173,7 @@ def cache_concurrent(async_fn):
 
     @functools.wraps(async_fn)
     async def wrapper(*args, **kwargs):
-        key = tuple([args, tuple([tuple([k, kwargs[k]]) for k in kwargs])])
+        key = (args, tuple(kwargs.items()))
         cache[key] = cache.get(key) or asyncio.create_task(async_fn(*args, **kwargs))
         try:
             return await cache[key]
@@ -342,7 +342,7 @@ def lru_cache_concurrent(cache_size: typing.Optional[int] = None,
 
         @functools.wraps(async_fn)
         async def _inner(*args, **kwargs):
-            key = tuple([args, tuple([tuple([k, kwargs[k]]) for k in kwargs])])
+            key = (args, tuple(kwargs.items()))
             if key in lru_cache:
                 return lru_cache.get(key)
 
@@ -250,7 +250,7 @@ class Account:
         generator_name = address_generator.get('name', HierarchicalDeterministic.name)
         self.address_generator = self.address_generators[generator_name]
         self.receiving, self.change = self.address_generator.from_dict(self, address_generator)
-        self.address_managers = {am.chain_number: am for am in {self.receiving, self.change}}
+        self.address_managers = {am.chain_number: am for am in (self.receiving, self.change)}
         self.channel_keys = channel_keys
         ledger.add_account(self)
         wallet.add_account(self)
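Note: the comprehension now iterates a tuple instead of a set literal; the mapping it builds is the same, but a tuple keeps a fixed iteration order and does not hash or deduplicate its members. A toy sketch with stand-in chain objects, not the wallet's real address managers:

    class Chain:
        def __init__(self, chain_number):
            self.chain_number = chain_number

    receiving, change = Chain(0), Chain(1)

    from_set = {c.chain_number: c for c in {receiving, change}}    # set: unordered, dedupes equal members
    from_tuple = {c.chain_number: c for c in (receiving, change)}  # tuple: fixed order, no hashing involved

    assert from_set == from_tuple == {0: receiving, 1: change}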
@@ -46,9 +46,11 @@ class _KeyBase:
         if len(raw_serkey) != 33:
             raise ValueError('raw_serkey must have length 33')
 
-        return (ver_bytes + bytes((self.depth,))
-                + self.parent_fingerprint() + self.n.to_bytes(4, 'big')
-                + self.chain_code + raw_serkey)
+        return (
+            ver_bytes + bytes((self.depth,))
+            + self.parent_fingerprint() + self.n.to_bytes(4, 'big')
+            + self.chain_code + raw_serkey
+        )
 
     def identifier(self):
         raise NotImplementedError
@@ -82,10 +82,10 @@ class AIOSQLite:
         "read_count", "Number of database reads", namespace="daemon_database"
     )
     acquire_write_lock_metric = Histogram(
-        f'write_lock_acquired', 'Time to acquire the write lock', namespace="daemon_database", buckets=HISTOGRAM_BUCKETS
+        'write_lock_acquired', 'Time to acquire the write lock', namespace="daemon_database", buckets=HISTOGRAM_BUCKETS
     )
     held_write_lock_metric = Histogram(
-        f'write_lock_held', 'Length of time the write lock is held for', namespace="daemon_database",
+        'write_lock_held', 'Length of time the write lock is held for', namespace="daemon_database",
         buckets=HISTOGRAM_BUCKETS
     )
 
@@ -506,7 +506,7 @@ def _get_spendable_utxos(transaction: sqlite3.Connection, accounts: List, decode
                          amount_to_reserve: int, reserved_amount: int, floor: int, ceiling: int,
                          fee_per_byte: int) -> int:
     accounts_fmt = ",".join(["?"] * len(accounts))
-    txo_query = f"""
+    txo_query = """
         SELECT tx.txid, txo.txoid, tx.raw, tx.height, txo.position as nout, tx.is_verified, txo.amount FROM txo
         INNER JOIN account_address USING (address)
         LEFT JOIN txi USING (txoid)
@@ -21,6 +21,7 @@ disable=
     c-extension-no-member,
     fixme,
     broad-except,
+    raise-missing-from,
     no-else-return,
     cyclic-import,
     missing-docstring,
@@ -37,5 +38,6 @@ disable=
     too-many-public-methods,
     too-many-return-statements,
     too-many-instance-attributes,
+    unspecified-encoding,
     protected-access,
     unused-argument
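Note: the two new entries disable checks introduced in recent pylint releases rather than touching every call site: raise-missing-from wants re-raised exceptions chained with "from", and unspecified-encoding (new in pylint 2.10) wants an explicit encoding= on text-mode open() calls. A sketch of what each would flag, using example code that is not from the repo:

    def parse_port(value):
        try:
            return int(value)
        except ValueError:
            # raise-missing-from: pylint suggests `raise RuntimeError(...) from err`
            raise RuntimeError(f"bad port: {value}")

    def read_config(path):
        # unspecified-encoding: pylint suggests open(path, encoding="utf-8")
        with open(path) as config_file:
            return config_file.read()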