Fix spelling

Authored by Hugo on 2019-10-02 21:04:30 +03:00; committed by Lex Berezhny
parent 3f89ed4579
commit 3a3a9b5f4e
22 changed files with 32 additions and 32 deletions

View file

@@ -1694,7 +1694,7 @@
{
"name": "support_amount",
"type": "int",
- "description": "limit by supports and tips recieved (supports equality constraints)",
+ "description": "limit by supports and tips received (supports equality constraints)",
"is_required": false
},
{
@@ -1992,7 +1992,7 @@
{
"name": "visible",
"type": "bool",
- "description": "Select only Visisble Comments",
+ "description": "Select only Visible Comments",
"is_required": false
},
{
@@ -2189,7 +2189,7 @@
{
"name": "comparison",
"type": "str",
- "description": "logical comparision, (eq | ne | g | ge | l | le)",
+ "description": "logical comparison, (eq | ne | g | ge | l | le)",
"is_required": false
}
],
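An aside on the six comparison tokens documented above: they read as the usual ordering operators. A minimal sketch of that mapping in Python follows; the mapping is inferred from the token names, not taken from the codebase:

import operator

# Inferred reading of the documented tokens; 'g' and 'l' are taken
# as strict greater-than and less-than.
COMPARISON_OPERATORS = {
    'eq': operator.eq,  # equal
    'ne': operator.ne,  # not equal
    'g': operator.gt,   # greater than
    'ge': operator.ge,  # greater than or equal
    'l': operator.lt,   # less than
    'le': operator.le,  # less than or equal
}

def matches(value, token, bound):
    # e.g. matches(support_amount, 'ge', 100) is True when the amount >= 100
    return COMPARISON_OPERATORS[token](value, bound)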

View file

@@ -67,7 +67,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
self.blob.set_length(blob_response.length)
elif blob_response and not blob_response.error and self.blob.blob_hash != blob_response.blob_hash:
# the server started sending a blob we didn't request
- log.warning("%s started sending blob we didnt request %s instead of %s", self.peer_address,
+ log.warning("%s started sending blob we didn't request %s instead of %s", self.peer_address,
blob_response.blob_hash, self.blob.blob_hash)
return
if response.responses:
@@ -139,7 +139,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
log.warning("data rate rejected by %s:%i", self.peer_address, self.peer_port)
return self._blob_bytes_received, self.close()
if not blob_response or blob_response.error:
- log.warning("blob cant be downloaded from %s:%i", self.peer_address, self.peer_port)
+ log.warning("blob can't be downloaded from %s:%i", self.peer_address, self.peer_port)
return self._blob_bytes_received, self.close()
if not blob_response.error and blob_response.blob_hash != self.blob.blob_hash:
log.warning("incoming blob hash mismatch from %s:%i", self.peer_address, self.peer_port)

View file

@@ -36,7 +36,7 @@ class Node:
total_peers: typing.List['KademliaPeer'] = []
# add all peers in the routing table
total_peers.extend(self.protocol.routing_table.get_peers())
- # add all the peers who have announed blobs to us
+ # add all the peers who have announced blobs to us
total_peers.extend(self.protocol.data_store.get_storing_contacts())
# get ids falling in the midpoint of each bucket that hasn't been recently updated
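The trailing comment mentions picking ids at the midpoint of buckets that haven't been refreshed recently. A hedged sketch of that calculation, assuming a bucket is described by the integer bounds of its key range; the helper name and the 48-byte id width are assumptions, not the project's API:

def bucket_midpoint(bucket_low: int, bucket_high: int, id_bytes: int = 48) -> bytes:
    # Take the id halfway through the bucket's key range and encode it
    # back into a node-id sized byte string, suitable for a lookup.
    midpoint = (bucket_low + bucket_high) // 2
    return midpoint.to_bytes(id_bytes, 'big')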

View file

@@ -1726,7 +1726,7 @@ class Daemon(metaclass=JSONRPCServerType):
--blobs_in_stream<blobs_in_stream> : (int) get file with matching blobs in stream
--blobs_remaining=<blobs_remaining> : (int) amount of remaining blobs to download
--sort=<sort_by> : (str) field to sort by (one of the above filter fields)
- --comparison=<comparison> : (str) logical comparision, (eq | ne | g | ge | l | le)
+ --comparison=<comparison> : (str) logical comparison, (eq | ne | g | ge | l | le)
Returns: {List[File]}
"""
@@ -1975,7 +1975,7 @@ class Daemon(metaclass=JSONRPCServerType):
a release time the publish time is used instead
(supports equality constraints)
--amount=<amount> : (int) limit by claim value (supports equality constraints)
- --support_amount=<support_amount>: (int) limit by supports and tips recieved (supports
+ --support_amount=<support_amount>: (int) limit by supports and tips received (supports
equality constraints)
--effective_amount=<effective_amount>: (int) limit by total value (initial claim value plus
all tips and supports received), this amount is
@@ -3695,7 +3695,7 @@ class Daemon(metaclass=JSONRPCServerType):
--is_channel_signature_valid : (bool) Only include comments with valid signatures.
[Warning: Paginated total size will not change, even
if list reduces]
- --visible : (bool) Select only Visisble Comments
+ --visible : (bool) Select only Visible Comments
--hidden : (bool) Select only Hidden Comments
Returns:

View file

@@ -442,7 +442,7 @@ class StreamManager:
except asyncio.TimeoutError:
error = DownloadDataTimeout(stream.sd_hash)
raise error
- except Exception as err: # forgive data timeout, dont delete stream
+ except Exception as err: # forgive data timeout, don't delete stream
error = err
raise
finally:

View file

@@ -79,7 +79,7 @@ class LBC(Coin):
@classmethod
def address_from_script(cls, script):
- '''Given a pk_script, return the adddress it pays to, or None.'''
+ '''Given a pk_script, return the address it pays to, or None.'''
return ScriptPubKey.pay_to(cls.address_handlers, script)
@classmethod

View file

@@ -70,7 +70,7 @@ class DHTIntegrationTest(AsyncioTestCase):
await asyncio.sleep(.1)
timeout -= 1
if not timeout:
- self.fail("node didnt join back after 2 seconds")
+ self.fail("node didn't join back after 2 seconds")
async def test_announce_no_peers(self):
await self.setup_network(1)
@@ -100,7 +100,7 @@ class DHTIntegrationTest(AsyncioTestCase):
await self.setup_network(2, seed_nodes=2)
node1, node2 = self.nodes
node2.stop()
- # forcefully make it a bad peer but dont remove it from routing table
+ # forcefully make it a bad peer but don't remove it from routing table
address, port, node_id = node2.protocol.external_ip, node2.protocol.udp_port, node2.protocol.node_id
peer = KademliaPeer(self.loop, address, node_id, port)
self.assertTrue(node1.protocol.peer_manager.peer_is_good(peer))

View file

@@ -98,7 +98,7 @@ class FileCommands(CommandTestCase):
resp = await self.daemon.jsonrpc_get('lbry://foo', timeout=2, save_file=True)
self.assertIn('error', resp)
self.assertEqual('Failed to download data blobs for sd hash %s within timeout' % sd_hash, resp['error'])
- self.assertTrue(await self.daemon.jsonrpc_file_delete(claim_name='foo'), "data timeout didnt create a file")
+ self.assertTrue(await self.daemon.jsonrpc_file_delete(claim_name='foo'), "data timeout didn't create a file")
await self.server.blob_manager.delete_blobs([sd_hash])
resp = await self.daemon.jsonrpc_get('lbry://foo', timeout=2, save_file=True)
self.assertIn('error', resp)
@@ -118,7 +118,7 @@ class FileCommands(CommandTestCase):
handle.write(b'some other stuff was there instead')
self.daemon.stream_manager.stop()
await self.daemon.stream_manager.start()
- await asyncio.wait_for(self.wait_files_to_complete(), timeout=5) # if this hangs, file didnt get set completed
+ await asyncio.wait_for(self.wait_files_to_complete(), timeout=5) # if this hangs, file didn't get set completed
# check that internal state got through up to the file list API
stream = self.daemon.stream_manager.get_stream_by_stream_hash(file_info['stream_hash'])
file_info = self.sout(self.daemon.jsonrpc_file_list()[0])

View file

@@ -178,7 +178,7 @@ class ResolveCommand(BaseResolveTestCase):
channel = (await self.channel_create('@abc', '1.0'))['outputs'][0]
orphan_claim_id = self.get_claim_id(orphan_claim)
- # Original channel doesnt exists anymore, so the signature is invalid. For invalid signatures, resolution is
+ # Original channel doesn't exists anymore, so the signature is invalid. For invalid signatures, resolution is
# only possible outside a channel
response = await self.resolve('lbry://@abc/on-channel-claim')
self.assertEqual(response, {

View file

@@ -413,7 +413,7 @@ class RangeRequestsLRUCache(CommandTestCase):
self.server.stop_server()
# running with cache size 0 gets through without errors without
- # this since the server doesnt stop immediately
+ # this since the server doesn't stop immediately
await asyncio.sleep(1, loop=self.loop)
await self._request_stream()

View file

@@ -55,7 +55,7 @@ class TestHeadersComponent(CommandTestCase):
async def test_cant_reach_host(self):
HeadersComponent.HEADERS_URL = 'notthere/'
os.unlink(self.headers_component.headers.path)
- # test is that this doesnt raise
+ # test is that this doesn't raise
await self.headers_component.start()
self.assertTrue(self.component_manager.get_components_status()['blockchain_headers'])
self.assertEqual(await self.headers_component.get_status(), {})

View file

@@ -126,5 +126,5 @@ class TestRecoverOldStreamDescriptors(AsyncioTestCase):
loop, tmp_dir, blob
)
self.assertFalse(blob.file_exists)
- # fixme: this is an emergency PR, plase move this to blob_file tests later
+ # fixme: this is an emergency PR, please move this to blob_file tests later
self.assertIsNone(blob.length)

View file

@@ -100,7 +100,7 @@ class ReconnectTests(IntegrationTestCase):
# * goes to pick some water outside... * time passes by and another donation comes in
sendtxid = await self.blockchain.send_to_address(address1, 42)
await self.blockchain.generate(1)
- # (this is just so the test doesnt hang forever if it doesnt reconnect)
+ # (this is just so the test doesn't hang forever if it doesn't reconnect)
if not self.ledger.network.is_connected:
await asyncio.wait_for(self.ledger.network.on_connected.first, timeout=1.0)
# omg, the burned cable still works! torba is fire proof!

View file

@@ -544,7 +544,7 @@ class BaseLedger(metaclass=LedgerRegistry):
return None
def broadcast(self, tx):
- # broadcast cant be a retriable call yet
+ # broadcast can't be a retriable call yet
return self.network.broadcast(hexlify(tx.raw).decode())
async def wait(self, tx: basetransaction.BaseTransaction, height=-1, timeout=None):

View file

@@ -73,7 +73,7 @@ class ClientSession(BaseClientSession):
return reply
except (RPCError, ProtocolError) as e:
if str(e).find('.*no such .*transaction.*'):
- # shouldnt the server return none instead?
+ # shouldn't the server return none instead?
return None
log.warning("Wallet server (%s:%i) returned an error. Code: %s Message: %s",
*self.server, *e.args)
@@ -256,7 +256,7 @@ class BaseNetwork:
try:
return await self.rpc('blockchain.address.subscribe', [address], True)
except asyncio.TimeoutError:
- # abort and cancel, we cant lose a subscription, it will happen again on reconnect
+ # abort and cancel, we can't lose a subscription, it will happen again on reconnect
if self.client:
self.client.abort()
raise asyncio.CancelledError()

View file

@@ -37,7 +37,7 @@ class FramerBase(object):
"""Abstract base class for a framer.
A framer breaks an incoming byte stream into protocol messages,
- buffering if necesary. It also frames outgoing messages into
+ buffering if necessary. It also frames outgoing messages into
a byte stream.
"""

View file

@@ -513,7 +513,7 @@ class JSONRPCv2(JSONRPC):
class JSONRPCLoose(JSONRPC):
- """A relaxed versin of JSON RPC."""
+ """A relaxed version of JSON RPC."""
# Don't be so loose we accept any old message ID
_message_id = JSONRPCv2._message_id

View file

@@ -93,7 +93,7 @@ class SessionBase(asyncio.Protocol):
# Set when a connection is made
self._address = None
self._proxy_address = None
- # For logger.debug messsages
+ # For logger.debug messages
self.verbosity = 0
# Cleared when the send socket is full
self._can_send = Event()

View file

@@ -117,7 +117,7 @@ class Daemon:
async def _send(self, payload, processor):
"""Send a payload to be converted to JSON.
- Handles temporary connection issues. Daemon reponse errors
+ Handles temporary connection issues. Daemon response errors
are raise through DaemonError.
"""
def log_error(error):

View file

@@ -228,7 +228,7 @@ class History:
def _compact_hashX(self, hashX, hist_map, hist_list,
write_items, keys_to_delete):
- """Compres history for a hashX. hist_list is an ordered list of
+ """Compress history for a hashX. hist_list is an ordered list of
the histories to be compressed."""
# History entries (tx numbers) are 4 bytes each. Distribute
# over rows of up to 50KB in size. A fixed row size means
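The arithmetic behind this comment: at 4 bytes per entry, a row of up to 50KB holds 50,000 // 4 = 12,500 tx numbers (taking 50KB as 50,000 bytes, which is an assumption). A sketch of the fixed-size row layout it describes:

ENTRY_SIZE = 4           # each history entry (tx number) is 4 bytes
MAX_ROW_SIZE = 50_000    # rows of up to 50KB, per the comment
ENTRIES_PER_ROW = MAX_ROW_SIZE // ENTRY_SIZE  # 12,500 entries per row

def rows_for_history(tx_nums):
    # Illustrative only: distribute an ordered history over rows of a
    # fixed maximum size, as the docstring describes.
    for i in range(0, len(tx_nums), ENTRIES_PER_ROW):
        yield tx_nums[i:i + ENTRIES_PER_ROW]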

View file

@@ -385,7 +385,7 @@ class SessionManager:
return "peer '{}' added".format(real_name)
async def rpc_disconnect(self, session_ids):
- """Disconnect sesssions.
+ """Disconnect sessions.
session_ids: array of session IDs
"""
@@ -397,7 +397,7 @@ class SessionManager:
return await self._for_each_session(session_ids, close)
async def rpc_log(self, session_ids):
- """Toggle logging of sesssions.
+ """Toggle logging of sessions.
session_ids: array of session IDs
"""
@@ -414,7 +414,7 @@ class SessionManager:
try:
self.daemon.set_url(daemon_url)
except Exception as e:
- raise RPCError(BAD_REQUEST, f'an error occured: {e!r}')
+ raise RPCError(BAD_REQUEST, f'an error occurred: {e!r}')
return f'now using daemon at {self.daemon.logged_url()}'
async def rpc_stop(self):

View file

@@ -457,7 +457,7 @@ class TxInputTokenPay(TxInput):
self.script[1] == self.OP_ANON_MARKER)
def is_generation(self):
- # Transactions comming in from stealth addresses are seen by
+ # Transactions coming in from stealth addresses are seen by
# the blockchain as newly minted coins. The reverse, where coins
# are sent TO a stealth address, are seen by the blockchain as
# a coin burn.