Fix spelling

This commit is contained in:
Hugo 2018-10-18 14:40:37 +03:00
parent 67a15ea788
commit 3f704be85c
26 changed files with 35 additions and 35 deletions

View file

@@ -62,7 +62,7 @@ confidence=
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
-# disable everything first and then reenable specific checks. For example, if
+# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes

View file

@@ -444,7 +444,7 @@ most commands.
* Fixed https://github.com/lbryio/lbry/issues/923
* Fixed concurrent reflects opening too many files
* Fixed cases when reflecting would fail on error conditions
-* Fixed deadlocks from occuring during blob writes
+* Fixed deadlocks from occurring during blob writes
* Fixed and updated`lbrynet.tests.dht`
* Fixed redundant dht id
* Fixed dht `ping` method
@@ -831,7 +831,7 @@ most commands.
* Removed check_pending logic from Daemon
* Switched to txrequests so requests can use twisted event loop
* Renamed API command file_seed to file_set_status
-* Dont add expected payment to wallet when payment rate is 0
+* Don't add expected payment to wallet when payment rate is 0
### Fixed
* Fix restart procedure in DaemonControl
* Create download directory if it doesn't exist
@@ -862,7 +862,7 @@ most commands.
* Added string comparison to ClaimOutpoint (needed to look things up by outpoint)
* Remove unused API commands from daemon
* Fix file filter `outpoint`
-* Made dictionary key names in API commmand outputs to be more consistent
+* Made dictionary key names in API command outputs to be more consistent
### Added
* Add file filters: `claim_id`, `outpoint`, and `rowid`
* Make loggly logs less verbose
@@ -914,7 +914,7 @@ most commands.
* Include download url in version check
### Fixed
-* add misssing traceback to logging
+* add missing traceback to logging
## [0.8.3] - 2017-02-15
### Fixed

View file

@@ -1193,7 +1193,7 @@
{
"name": "claim_address",
"type": "str",
"description": "address where the claim is sent to, if not specified new address wil automatically be created",
"description": "address where the claim is sent to, if not specified new address will automatically be created",
"is_required": false
}
],

View file

@@ -1903,7 +1903,7 @@ Options:
publishing to a channel where only the certificate
private key is in the wallet.
--claim_address=<claim_address> : (str) address where the claim is sent to, if not specified
-new address wil automatically be created
+new address will automatically be created
Returns:
(dict) Dictionary containing result of the claim

View file

@@ -1743,7 +1743,7 @@ Args:
publishing to a channel where only the certificate
private key is in the wallet.
'claim_address' : (str) address where the claim is sent to, if not specified
-new address wil automatically be created
+new address will automatically be created
Returns:
(dict) Dictionary containing result of the claim

File diff suppressed because one or more lines are too long

View file

@@ -52,7 +52,7 @@ class BlobFile:
def open_for_writing(self, peer):
"""
open a blob file to be written by peer, supports concurrent
-writers, as long as they are from differnt peers.
+writers, as long as they are from different peers.
returns tuple of (writer, finished_deferred)
@@ -115,7 +115,7 @@ class BlobFile:
def verified(self):
"""
Protect verified from being modified by other classes.
-verified is True if a write to a blob has completed succesfully,
+verified is True if a write to a blob has completed successfully,
or a blob has been read to have the same length as specified
in init
"""

View file

@@ -1,2 +1,2 @@
-# dont touch this. CI server changes this during build/deployment
+# don't touch this. CI server changes this during build/deployment
BUILD = "dev"

View file

@@ -11,7 +11,7 @@ class Peer:
self.attempt_connection_at = None
# Number of times this Peer has been reported to be down, resets to 0 when it is up
self.down_count = 0
-# Number of succesful connections (with full protocol completion) to this peer
+# Number of successful connections (with full protocol completion) to this peer
self.success_count = 0
self.score = 0
self.stats = defaultdict(float) # {string stat_type, float count}

View file

@@ -353,7 +353,7 @@ class AvailabilityRequest(RequestHelper):
# save available blobs
blob_hashes = response_dict['available_blobs']
if not blob_hashes:
-# should not send any more requests as it doesnt have any blob we need
+# should not send any more requests as it doesn't have any blob we need
self.update_local_score(-10.0)
return True
for blob_hash in blob_hashes:

View file

@@ -246,7 +246,7 @@ def configure_twisted():
class LoggerNameFilter:
"""Filter a log record based on its name.
-Allows all info level and higher records to pass thru.
+Allows all info level and higher records to pass through.
Debug records pass if the log record name (or a parent) match
the input list of logger names.
"""

View file

@@ -161,7 +161,7 @@ class DeferredLockContextManager:
self._lock = lock
def __enter__(self):
-yield self._lock.aquire()
+yield self._lock.acquire()
def __exit__(self, exc_type, exc_val, exc_tb):
yield self._lock.release()
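
Note that `acquire()` returns a Deferred, and the `yield` statements above make `__enter__`/`__exit__` generators, so a bare `with` block would not actually wait for the lock. Twisted's DeferredLock is usually driven explicitly; a sketch:

    from twisted.internet import defer

    lock = defer.DeferredLock()

    @defer.inlineCallbacks
    def critical_section():
        yield lock.acquire()   # fires once the lock is held
        try:
            pass               # ... work under the lock ...
        finally:
            lock.release()

    # or let DeferredLock.run acquire, call, and release for you:
    # d = lock.run(some_function, arg)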

View file

@@ -53,10 +53,10 @@ class StreamBlobDecryptor:
def decrypt(self, write_func):
"""
-Decrypt blob and write its content useing write_func
+Decrypt blob and write its content using write_func
write_func - function that takes decrypted string as
-arugment and writes it somewhere
+argument and writes it somewhere
Returns:
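
The decrypt API takes a write callback; a minimal usage sketch (decryptor construction omitted, buffer name hypothetical):

    chunks = []

    def write_func(data):
        # receives each decrypted chunk; here we simply collect the bytes
        chunks.append(data)

    # decryptor.decrypt(write_func) would stream decrypted content into
    # `chunks`; b''.join(chunks) reassembles the plaintext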

View file

@@ -69,7 +69,7 @@ class Logger(logging.Logger):
Returns: a function that takes the following arguments:
err: twisted.python.failure.Failure
msg: the message to log, using normal logging string iterpolation.
-msg_args: the values to subtitute into `msg`
+msg_args: the values to substitute into `msg`
msg_kwargs: set `level` to change from the default ERROR severity. Other
keywoards are treated as normal log kwargs.
"""

View file

@@ -59,7 +59,7 @@ class IRoutingTable(Interface):
@param count: the amount of contacts to return
@type count: int
@param _rpcNodeID: Used during RPC, this is be the sender's Node ID
-Whatever ID is passed in the paramater will get
+Whatever ID is passed in the parameter will get
excluded from the list of returned contacts.
@type _rpcNodeID: str
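
A simplified sketch of the exclusion rule described above; the real table walks k-buckets, but the sender filter is the same idea:

    def find_close_nodes(contacts, count, sender_node_id=None):
        # the ID passed in never appears in the returned contacts
        eligible = [c for c in contacts if c.id != sender_node_id]
        return eligible[:count]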

View file

@@ -621,7 +621,7 @@ class Node(MockKademliaHelper):
# # Update the "last accessed" timestamp for the appropriate k-bucket
# self._routingTable.touchKBucket(key)
if len(shortlist) == 0:
log.warning("This node doesnt know any other nodes")
log.warning("This node doesn't know any other nodes")
# This node doesn't know of any other nodes
fakeDf = defer.Deferred()
fakeDf.callback([])
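
The two lines above are equivalent to Twisted's shorthand for a Deferred that has already fired:

    from twisted.internet import defer

    fakeDf = defer.succeed([])  # same as Deferred() + callback([])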

View file

@@ -311,7 +311,7 @@ class KademliaProtocol(protocol.DatagramProtocol):
If the data is spread over multiple UDP datagrams, the packets have the
following structure::
| | | | | |||||||||||| 0x00 |
-|Transmision|Total number|Sequence number| RPC ID |Header end|
+|Transmission|Total number|Sequence number| RPC ID |Header end|
| type ID | of packets |of this packet | | indicator|
| (1 byte) | (2 bytes) | (2 bytes) |(20 bytes)| (1 byte) |
| | | | | |||||||||||| |
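
The header layout above maps directly onto a struct format; a sketch (big-endian is an assumption, since the hunk does not show the byte order):

    import struct

    HEADER_FMT = '>BHH20sB'  # type, total, sequence, RPC ID, end marker
    HEADER_LEN = struct.calcsize(HEADER_FMT)  # 26 bytes

    def pack_header(msg_type, total_packets, seq_num, rpc_id):
        # rpc_id must be exactly 20 bytes
        return struct.pack(HEADER_FMT, msg_type, total_packets, seq_num,
                           rpc_id, 0x00)

    def unpack_header(datagram):
        return struct.unpack(HEADER_FMT, datagram[:HEADER_LEN])
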
@@ -357,7 +357,7 @@ class KademliaProtocol(protocol.DatagramProtocol):
except OSError as err:
if err.errno == errno.EWOULDBLOCK:
# i'm scared this may swallow important errors, but i get a million of these
-# on Linux and it doesnt seem to affect anything -grin
+# on Linux and it doesn't seem to affect anything -grin
log.warning("Can't send data to dht: EWOULDBLOCK")
elif err.errno == errno.ENETUNREACH:
# this should probably try to retransmit when the network connection is back
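
A sketch of the errno handling shown above, for a hypothetical sendto-style call:

    import errno

    def safe_send(sock, data, address):
        try:
            sock.sendto(data, address)
        except OSError as err:
            if err.errno == errno.EWOULDBLOCK:
                pass  # send buffer full: drop the datagram, as above
            elif err.errno == errno.ENETUNREACH:
                pass  # network unreachable: a retransmit could be scheduled
            else:
                raise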

View file

@@ -157,7 +157,7 @@ class TreeRoutingTable:
@param count: the amount of contacts to return, default of k (8)
@type count: int
@param sender_node_id: Used during RPC, this is be the sender's Node ID
-Whatever ID is passed in the paramater will get
+Whatever ID is passed in the parameter will get
excluded from the list of returned contacts.
@type sender_node_id: str

View file

@@ -118,7 +118,7 @@ class ReflectorServer(Protocol):
if self.receiving_blob:
self.blob_writer.write(data)
else:
-log.debug('Not yet recieving blob, data needs further processing')
+log.debug('Not yet receiving blob, data needs further processing')
self.request_buff += data
msg, extra_data = self._get_valid_response(self.request_buff)
if msg is not None:
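
A sketch of the buffer-then-parse pattern above, assuming a JSON wire format; raw_decode returns the parsed object plus the index where it ended, so trailing bytes (e.g. the start of a blob) survive as extra data:

    import json

    def get_valid_response(request_buff):
        try:
            msg, end = json.JSONDecoder().raw_decode(request_buff)
            return msg, request_buff[end:]
        except ValueError:
            return None, request_buff  # incomplete message: keep buffering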

View file

@@ -83,7 +83,7 @@ def download_it(peer, timeout, blob_hash):
if info:
break
-# there's some kind of race condition where it sometimes doesnt write the blob to disk in time
+# there's some kind of race condition where it sometimes doesn't write the blob to disk in time
time.sleep(0.1)
if info is not None:
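
A bounded variant of the wait loop above (hypothetical helper), which polls until the condition holds or the attempts run out:

    import time

    def wait_for(predicate, attempts=50, delay=0.1):
        for _ in range(attempts):
            if predicate():
                return True
            time.sleep(delay)
        return False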

View file

@@ -57,7 +57,7 @@ class MockUDPTransport(object):
dest = MockNetwork.peers[address][0]
debug_kademlia_packet(data, (self.address, self.port), address, self._node)
dest.datagramReceived(data, (self.address, self.port))
-else: # the node is sending to an address that doesnt currently exist, act like it never arrived
+else: # the node is sending to an address that doesn't currently exist, act like it never arrived
pass

View file

@@ -31,7 +31,7 @@ class TestStoreExpiration(TestKademliaBase):
announce_d = announcing_node.announceHaveBlob(blob_hash)
self.pump_clock(5+1)
storing_node_ids = yield announce_d
-self.assertEqual(len(storing_node_ids), 0) # cant store, wrong tokens, but they get nullified
+self.assertEqual(len(storing_node_ids), 0) # can't store, wrong tokens, but they get nullified
announce_d = announcing_node.announceHaveBlob(blob_hash)
self.pump_clock(5+1)

View file

@@ -33,7 +33,7 @@ def init_conf_windows(settings={}):
"""
There is no fork on windows, so imports
are freshly initialized in new processes.
-So conf needs to be intialized for new processes
+So conf needs to be initialized for new processes
"""
if os.name == 'nt':
original_settings = conf.settings
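
The docstring hinges on the fork/spawn difference; a minimal stdlib illustration:

    import multiprocessing as mp

    # Windows has no fork(): children are spawned and re-import every
    # module, so state built at import time (like conf.settings) must be
    # re-initialized in each new process
    if __name__ == '__main__':
        mp.set_start_method('spawn')  # the only start method on Windows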

View file

@@ -130,7 +130,7 @@ class TestIntegrationConnectionManager(TestCase):
self.server_port = None
def _init_connection_manager(self, seek_head_blob_first=False):
-# this import is requierd here so utils.call_later is replaced by self.clock.callLater
+# this import is required here so utils.call_later is replaced by self.clock.callLater
from lbrynet.core.client.ConnectionManager import ConnectionManager
self.connection_manager = ConnectionManager(self.downloader, self.rate_limiter,
[self.primary_request_creator], [])

View file

@@ -128,7 +128,7 @@ class BlobManagerTest(unittest.TestCase):
count = yield self.bm.count_should_announce_blobs()
self.assertEqual(1, count)
-# set should annouce to False
+# set should announce to False
yield self.bm.set_should_announce(blob_hash, should_announce=False)
out = yield self.bm.get_should_announce(blob_hash)
self.assertFalse(out)

View file

@@ -92,7 +92,7 @@ class GetStreamTests(unittest.TestCase):
@defer.inlineCallbacks
def test_init_exception(self):
"""
-test that if initialization would fail, by giving it invaild
+test that if initialization would fail, by giving it invalid
stream_info, that an exception is thrown
"""