From bce41ddab47063a7924223d1801842ad9f6b1aa4 Mon Sep 17 00:00:00 2001
From: Kay Kurokawa
Date: Fri, 2 Dec 2016 12:57:18 -0500
Subject: [PATCH 01/26] Do not return 'success' and 'reason' as outputs in
 claim commands

We throw an Exception instead
---
 lbrynet/core/Wallet.py           | 24 ++++++++++++++++++++++--
 lbrynet/lbrynet_daemon/Daemon.py |  6 ------
 2 files changed, 22 insertions(+), 8 deletions(-)

diff --git a/lbrynet/core/Wallet.py b/lbrynet/core/Wallet.py
index 17b9026fa..5d572a4fd 100644
--- a/lbrynet/core/Wallet.py
+++ b/lbrynet/core/Wallet.py
@@ -463,11 +463,13 @@ class Wallet(object):
                 meta_for_return[k] = new_metadata[k]
         return defer.succeed(Metadata(meta_for_return))

+
     def claim_name(self, name, bid, m):
         def _save_metadata(claim_out, metadata):
             if not claim_out['success']:
                 msg = 'Claim to name {} failed: {}'.format(name, claim_out['reason'])
                 raise Exception(msg)
+            claim_out.pop('success')
             claim_outpoint = ClaimOutpoint(claim_out['txid'], claim_out['nout'])
             log.debug("Saving metadata for claim %s %d" % (claim_outpoint['txid'], claim_outpoint['nout']))
             d = self._save_name_metadata(name, claim_outpoint, metadata['sources']['lbry_sd_hash'])
@@ -494,11 +496,29 @@ class Wallet(object):
         return d

     def abandon_claim(self, txid, nout):
+        def _parse_abandon_claim_out(claim_out):
+            if not claim_out['success']:
+                msg = 'Abandon of {}:{} failed: {}'.format(txid, nout, claim_out['reason'])
+                raise Exception(msg)
+            claim_out.pop('success')
+            return defer.succeed(claim_out)
+
         claim_outpoint = ClaimOutpoint(txid, nout)
-        return self._abandon_claim(claim_outpoint)
+        d = self._abandon_claim(claim_outpoint)
+        d.addCallback(lambda claim_out: _parse_abandon_claim_out(claim_out))
+        return d

     def support_claim(self, name, claim_id, amount):
-        return self._support_claim(name, claim_id, amount)
+        def _parse_support_claim_out(claim_out):
+            if not claim_out['success']:
+                msg = 'Support of {}:{} failed: {}'.format(name, claim_id, claim_out['reason'])
+                raise Exception(msg)
+            claim_out.pop('success')
+            return defer.succeed(claim_out)
+
+        d = self._support_claim(name, claim_id, amount)
+        d.addCallback(lambda claim_out: _parse_support_claim_out(claim_out))
+        return d

     def get_tx(self, txid):
         d = self._get_raw_tx(txid)
diff --git a/lbrynet/lbrynet_daemon/Daemon.py b/lbrynet/lbrynet_daemon/Daemon.py
index 826ebad49..ba8385a5c 100644
--- a/lbrynet/lbrynet_daemon/Daemon.py
+++ b/lbrynet/lbrynet_daemon/Daemon.py
@@ -1697,8 +1697,6 @@ class Daemon(AuthJSONRPCServer):
             'metadata': metadata dictionary
             optional 'fee'
         Returns:
-            'success' : True if claim was successful, False otherwise
-            'reason' : if not successful, give reason
             'txid' : txid of resulting transaction if successful
             'nout' : nout of the resulting support claim if successful
             'fee' : fee paid for the claim transaction if successful
@@ -1773,8 +1771,6 @@ class Daemon(AuthJSONRPCServer):
             'txid': txid of claim, string
             'nout': nout of claim, integer
         Return:
-            success : True if successful, False otherwise
-            reason : if not successful, give reason
             txid : txid of resulting transaction if successful
             fee : fee paid for the transaction if successful
         """
@@ -1818,8 +1814,6 @@ class Daemon(AuthJSONRPCServer):
             'claim_id': claim id of claim to support
             'amount': amount to support by
         Return:
-            success : True if successful, False otherwise
-            reason : if not successful, give reason
             txid : txid of resulting transaction if successful
             nout : nout of the resulting support claim if successful
             fee : fee paid for the transaction if successful

From c278972f26b3e850f8c97d658b7328da63ce25c6 Mon Sep 17
00:00:00 2001 From: Kay Kurokawa Date: Wed, 7 Dec 2016 09:48:37 -0500 Subject: [PATCH 02/26] adding unit tests for Wallet.py --- tests/unit/core/test_Wallet.py | 136 +++++++++++++++++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 tests/unit/core/test_Wallet.py diff --git a/tests/unit/core/test_Wallet.py b/tests/unit/core/test_Wallet.py new file mode 100644 index 000000000..17a203246 --- /dev/null +++ b/tests/unit/core/test_Wallet.py @@ -0,0 +1,136 @@ +from twisted.trial import unittest + +from twisted.internet import threads, defer +from lbrynet.core.Wallet import Wallet + +test_metadata = { +'license': 'NASA', +'fee': {'USD': {'amount': 0.01, 'address': 'baBYSK7CqGSn5KrEmNmmQwAhBSFgo6v47z'}}, +'ver': '0.0.3', +'description': 'test', +'language': 'en', +'author': 'test', +'title': 'test', +'sources': { + 'lbry_sd_hash': '8655f713819344980a9a0d67b198344e2c462c90f813e86f0c63789ab0868031f25c54d0bb31af6658e997e2041806eb'}, +'nsfw': False, +'content_type': 'video/mp4', +'thumbnail': 'test' +} + + +class MocLbryumWallet(Wallet): + def __init__(self): + pass + def get_name_claims(self): + return threads.deferToThread(lambda: []) + + def _save_name_metadata(self, name, claim_outpoint, sd_hash): + return defer.succeed(True) + + +class WalletTest(unittest.TestCase): + + def _check_exception(self, d): + def check(err): + with self.assertRaises(Exception): + err.raiseException() + d.addCallbacks(lambda _: self.assertTrue(False), lambda err: check(err)) + + def test_failed_send_name_claim(self): + def not_enough_funds_send_name_claim(self, name, val, amount): + claim_out = {'success':False, 'reason':'Not enough funds'} + return claim_out + MocLbryumWallet._send_name_claim = not_enough_funds_send_name_claim + wallet = MocLbryumWallet() + d = wallet.claim_name('test', 1, test_metadata) + self._check_exception(d) + return d + + def test_successful_send_name_claim(self): + test_claim_out = { + "claimid": "f43dc06256a69988bdbea09a58c80493ba15dcfa", + "fee": "0.00012", + "nout": 0, + "success": True, + "txid": "6f8180002ef4d21f5b09ca7d9648a54d213c666daf8639dc283e2fd47450269e" + } + + def check_out(claim_out): + self.assertTrue('success' not in claim_out) + self.assertEqual(claim_out['claimid'], test_claim_out['claimid']) + self.assertEqual(claim_out['fee'], test_claim_out['fee']) + self.assertEqual(claim_out['nout'], test_claim_out['nout']) + self.assertEqual(claim_out['txid'], test_claim_out['txid']) + + def success_send_name_claim(self, name, val, amount): + return test_claim_out + + MocLbryumWallet._send_name_claim = success_send_name_claim + wallet = MocLbryumWallet() + d = wallet.claim_name('test', 1, test_metadata) + d.addCallback(lambda claim_out: check_out(claim_out)) + return d + + def test_failed_support(self): + def failed_support_claim(self, name, claim_id, amount): + claim_out = {'success':False, 'reason':'Not enough funds'} + return threads.deferToThread(lambda: claim_out) + MocLbryumWallet._support_claim = failed_support_claim + wallet = MocLbryumWallet() + d = wallet.support_claim('test', "f43dc06256a69988bdbea09a58c80493ba15dcfa", 1) + self._check_exception(d) + return d + + def test_succesful_support(self): + test_support_out = { + "fee": "0.000129", + "nout": 0, + "success": True, + "txid": "11030a76521e5f552ca87ad70765d0cc52e6ea4c0dc0063335e6cf2a9a85085f" + } + + def check_out(claim_out): + self.assertTrue('success' not in claim_out) + self.assertEqual(claim_out['fee'], test_support_out['fee']) + self.assertEqual(claim_out['nout'], test_support_out['nout']) + 
self.assertEqual(claim_out['txid'], test_support_out['txid']) + + def success_support_claim(self, name, val, amount): + return threads.deferToThread(lambda: test_support_out) + MocLbryumWallet._support_claim = success_support_claim + wallet = MocLbryumWallet() + d = wallet.support_claim('test', "f43dc06256a69988bdbea09a58c80493ba15dcfa", 1) + d.addCallback(lambda claim_out: check_out(claim_out)) + return d + + def test_failed_abandon(self): + def failed_abandon_claim(self, claim_outpoint): + claim_out = {'success':False, 'reason':'Not enough funds'} + return threads.deferToThread(lambda: claim_out) + MocLbryumWallet._abandon_claim = failed_abandon_claim + wallet = MocLbryumWallet() + d = wallet.abandon_claim("11030a76521e5f552ca87ad70765d0cc52e6ea4c0dc0063335e6cf2a9a85085f", 1) + self._check_exception(d) + return d + + def test_successful_abandon(self): + test_abandon_out = { + "fee": "0.000096", + "success": True, + "txid": "0578c161ad8d36a7580c557d7444f967ea7f988e194c20d0e3c42c3cabf110dd" + } + + def check_out(claim_out): + self.assertTrue('success' not in claim_out) + self.assertEqual(claim_out['fee'], test_abandon_out['fee']) + self.assertEqual(claim_out['txid'], test_abandon_out['txid']) + + def success_abandon_claim(self, claim_outpoint): + return threads.deferToThread(lambda: test_abandon_out) + + MocLbryumWallet._abandon_claim = success_abandon_claim + wallet = MocLbryumWallet() + d = wallet.abandon_claim("0578c161ad8d36a7580c557d7444f967ea7f988e194c20d0e3c42c3cabf110dd", 1) + d.addCallback(lambda claim_out: check_out(claim_out)) + return d From fe4ea9b33a6bc30f06a23d614addd8940f6917bf Mon Sep 17 00:00:00 2001 From: Kay Kurokawa Date: Wed, 14 Dec 2016 11:53:41 -0500 Subject: [PATCH 03/26] use assertFailure, move around assertEqual arguments to standard locations --- tests/unit/core/test_Wallet.py | 42 +++++++++++++++------------------- 1 file changed, 18 insertions(+), 24 deletions(-) diff --git a/tests/unit/core/test_Wallet.py b/tests/unit/core/test_Wallet.py index 17a203246..0af068585 100644 --- a/tests/unit/core/test_Wallet.py +++ b/tests/unit/core/test_Wallet.py @@ -31,12 +31,6 @@ class MocLbryumWallet(Wallet): class WalletTest(unittest.TestCase): - def _check_exception(self, d): - def check(err): - with self.assertRaises(Exception): - err.raiseException() - d.addCallbacks(lambda _: self.assertTrue(False), lambda err: check(err)) - def test_failed_send_name_claim(self): def not_enough_funds_send_name_claim(self, name, val, amount): claim_out = {'success':False, 'reason':'Not enough funds'} @@ -44,11 +38,11 @@ class WalletTest(unittest.TestCase): MocLbryumWallet._send_name_claim = not_enough_funds_send_name_claim wallet = MocLbryumWallet() d = wallet.claim_name('test', 1, test_metadata) - self._check_exception(d) + self.assertFailure(d,Exception) return d def test_successful_send_name_claim(self): - test_claim_out = { + expected_claim_out = { "claimid": "f43dc06256a69988bdbea09a58c80493ba15dcfa", "fee": "0.00012", "nout": 0, @@ -58,13 +52,13 @@ class WalletTest(unittest.TestCase): def check_out(claim_out): self.assertTrue('success' not in claim_out) - self.assertEqual(claim_out['claimid'], test_claim_out['claimid']) - self.assertEqual(claim_out['fee'], test_claim_out['fee']) - self.assertEqual(claim_out['nout'], test_claim_out['nout']) - self.assertEqual(claim_out['txid'], test_claim_out['txid']) + self.assertEqual(expected_claim_out['claimid'], claim_out['claimid']) + self.assertEqual(expected_claim_out['fee'], claim_out['fee']) + self.assertEqual(expected_claim_out['nout'], 
claim_out['nout']) + self.assertEqual(expected_claim_out['txid'], claim_out['txid']) def success_send_name_claim(self, name, val, amount): - return test_claim_out + return expected_claim_out MocLbryumWallet._send_name_claim = success_send_name_claim wallet = MocLbryumWallet() @@ -79,11 +73,11 @@ class WalletTest(unittest.TestCase): MocLbryumWallet._support_claim = failed_support_claim wallet = MocLbryumWallet() d = wallet.support_claim('test', "f43dc06256a69988bdbea09a58c80493ba15dcfa", 1) - self._check_exception(d) + self.assertFailure(d,Exception) return d def test_succesful_support(self): - test_support_out = { + expected_support_out = { "fee": "0.000129", "nout": 0, "success": True, @@ -92,12 +86,12 @@ class WalletTest(unittest.TestCase): def check_out(claim_out): self.assertTrue('success' not in claim_out) - self.assertEqual(claim_out['fee'], test_support_out['fee']) - self.assertEqual(claim_out['nout'], test_support_out['nout']) - self.assertEqual(claim_out['txid'], test_support_out['txid']) + self.assertEqual(expected_support_out['fee'], claim_out['fee']) + self.assertEqual(expected_support_out['nout'], claim_out['nout']) + self.assertEqual(expected_support_out['txid'], claim_out['txid']) def success_support_claim(self, name, val, amount): - return threads.deferToThread(lambda: test_support_out) + return threads.deferToThread(lambda: expected_support_out) MocLbryumWallet._support_claim = success_support_claim wallet = MocLbryumWallet() d = wallet.support_claim('test', "f43dc06256a69988bdbea09a58c80493ba15dcfa", 1) @@ -111,11 +105,11 @@ class WalletTest(unittest.TestCase): MocLbryumWallet._abandon_claim = failed_abandon_claim wallet = MocLbryumWallet() d = wallet.abandon_claim("11030a76521e5f552ca87ad70765d0cc52e6ea4c0dc0063335e6cf2a9a85085f", 1) - self._check_exception(d) + self.assertFailure(d,Exception) return d def test_successful_abandon(self): - test_abandon_out = { + expected_abandon_out = { "fee": "0.000096", "success": True, "txid": "0578c161ad8d36a7580c557d7444f967ea7f988e194c20d0e3c42c3cabf110dd" @@ -123,11 +117,11 @@ class WalletTest(unittest.TestCase): def check_out(claim_out): self.assertTrue('success' not in claim_out) - self.assertEqual(claim_out['fee'], test_abandon_out['fee']) - self.assertEqual(claim_out['txid'], test_abandon_out['txid']) + self.assertEqual(expected_abandon_out['fee'], claim_out['fee']) + self.assertEqual(expected_abandon_out['txid'], claim_out['txid']) def success_abandon_claim(self, claim_outpoint): - return threads.deferToThread(lambda: test_abandon_out) + return threads.deferToThread(lambda: expected_abandon_out) MocLbryumWallet._abandon_claim = success_abandon_claim wallet = MocLbryumWallet() From f796f701f2956b75036722c956a054c4c65f08d7 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Sat, 10 Dec 2016 15:02:13 -0800 Subject: [PATCH 04/26] Immediately announce completed blobs It doesn't seem necessary to wait up to 60 seconds for a new blob to be announced to the dht. Immediately announce it and schedule the next announce time as usual. 
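As a rough sketch of the flow this patch introduces (the stub announcer, the 60-second interval, and the save callback below are illustrative assumptions, not the daemon's actual objects):

    import time

    from twisted.internet import defer

    HASH_REANNOUNCE_TIME = 60  # assumed interval; the real value comes from settings

    class StubAnnouncer(object):
        """Illustrative stand-in for the DHT hash announcer used in the diff."""
        def immediate_announce(self, blob_hashes):
            # A real announcer queues these hashes for an immediate DHT announce.
            return defer.succeed(blob_hashes)

    def blob_completed(blob_hash, length, announcer, save_completed_blob):
        # Schedule the next periodic announce a full interval from now...
        next_announce_time = time.time() + HASH_REANNOUNCE_TIME
        # save_completed_blob stands in for _add_completed_blob and must
        # return a Deferred.
        d = save_completed_blob(blob_hash, length, time.time(), next_announce_time)
        # ...but announce the new blob to the DHT immediately, instead of
        # waiting up to one announce cycle.
        d.addCallback(lambda _: announcer.immediate_announce([blob_hash]))
        return d

    # e.g. blob_completed('deadbeef', 1024, StubAnnouncer(), lambda *args: defer.succeed(None))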
--- lbrynet/core/BlobManager.py | 9 ++++++--- lbrynet/core/server/DHTHashAnnouncer.py | 2 ++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/lbrynet/core/BlobManager.py b/lbrynet/core/BlobManager.py index ed9285ba2..4d9f390fd 100644 --- a/lbrynet/core/BlobManager.py +++ b/lbrynet/core/BlobManager.py @@ -120,6 +120,7 @@ class DiskBlobManager(BlobManager): return self.blob_creator_type(self, self.blob_dir) def _make_new_blob(self, blob_hash, upload_allowed, length=None): + log.debug('Making a new blob for %s', blob_hash) blob = self.blob_type(self.blob_dir, blob_hash, upload_allowed, length) self.blobs[blob_hash] = blob d = self._completed_blobs([blob_hash]) @@ -143,9 +144,11 @@ class DiskBlobManager(BlobManager): def blob_completed(self, blob, next_announce_time=None): if next_announce_time is None: - next_announce_time = time.time() - return self._add_completed_blob(blob.blob_hash, blob.length, - time.time(), next_announce_time) + next_announce_time = time.time() + self.hash_reannounce_time + d = self._add_completed_blob(blob.blob_hash, blob.length, + time.time(), next_announce_time) + d.addCallback(lambda _: self.hash_announcer.immediate_announce([blob.blob_hash])) + return d def completed_blobs(self, blobs_to_check): return self._completed_blobs(blobs_to_check) diff --git a/lbrynet/core/server/DHTHashAnnouncer.py b/lbrynet/core/server/DHTHashAnnouncer.py index eca877f73..8ce55ffd9 100644 --- a/lbrynet/core/server/DHTHashAnnouncer.py +++ b/lbrynet/core/server/DHTHashAnnouncer.py @@ -42,6 +42,7 @@ class DHTHashAnnouncer(object): return defer.succeed(False) def _announce_available_hashes(self): + log.debug('Announcing available hashes') ds = [] for supplier in self.suppliers: d = supplier.hashes_to_announce() @@ -62,6 +63,7 @@ class DHTHashAnnouncer(object): def announce(): if len(self.hash_queue): h, announce_deferred = self.hash_queue.popleft() + log.debug('Announcing blob %s to dht', h) d = self.dht_node.announceHaveBlob(binascii.unhexlify(h), self.peer_port) d.chainDeferred(announce_deferred) d.addBoth(lambda _: reactor.callLater(0, announce)) From 32fa2460ff17da9ad255482f26110c42484a8f2a Mon Sep 17 00:00:00 2001 From: Jeremy Kauffman Date: Tue, 13 Dec 2016 15:27:23 -0500 Subject: [PATCH 05/26] sort the help functions --- lbrynet/lbrynet_daemon/Daemon.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lbrynet/lbrynet_daemon/Daemon.py b/lbrynet/lbrynet_daemon/Daemon.py index ba8385a5c..c4739e034 100644 --- a/lbrynet/lbrynet_daemon/Daemon.py +++ b/lbrynet/lbrynet_daemon/Daemon.py @@ -1353,7 +1353,7 @@ class Daemon(AuthJSONRPCServer): """ if not p: - return self._render_response(self.callable_methods.keys(), OK_CODE) + return self._render_response(sorted(self.callable_methods.keys()), OK_CODE) elif 'callable_during_start' in p.keys(): return self._render_response(self.allowed_during_startup, OK_CODE) elif 'function' in p.keys(): From 14ac2bac391d84ffc9d7b6bdb59ac5d54576d3a0 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Tue, 13 Dec 2016 17:08:29 -0600 Subject: [PATCH 06/26] pylint: fix trailing-whitespace --- lbrynet/conf.py | 6 +-- lbrynet/core/Wallet.py | 12 +++--- lbrynet/dht/COPYING | 4 +- lbrynet/dht/constants.py | 2 +- lbrynet/dht/contact.py | 14 +++---- lbrynet/dht/datastore.py | 2 +- lbrynet/dht/encoding.py | 34 ++++++++-------- lbrynet/dht/kbucket.py | 26 ++++++------ lbrynet/dht/msgformat.py | 20 +++++----- lbrynet/dht/msgtypes.py | 2 +- lbrynet/dht/node.py | 44 ++++++++++---------- lbrynet/dht/protocol.py | 2 +- lbrynet/dht/routingtable.py 
| 62 ++++++++++++++--------------- lbrynet/lbrynet_daemon/Daemon.py | 4 +- lbrynet/lbrynet_daemon/Publisher.py | 8 ++-- lbrynet/metadata/Metadata.py | 2 +- run_pylint.sh | 1 + 17 files changed, 123 insertions(+), 122 deletions(-) diff --git a/lbrynet/conf.py b/lbrynet/conf.py index 8444540d1..2806b25d5 100644 --- a/lbrynet/conf.py +++ b/lbrynet/conf.py @@ -214,7 +214,7 @@ class ApplicationSettings(Settings): self.BLOBFILES_DIR = "blobfiles" self.BLOB_SIZE = 2*MB self.LOG_FILE_NAME = "lbrynet.log" - self.LOG_POST_URL = "https://lbry.io/log-upload" + self.LOG_POST_URL = "https://lbry.io/log-upload" self.CRYPTSD_FILE_EXTENSION = ".cryptsd" self.API_ADDRESS = "lbryapi" self.ICON_PATH = "icons" if platform is WINDOWS else "app.icns" @@ -230,7 +230,7 @@ class ApplicationSettings(Settings): self.LOGGLY_TOKEN = 'LJEzATH4AzRgAwxjAP00LwZ2YGx3MwVgZTMuBQZ3MQuxLmOv' self.ANALYTICS_ENDPOINT = 'https://api.segment.io/v1' self.ANALYTICS_TOKEN = 'Ax5LZzR1o3q3Z3WjATASDwR5rKyHH0qOIRIbLmMXn2H=' - self.DB_REVISION_FILE_NAME = 'db_revision' + self.DB_REVISION_FILE_NAME = 'db_revision' Settings.__init__(self) @@ -299,7 +299,7 @@ class Config(DefaultSettings): return os.path.join(self.ensure_data_dir(), self.LOG_FILE_NAME) def get_db_revision_filename(self): - return os.path.join(self.ensure_data_dir(), self.DB_REVISION_FILE_NAME) + return os.path.join(self.ensure_data_dir(), self.DB_REVISION_FILE_NAME) def get_conf_filename(self): return get_settings_file_ext(self.ensure_data_dir()) diff --git a/lbrynet/core/Wallet.py b/lbrynet/core/Wallet.py index 5d572a4fd..479cd0626 100644 --- a/lbrynet/core/Wallet.py +++ b/lbrynet/core/Wallet.py @@ -361,7 +361,7 @@ class Wallet(object): except (TypeError, ValueError, ValidationError): return Failure(InvalidStreamInfoError(name, result['value'])) sd_hash = metadata['sources']['lbry_sd_hash'] - claim_outpoint = ClaimOutpoint(result['txid'], result['n']) + claim_outpoint = ClaimOutpoint(result['txid'], result['n']) d = self._save_name_metadata(name, claim_outpoint, sd_hash) d.addCallback(lambda _: self.get_claimid(name, result['txid'], result['n'])) d.addCallback(lambda cid: _log_success(cid)) @@ -382,7 +382,7 @@ class Wallet(object): d.addCallback(lambda claims: next(c for c in claims if c['name'] == name and c['nOut'] == claim_outpoint['nout'])) d.addCallback(lambda claim: self._update_claimid(claim['claimId'], name, ClaimOutpoint(txid, claim['nOut']))) return d - claim_outpoint = ClaimOutpoint(txid, nout) + claim_outpoint = ClaimOutpoint(txid, nout) d = self._get_claimid_for_tx(name, claim_outpoint) d.addCallback(_get_id_for_return) return d @@ -583,7 +583,7 @@ class Wallet(object): for claim in claims: if 'in claim trie' in claim: name_is_equal = 'name' in claim and str(claim['name']) == name - nout_is_equal = 'nOut' in claim and claim['nOut'] == claim_outpoint['nout'] + nout_is_equal = 'nOut' in claim and claim['nOut'] == claim_outpoint['nout'] if name_is_equal and nout_is_equal and 'value' in claim: try: value_dict = json.loads(claim['value']) @@ -682,7 +682,7 @@ class Wallet(object): d.addCallback( lambda _: self.db.runQuery("delete from name_metadata where name=? and txid=? and n=? and sd_hash=?", (name, claim_outpoint['txid'], UNSET_NOUT, sd_hash))) - + d.addCallback(lambda _: self.db.runQuery("insert into name_metadata values (?, ?, ?, ?)", (name, claim_outpoint['txid'], claim_outpoint['nout'], sd_hash))) return d @@ -698,7 +698,7 @@ class Wallet(object): d.addCallback( lambda _: self.db.runQuery("delete from claim_ids where claimId=? and name=? and txid=? 
and n=?", (claim_id, name, claim_outpoint['txid'], UNSET_NOUT))) - + d.addCallback(lambda r: self.db.runQuery("insert into claim_ids values (?, ?, ?, ?)", (claim_id, name, claim_outpoint['txid'], claim_outpoint['nout']))) d.addCallback(lambda _: claim_id) @@ -997,7 +997,7 @@ class LBRYumWallet(Wallet): def _send_name_claim_update(self, name, claim_id, claim_outpoint, value, amount): metadata = json.dumps(value) - log.debug("Update %s %d %f %s %s '%s'", claim_outpoint['txid'], claim_outpoint['nout'], + log.debug("Update %s %d %f %s %s '%s'", claim_outpoint['txid'], claim_outpoint['nout'], amount, name, claim_id, metadata) cmd = known_commands['update'] func = getattr(self.cmd_runner, cmd.name) diff --git a/lbrynet/dht/COPYING b/lbrynet/dht/COPYING index fc8a5de7e..cca7fc278 100644 --- a/lbrynet/dht/COPYING +++ b/lbrynet/dht/COPYING @@ -10,7 +10,7 @@ the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. - 0. Additional Definitions. + 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU @@ -111,7 +111,7 @@ the following: a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked - Version. + Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the diff --git a/lbrynet/dht/constants.py b/lbrynet/dht/constants.py index 853856a8a..d2e21bcae 100644 --- a/lbrynet/dht/constants.py +++ b/lbrynet/dht/constants.py @@ -10,7 +10,7 @@ """ This module defines the charaterizing constants of the Kademlia network C{checkRefreshInterval} and C{udpDatagramMaxSize} are implementation-specific -constants, and do not affect general Kademlia operation. +constants, and do not affect general Kademlia operation. """ ######### KADEMLIA CONSTANTS ########### diff --git a/lbrynet/dht/contact.py b/lbrynet/dht/contact.py index eac21518e..ae83f311b 100644 --- a/lbrynet/dht/contact.py +++ b/lbrynet/dht/contact.py @@ -10,7 +10,7 @@ class Contact(object): """ Encapsulation for remote contact - + This class contains information on a single remote contact, and also provides a direct RPC API to the remote node which it represents """ @@ -20,7 +20,7 @@ class Contact(object): self.port = udpPort self._networkProtocol = networkProtocol self.commTime = firstComm - + def __eq__(self, other): if isinstance(other, Contact): return self.id == other.id @@ -28,7 +28,7 @@ class Contact(object): return self.id == other else: return False - + def __ne__(self, other): if isinstance(other, Contact): return self.id != other.id @@ -41,20 +41,20 @@ class Contact(object): compact_ip = reduce( lambda buff, x: buff + bytearray([int(x)]), self.address.split('.'), bytearray()) return str(compact_ip) - + def __str__(self): return '<%s.%s object; IP address: %s, UDP port: %d>' % ( self.__module__, self.__class__.__name__, self.address, self.port) - + def __getattr__(self, name): """ This override allows the host node to call a method of the remote node (i.e. this contact) as if it was a local function. 
- + For instance, if C{remoteNode} is a instance of C{Contact}, the following will result in C{remoteNode}'s C{test()} method to be called with argument C{123}:: remoteNode.test(123) - + Such a RPC method call will return a Deferred, which will callback when the contact responds with the result (or an error occurs). This happens via this contact's C{_networkProtocol} object (i.e. the diff --git a/lbrynet/dht/datastore.py b/lbrynet/dht/datastore.py index e78e31107..8983f2bb6 100644 --- a/lbrynet/dht/datastore.py +++ b/lbrynet/dht/datastore.py @@ -16,7 +16,7 @@ import constants class DataStore(UserDict.DictMixin): """ Interface for classes implementing physical storage (for data published via the "STORE" RPC) for the Kademlia DHT - + @note: This provides an interface for a dict-like object """ def keys(self): diff --git a/lbrynet/dht/encoding.py b/lbrynet/dht/encoding.py index 304495200..2ef8d2e12 100644 --- a/lbrynet/dht/encoding.py +++ b/lbrynet/dht/encoding.py @@ -14,47 +14,47 @@ class DecodeError(Exception): class Encoding(object): """ Interface for RPC message encoders/decoders - + All encoding implementations used with this library should inherit and implement this. """ def encode(self, data): """ Encode the specified data - + @param data: The data to encode This method has to support encoding of the following types: C{str}, C{int} and C{long} Any additional data types may be supported as long as the implementing class's C{decode()} method can successfully decode them. - + @return: The encoded data @rtype: str """ def decode(self, data): """ Decode the specified data string - + @param data: The data (byte string) to decode. @type data: str - + @return: The decoded data (in its correct type) """ class Bencode(Encoding): """ Implementation of a Bencode-based algorithm (Bencode is the encoding algorithm used by Bittorrent). - + @note: This algorithm differs from the "official" Bencode algorithm in that it can encode/decode floating point values in addition to integers. 
""" - + def encode(self, data): """ Encoder implementation of the Bencode algorithm - + @param data: The data to encode @type data: int, long, tuple, list, dict or str - + @return: The encoded data @rtype: str """ @@ -76,7 +76,7 @@ class Bencode(Encoding): encodedDictItems += self.encode(data[key]) return 'd%se' % encodedDictItems elif type(data) == float: - # This (float data type) is a non-standard extension to the original Bencode algorithm + # This (float data type) is a non-standard extension to the original Bencode algorithm return 'f%fe' % data elif data == None: # This (None/NULL data type) is a non-standard extension @@ -85,16 +85,16 @@ class Bencode(Encoding): else: print data raise TypeError, "Cannot bencode '%s' object" % type(data) - + def decode(self, data): - """ Decoder implementation of the Bencode algorithm - + """ Decoder implementation of the Bencode algorithm + @param data: The encoded data @type data: str - + @note: This is a convenience wrapper for the recursive decoding algorithm, C{_decodeRecursive} - + @return: The decoded data, as a native Python type @rtype: int, list, dict or str """ @@ -104,11 +104,11 @@ class Bencode(Encoding): return self._decodeRecursive(data)[0] except ValueError as e: raise DecodeError, e.message - + @staticmethod def _decodeRecursive(data, startIndex=0): """ Actual implementation of the recursive Bencode algorithm - + Do not call this; use C{decode()} instead """ if data[startIndex] == 'i': diff --git a/lbrynet/dht/kbucket.py b/lbrynet/dht/kbucket.py index 58ceb8eed..2ccb088d2 100644 --- a/lbrynet/dht/kbucket.py +++ b/lbrynet/dht/kbucket.py @@ -31,11 +31,11 @@ class KBucket(object): def addContact(self, contact): """ Add contact to _contact list in the right order. This will move the contact to the end of the k-bucket if it is already present. - + @raise kademlia.kbucket.BucketFull: Raised when the bucket is full and the contact isn't in the bucket already - + @param contact: The contact to add @type contact: kademlia.contact.Contact """ @@ -57,7 +57,7 @@ class KBucket(object): def getContacts(self, count=-1, excludeContact=None): """ Returns a list containing up to the first count number of contacts - + @param count: The amount of contacts to return (if 0 or less, return all contacts) @type count: int @@ -65,12 +65,12 @@ class KBucket(object): the list of returned values, it will be discarded before returning. If a C{str} is passed as this argument, it must be the - contact's ID. + contact's ID. @type excludeContact: kademlia.contact.Contact or str - - + + @raise IndexError: If the number of requested contacts is too large - + @return: Return up to the first count number of contacts in a list If no contacts are present an empty is returned @rtype: list @@ -97,7 +97,7 @@ class KBucket(object): # enough contacts in list else: contactList = self._contacts[0:count] - + if excludeContact in contactList: contactList.remove(excludeContact) @@ -105,24 +105,24 @@ class KBucket(object): def removeContact(self, contact): """ Remove given contact from list - + @param contact: The contact to remove, or a string containing the contact's node ID @type contact: kademlia.contact.Contact or str - + @raise ValueError: The specified contact is not in this bucket """ self._contacts.remove(contact) - + def keyInRange(self, key): """ Tests whether the specified key (i.e. 
node ID) is in the range of the n-bit ID space covered by this k-bucket (in otherwords, it returns whether or not the specified key should be placed in this k-bucket) - + @param key: The key to test @type key: str or int - + @return: C{True} if the key is in this k-bucket's range, or C{False} if not. @rtype: bool diff --git a/lbrynet/dht/msgformat.py b/lbrynet/dht/msgformat.py index 9c6b773c8..027a91d5f 100644 --- a/lbrynet/dht/msgformat.py +++ b/lbrynet/dht/msgformat.py @@ -11,37 +11,37 @@ import msgtypes class MessageTranslator(object): """ Interface for RPC message translators/formatters - + Classes inheriting from this should provide a translation services between the classes used internally by this Kademlia implementation and the actual data that is transmitted between nodes. """ def fromPrimitive(self, msgPrimitive): """ Create an RPC Message from a message's string representation - + @param msgPrimitive: The unencoded primitive representation of a message @type msgPrimitive: str, int, list or dict - + @return: The translated message object @rtype: entangled.kademlia.msgtypes.Message """ - + def toPrimitive(self, message): """ Create a string representation of a message - + @param message: The message object @type message: msgtypes.Message - + @return: The message's primitive representation in a particular messaging format @rtype: str, int, list or dict """ - + class DefaultFormat(MessageTranslator): """ The default on-the-wire message format for this library """ typeRequest, typeResponse, typeError = range(3) headerType, headerMsgID, headerNodeID, headerPayload, headerArgs = range(5) - + def fromPrimitive(self, msgPrimitive): msgType = msgPrimitive[self.headerType] if msgType == self.typeRequest: @@ -62,8 +62,8 @@ class DefaultFormat(MessageTranslator): # Unknown message, no payload msg = msgtypes.Message(msgPrimitive[self.headerMsgID], msgPrimitive[self.headerNodeID]) return msg - - def toPrimitive(self, message): + + def toPrimitive(self, message): msg = {self.headerMsgID: message.id, self.headerNodeID: message.nodeID} if isinstance(message, msgtypes.RequestMessage): diff --git a/lbrynet/dht/msgtypes.py b/lbrynet/dht/msgtypes.py index de97fd239..a2365b501 100644 --- a/lbrynet/dht/msgtypes.py +++ b/lbrynet/dht/msgtypes.py @@ -22,7 +22,7 @@ class RequestMessage(Message): def __init__(self, nodeID, method, methodArgs, rpcID=None): if rpcID == None: hash = hashlib.sha384() - hash.update(str(random.getrandbits(255))) + hash.update(str(random.getrandbits(255))) rpcID = hash.digest() Message.__init__(self, rpcID, nodeID) self.request = method diff --git a/lbrynet/dht/node.py b/lbrynet/dht/node.py index 6a011bffa..ab46c8cd0 100644 --- a/lbrynet/dht/node.py +++ b/lbrynet/dht/node.py @@ -27,7 +27,7 @@ log = logging.getLogger(__name__) def rpcmethod(func): """ Decorator to expose Node methods as remote procedure calls - + Apply this decorator to methods in the Node class (or a subclass) in order to make them remotely callable via the DHT's RPC mechanism. """ @@ -36,13 +36,13 @@ def rpcmethod(func): class Node(object): """ Local node in the Kademlia network - + This class represents a single local node in a Kademlia network; in other words, this class encapsulates an Entangled-using application's "presence" in a Kademlia network. - + In Entangled, all interactions with the Kademlia network by a client - application is performed via this class (or a subclass). + application is performed via this class (or a subclass). 
""" def __init__(self, id=None, udpPort=4000, dataStore=None, routingTableClass=None, networkProtocol=None, lbryid=None, @@ -61,7 +61,7 @@ class Node(object): exposed. This should be a class, not an object, in order to allow the Node to pass an auto-generated node ID to the routingtable object - upon instantiation (if necessary). + upon instantiation (if necessary). @type routingTable: entangled.kademlia.routingtable.RoutingTable @param networkProtocol: The network protocol to use. This can be overridden from the default to (for example) @@ -138,7 +138,7 @@ class Node(object): def joinNetwork(self, knownNodeAddresses=None): """ Causes the Node to join the Kademlia network; normally, this should be called before any other DHT operations. - + @param knownNodeAddresses: A sequence of tuples containing IP address information for existing nodes on the Kademlia network, in the format: @@ -329,12 +329,12 @@ class Node(object): def iterativeFindNode(self, key): """ The basic Kademlia node lookup operation - + Call this to find a remote node in the P2P overlay network. - + @param key: the n-bit key (i.e. the node or value ID) to search for @type key: str - + @return: This immediately returns a deferred object, which will return a list of k "closest" contacts (C{kademlia.contact.Contact} objects) to the specified key as soon as the operation is @@ -345,12 +345,12 @@ class Node(object): def iterativeFindValue(self, key): """ The Kademlia search operation (deterministic) - + Call this to retrieve data from the DHT. - + @param key: the n-bit key (i.e. the value ID) to search for @type key: str - + @return: This immediately returns a deferred object, which will return either one of two things: - If the value was found, it will return a Python @@ -409,7 +409,7 @@ class Node(object): """ Remove the contact with the specified node ID from this node's table of known nodes. This is a simple wrapper for the same method in this object's RoutingTable object - + @param contactID: The node ID of the contact to remove @type contactID: str """ @@ -418,10 +418,10 @@ class Node(object): def findContact(self, contactID): """ Find a entangled.kademlia.contact.Contact object for the specified cotact ID - + @param contactID: The contact ID of the required Contact object @type contactID: str - + @return: Contact object of remote node with the specified node ID, or None if the contact was not found @rtype: twisted.internet.defer.Deferred @@ -444,7 +444,7 @@ class Node(object): @rpcmethod def ping(self): """ Used to verify contact between two Kademlia nodes - + @rtype: str """ return 'pong' @@ -452,7 +452,7 @@ class Node(object): @rpcmethod def store(self, key, value, originalPublisherID=None, self_store=False, **kwargs): """ Store the received data in this node's local hash table - + @param key: The hashtable key of the data @type key: str @param value: The actual data (the value associated with C{key}) @@ -467,7 +467,7 @@ class Node(object): @type age: int @rtype: str - + @todo: Since the data (value) may be large, passing it around as a buffer (which is the case currently) might not be a good idea... will have to fix this (perhaps use a stream from the Protocol class?) 
@@ -576,7 +576,7 @@ class Node(object): def _generateID(self): """ Generates an n-bit pseudo-random identifier - + @return: A globally unique n-bit pseudo-random identifier @rtype: str """ @@ -586,12 +586,12 @@ class Node(object): def _iterativeFind(self, key, startupShortlist=None, rpc='findNode'): """ The basic Kademlia iterative lookup operation (for nodes/values) - + This builds a list of k "closest" contacts through iterative use of the "FIND_NODE" RPC, or if C{findValue} is set to C{True}, using the "FIND_VALUE" RPC, in which case the value (if found) may be returned instead of a list of contacts - + @param key: the n-bit key (i.e. the node or value ID) to search for @type key: str @param startupShortlist: A list of contacts to use as the starting @@ -605,7 +605,7 @@ class Node(object): other operations that piggy-back on the basic Kademlia lookup operation (Entangled's "delete" RPC, for instance). @type rpc: str - + @return: If C{findValue} is C{True}, the algorithm will stop as soon as a data value for C{key} is found, and return a dictionary containing the key and the found value. Otherwise, it will diff --git a/lbrynet/dht/protocol.py b/lbrynet/dht/protocol.py index 9ec42d3a5..8d8e383b9 100644 --- a/lbrynet/dht/protocol.py +++ b/lbrynet/dht/protocol.py @@ -192,7 +192,7 @@ class KademliaProtocol(protocol.DatagramProtocol): @note: The header used for breaking up large data segments will possibly be moved out of the KademliaProtocol class in the future, into something similar to a message translator/encoder - class (see C{kademlia.msgformat} and C{kademlia.encoding}). + class (see C{kademlia.msgformat} and C{kademlia.encoding}). """ if len(data) > self.msgSizeLimit: # We have to spread the data over multiple UDP datagrams, diff --git a/lbrynet/dht/routingtable.py b/lbrynet/dht/routingtable.py index 92d6d6c72..c228c3372 100644 --- a/lbrynet/dht/routingtable.py +++ b/lbrynet/dht/routingtable.py @@ -30,10 +30,10 @@ class RoutingTable(object): @param contact: The contact to add to this node's k-buckets @type contact: kademlia.contact.Contact """ - + def distance(self, keyOne, keyTwo): """ Calculate the XOR result between two string variables - + @return: XOR result of two long variables @rtype: long """ @@ -44,7 +44,7 @@ class RoutingTable(object): def findCloseNodes(self, key, count, _rpcNodeID=None): """ Finds a number of known nodes closest to the node/value with the specified key. - + @param key: the n-bit key (i.e. the node or value ID) to search for @type key: str @param count: the amount of contacts to return @@ -53,9 +53,9 @@ class RoutingTable(object): Whatever ID is passed in the paramater will get excluded from the list of returned contacts. @type _rpcNodeID: str - + @return: A list of node contacts (C{kademlia.contact.Contact instances}) - closest to the specified key. + closest to the specified key. This method will return C{k} (or C{count}, if specified) contacts if at all possible; it will only return fewer if the node is returning all of the contacts that it knows of. @@ -63,7 +63,7 @@ class RoutingTable(object): """ def getContact(self, contactID): """ Returns the (known) contact with the specified node ID - + @raise ValueError: No contact with the specified contact ID is known by this node """ @@ -83,7 +83,7 @@ class RoutingTable(object): will be refreshed, regardless of the time they were last accessed. 
@type force: bool - + @return: A list of node ID's that the parent node should search for in order to refresh the routing Table @rtype: list @@ -91,14 +91,14 @@ class RoutingTable(object): def removeContact(self, contactID): """ Remove the contact with the specified node ID from the routing table - + @param contactID: The node ID of the contact to remove @type contactID: str """ def touchKBucket(self, key): """ Update the "last accessed" timestamp of the k-bucket which covers the range containing the specified key in the key/ID space - + @param key: A key in the range of the target k-bucket @type key: str """ @@ -106,13 +106,13 @@ class RoutingTable(object): class TreeRoutingTable(RoutingTable): """ This class implements a routing table used by a Node class. - + The Kademlia routing table is a binary tree whose leaves are k-buckets, where each k-bucket contains nodes with some common prefix of their IDs. This prefix is the k-bucket's position in the binary tree; it therefore covers some range of ID values, and together all of the k-buckets cover the entire n-bit ID (or key) space (with no overlap). - + @note: In this implementation, nodes in the tree (the k-buckets) are added dynamically, as needed; this technique is described in the 13-page version of the Kademlia paper, in section 2.4. It does, however, use the @@ -162,11 +162,11 @@ class TreeRoutingTable(RoutingTable): # the k-bucket. This implementation follows section # 2.2 regarding this point. headContact = self._buckets[bucketIndex]._contacts[0] - + def replaceContact(failure): """ Callback for the deferred PING RPC to see if the head node in the k-bucket is still responding - + @type failure: twisted.python.failure.Failure """ failure.trap(TimeoutError) @@ -180,18 +180,18 @@ class TreeRoutingTable(RoutingTable): pass # ...and add the new one at the tail of the bucket self.addContact(contact) - + # Ping the least-recently seen contact in this k-bucket headContact = self._buckets[bucketIndex]._contacts[0] df = headContact.ping() # If there's an error (i.e. timeout), remove the head # contact, and append the new one df.addErrback(replaceContact) - + def findCloseNodes(self, key, count, _rpcNodeID=None): """ Finds a number of known nodes closest to the node/value with the specified key. - + @param key: the n-bit key (i.e. the node or value ID) to search for @type key: str @param count: the amount of contacts to return @@ -200,9 +200,9 @@ class TreeRoutingTable(RoutingTable): Whatever ID is passed in the paramater will get excluded from the list of returned contacts. @type _rpcNodeID: str - + @return: A list of node contacts (C{kademlia.contact.Contact instances}) - closest to the specified key. + closest to the specified key. This method will return C{k} (or C{count}, if specified) contacts if at all possible; it will only return fewer if the node is returning all of the contacts that it knows of. 
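The distance() method documented earlier in this file computes Kademlia's XOR metric; a small sketch in the module's Python 2 idiom, treating node IDs as big-endian byte strings:

    def distance(key_one, key_two):
        # Interpret each raw-byte node ID as a long integer and XOR them;
        # a smaller result means "closer" in the key space.
        val_one = long(key_one.encode('hex'), 16)
        val_two = long(key_two.encode('hex'), 16)
        return val_one ^ val_two

    # e.g. distance('\x01', '\x03') == 2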
@@ -219,7 +219,7 @@ class TreeRoutingTable(RoutingTable): i = 1 canGoLower = bucketIndex-i >= 0 canGoHigher = bucketIndex+i < len(self._buckets) - # Fill up the node list to k nodes, starting with the closest neighbouring nodes known + # Fill up the node list to k nodes, starting with the closest neighbouring nodes known while len(closestNodes) < constants.k and (canGoLower or canGoHigher): #TODO: this may need to be optimized if canGoLower: @@ -237,7 +237,7 @@ class TreeRoutingTable(RoutingTable): def getContact(self, contactID): """ Returns the (known) contact with the specified node ID - + @raise ValueError: No contact with the specified contact ID is known by this node """ @@ -265,7 +265,7 @@ class TreeRoutingTable(RoutingTable): will be refreshed, regardless of the time they were last accessed. @type force: bool - + @return: A list of node ID's that the parent node should search for in order to refresh the routing Table @rtype: list @@ -282,7 +282,7 @@ class TreeRoutingTable(RoutingTable): def removeContact(self, contactID): """ Remove the contact with the specified node ID from the routing table - + @param contactID: The node ID of the contact to remove @type contactID: str """ @@ -296,7 +296,7 @@ class TreeRoutingTable(RoutingTable): def touchKBucket(self, key): """ Update the "last accessed" timestamp of the k-bucket which covers the range containing the specified key in the key/ID space - + @param key: A key in the range of the target k-bucket @type key: str """ @@ -306,10 +306,10 @@ class TreeRoutingTable(RoutingTable): def _kbucketIndex(self, key): """ Calculate the index of the k-bucket which is responsible for the specified key (or ID) - + @param key: The key for which to find the appropriate k-bucket index @type key: str - + @return: The index of the k-bucket responsible for the specified key @rtype: int """ @@ -324,7 +324,7 @@ class TreeRoutingTable(RoutingTable): def _randomIDInBucketRange(self, bucketIndex): """ Returns a random ID in the specified k-bucket's range - + @param bucketIndex: The index of the k-bucket to use @type bucketIndex: int """ @@ -342,7 +342,7 @@ class TreeRoutingTable(RoutingTable): def _splitBucket(self, oldBucketIndex): """ Splits the specified k-bucket into two new buckets which together cover the same range in the key/ID space - + @param oldBucketIndex: The index of k-bucket to split (in this table's list of k-buckets) @type oldBucketIndex: int @@ -372,7 +372,7 @@ class OptimizedTreeRoutingTable(TreeRoutingTable): TreeRoutingTable.__init__(self, parentNodeID) # Cache containing nodes eligible to replace stale k-bucket entries self._replacementCache = {} - + def addContact(self, contact): """ Add the given contact to the correct k-bucket; if it already exists, its status will be updated @@ -415,11 +415,11 @@ class OptimizedTreeRoutingTable(TreeRoutingTable): elif len(self._replacementCache) >= constants.k: self._replacementCache.pop(0) self._replacementCache[bucketIndex].append(contact) - + def removeContact(self, contactID): """ Remove the contact with the specified node ID from the routing table - + @param contactID: The node ID of the contact to remove @type contactID: str """ @@ -430,7 +430,7 @@ class OptimizedTreeRoutingTable(TreeRoutingTable): #print 'removeContact(): Contact not in routing table' return contact.failedRPCs += 1 - if contact.failedRPCs >= 5: + if contact.failedRPCs >= 5: self._buckets[bucketIndex].removeContact(contactID) # Replace this stale contact with one from our replacemnent cache, if we have any if 
self._replacementCache.has_key(bucketIndex): diff --git a/lbrynet/lbrynet_daemon/Daemon.py b/lbrynet/lbrynet_daemon/Daemon.py index c4739e034..8b8929735 100644 --- a/lbrynet/lbrynet_daemon/Daemon.py +++ b/lbrynet/lbrynet_daemon/Daemon.py @@ -407,7 +407,7 @@ class Daemon(AuthJSONRPCServer): # claim_out is dictionary containing 'txid' and 'nout' def _add_to_pending_claims(self, name, claim_out): txid = claim_out['txid'] - nout = claim_out['nout'] + nout = claim_out['nout'] log.info("Adding lbry://%s to pending claims, txid %s nout %d" % (name, txid, nout)) self.pending_claims[name] = (txid, nout) return claim_out @@ -1498,7 +1498,7 @@ class Daemon(AuthJSONRPCServer): 'name': name to look up, string, do not include lbry:// prefix 'txid': optional, if specified, look for claim with this txid 'nout': optional, if specified, look for claim with this nout - + Returns: txid, amount, value, n, height """ diff --git a/lbrynet/lbrynet_daemon/Publisher.py b/lbrynet/lbrynet_daemon/Publisher.py index d91d2a89a..2f973a5fc 100644 --- a/lbrynet/lbrynet_daemon/Publisher.py +++ b/lbrynet/lbrynet_daemon/Publisher.py @@ -40,11 +40,11 @@ class Publisher(object): def start(self, name, file_path, bid, metadata): log.info('Starting publish for %s', name) def _show_result(): - log.info("Success! Published %s --> lbry://%s txid: %s nout: %d", + log.info("Success! Published %s --> lbry://%s txid: %s nout: %d", self.file_name, self.publish_name, self.txid, self.nout) out = {} out['nout'] = self.nout - out['txid'] = self.txid + out['txid'] = self.txid return defer.succeed(out) self.publish_name = name @@ -137,10 +137,10 @@ class Publisher(object): msg = 'Failed to claim name:{}'.format(claim_out['reason']) defer.fail(Exception(msg)) txid = claim_out['txid'] - nout = claim_out['nout'] + nout = claim_out['nout'] log.debug('Name claimed using txid: %s, nout: %d', txid, nout) self.txid = txid - self.nout = nout + self.nout = nout d = self.wallet.claim_name(self.publish_name, self.bid_amount, m) d.addCallback(set_txid_nout) diff --git a/lbrynet/metadata/Metadata.py b/lbrynet/metadata/Metadata.py index 82b866d4d..56e94ec30 100644 --- a/lbrynet/metadata/Metadata.py +++ b/lbrynet/metadata/Metadata.py @@ -38,7 +38,7 @@ class Metadata(StructuredDict): def __init__(self, metadata, migrate=True, target_version=None): if not isinstance(metadata, dict): - raise TypeError("metadata is not a dictionary") + raise TypeError("metadata is not a dictionary") starting_version = metadata.get('ver', '0.0.1') StructuredDict.__init__(self, metadata, starting_version, migrate, target_version) diff --git a/run_pylint.sh b/run_pylint.sh index 41618b4fd..99ee24924 100755 --- a/run_pylint.sh +++ b/run_pylint.sh @@ -8,4 +8,5 @@ pylint -E --disable=inherit-non-class --disable=no-member \ --enable=unused-import \ --enable=bad-whitespace \ --enable=line-too-long \ + --enable=trailing-whitespace \ lbrynet $@ From 7bce37a720d20f2620a0826fedbd5f2c27bca640 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Tue, 13 Dec 2016 17:16:12 -0600 Subject: [PATCH 07/26] pylint: add check for missing-final-newline --- lbrynet/core/BlobInfo.py | 2 +- lbrynet/core/DownloadOption.py | 2 +- lbrynet/core/HashAnnouncer.py | 2 +- lbrynet/core/Offer.py | 2 +- lbrynet/core/PeerFinder.py | 2 +- lbrynet/core/PeerManager.py | 2 +- lbrynet/core/__init__.py | 2 +- lbrynet/core/client/ClientRequest.py | 2 +- lbrynet/core/cryptoutils.py | 2 +- lbrynet/core/server/BlobAvailabilityHandler.py | 2 +- lbrynet/core/server/ServerProtocol.py | 2 +- lbrynet/core/sqlite_helpers.py | 2 +- 
lbrynet/cryptstream/__init__.py | 2 +- lbrynet/db_migrator/migrate0to1.py | 2 +- lbrynet/dht/constants.py | 2 +- lbrynet/dht/hashwatcher.py | 2 +- lbrynet/dht_scripts.py | 2 +- lbrynet/lbryfile/client/EncryptedFileMetadataHandler.py | 2 +- lbrynet/lbryfilemanager/EncryptedFileStatusReport.py | 2 +- lbrynet/lbryfilemanager/__init__.py | 2 +- lbrynet/lbrylive/LiveBlob.py | 2 +- lbrynet/lbrylive/LiveStreamCreator.py | 2 +- lbrynet/lbrylive/LiveStreamMetadataManager.py | 2 +- lbrynet/lbrylive/StreamDescriptor.py | 2 +- lbrynet/lbrylive/client/LiveStreamMetadataHandler.py | 2 +- lbrynet/lbrylive/client/LiveStreamOptions.py | 2 +- lbrynet/lbrylive/client/LiveStreamProgressManager.py | 2 +- lbrynet/lbrylive/server/LiveBlobInfoQueryHandler.py | 2 +- lbrynet/lbrynet_daemon/DaemonServer.py | 2 +- lbrynet/lbrynet_daemon/auth/util.py | 2 +- lbrynet/lbrynet_daemon/daemon_scripts/Autofetcher.py | 2 +- lbrynet/pointtraderclient/__init__.py | 2 +- run_pylint.sh | 1 + tests/dht/testProtocol.py | 2 +- 34 files changed, 34 insertions(+), 33 deletions(-) diff --git a/lbrynet/core/BlobInfo.py b/lbrynet/core/BlobInfo.py index d0e66c641..19fe72253 100644 --- a/lbrynet/core/BlobInfo.py +++ b/lbrynet/core/BlobInfo.py @@ -15,4 +15,4 @@ class BlobInfo(object): def __init__(self, blob_hash, blob_num, length): self.blob_hash = blob_hash self.blob_num = blob_num - self.length = length \ No newline at end of file + self.length = length diff --git a/lbrynet/core/DownloadOption.py b/lbrynet/core/DownloadOption.py index 87964fa46..6a4446b20 100644 --- a/lbrynet/core/DownloadOption.py +++ b/lbrynet/core/DownloadOption.py @@ -18,4 +18,4 @@ class DownloadOption(object): self.long_description = long_description self.short_description = short_description self.default_value = default_value - self.default_value_description = default_value_description \ No newline at end of file + self.default_value_description = default_value_description diff --git a/lbrynet/core/HashAnnouncer.py b/lbrynet/core/HashAnnouncer.py index dd0efaf54..841f685b6 100644 --- a/lbrynet/core/HashAnnouncer.py +++ b/lbrynet/core/HashAnnouncer.py @@ -12,4 +12,4 @@ class DummyHashAnnouncer(object): pass def immediate_announce(self, *args): - pass \ No newline at end of file + pass diff --git a/lbrynet/core/Offer.py b/lbrynet/core/Offer.py index 48b4b56fa..fb4641d57 100644 --- a/lbrynet/core/Offer.py +++ b/lbrynet/core/Offer.py @@ -59,4 +59,4 @@ class Offer(object): elif reply_message == Offer.RATE_UNSET: self.unset() else: - raise Exception("Unknown offer reply %s" % str(reply_message)) \ No newline at end of file + raise Exception("Unknown offer reply %s" % str(reply_message)) diff --git a/lbrynet/core/PeerFinder.py b/lbrynet/core/PeerFinder.py index 461d1804e..3f2339de2 100644 --- a/lbrynet/core/PeerFinder.py +++ b/lbrynet/core/PeerFinder.py @@ -16,4 +16,4 @@ class DummyPeerFinder(object): return defer.succeed([]) def get_most_popular_hashes(self, num_to_return): - return [] \ No newline at end of file + return [] diff --git a/lbrynet/core/PeerManager.py b/lbrynet/core/PeerManager.py index bf83d3e1f..1c5816158 100644 --- a/lbrynet/core/PeerManager.py +++ b/lbrynet/core/PeerManager.py @@ -11,4 +11,4 @@ class PeerManager(object): return peer peer = Peer(host, port) self.peers.append(peer) - return peer \ No newline at end of file + return peer diff --git a/lbrynet/core/__init__.py b/lbrynet/core/__init__.py index 2dff7714f..6ac1f3432 100644 --- a/lbrynet/core/__init__.py +++ b/lbrynet/core/__init__.py @@ -4,4 +4,4 @@ Classes and functions which can be used by 
any application wishing to make use o This includes classes for connecting to other peers and downloading blobs from them, listening for connections from peers and responding to their requests, managing locally stored blobs, sending and receiving payments, and locating peers in the DHT. -""" \ No newline at end of file +""" diff --git a/lbrynet/core/client/ClientRequest.py b/lbrynet/core/client/ClientRequest.py index 106982ccc..6dcd89455 100644 --- a/lbrynet/core/client/ClientRequest.py +++ b/lbrynet/core/client/ClientRequest.py @@ -24,4 +24,4 @@ class ClientBlobRequest(ClientPaidRequest): self.write = write_func self.finished_deferred = finished_deferred self.cancel = cancel_func - self.blob = blob \ No newline at end of file + self.blob = blob diff --git a/lbrynet/core/cryptoutils.py b/lbrynet/core/cryptoutils.py index d77013c92..7c0c5c40c 100644 --- a/lbrynet/core/cryptoutils.py +++ b/lbrynet/core/cryptoutils.py @@ -15,4 +15,4 @@ def sign_with_pass_phrase(m, pass_phrase): def verify_signature(m, signature, pub_key): - return seccure.verify(m, signature, pub_key, curve="brainpoolp384r1") \ No newline at end of file + return seccure.verify(m, signature, pub_key, curve="brainpoolp384r1") diff --git a/lbrynet/core/server/BlobAvailabilityHandler.py b/lbrynet/core/server/BlobAvailabilityHandler.py index dbd373a36..e8530d612 100644 --- a/lbrynet/core/server/BlobAvailabilityHandler.py +++ b/lbrynet/core/server/BlobAvailabilityHandler.py @@ -56,4 +56,4 @@ class BlobAvailabilityHandler(object): def _get_available_blobs(self, requested_blobs): d = self.blob_manager.completed_blobs(requested_blobs) - return d \ No newline at end of file + return d diff --git a/lbrynet/core/server/ServerProtocol.py b/lbrynet/core/server/ServerProtocol.py index 42043779b..df52dedb0 100644 --- a/lbrynet/core/server/ServerProtocol.py +++ b/lbrynet/core/server/ServerProtocol.py @@ -91,4 +91,4 @@ class ServerProtocolFactory(ServerFactory): def __init__(self, rate_limiter, query_handler_factories, peer_manager): self.rate_limiter = rate_limiter self.query_handler_factories = query_handler_factories - self.peer_manager = peer_manager \ No newline at end of file + self.peer_manager = peer_manager diff --git a/lbrynet/core/sqlite_helpers.py b/lbrynet/core/sqlite_helpers.py index c75e7ae48..14d81d716 100644 --- a/lbrynet/core/sqlite_helpers.py +++ b/lbrynet/core/sqlite_helpers.py @@ -20,4 +20,4 @@ def rerun_if_locked(f): d.addErrback(rerun, *args, **kwargs) return d - return wrapper \ No newline at end of file + return wrapper diff --git a/lbrynet/cryptstream/__init__.py b/lbrynet/cryptstream/__init__.py index df825f573..ff8842a67 100644 --- a/lbrynet/cryptstream/__init__.py +++ b/lbrynet/cryptstream/__init__.py @@ -5,4 +5,4 @@ Crypt Streams are encrypted blobs and metadata tying those blobs together. At le metadata is generally stored in a Stream Descriptor File, for example containing a public key used to bind blobs to the stream and a symmetric key used to encrypt the blobs. The list of blobs may or may not be present. 
-""" \ No newline at end of file +""" diff --git a/lbrynet/db_migrator/migrate0to1.py b/lbrynet/db_migrator/migrate0to1.py index 21edef1f6..562137ea5 100644 --- a/lbrynet/db_migrator/migrate0to1.py +++ b/lbrynet/db_migrator/migrate0to1.py @@ -304,4 +304,4 @@ # info_db.commit() # peer_db.commit() # info_db.close() -# peer_db.close() \ No newline at end of file +# peer_db.close() diff --git a/lbrynet/dht/constants.py b/lbrynet/dht/constants.py index d2e21bcae..9c1119546 100644 --- a/lbrynet/dht/constants.py +++ b/lbrynet/dht/constants.py @@ -49,4 +49,4 @@ checkRefreshInterval = refreshTimeout/5 #: be spread accross several UDP packets. udpDatagramMaxSize = 8192 # 8 KB -key_bits = 384 \ No newline at end of file +key_bits = 384 diff --git a/lbrynet/dht/hashwatcher.py b/lbrynet/dht/hashwatcher.py index fbe37202a..f7270eea2 100644 --- a/lbrynet/dht/hashwatcher.py +++ b/lbrynet/dht/hashwatcher.py @@ -32,4 +32,4 @@ class HashWatcher(): def _remove_old_hashes(self): remove_time = datetime.datetime.now() - datetime.timedelta(minutes=10) - self.hashes = [h for h in self.hashes if h[1] < remove_time] \ No newline at end of file + self.hashes = [h for h in self.hashes if h[1] < remove_time] diff --git a/lbrynet/dht_scripts.py b/lbrynet/dht_scripts.py index 7d6dec167..dc8709015 100644 --- a/lbrynet/dht_scripts.py +++ b/lbrynet/dht_scripts.py @@ -100,4 +100,4 @@ def get_hosts_for_hash_in_dht(): def announce_hash_to_dht(): - run_dht_script(announce_hash) \ No newline at end of file + run_dht_script(announce_hash) diff --git a/lbrynet/lbryfile/client/EncryptedFileMetadataHandler.py b/lbrynet/lbryfile/client/EncryptedFileMetadataHandler.py index 1444a61a3..bd09dfdfc 100644 --- a/lbrynet/lbryfile/client/EncryptedFileMetadataHandler.py +++ b/lbrynet/lbryfile/client/EncryptedFileMetadataHandler.py @@ -36,4 +36,4 @@ class EncryptedFileMetadataHandler(object): else: log.debug("Setting _final_blob_num to %s", str(blob_num - 1)) self._final_blob_num = blob_num - 1 - return infos \ No newline at end of file + return infos diff --git a/lbrynet/lbryfilemanager/EncryptedFileStatusReport.py b/lbrynet/lbryfilemanager/EncryptedFileStatusReport.py index 2062c5ff7..61d61a2a3 100644 --- a/lbrynet/lbryfilemanager/EncryptedFileStatusReport.py +++ b/lbrynet/lbryfilemanager/EncryptedFileStatusReport.py @@ -3,4 +3,4 @@ class EncryptedFileStatusReport(object): self.name = name self.num_completed = num_completed self.num_known = num_known - self.running_status = running_status \ No newline at end of file + self.running_status = running_status diff --git a/lbrynet/lbryfilemanager/__init__.py b/lbrynet/lbryfilemanager/__init__.py index 6f2017173..e41afc21b 100644 --- a/lbrynet/lbryfilemanager/__init__.py +++ b/lbrynet/lbryfilemanager/__init__.py @@ -4,4 +4,4 @@ Classes and functions used to create and download LBRY Files. LBRY Files are Crypt Streams created from any regular file. The whole file is read at the time that the LBRY File is created, so all constituent blobs are known and included in the stream descriptor file. 
-""" \ No newline at end of file +""" diff --git a/lbrynet/lbrylive/LiveBlob.py b/lbrynet/lbrylive/LiveBlob.py index e1e5448a7..46bf54f7a 100644 --- a/lbrynet/lbrylive/LiveBlob.py +++ b/lbrynet/lbrylive/LiveBlob.py @@ -21,4 +21,4 @@ class LiveStreamBlobMaker(CryptStreamBlobMaker): def _return_info(self, blob_hash): return LiveBlobInfo(blob_hash, self.blob_num, self.length, binascii.hexlify(self.iv), - self.revision, None) \ No newline at end of file + self.revision, None) diff --git a/lbrynet/lbrylive/LiveStreamCreator.py b/lbrynet/lbrylive/LiveStreamCreator.py index 393c2fda0..169f4668d 100644 --- a/lbrynet/lbrylive/LiveStreamCreator.py +++ b/lbrynet/lbrylive/LiveStreamCreator.py @@ -174,4 +174,4 @@ class StdinStreamProducer(object): self.consumer.write(data) def childConnectionLost(self, fd, reason): - self.stopProducing() \ No newline at end of file + self.stopProducing() diff --git a/lbrynet/lbrylive/LiveStreamMetadataManager.py b/lbrynet/lbrylive/LiveStreamMetadataManager.py index b7591a7ce..76bb6f906 100644 --- a/lbrynet/lbrylive/LiveStreamMetadataManager.py +++ b/lbrynet/lbrylive/LiveStreamMetadataManager.py @@ -387,4 +387,4 @@ class TempLiveStreamMetadataManager(DHTHashSupplier): if announce_time < current_time: self.streams[stream_hash]['announce_time'] = next_announce_time stream_hashes.append(stream_hash) - return stream_hashes \ No newline at end of file + return stream_hashes diff --git a/lbrynet/lbrylive/StreamDescriptor.py b/lbrynet/lbrylive/StreamDescriptor.py index f9e2b496e..1977c263c 100644 --- a/lbrynet/lbrylive/StreamDescriptor.py +++ b/lbrynet/lbrylive/StreamDescriptor.py @@ -135,4 +135,4 @@ class LiveStreamDescriptorValidator(object): return info def get_length_of_stream(self): - return None \ No newline at end of file + return None diff --git a/lbrynet/lbrylive/client/LiveStreamMetadataHandler.py b/lbrynet/lbrylive/client/LiveStreamMetadataHandler.py index 2a5dcc40e..d3856898d 100644 --- a/lbrynet/lbrylive/client/LiveStreamMetadataHandler.py +++ b/lbrynet/lbrylive/client/LiveStreamMetadataHandler.py @@ -344,4 +344,4 @@ class LiveStreamMetadataHandler(object): peer.update_score(-10.0) if reason.check(ConnectionClosedBeforeResponseError): return - return reason \ No newline at end of file + return reason diff --git a/lbrynet/lbrylive/client/LiveStreamOptions.py b/lbrynet/lbrylive/client/LiveStreamOptions.py index 4b57bd5fb..21961c746 100644 --- a/lbrynet/lbrylive/client/LiveStreamOptions.py +++ b/lbrynet/lbrylive/client/LiveStreamOptions.py @@ -71,4 +71,4 @@ class LiveStreamOptions(object): "Allow" ), ] - return options \ No newline at end of file + return options diff --git a/lbrynet/lbrylive/client/LiveStreamProgressManager.py b/lbrynet/lbrylive/client/LiveStreamProgressManager.py index ac277ed76..2869ada61 100644 --- a/lbrynet/lbrylive/client/LiveStreamProgressManager.py +++ b/lbrynet/lbrylive/client/LiveStreamProgressManager.py @@ -88,4 +88,4 @@ class LiveStreamProgressManager(StreamProgressManager): reactor.callLater(0, self._output_loop) else: self.outputting_d.callback(True) - self.outputting_d = None \ No newline at end of file + self.outputting_d = None diff --git a/lbrynet/lbrylive/server/LiveBlobInfoQueryHandler.py b/lbrynet/lbrylive/server/LiveBlobInfoQueryHandler.py index c2fc47277..2fce90b6b 100644 --- a/lbrynet/lbrylive/server/LiveBlobInfoQueryHandler.py +++ b/lbrynet/lbrylive/server/LiveBlobInfoQueryHandler.py @@ -181,4 +181,4 @@ class CryptBlobInfoQueryHandler(object): dl.addCallback(ensure_streams_match) dl.addCallback(lambda _: get_blob_infos()) - 
return dl \ No newline at end of file + return dl diff --git a/lbrynet/lbrynet_daemon/DaemonServer.py b/lbrynet/lbrynet_daemon/DaemonServer.py index 9231221c5..12bd17499 100644 --- a/lbrynet/lbrynet_daemon/DaemonServer.py +++ b/lbrynet/lbrynet_daemon/DaemonServer.py @@ -64,4 +64,4 @@ def create_auth_session(root): portal_to_realm = portal.Portal(realm, [checker, ]) factory = guard.BasicCredentialFactory('Login to lbrynet api') _lbrynet_server = guard.HTTPAuthSessionWrapper(portal_to_realm, [factory, ]) - return _lbrynet_server \ No newline at end of file + return _lbrynet_server diff --git a/lbrynet/lbrynet_daemon/auth/util.py b/lbrynet/lbrynet_daemon/auth/util.py index c1de700b5..eaef0896b 100644 --- a/lbrynet/lbrynet_daemon/auth/util.py +++ b/lbrynet/lbrynet_daemon/auth/util.py @@ -89,4 +89,4 @@ def initialize_api_key_file(key_path): def get_auth_message(message_dict): - return json.dumps(message_dict, sort_keys=True) \ No newline at end of file + return json.dumps(message_dict, sort_keys=True) diff --git a/lbrynet/lbrynet_daemon/daemon_scripts/Autofetcher.py b/lbrynet/lbrynet_daemon/daemon_scripts/Autofetcher.py index cd7ed02cb..fc73373f2 100644 --- a/lbrynet/lbrynet_daemon/daemon_scripts/Autofetcher.py +++ b/lbrynet/lbrynet_daemon/daemon_scripts/Autofetcher.py @@ -65,4 +65,4 @@ class Autofetcher(object): def run(api): fetcher = Autofetcher(api) - fetcher.start() \ No newline at end of file + fetcher.start() diff --git a/lbrynet/pointtraderclient/__init__.py b/lbrynet/pointtraderclient/__init__.py index f6337cb30..4c5b43dde 100644 --- a/lbrynet/pointtraderclient/__init__.py +++ b/lbrynet/pointtraderclient/__init__.py @@ -7,4 +7,4 @@ registering. The public key is used to spend points, and also used as an address are sent. To spend points, the public key signs a message containing the amount and the destination public key and sends it to the point trader server. To check for payments, the recipient sends a signed message asking the point trader server for its balance. 
-""" \ No newline at end of file +""" diff --git a/run_pylint.sh b/run_pylint.sh index 99ee24924..47cb2cee8 100755 --- a/run_pylint.sh +++ b/run_pylint.sh @@ -9,4 +9,5 @@ pylint -E --disable=inherit-non-class --disable=no-member \ --enable=bad-whitespace \ --enable=line-too-long \ --enable=trailing-whitespace \ + --enable=missing-final-newline \ lbrynet $@ diff --git a/tests/dht/testProtocol.py b/tests/dht/testProtocol.py index 91758c1cc..b6329f639 100644 --- a/tests/dht/testProtocol.py +++ b/tests/dht/testProtocol.py @@ -239,4 +239,4 @@ def suite(): if __name__ == '__main__': # If this module is executed from the commandline, run all its tests - unittest.TextTestRunner().run(suite()) \ No newline at end of file + unittest.TextTestRunner().run(suite()) From 083092ad5e83f04943d8181796e0e9b6c6389cc6 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Tue, 13 Dec 2016 17:23:07 -0600 Subject: [PATCH 08/26] pylint: add check for mixed-indentation --- run_pylint.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/run_pylint.sh b/run_pylint.sh index 47cb2cee8..daf9dc922 100755 --- a/run_pylint.sh +++ b/run_pylint.sh @@ -10,4 +10,5 @@ pylint -E --disable=inherit-non-class --disable=no-member \ --enable=line-too-long \ --enable=trailing-whitespace \ --enable=missing-final-newline \ + --enable=mixed-indentation \ lbrynet $@ From c30ea049592b6f3a856c197bc0f4105335945c17 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Tue, 13 Dec 2016 17:37:23 -0600 Subject: [PATCH 09/26] Remove commented code This is the result of running eradicate (https://github.com/myint/eradicate) on the code and double-checking the changes. --- lbrynet/core/BlobManager.py | 3 - lbrynet/core/Wallet.py | 1 - lbrynet/core/client/ClientProtocol.py | 3 - lbrynet/core/server/ServerRequestHandler.py | 1 - lbrynet/cryptstream/CryptBlob.py | 1 - lbrynet/db_migrator/migrate0to1.py | 307 ------------------ lbrynet/dht/node.py | 50 --- lbrynet/dht/protocol.py | 6 - lbrynet/dht/routingtable.py | 5 - lbrynet/dhttest.py | 18 - lbrynet/lbrylive/LiveStreamCreator.py | 1 - .../lbrylive/client/LiveStreamDownloader.py | 2 - lbrynet/lbrynet_daemon/DaemonRequest.py | 1 - lbrynet/lbrynet_daemon/auth/server.py | 1 - lbrynet/reflector/server/server.py | 1 - .../windows/lbry-win32-app/LBRYWin32App.py | 1 - setup.py | 8 - tests/dht/testNode.py | 189 +---------- tests/dht/testProtocol.py | 42 --- tests/unit/dht/test_datastore.py | 58 ---- 20 files changed, 14 insertions(+), 685 deletions(-) delete mode 100644 lbrynet/db_migrator/migrate0to1.py diff --git a/lbrynet/core/BlobManager.py b/lbrynet/core/BlobManager.py index 4d9f390fd..7b5c2fad2 100644 --- a/lbrynet/core/BlobManager.py +++ b/lbrynet/core/BlobManager.py @@ -102,7 +102,6 @@ class DiskBlobManager(BlobManager): if self._next_manage_call is not None and self._next_manage_call.active(): self._next_manage_call.cancel() self._next_manage_call = None - #d = self.db_conn.close() self.db_conn = None return defer.succeed(True) @@ -348,8 +347,6 @@ class DiskBlobManager(BlobManager): d.addCallback(lambda r: r[0][0] if len(r) else Failure(NoSuchBlobError(blob))) return d - #length, verified_time, next_announce_time = json.loads(self.db.Get(blob)) - #return length @rerun_if_locked def _update_blob_verified_timestamp(self, blob, timestamp): diff --git a/lbrynet/core/Wallet.py b/lbrynet/core/Wallet.py index 479cd0626..a20e844b3 100644 --- a/lbrynet/core/Wallet.py +++ b/lbrynet/core/Wallet.py @@ -214,7 +214,6 @@ class Wallet(object): once the service has been rendered """ rounded_amount = 
Decimal(str(round(amount, 8))) - #if peer in self.peer_addresses: if self.wallet_balance >= self.total_reserved_points + rounded_amount: self.total_reserved_points += rounded_amount return ReservedPoints(identifier, rounded_amount) diff --git a/lbrynet/core/client/ClientProtocol.py b/lbrynet/core/client/ClientProtocol.py index 19b0b6f43..0c36bbf6c 100644 --- a/lbrynet/core/client/ClientProtocol.py +++ b/lbrynet/core/client/ClientProtocol.py @@ -209,7 +209,6 @@ class ClientProtocol(Protocol): log.debug("Asking for another request.") from twisted.internet import reactor reactor.callLater(0, self._ask_for_request) - #self._ask_for_request() else: log.debug("Not asking for another request.") self.transport.loseConnection() @@ -230,8 +229,6 @@ class ClientProtocol(Protocol): # TODO: protocol had such a mechanism. log.debug("Closing the connection to %s because the download of blob %s was canceled", str(self.peer), str(self._blob_download_request.blob)) - #self.transport.loseConnection() - #return True return err ######### IRateLimited ######### diff --git a/lbrynet/core/server/ServerRequestHandler.py b/lbrynet/core/server/ServerRequestHandler.py index af2f7e691..c57a63be2 100644 --- a/lbrynet/core/server/ServerRequestHandler.py +++ b/lbrynet/core/server/ServerRequestHandler.py @@ -63,7 +63,6 @@ class ServerRequestHandler(object): #IConsumer stuff def registerProducer(self, producer, streaming): - #assert self.file_sender == producer self.producer = producer assert streaming is False producer.resumeProducing() diff --git a/lbrynet/cryptstream/CryptBlob.py b/lbrynet/cryptstream/CryptBlob.py index 750adf7d1..2148f10c1 100644 --- a/lbrynet/cryptstream/CryptBlob.py +++ b/lbrynet/cryptstream/CryptBlob.py @@ -48,7 +48,6 @@ class StreamBlobDecryptor(object): self.buff += data self.len_read += len(data) write_bytes() - #write_func(remove_padding(self.cipher.decrypt(self.buff))) d = self.blob.read(decrypt_bytes) d.addCallback(lambda _: finish_decrypt()) diff --git a/lbrynet/db_migrator/migrate0to1.py b/lbrynet/db_migrator/migrate0to1.py deleted file mode 100644 index 562137ea5..000000000 --- a/lbrynet/db_migrator/migrate0to1.py +++ /dev/null @@ -1,307 +0,0 @@ -# import sqlite3 -# import unqlite -# import leveldb -# import shutil -# import os -# import logging -# import json -# -# -# log = logging.getLogger(__name__) -# -# -# known_dbs = ['lbryfile_desc.db', 'lbryfiles.db', 'valuable_blobs.db', 'blobs.db', -# 'lbryfile_blob.db', 'lbryfile_info.db', 'settings.db', 'blind_settings.db', -# 'blind_peers.db', 'blind_info.db', 'lbryfile_info.db', 'lbryfile_manager.db', -# 'live_stream.db', 'stream_info.db', 'stream_blob.db', 'stream_desc.db'] -# -# -# def do_move(from_dir, to_dir): -# for known_db in known_dbs: -# known_db_path = os.path.join(from_dir, known_db) -# if os.path.exists(known_db_path): -# log.debug("Moving %s to %s", -# os.path.abspath(known_db_path), -# os.path.abspath(os.path.join(to_dir, known_db))) -# shutil.move(known_db_path, os.path.join(to_dir, known_db)) -# else: -# log.debug("Did not find %s", os.path.abspath(known_db_path)) -# -# -# def do_migration(db_dir): -# old_dir = os.path.join(db_dir, "_0_to_1_old") -# new_dir = os.path.join(db_dir, "_0_to_1_new") -# try: -# log.info("Moving dbs from the real directory to %s", os.path.abspath(old_dir)) -# os.makedirs(old_dir) -# do_move(db_dir, old_dir) -# except: -# log.error("An error occurred moving the old db files.") -# raise -# try: -# log.info("Creating the new directory in %s", os.path.abspath(new_dir)) -# os.makedirs(new_dir) -# -# 
except: -# log.error("An error occurred creating the new directory.") -# raise -# try: -# log.info("Doing the migration") -# migrate_blob_db(old_dir, new_dir) -# migrate_lbryfile_db(old_dir, new_dir) -# migrate_livestream_db(old_dir, new_dir) -# migrate_ptc_db(old_dir, new_dir) -# migrate_lbryfile_manager_db(old_dir, new_dir) -# migrate_settings_db(old_dir, new_dir) -# migrate_repeater_db(old_dir, new_dir) -# log.info("Migration succeeded") -# except: -# log.error("An error occurred during the migration. Restoring.") -# do_move(old_dir, db_dir) -# raise -# try: -# log.info("Moving dbs in the new directory to the real directory") -# do_move(new_dir, db_dir) -# db_revision = open(os.path.join(db_dir, 'db_revision'), mode='w+') -# db_revision.write("1") -# db_revision.close() -# os.rmdir(new_dir) -# except: -# log.error("An error occurred moving the new db files.") -# raise -# return old_dir -# -# -# def migrate_blob_db(old_db_dir, new_db_dir): -# old_blob_db_path = os.path.join(old_db_dir, "blobs.db") -# if not os.path.exists(old_blob_db_path): -# return True -# -# old_db = leveldb.LevelDB(old_blob_db_path) -# new_db_conn = sqlite3.connect(os.path.join(new_db_dir, "blobs.db")) -# c = new_db_conn.cursor() -# c.execute("create table if not exists blobs (" + -# " blob_hash text primary key, " + -# " blob_length integer, " + -# " last_verified_time real, " + -# " next_announce_time real" -# ")") -# new_db_conn.commit() -# c = new_db_conn.cursor() -# for blob_hash, blob_info in old_db.RangeIter(): -# blob_length, verified_time, announce_time = json.loads(blob_info) -# c.execute("insert into blobs values (?, ?, ?, ?)", -# (blob_hash, blob_length, verified_time, announce_time)) -# new_db_conn.commit() -# new_db_conn.close() -# -# -# def migrate_lbryfile_db(old_db_dir, new_db_dir): -# old_lbryfile_db_path = os.path.join(old_db_dir, "lbryfiles.db") -# if not os.path.exists(old_lbryfile_db_path): -# return True -# -# stream_info_db = leveldb.LevelDB(os.path.join(old_db_dir, "lbryfile_info.db")) -# stream_blob_db = leveldb.LevelDB(os.path.join(old_db_dir, "lbryfile_blob.db")) -# stream_desc_db = leveldb.LevelDB(os.path.join(old_db_dir, "lbryfile_desc.db")) -# -# db_conn = sqlite3.connect(os.path.join(new_db_dir, "lbryfile_info.db")) -# c = db_conn.cursor() -# c.execute("create table if not exists lbry_files (" + -# " stream_hash text primary key, " + -# " key text, " + -# " stream_name text, " + -# " suggested_file_name text" + -# ")") -# c.execute("create table if not exists lbry_file_blobs (" + -# " blob_hash text, " + -# " stream_hash text, " + -# " position integer, " + -# " iv text, " + -# " length integer, " + -# " foreign key(stream_hash) references lbry_files(stream_hash)" + -# ")") -# c.execute("create table if not exists lbry_file_descriptors (" + -# " sd_blob_hash TEXT PRIMARY KEY, " + -# " stream_hash TEXT, " + -# " foreign key(stream_hash) references lbry_files(stream_hash)" + -# ")") -# db_conn.commit() -# c = db_conn.cursor() -# for stream_hash, stream_info in stream_info_db.RangeIter(): -# key, name, suggested_file_name = json.loads(stream_info) -# c.execute("insert into lbry_files values (?, ?, ?, ?)", -# (stream_hash, key, name, suggested_file_name)) -# db_conn.commit() -# c = db_conn.cursor() -# for blob_hash_stream_hash, blob_info in stream_blob_db.RangeIter(): -# b_h, s_h = json.loads(blob_hash_stream_hash) -# position, iv, length = json.loads(blob_info) -# c.execute("insert into lbry_file_blobs values (?, ?, ?, ?, ?)", -# (b_h, s_h, position, iv, length)) -# db_conn.commit() -# c 
= db_conn.cursor() -# for sd_blob_hash, stream_hash in stream_desc_db.RangeIter(): -# c.execute("insert into lbry_file_descriptors values (?, ?)", -# (sd_blob_hash, stream_hash)) -# db_conn.commit() -# db_conn.close() -# -# -# def migrate_livestream_db(old_db_dir, new_db_dir): -# old_db_path = os.path.join(old_db_dir, "stream_info.db") -# if not os.path.exists(old_db_path): -# return True -# stream_info_db = leveldb.LevelDB(os.path.join(old_db_dir, "stream_info.db")) -# stream_blob_db = leveldb.LevelDB(os.path.join(old_db_dir, "stream_blob.db")) -# stream_desc_db = leveldb.LevelDB(os.path.join(old_db_dir, "stream_desc.db")) -# -# db_conn = sqlite3.connect(os.path.join(new_db_dir, "live_stream.db")) -# -# c = db_conn.cursor() -# -# c.execute("create table if not exists live_streams (" + -# " stream_hash text primary key, " + -# " public_key text, " + -# " key text, " + -# " stream_name text, " + -# " next_announce_time real" + -# ")") -# c.execute("create table if not exists live_stream_blobs (" + -# " blob_hash text, " + -# " stream_hash text, " + -# " position integer, " + -# " revision integer, " + -# " iv text, " + -# " length integer, " + -# " signature text, " + -# " foreign key(stream_hash) references live_streams(stream_hash)" + -# ")") -# c.execute("create table if not exists live_stream_descriptors (" + -# " sd_blob_hash TEXT PRIMARY KEY, " + -# " stream_hash TEXT, " + -# " foreign key(stream_hash) references live_streams(stream_hash)" + -# ")") -# -# db_conn.commit() -# -# c = db_conn.cursor() -# for stream_hash, stream_info in stream_info_db.RangeIter(): -# public_key, key, name, next_announce_time = json.loads(stream_info) -# c.execute("insert into live_streams values (?, ?, ?, ?, ?)", -# (stream_hash, public_key, key, name, next_announce_time)) -# db_conn.commit() -# c = db_conn.cursor() -# for blob_hash_stream_hash, blob_info in stream_blob_db.RangeIter(): -# b_h, s_h = json.loads(blob_hash_stream_hash) -# position, revision, iv, length, signature = json.loads(blob_info) -# c.execute("insert into live_stream_blobs values (?, ?, ?, ?, ?, ?, ?)", -# (b_h, s_h, position, revision, iv, length, signature)) -# db_conn.commit() -# c = db_conn.cursor() -# for sd_blob_hash, stream_hash in stream_desc_db.RangeIter(): -# c.execute("insert into live_stream_descriptors values (?, ?)", -# (sd_blob_hash, stream_hash)) -# db_conn.commit() -# db_conn.close() -# -# -# def migrate_ptc_db(old_db_dir, new_db_dir): -# old_db_path = os.path.join(old_db_dir, "ptcwallet.db") -# if not os.path.exists(old_db_path): -# return True -# old_db = leveldb.LevelDB(old_db_path) -# try: -# p_key = old_db.Get("private_key") -# new_db = unqlite.UnQLite(os.path.join(new_db_dir, "ptcwallet.db")) -# new_db['private_key'] = p_key -# except KeyError: -# pass -# -# -# def migrate_lbryfile_manager_db(old_db_dir, new_db_dir): -# old_db_path = os.path.join(old_db_dir, "lbryfiles.db") -# if not os.path.exists(old_db_path): -# return True -# old_db = leveldb.LevelDB(old_db_path) -# new_db = sqlite3.connect(os.path.join(new_db_dir, "lbryfile_info.db")) -# c = new_db.cursor() -# c.execute("create table if not exists lbry_file_options (" + -# " blob_data_rate real, " + -# " status text," + -# " stream_hash text," -# " foreign key(stream_hash) references lbry_files(stream_hash)" + -# ")") -# new_db.commit() -# FILE_STATUS = "t" -# FILE_OPTIONS = "o" -# c = new_db.cursor() -# for k, v in old_db.RangeIter(): -# key_type, stream_hash = json.loads(k) -# if key_type == FILE_STATUS: -# try: -# rate = 
json.loads(old_db.Get(json.dumps((FILE_OPTIONS, stream_hash))))[0] -# except KeyError: -# rate = None -# c.execute("insert into lbry_file_options values (?, ?, ?)", -# (rate, v, stream_hash)) -# new_db.commit() -# new_db.close() -# -# -# def migrate_settings_db(old_db_dir, new_db_dir): -# old_settings_db_path = os.path.join(old_db_dir, "settings.db") -# if not os.path.exists(old_settings_db_path): -# return True -# old_db = leveldb.LevelDB(old_settings_db_path) -# new_db = unqlite.UnQLite(os.path.join(new_db_dir, "settings.db")) -# for k, v in old_db.RangeIter(): -# new_db[k] = v -# -# -# def migrate_repeater_db(old_db_dir, new_db_dir): -# old_repeater_db_path = os.path.join(old_db_dir, "valuable_blobs.db") -# if not os.path.exists(old_repeater_db_path): -# return True -# old_db = leveldb.LevelDB(old_repeater_db_path) -# info_db = sqlite3.connect(os.path.join(new_db_dir, "blind_info.db")) -# peer_db = sqlite3.connect(os.path.join(new_db_dir, "blind_peers.db")) -# unql_db = unqlite.UnQLite(os.path.join(new_db_dir, "blind_settings.db")) -# BLOB_INFO_TYPE = 'b' -# SETTING_TYPE = 's' -# PEER_TYPE = 'p' -# info_c = info_db.cursor() -# info_c.execute("create table if not exists valuable_blobs (" + -# " blob_hash text primary key, " + -# " blob_length integer, " + -# " reference text, " + -# " peer_host text, " + -# " peer_port integer, " + -# " peer_score text" + -# ")") -# info_db.commit() -# peer_c = peer_db.cursor() -# peer_c.execute("create table if not exists approved_peers (" + -# " ip_address text, " + -# " port integer" + -# ")") -# peer_db.commit() -# info_c = info_db.cursor() -# peer_c = peer_db.cursor() -# for k, v in old_db.RangeIter(): -# key_type, key_rest = json.loads(k) -# if key_type == PEER_TYPE: -# host, port = key_rest -# peer_c.execute("insert into approved_peers values (?, ?)", -# (host, port)) -# elif key_type == SETTING_TYPE: -# unql_db[key_rest] = v -# elif key_type == BLOB_INFO_TYPE: -# blob_hash = key_rest -# length, reference, peer_host, peer_port, peer_score = json.loads(v) -# info_c.execute("insert into valuable_blobs values (?, ?, ?, ?, ?, ?)", -# (blob_hash, length, reference, peer_host, peer_port, peer_score)) -# info_db.commit() -# peer_db.commit() -# info_db.close() -# peer_db.close() diff --git a/lbrynet/dht/node.py b/lbrynet/dht/node.py index ab46c8cd0..d51956077 100644 --- a/lbrynet/dht/node.py +++ b/lbrynet/dht/node.py @@ -85,9 +85,6 @@ class Node(object): self.next_refresh_call = None self.next_change_token_call = None # Create k-buckets (for storing contacts) - #self._buckets = [] - #for i in range(160): - # self._buckets.append(kbucket.KBucket()) if routingTableClass == None: self._routingTable = routingtable.OptimizedTreeRoutingTable(self.id) else: @@ -118,7 +115,6 @@ class Node(object): self.hash_watcher = HashWatcher() def __del__(self): - #self._persistState() if self._listeningPort is not None: self._listeningPort.stopListening() @@ -165,16 +161,6 @@ class Node(object): # Initiate the Kademlia joining sequence - perform a search for this node's own ID self._joinDeferred = self._iterativeFind(self.id, bootstrapContacts) # #TODO: Refresh all k-buckets further away than this node's closest neighbour -# def getBucketAfterNeighbour(*args): -# for i in range(160): -# if len(self._buckets[i]) > 0: -# return i+1 -# return 160 -# df.addCallback(getBucketAfterNeighbour) -# df.addCallback(self._refreshKBuckets) - #protocol.reactor.callLater(10, self.printContacts) - #self._joinDeferred.addCallback(self._persistState) - 
#self._joinDeferred.addCallback(self.printContacts) # Start refreshing k-buckets periodically, if necessary self.next_refresh_call = twisted.internet.reactor.callLater( constants.checkRefreshInterval, self._refreshNode) #IGNORE:E1101 @@ -187,7 +173,6 @@ class Node(object): for contact in self._routingTable._buckets[i]._contacts: print contact print '==================================' - #twisted.internet.reactor.callLater(10, self.printContacts) def getApproximateTotalDHTNodes(self): # get the deepest bucket and the number of contacts in that bucket and multiply it @@ -218,7 +203,6 @@ class Node(object): if type(result) == dict: if blob_hash in result: for peer in result[blob_hash]: - #print peer if self.lbryid != peer[6:]: host = ".".join([str(ord(d)) for d in peer[:4]]) if host == "127.0.0.1": @@ -230,8 +214,6 @@ class Node(object): return expanded_peers def find_failed(err): - #print "An exception occurred in the DHT" - #print err.getErrorMessage() return [] d = self.iterativeFindValue(blob_hash) @@ -268,16 +250,12 @@ class Node(object): result = responseMsg.response if 'token' in result: - #print "Printing result...", result value['token'] = result['token'] d = n.store(blob_hash, value, self.id, 0) d.addCallback(log_success) d.addErrback(log_error, n) else: d = defer.succeed(False) - #else: - # print "result:", result - # print "No token where it should be" return d def requestPeers(contacts): @@ -289,7 +267,6 @@ class Node(object): contacts.pop() self.store(blob_hash, value, self_store=True, originalPublisherID=self.id) elif self.externalIP is not None: - #print "attempting to self-store" self.store(blob_hash, value, self_store=True, originalPublisherID=self.id) ds = [] for contact in contacts: @@ -323,7 +300,6 @@ class Node(object): h = hashlib.new('sha384') h.update(self.old_token_secret + compact_ip) if not token == h.digest(): - #print 'invalid token found' return False return True @@ -368,24 +344,17 @@ class Node(object): def checkResult(result): if type(result) == dict: # We have found the value; now see who was the closest contact without it... 
-# if 'closestNodeNoValue' in result: # ...and store the key/value pair -# contact = result['closestNodeNoValue'] -# contact.store(key, result[key]) outerDf.callback(result) else: # The value wasn't found, but a list of contacts was returned # Now, see if we have the value (it might seem wasteful to search on the network # first, but it ensures that all values are properly propagated through the # network - #if key in self._dataStore: if self._dataStore.hasPeersForBlob(key): # Ok, we have the value locally, so use that peers = self._dataStore.getPeersForBlob(key) # Send this value to the closest node without it - #if len(result) > 0: - # contact = result[0] - # contact.store(key, value) outerDf.callback({key: peers, "from_peer": 'self'}) else: # Ok, value does not exist in DHT at all @@ -484,19 +453,13 @@ class Node(object): compact_ip = contact.compact_ip() elif '_rpcNodeContact' in kwargs: contact = kwargs['_rpcNodeContact'] - #print contact.address compact_ip = contact.compact_ip() - #print compact_ip else: return 'Not OK' #raise TypeError, 'No contact info available' if ((self_store is False) and (not 'token' in value or not self.verify_token(value['token'], compact_ip))): - #if not 'token' in value: - # print "Couldn't find token in value" - #elif not self.verify_token(value['token'], contact.compact_ip()): - # print "Token is invalid" raise ValueError('Invalid or missing token') if 'port' in value: @@ -518,11 +481,8 @@ class Node(object): now = int(time.time()) originallyPublished = now# - age - #print compact_address self._dataStore.addPeerToBlob( key, compact_address, now, originallyPublished, originalPublisherID) - #if self_store is True: - # print "looks like it was successful maybe" return 'OK' @rpcmethod @@ -717,7 +677,6 @@ class Node(object): # Force the iteration pendingIterationCalls[0].cancel() del pendingIterationCalls[0] - #print 'forcing iteration =================' searchIteration() def log_error(err): @@ -725,7 +684,6 @@ class Node(object): # Send parallel, asynchronous FIND_NODE RPCs to the shortlist of contacts def searchIteration(): - #print '==> searchiteration' slowNodeCount[0] = len(activeProbes) # TODO: move sort_key to be a method on the class def sort_key(firstContact, secondContact, targetKey=key): @@ -797,7 +755,6 @@ class Node(object): # Ensure that the closest contacts are taken from the updated shortList searchIteration() else: - #print '++++++++++++++ DONE (logically) +++++++++++++\n\n' # If no probes were sent, there will not be any improvement, so we're done outerDf.callback(activeContacts) @@ -809,9 +766,7 @@ class Node(object): def _refreshNode(self): """ Periodically called to perform k-bucket refreshes and data replication/republishing as necessary """ - #print 'refreshNode called' df = self._refreshRoutingTable() - #df.addCallback(self._republishData) df.addCallback(self._removeExpiredPeers) df.addCallback(self._scheduleNextNodeRefresh) @@ -830,13 +785,8 @@ class Node(object): searchForNextNodeID() return outerDf - #def _republishData(self, *args): - # #print '---republishData() called' - # df = twisted.internet.threads.deferToThread(self._threadedRepublishData) - # return df def _scheduleNextNodeRefresh(self, *args): - #print '==== sheduling next refresh' self.next_refresh_call = twisted.internet.reactor.callLater( constants.checkRefreshInterval, self._refreshNode) diff --git a/lbrynet/dht/protocol.py b/lbrynet/dht/protocol.py index 8d8e383b9..f99711f36 100644 --- a/lbrynet/dht/protocol.py +++ b/lbrynet/dht/protocol.py @@ -208,7 +208,6 @@ class 
KademliaProtocol(protocol.DatagramProtocol): seqNumber = 0 startPos = 0 while seqNumber < totalPackets: - #reactor.iterate() #IGNORE:E1101 packetData = data[startPos:startPos+self.msgSizeLimit] encSeqNumber = chr(seqNumber >> 8) + chr(seqNumber & 0xff) txData = '\x00%s%s%s\x00%s' % (encTotalPackets, encSeqNumber, rpcID, packetData) @@ -270,13 +269,8 @@ class KademliaProtocol(protocol.DatagramProtocol): if callable(func) and hasattr(func, 'rpcmethod'): # Call the exposed Node method and return the result to the deferred callback chain try: - ##try: - ## # Try to pass the sender's node id to the function... kwargs = {'_rpcNodeID': senderContact.id, '_rpcNodeContact': senderContact} result = func(*args, **kwargs) - ##except TypeError: - ## # ...or simply call it if that fails - ## result = func(*args) except Exception, e: df.errback(failure.Failure(e)) else: diff --git a/lbrynet/dht/routingtable.py b/lbrynet/dht/routingtable.py index c228c3372..7d7334e7c 100644 --- a/lbrynet/dht/routingtable.py +++ b/lbrynet/dht/routingtable.py @@ -208,9 +208,6 @@ class TreeRoutingTable(RoutingTable): node is returning all of the contacts that it knows of. @rtype: list """ - #if key == self.id: - # bucketIndex = 0 #TODO: maybe not allow this to continue? - #else: bucketIndex = self._kbucketIndex(key) closestNodes = self._buckets[bucketIndex].getContacts(constants.k, _rpcNodeID) # This method must return k contacts (even if we have the node @@ -290,7 +287,6 @@ class TreeRoutingTable(RoutingTable): try: self._buckets[bucketIndex].removeContact(contactID) except ValueError: - #print 'removeContact(): Contact not in routing table' return def touchKBucket(self, key): @@ -427,7 +423,6 @@ class OptimizedTreeRoutingTable(TreeRoutingTable): try: contact = self._buckets[bucketIndex].getContact(contactID) except ValueError: - #print 'removeContact(): Contact not in routing table' return contact.failedRPCs += 1 if contact.failedRPCs >= 5: diff --git a/lbrynet/dhttest.py b/lbrynet/dhttest.py index 6d5aac7ff..c31773381 100644 --- a/lbrynet/dhttest.py +++ b/lbrynet/dhttest.py @@ -25,7 +25,6 @@ import sys, hashlib, random import twisted.internet.reactor from lbrynet.dht.node import Node -#from entangled.kademlia.datastore import SQLiteDataStore # The Entangled DHT node; instantiated in the main() method node = None @@ -77,7 +76,6 @@ def getValue(): binascii.unhexlify("f7d9dc4de674eaa2c5a022eb95bc0d33ec2e75c6")) deferredResult = node.iterativeFindValue( binascii.unhexlify("f7d9dc4de674eaa2c5a022eb95bc0d33ec2e75c6")) - #deferredResult = node.iterativeFindValue(KEY) # Add a callback to this result; this will be called as soon as the operation has completed deferredResult.addCallback(getValueCallback) # As before, add the generic error callback @@ -91,19 +89,8 @@ def getValueCallback(result): # contacts would be returned instead") print "Got the value" print result - #if type(result) == dict: - # for v in result[binascii.unhexlify("5292fa9c426621f02419f5050900392bdff5036c")]: - # print "v:", v - # print "v[6:", v[6:] - # print "lbryid:",lbryid - # print "lbryid == v[6:]:", lbryid == v[6:] - # print 'Value successfully retrieved: %s' % result[KEY] - #else: - # print 'Value not found' # Either way, schedule a "delete" operation for the key - #print 'Scheduling removal in 2.5 seconds...' - #twisted.internet.reactor.callLater(2.5, deleteValue) print 'Scheduling shutdown in 2.5 seconds...' 
twisted.internet.reactor.callLater(2.5, stop) @@ -151,9 +138,6 @@ if __name__ == '__main__': print 'Run this script without any arguments for info.\n' # Set up SQLite-based data store (you could use an in-memory store instead, for example) - #if os.path.isfile('/tmp/dbFile%s.db' % sys.argv[1]): - # os.remove('/tmp/dbFile%s.db' % sys.argv[1]) - #dataStore = SQLiteDataStore(dbFile = '/tmp/dbFile%s.db' % sys.argv[1]) # # Create the Entangled node. It extends the functionality of a # basic Kademlia node (but is fully backwards-compatible with a @@ -162,14 +146,12 @@ if __name__ == '__main__': # If you wish to have a pure Kademlia network, use the # entangled.kademlia.node.Node class instead print 'Creating Node...' - #node = EntangledNode( udpPort=int(sys.argv[1]), dataStore=dataStore ) node = Node(udpPort=int(sys.argv[1]), lbryid=lbryid) # Schedule the node to join the Kademlia/Entangled DHT node.joinNetwork(knownNodes) # Schedule the "storeValue() call to be invoked after 2.5 seconds, #using KEY and VALUE as arguments - #twisted.internet.reactor.callLater(2.5, storeValue, KEY, VALUE) twisted.internet.reactor.callLater(2.5, getValue) # Start the Twisted reactor - this fires up all networking, and # allows the scheduled join operation to take place diff --git a/lbrynet/lbrylive/LiveStreamCreator.py b/lbrynet/lbrylive/LiveStreamCreator.py index 169f4668d..1ee6826ed 100644 --- a/lbrynet/lbrylive/LiveStreamCreator.py +++ b/lbrynet/lbrylive/LiveStreamCreator.py @@ -152,7 +152,6 @@ class StdinStreamProducer(object): self.finished_deferred = defer.Deferred() self.consumer.registerProducer(self, True) - #self.reader = process.ProcessReader(reactor, self, 'read', 0) self.resumeProducing() return self.finished_deferred diff --git a/lbrynet/lbrylive/client/LiveStreamDownloader.py b/lbrynet/lbrylive/client/LiveStreamDownloader.py index 3371e31a9..3766ec579 100644 --- a/lbrynet/lbrylive/client/LiveStreamDownloader.py +++ b/lbrynet/lbrylive/client/LiveStreamDownloader.py @@ -46,7 +46,6 @@ class LiveStreamDownloader(_LiveStreamDownloader): _LiveStreamDownloader.__init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, stream_info_manager, payment_rate_manager, wallet, upload_allowed) - #self.writer = process.ProcessWriter(reactor, self, 'write', 1) def _get_metadata_handler(self, download_manager): return LiveStreamMetadataHandler(self.stream_hash, self.stream_info_manager, @@ -61,7 +60,6 @@ class LiveStreamDownloader(_LiveStreamDownloader): def _get_write_func(self): def write_func(data): if self.stopped is False: - #self.writer.write(data) pass return write_func diff --git a/lbrynet/lbrynet_daemon/DaemonRequest.py b/lbrynet/lbrynet_daemon/DaemonRequest.py index 9eb080876..6f46e6bed 100644 --- a/lbrynet/lbrynet_daemon/DaemonRequest.py +++ b/lbrynet/lbrynet_daemon/DaemonRequest.py @@ -76,7 +76,6 @@ class DaemonRequest(server.Request): try: self.content.seek(0, 0) args.update(self.parse_multipart(self.content, pdict)) - #args.update(cgi.parse_multipart(self.content, pdict)) except KeyError as e: if e.args[0] == b'content-disposition': diff --git a/lbrynet/lbrynet_daemon/auth/server.py b/lbrynet/lbrynet_daemon/auth/server.py index e90b7f642..1376c024d 100644 --- a/lbrynet/lbrynet_daemon/auth/server.py +++ b/lbrynet/lbrynet_daemon/auth/server.py @@ -285,7 +285,6 @@ class AuthJSONRPCServer(AuthorizedBase): assert api_key.compare_hmac(to_auth, token), InvalidAuthenticationToken def _update_session_secret(self, session_id): - # log.info("Generating new token for next request") 
self.sessions.update({session_id: APIKey.new(name=session_id)}) def _get_jsonrpc_version(self, version=None, id=None): diff --git a/lbrynet/reflector/server/server.py b/lbrynet/reflector/server/server.py index 7cb4a7171..03f278867 100644 --- a/lbrynet/reflector/server/server.py +++ b/lbrynet/reflector/server/server.py @@ -30,7 +30,6 @@ class ReflectorServer(Protocol): def dataReceived(self, data): if self.receiving_blob: - # log.debug('Writing data to blob') self.blob_write(data) else: log.debug('Not yet recieving blob, data needs further processing') diff --git a/packaging/windows/lbry-win32-app/LBRYWin32App.py b/packaging/windows/lbry-win32-app/LBRYWin32App.py index 927007bd2..2a9bf9af9 100644 --- a/packaging/windows/lbry-win32-app/LBRYWin32App.py +++ b/packaging/windows/lbry-win32-app/LBRYWin32App.py @@ -165,7 +165,6 @@ class SysTrayIcon(object): def show_menu(self): menu = win32gui.CreatePopupMenu() self.create_menu(menu, self.menu_options) - # win32gui.SetMenuDefaultItem(menu, 1000, 0) pos = win32gui.GetCursorPos() # See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp diff --git a/setup.py b/setup.py index fc5c0b0a5..77e8f5542 100644 --- a/setup.py +++ b/setup.py @@ -51,14 +51,6 @@ requires = [ ] console_scripts = [ - # 'lbrynet-stdin-uploader = lbrynet.lbrynet_console.LBRYStdinUploader:launch_stdin_uploader', - # 'lbrynet-stdout-downloader = lbrynet.lbrynet_console.LBRYStdoutDownloader:launch_stdout_downloader', - # 'lbrynet-create-network = lbrynet.create_network:main', - # 'lbrynet-launch-node = lbrynet.dht.node:main', - # 'lbrynet-launch-rpc-node = lbrynet.rpc_node:main', - # 'lbrynet-rpc-node-cli = lbrynet.node_rpc_cli:main', - # 'lbrynet-lookup-hosts-for-hash = lbrynet.dht_scripts:get_hosts_for_hash_in_dht', - # 'lbrynet-announce_hash_to_dht = lbrynet.dht_scripts:announce_hash_to_dht', 'lbrynet-daemon = lbrynet.lbrynet_daemon.DaemonControl:start', 'stop-lbrynet-daemon = lbrynet.lbrynet_daemon.DaemonControl:stop', 'lbrynet-cli = lbrynet.lbrynet_daemon.DaemonCLI:main' diff --git a/tests/dht/testNode.py b/tests/dht/testNode.py index f4839da10..a4e751d51 100644 --- a/tests/dht/testNode.py +++ b/tests/dht/testNode.py @@ -54,12 +54,13 @@ class NodeDataTest(unittest.TestCase): h.update(str(i)) self.cases.append((h.digest(), 5000+2*i)) self.cases.append((h.digest(), 5001+2*i)) -#(('a', 'hello there\nthis is a test'), - # ('b', unicode('jasdklfjklsdj;f2352352ljklzsdlkjkasf\ndsjklafsd')), - # ('e', 123), - # ('f', [('this', 'is', 1), {'complex': 'data entry'}]), - # ('aMuchLongerKeyThanAnyOfThePreviousOnes', 'some data')) def testStore(self): def check_val_in_result(r, peer_info): @@ -105,31 +106,17 @@ class NodeContactTest(unittest.TestCase): self.failIf(contact in closestNodes, 'Node added itself as a contact') -#class NodeLookupTest(unittest.TestCase): -# """ Test case for the Node class's iterative node lookup algorithm """ -# def setUp(self): -# import entangled.kademlia.contact -# self.node = entangled.kademlia.node.Node() -# self.remoteNodes = [] -# for i in range(10): -# remoteNode = entangled.kademlia.node.Node() -# remoteContact = entangled.kademlia.contact.Contact(remoteNode.id, '127.0.0.1', 91827+i, self.node._protocol) -# self.remoteNodes.append(remoteNode) -# self.node.addContact(remoteContact) -# def testIterativeFindNode(self): -# """ Ugly brute-force test to see if the iterative node lookup algorithm runs without failing """ -# import
entangled.kademlia.protocol -# entangled.kademlia.protocol.reactor.listenUDP(91826, self.node._protocol) -# for i in range(10): -# entangled.kademlia.protocol.reactor.listenUDP(91827+i, self.remoteNodes[i]._protocol) -# df = self.node.iterativeFindNode(self.node.id) -# df.addBoth(lambda _: entangled.kademlia.protocol.reactor.stop()) -# entangled.kademlia.protocol.reactor.run() -""" Some scaffolding for the NodeLookupTest class. Allows isolated node testing by simulating remote node responses""" +"""Some scaffolding for the NodeLookupTest class. Allows isolated +node testing by simulating remote node responses""" from twisted.internet import protocol, defer, selectreactor from lbrynet.dht.msgtypes import ResponseMessage @@ -149,22 +136,17 @@ class FakeRPCProtocol(protocol.DatagramProtocol): """ Fake RPC protocol; allows entangled.kademlia.contact.Contact objects to "send" RPCs """ def sendRPC(self, contact, method, args, rawResponse=False): - #print method + " " + str(args) if method == "findNode": # get the specific contacts closest contacts closestContacts = [] - #print "contact" + contact.id for contactTuple in self.network: - #print contactTuple[0].id if contact == contactTuple[0]: # get the list of closest contacts for this contact closestContactsList = contactTuple[1] - #print "contact" + contact.id # Pack the closest contacts into a ResponseMessage for closeContact in closestContactsList: - #print closeContact.id closestContacts.append((closeContact.id, closeContact.address, closeContact.port)) message = ResponseMessage("rpcId", contact.id, closestContacts) @@ -221,9 +203,11 @@ class NodeLookupTest(unittest.TestCase): self.updPort = 81173 - # create a dummy reactor - #self._protocol.reactor.listenUDP(self.updPort, self._protocol) self.contactsAmount = 80 # set the node ID manually for testing self.node.id = '12345678901234567800' @@ -233,7 +217,6 @@ # create 160 bit node ID's for test purposes self.testNodeIDs = [] - #idNum = long(self.node.id.encode('hex'), 16) idNum = int(self.node.id) for i in range(self.contactsAmount): # create the testNodeIDs in ascending order, away from the actual node ID, with regards to the distance metric @@ -284,7 +267,6 @@ for item in self.contacts[0:6]: expectedResult.append(item.id) - #print item.id # Get the result from the deferred activeContacts = df.result @@ -298,151 +280,7 @@ # Check that the received active contacts are the same as the input contacts self.failUnlessEqual(activeContacts, expectedResult, \ "Active should only contain the closest possible contacts which were used as input for the boostrap") -
-# #for item in activeContacts: -# # print item.id -# #print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" -# -# # Check the length of the active contacts -# self.failUnlessEqual(activeContacts.__len__(), expectedResult.__len__(), \ -# "Length of received active contacts not as expected, should be %d" %expectedResult.__len__()) -# -# -# # Check that the received active contacts are now closer to this node -# self.failUnlessEqual(activeContacts, expectedResult, \ -# "Active contacts should now only contain the closest possible contacts") - - - -# def testIterativeStore(self): -# """ test storing values """ -# -# # create the network of contacts in format: (contact, closest contacts) -# contactNetwork = ((self.contacts[0], self.contacts[0:8]), -# (self.contacts[1], self.contacts[0:8]), -# (self.contacts[2], self.contacts[0:8]), -# (self.contacts[3], self.contacts[0:8]), -# (self.contacts[4], self.contacts[0:8]), -# (self.contacts[5], self.contacts[0:8]), -# (self.contacts[6], self.contacts[0:8]), -# (self.contacts[7], self.contacts[0:8]), -# (self.contacts[8], self.contacts[0:8]), -# (self.contacts[40], self.contacts[41:48]), -# (self.contacts[41], self.contacts[41:48]), -# (self.contacts[42], self.contacts[41:48]), -# (self.contacts[43], self.contacts[41:48]), -# (self.contacts[44], self.contacts[41:48]), -# (self.contacts[45], self.contacts[41:48]), -# (self.contacts[46], self.contacts[41:48]), -# (self.contacts[47], self.contacts[41:48]), -# (self.contacts[48], self.contacts[41:48])) -# contacts_with_datastores = [] -# -# for contact_tuple in contactNetwork: -# contacts_with_datastores.append((contact_tuple[0], contact_tuple[1], lbrynet.dht.datastore.DictDataStore())) -# -# self._protocol.createNetwork(contacts_with_datastores) -# -# -# #self._protocol.createNetwork(contactNetwork) -# -# -# # Test storing a value that has an hash id close to the known contacts -# # The value should only be stored at those nodes -# value = 'value' -# valueID = self.contacts[40].id -# -# # Manually populate the routing table with contacts that have ID's close to the valueID -# for contact in self.contacts[40:48]: -# self.node.addContact(contact) -# -# # Manually populate the routing table with contacts that have ID's far away from the valueID -# for contact in self.contacts[0:8]: -# self.node.addContact(contact) -# -# # Store the value -# df = self.node.announceHaveBlob(valueID, value) -# -# storageNodes = df.result -# -# storageNodeIDs = [] -# for item in storageNodes: -# storageNodeIDs.append(item.id) -# storageNodeIDs.sort() -# #print storageNodeIDs -# -# expectedIDs = [] -# for item in self.contacts[40:43]: -# expectedIDs.append(item.id) -# #print expectedIDs -# -# #print '#### storage nodes ####' -# #for node in storageNodes: -# # print node.id -# -# -# # check that the value has been stored at nodes with ID's close to the valueID -# self.failUnlessEqual(storageNodeIDs, expectedIDs, \ -# "Value not stored at nodes with ID's close to the valueID") -# -# def testFindValue(self): -# # create test values using the contact ID as the key -# testValues = ({self.contacts[0].id: "some test data"}, -# {self.contacts[1].id: "some more test data"}, -# {self.contacts[8].id: "and more data"} -# ) -# -# -# # create the network of contacts in format: (contact, closest contacts, values) -# contactNetwork = ((self.contacts[0], self.contacts[0:6], testValues[0]), -# (self.contacts[1], self.contacts[0:6], testValues[1]), -# (self.contacts[2], self.contacts[0:6], {'2':'2'}), -# (self.contacts[3], 
self.contacts[0:6], {'4':'5'}), -# (self.contacts[4], self.contacts[0:6], testValues[2]), -# (self.contacts[5], self.contacts[0:6], {'2':'2'}), -# (self.contacts[6], self.contacts[0:6], {'2':'2'})) -# -# self._protocol.createNetwork(contactNetwork) -# -# # Initialise the routing table with some contacts -# for contact in self.contacts[0:4]: -# self.node.addContact(contact) -# -# # Initialise the node with some known contacts -# #self.node._iterativeFind(self.node.id, self.contacts[0:3]) -# -# df = self.node.iterativeFindValue(testValues[1].keys()[0]) -# -# resultDict = df.result -# keys = resultDict.keys() -# -# for key in keys: -# if key == 'closestNodeNoValue': -# print "closest contact without data " + " " + resultDict.get(key).id -# else: -# print "data key :" + key + "; " + "data: " + resultDict.get(key) - - - - - + def suite(): suite = unittest.TestSuite() @@ -452,6 +290,7 @@ def suite(): suite.addTest(unittest.makeSuite(NodeLookupTest)) return suite + if __name__ == '__main__': # If this module is executed from the commandline, run all its tests unittest.TextTestRunner().run(suite()) diff --git a/tests/dht/testProtocol.py b/tests/dht/testProtocol.py index b6329f639..7215eaa27 100644 --- a/tests/dht/testProtocol.py +++ b/tests/dht/testProtocol.py @@ -68,16 +68,12 @@ class ClientDatagramProtocol(lbrynet.dht.protocol.KademliaProtocol): lbrynet.dht.protocol.KademliaProtocol.__init__(self, None) def startProtocol(self): - #self.transport.connect(self.destination[0], self.destination[1]) self.sendDatagram() def sendDatagram(self): if len(self.data): self._send(self.data, self.msgID, self.destination) -# def datagramReceived(self, datagram, host): -# print 'Datagram received: ', repr(datagram) -# self.sendDatagram() @@ -193,44 +189,6 @@ class KademliaProtocolTest(unittest.TestCase): # The list of sent RPC messages should be empty at this stage self.failUnlessEqual(len(self.protocol._sentMessages), 0, 'The protocol is still waiting for a RPC result, but the transaction is already done!') -# def testDatagramLargeMessageReconstruction(self): -# """ Tests if a large amount of data can be successfully re-constructed from multiple UDP datagrams """ -# remoteContact = lbrynet.dht.contact.Contact('node2', '127.0.0.1', 9182, self.protocol) -# self.node.addContact(remoteContact) -# self.error = None -# #responseData = 8143 * '0' # Threshold for a single packet transmission -# responseData = 300000 * '0' -# def handleError(f): -# if f.check((lbrynet.dht.protocol.TimeoutError)): -# self.error = 'RPC from the following contact timed out: %s' % f.getErrorMessage() -# else: -# self.error = 'An RPC error occurred: %s' % f.getErrorMessage() -# def handleResult(result): -# if result != responseData: -# self.error = 'Result from RPC is incorrect; expected "%s", got "%s"' % (responseData, result) -# # Publish the "local" node on the network -# lbrynet.dht.protocol.reactor.listenUDP(9182, self.protocol) -# # ...and make it think it is waiting for a result from an RPC -# msgID = 'abcdefghij1234567890' -# df = defer.Deferred() -# timeoutCall = lbrynet.dht.protocol.reactor.callLater(lbrynet.dht.constants.rpcTimeout, self.protocol._msgTimeout, msgID) -# self.protocol._sentMessages[msgID] = (remoteContact.id, df, timeoutCall) -# # Simulate the "reply" transmission -# msg = lbrynet.dht.msgtypes.ResponseMessage(msgID, 'node2', responseData) -# msgPrimitive = self.protocol._translator.toPrimitive(msg) -# encodedMsg = self.protocol._encoder.encode(msgPrimitive) -# udpClient = ClientDatagramProtocol() -# udpClient.data 
= encodedMsg -# udpClient.msgID = msgID -# lbrynet.dht.protocol.reactor.listenUDP(0, udpClient) -# df.addCallback(handleResult) -# df.addErrback(handleError) -# df.addBoth(lambda _: lbrynet.dht.protocol.reactor.stop()) -# lbrynet.dht.protocol.reactor.run() -# self.failIf(self.error, self.error) -# # The list of sent RPC messages should be empty at this stage -# #self.failUnlessEqual(len(self.protocol._sentMessages), 0, 'The protocol is still waiting for a RPC result, but the transaction is already done!') - def suite(): suite = unittest.TestSuite() diff --git a/tests/unit/dht/test_datastore.py b/tests/unit/dht/test_datastore.py index 6c3496871..d79eb63f6 100644 --- a/tests/unit/dht/test_datastore.py +++ b/tests/unit/dht/test_datastore.py @@ -17,7 +17,6 @@ import hashlib class DictDataStoreTest(unittest.TestCase): """ Basic tests case for the reference DataStore API and implementation """ def setUp(self): - #if not hasattr(self, 'ds'): self.ds = lbrynet.dht.datastore.DictDataStore() h = hashlib.sha1() h.update('g') @@ -29,12 +28,6 @@ class DictDataStoreTest(unittest.TestCase): h3.update('Boozoo Bajou - 09 - S.I.P.mp3') hashKey3 = h3.digest() #self.cases = (('a', 'hello there\nthis is a test'), - # ('b', unicode('jasdklfjklsdj;f2352352ljklzsdlkjkasf\ndsjklafsd')), - # ('e', 123), - # ('f', [('this', 'is', 1), {'complex': 'data entry'}]), - # ('aMuchLongerKeyThanAnyOfThePreviousOnes', 'some data'), - # (hashKey, 'some data'), - # (hashKey2, 'abcdefghijklmnopqrstuvwxz'), # (hashKey3, '1 2 3 4 5 6 7 8 9 0')) self.cases = ((hashKey, 'test1test1test1test1test1t'), (hashKey, 'test2'), @@ -90,88 +83,37 @@ class DictDataStoreTest(unittest.TestCase): self.failIf('val3' in self.ds.getPeersForBlob(h2), 'DataStore failed to delete an expired value! Value %s, publish time %s, current time %s' % ('val3', str(now - td2), str(now))) self.failUnless('val4' in self.ds.getPeersForBlob(h2), 'DataStore deleted an unexpired value! Value %s, publish time %s, current time %s' % ('val4', str(now), str(now))) -# def testReplace(self): # # First write with fake values -# now = int(time.time()) # for key, value in self.cases: -# try: -# self.ds.setItem(key, 'abc', now, now, 'node1') # except Exception: -# import traceback -# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5))) # # # write this stuff a second time, with the real values # for key, value in self.cases: -# try: -# self.ds.setItem(key, value, now, now, 'node1') # except Exception: -# import traceback -# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5))) # -# self.failUnlessEqual(len(self.ds.keys()), len(self.cases), 'Values did not get overwritten properly; expected %d keys, got %d' % (len(self.cases), len(self.ds.keys()))) # # Read back the data # for key, value in self.cases: -# self.failUnlessEqual(self.ds[key], value, 'DataStore returned invalid data! 
Expected "%s", got "%s"' % (value, self.ds[key])) -# def testDelete(self): # # First some values -# now = int(time.time()) # for key, value in self.cases: -# try: -# self.ds.setItem(key, 'abc', now, now, 'node1') # except Exception: -# import traceback -# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5))) # -# self.failUnlessEqual(len(self.ds.keys()), len(self.cases), 'Values did not get stored properly; expected %d keys, got %d' % (len(self.cases), len(self.ds.keys()))) # # # Delete an item from the data -# key, value == self.cases[0] -# del self.ds[key] -# self.failUnlessEqual(len(self.ds.keys()), len(self.cases)-1, 'Value was not deleted; expected %d keys, got %d' % (len(self.cases)-1, len(self.ds.keys()))) -# self.failIf(key in self.ds.keys(), 'Key was not deleted: %s' % key) -# def testMetaData(self): -# now = int(time.time()) -# age = random.randint(10,3600) -# originallyPublished = [] -# for i in range(len(self.cases)): -# originallyPublished.append(now - age) # # First some values with metadata -# i = 0 # for key, value in self.cases: -# try: -# self.ds.setItem(key, 'abc', now, originallyPublished[i], 'node%d' % i) -# i += 1 # except Exception: -# import traceback -# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5))) # # # Read back the meta-data -# i = 0 # for key, value in self.cases: -# dsLastPublished = self.ds.lastPublished(key) -# dsOriginallyPublished = self.ds.originalPublishTime(key) -# dsOriginalPublisherID = self.ds.originalPublisherID(key) -# self.failUnless(type(dsLastPublished) == int, 'DataStore returned invalid type for "last published" time! Expected "int", got %s' % type(dsLastPublished)) -# self.failUnless(type(dsOriginallyPublished) == int, 'DataStore returned invalid type for "originally published" time! Expected "int", got %s' % type(dsOriginallyPublished)) -# self.failUnless(type(dsOriginalPublisherID) == str, 'DataStore returned invalid type for "original publisher ID"; Expected "str", got %s' % type(dsOriginalPublisherID)) -# self.failUnlessEqual(dsLastPublished, now, 'DataStore returned invalid "last published" time! Expected "%d", got "%d"' % (now, dsLastPublished)) -# self.failUnlessEqual(dsOriginallyPublished, originallyPublished[i], 'DataStore returned invalid "originally published" time! Expected "%d", got "%d"' % (originallyPublished[i], dsOriginallyPublished)) -# self.failUnlessEqual(dsOriginalPublisherID, 'node%d' % i, 'DataStore returned invalid "original publisher ID"; Expected "%s", got "%s"' % ('node%d' % i, dsOriginalPublisherID)) -# i += 1 -#class SQLiteDataStoreTest(DictDataStoreTest): -# def setUp(self): -# self.ds = entangled.kademlia.datastore.SQLiteDataStore() -# DictDataStoreTest.setUp(self) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(DictDataStoreTest)) - #suite.addTest(unittest.makeSuite(SQLiteDataStoreTest)) return suite From b8c91c61bb87e1c1e3f15eca5920b4af2f02c374 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Sat, 10 Dec 2016 10:33:04 -0800 Subject: [PATCH 10/26] Make a new logger with a fail method that is useful for errbacks. Extracting useful tracebacks and line numbers from failures withing twisted's deferred can be a pain. Hopefully this is a step in the right direction. 
--- lbrynet/core/log_support.py | 80 +++++++++++++++++++++++++++++ tests/unit/core/test_log_support.py | 40 +++++++++++++++ 2 files changed, 120 insertions(+) create mode 100644 tests/unit/core/test_log_support.py diff --git a/lbrynet/core/log_support.py b/lbrynet/core/log_support.py index 39ffe867e..9f6ad2222 100644 --- a/lbrynet/core/log_support.py +++ b/lbrynet/core/log_support.py @@ -1,4 +1,5 @@ import datetime +import inspect import json import logging import logging.handlers @@ -14,6 +15,22 @@ import lbrynet from lbrynet.conf import settings from lbrynet.core import utils +#### +# This code is copied from logging/__init__.py in the python source code +#### +# +# _srcfile is used when walking the stack to check when we've got the first +# caller stack frame. +# +if hasattr(sys, 'frozen'): #support for py2exe + _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:]) +elif __file__[-4:].lower() in ['.pyc', '.pyo']: + _srcfile = __file__[:-4] + '.py' +else: + _srcfile = __file__ +_srcfile = os.path.normcase(_srcfile) +##### + session = FuturesSession() @@ -148,6 +165,30 @@ class JsonFormatter(logging.Formatter): data['exc_info'] = self.formatException(record.exc_info) return json.dumps(data) +#### +# This code is copied from logging/__init__.py in the python source code +#### +def findCaller(srcfile=None): + """Returns the filename, line number and function name of the caller""" + srcfile = srcfile or _srcfile + f = inspect.currentframe() + #On some versions of IronPython, currentframe() returns None if + #IronPython isn't run with -X:Frames. + if f is not None: + f = f.f_back + rv = "(unknown file)", 0, "(unknown function)" + while hasattr(f, "f_code"): + co = f.f_code + filename = os.path.normcase(co.co_filename) + # ignore any function calls that are in this file + if filename == srcfile: + f = f.f_back + continue + rv = (filename, f.f_lineno, co.co_name) + break + return rv +### + def failure(failure, log, msg, *args): """Log a failure message from a deferred. @@ -287,3 +328,42 @@ class LogUploader(object): else: log_size = 0 return cls(log_name, log_file, log_size) + + +class Logger(logging.Logger): + """A logger that has an extra `fail` method useful for handling twisted failures.""" + def fail(self): + """Returns a function to log a failure from an errback. + + The returned function appends the error message and extracts + the traceback from `err`. + + Example usage: + d.addErrback(log.fail(), 'This is an error message') + + Although odd, making the method call is necessary to extract + out useful filename and line number information; otherwise the + reported values are from inside twisted's deferred handling + code. + + Args (for the returned function): + err: twisted.python.failure.Failure + msg: the message to log, using normal logging string interpolation. + args: the values to substitute into `msg` + kwargs: set `level` to change from the default ERROR severity. Other + keywords are treated as normal log kwargs.
+ + """ + fn, lno, func = findCaller() + def _fail(err, msg, *args, **kwargs): + level = kwargs.pop('level', logging.ERROR) + msg += ": %s" + args += (err.getErrorMessage(),) + exc_info = (err.type, err.value, err.getTracebackObject()) + record = self.makeRecord( + self.name, level, fn, lno, msg, args, exc_info, func, kwargs) + self.handle(record) + return _fail + + +logging.setLoggerClass(Logger) diff --git a/tests/unit/core/test_log_support.py b/tests/unit/core/test_log_support.py new file mode 100644 index 000000000..07ea5946d --- /dev/null +++ b/tests/unit/core/test_log_support.py @@ -0,0 +1,40 @@ +import StringIO +import logging + +from twisted.internet import defer +from twisted.trial import unittest + +from lbrynet.core import log_support + + +class TestLogger(unittest.TestCase): + def raiseError(self): + raise Exception('terrible things happened') + + def triggerErrback(self, log): + d = defer.Deferred() + d.addCallback(lambda _: self.raiseError()) + d.addErrback(log.fail(), 'My message') + d.callback(None) + return d + + def test_can_log_failure(self): + def output_lines(): + return stream.getvalue().split('\n') + + log = log_support.Logger('test') + stream = StringIO.StringIO() + handler = logging.StreamHandler(stream) + handler.setFormatter(logging.Formatter("%(filename)s:%(lineno)d - %(message)s")) + log.addHandler(handler) + + # the line number could change if this file gets refactored + expected_first_line = 'test_log_support.py:17 - My message: terrible things happened' + + # testing the entirety of the message is futile as the + # traceback will depend on the system the test is being run on + # but hopefully these two tests are good enough + d = self.triggerErrback(log) + d.addCallback(lambda _: self.assertEquals(expected_first_line, output_lines()[0])) + d.addCallback(lambda _: self.assertEqual(10, len(output_lines()))) + return d From 20b6b22334d16da5706511ea1c6ba506846a708c Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Sat, 10 Dec 2016 11:28:32 -0800 Subject: [PATCH 11/26] Add callback functionality to log.fail This enables the common pattern of being able to log a failure and then do something. --- lbrynet/core/log_support.py | 20 ++++++++++++------- tests/unit/core/test_log_support.py | 30 ++++++++++++++++++----------- 2 files changed, 32 insertions(+), 18 deletions(-) diff --git a/lbrynet/core/log_support.py b/lbrynet/core/log_support.py index 9f6ad2222..c57c063d6 100644 --- a/lbrynet/core/log_support.py +++ b/lbrynet/core/log_support.py @@ -332,7 +332,7 @@ class LogUploader(object): class Logger(logging.Logger): """A logger that has an extra `fail` method useful for handling twisted failures.""" - def fail(self): + def fail(self, callback=None, *args): """Returns a function to log a failure from an errback. The returned function appends the error message and extracts the traceback from `err`. Example usage: d.addErrback(log.fail(), 'This is an error message') Although odd, making the method call is necessary to extract out useful filename and line number information; otherwise the reported values are from inside twisted's deferred handling code. - Args: + Args: + callback: callable to call after making the log. The first argument + will be the `err` from the deferred + args: extra arguments to pass into `callback` + + Returns: a function that takes the following arguments: err: twisted.python.failure.Failure msg: the message to log, using normal logging string interpolation. - args: the values to substitute into `msg` + msg_args: the values to substitute into `msg` kwargs: set `level` to change from the default ERROR severity. Other keywords are treated as normal log kwargs.
- """ fn, lno, func = findCaller() - def _fail(err, msg, *args, **kwargs): + def _fail(err, msg, *msg_args, **kwargs): level = kwargs.pop('level', logging.ERROR) msg += ": %s" - args += (err.getErrorMessage(),) + msg_args += (err.getErrorMessage(),) exc_info = (err.type, err.value, err.getTracebackObject()) record = self.makeRecord( - self.name, level, fn, lno, msg, args, exc_info, func, kwargs) + self.name, level, fn, lno, msg, msg_args, exc_info, func, kwargs) self.handle(record) + if callback: + callback(err, *args) return _fail diff --git a/tests/unit/core/test_log_support.py b/tests/unit/core/test_log_support.py index 07ea5946d..cf7bdfc27 100644 --- a/tests/unit/core/test_log_support.py +++ b/tests/unit/core/test_log_support.py @@ -1,6 +1,7 @@ import StringIO import logging +import mock from twisted.internet import defer from twisted.trial import unittest @@ -11,30 +12,37 @@ class TestLogger(unittest.TestCase): def raiseError(self): raise Exception('terrible things happened') - def triggerErrback(self, log): + def triggerErrback(self, callback=None): d = defer.Deferred() d.addCallback(lambda _: self.raiseError()) - d.addErrback(log.fail(), 'My message') + d.addErrback(self.log.fail(callback), 'My message') d.callback(None) return d + def setUp(self): + self.log = log_support.Logger('test') + self.stream = StringIO.StringIO() + handler = logging.StreamHandler(self.stream) + handler.setFormatter(logging.Formatter("%(filename)s:%(lineno)d - %(message)s")) + self.log.addHandler(handler) + def test_can_log_failure(self): def output_lines(): - return stream.getvalue().split('\n') - - log = log_support.Logger('test') - stream = StringIO.StringIO() - handler = logging.StreamHandler(stream) - handler.setFormatter(logging.Formatter("%(filename)s:%(lineno)d - %(message)s")) - log.addHandler(handler) + return self.stream.getvalue().split('\n') # the line number could change if this file gets refactored - expected_first_line = 'test_log_support.py:17 - My message: terrible things happened' + expected_first_line = 'test_log_support.py:18 - My message: terrible things happened' # testing the entirety of the message is futile as the # traceback will depend on the system the test is being run on # but hopefully these two tests are good enough - d = self.triggerErrback(log) + d = self.triggerErrback() d.addCallback(lambda _: self.assertEquals(expected_first_line, output_lines()[0])) d.addCallback(lambda _: self.assertEqual(10, len(output_lines()))) return d + + def test_can_log_failure_with_callback(self): + callback = mock.Mock() + d = self.triggerErrback(callback) + d.addCallback(lambda _: callback.assert_called_once_with(mock.ANY)) + return d From 4ca33505b56fe2ea0e49ddbfa1bcf6cd83899ac4 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Tue, 13 Dec 2016 19:58:48 -0600 Subject: [PATCH 12/26] logging: add kwargs to fail() call --- lbrynet/core/log_support.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lbrynet/core/log_support.py b/lbrynet/core/log_support.py index c57c063d6..451a57a13 100644 --- a/lbrynet/core/log_support.py +++ b/lbrynet/core/log_support.py @@ -332,7 +332,7 @@ class LogUploader(object): class Logger(logging.Logger): """A logger that has an extra `fail` method useful for handling twisted failures.""" - def fail(self, callback=None, *args): + def fail(self, callback=None, *args, **kwargs): """Returns a function to log a failure from an errback. 
The returned function appends the error message and extracts @@ -355,20 +355,20 @@ class Logger(logging.Logger): err: twisted.python.failure.Failure msg: the message to log, using normal logging string interpolation. msg_args: the values to substitute into `msg` - kwargs: set `level` to change from the default ERROR severity. Other + msg_kwargs: set `level` to change from the default ERROR severity. Other keywords are treated as normal log kwargs. """ fn, lno, func = findCaller() - def _fail(err, msg, *msg_args, **kwargs): - level = kwargs.pop('level', logging.ERROR) + def _fail(err, msg, *msg_args, **msg_kwargs): + level = msg_kwargs.pop('level', logging.ERROR) msg += ": %s" msg_args += (err.getErrorMessage(),) exc_info = (err.type, err.value, err.getTracebackObject()) record = self.makeRecord( - self.name, level, fn, lno, msg, msg_args, exc_info, func, kwargs) + self.name, level, fn, lno, msg, msg_args, exc_info, func, msg_kwargs) self.handle(record) if callback: - callback(err, *args) + callback(err, *args, **kwargs) return _fail From fa8190356ad22ffeba1947793aaab17085e08ebe Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Sat, 10 Dec 2016 11:42:57 -0800 Subject: [PATCH 13/26] replace log_support.failure calls --- lbrynet/lbrynet_daemon/Daemon.py | 33 ++++++++++++------------- lbrynet/lbrynet_daemon/DaemonControl.py | 6 ++--- lbrynet/lbrynet_daemon/Publisher.py | 20 +++++++++------ lbrynet/lbrynet_daemon/auth/server.py | 11 +++------ lbrynet/reflector/client/blob.py | 8 +++--- 5 files changed, 39 insertions(+), 39 deletions(-) diff --git a/lbrynet/lbrynet_daemon/Daemon.py b/lbrynet/lbrynet_daemon/Daemon.py index 8b8929735..30ddea1e7 100644 --- a/lbrynet/lbrynet_daemon/Daemon.py +++ b/lbrynet/lbrynet_daemon/Daemon.py @@ -602,14 +602,14 @@ class Daemon(AuthJSONRPCServer): d = defer.succeed(None) d.addCallback(lambda _: self._stop_server()) - d.addErrback(log_support.failure, log, 'Failure while shutting down: %s') + d.addErrback(log.fail(), 'Failure while shutting down') d.addCallback(lambda _: self._stop_reflector()) - d.addErrback(log_support.failure, log, 'Failure while shutting down: %s') + d.addErrback(log.fail(), 'Failure while shutting down') d.addCallback(lambda _: self._stop_file_manager()) - d.addErrback(log_support.failure, log, 'Failure while shutting down: %s') + d.addErrback(log.fail(), 'Failure while shutting down') if self.session is not None: d.addCallback(lambda _: self.session.shut_down()) - d.addErrback(log_support.failure, log, 'Failure while shutting down: %s') + d.addErrback(log.fail(), 'Failure while shutting down') return d def _update_settings(self, settings): @@ -1468,9 +1468,20 @@ class Daemon(AuthJSONRPCServer): return self._render_response(None, BAD_REQUEST) d = self._resolve_name(name, force_refresh=force) + # TODO: this is the rpc call that returns a server.failure. + # what is up with that? d.addCallbacks( lambda info: self._render_response(info, OK_CODE), - errback=handle_failure, errbackArgs=('Failed to resolve name: %s',) + # TODO: Is server.failure a module? It looks like it: + # + # In [1]: import twisted.web.server + # In [2]: twisted.web.server.failure + # Out[2]: + # + # If so, maybe we should return something else. + errback=log.fail(lambda: server.failure), + errbackArgs=('Failed to resolve name: %s',) ) return d @@ -2692,18 +2703,6 @@ def get_lbry_file_search_value(p): raise NoValidSearch() -def handle_failure(err, msg): - log_support.failure(err, log, msg) - # TODO: Is this a module?
It looks like it: - # - # In [1]: import twisted.web.server - # In [2]: twisted.web.server.failure - # Out[2]: - # - # If so, maybe we should return something else. - return server.failure - - def run_reflector_factory(factory): reflector_server = random.choice(conf.settings.reflector_servers) reflector_address, reflector_port = reflector_server diff --git a/lbrynet/lbrynet_daemon/DaemonControl.py b/lbrynet/lbrynet_daemon/DaemonControl.py index c880b35d7..4975bd93e 100644 --- a/lbrynet/lbrynet_daemon/DaemonControl.py +++ b/lbrynet/lbrynet_daemon/DaemonControl.py @@ -117,9 +117,8 @@ def update_settings_from_args(args): settings.update(to_pass) -def log_and_kill(failure, analytics_manager): +def kill(failure, analytics_manager): analytics_manager.send_server_startup_error(failure.getErrorMessage() + " " + str(failure)) - log_support.failure(failure, log, 'Failed to startup: %s') reactor.callFromThread(reactor.stop) @@ -130,14 +129,13 @@ def start_server_and_listen(launchui, use_auth, analytics_manager): launchui: set to true to open a browser window use_auth: set to true to enable http authentication analytics_manager: to send analytics - kwargs: passed along to `DaemonServer().start()` """ daemon_server = DaemonServer(analytics_manager) d = daemon_server.start(use_auth) if launchui: d.addCallback(lambda _: webbrowser.open(settings.UI_ADDRESS)) d.addCallback(lambda _: analytics_manager.send_server_startup_success()) - d.addErrback(log_and_kill, analytics_manager) + d.addErrback(log.fail(kill, analytics_manager), 'Failed to startup') if __name__ == "__main__": diff --git a/lbrynet/lbrynet_daemon/Publisher.py b/lbrynet/lbrynet_daemon/Publisher.py index 2f973a5fc..a52934833 100644 --- a/lbrynet/lbrynet_daemon/Publisher.py +++ b/lbrynet/lbrynet_daemon/Publisher.py @@ -5,7 +5,6 @@ import random from twisted.internet import threads, defer, reactor -from lbrynet.core import log_support from lbrynet.lbryfilemanager.EncryptedFileCreator import create_lbry_file from lbrynet.lbryfile.StreamDescriptor import publish_sd_blob from lbrynet.metadata.Metadata import Metadata @@ -68,7 +67,12 @@ class Publisher(object): d.addCallback(lambda _: self._claim_name()) d.addCallback(lambda _: self.set_status()) d.addCallback(lambda _: self.start_reflector()) - d.addCallbacks(lambda _: _show_result(), self._show_publish_error) + d.addCallbacks( + lambda _: _show_result(), + errback=log.fail(self._throw_publish_error), + errbackArgs=( + "An error occurred publishing %s to %s", self.file_name, self.publish_name) + ) return d def start_reflector(self): @@ -151,11 +155,13 @@ class Publisher(object): self.metadata['content_type'] = get_content_type(filename) self.metadata['ver'] = Metadata.current_version - def _show_publish_error(self, err): - log_support.failure( - err, log, "An error occurred publishing %s to %s. Error: %s.", - self.file_name, self.publish_name) - return defer.fail(Exception("Publish failed")) + def _throw_publish_error(self, err): + # TODO: I'm not a fan of the log and re-throw, especially when + # the new exception is more generic. Look over this to + # see if there is a reason not to remove the errback + # handler and allow the original exception to move up + # the stack. 
+ raise Exception("Publish failed") def get_content_type(filename): diff --git a/lbrynet/lbrynet_daemon/auth/server.py b/lbrynet/lbrynet_daemon/auth/server.py index 1376c024d..e60e48a83 100644 --- a/lbrynet/lbrynet_daemon/auth/server.py +++ b/lbrynet/lbrynet_daemon/auth/server.py @@ -10,7 +10,6 @@ from twisted.python.failure import Failure from txjsonrpc import jsonrpclib from lbrynet.core.Error import InvalidAuthenticationToken, InvalidHeaderError, SubhandlerError from lbrynet.conf import settings -from lbrynet.core import log_support from lbrynet.lbrynet_daemon.auth.util import APIKey, get_auth_message from lbrynet.lbrynet_daemon.auth.client import LBRY_SECRET @@ -117,11 +116,6 @@ class AuthJSONRPCServer(AuthorizedBase): request.write(fault) request.finish() - def _log_and_render_error(self, failure, request, message=None, **kwargs): - msg = message or "API Failure: %s" - log_support.failure(Failure(failure), log, msg) - self._render_error(failure, request, **kwargs) - def render(self, request): notify_finish = request.notifyFinish() assert self._check_headers(request), InvalidHeaderError @@ -192,7 +186,10 @@ class AuthJSONRPCServer(AuthorizedBase): # cancel the response if the connection is broken notify_finish.addErrback(self._response_failed, d) d.addCallback(self._callback_render, request, version, reply_with_next_secret) - d.addErrback(self._log_and_render_error, request, version=version) + d.addErrback( + log.fail(self._render_error, request, version=version), + 'Failed to process %s', function_name + ) return server.NOT_DONE_YET def _register_user_session(self, session_id): diff --git a/lbrynet/reflector/client/blob.py b/lbrynet/reflector/client/blob.py index 4f999081a..62bb49938 100644 --- a/lbrynet/reflector/client/blob.py +++ b/lbrynet/reflector/client/blob.py @@ -5,7 +5,6 @@ from twisted.protocols.basic import FileSender from twisted.internet.protocol import Protocol, ClientFactory from twisted.internet import defer, error -from lbrynet.core import log_support from lbrynet.reflector.common import IncompleteResponse @@ -153,8 +152,7 @@ class BlobReflectorClient(Protocol): 'blob_size': self.next_blob_to_send.length })) - def log_fail_and_disconnect(self, err, blob_hash): - log_support.failure(err, log, "Error reflecting blob %s: %s", blob_hash) + def disconnect(self, err): self.transport.loseConnection() def send_next_request(self): @@ -172,7 +170,9 @@ class BlobReflectorClient(Protocol): # send the server the next blob hash + length d.addCallbacks( lambda _: self.send_blob_info(), - lambda err: self.log_fail_and_disconnect(err, blob_hash)) + errback=log.fail(self.disconnect), + errbackArgs=("Error reflecting blob %s", blob_hash) + ) return d else: # close connection From 740fad5cbef298e5a2b537d442b1a73b5e4bdaf5 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Tue, 13 Dec 2016 18:11:04 -0600 Subject: [PATCH 14/26] Refactor dht.node.iterativeFind Move nested functions into a helper class. 
Add new, smaller functions to increase readability --- lbrynet/dht/node.py | 378 ++++++++++++++++++++++++-------------------- 1 file changed, 210 insertions(+), 168 deletions(-) diff --git a/lbrynet/dht/node.py b/lbrynet/dht/node.py index d51956077..866f6c82e 100644 --- a/lbrynet/dht/node.py +++ b/lbrynet/dht/node.py @@ -34,6 +34,7 @@ def rpcmethod(func): func.rpcmethod = True return func + class Node(object): """ Local node in the Kademlia network @@ -591,176 +592,11 @@ class Node(object): # This is used during the bootstrap process; node ID's are most probably fake shortlist = startupShortlist - # List of active queries; len() indicates number of active probes - # - # n.b: using lists for these variables, because Python doesn't - # allow binding a new value to a name in an enclosing - # (non-global) scope - activeProbes = [] - # List of contact IDs that have already been queried - alreadyContacted = [] - # Probes that were active during the previous iteration - # A list of found and known-to-be-active remote nodes - activeContacts = [] - # This should only contain one entry; the next scheduled iteration call - pendingIterationCalls = [] - prevClosestNode = [None] - findValueResult = {} - slowNodeCount = [0] - - def extendShortlist(responseTuple): - """ @type responseMsg: kademlia.msgtypes.ResponseMessage """ - # The "raw response" tuple contains the response message, - # and the originating address info - responseMsg = responseTuple[0] - originAddress = responseTuple[1] # tuple: (ip adress, udp port) - # Make sure the responding node is valid, and abort the operation if it isn't - if responseMsg.nodeID in activeContacts or responseMsg.nodeID == self.id: - return responseMsg.nodeID - - # Mark this node as active - if responseMsg.nodeID in shortlist: - # Get the contact information from the shortlist... 
- aContact = shortlist[shortlist.index(responseMsg.nodeID)] - else: - # If it's not in the shortlist; we probably used a fake ID to reach it - # - reconstruct the contact, using the real node ID this time - aContact = Contact( - responseMsg.nodeID, originAddress[0], originAddress[1], self._protocol) - activeContacts.append(aContact) - # This makes sure "bootstrap"-nodes with "fake" IDs don't get queried twice - if responseMsg.nodeID not in alreadyContacted: - alreadyContacted.append(responseMsg.nodeID) - # Now grow extend the (unverified) shortlist with the returned contacts - result = responseMsg.response - #TODO: some validation on the result (for guarding against attacks) - # If we are looking for a value, first see if this result is the value - # we are looking for before treating it as a list of contact triples - if findValue is True and key in result and not 'contacts' in result: - # We have found the value - findValueResult[key] = result[key] - findValueResult['from_peer'] = aContact.address - else: - if findValue is True: - # We are looking for a value, and the remote node didn't have it - # - mark it as the closest "empty" node, if it is - if 'closestNodeNoValue' in findValueResult: - is_closer = ( - self._routingTable.distance(key, responseMsg.nodeID) < - self._routingTable.distance(key, activeContacts[0].id)) - if is_closer: - findValueResult['closestNodeNoValue'] = aContact - else: - findValueResult['closestNodeNoValue'] = aContact - contactTriples = result['contacts'] - else: - contactTriples = result - for contactTriple in contactTriples: - if isinstance(contactTriple, (list, tuple)) and len(contactTriple) == 3: - testContact = Contact( - contactTriple[0], contactTriple[1], contactTriple[2], self._protocol) - if testContact not in shortlist: - shortlist.append(testContact) - return responseMsg.nodeID - - def removeFromShortlist(failure): - """ @type failure: twisted.python.failure.Failure """ - failure.trap(protocol.TimeoutError) - deadContactID = failure.getErrorMessage() - if deadContactID in shortlist: - shortlist.remove(deadContactID) - return deadContactID - - def cancelActiveProbe(contactID): - activeProbes.pop() - if len(activeProbes) <= constants.alpha/2 and len(pendingIterationCalls): - # Force the iteration - pendingIterationCalls[0].cancel() - del pendingIterationCalls[0] - searchIteration() - - def log_error(err): - log.error(err.getErrorMessage()) - - # Send parallel, asynchronous FIND_NODE RPCs to the shortlist of contacts - def searchIteration(): - slowNodeCount[0] = len(activeProbes) - # TODO: move sort_key to be a method on the class - def sort_key(firstContact, secondContact, targetKey=key): - return cmp( - self._routingTable.distance(firstContact.id, targetKey), - self._routingTable.distance(secondContact.id, targetKey) - ) - # Sort the discovered active nodes from closest to furthest - activeContacts.sort(sort_key) - # This makes sure a returning probe doesn't force calling this function by mistake - while len(pendingIterationCalls): - del pendingIterationCalls[0] - # See if should continue the search - if key in findValueResult: - outerDf.callback(findValueResult) - return - elif len(activeContacts) and findValue == False: - is_all_done = ( - len(activeContacts) >= constants.k or - ( - activeContacts[0] == prevClosestNode[0] and - len(activeProbes) == slowNodeCount[0] - ) - ) - if is_all_done: - # TODO: Re-send the FIND_NODEs to all of the k closest nodes not already queried - # - # Ok, we're done; either we have accumulated k - # active contacts or no 
improvement in closestNode - # has been noted - outerDf.callback(activeContacts) - return - # The search continues... - if len(activeContacts): - prevClosestNode[0] = activeContacts[0] - contactedNow = 0 - shortlist.sort(sort_key) - # Store the current shortList length before contacting other nodes - prevShortlistLength = len(shortlist) - for contact in shortlist: - if contact.id not in alreadyContacted: - activeProbes.append(contact.id) - rpcMethod = getattr(contact, rpc) - df = rpcMethod(key, rawResponse=True) - df.addCallback(extendShortlist) - df.addErrback(removeFromShortlist) - df.addCallback(cancelActiveProbe) - df.addErrback(log_error) - alreadyContacted.append(contact.id) - contactedNow += 1 - if contactedNow == constants.alpha: - break - should_lookup_active_calls = ( - len(activeProbes) > slowNodeCount[0] or - ( - len(shortlist) < constants.k and - len(activeContacts) < len(shortlist) and - len(activeProbes) > 0 - ) - ) - if should_lookup_active_calls: - # Schedule the next iteration if there are any active - # calls (Kademlia uses loose parallelism) - call = twisted.internet.reactor.callLater( - constants.iterativeLookupDelay, searchIteration) #IGNORE:E1101 - pendingIterationCalls.append(call) - # Check for a quick contact response that made an update to the shortList - elif prevShortlistLength < len(shortlist): - # Ensure that the closest contacts are taken from the updated shortList - searchIteration() - else: - # If no probes were sent, there will not be any improvement, so we're done - outerDf.callback(activeContacts) - outerDf = defer.Deferred() + + helper = _IterativeFindHelper(self, outerDf, shortlist, key, findValue, rpc) # Start the iterations - searchIteration() + helper.searchIteration() return outerDf def _refreshNode(self): @@ -796,6 +632,212 @@ class Node(object): return df +# This was originally a set of nested methods in _iterativeFind +# but they have been moved into this helper class in order to +# have better scoping and readability +class _IterativeFindHelper(object): + # TODO: use polymorphism to search for a value or node + # instead of using a find_value flag + def __init__(self, node, outer_d, shortlist, key, find_value, rpc): + self.node = node + self.outer_d = outer_d + self.shortlist = shortlist + self.key = key + self.find_value = find_value + self.rpc = rpc + # List of active queries; len() indicates number of active probes + # + # n.b: using lists for these variables, because Python doesn't + # allow binding a new value to a name in an enclosing + # (non-global) scope + self.active_probes = [] + # List of contact IDs that have already been queried + self.already_contacted = [] + # Probes that were active during the previous iteration + # A list of found and known-to-be-active remote nodes + self.active_contacts = [] + # This should only contain one entry; the next scheduled iteration call + self.pending_iteration_calls = [] + self.prev_closest_node = [None] + self.find_value_result = {} + self.slow_node_count = [0] + + def extendShortlist(self, responseTuple): + """ @type responseMsg: kademlia.msgtypes.ResponseMessage """ + # The "raw response" tuple contains the response message, + # and the originating address info + responseMsg = responseTuple[0] + originAddress = responseTuple[1] # tuple: (ip address, udp port) + # Make sure the responding node is valid, and abort the operation if it isn't + if responseMsg.nodeID in self.active_contacts or responseMsg.nodeID == self.node.id: + return responseMsg.nodeID + + # Mark this node as active + aContact =
self._getActiveContact(responseMsg, originAddress) + self.active_contacts.append(aContact) + + # This makes sure "bootstrap"-nodes with "fake" IDs don't get queried twice + if responseMsg.nodeID not in self.already_contacted: + self.already_contacted.append(responseMsg.nodeID) + # Now extend the (unverified) shortlist with the returned contacts + result = responseMsg.response + #TODO: some validation on the result (for guarding against attacks) + # If we are looking for a value, first see if this result is the value + # we are looking for before treating it as a list of contact triples + if self.find_value is True and self.key in result and not 'contacts' in result: + # We have found the value + self.find_value_result[self.key] = result[self.key] + self.find_value_result['from_peer'] = aContact.address + else: + if self.find_value is True: + self._setClosestNodeValue(responseMsg, aContact) + self._keepSearching(result) + return responseMsg.nodeID + + def _getActiveContact(self, responseMsg, originAddress): + if responseMsg.nodeID in self.shortlist: + # Get the contact information from the shortlist... + return self.shortlist[self.shortlist.index(responseMsg.nodeID)] + else: + # If it's not in the shortlist; we probably used a fake ID to reach it + # - reconstruct the contact, using the real node ID this time + return Contact( + responseMsg.nodeID, originAddress[0], originAddress[1], self.node._protocol) + + def _keepSearching(self, result): + contactTriples = self._getContactTriples(result) + for contactTriple in contactTriples: + self._addIfValid(contactTriple) + + def _getContactTriples(self, result): + if self.find_value is True: + return result['contacts'] + else: + return result + + def _setClosestNodeValue(self, responseMsg, aContact): + # We are looking for a value, and the remote node didn't have it + # - mark it as the closest "empty" node, if it is + if 'closestNodeNoValue' in self.find_value_result: + if self._is_closer(responseMsg): + self.find_value_result['closestNodeNoValue'] = aContact + else: + self.find_value_result['closestNodeNoValue'] = aContact + + def _is_closer(self, responseMsg): + return ( + self.node._routingTable.distance(self.key, responseMsg.nodeID) < + self.node._routingTable.distance(self.key, self.active_contacts[0].id) + ) + + def _addIfValid(self, contactTriple): + if isinstance(contactTriple, (list, tuple)) and len(contactTriple) == 3: + testContact = Contact( + contactTriple[0], contactTriple[1], contactTriple[2], self.node._protocol) + if testContact not in self.shortlist: + self.shortlist.append(testContact) + + def removeFromShortlist(self, failure): + """ @type failure: twisted.python.failure.Failure """ + failure.trap(protocol.TimeoutError) + deadContactID = failure.getErrorMessage() + if deadContactID in self.shortlist: + self.shortlist.remove(deadContactID) + return deadContactID + + def cancelActiveProbe(self, contactID): + self.active_probes.pop() + if len(self.active_probes) <= constants.alpha/2 and len(self.pending_iteration_calls): + # Force the iteration + self.pending_iteration_calls[0].cancel() + del self.pending_iteration_calls[0] + self.searchIteration() + + # Send parallel, asynchronous FIND_NODE RPCs to the shortlist of contacts + def searchIteration(self): + self.slow_node_count[0] = len(self.active_probes) + # TODO: move sort_key to be a method on the class + def sort_key(firstContact, secondContact, targetKey=self.key): + return cmp( + self.node._routingTable.distance(firstContact.id, targetKey),
self.node._routingTable.distance(secondContact.id, targetKey) + ) + # Sort the discovered active nodes from closest to furthest + self.active_contacts.sort(sort_key) + # This makes sure a returning probe doesn't force calling this function by mistake + while len(self.pending_iteration_calls): + del self.pending_iteration_calls[0] + # See if we should continue the search + if self.key in self.find_value_result: + self.outer_d.callback(self.find_value_result) + return + elif len(self.active_contacts) and self.find_value == False: + if self._is_all_done(): + # TODO: Re-send the FIND_NODEs to all of the k closest nodes not already queried + # + # Ok, we're done; either we have accumulated k active + # contacts or no improvement in closestNode has been + # noted + self.outer_d.callback(self.active_contacts) + return + # The search continues... + if len(self.active_contacts): + self.prev_closest_node[0] = self.active_contacts[0] + contactedNow = 0 + self.shortlist.sort(sort_key) + # Store the current shortList length before contacting other nodes + prevShortlistLength = len(self.shortlist) + for contact in self.shortlist: + if contact.id not in self.already_contacted: + self._probeContact(contact) + contactedNow += 1 + if contactedNow == constants.alpha: + break + if self._should_lookup_active_calls(): + # Schedule the next iteration if there are any active + # calls (Kademlia uses loose parallelism) + call = twisted.internet.reactor.callLater( + constants.iterativeLookupDelay, self.searchIteration) #IGNORE:E1101 + self.pending_iteration_calls.append(call) + # Check for a quick contact response that made an update to the shortList + elif prevShortlistLength < len(self.shortlist): + # Ensure that the closest contacts are taken from the updated shortList + self.searchIteration() + else: + # If no probes were sent, there will not be any improvement, so we're done + self.outer_d.callback(self.active_contacts) + + def _probeContact(self, contact): + self.active_probes.append(contact.id) + rpcMethod = getattr(contact, self.rpc) + df = rpcMethod(self.key, rawResponse=True) + df.addCallback(self.extendShortlist) + df.addErrback(self.removeFromShortlist) + df.addCallback(self.cancelActiveProbe) + df.addErrback(log.fail(), 'Failed to contact %s', contact) + self.already_contacted.append(contact.id) + + def _should_lookup_active_calls(self): + return ( + len(self.active_probes) > self.slow_node_count[0] or + ( + len(self.shortlist) < constants.k and + len(self.active_contacts) < len(self.shortlist) and + len(self.active_probes) > 0 + ) + ) + + def _is_all_done(self): + return ( + len(self.active_contacts) >= constants.k or + ( + self.active_contacts[0] == self.prev_closest_node[0] and + len(self.active_probes) == self.slow_node_count[0] + ) + ) + + + def main(): parser = argparse.ArgumentParser(description="Launch a dht node") parser.add_argument("udp_port", help="The UDP port on which the node will listen", From 0084d4684fc4d51bfe7ae561ca2bd0e647d29ae4 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Tue, 13 Dec 2016 20:53:24 -0600 Subject: [PATCH 15/26] Add distance optimization --- lbrynet/dht/node.py | 88 +++++++++++++++++++++++++++++++++++++-------- 1 file changed, 74 insertions(+), 14 deletions(-) diff --git a/lbrynet/dht/node.py b/lbrynet/dht/node.py index 866f6c82e..6f8d12a2b 100644 --- a/lbrynet/dht/node.py +++ b/lbrynet/dht/node.py @@ -6,10 +6,16 @@ # # The docstrings in this module contain epytext markup; API documentation # may be created by processing this file with epydoc:
http://epydoc.sf.net - -import hashlib, random, struct, time, binascii import argparse +import binascii +import hashlib +import operator +import random +import struct +import time + from twisted.internet import defer, error + import constants import routingtable import datastore @@ -645,6 +651,9 @@ class _IterativeFindHelper(object): self.key = key self.find_value = find_value self.rpc = rpc + # all distance operations in this class only care about the distance + # to self.key, so this makes it easier to calculate those + self.distance = Distance(key) # List of active queries; len() indicates number of active probes # # n.b: using lists for these variables, because Python doesn't @@ -725,10 +734,7 @@ class _IterativeFindHelper(object): self.find_value_result['closestNodeNoValue'] = aContact def _is_closer(self, responseMsg): - return ( - self.node._routingTable.distance(self.key, responseMsg.nodeID) < - self.node._routingTable.distance(self.key, self.active_contacts[0].id) - ) + return self.distance.is_closer(responseMsg.nodeID, self.active_contacts[0].id) def _addIfValid(self, contactTriple): if isinstance(contactTriple, (list, tuple)) and len(contactTriple) == 3: @@ -753,17 +759,15 @@ class _IterativeFindHelper(object): del self.pending_iteration_calls[0] self.searchIteration() + def sortByDistance(self, contact_list): + """Sort the list of contacts in order by distance from key""" + ExpensiveSort(contact_list, self.distance.to_contact).sort() + # Send parallel, asynchronous FIND_NODE RPCs to the shortlist of contacts def searchIteration(self): self.slow_node_count[0] = len(self.active_probes) - # TODO: move sort_key to be a method on the class - def sort_key(firstContact, secondContact, targetKey=self.key): - return cmp( - self.node._routingTable.distance(firstContact.id, targetKey), - self.node._routingTable.distance(secondContact.id, targetKey) - ) # Sort the discovered active nodes from closest to furthest - self.active_contacts.sort(sort_key) + self.sortByDistance(self.active_contacts) # This makes sure a returning probe doesn't force calling this function by mistake while len(self.pending_iteration_calls): del self.pending_iteration_calls[0] @@ -784,7 +788,7 @@ class _IterativeFindHelper(object): if len(self.active_contacts): self.prev_closest_node[0] = self.active_contacts[0] contactedNow = 0 - self.shortlist.sort(sort_key) + self.sortByDistance(self.shortlist) # Store the current shortList length before contacting other nodes prevShortlistLength = len(self.shortlist) for contact in self.shortlist: @@ -837,6 +841,62 @@ class _IterativeFindHelper(object): ) + +class Distance(object): + """Calculate the XOR result between two string variables. + + Frequently we re-use one of the points so as an optimization + we pre-calculate the long value of that point. + """ + def __init__(self, key): + self.key = key + self.val_key_one = long(key.encode('hex'), 16) + + def __call__(self, key_two): + val_key_two = long(key_two.encode('hex'), 16) + return self.val_key_one ^ val_key_two + + def is_closer(self, a, b): + """Returns true if `a` is closer to `key` than `b` is""" + return self(a) < self(b) + + def to_contact(self, contact): + """A convenience function for calculating the distance to a contact""" + return self(contact.id) + + +class ExpensiveSort(object): + """Sort a list in place. + + The result of `key(item)` is cached for each item in the `to_sort` + list as an optimization. This can be useful when `key` is
+ + Attributes: + to_sort: a list of items to sort + key: callable, like `key` in normal python sort + attr: the attribute name used to cache the value on each item. + """ + def __init__(self, to_sort, key, attr='__value'): + self.to_sort = to_sort + self.key = key + self.attr = attr + + def sort(self): + self._cacheValues() + self._sortByValue() + self._removeValue() + + def _cacheValues(self): + for item in self.to_sort: + setattr(item, self.attr, self.key(item)) + + def _sortByValue(self): + self.to_sort.sort(key=operator.attrgetter(self.attr)) + + def _removeValue(self): + for item in self.to_sort: + delattr(item, self.attr) + def main(): parser = argparse.ArgumentParser(description="Launch a dht node") From 323bccb0ae1dfaad690e14f3b338c6007367c023 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Tue, 13 Dec 2016 21:00:24 -0600 Subject: [PATCH 16/26] another distance optimization --- lbrynet/dht/node.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lbrynet/dht/node.py b/lbrynet/dht/node.py index 6f8d12a2b..8da5272c8 100644 --- a/lbrynet/dht/node.py +++ b/lbrynet/dht/node.py @@ -267,9 +267,7 @@ class Node(object): def requestPeers(contacts): if self.externalIP is not None and len(contacts) >= constants.k: - is_closer = ( - self._routingTable.distance(blob_hash, self.id) < - self._routingTable.distance(blob_hash, contacts[-1].id)) + is_closer = Distance(blob_hash).is_closer(self.id, contacts[-1].id) if is_closer: contacts.pop() self.store(blob_hash, value, self_store=True, originalPublisherID=self.id) From 250831a86aadb81f2794a16d25b9b4ade8781738 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Tue, 13 Dec 2016 21:01:01 -0600 Subject: [PATCH 17/26] remove unused distance function from routingtable --- lbrynet/dht/routingtable.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/lbrynet/dht/routingtable.py b/lbrynet/dht/routingtable.py index 7d7334e7c..b9dff61f2 100644 --- a/lbrynet/dht/routingtable.py +++ b/lbrynet/dht/routingtable.py @@ -31,16 +31,6 @@ class RoutingTable(object): @type contact: kademlia.contact.Contact """ - def distance(self, keyOne, keyTwo): - """ Calculate the XOR result between two string variables - - @return: XOR result of two long variables - @rtype: long - """ - valKeyOne = long(keyOne.encode('hex'), 16) - valKeyTwo = long(keyTwo.encode('hex'), 16) - return valKeyOne ^ valKeyTwo - def findCloseNodes(self, key, count, _rpcNodeID=None): """ Finds a number of known nodes closest to the node/value with the specified key. 
From 4f3b5cd8020473d7210a6d2cbf6800ff04faaf4a Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Wed, 14 Dec 2016 17:01:00 -0600 Subject: [PATCH 18/26] Better logging on DHT errors Timeout errors are common on the dht so log those at debug, but other errors need to (potentially) receive more attention --- lbrynet/dht/node.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/lbrynet/dht/node.py b/lbrynet/dht/node.py index 8da5272c8..6dd6cadd3 100644 --- a/lbrynet/dht/node.py +++ b/lbrynet/dht/node.py @@ -235,9 +235,14 @@ class Node(object): known_nodes = {} def log_error(err, n): - log.debug("error storing blob_hash %s at %s", binascii.hexlify(blob_hash), str(n)) - log.debug(err.getErrorMessage()) - log.debug(err.getTraceback()) + if err.check(protocol.TimeoutError): + log.debug( + "Timeout while storing blob_hash %s at %s", + binascii.hexlify(blob_hash), n) + else: + log.error( + "Unexpected error while storing blob_hash %s at %s: %s", + binascii.hexlify(blob_hash), n, err.getErrorMessage()) def log_success(res): log.debug("Response to store request: %s", str(res)) From fec917b9c1ec221905c1a69961eedf63112f3999 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Sat, 10 Dec 2016 12:01:29 -0800 Subject: [PATCH 19/26] rename platform to system_info --- lbrynet/analytics/manager.py | 2 +- lbrynet/core/{Platform.py => system_info.py} | 0 lbrynet/lbrynet_daemon/Daemon.py | 5 +++-- 3 files changed, 4 insertions(+), 3 deletions(-) rename lbrynet/core/{Platform.py => system_info.py} (100%) diff --git a/lbrynet/analytics/manager.py b/lbrynet/analytics/manager.py index e6c9fbc15..ad899ba98 100644 --- a/lbrynet/analytics/manager.py +++ b/lbrynet/analytics/manager.py @@ -3,7 +3,7 @@ from lbrynet.core import looping_call_manager from twisted.internet import defer from twisted.internet import task -from lbrynet.core.Platform import get_platform +from lbrynet.core.system_info import get_platform from lbrynet.conf import settings import constants diff --git a/lbrynet/core/Platform.py b/lbrynet/core/system_info.py similarity index 100% rename from lbrynet/core/Platform.py rename to lbrynet/core/system_info.py diff --git a/lbrynet/lbrynet_daemon/Daemon.py b/lbrynet/lbrynet_daemon/Daemon.py index 30ddea1e7..d7139937b 100644 --- a/lbrynet/lbrynet_daemon/Daemon.py +++ b/lbrynet/lbrynet_daemon/Daemon.py @@ -39,7 +39,8 @@ from lbrynet.lbrynet_daemon.Downloader import GetStream from lbrynet.lbrynet_daemon.Publisher import Publisher from lbrynet.lbrynet_daemon.ExchangeRateManager import ExchangeRateManager from lbrynet.lbrynet_daemon.auth.server import AuthJSONRPCServer -from lbrynet.core import log_support, utils, Platform +from lbrynet.core import log_support, utils +from lbrynet.core import system_info from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier, download_sd_blob from lbrynet.core.StreamDescriptor import BlobStreamDescriptorReader from lbrynet.core.Session import Session @@ -354,7 +355,7 @@ class Daemon(AuthJSONRPCServer): def _get_platform(self): if self.platform is None: - self.platform = Platform.get_platform() + self.platform = system_info.get_platform() self.platform["ui_version"] = self.lbry_ui_manager.loaded_git_version return self.platform From fa29c92760196cd8d2ee2ccde966ed8be1ed1507 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Wed, 14 Dec 2016 10:33:14 -0600 Subject: [PATCH 20/26] Add setting to disable reuploading to reflector --- lbrynet/conf.py | 1 + lbrynet/lbryfilemanager/EncryptedFileDownloader.py | 10 +++++++--- 2 files
changed, 8 insertions(+), 3 deletions(-) diff --git a/lbrynet/conf.py b/lbrynet/conf.py index 2806b25d5..285956def 100644 --- a/lbrynet/conf.py +++ b/lbrynet/conf.py @@ -182,6 +182,7 @@ ENVIRONMENT = Env( # all of your credits. API_INTERFACE=(str, "localhost"), bittrex_feed=(str, "https://bittrex.com/api/v1.1/public/getmarkethistory"), + reflector_reupload=(bool, True), ) diff --git a/lbrynet/lbryfilemanager/EncryptedFileDownloader.py b/lbrynet/lbryfilemanager/EncryptedFileDownloader.py index ff9355e39..91f855e90 100644 --- a/lbrynet/lbryfilemanager/EncryptedFileDownloader.py +++ b/lbrynet/lbryfilemanager/EncryptedFileDownloader.py @@ -74,10 +74,8 @@ class ManagedEncryptedFileDownloader(EncryptedFileSaver): d.addCallbacks(_save_claim_id, lambda err: _notify_bad_claim(name, txid, nout)) return d - reflector_server = random.choice(settings.reflector_servers) - d.addCallback(_save_stream_info) - d.addCallback(lambda _: reupload.check_and_restore_availability(self, reflector_server)) + d.addCallback(lambda _: self._reupload()) d.addCallback(lambda _: self.lbry_file_manager.get_lbry_file_status(self)) def restore_status(status): @@ -92,6 +90,12 @@ class ManagedEncryptedFileDownloader(EncryptedFileSaver): d.addCallback(restore_status) return d + def _reupload(self): + if not settings.reflector_reupload: + return + reflector_server = random.choice(settings.reflector_servers) + return reupload.check_and_restore_availability(self, reflector_server) + def stop(self, err=None, change_status=True): def set_saving_status_done(): From 781b915717aa8efdb8c8fe0091a19e1de2a08427 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Wed, 14 Dec 2016 11:14:03 -0600 Subject: [PATCH 21/26] remove unused check_consistency function --- lbrynet/core/BlobManager.py | 75 +------------------------------------ 1 file changed, 1 insertion(+), 74 deletions(-) diff --git a/lbrynet/core/BlobManager.py b/lbrynet/core/BlobManager.py index 7b5c2fad2..4d174a8e7 100644 --- a/lbrynet/core/BlobManager.py +++ b/lbrynet/core/BlobManager.py @@ -9,10 +9,10 @@ from twisted.enterprise import adbapi from lbrynet.core.HashBlob import BlobFile, TempBlob, BlobFileCreator, TempBlobCreator from lbrynet.core.server.DHTHashAnnouncer import DHTHashSupplier from lbrynet.core.utils import is_valid_blobhash -from lbrynet.core.cryptoutils import get_lbry_hash_obj from lbrynet.core.Error import NoSuchBlobError from lbrynet.core.sqlite_helpers import rerun_if_locked + log = logging.getLogger(__name__) @@ -52,9 +52,6 @@ class BlobManager(DHTHashSupplier): def get_blob_length(self, blob_hash): pass - def check_consistency(self): - pass - def blob_requested(self, blob_hash): pass @@ -188,9 +185,6 @@ class DiskBlobManager(BlobManager): def get_blob_length(self, blob_hash): return self._get_blob_length(blob_hash) - def check_consistency(self): - return self._check_consistency() - def get_all_verified_blobs(self): d = self._get_all_verified_blob_hashes() d.addCallback(self.completed_blobs) @@ -382,73 +376,6 @@ class DiskBlobManager(BlobManager): return self.db_conn.runInteraction(delete_blobs) - @rerun_if_locked - def _check_consistency(self): - - ALREADY_VERIFIED = 1 - NEWLY_VERIFIED = 2 - INVALID = 3 - - current_time = time.time() - d = self.db_conn.runQuery("select blob_hash, blob_length, last_verified_time from blobs") - - def check_blob(blob_hash, blob_length, verified_time): - file_path = os.path.join(self.blob_dir, blob_hash) - if os.path.isfile(file_path): - if verified_time >= os.path.getctime(file_path): - return ALREADY_VERIFIED - else: - h = 
get_lbry_hash_obj() - len_so_far = 0 - f = open(file_path) - while True: - data = f.read(2**12) - if not data: - break - h.update(data) - len_so_far += len(data) - if len_so_far == blob_length and h.hexdigest() == blob_hash: - return NEWLY_VERIFIED - return INVALID - - def do_check(blobs): - already_verified = [] - newly_verified = [] - invalid = [] - for blob_hash, blob_length, verified_time in blobs: - status = check_blob(blob_hash, blob_length, verified_time) - if status == ALREADY_VERIFIED: - already_verified.append(blob_hash) - elif status == NEWLY_VERIFIED: - newly_verified.append(blob_hash) - else: - invalid.append(blob_hash) - return already_verified, newly_verified, invalid - - def update_newly_verified(transaction, blobs): - for b in blobs: - transaction.execute("update blobs set last_verified_time = ? where blob_hash = ?", - (current_time, b)) - - def check_blobs(blobs): - - @rerun_if_locked - def update_and_return(status_lists): - - already_verified, newly_verified, invalid = status_lists - - d = self.db_conn.runInteraction(update_newly_verified, newly_verified) - d.addCallback(lambda _: status_lists) - return d - - d = threads.deferToThread(do_check, blobs) - - d.addCallback(update_and_return) - return d - - d.addCallback(check_blobs) - return d - @rerun_if_locked def _get_all_verified_blob_hashes(self): d = self.db_conn.runQuery("select blob_hash, last_verified_time from blobs") From 84baa5e0656a66ea15fa9b48c421ce850639dbe9 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Wed, 14 Dec 2016 13:57:19 -0600 Subject: [PATCH 22/26] dht: refactor _msgTimeout --- lbrynet/dht/protocol.py | 59 +++++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 26 deletions(-) diff --git a/lbrynet/dht/protocol.py b/lbrynet/dht/protocol.py index f99711f36..3530f78f6 100644 --- a/lbrynet/dht/protocol.py +++ b/lbrynet/dht/protocol.py @@ -282,34 +282,41 @@ class KademliaProtocol(protocol.DatagramProtocol): def _msgTimeout(self, messageID): """ Called when an RPC request message times out """ # Find the message that timed out - if self._sentMessages.has_key(messageID): - remoteContactID, df = self._sentMessages[messageID][0:2] - if self._partialMessages.has_key(messageID): - # We are still receiving this message - # See if any progress has been made; if not, kill the message - if self._partialMessagesProgress.has_key(messageID): - same_length = ( - len(self._partialMessagesProgress[messageID]) == - len(self._partialMessages[messageID])) - if same_length: - # No progress has been made - del self._partialMessagesProgress[messageID] - del self._partialMessages[messageID] - df.errback(failure.Failure(TimeoutError(remoteContactID))) - return - # Reset the RPC timeout timer - timeoutCall = reactor.callLater( - constants.rpcTimeout, self._msgTimeout, messageID) #IGNORE:E1101 - self._sentMessages[messageID] = (remoteContactID, df, timeoutCall) - return - del self._sentMessages[messageID] - # The message's destination node is now considered to be dead; - # raise an (asynchronous) TimeoutError exception and update the host node - self._node.removeContact(remoteContactID) - df.errback(failure.Failure(TimeoutError(remoteContactID))) - else: + if not self._sentMessages.has_key(messageID): # This should never be reached log.error("deferred timed out, but is not present in sent messages list!") + return + remoteContactID, df = self._sentMessages[messageID][0:2] + if self._partialMessages.has_key(messageID): + # We are still receiving this message + self._msgTimeoutInProgress(messageID, 
remoteContactID, df) + return + del self._sentMessages[messageID] + # The message's destination node is now considered to be dead; + # raise an (asynchronous) TimeoutError exception and update the host node + self._node.removeContact(remoteContactID) + df.errback(failure.Failure(TimeoutError(remoteContactID))) + + def _msgTimeoutInProgress(self, messageID, remoteContactID, df): + # See if any progress has been made; if not, kill the message + if self._hasProgressBeenMade(messageID): + # Reset the RPC timeout timer + timeoutCall = reactor.callLater(constants.rpcTimeout, self._msgTimeout, messageID) + self._sentMessages[messageID] = (remoteContactID, df, timeoutCall) + else: + # No progress has been made + del self._partialMessagesProgress[messageID] + del self._partialMessages[messageID] + df.errback(failure.Failure(TimeoutError(remoteContactID))) + + def _hasProgressBeenMade(self, messageID): + return ( + self._partialMessagesProgress.has_key(messageID) and + ( + len(self._partialMessagesProgress[messageID]) != + len(self._partialMessages[messageID]) + ) + ) def stopProtocol(self): """ Called when the transport is disconnected. From f181af85479fae7f0c7fec4855f0f92968d1cbcd Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Wed, 14 Dec 2016 16:37:17 -0600 Subject: [PATCH 23/26] Cleanup code in BlobManager --- lbrynet/core/BlobManager.py | 57 ++++++++++++++++++++++++------------- 1 file changed, 38 insertions(+), 19 deletions(-) diff --git a/lbrynet/core/BlobManager.py b/lbrynet/core/BlobManager.py index 4d174a8e7..ee688d8ae 100644 --- a/lbrynet/core/BlobManager.py +++ b/lbrynet/core/BlobManager.py @@ -83,6 +83,8 @@ class DiskBlobManager(BlobManager): self.db_conn = None self.blob_type = BlobFile self.blob_creator_type = BlobFileCreator + # TODO: consider using an LRU for blobs as there could potentially + # be thousands of blobs loaded up, many stale self.blobs = {} self.blob_hashes_to_delete = {} # {blob_hash: being_deleted (True/False)} self._next_manage_call = None @@ -295,18 +297,27 @@ class DiskBlobManager(BlobManager): @rerun_if_locked def _completed_blobs(self, blobs_to_check): + """Returns the subset of blobs_to_check that are valid""" blobs_to_check = filter(is_valid_blobhash, blobs_to_check) - def get_blobs_in_db(db_transaction): - blobs_in_db = [] # [(blob_hash, last_verified_time)] + def _get_last_verified_time(db_transaction, blob_hash): + result = db_transaction.execute( + "select last_verified_time from blobs where blob_hash = ?", (blob_hash,)) + row = result.fetchone() + if row: + return row[0] + else: + return None + + def _filter_blobs_in_db(db_transaction, blobs_to_check): for b in blobs_to_check: - result = db_transaction.execute( "select last_verified_time from blobs where blob_hash = ?", (b,)) - row = result.fetchone() - if row is not None: - blobs_in_db.append((b, row[0])) - return blobs_in_db + verified_time = _get_last_verified_time(db_transaction, b) + if verified_time: + yield (b, verified_time) + + def get_blobs_in_db(db_transaction, blobs_to_check): + # [(blob_hash, last_verified_time)] + return list(_filter_blobs_in_db(db_transaction, blobs_to_check)) def get_valid_blobs(blobs_in_db): @@ -315,23 +326,31 @@ class DiskBlobManager(BlobManager): if os.path.isfile(file_path): if verified_time > os.path.getctime(file_path): return True + else: + log.debug('Verification time for %s is too old (%s < %s)', + file_path, verified_time, os.path.getctime(file_path)) + else: + log.debug('file %s does not exist', file_path) return False - def return_valid_blobs(results): -
valid_blobs = [] - for (b, verified_date), (success, result) in zip(blobs_in_db, results): - if success is True and result is True: - valid_blobs.append(b) + def filter_valid_blobs(results): + assert len(blobs_in_db) == len(results) + valid_blobs = [ + b for (b, verified_date), (success, result) in zip(blobs_in_db, results) + if success is True and result is True + ] + log.debug('Of %s blobs, %s were valid', len(results), len(valid_blobs)) return valid_blobs - ds = [] - for b, verified_date in blobs_in_db: - ds.append(threads.deferToThread(check_blob_verified_date, b, verified_date)) + ds = [ + threads.deferToThread(check_blob_verified_date, b, verified_date) + for b, verified_date in blobs_in_db + ] dl = defer.DeferredList(ds) - dl.addCallback(return_valid_blobs) + dl.addCallback(filter_valid_blobs) return dl - d = self.db_conn.runInteraction(get_blobs_in_db) + d = self.db_conn.runInteraction(get_blobs_in_db, blobs_to_check) d.addCallback(get_valid_blobs) return d From 3585d861ffdafc8ff0c0393a9a4b8e53998002fe Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Wed, 14 Dec 2016 16:46:06 -0600 Subject: [PATCH 24/26] Add TRACE level logging --- lbrynet/core/log_support.py | 6 ++++++ lbrynet/core/server/ServerProtocol.py | 2 +- lbrynet/core/server/ServerRequestHandler.py | 18 ++++++++++-------- tests/__init__.py | 4 ++++ 4 files changed, 21 insertions(+), 9 deletions(-) diff --git a/lbrynet/core/log_support.py b/lbrynet/core/log_support.py index 451a57a13..880f7cd18 100644 --- a/lbrynet/core/log_support.py +++ b/lbrynet/core/log_support.py @@ -33,6 +33,7 @@ _srcfile = os.path.normcase(_srcfile) session = FuturesSession() +TRACE = 5 def bg_cb(sess, resp): @@ -371,5 +372,10 @@ class Logger(logging.Logger): callback(err, *args, **kwargs) return _fail + def trace(self, msg, *args, **kwargs): + if self.isEnabledFor(TRACE): + self._log(TRACE, msg, args, **kwargs) + logging.setLoggerClass(Logger) +logging.addLevelName(TRACE, 'TRACE') diff --git a/lbrynet/core/server/ServerProtocol.py b/lbrynet/core/server/ServerProtocol.py index df52dedb0..b88ffe0ba 100644 --- a/lbrynet/core/server/ServerProtocol.py +++ b/lbrynet/core/server/ServerProtocol.py @@ -64,7 +64,7 @@ class ServerProtocol(Protocol): self.transport.loseConnection() def write(self, data): - log.debug("Writing %s bytes of data to the transport", str(len(data))) + log.trace("Writing %s bytes of data to the transport", len(data)) self.transport.write(data) self.factory.rate_limiter.report_ul_bytes(len(data)) diff --git a/lbrynet/core/server/ServerRequestHandler.py b/lbrynet/core/server/ServerRequestHandler.py index c57a63be2..813771647 100644 --- a/lbrynet/core/server/ServerRequestHandler.py +++ b/lbrynet/core/server/ServerRequestHandler.py @@ -52,13 +52,15 @@ class ServerRequestHandler(object): from twisted.internet import reactor - if self.production_paused is False: - chunk = self.response_buff[:self.CHUNK_SIZE] - self.response_buff = self.response_buff[self.CHUNK_SIZE:] - if chunk != '': - log.debug("writing %s bytes to the client", str(len(chunk))) - self.consumer.write(chunk) - reactor.callLater(0, self._produce_more) + if self.production_paused: + return + chunk = self.response_buff[:self.CHUNK_SIZE] + self.response_buff = self.response_buff[self.CHUNK_SIZE:] + if chunk == '': + return + log.trace("writing %s bytes to the client", len(chunk)) + self.consumer.write(chunk) + reactor.callLater(0, self._produce_more) #IConsumer stuff @@ -79,7 +81,7 @@ class ServerRequestHandler(object): def get_more_data(): if self.producer is not None: - 
log.debug("Requesting more data from the producer") + log.trace("Requesting more data from the producer") self.producer.resumeProducing() reactor.callLater(0, get_more_data) diff --git a/tests/__init__.py b/tests/__init__.py index e69de29bb..6ce67146e 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -0,0 +1,4 @@ +# log_support setups the default Logger class +# and so we need to ensure that it is also +# setup for the tests +from lbrynet.core import log_support From 56d394fb5f64f1bfdea3e61713ecbf73580d808c Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Wed, 14 Dec 2016 16:52:26 -0600 Subject: [PATCH 25/26] Add timing to hash announcements This could potentially be a performance issue on reflector or any daemon with a large number of blobs. --- lbrynet/core/server/DHTHashAnnouncer.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/lbrynet/core/server/DHTHashAnnouncer.py b/lbrynet/core/server/DHTHashAnnouncer.py index 8ce55ffd9..d41a19e74 100644 --- a/lbrynet/core/server/DHTHashAnnouncer.py +++ b/lbrynet/core/server/DHTHashAnnouncer.py @@ -1,6 +1,7 @@ import binascii import collections import logging +import time from twisted.internet import defer, reactor @@ -52,7 +53,11 @@ class DHTHashAnnouncer(object): return dl def _announce_hashes(self, hashes): - + if not hashes: + return + log.debug('Announcing %s hashes', len(hashes)) + # TODO: add a timeit decorator + start = time.time() ds = [] for h in hashes: @@ -74,7 +79,10 @@ class DHTHashAnnouncer(object): # TODO: maybe make the 5 configurable self._concurrent_announcers += 1 announce() - return defer.DeferredList(ds) + d = defer.DeferredList(ds) + d.addCallback(lambda _: log.debug('Took %s seconds to announce %s hashes', + time.time() - start, len(hashes))) + return d class DHTHashSupplier(object): From 7af6e9e0dd8dc2bd6ef339cd10585b25efa3e7c2 Mon Sep 17 00:00:00 2001 From: Job Evers-Meltzer Date: Wed, 14 Dec 2016 16:14:37 -0600 Subject: [PATCH 26/26] Improve BlobAvailabilityTracker performance For daemons with a lot of blobs, getting mean availabity will be slow. Samples the blobs in an attempt at getting better performance. 
--- lbrynet/core/BlobAvailability.py | 30 ++++++++++++++++++++++++------ tests/mocks.py | 2 +- 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/lbrynet/core/BlobAvailability.py b/lbrynet/core/BlobAvailability.py index 5131a7cd4..69f012a5b 100644 --- a/lbrynet/core/BlobAvailability.py +++ b/lbrynet/core/BlobAvailability.py @@ -1,4 +1,6 @@ import logging +import random +import time from twisted.internet import defer from twisted.internet.task import LoopingCall @@ -27,7 +29,7 @@ class BlobAvailabilityTracker(object): def start(self): log.info("Starting %s", self) self._check_popular.start(30) - self._check_mine.start(120) + self._check_mine.start(600) def stop(self): log.info("Stopping %s", self) @@ -76,7 +78,8 @@ class BlobAvailabilityTracker(object): def _update_most_popular(self): d = self._get_most_popular() - d.addCallback(lambda _: self._get_mean_peers()) + d.addCallback(lambda _: self._set_mean_peers()) + def _update_mine(self): def _get_peers(blobs): @@ -85,11 +88,26 @@ class BlobAvailabilityTracker(object): dl.append(self._update_peers_for_blob(hash)) return defer.DeferredList(dl) - d = self._blob_manager.get_all_verified_blobs() - d.addCallback(_get_peers) - d.addCallback(lambda _: self._get_mean_peers()) + def sample(blobs): + return random.sample(blobs, 100) - def _get_mean_peers(self): + start = time.time() + log.debug('==> Updating the peers for my blobs') + d = self._blob_manager.get_all_verified_blobs() + # as far as I can tell, this only is used to set _last_mean_availability + # which... seems like a very expensive operation for such little payoff. + # so taking a sample should get about the same effect as querying the entire + # list of blobs + d.addCallback(sample) + d.addCallback(_get_peers) + d.addCallback(lambda _: self._set_mean_peers()) + d.addCallback(lambda _: log.debug('<== Done updating peers for my blobs. Took %s seconds', + time.time() - start)) + # although unused, need to return or else the looping call + # could overrun on a previous call + return d + + def _set_mean_peers(self): num_peers = [len(self.availability[blob]) for blob in self.availability] mean = Decimal(sum(num_peers)) / Decimal(max(1, len(num_peers))) self._last_mean_availability = mean diff --git a/tests/mocks.py b/tests/mocks.py index 1525b51ba..2899ab096 100644 --- a/tests/mocks.py +++ b/tests/mocks.py @@ -167,7 +167,7 @@ class BlobAvailabilityTracker(BlobAvailability.BlobAvailabilityTracker): self._dht_node = None self._check_popular = None self._check_mine = None - self._get_mean_peers() + self._set_mean_peers() def start(self): pass
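Finally, a short usage sketch for the TRACE level introduced in PATCH 24, assuming lbrynet.core.log_support has been imported first so that logging.setLoggerClass(Logger) has already run when the logger is created:

    import logging
    from lbrynet.core import log_support

    # Loggers created after log_support is imported are instances of the
    # custom Logger class, so they expose the trace() method.
    log = logging.getLogger('lbrynet.core.server.ServerProtocol')
    log.setLevel(log_support.TRACE)  # TRACE = 5, one notch below DEBUG (10)
    log.trace('Writing %s bytes of data to the transport', 1024)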