Remove commented code

This is the result of running eradicate (https://github.com/myint/eradicate)
on the code and double-checking the changes.

commit c30ea04959 (parent 083092ad5e)
Author: Job Evers-Meltzer
Date: 2016-12-13 17:37:23 -06:00
20 changed files with 14 additions and 685 deletions
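
For readers who have not used eradicate: its core heuristic is to flag comment lines whose text parses as Python rather than as prose, which is exactly the kind of line this commit deletes. The snippet below is a minimal sketch of that idea, not eradicate's actual implementation; the function name and the "lone word" filter are invented here for illustration, and the real tool applies considerably more filtering to avoid false positives.

import ast

def looks_like_commented_out_code(line):
    """Rough check: does the text after '#' parse as a Python statement?"""
    stripped = line.strip()
    if not stripped.startswith("#"):
        return False
    candidate = stripped.lstrip("#").strip()
    if not candidate:
        return False
    try:
        ast.parse(candidate)
    except SyntaxError:
        return False
    # A lone English word also parses as an expression, so additionally
    # require something more code-like than plain alphabetic text.
    return not candidate.replace(" ", "").isalpha()

# Flagged: a commented-out assignment like the one removed from DiskBlobManager below.
assert looks_like_commented_out_code("#d = self.db_conn.close()")
# Not flagged: an ordinary explanatory comment that the commit leaves in place.
assert not looks_like_commented_out_code("# Create k-buckets (for storing contacts)")

A heuristic this crude can misfire on prose that happens to parse and can miss code spread across several comment lines, which is why the changes were double-checked by hand as noted above.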


@@ -102,7 +102,6 @@ class DiskBlobManager(BlobManager):
if self._next_manage_call is not None and self._next_manage_call.active():
self._next_manage_call.cancel()
self._next_manage_call = None
#d = self.db_conn.close()
self.db_conn = None
return defer.succeed(True)
@@ -348,8 +347,6 @@ class DiskBlobManager(BlobManager):
d.addCallback(lambda r: r[0][0] if len(r) else Failure(NoSuchBlobError(blob)))
return d
#length, verified_time, next_announce_time = json.loads(self.db.Get(blob))
#return length
@rerun_if_locked
def _update_blob_verified_timestamp(self, blob, timestamp):


@@ -214,7 +214,6 @@ class Wallet(object):
once the service has been rendered
"""
rounded_amount = Decimal(str(round(amount, 8)))
#if peer in self.peer_addresses:
if self.wallet_balance >= self.total_reserved_points + rounded_amount:
self.total_reserved_points += rounded_amount
return ReservedPoints(identifier, rounded_amount)


@@ -209,7 +209,6 @@ class ClientProtocol(Protocol):
log.debug("Asking for another request.")
from twisted.internet import reactor
reactor.callLater(0, self._ask_for_request)
#self._ask_for_request()
else:
log.debug("Not asking for another request.")
self.transport.loseConnection()
@@ -230,8 +229,6 @@ class ClientProtocol(Protocol):
# TODO: protocol had such a mechanism.
log.debug("Closing the connection to %s because the download of blob %s was canceled",
str(self.peer), str(self._blob_download_request.blob))
#self.transport.loseConnection()
#return True
return err
######### IRateLimited #########


@@ -63,7 +63,6 @@ class ServerRequestHandler(object):
#IConsumer stuff
def registerProducer(self, producer, streaming):
#assert self.file_sender == producer
self.producer = producer
assert streaming is False
producer.resumeProducing()


@@ -48,7 +48,6 @@ class StreamBlobDecryptor(object):
self.buff += data
self.len_read += len(data)
write_bytes()
#write_func(remove_padding(self.cipher.decrypt(self.buff)))
d = self.blob.read(decrypt_bytes)
d.addCallback(lambda _: finish_decrypt())


@@ -1,307 +0,0 @@
# import sqlite3
# import unqlite
# import leveldb
# import shutil
# import os
# import logging
# import json
#
#
# log = logging.getLogger(__name__)
#
#
# known_dbs = ['lbryfile_desc.db', 'lbryfiles.db', 'valuable_blobs.db', 'blobs.db',
# 'lbryfile_blob.db', 'lbryfile_info.db', 'settings.db', 'blind_settings.db',
# 'blind_peers.db', 'blind_info.db', 'lbryfile_info.db', 'lbryfile_manager.db',
# 'live_stream.db', 'stream_info.db', 'stream_blob.db', 'stream_desc.db']
#
#
# def do_move(from_dir, to_dir):
# for known_db in known_dbs:
# known_db_path = os.path.join(from_dir, known_db)
# if os.path.exists(known_db_path):
# log.debug("Moving %s to %s",
# os.path.abspath(known_db_path),
# os.path.abspath(os.path.join(to_dir, known_db)))
# shutil.move(known_db_path, os.path.join(to_dir, known_db))
# else:
# log.debug("Did not find %s", os.path.abspath(known_db_path))
#
#
# def do_migration(db_dir):
# old_dir = os.path.join(db_dir, "_0_to_1_old")
# new_dir = os.path.join(db_dir, "_0_to_1_new")
# try:
# log.info("Moving dbs from the real directory to %s", os.path.abspath(old_dir))
# os.makedirs(old_dir)
# do_move(db_dir, old_dir)
# except:
# log.error("An error occurred moving the old db files.")
# raise
# try:
# log.info("Creating the new directory in %s", os.path.abspath(new_dir))
# os.makedirs(new_dir)
#
# except:
# log.error("An error occurred creating the new directory.")
# raise
# try:
# log.info("Doing the migration")
# migrate_blob_db(old_dir, new_dir)
# migrate_lbryfile_db(old_dir, new_dir)
# migrate_livestream_db(old_dir, new_dir)
# migrate_ptc_db(old_dir, new_dir)
# migrate_lbryfile_manager_db(old_dir, new_dir)
# migrate_settings_db(old_dir, new_dir)
# migrate_repeater_db(old_dir, new_dir)
# log.info("Migration succeeded")
# except:
# log.error("An error occurred during the migration. Restoring.")
# do_move(old_dir, db_dir)
# raise
# try:
# log.info("Moving dbs in the new directory to the real directory")
# do_move(new_dir, db_dir)
# db_revision = open(os.path.join(db_dir, 'db_revision'), mode='w+')
# db_revision.write("1")
# db_revision.close()
# os.rmdir(new_dir)
# except:
# log.error("An error occurred moving the new db files.")
# raise
# return old_dir
#
#
# def migrate_blob_db(old_db_dir, new_db_dir):
# old_blob_db_path = os.path.join(old_db_dir, "blobs.db")
# if not os.path.exists(old_blob_db_path):
# return True
#
# old_db = leveldb.LevelDB(old_blob_db_path)
# new_db_conn = sqlite3.connect(os.path.join(new_db_dir, "blobs.db"))
# c = new_db_conn.cursor()
# c.execute("create table if not exists blobs (" +
# " blob_hash text primary key, " +
# " blob_length integer, " +
# " last_verified_time real, " +
# " next_announce_time real"
# ")")
# new_db_conn.commit()
# c = new_db_conn.cursor()
# for blob_hash, blob_info in old_db.RangeIter():
# blob_length, verified_time, announce_time = json.loads(blob_info)
# c.execute("insert into blobs values (?, ?, ?, ?)",
# (blob_hash, blob_length, verified_time, announce_time))
# new_db_conn.commit()
# new_db_conn.close()
#
#
# def migrate_lbryfile_db(old_db_dir, new_db_dir):
# old_lbryfile_db_path = os.path.join(old_db_dir, "lbryfiles.db")
# if not os.path.exists(old_lbryfile_db_path):
# return True
#
# stream_info_db = leveldb.LevelDB(os.path.join(old_db_dir, "lbryfile_info.db"))
# stream_blob_db = leveldb.LevelDB(os.path.join(old_db_dir, "lbryfile_blob.db"))
# stream_desc_db = leveldb.LevelDB(os.path.join(old_db_dir, "lbryfile_desc.db"))
#
# db_conn = sqlite3.connect(os.path.join(new_db_dir, "lbryfile_info.db"))
# c = db_conn.cursor()
# c.execute("create table if not exists lbry_files (" +
# " stream_hash text primary key, " +
# " key text, " +
# " stream_name text, " +
# " suggested_file_name text" +
# ")")
# c.execute("create table if not exists lbry_file_blobs (" +
# " blob_hash text, " +
# " stream_hash text, " +
# " position integer, " +
# " iv text, " +
# " length integer, " +
# " foreign key(stream_hash) references lbry_files(stream_hash)" +
# ")")
# c.execute("create table if not exists lbry_file_descriptors (" +
# " sd_blob_hash TEXT PRIMARY KEY, " +
# " stream_hash TEXT, " +
# " foreign key(stream_hash) references lbry_files(stream_hash)" +
# ")")
# db_conn.commit()
# c = db_conn.cursor()
# for stream_hash, stream_info in stream_info_db.RangeIter():
# key, name, suggested_file_name = json.loads(stream_info)
# c.execute("insert into lbry_files values (?, ?, ?, ?)",
# (stream_hash, key, name, suggested_file_name))
# db_conn.commit()
# c = db_conn.cursor()
# for blob_hash_stream_hash, blob_info in stream_blob_db.RangeIter():
# b_h, s_h = json.loads(blob_hash_stream_hash)
# position, iv, length = json.loads(blob_info)
# c.execute("insert into lbry_file_blobs values (?, ?, ?, ?, ?)",
# (b_h, s_h, position, iv, length))
# db_conn.commit()
# c = db_conn.cursor()
# for sd_blob_hash, stream_hash in stream_desc_db.RangeIter():
# c.execute("insert into lbry_file_descriptors values (?, ?)",
# (sd_blob_hash, stream_hash))
# db_conn.commit()
# db_conn.close()
#
#
# def migrate_livestream_db(old_db_dir, new_db_dir):
# old_db_path = os.path.join(old_db_dir, "stream_info.db")
# if not os.path.exists(old_db_path):
# return True
# stream_info_db = leveldb.LevelDB(os.path.join(old_db_dir, "stream_info.db"))
# stream_blob_db = leveldb.LevelDB(os.path.join(old_db_dir, "stream_blob.db"))
# stream_desc_db = leveldb.LevelDB(os.path.join(old_db_dir, "stream_desc.db"))
#
# db_conn = sqlite3.connect(os.path.join(new_db_dir, "live_stream.db"))
#
# c = db_conn.cursor()
#
# c.execute("create table if not exists live_streams (" +
# " stream_hash text primary key, " +
# " public_key text, " +
# " key text, " +
# " stream_name text, " +
# " next_announce_time real" +
# ")")
# c.execute("create table if not exists live_stream_blobs (" +
# " blob_hash text, " +
# " stream_hash text, " +
# " position integer, " +
# " revision integer, " +
# " iv text, " +
# " length integer, " +
# " signature text, " +
# " foreign key(stream_hash) references live_streams(stream_hash)" +
# ")")
# c.execute("create table if not exists live_stream_descriptors (" +
# " sd_blob_hash TEXT PRIMARY KEY, " +
# " stream_hash TEXT, " +
# " foreign key(stream_hash) references live_streams(stream_hash)" +
# ")")
#
# db_conn.commit()
#
# c = db_conn.cursor()
# for stream_hash, stream_info in stream_info_db.RangeIter():
# public_key, key, name, next_announce_time = json.loads(stream_info)
# c.execute("insert into live_streams values (?, ?, ?, ?, ?)",
# (stream_hash, public_key, key, name, next_announce_time))
# db_conn.commit()
# c = db_conn.cursor()
# for blob_hash_stream_hash, blob_info in stream_blob_db.RangeIter():
# b_h, s_h = json.loads(blob_hash_stream_hash)
# position, revision, iv, length, signature = json.loads(blob_info)
# c.execute("insert into live_stream_blobs values (?, ?, ?, ?, ?, ?, ?)",
# (b_h, s_h, position, revision, iv, length, signature))
# db_conn.commit()
# c = db_conn.cursor()
# for sd_blob_hash, stream_hash in stream_desc_db.RangeIter():
# c.execute("insert into live_stream_descriptors values (?, ?)",
# (sd_blob_hash, stream_hash))
# db_conn.commit()
# db_conn.close()
#
#
# def migrate_ptc_db(old_db_dir, new_db_dir):
# old_db_path = os.path.join(old_db_dir, "ptcwallet.db")
# if not os.path.exists(old_db_path):
# return True
# old_db = leveldb.LevelDB(old_db_path)
# try:
# p_key = old_db.Get("private_key")
# new_db = unqlite.UnQLite(os.path.join(new_db_dir, "ptcwallet.db"))
# new_db['private_key'] = p_key
# except KeyError:
# pass
#
#
# def migrate_lbryfile_manager_db(old_db_dir, new_db_dir):
# old_db_path = os.path.join(old_db_dir, "lbryfiles.db")
# if not os.path.exists(old_db_path):
# return True
# old_db = leveldb.LevelDB(old_db_path)
# new_db = sqlite3.connect(os.path.join(new_db_dir, "lbryfile_info.db"))
# c = new_db.cursor()
# c.execute("create table if not exists lbry_file_options (" +
# " blob_data_rate real, " +
# " status text," +
# " stream_hash text,"
# " foreign key(stream_hash) references lbry_files(stream_hash)" +
# ")")
# new_db.commit()
# FILE_STATUS = "t"
# FILE_OPTIONS = "o"
# c = new_db.cursor()
# for k, v in old_db.RangeIter():
# key_type, stream_hash = json.loads(k)
# if key_type == FILE_STATUS:
# try:
# rate = json.loads(old_db.Get(json.dumps((FILE_OPTIONS, stream_hash))))[0]
# except KeyError:
# rate = None
# c.execute("insert into lbry_file_options values (?, ?, ?)",
# (rate, v, stream_hash))
# new_db.commit()
# new_db.close()
#
#
# def migrate_settings_db(old_db_dir, new_db_dir):
# old_settings_db_path = os.path.join(old_db_dir, "settings.db")
# if not os.path.exists(old_settings_db_path):
# return True
# old_db = leveldb.LevelDB(old_settings_db_path)
# new_db = unqlite.UnQLite(os.path.join(new_db_dir, "settings.db"))
# for k, v in old_db.RangeIter():
# new_db[k] = v
#
#
# def migrate_repeater_db(old_db_dir, new_db_dir):
# old_repeater_db_path = os.path.join(old_db_dir, "valuable_blobs.db")
# if not os.path.exists(old_repeater_db_path):
# return True
# old_db = leveldb.LevelDB(old_repeater_db_path)
# info_db = sqlite3.connect(os.path.join(new_db_dir, "blind_info.db"))
# peer_db = sqlite3.connect(os.path.join(new_db_dir, "blind_peers.db"))
# unql_db = unqlite.UnQLite(os.path.join(new_db_dir, "blind_settings.db"))
# BLOB_INFO_TYPE = 'b'
# SETTING_TYPE = 's'
# PEER_TYPE = 'p'
# info_c = info_db.cursor()
# info_c.execute("create table if not exists valuable_blobs (" +
# " blob_hash text primary key, " +
# " blob_length integer, " +
# " reference text, " +
# " peer_host text, " +
# " peer_port integer, " +
# " peer_score text" +
# ")")
# info_db.commit()
# peer_c = peer_db.cursor()
# peer_c.execute("create table if not exists approved_peers (" +
# " ip_address text, " +
# " port integer" +
# ")")
# peer_db.commit()
# info_c = info_db.cursor()
# peer_c = peer_db.cursor()
# for k, v in old_db.RangeIter():
# key_type, key_rest = json.loads(k)
# if key_type == PEER_TYPE:
# host, port = key_rest
# peer_c.execute("insert into approved_peers values (?, ?)",
# (host, port))
# elif key_type == SETTING_TYPE:
# unql_db[key_rest] = v
# elif key_type == BLOB_INFO_TYPE:
# blob_hash = key_rest
# length, reference, peer_host, peer_port, peer_score = json.loads(v)
# info_c.execute("insert into valuable_blobs values (?, ?, ?, ?, ?, ?)",
# (blob_hash, length, reference, peer_host, peer_port, peer_score))
# info_db.commit()
# peer_db.commit()
# info_db.close()
# peer_db.close()


@@ -85,9 +85,6 @@ class Node(object):
self.next_refresh_call = None
self.next_change_token_call = None
# Create k-buckets (for storing contacts)
#self._buckets = []
#for i in range(160):
# self._buckets.append(kbucket.KBucket())
if routingTableClass == None:
self._routingTable = routingtable.OptimizedTreeRoutingTable(self.id)
else:
@@ -118,7 +115,6 @@ class Node(object):
self.hash_watcher = HashWatcher()
def __del__(self):
#self._persistState()
if self._listeningPort is not None:
self._listeningPort.stopListening()
@@ -165,16 +161,6 @@ class Node(object):
# Initiate the Kademlia joining sequence - perform a search for this node's own ID
self._joinDeferred = self._iterativeFind(self.id, bootstrapContacts)
# #TODO: Refresh all k-buckets further away than this node's closest neighbour
# def getBucketAfterNeighbour(*args):
# for i in range(160):
# if len(self._buckets[i]) > 0:
# return i+1
# return 160
# df.addCallback(getBucketAfterNeighbour)
# df.addCallback(self._refreshKBuckets)
#protocol.reactor.callLater(10, self.printContacts)
#self._joinDeferred.addCallback(self._persistState)
#self._joinDeferred.addCallback(self.printContacts)
# Start refreshing k-buckets periodically, if necessary
self.next_refresh_call = twisted.internet.reactor.callLater(
constants.checkRefreshInterval, self._refreshNode) #IGNORE:E1101
@@ -187,7 +173,6 @@ class Node(object):
for contact in self._routingTable._buckets[i]._contacts:
print contact
print '=================================='
#twisted.internet.reactor.callLater(10, self.printContacts)
def getApproximateTotalDHTNodes(self):
# get the deepest bucket and the number of contacts in that bucket and multiply it
@@ -218,7 +203,6 @@ class Node(object):
if type(result) == dict:
if blob_hash in result:
for peer in result[blob_hash]:
#print peer
if self.lbryid != peer[6:]:
host = ".".join([str(ord(d)) for d in peer[:4]])
if host == "127.0.0.1":
@@ -230,8 +214,6 @@ class Node(object):
return expanded_peers
def find_failed(err):
#print "An exception occurred in the DHT"
#print err.getErrorMessage()
return []
d = self.iterativeFindValue(blob_hash)
@@ -268,16 +250,12 @@ class Node(object):
result = responseMsg.response
if 'token' in result:
#print "Printing result...", result
value['token'] = result['token']
d = n.store(blob_hash, value, self.id, 0)
d.addCallback(log_success)
d.addErrback(log_error, n)
else:
d = defer.succeed(False)
#else:
# print "result:", result
# print "No token where it should be"
return d
def requestPeers(contacts):
@@ -289,7 +267,6 @@ class Node(object):
contacts.pop()
self.store(blob_hash, value, self_store=True, originalPublisherID=self.id)
elif self.externalIP is not None:
#print "attempting to self-store"
self.store(blob_hash, value, self_store=True, originalPublisherID=self.id)
ds = []
for contact in contacts:
@@ -323,7 +300,6 @@ class Node(object):
h = hashlib.new('sha384')
h.update(self.old_token_secret + compact_ip)
if not token == h.digest():
#print 'invalid token found'
return False
return True
@@ -368,24 +344,17 @@ class Node(object):
def checkResult(result):
if type(result) == dict:
# We have found the value; now see who was the closest contact without it...
# if 'closestNodeNoValue' in result:
# ...and store the key/value pair
# contact = result['closestNodeNoValue']
# contact.store(key, result[key])
outerDf.callback(result)
else:
# The value wasn't found, but a list of contacts was returned
# Now, see if we have the value (it might seem wasteful to search on the network
# first, but it ensures that all values are properly propagated through the
# network
#if key in self._dataStore:
if self._dataStore.hasPeersForBlob(key):
# Ok, we have the value locally, so use that
peers = self._dataStore.getPeersForBlob(key)
# Send this value to the closest node without it
#if len(result) > 0:
# contact = result[0]
# contact.store(key, value)
outerDf.callback({key: peers, "from_peer": 'self'})
else:
# Ok, value does not exist in DHT at all
@@ -484,19 +453,13 @@ class Node(object):
compact_ip = contact.compact_ip()
elif '_rpcNodeContact' in kwargs:
contact = kwargs['_rpcNodeContact']
#print contact.address
compact_ip = contact.compact_ip()
#print compact_ip
else:
return 'Not OK'
#raise TypeError, 'No contact info available'
if ((self_store is False) and
(not 'token' in value or not self.verify_token(value['token'], compact_ip))):
#if not 'token' in value:
# print "Couldn't find token in value"
#elif not self.verify_token(value['token'], contact.compact_ip()):
# print "Token is invalid"
raise ValueError('Invalid or missing token')
if 'port' in value:
@@ -518,11 +481,8 @@ class Node(object):
now = int(time.time())
originallyPublished = now# - age
#print compact_address
self._dataStore.addPeerToBlob(
key, compact_address, now, originallyPublished, originalPublisherID)
#if self_store is True:
# print "looks like it was successful maybe"
return 'OK'
@rpcmethod
@@ -717,7 +677,6 @@ class Node(object):
# Force the iteration
pendingIterationCalls[0].cancel()
del pendingIterationCalls[0]
#print 'forcing iteration ================='
searchIteration()
def log_error(err):
@@ -725,7 +684,6 @@ class Node(object):
# Send parallel, asynchronous FIND_NODE RPCs to the shortlist of contacts
def searchIteration():
#print '==> searchiteration'
slowNodeCount[0] = len(activeProbes)
# TODO: move sort_key to be a method on the class
def sort_key(firstContact, secondContact, targetKey=key):
@@ -797,7 +755,6 @@ class Node(object):
# Ensure that the closest contacts are taken from the updated shortList
searchIteration()
else:
#print '++++++++++++++ DONE (logically) +++++++++++++\n\n'
# If no probes were sent, there will not be any improvement, so we're done
outerDf.callback(activeContacts)
@@ -809,9 +766,7 @@ class Node(object):
def _refreshNode(self):
""" Periodically called to perform k-bucket refreshes and data
replication/republishing as necessary """
#print 'refreshNode called'
df = self._refreshRoutingTable()
#df.addCallback(self._republishData)
df.addCallback(self._removeExpiredPeers)
df.addCallback(self._scheduleNextNodeRefresh)
@@ -830,13 +785,8 @@ class Node(object):
searchForNextNodeID()
return outerDf
#def _republishData(self, *args):
# #print '---republishData() called'
# df = twisted.internet.threads.deferToThread(self._threadedRepublishData)
# return df
def _scheduleNextNodeRefresh(self, *args):
#print '==== sheduling next refresh'
self.next_refresh_call = twisted.internet.reactor.callLater(
constants.checkRefreshInterval, self._refreshNode)


@@ -208,7 +208,6 @@ class KademliaProtocol(protocol.DatagramProtocol):
seqNumber = 0
startPos = 0
while seqNumber < totalPackets:
#reactor.iterate() #IGNORE:E1101
packetData = data[startPos:startPos+self.msgSizeLimit]
encSeqNumber = chr(seqNumber >> 8) + chr(seqNumber & 0xff)
txData = '\x00%s%s%s\x00%s' % (encTotalPackets, encSeqNumber, rpcID, packetData)
@@ -270,13 +269,8 @@ class KademliaProtocol(protocol.DatagramProtocol):
if callable(func) and hasattr(func, 'rpcmethod'):
# Call the exposed Node method and return the result to the deferred callback chain
try:
##try:
## # Try to pass the sender's node id to the function...
kwargs = {'_rpcNodeID': senderContact.id, '_rpcNodeContact': senderContact}
result = func(*args, **kwargs)
##except TypeError:
## # ...or simply call it if that fails
## result = func(*args)
except Exception, e:
df.errback(failure.Failure(e))
else:


@@ -208,9 +208,6 @@ class TreeRoutingTable(RoutingTable):
node is returning all of the contacts that it knows of.
@rtype: list
"""
#if key == self.id:
# bucketIndex = 0 #TODO: maybe not allow this to continue?
#else:
bucketIndex = self._kbucketIndex(key)
closestNodes = self._buckets[bucketIndex].getContacts(constants.k, _rpcNodeID)
# This method must return k contacts (even if we have the node
@@ -290,7 +287,6 @@ class TreeRoutingTable(RoutingTable):
try:
self._buckets[bucketIndex].removeContact(contactID)
except ValueError:
#print 'removeContact(): Contact not in routing table'
return
def touchKBucket(self, key):
@@ -427,7 +423,6 @@ class OptimizedTreeRoutingTable(TreeRoutingTable):
try:
contact = self._buckets[bucketIndex].getContact(contactID)
except ValueError:
#print 'removeContact(): Contact not in routing table'
return
contact.failedRPCs += 1
if contact.failedRPCs >= 5:


@@ -25,7 +25,6 @@
import sys, hashlib, random
import twisted.internet.reactor
from lbrynet.dht.node import Node
#from entangled.kademlia.datastore import SQLiteDataStore
# The Entangled DHT node; instantiated in the main() method
node = None
@@ -77,7 +76,6 @@ def getValue():
binascii.unhexlify("f7d9dc4de674eaa2c5a022eb95bc0d33ec2e75c6"))
deferredResult = node.iterativeFindValue(
binascii.unhexlify("f7d9dc4de674eaa2c5a022eb95bc0d33ec2e75c6"))
#deferredResult = node.iterativeFindValue(KEY)
# Add a callback to this result; this will be called as soon as the operation has completed
deferredResult.addCallback(getValueCallback)
# As before, add the generic error callback
@@ -91,19 +89,8 @@ def getValueCallback(result):
# contacts would be returned instead")
print "Got the value"
print result
#if type(result) == dict:
# for v in result[binascii.unhexlify("5292fa9c426621f02419f5050900392bdff5036c")]:
# print "v:", v
# print "v[6:", v[6:]
# print "lbryid:",lbryid
# print "lbryid == v[6:]:", lbryid == v[6:]
# print 'Value successfully retrieved: %s' % result[KEY]
#else:
# print 'Value not found'
# Either way, schedule a "delete" operation for the key
#print 'Scheduling removal in 2.5 seconds...'
#twisted.internet.reactor.callLater(2.5, deleteValue)
print 'Scheduling shutdown in 2.5 seconds...'
twisted.internet.reactor.callLater(2.5, stop)
@@ -151,9 +138,6 @@ if __name__ == '__main__':
print 'Run this script without any arguments for info.\n'
# Set up SQLite-based data store (you could use an in-memory store instead, for example)
#if os.path.isfile('/tmp/dbFile%s.db' % sys.argv[1]):
# os.remove('/tmp/dbFile%s.db' % sys.argv[1])
#dataStore = SQLiteDataStore(dbFile = '/tmp/dbFile%s.db' % sys.argv[1])
#
# Create the Entangled node. It extends the functionality of a
# basic Kademlia node (but is fully backwards-compatible with a
@@ -162,14 +146,12 @@ if __name__ == '__main__':
# If you wish to have a pure Kademlia network, use the
# entangled.kademlia.node.Node class instead
print 'Creating Node...'
#node = EntangledNode( udpPort=int(sys.argv[1]), dataStore=dataStore )
node = Node(udpPort=int(sys.argv[1]), lbryid=lbryid)
# Schedule the node to join the Kademlia/Entangled DHT
node.joinNetwork(knownNodes)
# Schedule the "storeValue() call to be invoked after 2.5 seconds,
#using KEY and VALUE as arguments
#twisted.internet.reactor.callLater(2.5, storeValue, KEY, VALUE)
twisted.internet.reactor.callLater(2.5, getValue)
# Start the Twisted reactor - this fires up all networking, and
# allows the scheduled join operation to take place


@@ -152,7 +152,6 @@ class StdinStreamProducer(object):
self.finished_deferred = defer.Deferred()
self.consumer.registerProducer(self, True)
#self.reader = process.ProcessReader(reactor, self, 'read', 0)
self.resumeProducing()
return self.finished_deferred


@@ -46,7 +46,6 @@ class LiveStreamDownloader(_LiveStreamDownloader):
_LiveStreamDownloader.__init__(self, stream_hash, peer_finder, rate_limiter, blob_manager,
stream_info_manager, payment_rate_manager, wallet, upload_allowed)
#self.writer = process.ProcessWriter(reactor, self, 'write', 1)
def _get_metadata_handler(self, download_manager):
return LiveStreamMetadataHandler(self.stream_hash, self.stream_info_manager,
@@ -61,7 +60,6 @@ class LiveStreamDownloader(_LiveStreamDownloader):
def _get_write_func(self):
def write_func(data):
if self.stopped is False:
#self.writer.write(data)
pass
return write_func


@@ -76,7 +76,6 @@ class DaemonRequest(server.Request):
try:
self.content.seek(0, 0)
args.update(self.parse_multipart(self.content, pdict))
#args.update(cgi.parse_multipart(self.content, pdict))
except KeyError as e:
if e.args[0] == b'content-disposition':


@@ -285,7 +285,6 @@ class AuthJSONRPCServer(AuthorizedBase):
assert api_key.compare_hmac(to_auth, token), InvalidAuthenticationToken
def _update_session_secret(self, session_id):
# log.info("Generating new token for next request")
self.sessions.update({session_id: APIKey.new(name=session_id)})
def _get_jsonrpc_version(self, version=None, id=None):


@@ -30,7 +30,6 @@ class ReflectorServer(Protocol):
def dataReceived(self, data):
if self.receiving_blob:
# log.debug('Writing data to blob')
self.blob_write(data)
else:
log.debug('Not yet recieving blob, data needs further processing')


@@ -165,7 +165,6 @@ class SysTrayIcon(object):
def show_menu(self):
menu = win32gui.CreatePopupMenu()
self.create_menu(menu, self.menu_options)
# win32gui.SetMenuDefaultItem(menu, 1000, 0)
pos = win32gui.GetCursorPos()
# See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp


@@ -51,14 +51,6 @@ requires = [
]
console_scripts = [
# 'lbrynet-stdin-uploader = lbrynet.lbrynet_console.LBRYStdinUploader:launch_stdin_uploader',
# 'lbrynet-stdout-downloader = lbrynet.lbrynet_console.LBRYStdoutDownloader:launch_stdout_downloader',
# 'lbrynet-create-network = lbrynet.create_network:main',
# 'lbrynet-launch-node = lbrynet.dht.node:main',
# 'lbrynet-launch-rpc-node = lbrynet.rpc_node:main',
# 'lbrynet-rpc-node-cli = lbrynet.node_rpc_cli:main',
# 'lbrynet-lookup-hosts-for-hash = lbrynet.dht_scripts:get_hosts_for_hash_in_dht',
# 'lbrynet-announce_hash_to_dht = lbrynet.dht_scripts:announce_hash_to_dht',
'lbrynet-daemon = lbrynet.lbrynet_daemon.DaemonControl:start',
'stop-lbrynet-daemon = lbrynet.lbrynet_daemon.DaemonControl:stop',
'lbrynet-cli = lbrynet.lbrynet_daemon.DaemonCLI:main'


@@ -54,12 +54,13 @@ class NodeDataTest(unittest.TestCase):
h.update(str(i))
self.cases.append((h.digest(), 5000+2*i))
self.cases.append((h.digest(), 5001+2*i))
<<<<<<< Updated upstream
#(('a', 'hello there\nthis is a test'),
# ('b', unicode('jasdklfjklsdj;f2352352ljklzsdlkjkasf\ndsjklafsd')),
# ('e', 123),
# ('f', [('this', 'is', 1), {'complex': 'data entry'}]),
# ('aMuchLongerKeyThanAnyOfThePreviousOnes', 'some data'))
=======
>>>>>>> Stashed changes
def testStore(self):
def check_val_in_result(r, peer_info):
@@ -105,31 +106,17 @@ class NodeContactTest(unittest.TestCase):
self.failIf(contact in closestNodes, 'Node added itself as a contact')
#class NodeLookupTest(unittest.TestCase):
<<<<<<< Updated upstream
# """ Test case for the Node class's iterative node lookup algorithm """
# def setUp(self):
# import entangled.kademlia.contact
# self.node = entangled.kademlia.node.Node()
# self.remoteNodes = []
# for i in range(10):
# remoteNode = entangled.kademlia.node.Node()
# remoteContact = entangled.kademlia.contact.Contact(remoteNode.id, '127.0.0.1', 91827+i, self.node._protocol)
# self.remoteNodes.append(remoteNode)
# self.node.addContact(remoteContact)
# def testIterativeFindNode(self):
# """ Ugly brute-force test to see if the iterative node lookup algorithm runs without failing """
# import entangled.kademlia.protocol
# entangled.kademlia.protocol.reactor.listenUDP(91826, self.node._protocol)
# for i in range(10):
# entangled.kademlia.protocol.reactor.listenUDP(91827+i, self.remoteNodes[i]._protocol)
# df = self.node.iterativeFindNode(self.node.id)
# df.addBoth(lambda _: entangled.kademlia.protocol.reactor.stop())
# entangled.kademlia.protocol.reactor.run()
=======
>>>>>>> Stashed changes
""" Some scaffolding for the NodeLookupTest class. Allows isolated node testing by simulating remote node responses"""
"""Some scaffolding for the NodeLookupTest class. Allows isolated
node testing by simulating remote node responses"""
from twisted.internet import protocol, defer, selectreactor
from lbrynet.dht.msgtypes import ResponseMessage
@@ -149,22 +136,17 @@ class FakeRPCProtocol(protocol.DatagramProtocol):
""" Fake RPC protocol; allows entangled.kademlia.contact.Contact objects to "send" RPCs """
def sendRPC(self, contact, method, args, rawResponse=False):
#print method + " " + str(args)
if method == "findNode":
# get the specific contacts closest contacts
closestContacts = []
#print "contact" + contact.id
for contactTuple in self.network:
#print contactTuple[0].id
if contact == contactTuple[0]:
# get the list of closest contacts for this contact
closestContactsList = contactTuple[1]
#print "contact" + contact.id
# Pack the closest contacts into a ResponseMessage
for closeContact in closestContactsList:
#print closeContact.id
closestContacts.append((closeContact.id, closeContact.address, closeContact.port))
message = ResponseMessage("rpcId", contact.id, closestContacts)
@@ -221,9 +203,11 @@ class NodeLookupTest(unittest.TestCase):
self.updPort = 81173
<<<<<<< Updated upstream
# create a dummy reactor
#self._protocol.reactor.listenUDP(self.updPort, self._protocol)
=======
>>>>>>> Stashed changes
self.contactsAmount = 80
# set the node ID manually for testing
self.node.id = '12345678901234567800'
@@ -233,7 +217,6 @@ class NodeLookupTest(unittest.TestCase):
# create 160 bit node ID's for test purposes
self.testNodeIDs = []
#idNum = long(self.node.id.encode('hex'), 16)
idNum = int(self.node.id)
for i in range(self.contactsAmount):
# create the testNodeIDs in ascending order, away from the actual node ID, with regards to the distance metric
@@ -284,7 +267,6 @@ class NodeLookupTest(unittest.TestCase):
for item in self.contacts[0:6]:
expectedResult.append(item.id)
#print item.id
# Get the result from the deferred
activeContacts = df.result
@@ -298,151 +280,7 @@ class NodeLookupTest(unittest.TestCase):
# Check that the received active contacts are the same as the input contacts
self.failUnlessEqual(activeContacts, expectedResult, \
"Active should only contain the closest possible contacts which were used as input for the boostrap")
# def testFindingCloserNodes(self):
# """ Test discovery of closer contacts"""
#
# # Use input contacts that have knowledge of closer contacts,
# df = self.node._iterativeFind(self.node.id, self.contacts[50:53])
# #set the expected result
# expectedResult = []
# #print "############ Expected Active contacts #################"
# for item in self.contacts[0:9]:
# expectedResult.append(item.id)
# #print item.id
# #print "#######################################################"
#
# # Get the result from the deferred
# activeContacts = df.result
#
# #print "!!!!!!!!!!! Receieved Active contacts !!!!!!!!!!!!!!!"
# #for item in activeContacts:
# # print item.id
# #print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
#
# # Check the length of the active contacts
# self.failUnlessEqual(activeContacts.__len__(), expectedResult.__len__(), \
# "Length of received active contacts not as expected, should be %d" %expectedResult.__len__())
#
#
# # Check that the received active contacts are now closer to this node
# self.failUnlessEqual(activeContacts, expectedResult, \
# "Active contacts should now only contain the closest possible contacts")
# def testIterativeStore(self):
# """ test storing values """
#
# # create the network of contacts in format: (contact, closest contacts)
# contactNetwork = ((self.contacts[0], self.contacts[0:8]),
# (self.contacts[1], self.contacts[0:8]),
# (self.contacts[2], self.contacts[0:8]),
# (self.contacts[3], self.contacts[0:8]),
# (self.contacts[4], self.contacts[0:8]),
# (self.contacts[5], self.contacts[0:8]),
# (self.contacts[6], self.contacts[0:8]),
# (self.contacts[7], self.contacts[0:8]),
# (self.contacts[8], self.contacts[0:8]),
# (self.contacts[40], self.contacts[41:48]),
# (self.contacts[41], self.contacts[41:48]),
# (self.contacts[42], self.contacts[41:48]),
# (self.contacts[43], self.contacts[41:48]),
# (self.contacts[44], self.contacts[41:48]),
# (self.contacts[45], self.contacts[41:48]),
# (self.contacts[46], self.contacts[41:48]),
# (self.contacts[47], self.contacts[41:48]),
# (self.contacts[48], self.contacts[41:48]))
# contacts_with_datastores = []
#
# for contact_tuple in contactNetwork:
# contacts_with_datastores.append((contact_tuple[0], contact_tuple[1], lbrynet.dht.datastore.DictDataStore()))
#
# self._protocol.createNetwork(contacts_with_datastores)
#
#
# #self._protocol.createNetwork(contactNetwork)
#
#
# # Test storing a value that has an hash id close to the known contacts
# # The value should only be stored at those nodes
# value = 'value'
# valueID = self.contacts[40].id
#
# # Manually populate the routing table with contacts that have ID's close to the valueID
# for contact in self.contacts[40:48]:
# self.node.addContact(contact)
#
# # Manually populate the routing table with contacts that have ID's far away from the valueID
# for contact in self.contacts[0:8]:
# self.node.addContact(contact)
#
# # Store the value
# df = self.node.announceHaveBlob(valueID, value)
#
# storageNodes = df.result
#
# storageNodeIDs = []
# for item in storageNodes:
# storageNodeIDs.append(item.id)
# storageNodeIDs.sort()
# #print storageNodeIDs
#
# expectedIDs = []
# for item in self.contacts[40:43]:
# expectedIDs.append(item.id)
# #print expectedIDs
#
# #print '#### storage nodes ####'
# #for node in storageNodes:
# # print node.id
#
#
# # check that the value has been stored at nodes with ID's close to the valueID
# self.failUnlessEqual(storageNodeIDs, expectedIDs, \
# "Value not stored at nodes with ID's close to the valueID")
#
# def testFindValue(self):
# # create test values using the contact ID as the key
# testValues = ({self.contacts[0].id: "some test data"},
# {self.contacts[1].id: "some more test data"},
# {self.contacts[8].id: "and more data"}
# )
#
#
# # create the network of contacts in format: (contact, closest contacts, values)
# contactNetwork = ((self.contacts[0], self.contacts[0:6], testValues[0]),
# (self.contacts[1], self.contacts[0:6], testValues[1]),
# (self.contacts[2], self.contacts[0:6], {'2':'2'}),
# (self.contacts[3], self.contacts[0:6], {'4':'5'}),
# (self.contacts[4], self.contacts[0:6], testValues[2]),
# (self.contacts[5], self.contacts[0:6], {'2':'2'}),
# (self.contacts[6], self.contacts[0:6], {'2':'2'}))
#
# self._protocol.createNetwork(contactNetwork)
#
# # Initialise the routing table with some contacts
# for contact in self.contacts[0:4]:
# self.node.addContact(contact)
#
# # Initialise the node with some known contacts
# #self.node._iterativeFind(self.node.id, self.contacts[0:3])
#
# df = self.node.iterativeFindValue(testValues[1].keys()[0])
#
# resultDict = df.result
# keys = resultDict.keys()
#
# for key in keys:
# if key == 'closestNodeNoValue':
# print "closest contact without data " + " " + resultDict.get(key).id
# else:
# print "data key :" + key + "; " + "data: " + resultDict.get(key)
def suite():
suite = unittest.TestSuite()
@@ -452,6 +290,7 @@ def suite():
suite.addTest(unittest.makeSuite(NodeLookupTest))
return suite
if __name__ == '__main__':
# If this module is executed from the commandline, run all its tests
unittest.TextTestRunner().run(suite())


@@ -68,16 +68,12 @@ class ClientDatagramProtocol(lbrynet.dht.protocol.KademliaProtocol):
lbrynet.dht.protocol.KademliaProtocol.__init__(self, None)
def startProtocol(self):
#self.transport.connect(self.destination[0], self.destination[1])
self.sendDatagram()
def sendDatagram(self):
if len(self.data):
self._send(self.data, self.msgID, self.destination)
# def datagramReceived(self, datagram, host):
# print 'Datagram received: ', repr(datagram)
# self.sendDatagram()
@@ -193,44 +189,6 @@ class KademliaProtocolTest(unittest.TestCase):
# The list of sent RPC messages should be empty at this stage
self.failUnlessEqual(len(self.protocol._sentMessages), 0, 'The protocol is still waiting for a RPC result, but the transaction is already done!')
# def testDatagramLargeMessageReconstruction(self):
# """ Tests if a large amount of data can be successfully re-constructed from multiple UDP datagrams """
# remoteContact = lbrynet.dht.contact.Contact('node2', '127.0.0.1', 9182, self.protocol)
# self.node.addContact(remoteContact)
# self.error = None
# #responseData = 8143 * '0' # Threshold for a single packet transmission
# responseData = 300000 * '0'
# def handleError(f):
# if f.check((lbrynet.dht.protocol.TimeoutError)):
# self.error = 'RPC from the following contact timed out: %s' % f.getErrorMessage()
# else:
# self.error = 'An RPC error occurred: %s' % f.getErrorMessage()
# def handleResult(result):
# if result != responseData:
# self.error = 'Result from RPC is incorrect; expected "%s", got "%s"' % (responseData, result)
# # Publish the "local" node on the network
# lbrynet.dht.protocol.reactor.listenUDP(9182, self.protocol)
# # ...and make it think it is waiting for a result from an RPC
# msgID = 'abcdefghij1234567890'
# df = defer.Deferred()
# timeoutCall = lbrynet.dht.protocol.reactor.callLater(lbrynet.dht.constants.rpcTimeout, self.protocol._msgTimeout, msgID)
# self.protocol._sentMessages[msgID] = (remoteContact.id, df, timeoutCall)
# # Simulate the "reply" transmission
# msg = lbrynet.dht.msgtypes.ResponseMessage(msgID, 'node2', responseData)
# msgPrimitive = self.protocol._translator.toPrimitive(msg)
# encodedMsg = self.protocol._encoder.encode(msgPrimitive)
# udpClient = ClientDatagramProtocol()
# udpClient.data = encodedMsg
# udpClient.msgID = msgID
# lbrynet.dht.protocol.reactor.listenUDP(0, udpClient)
# df.addCallback(handleResult)
# df.addErrback(handleError)
# df.addBoth(lambda _: lbrynet.dht.protocol.reactor.stop())
# lbrynet.dht.protocol.reactor.run()
# self.failIf(self.error, self.error)
# # The list of sent RPC messages should be empty at this stage
# #self.failUnlessEqual(len(self.protocol._sentMessages), 0, 'The protocol is still waiting for a RPC result, but the transaction is already done!')
def suite():
suite = unittest.TestSuite()


@@ -17,7 +17,6 @@ import hashlib
class DictDataStoreTest(unittest.TestCase):
""" Basic tests case for the reference DataStore API and implementation """
def setUp(self):
#if not hasattr(self, 'ds'):
self.ds = lbrynet.dht.datastore.DictDataStore()
h = hashlib.sha1()
h.update('g')
@@ -29,12 +28,6 @@ class DictDataStoreTest(unittest.TestCase):
h3.update('Boozoo Bajou - 09 - S.I.P.mp3')
hashKey3 = h3.digest()
#self.cases = (('a', 'hello there\nthis is a test'),
# ('b', unicode('jasdklfjklsdj;f2352352ljklzsdlkjkasf\ndsjklafsd')),
# ('e', 123),
# ('f', [('this', 'is', 1), {'complex': 'data entry'}]),
# ('aMuchLongerKeyThanAnyOfThePreviousOnes', 'some data'),
# (hashKey, 'some data'),
# (hashKey2, 'abcdefghijklmnopqrstuvwxz'),
# (hashKey3, '1 2 3 4 5 6 7 8 9 0'))
self.cases = ((hashKey, 'test1test1test1test1test1t'),
(hashKey, 'test2'),
@@ -90,88 +83,37 @@ class DictDataStoreTest(unittest.TestCase):
self.failIf('val3' in self.ds.getPeersForBlob(h2), 'DataStore failed to delete an expired value! Value %s, publish time %s, current time %s' % ('val3', str(now - td2), str(now)))
self.failUnless('val4' in self.ds.getPeersForBlob(h2), 'DataStore deleted an unexpired value! Value %s, publish time %s, current time %s' % ('val4', str(now), str(now)))
# def testReplace(self):
# # First write with fake values
# now = int(time.time())
# for key, value in self.cases:
# try:
# self.ds.setItem(key, 'abc', now, now, 'node1')
# except Exception:
# import traceback
# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5)))
#
# # write this stuff a second time, with the real values
# for key, value in self.cases:
# try:
# self.ds.setItem(key, value, now, now, 'node1')
# except Exception:
# import traceback
# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5)))
#
# self.failUnlessEqual(len(self.ds.keys()), len(self.cases), 'Values did not get overwritten properly; expected %d keys, got %d' % (len(self.cases), len(self.ds.keys())))
# # Read back the data
# for key, value in self.cases:
# self.failUnlessEqual(self.ds[key], value, 'DataStore returned invalid data! Expected "%s", got "%s"' % (value, self.ds[key]))
# def testDelete(self):
# # First some values
# now = int(time.time())
# for key, value in self.cases:
# try:
# self.ds.setItem(key, 'abc', now, now, 'node1')
# except Exception:
# import traceback
# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5)))
#
# self.failUnlessEqual(len(self.ds.keys()), len(self.cases), 'Values did not get stored properly; expected %d keys, got %d' % (len(self.cases), len(self.ds.keys())))
#
# # Delete an item from the data
# key, value == self.cases[0]
# del self.ds[key]
# self.failUnlessEqual(len(self.ds.keys()), len(self.cases)-1, 'Value was not deleted; expected %d keys, got %d' % (len(self.cases)-1, len(self.ds.keys())))
# self.failIf(key in self.ds.keys(), 'Key was not deleted: %s' % key)
# def testMetaData(self):
# now = int(time.time())
# age = random.randint(10,3600)
# originallyPublished = []
# for i in range(len(self.cases)):
# originallyPublished.append(now - age)
# # First some values with metadata
# i = 0
# for key, value in self.cases:
# try:
# self.ds.setItem(key, 'abc', now, originallyPublished[i], 'node%d' % i)
# i += 1
# except Exception:
# import traceback
# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5)))
#
# # Read back the meta-data
# i = 0
# for key, value in self.cases:
# dsLastPublished = self.ds.lastPublished(key)
# dsOriginallyPublished = self.ds.originalPublishTime(key)
# dsOriginalPublisherID = self.ds.originalPublisherID(key)
# self.failUnless(type(dsLastPublished) == int, 'DataStore returned invalid type for "last published" time! Expected "int", got %s' % type(dsLastPublished))
# self.failUnless(type(dsOriginallyPublished) == int, 'DataStore returned invalid type for "originally published" time! Expected "int", got %s' % type(dsOriginallyPublished))
# self.failUnless(type(dsOriginalPublisherID) == str, 'DataStore returned invalid type for "original publisher ID"; Expected "str", got %s' % type(dsOriginalPublisherID))
# self.failUnlessEqual(dsLastPublished, now, 'DataStore returned invalid "last published" time! Expected "%d", got "%d"' % (now, dsLastPublished))
# self.failUnlessEqual(dsOriginallyPublished, originallyPublished[i], 'DataStore returned invalid "originally published" time! Expected "%d", got "%d"' % (originallyPublished[i], dsOriginallyPublished))
# self.failUnlessEqual(dsOriginalPublisherID, 'node%d' % i, 'DataStore returned invalid "original publisher ID"; Expected "%s", got "%s"' % ('node%d' % i, dsOriginalPublisherID))
# i += 1
#class SQLiteDataStoreTest(DictDataStoreTest):
# def setUp(self):
# self.ds = entangled.kademlia.datastore.SQLiteDataStore()
# DictDataStoreTest.setUp(self)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DictDataStoreTest))
#suite.addTest(unittest.makeSuite(SQLiteDataStoreTest))
return suite