Merge pull request #339 from lbryio/all-prs

All The PRs
Job Evers‐Meltzer 2016-12-15 19:42:35 -06:00 committed by GitHub
commit 3dde7af576
74 changed files with 923 additions and 1216 deletions

View file

@@ -3,7 +3,7 @@ from lbrynet.core import looping_call_manager
 from twisted.internet import defer
 from twisted.internet import task
-from lbrynet.core.Platform import get_platform
+from lbrynet.core.system_info import get_platform
 from lbrynet.conf import settings
 import constants

View file

@@ -182,6 +182,7 @@ ENVIRONMENT = Env(
     # all of your credits.
     API_INTERFACE=(str, "localhost"),
     bittrex_feed=(str, "https://bittrex.com/api/v1.1/public/getmarkethistory"),
+    reflector_reupload=(bool, True),
 )
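The new reflector_reupload entry follows the same (type, default) convention as the other ENVIRONMENT settings: the first element is the coercion type, the second the default used when no override is present. A rough sketch of how such a typed default could be resolved from an environment variable; the helper name and the LBRY_ prefix are illustrative assumptions, not the project's actual Env implementation:

import os

def read_bool_setting(name, default):
    # Hypothetical helper: coerce an environment override to bool,
    # falling back to the declared default when the variable is unset.
    raw = os.environ.get('LBRY_' + name.upper())
    if raw is None:
        return default
    return raw.strip().lower() in ('1', 'true', 'yes', 'on')

reflector_reupload = read_bool_setting('reflector_reupload', True)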
@@ -214,7 +215,7 @@ class ApplicationSettings(Settings):
        self.BLOBFILES_DIR = "blobfiles"
        self.BLOB_SIZE = 2*MB
        self.LOG_FILE_NAME = "lbrynet.log"
        self.LOG_POST_URL = "https://lbry.io/log-upload"
        self.CRYPTSD_FILE_EXTENSION = ".cryptsd"
        self.API_ADDRESS = "lbryapi"
        self.ICON_PATH = "icons" if platform is WINDOWS else "app.icns"
@@ -230,7 +231,7 @@ class ApplicationSettings(Settings):
        self.LOGGLY_TOKEN = 'LJEzATH4AzRgAwxjAP00LwZ2YGx3MwVgZTMuBQZ3MQuxLmOv'
        self.ANALYTICS_ENDPOINT = 'https://api.segment.io/v1'
        self.ANALYTICS_TOKEN = 'Ax5LZzR1o3q3Z3WjATASDwR5rKyHH0qOIRIbLmMXn2H='
        self.DB_REVISION_FILE_NAME = 'db_revision'
        Settings.__init__(self)
@@ -299,7 +300,7 @@ class Config(DefaultSettings):
        return os.path.join(self.ensure_data_dir(), self.LOG_FILE_NAME)

    def get_db_revision_filename(self):
        return os.path.join(self.ensure_data_dir(), self.DB_REVISION_FILE_NAME)

    def get_conf_filename(self):
        return get_settings_file_ext(self.ensure_data_dir())

View file

@@ -1,4 +1,6 @@
 import logging
+import random
+import time
 from twisted.internet import defer
 from twisted.internet.task import LoopingCall
@@ -27,7 +29,7 @@ class BlobAvailabilityTracker(object):
    def start(self):
        log.info("Starting %s", self)
        self._check_popular.start(30)
-       self._check_mine.start(120)
+       self._check_mine.start(600)

    def stop(self):
        log.info("Stopping %s", self)
@@ -76,7 +78,8 @@ class BlobAvailabilityTracker(object):
    def _update_most_popular(self):
        d = self._get_most_popular()
-       d.addCallback(lambda _: self._get_mean_peers())
+       d.addCallback(lambda _: self._set_mean_peers())

    def _update_mine(self):
        def _get_peers(blobs):
@@ -85,11 +88,26 @@ class BlobAvailabilityTracker(object):
                dl.append(self._update_peers_for_blob(hash))
            return defer.DeferredList(dl)

-       d = self._blob_manager.get_all_verified_blobs()
-       d.addCallback(_get_peers)
-       d.addCallback(lambda _: self._get_mean_peers())
+       def sample(blobs):
+           return random.sample(blobs, 100)
+
+       start = time.time()
+       log.debug('==> Updating the peers for my blobs')
+       d = self._blob_manager.get_all_verified_blobs()
+       # as far as I can tell, this only is used to set _last_mean_availability
+       # which... seems like a very expensive operation for such little payoff.
+       # so taking a sample should get about the same effect as querying the entire
+       # list of blobs
+       d.addCallback(sample)
+       d.addCallback(_get_peers)
+       d.addCallback(lambda _: self._set_mean_peers())
+       d.addCallback(lambda _: log.debug('<== Done updating peers for my blobs. Took %s seconds',
+                                         time.time() - start))
+       # although unused, need to return or else the looping call
+       # could overrun on a previous call
+       return d

-   def _get_mean_peers(self):
+   def _set_mean_peers(self):
        num_peers = [len(self.availability[blob]) for blob in self.availability]
        mean = Decimal(sum(num_peers)) / Decimal(max(1, len(num_peers)))
        self._last_mean_availability = mean
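One caveat with the new sample() helper: random.sample raises ValueError when the population holds fewer items than the requested sample size, so a node with fewer than 100 verified blobs would fail here. A guarded variant, shown only as an illustrative sketch rather than the committed code:

import random

def sample_blobs(blobs, max_size=100):
    # random.sample(population, k) requires k <= len(population),
    # so cap the sample size at the number of available blobs.
    return random.sample(blobs, min(max_size, len(blobs)))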

View file

@@ -15,4 +15,4 @@ class BlobInfo(object):
    def __init__(self, blob_hash, blob_num, length):
        self.blob_hash = blob_hash
        self.blob_num = blob_num
        self.length = length

View file

@@ -9,10 +9,10 @@ from twisted.enterprise import adbapi
 from lbrynet.core.HashBlob import BlobFile, TempBlob, BlobFileCreator, TempBlobCreator
 from lbrynet.core.server.DHTHashAnnouncer import DHTHashSupplier
 from lbrynet.core.utils import is_valid_blobhash
-from lbrynet.core.cryptoutils import get_lbry_hash_obj
 from lbrynet.core.Error import NoSuchBlobError
 from lbrynet.core.sqlite_helpers import rerun_if_locked

 log = logging.getLogger(__name__)
@@ -52,9 +52,6 @@ class BlobManager(DHTHashSupplier):
    def get_blob_length(self, blob_hash):
        pass

-   def check_consistency(self):
-       pass

    def blob_requested(self, blob_hash):
        pass
@@ -86,6 +83,8 @@ class DiskBlobManager(BlobManager):
        self.db_conn = None
        self.blob_type = BlobFile
        self.blob_creator_type = BlobFileCreator
+       # TODO: consider using an LRU for blobs as there could potentially
+       # be thousands of blobs loaded up, many stale
        self.blobs = {}
        self.blob_hashes_to_delete = {}  # {blob_hash: being_deleted (True/False)}
        self._next_manage_call = None
@@ -102,7 +101,6 @@ class DiskBlobManager(BlobManager):
        if self._next_manage_call is not None and self._next_manage_call.active():
            self._next_manage_call.cancel()
        self._next_manage_call = None
-       #d = self.db_conn.close()
        self.db_conn = None
        return defer.succeed(True)
@@ -120,6 +118,7 @@ class DiskBlobManager(BlobManager):
        return self.blob_creator_type(self, self.blob_dir)

    def _make_new_blob(self, blob_hash, upload_allowed, length=None):
+       log.debug('Making a new blob for %s', blob_hash)
        blob = self.blob_type(self.blob_dir, blob_hash, upload_allowed, length)
        self.blobs[blob_hash] = blob
        d = self._completed_blobs([blob_hash])
@@ -143,9 +142,11 @@ class DiskBlobManager(BlobManager):
    def blob_completed(self, blob, next_announce_time=None):
        if next_announce_time is None:
-           next_announce_time = time.time()
-       return self._add_completed_blob(blob.blob_hash, blob.length,
-                                       time.time(), next_announce_time)
+           next_announce_time = time.time() + self.hash_reannounce_time
+       d = self._add_completed_blob(blob.blob_hash, blob.length,
+                                    time.time(), next_announce_time)
+       d.addCallback(lambda _: self.hash_announcer.immediate_announce([blob.blob_hash]))
+       return d

    def completed_blobs(self, blobs_to_check):
        return self._completed_blobs(blobs_to_check)
@@ -186,9 +187,6 @@ class DiskBlobManager(BlobManager):
    def get_blob_length(self, blob_hash):
        return self._get_blob_length(blob_hash)

-   def check_consistency(self):
-       return self._check_consistency()

    def get_all_verified_blobs(self):
        d = self._get_all_verified_blob_hashes()
        d.addCallback(self.completed_blobs)
@@ -299,18 +297,27 @@ class DiskBlobManager(BlobManager):
    @rerun_if_locked
    def _completed_blobs(self, blobs_to_check):
+       """Returns of the blobs_to_check, which are valid"""
        blobs_to_check = filter(is_valid_blobhash, blobs_to_check)

-       def get_blobs_in_db(db_transaction):
-           blobs_in_db = []  # [(blob_hash, last_verified_time)]
-           for b in blobs_to_check:
-               result = db_transaction.execute(
-                   "select last_verified_time from blobs where blob_hash = ?",
-                   (b,))
-               row = result.fetchone()
-               if row is not None:
-                   blobs_in_db.append((b, row[0]))
-           return blobs_in_db
+       def _get_last_verified_time(db_transaction, blob_hash):
+           result = db_transaction.execute(
+               "select last_verified_time from blobs where blob_hash = ?", (blob_hash,))
+           row = result.fetchone()
+           if row:
+               return row[0]
+           else:
+               return None
+
+       def _filter_blobs_in_db(db_transaction, blobs_to_check):
+           for b in blobs_to_check:
+               verified_time = _get_last_verified_time(db_transaction, b)
+               if verified_time:
+                   yield (b, verified_time)
+
+       def get_blobs_in_db(db_transaction, blob_to_check):
+           # [(blob_hash, last_verified_time)]
+           return list(_filter_blobs_in_db(db_transaction, blobs_to_check))

        def get_valid_blobs(blobs_in_db):
@@ -319,23 +326,31 @@ class DiskBlobManager(BlobManager):
                if os.path.isfile(file_path):
                    if verified_time > os.path.getctime(file_path):
                        return True
+                   else:
+                       log.debug('Verification time for %s is too old (%s < %s)',
+                                 file_path, verified_time, os.path.getctime(file_path))
+               else:
+                   log.debug('file %s does not exist', file_path)
                return False

-           def return_valid_blobs(results):
-               valid_blobs = []
-               for (b, verified_date), (success, result) in zip(blobs_in_db, results):
-                   if success is True and result is True:
-                       valid_blobs.append(b)
+           def filter_valid_blobs(results):
+               assert len(blobs_in_db) == len(results)
+               valid_blobs = [
+                   b for (b, verified_date), (success, result) in zip(blobs_in_db, results)
+                   if success is True and result is True
+               ]
+               log.debug('Of %s blobs, %s were valid', len(results), len(valid_blobs))
                return valid_blobs

-           ds = []
-           for b, verified_date in blobs_in_db:
-               ds.append(threads.deferToThread(check_blob_verified_date, b, verified_date))
+           ds = [
+               threads.deferToThread(check_blob_verified_date, b, verified_date)
+               for b, verified_date in blobs_in_db
+           ]
            dl = defer.DeferredList(ds)
-           dl.addCallback(return_valid_blobs)
+           dl.addCallback(filter_valid_blobs)
            return dl

-       d = self.db_conn.runInteraction(get_blobs_in_db)
+       d = self.db_conn.runInteraction(get_blobs_in_db, blobs_to_check)
        d.addCallback(get_valid_blobs)
        return d
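The rewritten check leans on the fact that defer.DeferredList fires with its results in the same order as the deferreds it was given, which is what makes zip(blobs_in_db, results) pair each blob with its own (success, result) flag. A self-contained sketch of that pattern with placeholder names (check_one stands in for check_blob_verified_date):

from twisted.internet import defer, threads

def check_one(item):
    # stand-in for a blocking check run off the reactor thread
    return item % 2 == 0

@defer.inlineCallbacks
def filter_valid(items):
    results = yield defer.DeferredList(
        [threads.deferToThread(check_one, i) for i in items])
    # DeferredList preserves order, so results line up with items
    valid = [i for i, (success, ok) in zip(items, results) if success and ok]
    defer.returnValue(valid)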
@@ -345,8 +360,6 @@ class DiskBlobManager(BlobManager):
        d.addCallback(lambda r: r[0][0] if len(r) else Failure(NoSuchBlobError(blob)))
        return d
-       #length, verified_time, next_announce_time = json.loads(self.db.Get(blob))
-       #return length

    @rerun_if_locked
    def _update_blob_verified_timestamp(self, blob, timestamp):
@@ -382,73 +395,6 @@ class DiskBlobManager(BlobManager):
        return self.db_conn.runInteraction(delete_blobs)
@rerun_if_locked
def _check_consistency(self):
ALREADY_VERIFIED = 1
NEWLY_VERIFIED = 2
INVALID = 3
current_time = time.time()
d = self.db_conn.runQuery("select blob_hash, blob_length, last_verified_time from blobs")
def check_blob(blob_hash, blob_length, verified_time):
file_path = os.path.join(self.blob_dir, blob_hash)
if os.path.isfile(file_path):
if verified_time >= os.path.getctime(file_path):
return ALREADY_VERIFIED
else:
h = get_lbry_hash_obj()
len_so_far = 0
f = open(file_path)
while True:
data = f.read(2**12)
if not data:
break
h.update(data)
len_so_far += len(data)
if len_so_far == blob_length and h.hexdigest() == blob_hash:
return NEWLY_VERIFIED
return INVALID
def do_check(blobs):
already_verified = []
newly_verified = []
invalid = []
for blob_hash, blob_length, verified_time in blobs:
status = check_blob(blob_hash, blob_length, verified_time)
if status == ALREADY_VERIFIED:
already_verified.append(blob_hash)
elif status == NEWLY_VERIFIED:
newly_verified.append(blob_hash)
else:
invalid.append(blob_hash)
return already_verified, newly_verified, invalid
def update_newly_verified(transaction, blobs):
for b in blobs:
transaction.execute("update blobs set last_verified_time = ? where blob_hash = ?",
(current_time, b))
def check_blobs(blobs):
@rerun_if_locked
def update_and_return(status_lists):
already_verified, newly_verified, invalid = status_lists
d = self.db_conn.runInteraction(update_newly_verified, newly_verified)
d.addCallback(lambda _: status_lists)
return d
d = threads.deferToThread(do_check, blobs)
d.addCallback(update_and_return)
return d
d.addCallback(check_blobs)
return d
    @rerun_if_locked
    def _get_all_verified_blob_hashes(self):
        d = self.db_conn.runQuery("select blob_hash, last_verified_time from blobs")

View file

@@ -18,4 +18,4 @@ class DownloadOption(object):
        self.long_description = long_description
        self.short_description = short_description
        self.default_value = default_value
        self.default_value_description = default_value_description

View file

@@ -12,4 +12,4 @@ class DummyHashAnnouncer(object):
        pass

    def immediate_announce(self, *args):
        pass

View file

@@ -59,4 +59,4 @@ class Offer(object):
        elif reply_message == Offer.RATE_UNSET:
            self.unset()
        else:
            raise Exception("Unknown offer reply %s" % str(reply_message))

View file

@@ -16,4 +16,4 @@ class DummyPeerFinder(object):
        return defer.succeed([])

    def get_most_popular_hashes(self, num_to_return):
        return []

View file

@@ -11,4 +11,4 @@ class PeerManager(object):
            return peer
        peer = Peer(host, port)
        self.peers.append(peer)
        return peer

View file

@@ -212,7 +212,6 @@ class Wallet(object):
        once the service has been rendered
        """
        rounded_amount = Decimal(str(round(amount, 8)))
-       #if peer in self.peer_addresses:
        if self.wallet_balance >= self.total_reserved_points + rounded_amount:
            self.total_reserved_points += rounded_amount
            return ReservedPoints(identifier, rounded_amount)
@@ -359,7 +358,7 @@ class Wallet(object):
            except (TypeError, ValueError, ValidationError):
                return Failure(InvalidStreamInfoError(name, result['value']))
            sd_hash = metadata['sources']['lbry_sd_hash']
            claim_outpoint = ClaimOutpoint(result['txid'], result['n'])
            d = self._save_name_metadata(name, claim_outpoint, sd_hash)
            d.addCallback(lambda _: self.get_claimid(name, result['txid'], result['n']))
            d.addCallback(lambda cid: _log_success(cid))
@@ -380,7 +379,7 @@ class Wallet(object):
            d.addCallback(lambda claims: next(c for c in claims if c['name'] == name and c['nOut'] == claim_outpoint['nout']))
            d.addCallback(lambda claim: self._update_claimid(claim['claimId'], name, ClaimOutpoint(txid, claim['nOut'])))
            return d

        claim_outpoint = ClaimOutpoint(txid, nout)
        d = self._get_claimid_for_tx(name, claim_outpoint)
        d.addCallback(_get_id_for_return)
        return d
@@ -461,11 +460,13 @@ class Wallet(object):
                meta_for_return[k] = new_metadata[k]
        return defer.succeed(Metadata(meta_for_return))

    def claim_name(self, name, bid, m):
        def _save_metadata(claim_out, metadata):
            if not claim_out['success']:
                msg = 'Claim to name {} failed: {}'.format(name, claim_out['reason'])
                raise Exception(msg)
+           claim_out.pop('success')
            claim_outpoint = ClaimOutpoint(claim_out['txid'], claim_out['nout'])
            log.debug("Saving metadata for claim %s %d" % (claim_outpoint['txid'], claim_outpoint['nout']))
            d = self._save_name_metadata(name, claim_outpoint, metadata['sources']['lbry_sd_hash'])
@@ -492,11 +493,29 @@ class Wallet(object):
        return d

    def abandon_claim(self, txid, nout):
+       def _parse_abandon_claim_out(claim_out):
+           if not claim_out['success']:
+               msg = 'Abandon of {}:{} failed: {}'.format(txid, nout, claim_out['reason'])
+               raise Exception(msg)
+           claim_out.pop('success')
+           return defer.succeed(claim_out)
+
        claim_outpoint = ClaimOutpoint(txid, nout)
-       return self._abandon_claim(claim_outpoint)
+       d = self._abandon_claim(claim_outpoint)
+       d.addCallback(lambda claim_out: _parse_abandon_claim_out(claim_out))
+       return d

    def support_claim(self, name, claim_id, amount):
-       return self._support_claim(name, claim_id, amount)
+       def _parse_support_claim_out(claim_out):
+           if not claim_out['success']:
+               msg = 'Support of {}:{} failed: {}'.format(name, claim_id, claim_out['reason'])
+               raise Exception(msg)
+           claim_out.pop('success')
+           return defer.succeed(claim_out)
+
+       d = self._support_claim(name, claim_id, amount)
+       d.addCallback(lambda claim_out: _parse_support_claim_out(claim_out))
+       return d

    def get_block_info(self, height):
        d = self._get_blockhash(height)
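abandon_claim and support_claim now share the same post-processing shape: raise unless claim_out['success'] is true, drop the flag, and pass the remaining fields along. A generic helper along these lines (a sketch of the shared pattern, not code from this commit) would remove the duplication:

def parse_claim_out(claim_out, action):
    # `action` is only used to build the error message, e.g. 'Abandon of <txid>:<nout>'
    if not claim_out['success']:
        raise Exception('{} failed: {}'.format(action, claim_out.get('reason')))
    claim_out.pop('success')
    return claim_out

# usage sketch:
# d.addCallback(parse_claim_out, 'Support of {}:{}'.format(name, claim_id))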
@@ -556,7 +575,7 @@ class Wallet(object):
            for claim in claims:
                if 'in claim trie' in claim:
                    name_is_equal = 'name' in claim and str(claim['name']) == name
                    nout_is_equal = 'nOut' in claim and claim['nOut'] == claim_outpoint['nout']
                    if name_is_equal and nout_is_equal and 'value' in claim:
                        try:
                            value_dict = json.loads(claim['value'])
@@ -655,7 +674,7 @@ class Wallet(object):
        d.addCallback(
            lambda _: self.db.runQuery("delete from name_metadata where name=? and txid=? and n=? and sd_hash=?",
                                       (name, claim_outpoint['txid'], UNSET_NOUT, sd_hash)))
        d.addCallback(lambda _: self.db.runQuery("insert into name_metadata values (?, ?, ?, ?)",
                                                 (name, claim_outpoint['txid'], claim_outpoint['nout'], sd_hash)))
        return d
@@ -671,7 +690,7 @@ class Wallet(object):
        d.addCallback(
            lambda _: self.db.runQuery("delete from claim_ids where claimId=? and name=? and txid=? and n=?",
                                       (claim_id, name, claim_outpoint['txid'], UNSET_NOUT)))
        d.addCallback(lambda r: self.db.runQuery("insert into claim_ids values (?, ?, ?, ?)",
                                                 (claim_id, name, claim_outpoint['txid'], claim_outpoint['nout'])))
        d.addCallback(lambda _: claim_id)

View file

@@ -4,4 +4,4 @@ Classes and functions which can be used by any application wishing to make use o
This includes classes for connecting to other peers and downloading blobs from them, listening for
connections from peers and responding to their requests, managing locally stored blobs, sending
and receiving payments, and locating peers in the DHT.
"""

View file

@@ -209,7 +209,6 @@ class ClientProtocol(Protocol):
            log.debug("Asking for another request.")
            from twisted.internet import reactor
            reactor.callLater(0, self._ask_for_request)
-           #self._ask_for_request()
        else:
            log.debug("Not asking for another request.")
            self.transport.loseConnection()
@@ -230,8 +229,6 @@ class ClientProtocol(Protocol):
        # TODO: protocol had such a mechanism.
        log.debug("Closing the connection to %s because the download of blob %s was canceled",
                  str(self.peer), str(self._blob_download_request.blob))
-       #self.transport.loseConnection()
-       #return True
        return err

    ######### IRateLimited #########

View file

@@ -24,4 +24,4 @@ class ClientBlobRequest(ClientPaidRequest):
        self.write = write_func
        self.finished_deferred = finished_deferred
        self.cancel = cancel_func
        self.blob = blob

View file

@@ -15,4 +15,4 @@ def sign_with_pass_phrase(m, pass_phrase):

def verify_signature(m, signature, pub_key):
    return seccure.verify(m, signature, pub_key, curve="brainpoolp384r1")

View file

@@ -1,4 +1,5 @@
 import datetime
+import inspect
 import json
 import logging
 import logging.handlers
@@ -14,8 +15,25 @@ import lbrynet
 from lbrynet.conf import settings
 from lbrynet.core import utils
####
# This code is copied from logging/__init__.py in the python source code
####
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif __file__[-4:].lower() in ['.pyc', '.pyo']:
_srcfile = __file__[:-4] + '.py'
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
#####
session = FuturesSession()
TRACE = 5
def bg_cb(sess, resp):
@@ -148,6 +166,30 @@ class JsonFormatter(logging.Formatter):
            data['exc_info'] = self.formatException(record.exc_info)
        return json.dumps(data)
####
# This code is copied from logging/__init__.py in the python source code
####
def findCaller(srcfile=None):
"""Returns the filename, line number and function name of the caller"""
srcfile = srcfile or _srcfile
f = inspect.currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# ignore any function calls that are in this file
if filename == srcfile:
f = f.f_back
continue
rv = (filename, f.f_lineno, co.co_name)
break
return rv
###
def failure(failure, log, msg, *args):
    """Log a failure message from a deferred.
@@ -287,3 +329,53 @@ class LogUploader(object):
        else:
            log_size = 0
        return cls(log_name, log_file, log_size)
class Logger(logging.Logger):
"""A logger that has an extra `fail` method useful for handling twisted failures."""
def fail(self, callback=None, *args, **kwargs):
"""Returns a function to log a failure from an errback.
The returned function appends the error message and extracts
the traceback from `err`.
Example usage:
d.addErrback(log.fail(), 'This is an error message')
Although odd, making the method call is necessary to extract
out useful filename and line number information; otherwise the
reported values are from inside twisted's deferred handling
code.
Args:
callback: callable to call after making the log. The first argument
will be the `err` from the deferred
args: extra arguments to pass into `callback`
Returns: a function that takes the following arguments:
err: twisted.python.failure.Failure
msg: the message to log, using normal logging string interpolation.
msg_args: the values to substitute into `msg`
msg_kwargs: set `level` to change from the default ERROR severity. Other
keywords are treated as normal log kwargs.
"""
fn, lno, func = findCaller()
def _fail(err, msg, *msg_args, **msg_kwargs):
level = msg_kwargs.pop('level', logging.ERROR)
msg += ": %s"
msg_args += (err.getErrorMessage(),)
exc_info = (err.type, err.value, err.getTracebackObject())
record = self.makeRecord(
self.name, level, fn, lno, msg, msg_args, exc_info, func, msg_kwargs)
self.handle(record)
if callback:
callback(err, *args, **kwargs)
return _fail
def trace(self, msg, *args, **kwargs):
if self.isEnabledFor(TRACE):
self._log(TRACE, msg, args, **kwargs)
logging.setLoggerClass(Logger)
logging.addLevelName(TRACE, 'TRACE')
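Because logging.setLoggerClass runs at import time, any logger created after this module is imported is an instance of this Logger subclass and therefore has fail() and trace(). A hedged usage sketch with a Twisted deferred (assumes log_support has already been imported and a handler with level at or below TRACE is configured):

import logging
from twisted.internet import defer

log = logging.getLogger(__name__)

def fetch_something():
    return defer.fail(ValueError('boom'))

d = fetch_something()
# log.fail() returns the errback; the failure's message and traceback are
# appended to the record, and the caller's file/line are what get reported.
d.addErrback(log.fail(), 'Could not fetch something')

log.trace('only emitted when the effective level is at or below TRACE (5)')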

View file

@@ -56,4 +56,4 @@ class BlobAvailabilityHandler(object):
    def _get_available_blobs(self, requested_blobs):
        d = self.blob_manager.completed_blobs(requested_blobs)
        return d

View file

@@ -1,6 +1,7 @@
 import binascii
 import collections
 import logging
+import time

 from twisted.internet import defer, reactor
@@ -42,6 +43,7 @@ class DHTHashAnnouncer(object):
        return defer.succeed(False)

    def _announce_available_hashes(self):
+       log.debug('Announcing available hashes')
        ds = []
        for supplier in self.suppliers:
            d = supplier.hashes_to_announce()
@@ -51,7 +53,11 @@ class DHTHashAnnouncer(object):
        return dl

    def _announce_hashes(self, hashes):
+       if not hashes:
+           return
+       log.debug('Announcing %s hashes', len(hashes))
+       # TODO: add a timeit decorator
+       start = time.time()
        ds = []
        for h in hashes:
@@ -62,6 +68,7 @@ class DHTHashAnnouncer(object):
        def announce():
            if len(self.hash_queue):
                h, announce_deferred = self.hash_queue.popleft()
+               log.debug('Announcing blob %s to dht', h)
                d = self.dht_node.announceHaveBlob(binascii.unhexlify(h), self.peer_port)
                d.chainDeferred(announce_deferred)
                d.addBoth(lambda _: reactor.callLater(0, announce))
@@ -72,7 +79,10 @@ class DHTHashAnnouncer(object):
            # TODO: maybe make the 5 configurable
            self._concurrent_announcers += 1
            announce()
-       return defer.DeferredList(ds)
+       d = defer.DeferredList(ds)
+       d.addCallback(lambda _: log.debug('Took %s seconds to announce %s hashes',
+                                         time.time() - start, len(hashes)))
+       return d


class DHTHashSupplier(object):
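_announce_hashes measures its own run time by hand and leaves a "# TODO: add a timeit decorator" note. One way such a decorator could look for Deferred-returning methods, purely as an illustration of the TODO rather than anything in this commit:

import functools
import logging
import time

log = logging.getLogger(__name__)

def timeit(fn):
    # Log how long a Deferred-returning call takes to fire, passing the
    # original result (or failure) through unchanged.
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        start = time.time()
        d = fn(*args, **kwargs)
        def _log_and_passthrough(result):
            log.debug('%s took %s seconds', fn.__name__, time.time() - start)
            return result
        d.addBoth(_log_and_passthrough)
        return d
    return wrapper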

View file

@@ -64,7 +64,7 @@ class ServerProtocol(Protocol):
        self.transport.loseConnection()

    def write(self, data):
-       log.debug("Writing %s bytes of data to the transport", str(len(data)))
+       log.trace("Writing %s bytes of data to the transport", len(data))
        self.transport.write(data)
        self.factory.rate_limiter.report_ul_bytes(len(data))
@@ -91,4 +91,4 @@ class ServerProtocolFactory(ServerFactory):
    def __init__(self, rate_limiter, query_handler_factories, peer_manager):
        self.rate_limiter = rate_limiter
        self.query_handler_factories = query_handler_factories
        self.peer_manager = peer_manager

View file

@@ -52,18 +52,19 @@ class ServerRequestHandler(object):
        from twisted.internet import reactor
-       if self.production_paused is False:
-           chunk = self.response_buff[:self.CHUNK_SIZE]
-           self.response_buff = self.response_buff[self.CHUNK_SIZE:]
-           if chunk != '':
-               log.debug("writing %s bytes to the client", str(len(chunk)))
-               self.consumer.write(chunk)
-               reactor.callLater(0, self._produce_more)
+       if self.production_paused:
+           return
+       chunk = self.response_buff[:self.CHUNK_SIZE]
+       self.response_buff = self.response_buff[self.CHUNK_SIZE:]
+       if chunk == '':
+           return
+       log.trace("writing %s bytes to the client", len(chunk))
+       self.consumer.write(chunk)
+       reactor.callLater(0, self._produce_more)

    #IConsumer stuff

    def registerProducer(self, producer, streaming):
-       #assert self.file_sender == producer
        self.producer = producer
        assert streaming is False
        producer.resumeProducing()
@@ -80,7 +81,7 @@ class ServerRequestHandler(object):
        def get_more_data():
            if self.producer is not None:
-               log.debug("Requesting more data from the producer")
+               log.trace("Requesting more data from the producer")
                self.producer.resumeProducing()

        reactor.callLater(0, get_more_data)
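The log.trace calls introduced here and in ServerProtocol.write only work because log_support registers the TRACE level (5) and installs a Logger subclass before these modules call logging.getLogger. A minimal standalone sketch of that prerequisite; note the handler level also has to be at or below TRACE or the records are dropped:

import logging

TRACE = 5

class TraceLogger(logging.Logger):
    def trace(self, msg, *args, **kwargs):
        if self.isEnabledFor(TRACE):
            self._log(TRACE, msg, args, **kwargs)

# must run before other modules call logging.getLogger(__name__)
logging.setLoggerClass(TraceLogger)
logging.addLevelName(TRACE, 'TRACE')

logging.basicConfig(level=TRACE)
log = logging.getLogger('example')
log.trace('per-chunk write details go here')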

View file

@@ -20,4 +20,4 @@ def rerun_if_locked(f):
        d.addErrback(rerun, *args, **kwargs)
        return d
    return wrapper
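Only the tail of rerun_if_locked appears in this hunk. A self-contained sketch of a decorator with the same shape, retrying once when SQLite reports the database is locked; the retry condition and the single-retry policy are assumptions about the parts not shown:

import logging
import sqlite3

log = logging.getLogger(__name__)

def rerun_if_locked(f):
    def rerun(err, *args, **kwargs):
        # retry only for "database is locked" errors; anything else propagates
        if err.check(sqlite3.OperationalError) and 'locked' in str(err.value):
            log.warning('database was locked, rerunning %s', f.__name__)
            return f(*args, **kwargs)
        return err

    def wrapper(*args, **kwargs):
        d = f(*args, **kwargs)
        d.addErrback(rerun, *args, **kwargs)
        return d
    return wrapper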

View file

@@ -48,7 +48,6 @@ class StreamBlobDecryptor(object):
                self.buff += data
                self.len_read += len(data)
                write_bytes()
-               #write_func(remove_padding(self.cipher.decrypt(self.buff)))

        d = self.blob.read(decrypt_bytes)
        d.addCallback(lambda _: finish_decrypt())

View file

@@ -5,4 +5,4 @@ Crypt Streams are encrypted blobs and metadata tying those blobs together. At le
metadata is generally stored in a Stream Descriptor File, for example containing a public key
used to bind blobs to the stream and a symmetric key used to encrypt the blobs. The list of blobs
may or may not be present.
"""

View file

@@ -1,307 +0,0 @@
# import sqlite3
# import unqlite
# import leveldb
# import shutil
# import os
# import logging
# import json
#
#
# log = logging.getLogger(__name__)
#
#
# known_dbs = ['lbryfile_desc.db', 'lbryfiles.db', 'valuable_blobs.db', 'blobs.db',
# 'lbryfile_blob.db', 'lbryfile_info.db', 'settings.db', 'blind_settings.db',
# 'blind_peers.db', 'blind_info.db', 'lbryfile_info.db', 'lbryfile_manager.db',
# 'live_stream.db', 'stream_info.db', 'stream_blob.db', 'stream_desc.db']
#
#
# def do_move(from_dir, to_dir):
# for known_db in known_dbs:
# known_db_path = os.path.join(from_dir, known_db)
# if os.path.exists(known_db_path):
# log.debug("Moving %s to %s",
# os.path.abspath(known_db_path),
# os.path.abspath(os.path.join(to_dir, known_db)))
# shutil.move(known_db_path, os.path.join(to_dir, known_db))
# else:
# log.debug("Did not find %s", os.path.abspath(known_db_path))
#
#
# def do_migration(db_dir):
# old_dir = os.path.join(db_dir, "_0_to_1_old")
# new_dir = os.path.join(db_dir, "_0_to_1_new")
# try:
# log.info("Moving dbs from the real directory to %s", os.path.abspath(old_dir))
# os.makedirs(old_dir)
# do_move(db_dir, old_dir)
# except:
# log.error("An error occurred moving the old db files.")
# raise
# try:
# log.info("Creating the new directory in %s", os.path.abspath(new_dir))
# os.makedirs(new_dir)
#
# except:
# log.error("An error occurred creating the new directory.")
# raise
# try:
# log.info("Doing the migration")
# migrate_blob_db(old_dir, new_dir)
# migrate_lbryfile_db(old_dir, new_dir)
# migrate_livestream_db(old_dir, new_dir)
# migrate_ptc_db(old_dir, new_dir)
# migrate_lbryfile_manager_db(old_dir, new_dir)
# migrate_settings_db(old_dir, new_dir)
# migrate_repeater_db(old_dir, new_dir)
# log.info("Migration succeeded")
# except:
# log.error("An error occurred during the migration. Restoring.")
# do_move(old_dir, db_dir)
# raise
# try:
# log.info("Moving dbs in the new directory to the real directory")
# do_move(new_dir, db_dir)
# db_revision = open(os.path.join(db_dir, 'db_revision'), mode='w+')
# db_revision.write("1")
# db_revision.close()
# os.rmdir(new_dir)
# except:
# log.error("An error occurred moving the new db files.")
# raise
# return old_dir
#
#
# def migrate_blob_db(old_db_dir, new_db_dir):
# old_blob_db_path = os.path.join(old_db_dir, "blobs.db")
# if not os.path.exists(old_blob_db_path):
# return True
#
# old_db = leveldb.LevelDB(old_blob_db_path)
# new_db_conn = sqlite3.connect(os.path.join(new_db_dir, "blobs.db"))
# c = new_db_conn.cursor()
# c.execute("create table if not exists blobs (" +
# " blob_hash text primary key, " +
# " blob_length integer, " +
# " last_verified_time real, " +
# " next_announce_time real"
# ")")
# new_db_conn.commit()
# c = new_db_conn.cursor()
# for blob_hash, blob_info in old_db.RangeIter():
# blob_length, verified_time, announce_time = json.loads(blob_info)
# c.execute("insert into blobs values (?, ?, ?, ?)",
# (blob_hash, blob_length, verified_time, announce_time))
# new_db_conn.commit()
# new_db_conn.close()
#
#
# def migrate_lbryfile_db(old_db_dir, new_db_dir):
# old_lbryfile_db_path = os.path.join(old_db_dir, "lbryfiles.db")
# if not os.path.exists(old_lbryfile_db_path):
# return True
#
# stream_info_db = leveldb.LevelDB(os.path.join(old_db_dir, "lbryfile_info.db"))
# stream_blob_db = leveldb.LevelDB(os.path.join(old_db_dir, "lbryfile_blob.db"))
# stream_desc_db = leveldb.LevelDB(os.path.join(old_db_dir, "lbryfile_desc.db"))
#
# db_conn = sqlite3.connect(os.path.join(new_db_dir, "lbryfile_info.db"))
# c = db_conn.cursor()
# c.execute("create table if not exists lbry_files (" +
# " stream_hash text primary key, " +
# " key text, " +
# " stream_name text, " +
# " suggested_file_name text" +
# ")")
# c.execute("create table if not exists lbry_file_blobs (" +
# " blob_hash text, " +
# " stream_hash text, " +
# " position integer, " +
# " iv text, " +
# " length integer, " +
# " foreign key(stream_hash) references lbry_files(stream_hash)" +
# ")")
# c.execute("create table if not exists lbry_file_descriptors (" +
# " sd_blob_hash TEXT PRIMARY KEY, " +
# " stream_hash TEXT, " +
# " foreign key(stream_hash) references lbry_files(stream_hash)" +
# ")")
# db_conn.commit()
# c = db_conn.cursor()
# for stream_hash, stream_info in stream_info_db.RangeIter():
# key, name, suggested_file_name = json.loads(stream_info)
# c.execute("insert into lbry_files values (?, ?, ?, ?)",
# (stream_hash, key, name, suggested_file_name))
# db_conn.commit()
# c = db_conn.cursor()
# for blob_hash_stream_hash, blob_info in stream_blob_db.RangeIter():
# b_h, s_h = json.loads(blob_hash_stream_hash)
# position, iv, length = json.loads(blob_info)
# c.execute("insert into lbry_file_blobs values (?, ?, ?, ?, ?)",
# (b_h, s_h, position, iv, length))
# db_conn.commit()
# c = db_conn.cursor()
# for sd_blob_hash, stream_hash in stream_desc_db.RangeIter():
# c.execute("insert into lbry_file_descriptors values (?, ?)",
# (sd_blob_hash, stream_hash))
# db_conn.commit()
# db_conn.close()
#
#
# def migrate_livestream_db(old_db_dir, new_db_dir):
# old_db_path = os.path.join(old_db_dir, "stream_info.db")
# if not os.path.exists(old_db_path):
# return True
# stream_info_db = leveldb.LevelDB(os.path.join(old_db_dir, "stream_info.db"))
# stream_blob_db = leveldb.LevelDB(os.path.join(old_db_dir, "stream_blob.db"))
# stream_desc_db = leveldb.LevelDB(os.path.join(old_db_dir, "stream_desc.db"))
#
# db_conn = sqlite3.connect(os.path.join(new_db_dir, "live_stream.db"))
#
# c = db_conn.cursor()
#
# c.execute("create table if not exists live_streams (" +
# " stream_hash text primary key, " +
# " public_key text, " +
# " key text, " +
# " stream_name text, " +
# " next_announce_time real" +
# ")")
# c.execute("create table if not exists live_stream_blobs (" +
# " blob_hash text, " +
# " stream_hash text, " +
# " position integer, " +
# " revision integer, " +
# " iv text, " +
# " length integer, " +
# " signature text, " +
# " foreign key(stream_hash) references live_streams(stream_hash)" +
# ")")
# c.execute("create table if not exists live_stream_descriptors (" +
# " sd_blob_hash TEXT PRIMARY KEY, " +
# " stream_hash TEXT, " +
# " foreign key(stream_hash) references live_streams(stream_hash)" +
# ")")
#
# db_conn.commit()
#
# c = db_conn.cursor()
# for stream_hash, stream_info in stream_info_db.RangeIter():
# public_key, key, name, next_announce_time = json.loads(stream_info)
# c.execute("insert into live_streams values (?, ?, ?, ?, ?)",
# (stream_hash, public_key, key, name, next_announce_time))
# db_conn.commit()
# c = db_conn.cursor()
# for blob_hash_stream_hash, blob_info in stream_blob_db.RangeIter():
# b_h, s_h = json.loads(blob_hash_stream_hash)
# position, revision, iv, length, signature = json.loads(blob_info)
# c.execute("insert into live_stream_blobs values (?, ?, ?, ?, ?, ?, ?)",
# (b_h, s_h, position, revision, iv, length, signature))
# db_conn.commit()
# c = db_conn.cursor()
# for sd_blob_hash, stream_hash in stream_desc_db.RangeIter():
# c.execute("insert into live_stream_descriptors values (?, ?)",
# (sd_blob_hash, stream_hash))
# db_conn.commit()
# db_conn.close()
#
#
# def migrate_ptc_db(old_db_dir, new_db_dir):
# old_db_path = os.path.join(old_db_dir, "ptcwallet.db")
# if not os.path.exists(old_db_path):
# return True
# old_db = leveldb.LevelDB(old_db_path)
# try:
# p_key = old_db.Get("private_key")
# new_db = unqlite.UnQLite(os.path.join(new_db_dir, "ptcwallet.db"))
# new_db['private_key'] = p_key
# except KeyError:
# pass
#
#
# def migrate_lbryfile_manager_db(old_db_dir, new_db_dir):
# old_db_path = os.path.join(old_db_dir, "lbryfiles.db")
# if not os.path.exists(old_db_path):
# return True
# old_db = leveldb.LevelDB(old_db_path)
# new_db = sqlite3.connect(os.path.join(new_db_dir, "lbryfile_info.db"))
# c = new_db.cursor()
# c.execute("create table if not exists lbry_file_options (" +
# " blob_data_rate real, " +
# " status text," +
# " stream_hash text,"
# " foreign key(stream_hash) references lbry_files(stream_hash)" +
# ")")
# new_db.commit()
# FILE_STATUS = "t"
# FILE_OPTIONS = "o"
# c = new_db.cursor()
# for k, v in old_db.RangeIter():
# key_type, stream_hash = json.loads(k)
# if key_type == FILE_STATUS:
# try:
# rate = json.loads(old_db.Get(json.dumps((FILE_OPTIONS, stream_hash))))[0]
# except KeyError:
# rate = None
# c.execute("insert into lbry_file_options values (?, ?, ?)",
# (rate, v, stream_hash))
# new_db.commit()
# new_db.close()
#
#
# def migrate_settings_db(old_db_dir, new_db_dir):
# old_settings_db_path = os.path.join(old_db_dir, "settings.db")
# if not os.path.exists(old_settings_db_path):
# return True
# old_db = leveldb.LevelDB(old_settings_db_path)
# new_db = unqlite.UnQLite(os.path.join(new_db_dir, "settings.db"))
# for k, v in old_db.RangeIter():
# new_db[k] = v
#
#
# def migrate_repeater_db(old_db_dir, new_db_dir):
# old_repeater_db_path = os.path.join(old_db_dir, "valuable_blobs.db")
# if not os.path.exists(old_repeater_db_path):
# return True
# old_db = leveldb.LevelDB(old_repeater_db_path)
# info_db = sqlite3.connect(os.path.join(new_db_dir, "blind_info.db"))
# peer_db = sqlite3.connect(os.path.join(new_db_dir, "blind_peers.db"))
# unql_db = unqlite.UnQLite(os.path.join(new_db_dir, "blind_settings.db"))
# BLOB_INFO_TYPE = 'b'
# SETTING_TYPE = 's'
# PEER_TYPE = 'p'
# info_c = info_db.cursor()
# info_c.execute("create table if not exists valuable_blobs (" +
# " blob_hash text primary key, " +
# " blob_length integer, " +
# " reference text, " +
# " peer_host text, " +
# " peer_port integer, " +
# " peer_score text" +
# ")")
# info_db.commit()
# peer_c = peer_db.cursor()
# peer_c.execute("create table if not exists approved_peers (" +
# " ip_address text, " +
# " port integer" +
# ")")
# peer_db.commit()
# info_c = info_db.cursor()
# peer_c = peer_db.cursor()
# for k, v in old_db.RangeIter():
# key_type, key_rest = json.loads(k)
# if key_type == PEER_TYPE:
# host, port = key_rest
# peer_c.execute("insert into approved_peers values (?, ?)",
# (host, port))
# elif key_type == SETTING_TYPE:
# unql_db[key_rest] = v
# elif key_type == BLOB_INFO_TYPE:
# blob_hash = key_rest
# length, reference, peer_host, peer_port, peer_score = json.loads(v)
# info_c.execute("insert into valuable_blobs values (?, ?, ?, ?, ?, ?)",
# (blob_hash, length, reference, peer_host, peer_port, peer_score))
# info_db.commit()
# peer_db.commit()
# info_db.close()
# peer_db.close()

View file

@@ -10,7 +10,7 @@
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

0. Additional Definitions.

As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
@@ -111,7 +111,7 @@ the following:
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.

e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the

View file

@@ -10,7 +10,7 @@
""" This module defines the characterizing constants of the Kademlia network

C{checkRefreshInterval} and C{udpDatagramMaxSize} are implementation-specific
constants, and do not affect general Kademlia operation.
"""

######### KADEMLIA CONSTANTS ###########
@@ -49,4 +49,4 @@ checkRefreshInterval = refreshTimeout/5
#: be spread across several UDP packets.
udpDatagramMaxSize = 8192  # 8 KB

key_bits = 384

View file

@@ -10,7 +10,7 @@
class Contact(object):
    """ Encapsulation for remote contact

    This class contains information on a single remote contact, and also
    provides a direct RPC API to the remote node which it represents
    """
@@ -20,7 +20,7 @@ class Contact(object):
        self.port = udpPort
        self._networkProtocol = networkProtocol
        self.commTime = firstComm

    def __eq__(self, other):
        if isinstance(other, Contact):
            return self.id == other.id
@@ -28,7 +28,7 @@ class Contact(object):
            return self.id == other
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, Contact):
            return self.id != other.id
@@ -41,20 +41,20 @@ class Contact(object):
        compact_ip = reduce(
            lambda buff, x: buff + bytearray([int(x)]), self.address.split('.'), bytearray())
        return str(compact_ip)

    def __str__(self):
        return '<%s.%s object; IP address: %s, UDP port: %d>' % (
            self.__module__, self.__class__.__name__, self.address, self.port)

    def __getattr__(self, name):
        """ This override allows the host node to call a method of the remote
        node (i.e. this contact) as if it was a local function.

        For instance, if C{remoteNode} is a instance of C{Contact}, the
        following will result in C{remoteNode}'s C{test()} method to be
        called with argument C{123}::
         remoteNode.test(123)

        Such a RPC method call will return a Deferred, which will callback
        when the contact responds with the result (or an error occurs).
        This happens via this contact's C{_networkProtocol} object (i.e. the

View file

@@ -16,7 +16,7 @@ import constants

class DataStore(UserDict.DictMixin):
    """ Interface for classes implementing physical storage (for data
    published via the "STORE" RPC) for the Kademlia DHT

    @note: This provides an interface for a dict-like object
    """

    def keys(self):

View file

@@ -14,47 +14,47 @@ class DecodeError(Exception):

class Encoding(object):
    """ Interface for RPC message encoders/decoders

    All encoding implementations used with this library should inherit and
    implement this.
    """

    def encode(self, data):
        """ Encode the specified data

        @param data: The data to encode
                     This method has to support encoding of the following
                     types: C{str}, C{int} and C{long}
                     Any additional data types may be supported as long as the
                     implementing class's C{decode()} method can successfully
                     decode them.

        @return: The encoded data
        @rtype: str
        """

    def decode(self, data):
        """ Decode the specified data string

        @param data: The data (byte string) to decode.
        @type data: str

        @return: The decoded data (in its correct type)
        """


class Bencode(Encoding):
    """ Implementation of a Bencode-based algorithm (Bencode is the encoding
    algorithm used by Bittorrent).

    @note: This algorithm differs from the "official" Bencode algorithm in
           that it can encode/decode floating point values in addition to
           integers.
    """

    def encode(self, data):
        """ Encoder implementation of the Bencode algorithm

        @param data: The data to encode
        @type data: int, long, tuple, list, dict or str

        @return: The encoded data
        @rtype: str
        """
@@ -76,7 +76,7 @@ class Bencode(Encoding):
                encodedDictItems += self.encode(data[key])
            return 'd%se' % encodedDictItems
        elif type(data) == float:
            # This (float data type) is a non-standard extension to the original Bencode algorithm
            return 'f%fe' % data
        elif data == None:
            # This (None/NULL data type) is a non-standard extension
@@ -85,16 +85,16 @@ class Bencode(Encoding):
        else:
            print data
            raise TypeError, "Cannot bencode '%s' object" % type(data)

    def decode(self, data):
        """ Decoder implementation of the Bencode algorithm

        @param data: The encoded data
        @type data: str

        @note: This is a convenience wrapper for the recursive decoding
               algorithm, C{_decodeRecursive}

        @return: The decoded data, as a native Python type
        @rtype: int, list, dict or str
        """
@@ -104,11 +104,11 @@ class Bencode(Encoding):
            return self._decodeRecursive(data)[0]
        except ValueError as e:
            raise DecodeError, e.message

    @staticmethod
    def _decodeRecursive(data, startIndex=0):
        """ Actual implementation of the recursive Bencode algorithm

        Do not call this; use C{decode()} instead
        """
        if data[startIndex] == 'i':
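The Bencode docstrings above describe standard bencoding plus two non-standard extensions (floats and None). A few worked examples of what Bencode.encode() produces for the standard types, with the float form specific to this implementation:

# 'spam'     -> '4:spam'      (length-prefixed string)
# 42         -> 'i42e'        (integer)
# [1, 'a']   -> 'li1e1:ae'    (list)
# {'foo': 1} -> 'd3:fooi1ee'  (dict)
# 3.5        -> 'f3.500000e'  (non-standard float extension, 'f%fe' % data)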

View file

@@ -32,4 +32,4 @@ class HashWatcher():
    def _remove_old_hashes(self):
        remove_time = datetime.datetime.now() - datetime.timedelta(minutes=10)
        self.hashes = [h for h in self.hashes if h[1] < remove_time]

View file

@@ -31,11 +31,11 @@ class KBucket(object):
    def addContact(self, contact):
        """ Add contact to _contact list in the right order. This will move the
        contact to the end of the k-bucket if it is already present.

        @raise kademlia.kbucket.BucketFull: Raised when the bucket is full and
                                            the contact isn't in the bucket
                                            already

        @param contact: The contact to add
        @type contact: kademlia.contact.Contact
        """
@@ -57,7 +57,7 @@ class KBucket(object):
    def getContacts(self, count=-1, excludeContact=None):
        """ Returns a list containing up to the first count number of contacts

        @param count: The amount of contacts to return (if 0 or less, return
                      all contacts)
        @type count: int
@ -65,12 +65,12 @@ class KBucket(object):
the list of returned values, it will be the list of returned values, it will be
discarded before returning. If a C{str} is discarded before returning. If a C{str} is
passed as this argument, it must be the passed as this argument, it must be the
contact's ID. contact's ID.
@type excludeContact: kademlia.contact.Contact or str @type excludeContact: kademlia.contact.Contact or str
@raise IndexError: If the number of requested contacts is too large @raise IndexError: If the number of requested contacts is too large
@return: Return up to the first count number of contacts in a list @return: Return up to the first count number of contacts in a list
If no contacts are present an empty list is returned If no contacts are present an empty list is returned
@rtype: list @rtype: list
@ -97,7 +97,7 @@ class KBucket(object):
# enough contacts in list # enough contacts in list
else: else:
contactList = self._contacts[0:count] contactList = self._contacts[0:count]
if excludeContact in contactList: if excludeContact in contactList:
contactList.remove(excludeContact) contactList.remove(excludeContact)
@ -105,24 +105,24 @@ class KBucket(object):
def removeContact(self, contact): def removeContact(self, contact):
""" Remove given contact from list """ Remove given contact from list
@param contact: The contact to remove, or a string containing the @param contact: The contact to remove, or a string containing the
contact's node ID contact's node ID
@type contact: kademlia.contact.Contact or str @type contact: kademlia.contact.Contact or str
@raise ValueError: The specified contact is not in this bucket @raise ValueError: The specified contact is not in this bucket
""" """
self._contacts.remove(contact) self._contacts.remove(contact)
def keyInRange(self, key): def keyInRange(self, key):
""" Tests whether the specified key (i.e. node ID) is in the range """ Tests whether the specified key (i.e. node ID) is in the range
of the n-bit ID space covered by this k-bucket (in other words, it of the n-bit ID space covered by this k-bucket (in other words, it
returns whether or not the specified key should be placed in this returns whether or not the specified key should be placed in this
k-bucket) k-bucket)
@param key: The key to test @param key: The key to test
@type key: str or int @type key: str or int
@return: C{True} if the key is in this k-bucket's range, or C{False} @return: C{True} if the key is in this k-bucket's range, or C{False}
if not. if not.
@rtype: bool @rtype: bool
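As a quick illustration of the contract these docstrings describe, here is a toy model (not the KBucket class itself) of the least-recently-seen ordering: re-adding a contact that is already present moves it to the tail, and adding to a full bucket raises.

# Toy model of the documented k-bucket behaviour, for illustration only.
class ToyBucket(object):
    def __init__(self, k=2):
        self.k = k
        self.contacts = []

    def addContact(self, contact):
        if contact in self.contacts:
            self.contacts.remove(contact)   # already known: refresh its position
        elif len(self.contacts) >= self.k:
            raise RuntimeError("bucket full")
        self.contacts.append(contact)       # most recently seen goes to the tail

bucket = ToyBucket(k=2)
bucket.addContact('aa')
bucket.addContact('bb')
bucket.addContact('aa')
assert bucket.contacts == ['bb', 'aa']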

View file

@ -11,37 +11,37 @@ import msgtypes
class MessageTranslator(object): class MessageTranslator(object):
""" Interface for RPC message translators/formatters """ Interface for RPC message translators/formatters
Classes inheriting from this should provide a translation service between Classes inheriting from this should provide a translation service between
the classes used internally by this Kademlia implementation and the actual the classes used internally by this Kademlia implementation and the actual
data that is transmitted between nodes. data that is transmitted between nodes.
""" """
def fromPrimitive(self, msgPrimitive): def fromPrimitive(self, msgPrimitive):
""" Create an RPC Message from a message's string representation """ Create an RPC Message from a message's string representation
@param msgPrimitive: The unencoded primitive representation of a message @param msgPrimitive: The unencoded primitive representation of a message
@type msgPrimitive: str, int, list or dict @type msgPrimitive: str, int, list or dict
@return: The translated message object @return: The translated message object
@rtype: entangled.kademlia.msgtypes.Message @rtype: entangled.kademlia.msgtypes.Message
""" """
def toPrimitive(self, message): def toPrimitive(self, message):
""" Create a string representation of a message """ Create a string representation of a message
@param message: The message object @param message: The message object
@type message: msgtypes.Message @type message: msgtypes.Message
@return: The message's primitive representation in a particular @return: The message's primitive representation in a particular
messaging format messaging format
@rtype: str, int, list or dict @rtype: str, int, list or dict
""" """
class DefaultFormat(MessageTranslator): class DefaultFormat(MessageTranslator):
""" The default on-the-wire message format for this library """ """ The default on-the-wire message format for this library """
typeRequest, typeResponse, typeError = range(3) typeRequest, typeResponse, typeError = range(3)
headerType, headerMsgID, headerNodeID, headerPayload, headerArgs = range(5) headerType, headerMsgID, headerNodeID, headerPayload, headerArgs = range(5)
def fromPrimitive(self, msgPrimitive): def fromPrimitive(self, msgPrimitive):
msgType = msgPrimitive[self.headerType] msgType = msgPrimitive[self.headerType]
if msgType == self.typeRequest: if msgType == self.typeRequest:
@ -62,8 +62,8 @@ class DefaultFormat(MessageTranslator):
# Unknown message, no payload # Unknown message, no payload
msg = msgtypes.Message(msgPrimitive[self.headerMsgID], msgPrimitive[self.headerNodeID]) msg = msgtypes.Message(msgPrimitive[self.headerMsgID], msgPrimitive[self.headerNodeID])
return msg return msg
def toPrimitive(self, message): def toPrimitive(self, message):
msg = {self.headerMsgID: message.id, msg = {self.headerMsgID: message.id,
self.headerNodeID: message.nodeID} self.headerNodeID: message.nodeID}
if isinstance(message, msgtypes.RequestMessage): if isinstance(message, msgtypes.RequestMessage):
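For orientation, a request under DefaultFormat ends up as a plain dict keyed by the header indices defined above; the payload fields shown below are an assumption based on the request type, not a verbatim copy of toPrimitive's output.

# Hypothetical primitive form of a 'ping' request (keys are the header indices):
primitive = {
    0: 0,                  # headerType    -> typeRequest
    1: 'rpc-id-bytes',     # headerMsgID   -> the message/RPC id
    2: 'node-id-bytes',    # headerNodeID  -> the sender's node id
    3: 'ping',             # headerPayload -> RPC method name (assumed)
    4: (),                 # headerArgs    -> positional arguments (assumed)
}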

View file

@ -22,7 +22,7 @@ class RequestMessage(Message):
def __init__(self, nodeID, method, methodArgs, rpcID=None): def __init__(self, nodeID, method, methodArgs, rpcID=None):
if rpcID == None: if rpcID == None:
hash = hashlib.sha384() hash = hashlib.sha384()
hash.update(str(random.getrandbits(255))) hash.update(str(random.getrandbits(255)))
rpcID = hash.digest() rpcID = hash.digest()
Message.__init__(self, rpcID, nodeID) Message.__init__(self, rpcID, nodeID)
self.request = method self.request = method
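The auto-generated rpcID above is just the SHA-384 digest of 255 random bits, which gives every request a fixed-width 48-byte identifier; the same recipe stands alone as:

import hashlib
import random

h = hashlib.sha384()
h.update(str(random.getrandbits(255)))   # Python 2: str is acceptable input here
rpc_id = h.digest()
assert len(rpc_id) == 48                 # SHA-384 digests are 48 bytes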

View file

@ -6,10 +6,16 @@
# #
# The docstrings in this module contain epytext markup; API documentation # The docstrings in this module contain epytext markup; API documentation
# may be created by processing this file with epydoc: http://epydoc.sf.net # may be created by processing this file with epydoc: http://epydoc.sf.net
import hashlib, random, struct, time, binascii
import argparse import argparse
import binascii
import hashlib
import operator
import random
import struct
import time
from twisted.internet import defer, error from twisted.internet import defer, error
import constants import constants
import routingtable import routingtable
import datastore import datastore
@ -27,22 +33,23 @@ log = logging.getLogger(__name__)
def rpcmethod(func): def rpcmethod(func):
""" Decorator to expose Node methods as remote procedure calls """ Decorator to expose Node methods as remote procedure calls
Apply this decorator to methods in the Node class (or a subclass) in order Apply this decorator to methods in the Node class (or a subclass) in order
to make them remotely callable via the DHT's RPC mechanism. to make them remotely callable via the DHT's RPC mechanism.
""" """
func.rpcmethod = True func.rpcmethod = True
return func return func
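A minimal sketch of how the decorator is intended to be used on a Node subclass (the echo method here is hypothetical); the request handler in protocol.py only dispatches to callables that carry this rpcmethod attribute.

class EchoNode(Node):
    @rpcmethod
    def echo(self, value):
        # Remotely callable because rpcmethod sets echo.rpcmethod = True.
        return value

assert getattr(EchoNode.echo, 'rpcmethod', False)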
class Node(object): class Node(object):
""" Local node in the Kademlia network """ Local node in the Kademlia network
This class represents a single local node in a Kademlia network; in other This class represents a single local node in a Kademlia network; in other
words, this class encapsulates an Entangled-using application's "presence" words, this class encapsulates an Entangled-using application's "presence"
in a Kademlia network. in a Kademlia network.
In Entangled, all interactions with the Kademlia network by a client In Entangled, all interactions with the Kademlia network by a client
application are performed via this class (or a subclass). application are performed via this class (or a subclass).
""" """
def __init__(self, id=None, udpPort=4000, dataStore=None, def __init__(self, id=None, udpPort=4000, dataStore=None,
routingTableClass=None, networkProtocol=None, lbryid=None, routingTableClass=None, networkProtocol=None, lbryid=None,
@ -61,7 +68,7 @@ class Node(object):
exposed. This should be a class, not an object, exposed. This should be a class, not an object,
in order to allow the Node to pass an in order to allow the Node to pass an
auto-generated node ID to the routingtable object auto-generated node ID to the routingtable object
upon instantiation (if necessary). upon instantiation (if necessary).
@type routingTable: entangled.kademlia.routingtable.RoutingTable @type routingTable: entangled.kademlia.routingtable.RoutingTable
@param networkProtocol: The network protocol to use. This can be @param networkProtocol: The network protocol to use. This can be
overridden from the default to (for example) overridden from the default to (for example)
@ -85,9 +92,6 @@ class Node(object):
self.next_refresh_call = None self.next_refresh_call = None
self.next_change_token_call = None self.next_change_token_call = None
# Create k-buckets (for storing contacts) # Create k-buckets (for storing contacts)
#self._buckets = []
#for i in range(160):
# self._buckets.append(kbucket.KBucket())
if routingTableClass == None: if routingTableClass == None:
self._routingTable = routingtable.OptimizedTreeRoutingTable(self.id) self._routingTable = routingtable.OptimizedTreeRoutingTable(self.id)
else: else:
@ -118,7 +122,6 @@ class Node(object):
self.hash_watcher = HashWatcher() self.hash_watcher = HashWatcher()
def __del__(self): def __del__(self):
#self._persistState()
if self._listeningPort is not None: if self._listeningPort is not None:
self._listeningPort.stopListening() self._listeningPort.stopListening()
@ -138,7 +141,7 @@ class Node(object):
def joinNetwork(self, knownNodeAddresses=None): def joinNetwork(self, knownNodeAddresses=None):
""" Causes the Node to join the Kademlia network; normally, this """ Causes the Node to join the Kademlia network; normally, this
should be called before any other DHT operations. should be called before any other DHT operations.
@param knownNodeAddresses: A sequence of tuples containing IP address @param knownNodeAddresses: A sequence of tuples containing IP address
information for existing nodes on the information for existing nodes on the
Kademlia network, in the format: Kademlia network, in the format:
@ -165,16 +168,6 @@ class Node(object):
# Initiate the Kademlia joining sequence - perform a search for this node's own ID # Initiate the Kademlia joining sequence - perform a search for this node's own ID
self._joinDeferred = self._iterativeFind(self.id, bootstrapContacts) self._joinDeferred = self._iterativeFind(self.id, bootstrapContacts)
# #TODO: Refresh all k-buckets further away than this node's closest neighbour # #TODO: Refresh all k-buckets further away than this node's closest neighbour
# def getBucketAfterNeighbour(*args):
# for i in range(160):
# if len(self._buckets[i]) > 0:
# return i+1
# return 160
# df.addCallback(getBucketAfterNeighbour)
# df.addCallback(self._refreshKBuckets)
#protocol.reactor.callLater(10, self.printContacts)
#self._joinDeferred.addCallback(self._persistState)
#self._joinDeferred.addCallback(self.printContacts)
# Start refreshing k-buckets periodically, if necessary # Start refreshing k-buckets periodically, if necessary
self.next_refresh_call = twisted.internet.reactor.callLater( self.next_refresh_call = twisted.internet.reactor.callLater(
constants.checkRefreshInterval, self._refreshNode) #IGNORE:E1101 constants.checkRefreshInterval, self._refreshNode) #IGNORE:E1101
@ -187,7 +180,6 @@ class Node(object):
for contact in self._routingTable._buckets[i]._contacts: for contact in self._routingTable._buckets[i]._contacts:
print contact print contact
print '==================================' print '=================================='
#twisted.internet.reactor.callLater(10, self.printContacts)
def getApproximateTotalDHTNodes(self): def getApproximateTotalDHTNodes(self):
# get the deepest bucket and the number of contacts in that bucket and multiply it # get the deepest bucket and the number of contacts in that bucket and multiply it
@ -218,7 +210,6 @@ class Node(object):
if type(result) == dict: if type(result) == dict:
if blob_hash in result: if blob_hash in result:
for peer in result[blob_hash]: for peer in result[blob_hash]:
#print peer
if self.lbryid != peer[6:]: if self.lbryid != peer[6:]:
host = ".".join([str(ord(d)) for d in peer[:4]]) host = ".".join([str(ord(d)) for d in peer[:4]])
if host == "127.0.0.1": if host == "127.0.0.1":
@ -230,8 +221,6 @@ class Node(object):
return expanded_peers return expanded_peers
def find_failed(err): def find_failed(err):
#print "An exception occurred in the DHT"
#print err.getErrorMessage()
return [] return []
d = self.iterativeFindValue(blob_hash) d = self.iterativeFindValue(blob_hash)
@ -246,9 +235,14 @@ class Node(object):
known_nodes = {} known_nodes = {}
def log_error(err, n): def log_error(err, n):
log.debug("error storing blob_hash %s at %s", binascii.hexlify(blob_hash), str(n)) if err.check(protocol.TimeoutError):
log.debug(err.getErrorMessage()) log.debug(
log.debug(err.getTraceback()) "Timeout while storing blob_hash %s at %s",
binascii.hexlify(blob_hash), n)
else:
log.error(
"Unexpected error while storing blob_hash %s at %s: %s",
binascii.hexlify(blob_hash), n, err.getErrorMessage())
def log_success(res): def log_success(res):
log.debug("Response to store request: %s", str(res)) log.debug("Response to store request: %s", str(res))
@ -268,28 +262,21 @@ class Node(object):
result = responseMsg.response result = responseMsg.response
if 'token' in result: if 'token' in result:
#print "Printing result...", result
value['token'] = result['token'] value['token'] = result['token']
d = n.store(blob_hash, value, self.id, 0) d = n.store(blob_hash, value, self.id, 0)
d.addCallback(log_success) d.addCallback(log_success)
d.addErrback(log_error, n) d.addErrback(log_error, n)
else: else:
d = defer.succeed(False) d = defer.succeed(False)
#else:
# print "result:", result
# print "No token where it should be"
return d return d
def requestPeers(contacts): def requestPeers(contacts):
if self.externalIP is not None and len(contacts) >= constants.k: if self.externalIP is not None and len(contacts) >= constants.k:
is_closer = ( is_closer = Distance(blob_hash).is_closer(self.id, contacts[-1].id)
self._routingTable.distance(blob_hash, self.id) <
self._routingTable.distance(blob_hash, contacts[-1].id))
if is_closer: if is_closer:
contacts.pop() contacts.pop()
self.store(blob_hash, value, self_store=True, originalPublisherID=self.id) self.store(blob_hash, value, self_store=True, originalPublisherID=self.id)
elif self.externalIP is not None: elif self.externalIP is not None:
#print "attempting to self-store"
self.store(blob_hash, value, self_store=True, originalPublisherID=self.id) self.store(blob_hash, value, self_store=True, originalPublisherID=self.id)
ds = [] ds = []
for contact in contacts: for contact in contacts:
@ -323,18 +310,17 @@ class Node(object):
h = hashlib.new('sha384') h = hashlib.new('sha384')
h.update(self.old_token_secret + compact_ip) h.update(self.old_token_secret + compact_ip)
if not token == h.digest(): if not token == h.digest():
#print 'invalid token found'
return False return False
return True return True
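The token scheme above is a keyed hash: SHA-384 of a node token secret concatenated with the requesting peer's compact (packed 4-byte) IP; the visible branch re-derives the token from the previous secret and compares. A standalone sketch of issuing such a token (hypothetical helper, not the Node method):

import hashlib
import os
import socket

def make_token(secret, ip_address):
    compact_ip = socket.inet_aton(ip_address)          # 4-byte packed IPv4
    return hashlib.sha384(secret + compact_ip).digest()

secret = os.urandom(16)
token = make_token(secret, '10.0.0.1')
assert token == make_token(secret, '10.0.0.1')         # same peer verifies
assert token != make_token(secret, '10.0.0.2')         # a different peer does not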
def iterativeFindNode(self, key): def iterativeFindNode(self, key):
""" The basic Kademlia node lookup operation """ The basic Kademlia node lookup operation
Call this to find a remote node in the P2P overlay network. Call this to find a remote node in the P2P overlay network.
@param key: the n-bit key (i.e. the node or value ID) to search for @param key: the n-bit key (i.e. the node or value ID) to search for
@type key: str @type key: str
@return: This immediately returns a deferred object, which will return @return: This immediately returns a deferred object, which will return
a list of k "closest" contacts (C{kademlia.contact.Contact} a list of k "closest" contacts (C{kademlia.contact.Contact}
objects) to the specified key as soon as the operation is objects) to the specified key as soon as the operation is
@ -345,12 +331,12 @@ class Node(object):
def iterativeFindValue(self, key): def iterativeFindValue(self, key):
""" The Kademlia search operation (deterministic) """ The Kademlia search operation (deterministic)
Call this to retrieve data from the DHT. Call this to retrieve data from the DHT.
@param key: the n-bit key (i.e. the value ID) to search for @param key: the n-bit key (i.e. the value ID) to search for
@type key: str @type key: str
@return: This immediately returns a deferred object, which will return @return: This immediately returns a deferred object, which will return
either one of two things: either one of two things:
- If the value was found, it will return a Python - If the value was found, it will return a Python
@ -368,24 +354,17 @@ class Node(object):
def checkResult(result): def checkResult(result):
if type(result) == dict: if type(result) == dict:
# We have found the value; now see who was the closest contact without it... # We have found the value; now see who was the closest contact without it...
# if 'closestNodeNoValue' in result:
# ...and store the key/value pair # ...and store the key/value pair
# contact = result['closestNodeNoValue']
# contact.store(key, result[key])
outerDf.callback(result) outerDf.callback(result)
else: else:
# The value wasn't found, but a list of contacts was returned # The value wasn't found, but a list of contacts was returned
# Now, see if we have the value (it might seem wasteful to search on the network # Now, see if we have the value (it might seem wasteful to search on the network
# first, but it ensures that all values are properly propagated through the # first, but it ensures that all values are properly propagated through the
# network # network
#if key in self._dataStore:
if self._dataStore.hasPeersForBlob(key): if self._dataStore.hasPeersForBlob(key):
# Ok, we have the value locally, so use that # Ok, we have the value locally, so use that
peers = self._dataStore.getPeersForBlob(key) peers = self._dataStore.getPeersForBlob(key)
# Send this value to the closest node without it # Send this value to the closest node without it
#if len(result) > 0:
# contact = result[0]
# contact.store(key, value)
outerDf.callback({key: peers, "from_peer": 'self'}) outerDf.callback({key: peers, "from_peer": 'self'})
else: else:
# Ok, value does not exist in DHT at all # Ok, value does not exist in DHT at all
@ -409,7 +388,7 @@ class Node(object):
""" Remove the contact with the specified node ID from this node's """ Remove the contact with the specified node ID from this node's
table of known nodes. This is a simple wrapper for the same method table of known nodes. This is a simple wrapper for the same method
in this object's RoutingTable object in this object's RoutingTable object
@param contactID: The node ID of the contact to remove @param contactID: The node ID of the contact to remove
@type contactID: str @type contactID: str
""" """
@ -418,10 +397,10 @@ class Node(object):
def findContact(self, contactID): def findContact(self, contactID):
""" Find a entangled.kademlia.contact.Contact object for the specified """ Find a entangled.kademlia.contact.Contact object for the specified
cotact ID cotact ID
@param contactID: The contact ID of the required Contact object @param contactID: The contact ID of the required Contact object
@type contactID: str @type contactID: str
@return: Contact object of remote node with the specified node ID, @return: Contact object of remote node with the specified node ID,
or None if the contact was not found or None if the contact was not found
@rtype: twisted.internet.defer.Deferred @rtype: twisted.internet.defer.Deferred
@ -444,7 +423,7 @@ class Node(object):
@rpcmethod @rpcmethod
def ping(self): def ping(self):
""" Used to verify contact between two Kademlia nodes """ Used to verify contact between two Kademlia nodes
@rtype: str @rtype: str
""" """
return 'pong' return 'pong'
@ -452,7 +431,7 @@ class Node(object):
@rpcmethod @rpcmethod
def store(self, key, value, originalPublisherID=None, self_store=False, **kwargs): def store(self, key, value, originalPublisherID=None, self_store=False, **kwargs):
""" Store the received data in this node's local hash table """ Store the received data in this node's local hash table
@param key: The hashtable key of the data @param key: The hashtable key of the data
@type key: str @type key: str
@param value: The actual data (the value associated with C{key}) @param value: The actual data (the value associated with C{key})
@ -467,7 +446,7 @@ class Node(object):
@type age: int @type age: int
@rtype: str @rtype: str
@todo: Since the data (value) may be large, passing it around as a buffer @todo: Since the data (value) may be large, passing it around as a buffer
(which is the case currently) might not be a good idea... will have (which is the case currently) might not be a good idea... will have
to fix this (perhaps use a stream from the Protocol class?) to fix this (perhaps use a stream from the Protocol class?)
@ -484,19 +463,13 @@ class Node(object):
compact_ip = contact.compact_ip() compact_ip = contact.compact_ip()
elif '_rpcNodeContact' in kwargs: elif '_rpcNodeContact' in kwargs:
contact = kwargs['_rpcNodeContact'] contact = kwargs['_rpcNodeContact']
#print contact.address
compact_ip = contact.compact_ip() compact_ip = contact.compact_ip()
#print compact_ip
else: else:
return 'Not OK' return 'Not OK'
#raise TypeError, 'No contact info available' #raise TypeError, 'No contact info available'
if ((self_store is False) and if ((self_store is False) and
(not 'token' in value or not self.verify_token(value['token'], compact_ip))): (not 'token' in value or not self.verify_token(value['token'], compact_ip))):
#if not 'token' in value:
# print "Couldn't find token in value"
#elif not self.verify_token(value['token'], contact.compact_ip()):
# print "Token is invalid"
raise ValueError('Invalid or missing token') raise ValueError('Invalid or missing token')
if 'port' in value: if 'port' in value:
@ -518,11 +491,8 @@ class Node(object):
now = int(time.time()) now = int(time.time())
originallyPublished = now# - age originallyPublished = now# - age
#print compact_address
self._dataStore.addPeerToBlob( self._dataStore.addPeerToBlob(
key, compact_address, now, originallyPublished, originalPublisherID) key, compact_address, now, originallyPublished, originalPublisherID)
#if self_store is True:
# print "looks like it was successful maybe"
return 'OK' return 'OK'
@rpcmethod @rpcmethod
@ -576,7 +546,7 @@ class Node(object):
def _generateID(self): def _generateID(self):
""" Generates an n-bit pseudo-random identifier """ Generates an n-bit pseudo-random identifier
@return: A globally unique n-bit pseudo-random identifier @return: A globally unique n-bit pseudo-random identifier
@rtype: str @rtype: str
""" """
@ -586,12 +556,12 @@ class Node(object):
def _iterativeFind(self, key, startupShortlist=None, rpc='findNode'): def _iterativeFind(self, key, startupShortlist=None, rpc='findNode'):
""" The basic Kademlia iterative lookup operation (for nodes/values) """ The basic Kademlia iterative lookup operation (for nodes/values)
This builds a list of k "closest" contacts through iterative use of This builds a list of k "closest" contacts through iterative use of
the "FIND_NODE" RPC, or if C{findValue} is set to C{True}, using the the "FIND_NODE" RPC, or if C{findValue} is set to C{True}, using the
"FIND_VALUE" RPC, in which case the value (if found) may be returned "FIND_VALUE" RPC, in which case the value (if found) may be returned
instead of a list of contacts instead of a list of contacts
@param key: the n-bit key (i.e. the node or value ID) to search for @param key: the n-bit key (i.e. the node or value ID) to search for
@type key: str @type key: str
@param startupShortlist: A list of contacts to use as the starting @param startupShortlist: A list of contacts to use as the starting
@ -605,7 +575,7 @@ class Node(object):
other operations that piggy-back on the basic Kademlia other operations that piggy-back on the basic Kademlia
lookup operation (Entangled's "delete" RPC, for instance). lookup operation (Entangled's "delete" RPC, for instance).
@type rpc: str @type rpc: str
@return: If C{findValue} is C{True}, the algorithm will stop as soon @return: If C{findValue} is C{True}, the algorithm will stop as soon
as a data value for C{key} is found, and return a dictionary as a data value for C{key} is found, and return a dictionary
containing the key and the found value. Otherwise, it will containing the key and the found value. Otherwise, it will
@ -631,187 +601,17 @@ class Node(object):
# This is used during the bootstrap process; node ID's are most probably fake # This is used during the bootstrap process; node ID's are most probably fake
shortlist = startupShortlist shortlist = startupShortlist
# List of active queries; len() indicates number of active probes
#
# n.b: using lists for these variables, because Python doesn't
# allow binding a new value to a name in an enclosing
# (non-global) scope
activeProbes = []
# List of contact IDs that have already been queried
alreadyContacted = []
# Probes that were active during the previous iteration
# A list of found and known-to-be-active remote nodes
activeContacts = []
# This should only contain one entry; the next scheduled iteration call
pendingIterationCalls = []
prevClosestNode = [None]
findValueResult = {}
slowNodeCount = [0]
def extendShortlist(responseTuple):
""" @type responseMsg: kademlia.msgtypes.ResponseMessage """
# The "raw response" tuple contains the response message,
# and the originating address info
responseMsg = responseTuple[0]
originAddress = responseTuple[1] # tuple: (ip adress, udp port)
# Make sure the responding node is valid, and abort the operation if it isn't
if responseMsg.nodeID in activeContacts or responseMsg.nodeID == self.id:
return responseMsg.nodeID
# Mark this node as active
if responseMsg.nodeID in shortlist:
# Get the contact information from the shortlist...
aContact = shortlist[shortlist.index(responseMsg.nodeID)]
else:
# If it's not in the shortlist; we probably used a fake ID to reach it
# - reconstruct the contact, using the real node ID this time
aContact = Contact(
responseMsg.nodeID, originAddress[0], originAddress[1], self._protocol)
activeContacts.append(aContact)
# This makes sure "bootstrap"-nodes with "fake" IDs don't get queried twice
if responseMsg.nodeID not in alreadyContacted:
alreadyContacted.append(responseMsg.nodeID)
# Now grow extend the (unverified) shortlist with the returned contacts
result = responseMsg.response
#TODO: some validation on the result (for guarding against attacks)
# If we are looking for a value, first see if this result is the value
# we are looking for before treating it as a list of contact triples
if findValue is True and key in result and not 'contacts' in result:
# We have found the value
findValueResult[key] = result[key]
findValueResult['from_peer'] = aContact.address
else:
if findValue is True:
# We are looking for a value, and the remote node didn't have it
# - mark it as the closest "empty" node, if it is
if 'closestNodeNoValue' in findValueResult:
is_closer = (
self._routingTable.distance(key, responseMsg.nodeID) <
self._routingTable.distance(key, activeContacts[0].id))
if is_closer:
findValueResult['closestNodeNoValue'] = aContact
else:
findValueResult['closestNodeNoValue'] = aContact
contactTriples = result['contacts']
else:
contactTriples = result
for contactTriple in contactTriples:
if isinstance(contactTriple, (list, tuple)) and len(contactTriple) == 3:
testContact = Contact(
contactTriple[0], contactTriple[1], contactTriple[2], self._protocol)
if testContact not in shortlist:
shortlist.append(testContact)
return responseMsg.nodeID
def removeFromShortlist(failure):
""" @type failure: twisted.python.failure.Failure """
failure.trap(protocol.TimeoutError)
deadContactID = failure.getErrorMessage()
if deadContactID in shortlist:
shortlist.remove(deadContactID)
return deadContactID
def cancelActiveProbe(contactID):
activeProbes.pop()
if len(activeProbes) <= constants.alpha/2 and len(pendingIterationCalls):
# Force the iteration
pendingIterationCalls[0].cancel()
del pendingIterationCalls[0]
#print 'forcing iteration ================='
searchIteration()
def log_error(err):
log.error(err.getErrorMessage())
# Send parallel, asynchronous FIND_NODE RPCs to the shortlist of contacts
def searchIteration():
#print '==> searchiteration'
slowNodeCount[0] = len(activeProbes)
# TODO: move sort_key to be a method on the class
def sort_key(firstContact, secondContact, targetKey=key):
return cmp(
self._routingTable.distance(firstContact.id, targetKey),
self._routingTable.distance(secondContact.id, targetKey)
)
# Sort the discovered active nodes from closest to furthest
activeContacts.sort(sort_key)
# This makes sure a returning probe doesn't force calling this function by mistake
while len(pendingIterationCalls):
del pendingIterationCalls[0]
# See if should continue the search
if key in findValueResult:
outerDf.callback(findValueResult)
return
elif len(activeContacts) and findValue == False:
is_all_done = (
len(activeContacts) >= constants.k or
(
activeContacts[0] == prevClosestNode[0] and
len(activeProbes) == slowNodeCount[0]
)
)
if is_all_done:
# TODO: Re-send the FIND_NODEs to all of the k closest nodes not already queried
#
# Ok, we're done; either we have accumulated k
# active contacts or no improvement in closestNode
# has been noted
outerDf.callback(activeContacts)
return
# The search continues...
if len(activeContacts):
prevClosestNode[0] = activeContacts[0]
contactedNow = 0
shortlist.sort(sort_key)
# Store the current shortList length before contacting other nodes
prevShortlistLength = len(shortlist)
for contact in shortlist:
if contact.id not in alreadyContacted:
activeProbes.append(contact.id)
rpcMethod = getattr(contact, rpc)
df = rpcMethod(key, rawResponse=True)
df.addCallback(extendShortlist)
df.addErrback(removeFromShortlist)
df.addCallback(cancelActiveProbe)
df.addErrback(log_error)
alreadyContacted.append(contact.id)
contactedNow += 1
if contactedNow == constants.alpha:
break
should_lookup_active_calls = (
len(activeProbes) > slowNodeCount[0] or
(
len(shortlist) < constants.k and
len(activeContacts) < len(shortlist) and
len(activeProbes) > 0
)
)
if should_lookup_active_calls:
# Schedule the next iteration if there are any active
# calls (Kademlia uses loose parallelism)
call = twisted.internet.reactor.callLater(
constants.iterativeLookupDelay, searchIteration) #IGNORE:E1101
pendingIterationCalls.append(call)
# Check for a quick contact response that made an update to the shortList
elif prevShortlistLength < len(shortlist):
# Ensure that the closest contacts are taken from the updated shortList
searchIteration()
else:
#print '++++++++++++++ DONE (logically) +++++++++++++\n\n'
# If no probes were sent, there will not be any improvement, so we're done
outerDf.callback(activeContacts)
outerDf = defer.Deferred() outerDf = defer.Deferred()
helper = _IterativeFindHelper(self, outerDf, shortlist, key, findValue, rpc)
# Start the iterations # Start the iterations
searchIteration() helper.searchIteration()
return outerDf return outerDf
def _refreshNode(self): def _refreshNode(self):
""" Periodically called to perform k-bucket refreshes and data """ Periodically called to perform k-bucket refreshes and data
replication/republishing as necessary """ replication/republishing as necessary """
#print 'refreshNode called'
df = self._refreshRoutingTable() df = self._refreshRoutingTable()
#df.addCallback(self._republishData)
df.addCallback(self._removeExpiredPeers) df.addCallback(self._removeExpiredPeers)
df.addCallback(self._scheduleNextNodeRefresh) df.addCallback(self._scheduleNextNodeRefresh)
@ -830,13 +630,8 @@ class Node(object):
searchForNextNodeID() searchForNextNodeID()
return outerDf return outerDf
#def _republishData(self, *args):
# #print '---republishData() called'
# df = twisted.internet.threads.deferToThread(self._threadedRepublishData)
# return df
def _scheduleNextNodeRefresh(self, *args): def _scheduleNextNodeRefresh(self, *args):
#print '==== sheduling next refresh'
self.next_refresh_call = twisted.internet.reactor.callLater( self.next_refresh_call = twisted.internet.reactor.callLater(
constants.checkRefreshInterval, self._refreshNode) constants.checkRefreshInterval, self._refreshNode)
@ -846,6 +641,266 @@ class Node(object):
return df return df
# This was originally a set of nested methods in _iterativeFind
# but they have been moved into this helper class in order to
# have better scoping and readability
class _IterativeFindHelper(object):
# TODO: use polymorphism to search for a value or node
# instead of using a find_value flag
def __init__(self, node, outer_d, shortlist, key, find_value, rpc):
self.node = node
self.outer_d = outer_d
self.shortlist = shortlist
self.key = key
self.find_value = find_value
self.rpc = rpc
# all distance operations in this class only care about the distance
# to self.key, so this makes it easier to calculate those
self.distance = Distance(key)
# List of active queries; len() indicates number of active probes
#
# n.b: using lists for these variables, because Python doesn't
# allow binding a new value to a name in an enclosing
# (non-global) scope
self.active_probes = []
# List of contact IDs that have already been queried
self.already_contacted = []
# Probes that were active during the previous iteration
# A list of found and known-to-be-active remote nodes
self.active_contacts = []
# This should only contain one entry; the next scheduled iteration call
self.pending_iteration_calls = []
self.prev_closest_node = [None]
self.find_value_result = {}
self.slow_node_count = [0]
def extendShortlist(self, responseTuple):
""" @type responseMsg: kademlia.msgtypes.ResponseMessage """
# The "raw response" tuple contains the response message,
# and the originating address info
responseMsg = responseTuple[0]
originAddress = responseTuple[1] # tuple: (ip address, udp port)
# Make sure the responding node is valid, and abort the operation if it isn't
if responseMsg.nodeID in self.active_contacts or responseMsg.nodeID == self.node.id:
return responseMsg.nodeID
# Mark this node as active
aContact = self._getActiveContact(responseMsg, originAddress)
self.active_contacts.append(aContact)
# This makes sure "bootstrap"-nodes with "fake" IDs don't get queried twice
if responseMsg.nodeID not in self.already_contacted:
self.already_contacted.append(responseMsg.nodeID)
# Now extend the (unverified) shortlist with the returned contacts
result = responseMsg.response
#TODO: some validation on the result (for guarding against attacks)
# If we are looking for a value, first see if this result is the value
# we are looking for before treating it as a list of contact triples
if self.find_value is True and self.key in result and not 'contacts' in result:
# We have found the value
self.find_value_result[self.key] = result[self.key]
self.find_value_result['from_peer'] = aContact.address
else:
if self.find_value is True:
self._setClosestNodeValue(responseMsg, aContact)
self._keepSearching(result)
return responseMsg.nodeID
def _getActiveContact(self, responseMsg, originAddress):
if responseMsg.nodeID in self.shortlist:
# Get the contact information from the shortlist...
return self.shortlist[self.shortlist.index(responseMsg.nodeID)]
else:
# If it's not in the shortlist; we probably used a fake ID to reach it
# - reconstruct the contact, using the real node ID this time
return Contact(
responseMsg.nodeID, originAddress[0], originAddress[1], self.node._protocol)
def _keepSearching(self, result):
contactTriples = self._getContactTriples(result)
for contactTriple in contactTriples:
self._addIfValid(contactTriple)
def _getContactTriples(self, result):
if self.find_value is True:
return result['contacts']
else:
return result
def _setClosestNodeValue(self, responseMsg, aContact):
# We are looking for a value, and the remote node didn't have it
# - mark it as the closest "empty" node, if it is
if 'closestNodeNoValue' in self.find_value_result:
if self._is_closer(responseMsg):
self.find_value_result['closestNodeNoValue'] = aContact
else:
self.find_value_result['closestNodeNoValue'] = aContact
def _is_closer(self, responseMsg):
return self.distance.is_closer(responseMsg.nodeID, self.active_contacts[0].id)
def _addIfValid(self, contactTriple):
if isinstance(contactTriple, (list, tuple)) and len(contactTriple) == 3:
testContact = Contact(
contactTriple[0], contactTriple[1], contactTriple[2], self.node._protocol)
if testContact not in self.shortlist:
self.shortlist.append(testContact)
def removeFromShortlist(self, failure):
""" @type failure: twisted.python.failure.Failure """
failure.trap(protocol.TimeoutError)
deadContactID = failure.getErrorMessage()
if deadContactID in self.shortlist:
self.shortlist.remove(deadContactID)
return deadContactID
def cancelActiveProbe(self, contactID):
self.active_probes.pop()
if len(self.active_probes) <= constants.alpha/2 and len(self.pending_iteration_calls):
# Force the iteration
self.pending_iteration_calls[0].cancel()
del self.pending_iteration_calls[0]
self.searchIteration()
def sortByDistance(self, contact_list):
"""Sort the list of contacts in order by distance from key"""
ExpensiveSort(contact_list, self.distance.to_contact).sort()
# Send parallel, asynchronous FIND_NODE RPCs to the shortlist of contacts
def searchIteration(self):
self.slow_node_count[0] = len(self.active_probes)
# Sort the discovered active nodes from closest to furthest
self.sortByDistance(self.active_contacts)
# This makes sure a returning probe doesn't force calling this function by mistake
while len(self.pending_iteration_calls):
del self.pending_iteration_calls[0]
# See if we should continue the search
if self.key in self.find_value_result:
self.outer_d.callback(self.find_value_result)
return
elif len(self.active_contacts) and self.find_value == False:
if self._is_all_done():
# TODO: Re-send the FIND_NODEs to all of the k closest nodes not already queried
#
# Ok, we're done; either we have accumulated k active
# contacts or no improvement in closestNode has been
# noted
self.outer_d.callback(self.active_contacts)
return
# The search continues...
if len(self.active_contacts):
self.prev_closest_node[0] = self.active_contacts[0]
contactedNow = 0
self.sortByDistance(self.shortlist)
# Store the current shortList length before contacting other nodes
prevShortlistLength = len(self.shortlist)
for contact in self.shortlist:
if contact.id not in self.already_contacted:
self._probeContact(contact)
contactedNow += 1
if contactedNow == constants.alpha:
break
if self._should_lookup_active_calls():
# Schedule the next iteration if there are any active
# calls (Kademlia uses loose parallelism)
call = twisted.internet.reactor.callLater(
constants.iterativeLookupDelay, self.searchIteration) #IGNORE:E1101
self.pending_iteration_calls.append(call)
# Check for a quick contact response that made an update to the shortList
elif prevShortlistLength < len(self.shortlist):
# Ensure that the closest contacts are taken from the updated shortList
self.searchIteration()
else:
# If no probes were sent, there will not be any improvement, so we're done
self.outer_d.callback(self.active_contacts)
def _probeContact(self, contact):
self.active_probes.append(contact.id)
rpcMethod = getattr(contact, self.rpc)
df = rpcMethod(self.key, rawResponse=True)
df.addCallback(self.extendShortlist)
df.addErrback(self.removeFromShortlist)
df.addCallback(self.cancelActiveProbe)
df.addErrback(log.fail(), 'Failed to contact %s', contact)
self.already_contacted.append(contact.id)
def _should_lookup_active_calls(self):
return (
len(self.active_probes) > self.slow_node_count[0] or
(
len(self.shortlist) < constants.k and
len(self.active_contacts) < len(self.shortlist) and
len(self.active_probes) > 0
)
)
def _is_all_done(self):
return (
len(self.active_contacts) >= constants.k or
(
self.active_contacts[0] == self.prev_closest_node[0] and
len(self.active_probes) == self.slow_node_count[0]
)
)
class Distance(object):
"""Calculate the XOR result between two string variables.
Frequently we re-use one of the points, so as an optimization
we pre-calculate the long value of that point.
"""
def __init__(self, key):
self.key = key
self.val_key_one = long(key.encode('hex'), 16)
def __call__(self, key_two):
val_key_two = long(key_two.encode('hex'), 16)
return self.val_key_one ^ val_key_two
def is_closer(self, a, b):
"""Returns true is `a` is closer to `key` than `b` is"""
return self(a) < self(b)
def to_contact(self, contact):
"""A convenience function for calculating the distance to a contact"""
return self(contact.id)
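A quick usage sketch of Distance with short two-byte keys (real node IDs are much longer); the metric is the integer XOR of the two keys, and is_closer simply compares two such distances:

d = Distance('\x12\x34')
assert d('\x12\x35') == 0x0001                 # differs only in the lowest bit
assert d('\x92\x34') == 0x8000                 # differs in the highest bit
assert d.is_closer('\x12\x35', '\x92\x34')     # the first key is nearer to '\x12\x34'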
class ExpensiveSort(object):
"""Sort a list in place.
The result of `key(item)` is cached for each item in the `to_sort`
list as an optimization. This can be useful when `key` is
expensive.
Attributes:
to_sort: a list of items to sort
key: callable, like `key` in normal python sort
attr: the attribute name used to cache the value on each item.
"""
def __init__(self, to_sort, key, attr='__value'):
self.to_sort = to_sort
self.key = key
self.attr = attr
def sort(self):
self._cacheValues()
self._sortByValue()
self._removeValue()
def _cacheValues(self):
for item in self.to_sort:
setattr(item, self.attr, self.key(item))
def _sortByValue(self):
self.to_sort.sort(key=operator.attrgetter(self.attr))
def _removeValue(self):
for item in self.to_sort:
delattr(item, self.attr)
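And a usage sketch of ExpensiveSort, sorting a couple of stand-in contacts in place by their distance to a key; each distance is computed once, cached on the item, and removed again after the sort:

class FakeContact(object):
    def __init__(self, id):
        self.id = id

distance = Distance('\x12\x34')
contacts = [FakeContact('\x92\x34'), FakeContact('\x12\x35')]
ExpensiveSort(contacts, distance.to_contact).sort()
assert [c.id for c in contacts] == ['\x12\x35', '\x92\x34']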
def main(): def main():
parser = argparse.ArgumentParser(description="Launch a dht node") parser = argparse.ArgumentParser(description="Launch a dht node")
parser.add_argument("udp_port", help="The UDP port on which the node will listen", parser.add_argument("udp_port", help="The UDP port on which the node will listen",

View file

@ -192,7 +192,7 @@ class KademliaProtocol(protocol.DatagramProtocol):
@note: The header used for breaking up large data segments will @note: The header used for breaking up large data segments will
possibly be moved out of the KademliaProtocol class in the possibly be moved out of the KademliaProtocol class in the
future, into something similar to a message translator/encoder future, into something similar to a message translator/encoder
class (see C{kademlia.msgformat} and C{kademlia.encoding}). class (see C{kademlia.msgformat} and C{kademlia.encoding}).
""" """
if len(data) > self.msgSizeLimit: if len(data) > self.msgSizeLimit:
# We have to spread the data over multiple UDP datagrams, # We have to spread the data over multiple UDP datagrams,
@ -208,7 +208,6 @@ class KademliaProtocol(protocol.DatagramProtocol):
seqNumber = 0 seqNumber = 0
startPos = 0 startPos = 0
while seqNumber < totalPackets: while seqNumber < totalPackets:
#reactor.iterate() #IGNORE:E1101
packetData = data[startPos:startPos+self.msgSizeLimit] packetData = data[startPos:startPos+self.msgSizeLimit]
encSeqNumber = chr(seqNumber >> 8) + chr(seqNumber & 0xff) encSeqNumber = chr(seqNumber >> 8) + chr(seqNumber & 0xff)
txData = '\x00%s%s%s\x00%s' % (encTotalPackets, encSeqNumber, rpcID, packetData) txData = '\x00%s%s%s\x00%s' % (encTotalPackets, encSeqNumber, rpcID, packetData)
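For reference, the framing used above when a message exceeds msgSizeLimit is a small fixed header per datagram: a zero byte, a 2-byte big-endian total-packet count, a 2-byte sequence number, the rpcID, another zero byte, then the payload slice. A standalone sketch with toy sizes (the rpcID width here is an assumption):

msgSizeLimit = 8                 # tiny limit so the example actually splits
rpcID = 'R' * 20                 # assumed fixed-width RPC id
data = 'x' * 20                  # the encoded message to transmit

totalPackets = len(data) / msgSizeLimit
if len(data) % msgSizeLimit > 0:
    totalPackets += 1
encTotalPackets = chr(totalPackets >> 8) + chr(totalPackets & 0xff)

packets = []
seqNumber, startPos = 0, 0
while seqNumber < totalPackets:
    packetData = data[startPos:startPos + msgSizeLimit]
    encSeqNumber = chr(seqNumber >> 8) + chr(seqNumber & 0xff)
    packets.append('\x00%s%s%s\x00%s' % (
        encTotalPackets, encSeqNumber, rpcID, packetData))
    seqNumber += 1
    startPos += msgSizeLimit

assert len(packets) == 3         # 20 bytes split into 8 + 8 + 4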
@ -270,13 +269,8 @@ class KademliaProtocol(protocol.DatagramProtocol):
if callable(func) and hasattr(func, 'rpcmethod'): if callable(func) and hasattr(func, 'rpcmethod'):
# Call the exposed Node method and return the result to the deferred callback chain # Call the exposed Node method and return the result to the deferred callback chain
try: try:
##try:
## # Try to pass the sender's node id to the function...
kwargs = {'_rpcNodeID': senderContact.id, '_rpcNodeContact': senderContact} kwargs = {'_rpcNodeID': senderContact.id, '_rpcNodeContact': senderContact}
result = func(*args, **kwargs) result = func(*args, **kwargs)
##except TypeError:
## # ...or simply call it if that fails
## result = func(*args)
except Exception, e: except Exception, e:
df.errback(failure.Failure(e)) df.errback(failure.Failure(e))
else: else:
@ -288,34 +282,41 @@ class KademliaProtocol(protocol.DatagramProtocol):
def _msgTimeout(self, messageID): def _msgTimeout(self, messageID):
""" Called when an RPC request message times out """ """ Called when an RPC request message times out """
# Find the message that timed out # Find the message that timed out
if self._sentMessages.has_key(messageID): if not self._sentMessages.has_key(messageID):
remoteContactID, df = self._sentMessages[messageID][0:2]
if self._partialMessages.has_key(messageID):
# We are still receiving this message
# See if any progress has been made; if not, kill the message
if self._partialMessagesProgress.has_key(messageID):
same_length = (
len(self._partialMessagesProgress[messageID]) ==
len(self._partialMessages[messageID]))
if same_length:
# No progress has been made
del self._partialMessagesProgress[messageID]
del self._partialMessages[messageID]
df.errback(failure.Failure(TimeoutError(remoteContactID)))
return
# Reset the RPC timeout timer
timeoutCall = reactor.callLater(
constants.rpcTimeout, self._msgTimeout, messageID) #IGNORE:E1101
self._sentMessages[messageID] = (remoteContactID, df, timeoutCall)
return
del self._sentMessages[messageID]
# The message's destination node is now considered to be dead;
# raise an (asynchronous) TimeoutError exception and update the host node
self._node.removeContact(remoteContactID)
df.errback(failure.Failure(TimeoutError(remoteContactID)))
else:
# This should never be reached # This should never be reached
log.error("deferred timed out, but is not present in sent messages list!") log.error("deferred timed out, but is not present in sent messages list!")
return
remoteContactID, df = self._sentMessages[messageID][0:2]
if self._partialMessages.has_key(messageID):
# We are still receiving this message
self._msgTimeoutInProgress(messageID, remoteContactID, df)
return
del self._sentMessages[messageID]
# The message's destination node is now considered to be dead;
# raise an (asynchronous) TimeoutError exception and update the host node
self._node.removeContact(remoteContactID)
df.errback(failure.Failure(TimeoutError(remoteContactID)))
def _msgTimeoutInProgress(self, messageID, remoteContactID, df):
# See if any progress has been made; if not, kill the message
if self._hasProgressBeenMade(messageID):
# Reset the RPC timeout timer
timeoutCall = reactor.callLater(constants.rpcTimeout, self._msgTimeout, messageID)
self._sentMessages[messageID] = (remoteContactID, df, timeoutCall)
else:
# No progress has been made
del self._partialMessagesProgress[messageID]
del self._partialMessages[messageID]
df.errback(failure.Failure(TimeoutError(remoteContactID)))
def _hasProgressBeenMade(self, messageID):
return (
self._partialMessagesProgress.has_key(messageID) and
(
len(self._partialMessagesProgress[messageID]) !=
len(self._partialMessages[messageID])
)
)
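The predicate above boils down to "has more of the partial message arrived since the last time the timer fired?"; when it has, the timeout is simply rescheduled instead of failing the RPC. A toy model of that check:

# Snapshot taken at the previous timeout vs. what has arrived since.
partial_progress = {'msg-1': 'abc'}
partial_messages = {'msg-1': 'abcdef'}

def has_progress_been_made(message_id):
    return (message_id in partial_progress and
            len(partial_progress[message_id]) != len(partial_messages[message_id]))

assert has_progress_been_made('msg-1')   # more data arrived: reset the timer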
def stopProtocol(self): def stopProtocol(self):
""" Called when the transport is disconnected. """ Called when the transport is disconnected.

View file

@ -30,21 +30,11 @@ class RoutingTable(object):
@param contact: The contact to add to this node's k-buckets @param contact: The contact to add to this node's k-buckets
@type contact: kademlia.contact.Contact @type contact: kademlia.contact.Contact
""" """
def distance(self, keyOne, keyTwo):
""" Calculate the XOR result between two string variables
@return: XOR result of two long variables
@rtype: long
"""
valKeyOne = long(keyOne.encode('hex'), 16)
valKeyTwo = long(keyTwo.encode('hex'), 16)
return valKeyOne ^ valKeyTwo
def findCloseNodes(self, key, count, _rpcNodeID=None): def findCloseNodes(self, key, count, _rpcNodeID=None):
""" Finds a number of known nodes closest to the node/value with the """ Finds a number of known nodes closest to the node/value with the
specified key. specified key.
@param key: the n-bit key (i.e. the node or value ID) to search for @param key: the n-bit key (i.e. the node or value ID) to search for
@type key: str @type key: str
@param count: the amount of contacts to return @param count: the amount of contacts to return
@ -53,9 +43,9 @@ class RoutingTable(object):
Whatever ID is passed in the parameter will get Whatever ID is passed in the parameter will get
excluded from the list of returned contacts. excluded from the list of returned contacts.
@type _rpcNodeID: str @type _rpcNodeID: str
@return: A list of node contacts (C{kademlia.contact.Contact instances}) @return: A list of node contacts (C{kademlia.contact.Contact instances})
closest to the specified key. closest to the specified key.
This method will return C{k} (or C{count}, if specified) This method will return C{k} (or C{count}, if specified)
contacts if at all possible; it will only return fewer if the contacts if at all possible; it will only return fewer if the
node is returning all of the contacts that it knows of. node is returning all of the contacts that it knows of.
@ -63,7 +53,7 @@ class RoutingTable(object):
""" """
def getContact(self, contactID): def getContact(self, contactID):
""" Returns the (known) contact with the specified node ID """ Returns the (known) contact with the specified node ID
@raise ValueError: No contact with the specified contact ID is known @raise ValueError: No contact with the specified contact ID is known
by this node by this node
""" """
@ -83,7 +73,7 @@ class RoutingTable(object):
will be refreshed, regardless of the time they were last will be refreshed, regardless of the time they were last
accessed. accessed.
@type force: bool @type force: bool
@return: A list of node ID's that the parent node should search for @return: A list of node ID's that the parent node should search for
in order to refresh the routing Table in order to refresh the routing Table
@rtype: list @rtype: list
@ -91,14 +81,14 @@ class RoutingTable(object):
def removeContact(self, contactID): def removeContact(self, contactID):
""" Remove the contact with the specified node ID from the routing """ Remove the contact with the specified node ID from the routing
table table
@param contactID: The node ID of the contact to remove @param contactID: The node ID of the contact to remove
@type contactID: str @type contactID: str
""" """
def touchKBucket(self, key): def touchKBucket(self, key):
""" Update the "last accessed" timestamp of the k-bucket which covers """ Update the "last accessed" timestamp of the k-bucket which covers
the range containing the specified key in the key/ID space the range containing the specified key in the key/ID space
@param key: A key in the range of the target k-bucket @param key: A key in the range of the target k-bucket
@type key: str @type key: str
""" """
@ -106,13 +96,13 @@ class RoutingTable(object):
class TreeRoutingTable(RoutingTable): class TreeRoutingTable(RoutingTable):
""" This class implements a routing table used by a Node class. """ This class implements a routing table used by a Node class.
The Kademlia routing table is a binary tree whose leaves are k-buckets, The Kademlia routing table is a binary tree whose leaves are k-buckets,
where each k-bucket contains nodes with some common prefix of their IDs. where each k-bucket contains nodes with some common prefix of their IDs.
This prefix is the k-bucket's position in the binary tree; it therefore This prefix is the k-bucket's position in the binary tree; it therefore
covers some range of ID values, and together all of the k-buckets cover covers some range of ID values, and together all of the k-buckets cover
the entire n-bit ID (or key) space (with no overlap). the entire n-bit ID (or key) space (with no overlap).
@note: In this implementation, nodes in the tree (the k-buckets) are @note: In this implementation, nodes in the tree (the k-buckets) are
added dynamically, as needed; this technique is described in the 13-page added dynamically, as needed; this technique is described in the 13-page
version of the Kademlia paper, in section 2.4. It does, however, use the version of the Kademlia paper, in section 2.4. It does, however, use the
@ -162,11 +152,11 @@ class TreeRoutingTable(RoutingTable):
# the k-bucket. This implementation follows section # the k-bucket. This implementation follows section
# 2.2 regarding this point. # 2.2 regarding this point.
headContact = self._buckets[bucketIndex]._contacts[0] headContact = self._buckets[bucketIndex]._contacts[0]
def replaceContact(failure): def replaceContact(failure):
""" Callback for the deferred PING RPC to see if the head """ Callback for the deferred PING RPC to see if the head
node in the k-bucket is still responding node in the k-bucket is still responding
@type failure: twisted.python.failure.Failure @type failure: twisted.python.failure.Failure
""" """
failure.trap(TimeoutError) failure.trap(TimeoutError)
@ -180,18 +170,18 @@ class TreeRoutingTable(RoutingTable):
pass pass
# ...and add the new one at the tail of the bucket # ...and add the new one at the tail of the bucket
self.addContact(contact) self.addContact(contact)
# Ping the least-recently seen contact in this k-bucket # Ping the least-recently seen contact in this k-bucket
headContact = self._buckets[bucketIndex]._contacts[0] headContact = self._buckets[bucketIndex]._contacts[0]
df = headContact.ping() df = headContact.ping()
# If there's an error (i.e. timeout), remove the head # If there's an error (i.e. timeout), remove the head
# contact, and append the new one # contact, and append the new one
df.addErrback(replaceContact) df.addErrback(replaceContact)
def findCloseNodes(self, key, count, _rpcNodeID=None): def findCloseNodes(self, key, count, _rpcNodeID=None):
""" Finds a number of known nodes closest to the node/value with the """ Finds a number of known nodes closest to the node/value with the
specified key. specified key.
@param key: the n-bit key (i.e. the node or value ID) to search for @param key: the n-bit key (i.e. the node or value ID) to search for
@type key: str @type key: str
@param count: the amount of contacts to return @param count: the amount of contacts to return
@ -200,17 +190,14 @@ class TreeRoutingTable(RoutingTable):
Whatever ID is passed in the parameter will get Whatever ID is passed in the parameter will get
excluded from the list of returned contacts. excluded from the list of returned contacts.
@type _rpcNodeID: str @type _rpcNodeID: str
@return: A list of node contacts (C{kademlia.contact.Contact instances}) @return: A list of node contacts (C{kademlia.contact.Contact instances})
closest to the specified key. closest to the specified key.
This method will return C{k} (or C{count}, if specified) This method will return C{k} (or C{count}, if specified)
contacts if at all possible; it will only return fewer if the contacts if at all possible; it will only return fewer if the
node is returning all of the contacts that it knows of. node is returning all of the contacts that it knows of.
@rtype: list @rtype: list
""" """
#if key == self.id:
# bucketIndex = 0 #TODO: maybe not allow this to continue?
#else:
bucketIndex = self._kbucketIndex(key) bucketIndex = self._kbucketIndex(key)
closestNodes = self._buckets[bucketIndex].getContacts(constants.k, _rpcNodeID) closestNodes = self._buckets[bucketIndex].getContacts(constants.k, _rpcNodeID)
# This method must return k contacts (even if we have the node # This method must return k contacts (even if we have the node
@ -219,7 +206,7 @@ class TreeRoutingTable(RoutingTable):
i = 1 i = 1
canGoLower = bucketIndex-i >= 0 canGoLower = bucketIndex-i >= 0
canGoHigher = bucketIndex+i < len(self._buckets) canGoHigher = bucketIndex+i < len(self._buckets)
# Fill up the node list to k nodes, starting with the closest neighbouring nodes known # Fill up the node list to k nodes, starting with the closest neighbouring nodes known
while len(closestNodes) < constants.k and (canGoLower or canGoHigher): while len(closestNodes) < constants.k and (canGoLower or canGoHigher):
#TODO: this may need to be optimized #TODO: this may need to be optimized
if canGoLower: if canGoLower:
@ -237,7 +224,7 @@ class TreeRoutingTable(RoutingTable):
def getContact(self, contactID): def getContact(self, contactID):
""" Returns the (known) contact with the specified node ID """ Returns the (known) contact with the specified node ID
@raise ValueError: No contact with the specified contact ID is known @raise ValueError: No contact with the specified contact ID is known
by this node by this node
""" """
@ -265,7 +252,7 @@ class TreeRoutingTable(RoutingTable):
will be refreshed, regardless of the time they were last will be refreshed, regardless of the time they were last
accessed. accessed.
@type force: bool @type force: bool
@return: A list of node ID's that the parent node should search for @return: A list of node ID's that the parent node should search for
in order to refresh the routing Table in order to refresh the routing Table
@rtype: list @rtype: list
@ -282,7 +269,7 @@ class TreeRoutingTable(RoutingTable):
def removeContact(self, contactID): def removeContact(self, contactID):
""" Remove the contact with the specified node ID from the routing """ Remove the contact with the specified node ID from the routing
table table
@param contactID: The node ID of the contact to remove @param contactID: The node ID of the contact to remove
@type contactID: str @type contactID: str
""" """
@ -290,13 +277,12 @@ class TreeRoutingTable(RoutingTable):
try: try:
self._buckets[bucketIndex].removeContact(contactID) self._buckets[bucketIndex].removeContact(contactID)
except ValueError: except ValueError:
#print 'removeContact(): Contact not in routing table'
return return
def touchKBucket(self, key): def touchKBucket(self, key):
""" Update the "last accessed" timestamp of the k-bucket which covers """ Update the "last accessed" timestamp of the k-bucket which covers
the range containing the specified key in the key/ID space the range containing the specified key in the key/ID space
@param key: A key in the range of the target k-bucket @param key: A key in the range of the target k-bucket
@type key: str @type key: str
""" """
@ -306,10 +292,10 @@ class TreeRoutingTable(RoutingTable):
def _kbucketIndex(self, key): def _kbucketIndex(self, key):
""" Calculate the index of the k-bucket which is responsible for the """ Calculate the index of the k-bucket which is responsible for the
specified key (or ID) specified key (or ID)
@param key: The key for which to find the appropriate k-bucket index @param key: The key for which to find the appropriate k-bucket index
@type key: str @type key: str
@return: The index of the k-bucket responsible for the specified key @return: The index of the k-bucket responsible for the specified key
@rtype: int @rtype: int
""" """
@ -324,7 +310,7 @@ class TreeRoutingTable(RoutingTable):
def _randomIDInBucketRange(self, bucketIndex): def _randomIDInBucketRange(self, bucketIndex):
""" Returns a random ID in the specified k-bucket's range """ Returns a random ID in the specified k-bucket's range
@param bucketIndex: The index of the k-bucket to use @param bucketIndex: The index of the k-bucket to use
@type bucketIndex: int @type bucketIndex: int
""" """
@ -342,7 +328,7 @@ class TreeRoutingTable(RoutingTable):
def _splitBucket(self, oldBucketIndex): def _splitBucket(self, oldBucketIndex):
""" Splits the specified k-bucket into two new buckets which together """ Splits the specified k-bucket into two new buckets which together
cover the same range in the key/ID space cover the same range in the key/ID space
@param oldBucketIndex: The index of k-bucket to split (in this table's @param oldBucketIndex: The index of k-bucket to split (in this table's
list of k-buckets) list of k-buckets)
@type oldBucketIndex: int @type oldBucketIndex: int
@ -372,7 +358,7 @@ class OptimizedTreeRoutingTable(TreeRoutingTable):
TreeRoutingTable.__init__(self, parentNodeID) TreeRoutingTable.__init__(self, parentNodeID)
# Cache containing nodes eligible to replace stale k-bucket entries # Cache containing nodes eligible to replace stale k-bucket entries
self._replacementCache = {} self._replacementCache = {}
def addContact(self, contact): def addContact(self, contact):
""" Add the given contact to the correct k-bucket; if it already """ Add the given contact to the correct k-bucket; if it already
exists, its status will be updated exists, its status will be updated
@ -415,11 +401,11 @@ class OptimizedTreeRoutingTable(TreeRoutingTable):
elif len(self._replacementCache) >= constants.k: elif len(self._replacementCache) >= constants.k:
self._replacementCache.pop(0) self._replacementCache.pop(0)
self._replacementCache[bucketIndex].append(contact) self._replacementCache[bucketIndex].append(contact)
def removeContact(self, contactID): def removeContact(self, contactID):
""" Remove the contact with the specified node ID from the routing """ Remove the contact with the specified node ID from the routing
table table
@param contactID: The node ID of the contact to remove @param contactID: The node ID of the contact to remove
@type contactID: str @type contactID: str
""" """
@ -427,10 +413,9 @@ class OptimizedTreeRoutingTable(TreeRoutingTable):
try: try:
contact = self._buckets[bucketIndex].getContact(contactID) contact = self._buckets[bucketIndex].getContact(contactID)
except ValueError: except ValueError:
#print 'removeContact(): Contact not in routing table'
return return
contact.failedRPCs += 1 contact.failedRPCs += 1
if contact.failedRPCs >= 5: if contact.failedRPCs >= 5:
self._buckets[bucketIndex].removeContact(contactID) self._buckets[bucketIndex].removeContact(contactID)
# Replace this stale contact with one from our replacement cache, if we have any # Replace this stale contact with one from our replacement cache, if we have any
if self._replacementCache.has_key(bucketIndex): if self._replacementCache.has_key(bucketIndex):
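The hunk is cut off here, but the visible lines spell out the stale-contact policy of OptimizedTreeRoutingTable: a contact that has failed five RPCs is dropped from its bucket and, when a replacement is cached for that bucket, a cached contact takes its place. A minimal illustration of that policy; the helper name and the choice of which cached contact to promote are assumptions, not the library's code:

MAX_FAILED_RPCS = 5  # the literal used inline in the diff above

def note_failed_rpc(buckets, replacement_cache, bucket_index, contact):
    # Count the failure; below the threshold the contact is left alone.
    contact.failedRPCs += 1
    if contact.failedRPCs < MAX_FAILED_RPCS:
        return
    # Drop the stale contact and, if possible, promote a cached replacement.
    buckets[bucket_index].removeContact(contact.id)
    cached = replacement_cache.get(bucket_index)
    if cached:
        buckets[bucket_index].addContact(cached.pop())  # assumed: most recently seen first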

View file

@ -100,4 +100,4 @@ def get_hosts_for_hash_in_dht():
def announce_hash_to_dht(): def announce_hash_to_dht():
run_dht_script(announce_hash) run_dht_script(announce_hash)

View file

@ -25,7 +25,6 @@
import sys, hashlib, random import sys, hashlib, random
import twisted.internet.reactor import twisted.internet.reactor
from lbrynet.dht.node import Node from lbrynet.dht.node import Node
#from entangled.kademlia.datastore import SQLiteDataStore
# The Entangled DHT node; instantiated in the main() method # The Entangled DHT node; instantiated in the main() method
node = None node = None
@ -77,7 +76,6 @@ def getValue():
binascii.unhexlify("f7d9dc4de674eaa2c5a022eb95bc0d33ec2e75c6")) binascii.unhexlify("f7d9dc4de674eaa2c5a022eb95bc0d33ec2e75c6"))
deferredResult = node.iterativeFindValue( deferredResult = node.iterativeFindValue(
binascii.unhexlify("f7d9dc4de674eaa2c5a022eb95bc0d33ec2e75c6")) binascii.unhexlify("f7d9dc4de674eaa2c5a022eb95bc0d33ec2e75c6"))
#deferredResult = node.iterativeFindValue(KEY)
# Add a callback to this result; this will be called as soon as the operation has completed # Add a callback to this result; this will be called as soon as the operation has completed
deferredResult.addCallback(getValueCallback) deferredResult.addCallback(getValueCallback)
# As before, add the generic error callback # As before, add the generic error callback
@ -91,19 +89,8 @@ def getValueCallback(result):
# contacts would be returned instead") # contacts would be returned instead")
print "Got the value" print "Got the value"
print result print result
#if type(result) == dict:
# for v in result[binascii.unhexlify("5292fa9c426621f02419f5050900392bdff5036c")]:
# print "v:", v
# print "v[6:", v[6:]
# print "lbryid:",lbryid
# print "lbryid == v[6:]:", lbryid == v[6:]
# print 'Value successfully retrieved: %s' % result[KEY]
#else:
# print 'Value not found'
# Either way, schedule a "delete" operation for the key # Either way, schedule a "delete" operation for the key
#print 'Scheduling removal in 2.5 seconds...'
#twisted.internet.reactor.callLater(2.5, deleteValue)
print 'Scheduling shutdown in 2.5 seconds...' print 'Scheduling shutdown in 2.5 seconds...'
twisted.internet.reactor.callLater(2.5, stop) twisted.internet.reactor.callLater(2.5, stop)
@ -151,9 +138,6 @@ if __name__ == '__main__':
print 'Run this script without any arguments for info.\n' print 'Run this script without any arguments for info.\n'
# Set up SQLite-based data store (you could use an in-memory store instead, for example) # Set up SQLite-based data store (you could use an in-memory store instead, for example)
#if os.path.isfile('/tmp/dbFile%s.db' % sys.argv[1]):
# os.remove('/tmp/dbFile%s.db' % sys.argv[1])
#dataStore = SQLiteDataStore(dbFile = '/tmp/dbFile%s.db' % sys.argv[1])
# #
# Create the Entangled node. It extends the functionality of a # Create the Entangled node. It extends the functionality of a
# basic Kademlia node (but is fully backwards-compatible with a # basic Kademlia node (but is fully backwards-compatible with a
@ -162,14 +146,12 @@ if __name__ == '__main__':
# If you wish to have a pure Kademlia network, use the # If you wish to have a pure Kademlia network, use the
# entangled.kademlia.node.Node class instead # entangled.kademlia.node.Node class instead
print 'Creating Node...' print 'Creating Node...'
#node = EntangledNode( udpPort=int(sys.argv[1]), dataStore=dataStore )
node = Node(udpPort=int(sys.argv[1]), lbryid=lbryid) node = Node(udpPort=int(sys.argv[1]), lbryid=lbryid)
# Schedule the node to join the Kademlia/Entangled DHT # Schedule the node to join the Kademlia/Entangled DHT
node.joinNetwork(knownNodes) node.joinNetwork(knownNodes)
# Schedule the "storeValue() call to be invoked after 2.5 seconds, # Schedule the "storeValue() call to be invoked after 2.5 seconds,
#using KEY and VALUE as arguments #using KEY and VALUE as arguments
#twisted.internet.reactor.callLater(2.5, storeValue, KEY, VALUE)
twisted.internet.reactor.callLater(2.5, getValue) twisted.internet.reactor.callLater(2.5, getValue)
# Start the Twisted reactor - this fires up all networking, and # Start the Twisted reactor - this fires up all networking, and
# allows the scheduled join operation to take place # allows the scheduled join operation to take place

View file

@ -36,4 +36,4 @@ class EncryptedFileMetadataHandler(object):
else: else:
log.debug("Setting _final_blob_num to %s", str(blob_num - 1)) log.debug("Setting _final_blob_num to %s", str(blob_num - 1))
self._final_blob_num = blob_num - 1 self._final_blob_num = blob_num - 1
return infos return infos

View file

@ -74,10 +74,8 @@ class ManagedEncryptedFileDownloader(EncryptedFileSaver):
d.addCallbacks(_save_claim_id, lambda err: _notify_bad_claim(name, txid, nout)) d.addCallbacks(_save_claim_id, lambda err: _notify_bad_claim(name, txid, nout))
return d return d
reflector_server = random.choice(settings.reflector_servers)
d.addCallback(_save_stream_info) d.addCallback(_save_stream_info)
d.addCallback(lambda _: reupload.check_and_restore_availability(self, reflector_server)) d.addCallback(lambda _: self._reupload())
d.addCallback(lambda _: self.lbry_file_manager.get_lbry_file_status(self)) d.addCallback(lambda _: self.lbry_file_manager.get_lbry_file_status(self))
def restore_status(status): def restore_status(status):
@ -92,6 +90,12 @@ class ManagedEncryptedFileDownloader(EncryptedFileSaver):
d.addCallback(restore_status) d.addCallback(restore_status)
return d return d
def _reupload(self):
if not settings.reflector_reupload:
return
reflector_server = random.choice(settings.reflector_servers)
return reupload.check_and_restore_availability(self, reflector_server)
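The new _reupload helper gates reflector re-upload behind the reflector_reupload setting introduced in this PR. The same pattern, pulled out as a free function purely for illustration; the settings attributes and the check_and_restore_availability call mirror the diff, while the function itself is only a sketch:

import random

def maybe_reupload(lbry_file, settings, check_and_restore_availability):
    # Skip re-upload entirely when the flag is off.
    if not settings.reflector_reupload:
        return None
    # Otherwise pick a random reflector server, as the method above does.
    reflector_server = random.choice(settings.reflector_servers)
    return check_and_restore_availability(lbry_file, reflector_server)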
def stop(self, err=None, change_status=True): def stop(self, err=None, change_status=True):
def set_saving_status_done(): def set_saving_status_done():

View file

@ -3,4 +3,4 @@ class EncryptedFileStatusReport(object):
self.name = name self.name = name
self.num_completed = num_completed self.num_completed = num_completed
self.num_known = num_known self.num_known = num_known
self.running_status = running_status self.running_status = running_status

View file

@ -4,4 +4,4 @@ Classes and functions used to create and download LBRY Files.
LBRY Files are Crypt Streams created from any regular file. The whole file is read LBRY Files are Crypt Streams created from any regular file. The whole file is read
at the time that the LBRY File is created, so all constituent blobs are known and at the time that the LBRY File is created, so all constituent blobs are known and
included in the stream descriptor file. included in the stream descriptor file.
""" """

View file

@ -21,4 +21,4 @@ class LiveStreamBlobMaker(CryptStreamBlobMaker):
def _return_info(self, blob_hash): def _return_info(self, blob_hash):
return LiveBlobInfo(blob_hash, self.blob_num, self.length, binascii.hexlify(self.iv), return LiveBlobInfo(blob_hash, self.blob_num, self.length, binascii.hexlify(self.iv),
self.revision, None) self.revision, None)

View file

@ -152,7 +152,6 @@ class StdinStreamProducer(object):
self.finished_deferred = defer.Deferred() self.finished_deferred = defer.Deferred()
self.consumer.registerProducer(self, True) self.consumer.registerProducer(self, True)
#self.reader = process.ProcessReader(reactor, self, 'read', 0)
self.resumeProducing() self.resumeProducing()
return self.finished_deferred return self.finished_deferred
@ -174,4 +173,4 @@ class StdinStreamProducer(object):
self.consumer.write(data) self.consumer.write(data)
def childConnectionLost(self, fd, reason): def childConnectionLost(self, fd, reason):
self.stopProducing() self.stopProducing()

View file

@ -387,4 +387,4 @@ class TempLiveStreamMetadataManager(DHTHashSupplier):
if announce_time < current_time: if announce_time < current_time:
self.streams[stream_hash]['announce_time'] = next_announce_time self.streams[stream_hash]['announce_time'] = next_announce_time
stream_hashes.append(stream_hash) stream_hashes.append(stream_hash)
return stream_hashes return stream_hashes

View file

@ -135,4 +135,4 @@ class LiveStreamDescriptorValidator(object):
return info return info
def get_length_of_stream(self): def get_length_of_stream(self):
return None return None

View file

@ -46,7 +46,6 @@ class LiveStreamDownloader(_LiveStreamDownloader):
_LiveStreamDownloader.__init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, _LiveStreamDownloader.__init__(self, stream_hash, peer_finder, rate_limiter, blob_manager,
stream_info_manager, payment_rate_manager, wallet, upload_allowed) stream_info_manager, payment_rate_manager, wallet, upload_allowed)
#self.writer = process.ProcessWriter(reactor, self, 'write', 1)
def _get_metadata_handler(self, download_manager): def _get_metadata_handler(self, download_manager):
return LiveStreamMetadataHandler(self.stream_hash, self.stream_info_manager, return LiveStreamMetadataHandler(self.stream_hash, self.stream_info_manager,
@ -61,7 +60,6 @@ class LiveStreamDownloader(_LiveStreamDownloader):
def _get_write_func(self): def _get_write_func(self):
def write_func(data): def write_func(data):
if self.stopped is False: if self.stopped is False:
#self.writer.write(data)
pass pass
return write_func return write_func

View file

@ -344,4 +344,4 @@ class LiveStreamMetadataHandler(object):
peer.update_score(-10.0) peer.update_score(-10.0)
if reason.check(ConnectionClosedBeforeResponseError): if reason.check(ConnectionClosedBeforeResponseError):
return return
return reason return reason

View file

@ -71,4 +71,4 @@ class LiveStreamOptions(object):
"Allow" "Allow"
), ),
] ]
return options return options

View file

@ -88,4 +88,4 @@ class LiveStreamProgressManager(StreamProgressManager):
reactor.callLater(0, self._output_loop) reactor.callLater(0, self._output_loop)
else: else:
self.outputting_d.callback(True) self.outputting_d.callback(True)
self.outputting_d = None self.outputting_d = None

View file

@ -181,4 +181,4 @@ class CryptBlobInfoQueryHandler(object):
dl.addCallback(ensure_streams_match) dl.addCallback(ensure_streams_match)
dl.addCallback(lambda _: get_blob_infos()) dl.addCallback(lambda _: get_blob_infos())
return dl return dl

View file

@ -39,7 +39,8 @@ from lbrynet.lbrynet_daemon.Downloader import GetStream
from lbrynet.lbrynet_daemon.Publisher import Publisher from lbrynet.lbrynet_daemon.Publisher import Publisher
from lbrynet.lbrynet_daemon.ExchangeRateManager import ExchangeRateManager from lbrynet.lbrynet_daemon.ExchangeRateManager import ExchangeRateManager
from lbrynet.lbrynet_daemon.auth.server import AuthJSONRPCServer from lbrynet.lbrynet_daemon.auth.server import AuthJSONRPCServer
from lbrynet.core import log_support, utils, Platform from lbrynet.core import log_support, utils
from lbrynet.core import system_info
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier, download_sd_blob from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier, download_sd_blob
from lbrynet.core.StreamDescriptor import BlobStreamDescriptorReader from lbrynet.core.StreamDescriptor import BlobStreamDescriptorReader
from lbrynet.core.Session import Session from lbrynet.core.Session import Session
@ -354,7 +355,7 @@ class Daemon(AuthJSONRPCServer):
def _get_platform(self): def _get_platform(self):
if self.platform is None: if self.platform is None:
self.platform = Platform.get_platform() self.platform = system_info.get_platform()
self.platform["ui_version"] = self.lbry_ui_manager.loaded_git_version self.platform["ui_version"] = self.lbry_ui_manager.loaded_git_version
return self.platform return self.platform
@ -407,7 +408,7 @@ class Daemon(AuthJSONRPCServer):
# claim_out is dictionary containing 'txid' and 'nout' # claim_out is dictionary containing 'txid' and 'nout'
def _add_to_pending_claims(self, name, claim_out): def _add_to_pending_claims(self, name, claim_out):
txid = claim_out['txid'] txid = claim_out['txid']
nout = claim_out['nout'] nout = claim_out['nout']
log.info("Adding lbry://%s to pending claims, txid %s nout %d" % (name, txid, nout)) log.info("Adding lbry://%s to pending claims, txid %s nout %d" % (name, txid, nout))
self.pending_claims[name] = (txid, nout) self.pending_claims[name] = (txid, nout)
return claim_out return claim_out
@ -602,14 +603,14 @@ class Daemon(AuthJSONRPCServer):
d = defer.succeed(None) d = defer.succeed(None)
d.addCallback(lambda _: self._stop_server()) d.addCallback(lambda _: self._stop_server())
d.addErrback(log_support.failure, log, 'Failure while shutting down: %s') d.addErrback(log.fail(), 'Failure while shutting down')
d.addCallback(lambda _: self._stop_reflector()) d.addCallback(lambda _: self._stop_reflector())
d.addErrback(log_support.failure, log, 'Failure while shutting down: %s') d.addErrback(log.fail(), 'Failure while shutting down')
d.addCallback(lambda _: self._stop_file_manager()) d.addCallback(lambda _: self._stop_file_manager())
d.addErrback(log_support.failure, log, 'Failure while shutting down: %s') d.addErrback(log.fail(), 'Failure while shutting down')
if self.session is not None: if self.session is not None:
d.addCallback(lambda _: self.session.shut_down()) d.addCallback(lambda _: self.session.shut_down())
d.addErrback(log_support.failure, log, 'Failure while shutting down: %s') d.addErrback(log.fail(), 'Failure while shutting down')
return d return d
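These hunks swap log_support.failure(err, log, msg) errbacks for log.fail(...), an errback factory exercised by the new test_log_support.py further down: it logs the failure with the supplied message and, optionally, forwards the failure to a callback. A hedged reconstruction of that call shape; the real helper lives in lbrynet.core.log_support, so treat this as an illustration of the pattern rather than its implementation:

import logging

class Logger(logging.Logger):
    def fail(self, callback=None, *callback_args):
        # Used as d.addErrback(log.fail(callback, *cb_args), 'message with %s', arg).
        def errback(failure, msg='', *msg_args):
            # Logs "<formatted message>: <error>", the format asserted in the
            # new test ("My message: terrible things happened").
            self.error(msg + ': %s', *(tuple(msg_args) + (failure.getErrorMessage(),)))
            if callback is not None:
                return callback(failure, *callback_args)
        return errback

With a logger of this shape, the shutdown chain above reduces to d.addErrback(log.fail(), 'Failure while shutting down'), and DaemonControl below passes a kill callback plus its argument the same way.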
def _update_settings(self, settings): def _update_settings(self, settings):
@ -1353,7 +1354,7 @@ class Daemon(AuthJSONRPCServer):
""" """
if not p: if not p:
return self._render_response(self.callable_methods.keys(), OK_CODE) return self._render_response(sorted(self.callable_methods.keys()), OK_CODE)
elif 'callable_during_start' in p.keys(): elif 'callable_during_start' in p.keys():
return self._render_response(self.allowed_during_startup, OK_CODE) return self._render_response(self.allowed_during_startup, OK_CODE)
elif 'function' in p.keys(): elif 'function' in p.keys():
@ -1468,9 +1469,20 @@ class Daemon(AuthJSONRPCServer):
return self._render_response(None, BAD_REQUEST) return self._render_response(None, BAD_REQUEST)
d = self._resolve_name(name, force_refresh=force) d = self._resolve_name(name, force_refresh=force)
# TODO: this is the rpc call that returns a server.failure.
# what is up with that?
d.addCallbacks( d.addCallbacks(
lambda info: self._render_response(info, OK_CODE), lambda info: self._render_response(info, OK_CODE),
errback=handle_failure, errbackArgs=('Failed to resolve name: %s',) # TODO: Is server.failure a module? It looks like it:
#
# In [1]: import twisted.web.server
# In [2]: twisted.web.server.failure
# Out[2]: <module 'twisted.python.failure' from
# '.../site-packages/twisted/python/failure.pyc'>
#
# If so, maybe we should return something else.
errback=log.fail(lambda: server.failure),
errbackArgs=('Failed to resolve name: %s',)
) )
return d return d
@ -1498,7 +1510,7 @@ class Daemon(AuthJSONRPCServer):
'name': name to look up, string, do not include lbry:// prefix 'name': name to look up, string, do not include lbry:// prefix
'txid': optional, if specified, look for claim with this txid 'txid': optional, if specified, look for claim with this txid
'nout': optional, if specified, look for claim with this nout 'nout': optional, if specified, look for claim with this nout
Returns: Returns:
txid, amount, value, n, height txid, amount, value, n, height
""" """
@ -1697,8 +1709,6 @@ class Daemon(AuthJSONRPCServer):
'metadata': metadata dictionary 'metadata': metadata dictionary
optional 'fee' optional 'fee'
Returns: Returns:
'success' : True if claim was succesful , False otherwise
'reason' : if not succesful, give reason
'txid' : txid of resulting transaction if successful 'txid' : txid of resulting transaction if successful
'nout' : nout of the resulting support claim if successful 'nout' : nout of the resulting support claim if successful
'fee' : fee paid for the claim transaction if successful 'fee' : fee paid for the claim transaction if successful
@ -1773,8 +1783,6 @@ class Daemon(AuthJSONRPCServer):
'txid': txid of claim, string 'txid': txid of claim, string
'nout': nout of claim, integer 'nout': nout of claim, integer
Return: Return:
success : True if succesful , False otherwise
reason : if not succesful, give reason
txid : txid of resulting transaction if successful txid : txid of resulting transaction if successful
fee : fee paid for the transaction if successful fee : fee paid for the transaction if successful
""" """
@ -1818,8 +1826,6 @@ class Daemon(AuthJSONRPCServer):
'claim_id': claim id of claim to support 'claim_id': claim id of claim to support
'amount': amount to support by 'amount': amount to support by
Return: Return:
success : True if succesful , False otherwise
reason : if not succesful, give reason
txid : txid of resulting transaction if successful txid : txid of resulting transaction if successful
nout : nout of the resulting support claim if successful nout : nout of the resulting support claim if successful
fee : fee paid for the transaction if successful fee : fee paid for the transaction if successful
@ -2682,18 +2688,6 @@ def get_lbry_file_search_value(p):
raise NoValidSearch() raise NoValidSearch()
def handle_failure(err, msg):
log_support.failure(err, log, msg)
# TODO: Is this a module? It looks like it:
#
# In [1]: import twisted.web.server
# In [2]: twisted.web.server.failure
# Out[2]: <module 'twisted.python.failure' from '.../site-packages/twisted/python/failure.pyc'>
#
# If so, maybe we should return something else.
return server.failure
def run_reflector_factory(factory): def run_reflector_factory(factory):
reflector_server = random.choice(conf.settings.reflector_servers) reflector_server = random.choice(conf.settings.reflector_servers)
reflector_address, reflector_port = reflector_server reflector_address, reflector_port = reflector_server

View file

@ -117,9 +117,8 @@ def update_settings_from_args(args):
settings.update(to_pass) settings.update(to_pass)
def log_and_kill(failure, analytics_manager): def kill(failure, analytics_manager):
analytics_manager.send_server_startup_error(failure.getErrorMessage() + " " + str(failure)) analytics_manager.send_server_startup_error(failure.getErrorMessage() + " " + str(failure))
log_support.failure(failure, log, 'Failed to startup: %s')
reactor.callFromThread(reactor.stop) reactor.callFromThread(reactor.stop)
@ -130,14 +129,13 @@ def start_server_and_listen(launchui, use_auth, analytics_manager):
launchui: set to true to open a browser window launchui: set to true to open a browser window
use_auth: set to true to enable http authentication use_auth: set to true to enable http authentication
analytics_manager: to send analytics analytics_manager: to send analytics
kwargs: passed along to `DaemonServer().start()`
""" """
daemon_server = DaemonServer(analytics_manager) daemon_server = DaemonServer(analytics_manager)
d = daemon_server.start(use_auth) d = daemon_server.start(use_auth)
if launchui: if launchui:
d.addCallback(lambda _: webbrowser.open(settings.UI_ADDRESS)) d.addCallback(lambda _: webbrowser.open(settings.UI_ADDRESS))
d.addCallback(lambda _: analytics_manager.send_server_startup_success()) d.addCallback(lambda _: analytics_manager.send_server_startup_success())
d.addErrback(log_and_kill, analytics_manager) d.addErrback(log.fail(kill, analytics_manager), 'Failed to startup')
if __name__ == "__main__": if __name__ == "__main__":

View file

@ -76,7 +76,6 @@ class DaemonRequest(server.Request):
try: try:
self.content.seek(0, 0) self.content.seek(0, 0)
args.update(self.parse_multipart(self.content, pdict)) args.update(self.parse_multipart(self.content, pdict))
#args.update(cgi.parse_multipart(self.content, pdict))
except KeyError as e: except KeyError as e:
if e.args[0] == b'content-disposition': if e.args[0] == b'content-disposition':

View file

@ -64,4 +64,4 @@ def create_auth_session(root):
portal_to_realm = portal.Portal(realm, [checker, ]) portal_to_realm = portal.Portal(realm, [checker, ])
factory = guard.BasicCredentialFactory('Login to lbrynet api') factory = guard.BasicCredentialFactory('Login to lbrynet api')
_lbrynet_server = guard.HTTPAuthSessionWrapper(portal_to_realm, [factory, ]) _lbrynet_server = guard.HTTPAuthSessionWrapper(portal_to_realm, [factory, ])
return _lbrynet_server return _lbrynet_server

View file

@ -5,7 +5,6 @@ import random
from twisted.internet import threads, defer, reactor from twisted.internet import threads, defer, reactor
from lbrynet.core import log_support
from lbrynet.lbryfilemanager.EncryptedFileCreator import create_lbry_file from lbrynet.lbryfilemanager.EncryptedFileCreator import create_lbry_file
from lbrynet.lbryfile.StreamDescriptor import publish_sd_blob from lbrynet.lbryfile.StreamDescriptor import publish_sd_blob
from lbrynet.metadata.Metadata import Metadata from lbrynet.metadata.Metadata import Metadata
@ -40,11 +39,11 @@ class Publisher(object):
def start(self, name, file_path, bid, metadata): def start(self, name, file_path, bid, metadata):
log.info('Starting publish for %s', name) log.info('Starting publish for %s', name)
def _show_result(): def _show_result():
log.info("Success! Published %s --> lbry://%s txid: %s nout: %d", log.info("Success! Published %s --> lbry://%s txid: %s nout: %d",
self.file_name, self.publish_name, self.txid, self.nout) self.file_name, self.publish_name, self.txid, self.nout)
out = {} out = {}
out['nout'] = self.nout out['nout'] = self.nout
out['txid'] = self.txid out['txid'] = self.txid
return defer.succeed(out) return defer.succeed(out)
self.publish_name = name self.publish_name = name
@ -68,7 +67,12 @@ class Publisher(object):
d.addCallback(lambda _: self._claim_name()) d.addCallback(lambda _: self._claim_name())
d.addCallback(lambda _: self.set_status()) d.addCallback(lambda _: self.set_status())
d.addCallback(lambda _: self.start_reflector()) d.addCallback(lambda _: self.start_reflector())
d.addCallbacks(lambda _: _show_result(), self._show_publish_error) d.addCallbacks(
lambda _: _show_result(),
errback=log.fail(self._throw_publish_error),
errbackArgs=(
"An error occurred publishing %s to %s", self.file_name, self.publish_name)
)
return d return d
def start_reflector(self): def start_reflector(self):
@ -137,10 +141,10 @@ class Publisher(object):
msg = 'Failed to claim name:{}'.format(claim_out['reason']) msg = 'Failed to claim name:{}'.format(claim_out['reason'])
defer.fail(Exception(msg)) defer.fail(Exception(msg))
txid = claim_out['txid'] txid = claim_out['txid']
nout = claim_out['nout'] nout = claim_out['nout']
log.debug('Name claimed using txid: %s, nout: %d', txid, nout) log.debug('Name claimed using txid: %s, nout: %d', txid, nout)
self.txid = txid self.txid = txid
self.nout = nout self.nout = nout
d = self.wallet.claim_name(self.publish_name, self.bid_amount, m) d = self.wallet.claim_name(self.publish_name, self.bid_amount, m)
d.addCallback(set_txid_nout) d.addCallback(set_txid_nout)
@ -151,11 +155,13 @@ class Publisher(object):
self.metadata['content_type'] = get_content_type(filename) self.metadata['content_type'] = get_content_type(filename)
self.metadata['ver'] = Metadata.current_version self.metadata['ver'] = Metadata.current_version
def _show_publish_error(self, err): def _throw_publish_error(self, err):
log_support.failure( # TODO: I'm not a fan of the log and re-throw, especially when
err, log, "An error occurred publishing %s to %s. Error: %s.", # the new exception is more generic. Look over this to
self.file_name, self.publish_name) # see if there is a reason not to remove the errback
return defer.fail(Exception("Publish failed")) # handler and allow the original exception to move up
# the stack.
raise Exception("Publish failed")
def get_content_type(filename): def get_content_type(filename):

View file

@ -10,7 +10,6 @@ from twisted.python.failure import Failure
from txjsonrpc import jsonrpclib from txjsonrpc import jsonrpclib
from lbrynet.core.Error import InvalidAuthenticationToken, InvalidHeaderError, SubhandlerError from lbrynet.core.Error import InvalidAuthenticationToken, InvalidHeaderError, SubhandlerError
from lbrynet.conf import settings from lbrynet.conf import settings
from lbrynet.core import log_support
from lbrynet.lbrynet_daemon.auth.util import APIKey, get_auth_message from lbrynet.lbrynet_daemon.auth.util import APIKey, get_auth_message
from lbrynet.lbrynet_daemon.auth.client import LBRY_SECRET from lbrynet.lbrynet_daemon.auth.client import LBRY_SECRET
@ -117,11 +116,6 @@ class AuthJSONRPCServer(AuthorizedBase):
request.write(fault) request.write(fault)
request.finish() request.finish()
def _log_and_render_error(self, failure, request, message=None, **kwargs):
msg = message or "API Failure: %s"
log_support.failure(Failure(failure), log, msg)
self._render_error(failure, request, **kwargs)
def render(self, request): def render(self, request):
notify_finish = request.notifyFinish() notify_finish = request.notifyFinish()
assert self._check_headers(request), InvalidHeaderError assert self._check_headers(request), InvalidHeaderError
@ -192,7 +186,10 @@ class AuthJSONRPCServer(AuthorizedBase):
# cancel the response if the connection is broken # cancel the response if the connection is broken
notify_finish.addErrback(self._response_failed, d) notify_finish.addErrback(self._response_failed, d)
d.addCallback(self._callback_render, request, version, reply_with_next_secret) d.addCallback(self._callback_render, request, version, reply_with_next_secret)
d.addErrback(self._log_and_render_error, request, version=version) d.addErrback(
log.fail(self._render_error, request, version=version),
'Failed to process %s', function_name
)
return server.NOT_DONE_YET return server.NOT_DONE_YET
def _register_user_session(self, session_id): def _register_user_session(self, session_id):
@ -285,7 +282,6 @@ class AuthJSONRPCServer(AuthorizedBase):
assert api_key.compare_hmac(to_auth, token), InvalidAuthenticationToken assert api_key.compare_hmac(to_auth, token), InvalidAuthenticationToken
def _update_session_secret(self, session_id): def _update_session_secret(self, session_id):
# log.info("Generating new token for next request")
self.sessions.update({session_id: APIKey.new(name=session_id)}) self.sessions.update({session_id: APIKey.new(name=session_id)})
def _get_jsonrpc_version(self, version=None, id=None): def _get_jsonrpc_version(self, version=None, id=None):

View file

@ -89,4 +89,4 @@ def initialize_api_key_file(key_path):
def get_auth_message(message_dict): def get_auth_message(message_dict):
return json.dumps(message_dict, sort_keys=True) return json.dumps(message_dict, sort_keys=True)

View file

@ -65,4 +65,4 @@ class Autofetcher(object):
def run(api): def run(api):
fetcher = Autofetcher(api) fetcher = Autofetcher(api)
fetcher.start() fetcher.start()

View file

@ -38,7 +38,7 @@ class Metadata(StructuredDict):
def __init__(self, metadata, migrate=True, target_version=None): def __init__(self, metadata, migrate=True, target_version=None):
if not isinstance(metadata, dict): if not isinstance(metadata, dict):
raise TypeError("metadata is not a dictionary") raise TypeError("metadata is not a dictionary")
starting_version = metadata.get('ver', '0.0.1') starting_version = metadata.get('ver', '0.0.1')
StructuredDict.__init__(self, metadata, starting_version, migrate, target_version) StructuredDict.__init__(self, metadata, starting_version, migrate, target_version)

View file

@ -7,4 +7,4 @@ registering. The public key is used to spend points, and also used as an address
are sent. To spend points, the public key signs a message containing the amount and the destination are sent. To spend points, the public key signs a message containing the amount and the destination
public key and sends it to the point trader server. To check for payments, the recipient sends a public key and sends it to the point trader server. To check for payments, the recipient sends a
signed message asking the point trader server for its balance. signed message asking the point trader server for its balance.
""" """

View file

@ -5,7 +5,6 @@ from twisted.protocols.basic import FileSender
from twisted.internet.protocol import Protocol, ClientFactory from twisted.internet.protocol import Protocol, ClientFactory
from twisted.internet import defer, error from twisted.internet import defer, error
from lbrynet.core import log_support
from lbrynet.reflector.common import IncompleteResponse from lbrynet.reflector.common import IncompleteResponse
@ -153,8 +152,7 @@ class BlobReflectorClient(Protocol):
'blob_size': self.next_blob_to_send.length 'blob_size': self.next_blob_to_send.length
})) }))
def log_fail_and_disconnect(self, err, blob_hash): def disconnect(self, err):
log_support.failure(err, log, "Error reflecting blob %s: %s", blob_hash)
self.transport.loseConnection() self.transport.loseConnection()
def send_next_request(self): def send_next_request(self):
@ -172,7 +170,9 @@ class BlobReflectorClient(Protocol):
# send the server the next blob hash + length # send the server the next blob hash + length
d.addCallbacks( d.addCallbacks(
lambda _: self.send_blob_info(), lambda _: self.send_blob_info(),
lambda err: self.log_fail_and_disconnect(err, blob_hash)) errback=log.fail(self.disconnect),
errbackArgs=("Error reflecting blob %s", blob_hash)
)
return d return d
else: else:
# close connection # close connection

View file

@ -30,7 +30,6 @@ class ReflectorServer(Protocol):
def dataReceived(self, data): def dataReceived(self, data):
if self.receiving_blob: if self.receiving_blob:
# log.debug('Writing data to blob')
self.blob_write(data) self.blob_write(data)
else: else:
log.debug('Not yet receiving blob, data needs further processing') log.debug('Not yet receiving blob, data needs further processing')

View file

@ -165,7 +165,6 @@ class SysTrayIcon(object):
def show_menu(self): def show_menu(self):
menu = win32gui.CreatePopupMenu() menu = win32gui.CreatePopupMenu()
self.create_menu(menu, self.menu_options) self.create_menu(menu, self.menu_options)
# win32gui.SetMenuDefaultItem(menu, 1000, 0)
pos = win32gui.GetCursorPos() pos = win32gui.GetCursorPos()
# See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp # See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp

View file

@ -8,4 +8,7 @@ pylint -E --disable=inherit-non-class --disable=no-member \
--enable=unused-import \ --enable=unused-import \
--enable=bad-whitespace \ --enable=bad-whitespace \
--enable=line-too-long \ --enable=line-too-long \
--enable=trailing-whitespace \
--enable=missing-final-newline \
--enable=mixed-indentation \
lbrynet $@ lbrynet $@

View file

@ -51,14 +51,6 @@ requires = [
] ]
console_scripts = [ console_scripts = [
# 'lbrynet-stdin-uploader = lbrynet.lbrynet_console.LBRYStdinUploader:launch_stdin_uploader',
# 'lbrynet-stdout-downloader = lbrynet.lbrynet_console.LBRYStdoutDownloader:launch_stdout_downloader',
# 'lbrynet-create-network = lbrynet.create_network:main',
# 'lbrynet-launch-node = lbrynet.dht.node:main',
# 'lbrynet-launch-rpc-node = lbrynet.rpc_node:main',
# 'lbrynet-rpc-node-cli = lbrynet.node_rpc_cli:main',
# 'lbrynet-lookup-hosts-for-hash = lbrynet.dht_scripts:get_hosts_for_hash_in_dht',
# 'lbrynet-announce_hash_to_dht = lbrynet.dht_scripts:announce_hash_to_dht',
'lbrynet-daemon = lbrynet.lbrynet_daemon.DaemonControl:start', 'lbrynet-daemon = lbrynet.lbrynet_daemon.DaemonControl:start',
'stop-lbrynet-daemon = lbrynet.lbrynet_daemon.DaemonControl:stop', 'stop-lbrynet-daemon = lbrynet.lbrynet_daemon.DaemonControl:stop',
'lbrynet-cli = lbrynet.lbrynet_daemon.DaemonCLI:main' 'lbrynet-cli = lbrynet.lbrynet_daemon.DaemonCLI:main'

View file

@ -0,0 +1,4 @@
# log_support sets up the default Logger class
# and so we need to ensure that it is also
# set up for the tests
from lbrynet.core import log_support
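Per the comment, the import is needed only for its side effect: making sure the project's custom Logger class is installed before any test creates a logger. A plausible shape of that side effect, assumed here rather than verified against the module:

import logging

class Logger(logging.Logger):
    """Project logger with extra helpers such as fail(); see test_log_support below."""

# Assumption: importing lbrynet.core.log_support runs something equivalent to
# this, so logging.getLogger(...) in the tests returns the custom class.
logging.setLoggerClass(Logger)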

View file

@ -54,12 +54,13 @@ class NodeDataTest(unittest.TestCase):
h.update(str(i)) h.update(str(i))
self.cases.append((h.digest(), 5000+2*i)) self.cases.append((h.digest(), 5000+2*i))
self.cases.append((h.digest(), 5001+2*i)) self.cases.append((h.digest(), 5001+2*i))
#(('a', 'hello there\nthis is a test'), #(('a', 'hello there\nthis is a test'),
# ('b', unicode('jasdklfjklsdj;f2352352ljklzsdlkjkasf\ndsjklafsd')),
# ('e', 123),
# ('f', [('this', 'is', 1), {'complex': 'data entry'}]),
# ('aMuchLongerKeyThanAnyOfThePreviousOnes', 'some data')) # ('aMuchLongerKeyThanAnyOfThePreviousOnes', 'some data'))
def testStore(self): def testStore(self):
def check_val_in_result(r, peer_info): def check_val_in_result(r, peer_info):
@ -105,31 +106,17 @@ class NodeContactTest(unittest.TestCase):
self.failIf(contact in closestNodes, 'Node added itself as a contact') self.failIf(contact in closestNodes, 'Node added itself as a contact')
#class NodeLookupTest(unittest.TestCase):
# """ Test case for the Node class's iterative node lookup algorithm """ # """ Test case for the Node class's iterative node lookup algorithm """
# def setUp(self):
# import entangled.kademlia.contact
# self.node = entangled.kademlia.node.Node()
# self.remoteNodes = []
# for i in range(10):
# remoteNode = entangled.kademlia.node.Node()
# remoteContact = entangled.kademlia.contact.Contact(remoteNode.id, '127.0.0.1', 91827+i, self.node._protocol)
# self.remoteNodes.append(remoteNode)
# self.node.addContact(remoteContact)
# def testIterativeFindNode(self):
# """ Ugly brute-force test to see if the iterative node lookup algorithm runs without failing """ # """ Ugly brute-force test to see if the iterative node lookup algorithm runs without failing """
# import entangled.kademlia.protocol
# entangled.kademlia.protocol.reactor.listenUDP(91826, self.node._protocol)
# for i in range(10):
# entangled.kademlia.protocol.reactor.listenUDP(91827+i, self.remoteNodes[i]._protocol)
# df = self.node.iterativeFindNode(self.node.id)
# df.addBoth(lambda _: entangled.kademlia.protocol.reactor.stop())
# entangled.kademlia.protocol.reactor.run()
""" Some scaffolding for the NodeLookupTest class. Allows isolated node testing by simulating remote node responses""" """Some scaffolding for the NodeLookupTest class. Allows isolated
node testing by simulating remote node responses"""
from twisted.internet import protocol, defer, selectreactor from twisted.internet import protocol, defer, selectreactor
from lbrynet.dht.msgtypes import ResponseMessage from lbrynet.dht.msgtypes import ResponseMessage
@ -149,22 +136,17 @@ class FakeRPCProtocol(protocol.DatagramProtocol):
""" Fake RPC protocol; allows entangled.kademlia.contact.Contact objects to "send" RPCs """ """ Fake RPC protocol; allows entangled.kademlia.contact.Contact objects to "send" RPCs """
def sendRPC(self, contact, method, args, rawResponse=False): def sendRPC(self, contact, method, args, rawResponse=False):
#print method + " " + str(args)
if method == "findNode": if method == "findNode":
# get the specific contacts closest contacts # get the specific contacts closest contacts
closestContacts = [] closestContacts = []
#print "contact" + contact.id
for contactTuple in self.network: for contactTuple in self.network:
#print contactTuple[0].id
if contact == contactTuple[0]: if contact == contactTuple[0]:
# get the list of closest contacts for this contact # get the list of closest contacts for this contact
closestContactsList = contactTuple[1] closestContactsList = contactTuple[1]
#print "contact" + contact.id
# Pack the closest contacts into a ResponseMessage # Pack the closest contacts into a ResponseMessage
for closeContact in closestContactsList: for closeContact in closestContactsList:
#print closeContact.id
closestContacts.append((closeContact.id, closeContact.address, closeContact.port)) closestContacts.append((closeContact.id, closeContact.address, closeContact.port))
message = ResponseMessage("rpcId", contact.id, closestContacts) message = ResponseMessage("rpcId", contact.id, closestContacts)
@ -221,9 +203,11 @@ class NodeLookupTest(unittest.TestCase):
self.updPort = 81173 self.updPort = 81173
# create a dummy reactor # create a dummy reactor
#self._protocol.reactor.listenUDP(self.updPort, self._protocol)
self.contactsAmount = 80 self.contactsAmount = 80
# set the node ID manually for testing # set the node ID manually for testing
self.node.id = '12345678901234567800' self.node.id = '12345678901234567800'
@ -233,7 +217,6 @@ class NodeLookupTest(unittest.TestCase):
# create 160 bit node ID's for test purposes # create 160 bit node ID's for test purposes
self.testNodeIDs = [] self.testNodeIDs = []
#idNum = long(self.node.id.encode('hex'), 16)
idNum = int(self.node.id) idNum = int(self.node.id)
for i in range(self.contactsAmount): for i in range(self.contactsAmount):
# create the testNodeIDs in ascending order, away from the actual node ID, with regards to the distance metric # create the testNodeIDs in ascending order, away from the actual node ID, with regards to the distance metric
@ -284,7 +267,6 @@ class NodeLookupTest(unittest.TestCase):
for item in self.contacts[0:6]: for item in self.contacts[0:6]:
expectedResult.append(item.id) expectedResult.append(item.id)
#print item.id
# Get the result from the deferred # Get the result from the deferred
activeContacts = df.result activeContacts = df.result
@ -298,151 +280,7 @@ class NodeLookupTest(unittest.TestCase):
# Check that the received active contacts are the same as the input contacts # Check that the received active contacts are the same as the input contacts
self.failUnlessEqual(activeContacts, expectedResult, \ self.failUnlessEqual(activeContacts, expectedResult, \
"Active should only contain the closest possible contacts which were used as input for the boostrap") "Active should only contain the closest possible contacts which were used as input for the boostrap")
# def testFindingCloserNodes(self):
# """ Test discovery of closer contacts"""
#
# # Use input contacts that have knowledge of closer contacts,
# df = self.node._iterativeFind(self.node.id, self.contacts[50:53])
# #set the expected result
# expectedResult = []
# #print "############ Expected Active contacts #################"
# for item in self.contacts[0:9]:
# expectedResult.append(item.id)
# #print item.id
# #print "#######################################################"
#
# # Get the result from the deferred
# activeContacts = df.result
#
# #print "!!!!!!!!!!! Receieved Active contacts !!!!!!!!!!!!!!!"
# #for item in activeContacts:
# # print item.id
# #print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
#
# # Check the length of the active contacts
# self.failUnlessEqual(activeContacts.__len__(), expectedResult.__len__(), \
# "Length of received active contacts not as expected, should be %d" %expectedResult.__len__())
#
#
# # Check that the received active contacts are now closer to this node
# self.failUnlessEqual(activeContacts, expectedResult, \
# "Active contacts should now only contain the closest possible contacts")
# def testIterativeStore(self):
# """ test storing values """
#
# # create the network of contacts in format: (contact, closest contacts)
# contactNetwork = ((self.contacts[0], self.contacts[0:8]),
# (self.contacts[1], self.contacts[0:8]),
# (self.contacts[2], self.contacts[0:8]),
# (self.contacts[3], self.contacts[0:8]),
# (self.contacts[4], self.contacts[0:8]),
# (self.contacts[5], self.contacts[0:8]),
# (self.contacts[6], self.contacts[0:8]),
# (self.contacts[7], self.contacts[0:8]),
# (self.contacts[8], self.contacts[0:8]),
# (self.contacts[40], self.contacts[41:48]),
# (self.contacts[41], self.contacts[41:48]),
# (self.contacts[42], self.contacts[41:48]),
# (self.contacts[43], self.contacts[41:48]),
# (self.contacts[44], self.contacts[41:48]),
# (self.contacts[45], self.contacts[41:48]),
# (self.contacts[46], self.contacts[41:48]),
# (self.contacts[47], self.contacts[41:48]),
# (self.contacts[48], self.contacts[41:48]))
# contacts_with_datastores = []
#
# for contact_tuple in contactNetwork:
# contacts_with_datastores.append((contact_tuple[0], contact_tuple[1], lbrynet.dht.datastore.DictDataStore()))
#
# self._protocol.createNetwork(contacts_with_datastores)
#
#
# #self._protocol.createNetwork(contactNetwork)
#
#
# # Test storing a value that has an hash id close to the known contacts
# # The value should only be stored at those nodes
# value = 'value'
# valueID = self.contacts[40].id
#
# # Manually populate the routing table with contacts that have ID's close to the valueID
# for contact in self.contacts[40:48]:
# self.node.addContact(contact)
#
# # Manually populate the routing table with contacts that have ID's far away from the valueID
# for contact in self.contacts[0:8]:
# self.node.addContact(contact)
#
# # Store the value
# df = self.node.announceHaveBlob(valueID, value)
#
# storageNodes = df.result
#
# storageNodeIDs = []
# for item in storageNodes:
# storageNodeIDs.append(item.id)
# storageNodeIDs.sort()
# #print storageNodeIDs
#
# expectedIDs = []
# for item in self.contacts[40:43]:
# expectedIDs.append(item.id)
# #print expectedIDs
#
# #print '#### storage nodes ####'
# #for node in storageNodes:
# # print node.id
#
#
# # check that the value has been stored at nodes with ID's close to the valueID
# self.failUnlessEqual(storageNodeIDs, expectedIDs, \
# "Value not stored at nodes with ID's close to the valueID")
#
# def testFindValue(self):
# # create test values using the contact ID as the key
# testValues = ({self.contacts[0].id: "some test data"},
# {self.contacts[1].id: "some more test data"},
# {self.contacts[8].id: "and more data"}
# )
#
#
# # create the network of contacts in format: (contact, closest contacts, values)
# contactNetwork = ((self.contacts[0], self.contacts[0:6], testValues[0]),
# (self.contacts[1], self.contacts[0:6], testValues[1]),
# (self.contacts[2], self.contacts[0:6], {'2':'2'}),
# (self.contacts[3], self.contacts[0:6], {'4':'5'}),
# (self.contacts[4], self.contacts[0:6], testValues[2]),
# (self.contacts[5], self.contacts[0:6], {'2':'2'}),
# (self.contacts[6], self.contacts[0:6], {'2':'2'}))
#
# self._protocol.createNetwork(contactNetwork)
#
# # Initialise the routing table with some contacts
# for contact in self.contacts[0:4]:
# self.node.addContact(contact)
#
# # Initialise the node with some known contacts
# #self.node._iterativeFind(self.node.id, self.contacts[0:3])
#
# df = self.node.iterativeFindValue(testValues[1].keys()[0])
#
# resultDict = df.result
# keys = resultDict.keys()
#
# for key in keys:
# if key == 'closestNodeNoValue':
# print "closest contact without data " + " " + resultDict.get(key).id
# else:
# print "data key :" + key + "; " + "data: " + resultDict.get(key)
def suite(): def suite():
suite = unittest.TestSuite() suite = unittest.TestSuite()
@ -452,6 +290,7 @@ def suite():
suite.addTest(unittest.makeSuite(NodeLookupTest)) suite.addTest(unittest.makeSuite(NodeLookupTest))
return suite return suite
if __name__ == '__main__': if __name__ == '__main__':
# If this module is executed from the commandline, run all its tests # If this module is executed from the commandline, run all its tests
unittest.TextTestRunner().run(suite()) unittest.TextTestRunner().run(suite())

View file

@ -68,16 +68,12 @@ class ClientDatagramProtocol(lbrynet.dht.protocol.KademliaProtocol):
lbrynet.dht.protocol.KademliaProtocol.__init__(self, None) lbrynet.dht.protocol.KademliaProtocol.__init__(self, None)
def startProtocol(self): def startProtocol(self):
#self.transport.connect(self.destination[0], self.destination[1])
self.sendDatagram() self.sendDatagram()
def sendDatagram(self): def sendDatagram(self):
if len(self.data): if len(self.data):
self._send(self.data, self.msgID, self.destination) self._send(self.data, self.msgID, self.destination)
# def datagramReceived(self, datagram, host):
# print 'Datagram received: ', repr(datagram)
# self.sendDatagram()
@ -193,44 +189,6 @@ class KademliaProtocolTest(unittest.TestCase):
# The list of sent RPC messages should be empty at this stage # The list of sent RPC messages should be empty at this stage
self.failUnlessEqual(len(self.protocol._sentMessages), 0, 'The protocol is still waiting for a RPC result, but the transaction is already done!') self.failUnlessEqual(len(self.protocol._sentMessages), 0, 'The protocol is still waiting for a RPC result, but the transaction is already done!')
# def testDatagramLargeMessageReconstruction(self):
# """ Tests if a large amount of data can be successfully re-constructed from multiple UDP datagrams """
# remoteContact = lbrynet.dht.contact.Contact('node2', '127.0.0.1', 9182, self.protocol)
# self.node.addContact(remoteContact)
# self.error = None
# #responseData = 8143 * '0' # Threshold for a single packet transmission
# responseData = 300000 * '0'
# def handleError(f):
# if f.check((lbrynet.dht.protocol.TimeoutError)):
# self.error = 'RPC from the following contact timed out: %s' % f.getErrorMessage()
# else:
# self.error = 'An RPC error occurred: %s' % f.getErrorMessage()
# def handleResult(result):
# if result != responseData:
# self.error = 'Result from RPC is incorrect; expected "%s", got "%s"' % (responseData, result)
# # Publish the "local" node on the network
# lbrynet.dht.protocol.reactor.listenUDP(9182, self.protocol)
# # ...and make it think it is waiting for a result from an RPC
# msgID = 'abcdefghij1234567890'
# df = defer.Deferred()
# timeoutCall = lbrynet.dht.protocol.reactor.callLater(lbrynet.dht.constants.rpcTimeout, self.protocol._msgTimeout, msgID)
# self.protocol._sentMessages[msgID] = (remoteContact.id, df, timeoutCall)
# # Simulate the "reply" transmission
# msg = lbrynet.dht.msgtypes.ResponseMessage(msgID, 'node2', responseData)
# msgPrimitive = self.protocol._translator.toPrimitive(msg)
# encodedMsg = self.protocol._encoder.encode(msgPrimitive)
# udpClient = ClientDatagramProtocol()
# udpClient.data = encodedMsg
# udpClient.msgID = msgID
# lbrynet.dht.protocol.reactor.listenUDP(0, udpClient)
# df.addCallback(handleResult)
# df.addErrback(handleError)
# df.addBoth(lambda _: lbrynet.dht.protocol.reactor.stop())
# lbrynet.dht.protocol.reactor.run()
# self.failIf(self.error, self.error)
# # The list of sent RPC messages should be empty at this stage
# #self.failUnlessEqual(len(self.protocol._sentMessages), 0, 'The protocol is still waiting for a RPC result, but the transaction is already done!')
def suite(): def suite():
suite = unittest.TestSuite() suite = unittest.TestSuite()
@ -239,4 +197,4 @@ def suite():
if __name__ == '__main__': if __name__ == '__main__':
# If this module is executed from the commandline, run all its tests # If this module is executed from the commandline, run all its tests
unittest.TextTestRunner().run(suite()) unittest.TextTestRunner().run(suite())

View file

@ -167,7 +167,7 @@ class BlobAvailabilityTracker(BlobAvailability.BlobAvailabilityTracker):
self._dht_node = None self._dht_node = None
self._check_popular = None self._check_popular = None
self._check_mine = None self._check_mine = None
self._get_mean_peers() self._set_mean_peers()
def start(self): def start(self):
pass pass

View file

@ -0,0 +1,130 @@
from twisted.trial import unittest
from twisted.internet import threads, defer
from lbrynet.core.Wallet import Wallet
test_metadata = {
'license': 'NASA',
'fee': {'USD': {'amount': 0.01, 'address': 'baBYSK7CqGSn5KrEmNmmQwAhBSFgo6v47z'}},
'ver': '0.0.3',
'description': 'test',
'language': 'en',
'author': 'test',
'title': 'test',
'sources': {
'lbry_sd_hash': '8655f713819344980a9a0d67b198344e2c462c90f813e86f0c63789ab0868031f25c54d0bb31af6658e997e2041806eb'},
'nsfw': False,
'content_type': 'video/mp4',
'thumbnail': 'test'
}
class MocLbryumWallet(Wallet):
def __init__(self):
pass
def get_name_claims(self):
return threads.deferToThread(lambda: [])
def _save_name_metadata(self, name, claim_outpoint, sd_hash):
return defer.succeed(True)
class WalletTest(unittest.TestCase):
def test_failed_send_name_claim(self):
def not_enough_funds_send_name_claim(self, name, val, amount):
claim_out = {'success':False, 'reason':'Not enough funds'}
return claim_out
MocLbryumWallet._send_name_claim = not_enough_funds_send_name_claim
wallet = MocLbryumWallet()
d = wallet.claim_name('test', 1, test_metadata)
self.assertFailure(d,Exception)
return d
def test_successful_send_name_claim(self):
expected_claim_out = {
"claimid": "f43dc06256a69988bdbea09a58c80493ba15dcfa",
"fee": "0.00012",
"nout": 0,
"success": True,
"txid": "6f8180002ef4d21f5b09ca7d9648a54d213c666daf8639dc283e2fd47450269e"
}
def check_out(claim_out):
self.assertTrue('success' not in claim_out)
self.assertEqual(expected_claim_out['claimid'], claim_out['claimid'])
self.assertEqual(expected_claim_out['fee'], claim_out['fee'])
self.assertEqual(expected_claim_out['nout'], claim_out['nout'])
self.assertEqual(expected_claim_out['txid'], claim_out['txid'])
def success_send_name_claim(self, name, val, amount):
return expected_claim_out
MocLbryumWallet._send_name_claim = success_send_name_claim
wallet = MocLbryumWallet()
d = wallet.claim_name('test', 1, test_metadata)
d.addCallback(lambda claim_out: check_out(claim_out))
return d
def test_failed_support(self):
def failed_support_claim(self, name, claim_id, amount):
claim_out = {'success':False, 'reason':'Not enough funds'}
return threads.deferToThread(lambda: claim_out)
MocLbryumWallet._support_claim = failed_support_claim
wallet = MocLbryumWallet()
d = wallet.support_claim('test', "f43dc06256a69988bdbea09a58c80493ba15dcfa", 1)
self.assertFailure(d,Exception)
return d
def test_succesful_support(self):
expected_support_out = {
"fee": "0.000129",
"nout": 0,
"success": True,
"txid": "11030a76521e5f552ca87ad70765d0cc52e6ea4c0dc0063335e6cf2a9a85085f"
}
def check_out(claim_out):
self.assertTrue('success' not in claim_out)
self.assertEqual(expected_support_out['fee'], claim_out['fee'])
self.assertEqual(expected_support_out['nout'], claim_out['nout'])
self.assertEqual(expected_support_out['txid'], claim_out['txid'])
def success_support_claim(self, name, val, amount):
return threads.deferToThread(lambda: expected_support_out)
MocLbryumWallet._support_claim = success_support_claim
wallet = MocLbryumWallet()
d = wallet.support_claim('test', "f43dc06256a69988bdbea09a58c80493ba15dcfa", 1)
d.addCallback(lambda claim_out: check_out(claim_out))
return d
def test_failed_abandon(self):
def failed_abandon_claim(self, claim_outpoint):
claim_out = {'success':False, 'reason':'Not enough funds'}
return threads.deferToThread(lambda: claim_out)
MocLbryumWallet._abandon_claim = failed_abandon_claim
wallet = MocLbryumWallet()
d = wallet.abandon_claim("11030a76521e5f552ca87ad70765d0cc52e6ea4c0dc0063335e6cf2a9a85085f", 1)
self.assertFailure(d,Exception)
return d
def test_successful_abandon(self):
expected_abandon_out = {
"fee": "0.000096",
"success": True,
"txid": "0578c161ad8d36a7580c557d7444f967ea7f988e194c20d0e3c42c3cabf110dd"
}
def check_out(claim_out):
self.assertTrue('success' not in claim_out)
self.assertEqual(expected_abandon_out['fee'], claim_out['fee'])
self.assertEqual(expected_abandon_out['txid'], claim_out['txid'])
def success_abandon_claim(self, claim_outpoint):
return threads.deferToThread(lambda: expected_abandon_out)
MocLbryumWallet._abandon_claim = success_abandon_claim
wallet = MocLbryumWallet()
d = wallet.abandon_claim("0578c161ad8d36a7580c557d7444f967ea7f988e194c20d0e3c42c3cabf110dd", 1)
d.addCallback(lambda claim_out: check_out(claim_out))
return d
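Taken together, these tests pin down the contract of the wallet's claim, support and abandon helpers: a {'success': False, 'reason': ...} result must surface as a failure, and on success the success flag is stripped before the claim info reaches the caller. A compact restatement of that contract; the helper name is ours, not the wallet API:

def normalize_claim_out(claim_out):
    # Failure results raise, mirroring assertFailure(d, Exception) above.
    if not claim_out.get('success', True):
        raise Exception('Claim failed: %s' % claim_out.get('reason', 'unknown reason'))
    # Success results lose the 'success' flag, as check_out() asserts.
    claim_out = dict(claim_out)
    claim_out.pop('success', None)
    return claim_out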

View file

@ -0,0 +1,48 @@
import StringIO
import logging
import mock
from twisted.internet import defer
from twisted.trial import unittest
from lbrynet.core import log_support
class TestLogger(unittest.TestCase):
def raiseError(self):
raise Exception('terrible things happened')
def triggerErrback(self, callback=None):
d = defer.Deferred()
d.addCallback(lambda _: self.raiseError())
d.addErrback(self.log.fail(callback), 'My message')
d.callback(None)
return d
def setUp(self):
self.log = log_support.Logger('test')
self.stream = StringIO.StringIO()
handler = logging.StreamHandler(self.stream)
handler.setFormatter(logging.Formatter("%(filename)s:%(lineno)d - %(message)s"))
self.log.addHandler(handler)
def test_can_log_failure(self):
def output_lines():
return self.stream.getvalue().split('\n')
# the line number could change if this file gets refactored
expected_first_line = 'test_log_support.py:18 - My message: terrible things happened'
# testing the entirety of the message is futile as the
# traceback will depend on the system the test is being run on
# but hopefully these two tests are good enough
d = self.triggerErrback()
d.addCallback(lambda _: self.assertEqual(expected_first_line, output_lines()[0]))
d.addCallback(lambda _: self.assertEqual(10, len(output_lines())))
return d
def test_can_log_failure_with_callback(self):
callback = mock.Mock()
d = self.triggerErrback(callback)
d.addCallback(lambda _: callback.assert_called_once_with(mock.ANY))
return d
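
The two tests above pin down the behaviour expected of log_support.Logger.fail: it returns an errback-shaped callable, so it can be handed straight to addErrback together with a message; it logs that message followed by the failure's error text; and it forwards the failure to an optional callback. The real implementation lives in lbrynet.core.log_support and is not part of this diff; a minimal stand-in consistent with how these tests drive it could look like this (the traceback line count asserted above depends on details this sketch omits).

import logging


class FailLogger(logging.Logger):
    # Hypothetical stand-in for log_support.Logger, only to show the errback shape.
    def fail(self, callback=None):
        def _fail(err, msg, *args):
            # 'err' is the twisted Failure delivered by addErrback; 'msg' is the extra
            # positional argument passed to addErrback ('My message' in the test above).
            self.error(msg + ': %s', err.getErrorMessage())
            if callback is not None:
                callback(err)
        return _fail

Usage mirrors the test: d.addErrback(log.fail(callback), 'My message').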

View file

@ -17,7 +17,6 @@ import hashlib
class DictDataStoreTest(unittest.TestCase):
""" Basic tests case for the reference DataStore API and implementation """
def setUp(self):
#if not hasattr(self, 'ds'):
self.ds = lbrynet.dht.datastore.DictDataStore()
h = hashlib.sha1()
h.update('g')
@ -29,12 +28,6 @@ class DictDataStoreTest(unittest.TestCase):
h3.update('Boozoo Bajou - 09 - S.I.P.mp3')
hashKey3 = h3.digest()
#self.cases = (('a', 'hello there\nthis is a test'),
# ('b', unicode('jasdklfjklsdj;f2352352ljklzsdlkjkasf\ndsjklafsd')),
# ('e', 123),
# ('f', [('this', 'is', 1), {'complex': 'data entry'}]),
# ('aMuchLongerKeyThanAnyOfThePreviousOnes', 'some data'),
# (hashKey, 'some data'),
# (hashKey2, 'abcdefghijklmnopqrstuvwxz'),
# (hashKey3, '1 2 3 4 5 6 7 8 9 0'))
self.cases = ((hashKey, 'test1test1test1test1test1t'),
(hashKey, 'test2'),
@ -90,88 +83,37 @@ class DictDataStoreTest(unittest.TestCase):
self.failIf('val3' in self.ds.getPeersForBlob(h2), 'DataStore failed to delete an expired value! Value %s, publish time %s, current time %s' % ('val3', str(now - td2), str(now)))
self.failUnless('val4' in self.ds.getPeersForBlob(h2), 'DataStore deleted an unexpired value! Value %s, publish time %s, current time %s' % ('val4', str(now), str(now)))
# def testReplace(self):
# # First write with fake values
# now = int(time.time())
# for key, value in self.cases:
# try:
# self.ds.setItem(key, 'abc', now, now, 'node1')
# except Exception:
# import traceback
# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5)))
# #
# # write this stuff a second time, with the real values
# for key, value in self.cases:
# try:
# self.ds.setItem(key, value, now, now, 'node1')
# except Exception:
# import traceback
# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5)))
# #
# self.failUnlessEqual(len(self.ds.keys()), len(self.cases), 'Values did not get overwritten properly; expected %d keys, got %d' % (len(self.cases), len(self.ds.keys())))
# # Read back the data
# for key, value in self.cases:
# self.failUnlessEqual(self.ds[key], value, 'DataStore returned invalid data! Expected "%s", got "%s"' % (value, self.ds[key]))
# def testDelete(self):
# # First some values
# now = int(time.time())
# for key, value in self.cases:
# try:
# self.ds.setItem(key, 'abc', now, now, 'node1')
# except Exception:
# import traceback
# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5)))
# #
# self.failUnlessEqual(len(self.ds.keys()), len(self.cases), 'Values did not get stored properly; expected %d keys, got %d' % (len(self.cases), len(self.ds.keys())))
# #
# # Delete an item from the data
# key, value == self.cases[0]
# del self.ds[key]
# self.failUnlessEqual(len(self.ds.keys()), len(self.cases)-1, 'Value was not deleted; expected %d keys, got %d' % (len(self.cases)-1, len(self.ds.keys())))
# self.failIf(key in self.ds.keys(), 'Key was not deleted: %s' % key)
# def testMetaData(self):
# now = int(time.time())
# age = random.randint(10,3600)
# originallyPublished = []
# for i in range(len(self.cases)):
# originallyPublished.append(now - age)
# # First some values with metadata
# i = 0
# for key, value in self.cases:
# try:
# self.ds.setItem(key, 'abc', now, originallyPublished[i], 'node%d' % i)
# i += 1
# except Exception:
# import traceback
# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5)))
# #
# # Read back the meta-data
# i = 0
# for key, value in self.cases:
# dsLastPublished = self.ds.lastPublished(key)
# dsOriginallyPublished = self.ds.originalPublishTime(key)
# dsOriginalPublisherID = self.ds.originalPublisherID(key)
# self.failUnless(type(dsLastPublished) == int, 'DataStore returned invalid type for "last published" time! Expected "int", got %s' % type(dsLastPublished))
# self.failUnless(type(dsOriginallyPublished) == int, 'DataStore returned invalid type for "originally published" time! Expected "int", got %s' % type(dsOriginallyPublished))
# self.failUnless(type(dsOriginalPublisherID) == str, 'DataStore returned invalid type for "original publisher ID"; Expected "str", got %s' % type(dsOriginalPublisherID))
# self.failUnlessEqual(dsLastPublished, now, 'DataStore returned invalid "last published" time! Expected "%d", got "%d"' % (now, dsLastPublished))
# self.failUnlessEqual(dsOriginallyPublished, originallyPublished[i], 'DataStore returned invalid "originally published" time! Expected "%d", got "%d"' % (originallyPublished[i], dsOriginallyPublished))
# self.failUnlessEqual(dsOriginalPublisherID, 'node%d' % i, 'DataStore returned invalid "original publisher ID"; Expected "%s", got "%s"' % ('node%d' % i, dsOriginalPublisherID))
# i += 1
#class SQLiteDataStoreTest(DictDataStoreTest):
# def setUp(self):
# self.ds = entangled.kademlia.datastore.SQLiteDataStore()
# DictDataStoreTest.setUp(self)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DictDataStoreTest))
#suite.addTest(unittest.makeSuite(SQLiteDataStoreTest))
return suite
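
After this cleanup the only live assertions are the two expiry checks above: a value published longer ago than the expiry window must disappear from getPeersForBlob, while a freshly published one must survive. A schematic store with that behaviour is sketched below; apart from getPeersForBlob, which the test calls directly, the method names, signatures, and the one-hour window are assumptions for illustration, not lbrynet.dht.datastore's actual interface.

import time

EXPIRATION_SECONDS = 3600  # assumed expiry window


class ExpiringPeerStore(object):
    def __init__(self):
        # key -> list of (peer_value, last_published_timestamp)
        self._data = {}

    def addPeerToBlob(self, key, value, last_published):
        self._data.setdefault(key, []).append((value, last_published))

    def removeExpiredPeers(self):
        # Drop every entry whose publish time falls outside the expiry window.
        now = int(time.time())
        for key, peers in self._data.items():
            self._data[key] = [
                (value, published) for (value, published) in peers
                if now - published < EXPIRATION_SECONDS]

    def getPeersForBlob(self, key):
        return [value for (value, _) in self._data.get(key, [])]

The surviving test then amounts to: store values with different publish times, call removeExpiredPeers(), and check membership in getPeersForBlob(key).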