forked from LBRYCommunity/lbry-sdk

Use named loggers, fix some pep8 formatting mistakes

commit bb5cd49327
parent 6f40ef6015

46 changed files with 407 additions and 270 deletions
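The pattern this commit applies across the codebase: each module creates one logger named after its dotted module path, and every logging call goes through it instead of the root logging module. A minimal sketch of the change (the function body is illustrative, not a complete method from the diff):

    import logging

    # One logger per module, named after its dotted path,
    # e.g. "lbrynet.core.BlobManager".
    log = logging.getLogger(__name__)

    def creator_finished(blob_creator):
        # The call signature is unchanged from logging.debug(); only the
        # receiver differs, and records now carry the originating module name.
        log.debug("blob_creator.blob_hash: %s", blob_creator.blob_hash)

Because named loggers form a hierarchy, an application can raise or lower verbosity for one subsystem (say, lbrynet.core) without touching the rest.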
@@ -0,0 +1,4 @@
+import logging
+
+
+logging.getLogger(__name__).addHandler(logging.NullHandler())
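This new package __init__.py follows the standard convention for libraries: attach a NullHandler to the package's top-level logger so that importing the library never emits "no handlers could be found" warnings, and leave all output policy to the application. A sketch of the consuming side (the handler and level choices here are illustrative, not taken from this commit):

    import logging

    import lbrynet  # importing the package installs its NullHandler

    # The application, not the library, decides where records go.
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("lbrynet.core").setLevel(logging.DEBUG)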
@@ -2,7 +2,7 @@ import logging
 import os
 import time
 import sqlite3
-from twisted.internet import threads, defer, reactor, task
+from twisted.internet import threads, defer
 from twisted.python.failure import Failure
 from twisted.enterprise import adbapi
 from lbrynet.core.HashBlob import BlobFile, TempBlob, BlobFileCreator, TempBlobCreator
@@ -13,6 +13,9 @@ from lbrynet.core.Error import NoSuchBlobError
 from lbrynet.core.sqlite_helpers import rerun_if_locked
 
 
+log = logging.getLogger(__name__)
+
+
 class BlobManager(DHTHashSupplier):
     """This class is subclassed by classes which keep track of which blobs are available
     and which give access to new/existing blobs"""
@@ -137,7 +140,7 @@ class DiskBlobManager(BlobManager):
         return self._get_blobs_to_announce(next_announce_time)
 
     def creator_finished(self, blob_creator):
-        logging.debug("blob_creator.blob_hash: %s", blob_creator.blob_hash)
+        log.debug("blob_creator.blob_hash: %s", blob_creator.blob_hash)
         assert blob_creator.blob_hash is not None
         assert blob_creator.blob_hash not in self.blobs
         assert blob_creator.length is not None
@@ -188,7 +191,7 @@ class DiskBlobManager(BlobManager):
            return b_h
 
        def set_not_deleting(err, b_h):
-           logging.warning("Failed to delete blob %s. Reason: %s", str(b_h), err.getErrorMessage())
+           log.warning("Failed to delete blob %s. Reason: %s", str(b_h), err.getErrorMessage())
            self.blob_hashes_to_delete[b_h] = False
            return err
 
@@ -200,7 +203,7 @@ class DiskBlobManager(BlobManager):
        d = defer.succeed(True)
 
        def log_error(err):
-           logging.warning("Failed to delete completed blobs from the db: %s", err.getErrorMessage())
+           log.warning("Failed to delete completed blobs from the db: %s", err.getErrorMessage())
 
        d.addErrback(log_error)
        return d
@@ -238,7 +241,7 @@ class DiskBlobManager(BlobManager):
 
    @rerun_if_locked
    def _add_completed_blob(self, blob_hash, length, timestamp, next_announce_time=None):
-       logging.debug("Adding a completed blob. blob_hash=%s, length=%s", blob_hash, str(length))
+       log.debug("Adding a completed blob. blob_hash=%s, length=%s", blob_hash, str(length))
        if next_announce_time is None:
            next_announce_time = timestamp
        d = self.db_conn.runQuery("insert into blobs values (?, ?, ?, ?)",
@@ -502,7 +505,7 @@ class TempBlobManager(BlobManager):
        d = self._delete_blobs_marked_for_deletion()
 
        def set_next_manage_call():
-           logging.info("Setting the next manage call in %s", str(self))
+           log.info("Setting the next manage call in %s", str(self))
            self._next_manage_call = reactor.callLater(1, self._manage)
 
        d.addCallback(lambda _: set_next_manage_call())
@@ -511,11 +514,11 @@ class TempBlobManager(BlobManager):
 
        def remove_from_list(b_h):
            del self.blob_hashes_to_delete[b_h]
-           logging.info("Deleted blob %s", blob_hash)
+           log.info("Deleted blob %s", blob_hash)
            return b_h
 
        def set_not_deleting(err, b_h):
-           logging.warning("Failed to delete blob %s. Reason: %s", str(b_h), err.getErrorMessage())
+           log.warning("Failed to delete blob %s. Reason: %s", str(b_h), err.getErrorMessage())
            self.blob_hashes_to_delete[b_h] = False
            return b_h
 
@@ -524,7 +527,7 @@ class TempBlobManager(BlobManager):
            if being_deleted is False:
                if blob_hash in self.blobs:
                    self.blob_hashes_to_delete[blob_hash] = True
-                   logging.info("Found a blob marked for deletion: %s", blob_hash)
+                   log.info("Found a blob marked for deletion: %s", blob_hash)
                    blob = self.blobs[blob_hash]
                    d = blob.delete()
 
@@ -535,6 +538,6 @@ class TempBlobManager(BlobManager):
                else:
                    remove_from_list(blob_hash)
                    d = defer.fail(Failure(NoSuchBlobError(blob_hash)))
-                   logging.warning("Blob %s cannot be deleted because it is unknown")
+                   log.warning("Blob %s cannot be deleted because it is unknown")
                ds.append(d)
        return defer.DeferredList(ds)
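One pre-existing defect is carried over unchanged in the last hunk above: the warning in the unknown-blob branch passes a format string containing %s but no argument, so the placeholder is never filled in and the logging module will report a formatting error instead of the message. The presumably intended call is:

    log.warning("Blob %s cannot be deleted because it is unknown", blob_hash)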
@@ -13,6 +13,9 @@ from lbrynet.core.Error import DownloadCanceledError, InvalidDataError
 from lbrynet.core.cryptoutils import get_lbry_hash_obj
 
 
+log = logging.getLogger(__name__)
+
+
 class HashBlobReader(object):
     implements(interfaces.IConsumer)
 
@@ -84,7 +87,7 @@ class HashBlob(object):
        if self.length is None and 0 <= length <= BLOB_SIZE:
            self.length = length
            return True
-       logging.warning("Got an invalid length. Previous length: %s, Invalid length: %s", str(self.length), str(length))
+       log.warning("Got an invalid length. Previous length: %s, Invalid length: %s", str(self.length), str(length))
        return False
 
    def get_length(self):
@@ -126,7 +129,7 @@ class HashBlob(object):
                finished_deferred.callback(self)
                del self.writers[p]
                return True
-       logging.warning("Somehow, the writer that was accepted as being valid was already removed. writer: %s",
+       log.warning("Somehow, the writer that was accepted as being valid was already removed. writer: %s",
                        str(writer))
        return False
 
@@ -199,14 +202,14 @@ class BlobFile(HashBlob):
 
    def open_for_writing(self, peer):
        if not peer in self.writers:
-           logging.debug("Opening %s to be written by %s", str(self), str(peer))
+           log.debug("Opening %s to be written by %s", str(self), str(peer))
            write_file = tempfile.NamedTemporaryFile(delete=False, dir=self.blob_dir)
            finished_deferred = defer.Deferred()
            writer = HashBlobWriter(write_file, self.get_length, self.writer_finished)
 
            self.writers[peer] = (writer, finished_deferred)
            return finished_deferred, writer.write, writer.cancel
-       logging.warning("Tried to download the same file twice simultaneously from the same peer")
+       log.warning("Tried to download the same file twice simultaneously from the same peer")
        return None, None, None
 
    def open_for_reading(self):
@@ -232,7 +235,7 @@ class BlobFile(HashBlob):
        d = threads.deferToThread(delete_from_file_system)
 
        def log_error(err):
-           logging.warning("An error occurred deleting %s: %s", str(self.file_path), err.getErrorMessage())
+           log.warning("An error occurred deleting %s: %s", str(self.file_path), err.getErrorMessage())
            return err
 
        d.addErrback(log_error)
@@ -247,7 +250,7 @@ class BlobFile(HashBlob):
 
    def _close_writer(self, writer):
        if writer.write_handle is not None:
-           logging.debug("Closing %s", str(self))
+           log.debug("Closing %s", str(self))
            name = writer.write_handle.name
            writer.write_handle.close()
            threads.deferToThread(os.remove, name)
@@ -16,6 +16,9 @@ import time
 import os
 
 
+log = logging.getLogger(__name__)
+
+
 class ReservedPoints(object):
     def __init__(self, identifier, amount):
         self.identifier = identifier
@@ -53,7 +56,7 @@ class LBRYcrdWallet(object):
            if self.start_lbrycrdd is True:
                self._start_daemon()
            self._get_info()
-           logging.info("Connected!")
+           log.info("Connected!")
 
        def start_manage():
            self.stopped = False
@@ -67,7 +70,7 @@ class LBRYcrdWallet(object):
    def stop(self):
 
        def log_stop_error(err):
-           logging.error("An error occurred stopping the wallet. %s", err.getTraceback())
+           log.error("An error occurred stopping the wallet. %s", err.getTraceback())
 
        self.stopped = True
        # If self.next_manage_call is None, then manage is currently running or else
@@ -84,7 +87,7 @@ class LBRYcrdWallet(object):
        return d
 
    def manage(self):
-       logging.info("Doing manage")
+       log.info("Doing manage")
        self.next_manage_call = None
        have_set_manage_running = [False]
 
@@ -123,7 +126,7 @@ class LBRYcrdWallet(object):
        d.addCallback(lambda _: set_next_manage_call())
 
        def log_error(err):
-           logging.error("Something went wrong during manage. Error message: %s", err.getErrorMessage())
+           log.error("Something went wrong during manage. Error message: %s", err.getErrorMessage())
            return err
 
        d.addErrback(log_error)
@@ -191,7 +194,7 @@ class LBRYcrdWallet(object):
        self.queued_payments[self.peer_addresses[peer]] += rounded_amount
        # make any unused points available
        self.total_reserved_points -= (reserved_points.amount - rounded_amount)
-       logging.info("ordering that %s points be sent to %s", str(rounded_amount),
+       log.info("ordering that %s points be sent to %s", str(rounded_amount),
                     str(self.peer_addresses[peer]))
        peer.update_stats('points_sent', amount)
        return defer.succeed(True)
@@ -201,7 +204,7 @@ class LBRYcrdWallet(object):
        rounded_amount = Decimal(str(round(amount, 8)))
        assert(peer in self.current_address_given_to_peer)
        address = self.current_address_given_to_peer[peer]
-       logging.info("expecting a payment at address %s in the amount of %s", str(address), str(rounded_amount))
+       log.info("expecting a payment at address %s in the amount of %s", str(address), str(rounded_amount))
        self.expected_balances[address] += rounded_amount
        expected_balance = self.expected_balances[address]
        expected_time = datetime.datetime.now() + self.max_expected_payment_time
@@ -286,12 +289,12 @@ class LBRYcrdWallet(object):
                break
            except (socket.error, JSONRPCException):
                tries += 1
-               logging.warning("Failed to connect to lbrycrdd.")
+               log.warning("Failed to connect to lbrycrdd.")
                if tries < 5:
                    time.sleep(2 ** tries)
-                   logging.warning("Trying again in %d seconds", 2 ** tries)
+                   log.warning("Trying again in %d seconds", 2 ** tries)
                else:
-                   logging.warning("Giving up.")
+                   log.warning("Giving up.")
        else:
            self.lbrycrdd.terminate()
            raise ValueError("Couldn't open lbrycrdd")
@@ -338,7 +341,7 @@ class LBRYcrdWallet(object):
                peer.update_score(balance[5])
                peer.update_stats('points_received', balance[5])
            else:
-               logging.warning("Something went wrong checking a balance. Peer: %s, account: %s,"
+               log.warning("Something went wrong checking a balance. Peer: %s, account: %s,"
                                "expected balance: %s, expected time: %s, count: %s, error: %s",
                                str(balance[0]), str(balance[1]), str(balance[2]), str(balance[3]),
                                str(balance[4]), str(result.getErrorMessage()))
@@ -349,14 +352,14 @@ class LBRYcrdWallet(object):
    @_catch_connection_error
    def _check_expected_balance(self, expected_balance):
        rpc_conn = self._get_rpc_conn()
-       logging.info("Checking balance of address %s", str(expected_balance[1]))
+       log.info("Checking balance of address %s", str(expected_balance[1]))
        balance = rpc_conn.getreceivedbyaddress(expected_balance[1])
-       logging.debug("received balance: %s", str(balance))
-       logging.debug("expected balance: %s", str(expected_balance[2]))
+       log.debug("received balance: %s", str(balance))
+       log.debug("expected balance: %s", str(expected_balance[2]))
        return balance >= expected_balance[2]
 
    def _send_payments(self):
-       logging.info("Trying to send payments, if there are any to be sent")
+       log.info("Trying to send payments, if there are any to be sent")
 
        def do_send(payments):
            rpc_conn = self._get_rpc_conn()
@@ -364,15 +367,15 @@ class LBRYcrdWallet(object):
 
        payments_to_send = {}
        for address, points in self.queued_payments.items():
-           logging.info("Should be sending %s points to %s", str(points), str(address))
+           log.info("Should be sending %s points to %s", str(points), str(address))
            payments_to_send[address] = float(points)
            self.total_reserved_points -= points
            self.wallet_balance -= points
            del self.queued_payments[address]
        if payments_to_send:
-           logging.info("Creating a transaction with outputs %s", str(payments_to_send))
+           log.info("Creating a transaction with outputs %s", str(payments_to_send))
            return threads.deferToThread(do_send, payments_to_send)
-       logging.info("There were no payments to send")
+       log.info("There were no payments to send")
        return defer.succeed(True)
 
    @_catch_connection_error
@@ -439,7 +442,7 @@ class LBRYcrdAddressRequester(object):
 
    def _request_failed(self, err, peer):
        if not err.check(RequestCanceledError):
-           logging.warning("A peer failed to send a valid public key response. Error: %s, peer: %s",
+           log.warning("A peer failed to send a valid public key response. Error: %s, peer: %s",
                            err.getErrorMessage(), str(peer))
            #return err
 
@@ -490,7 +493,7 @@ class LBRYcrdAddressQueryHandler(object):
            d.addCallback(create_response)
            return d
        if self.address is None:
-           logging.warning("Expected a request for an address, but did not receive one")
+           log.warning("Expected a request for an address, but did not receive one")
            return defer.fail(Failure(ValueError("Expected but did not receive an address request")))
        else:
            return defer.succeed({})
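The reconnect loop in the -286,12 hunk above is a plain exponential backoff: after each failed attempt it sleeps 2 ** tries seconds and gives up after five tries. The same shape in isolation (connect_once is a hypothetical stand-in for the lbrycrdd RPC call, not a name from this commit):

    import time

    def connect_with_backoff(connect_once, max_tries=5):
        for tries in range(1, max_tries + 1):
            try:
                return connect_once()
            except OSError:
                if tries < max_tries:
                    time.sleep(2 ** tries)  # waits 2s, 4s, 8s, 16s between attempts
        raise ValueError("Couldn't open lbrycrdd")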
@@ -15,6 +15,9 @@ from twisted.python.failure import Failure
 from lbrynet.core.LBRYcrdWallet import ReservedPoints
 
 
+log = logging.getLogger(__name__)
+
+
 class PTCWallet(object):
     """This class sends payments to peers and also ensures that expected payments are received.
     This class is only intended to be used for testing."""
@@ -160,7 +163,7 @@ class PTCWallet(object):
                ds.append(d)
                del self.queued_payments[peer]
            else:
-               logging.warning("Don't have a payment address for peer %s. Can't send %s points.",
+               log.warning("Don't have a payment address for peer %s. Can't send %s points.",
                                str(peer), str(points))
        return defer.DeferredList(ds)
 
@@ -207,7 +210,7 @@ class PTCWallet(object):
        if self.peer_pub_keys[peer] in self.received_payments:
            received_balance = sum([a[0] for a in self.received_payments[self.peer_pub_keys[peer]]])
            if min_expected_balance > received_balance:
-               logging.warning("Account in bad standing: %s (pub_key: %s), expected amount = %s, received_amount = %s",
+               log.warning("Account in bad standing: %s (pub_key: %s), expected amount = %s, received_amount = %s",
                               str(peer), self.peer_pub_keys[peer], str(min_expected_balance), str(received_balance))
 
    def _open_db(self):
@@ -261,7 +264,7 @@ class PointTraderKeyExchanger(object):
 
    def _request_failed(self, err, peer):
        if not err.check(RequestCanceledError):
-           logging.warning("A peer failed to send a valid public key response. Error: %s, peer: %s",
+           log.warning("A peer failed to send a valid public key response. Error: %s, peer: %s",
                            err.getErrorMessage(), str(peer))
            #return err
 
@@ -306,15 +309,15 @@ class PointTraderKeyQueryHandler(object):
            try:
                RSA.importKey(new_encoded_pub_key)
            except (ValueError, TypeError, IndexError):
-               logging.warning("Client sent an invalid public key.")
+               log.warning("Client sent an invalid public key.")
                return defer.fail(Failure(ValueError("Client sent an invalid public key")))
            self.public_key = new_encoded_pub_key
            self.wallet.set_public_key_for_peer(self.peer, self.public_key)
-           logging.debug("Received the client's public key: %s", str(self.public_key))
+           log.debug("Received the client's public key: %s", str(self.public_key))
            fields = {'public_key': self.wallet.encoded_public_key}
            return defer.succeed(fields)
        if self.public_key is None:
-           logging.warning("Expected a public key, but did not receive one")
+           log.warning("Expected a public key, but did not receive one")
            return defer.fail(Failure(ValueError("Expected but did not receive a public key")))
        else:
            return defer.succeed({})
@@ -13,6 +13,9 @@ from lbrynet.core.PaymentRateManager import BasePaymentRateManager
 from twisted.internet import threads, defer
 
 
+log = logging.getLogger(__name__)
+
+
 class LBRYSession(object):
     """This class manages all important services common to any application that uses the network:
     the hash announcer, which informs other peers that this peer is associated with some hash. Usually,
@@ -104,7 +107,7 @@ class LBRYSession(object):
    def setup(self):
        """Create the blob directory and database if necessary, start all desired services"""
 
-       logging.debug("Setting up the lbry session")
+       log.debug("Setting up the lbry session")
 
        if self.lbryid is None:
            self.lbryid = generate_id()
@@ -124,7 +127,7 @@ class LBRYSession(object):
            d.addCallback(lambda _: self._setup_dht())
        else:
            if self.hash_announcer is None and self.peer_port is not None:
-               logging.warning("The server has no way to advertise its available blobs.")
+               log.warning("The server has no way to advertise its available blobs.")
                self.hash_announcer = DummyHashAnnouncer()
 
        d.addCallback(lambda _: self._setup_other_components())
@@ -146,11 +149,11 @@ class LBRYSession(object):
 
    def _try_upnp(self):
 
-       logging.debug("In _try_upnp")
+       log.debug("In _try_upnp")
 
        def threaded_try_upnp():
            if self.use_upnp is False:
-               logging.debug("Not using upnp")
+               log.debug("Not using upnp")
                return False
            u = miniupnpc.UPnP()
            num_devices_found = u.discover()
@@ -169,7 +172,7 @@ class LBRYSession(object):
            return False
 
        def upnp_failed(err):
-           logging.warning("UPnP failed. Reason: %s", err.getErrorMessage())
+           log.warning("UPnP failed. Reason: %s", err.getErrorMessage())
            return False
 
        d = threads.deferToThread(threaded_try_upnp)
@@ -180,7 +183,7 @@ class LBRYSession(object):
 
        from twisted.internet import reactor
 
-       logging.debug("Starting the dht")
+       log.debug("Starting the dht")
 
        def match_port(h, p):
            return h, p
@@ -216,7 +219,7 @@ class LBRYSession(object):
        return dl
 
    def _setup_other_components(self):
-       logging.debug("Setting up the rest of the components")
+       log.debug("Setting up the rest of the components")
 
        if self.rate_limiter is None:
            self.rate_limiter = RateLimiter()
@@ -3,6 +3,9 @@ from twisted.internet import interfaces, defer
 from zope.interface import implements
 
 
+log = logging.getLogger(__name__)
+
+
 class StreamCreator(object):
     """Classes which derive from this class create a 'stream', which can be any
     collection of associated blobs and associated metadata. These classes
@@ -42,7 +45,7 @@ class StreamCreator(object):
 
    def stop(self):
        """Stop creating the stream. Create the terminating zero-length blob."""
-       logging.debug("stop has been called for StreamCreator")
+       log.debug("stop has been called for StreamCreator")
        self.stopped = True
        if self.current_blob is not None:
            current_blob = self.current_blob
@@ -6,6 +6,9 @@ from lbrynet.core.client.StandaloneBlobDownloader import StandaloneBlobDownloade
 from lbrynet.core.Error import UnknownStreamTypeError, InvalidStreamDescriptorError
 
 
+log = logging.getLogger(__name__)
+
+
 class StreamDescriptorReader(object):
     """Classes which derive from this class read a stream descriptor file return
     a dictionary containing the fields in the file"""
@@ -82,7 +85,7 @@ class PlainStreamDescriptorWriter(StreamDescriptorWriter):
    def _write_stream_descriptor(self, raw_data):
 
        def write_file():
-           logging.debug("Writing the sd file to disk")
+           log.debug("Writing the sd file to disk")
            with open(self.sd_file_name, 'w') as sd_file:
                sd_file.write(raw_data)
            return self.sd_file_name
@@ -97,10 +100,10 @@ class BlobStreamDescriptorWriter(StreamDescriptorWriter):
        self.blob_manager = blob_manager
 
    def _write_stream_descriptor(self, raw_data):
-       logging.debug("Creating the new blob for the stream descriptor")
+       log.debug("Creating the new blob for the stream descriptor")
        blob_creator = self.blob_manager.get_blob_creator()
        blob_creator.write(raw_data)
-       logging.debug("Wrote the data to the new blob")
+       log.debug("Wrote the data to the new blob")
        return blob_creator.close()
@@ -10,6 +10,9 @@ from lbrynet.core.client.ClientRequest import ClientRequest, ClientBlobRequest
 from lbrynet.interfaces import IRequestCreator
 
 
+log = logging.getLogger(__name__)
+
+
 class BlobRequester(object):
     implements(IRequestCreator)
 
@@ -85,7 +88,7 @@ class BlobRequester(object):
    ######### internal calls #########
 
    def _download_succeeded(self, arg, peer, blob):
-       logging.info("Blob %s has been successfully downloaded from %s", str(blob), str(peer))
+       log.info("Blob %s has been successfully downloaded from %s", str(blob), str(peer))
        self._update_local_score(peer, 5.0)
        peer.update_stats('blobs_downloaded', 1)
        peer.update_score(5.0)
@@ -106,7 +109,7 @@ class BlobRequester(object):
 
    def _handle_download_error(self, err, peer, blob_to_download):
        if not err.check(DownloadCanceledError, PriceDisagreementError, RequestCanceledError):
-           logging.warning("An error occurred while downloading %s from %s. Error: %s",
+           log.warning("An error occurred while downloading %s from %s. Error: %s",
                            blob_to_download.blob_hash, str(peer), err.getTraceback())
        if err.check(PriceDisagreementError):
            # Don't kill the whole connection just because a price couldn't be agreed upon.
@@ -124,7 +127,7 @@ class BlobRequester(object):
            else:
                blob_hash = blobs_without_sources[0].blob_hash
                r = blob_hash
-       logging.debug("Blob requester peer search response: %s", str(r))
+       log.debug("Blob requester peer search response: %s", str(r))
        return defer.succeed(r)
 
    def _find_peers_for_hash(self, h):
@@ -202,7 +205,7 @@ class BlobRequester(object):
        request = ClientBlobRequest(request_dict, response_identifier, counting_write_func, d,
                                    cancel_func, blob_to_download)
 
-       logging.info("Requesting blob %s from %s", str(blob_to_download), str(peer))
+       log.info("Requesting blob %s from %s", str(blob_to_download), str(peer))
        return request
 
    def _price_settled(self, protocol):
@@ -239,11 +242,11 @@ class BlobRequester(object):
    def _handle_availability(self, response_dict, peer, request):
        if not request.response_identifier in response_dict:
            raise InvalidResponseError("response identifier not in response")
-       logging.debug("Received a response to the availability request")
+       log.debug("Received a response to the availability request")
        blob_hashes = response_dict[request.response_identifier]
        for blob_hash in blob_hashes:
            if blob_hash in request.request_dict['requested_blobs']:
-               logging.debug("The server has indicated it has the following blob available: %s", blob_hash)
+               log.debug("The server has indicated it has the following blob available: %s", blob_hash)
                self._available_blobs[peer].append(blob_hash)
                if blob_hash in self._unavailable_blobs[peer]:
                    self._unavailable_blobs[peer].remove(blob_hash)
@@ -298,7 +301,7 @@ class BlobRequester(object):
        if reason.check(NoResponseError):
            self._incompatible_peers.append(peer)
            return
-       logging.warning("Blob requester: a request of type '%s' failed. Reason: %s, Error type: %s",
+       log.warning("Blob requester: a request of type '%s' failed. Reason: %s, Error type: %s",
                        str(request_type), reason.getErrorMessage(), reason.type)
        self._update_local_score(peer, -10.0)
        if isinstance(reason, InvalidResponseError):
@@ -11,6 +11,9 @@ from lbrynet.interfaces import IRequestSender, IRateLimited
 from zope.interface import implements
 
 
+log = logging.getLogger(__name__)
+
+
 class ClientProtocol(Protocol):
     implements(IRequestSender, IRateLimited)
 
@@ -39,7 +42,7 @@ class ClientProtocol(Protocol):
        else:
            self._response_buff += data
            if len(self._response_buff) > MAX_RESPONSE_SIZE:
-               logging.warning("Response is too large. Size %s", len(self._response_buff))
+               log.warning("Response is too large. Size %s", len(self._response_buff))
                self.transport.loseConnection()
            response, extra_data = self._get_valid_response(self._response_buff)
            if response is not None:
@@ -55,7 +58,7 @@ class ClientProtocol(Protocol):
        else:
            err = reason
        #if self._response_deferreds:
-       #    logging.warning("Lost connection with active response deferreds. %s", str(self._response_deferreds))
+       #    log.warning("Lost connection with active response deferreds. %s", str(self._response_deferreds))
        for key, d in self._response_deferreds.items():
            del self._response_deferreds[key]
            d.errback(err)
@@ -70,7 +73,7 @@ class ClientProtocol(Protocol):
            return defer.fail(failure.Failure(ValueError("There is already a request for that response active")))
        self._next_request.update(request.request_dict)
        d = defer.Deferred()
-       logging.debug("Adding a request. Request: %s", str(request))
+       log.debug("Adding a request. Request: %s", str(request))
        self._response_deferreds[request.response_identifier] = d
        return d
 
@@ -102,7 +105,7 @@ class ClientProtocol(Protocol):
    ######### Internal request handling #########
 
    def _handle_request_error(self, err):
-       logging.error("An unexpected error occurred creating or sending a request to %s. Error message: %s",
+       log.error("An unexpected error occurred creating or sending a request to %s. Error message: %s",
                      str(self.peer), err.getTraceback())
        self.transport.loseConnection()
 
@@ -117,7 +120,7 @@ class ClientProtocol(Protocol):
            self._send_request_message(request_msg)
        else:
            # The connection manager has indicated that this connection should be terminated
-           logging.info("Closing the connection to %s due to having no further requests to send", str(self.peer))
+           log.info("Closing the connection to %s due to having no further requests to send", str(self.peer))
            self.transport.loseConnection()
 
        d = self._connection_manager.get_next_request(self.peer, self)
@@ -153,14 +156,14 @@ class ClientProtocol(Protocol):
        # If an error gets to this point, log it and kill the connection.
        if not err.check(MisbehavingPeerError, ConnectionClosedBeforeResponseError, DownloadCanceledError,
                         RequestCanceledError):
-           logging.error("The connection to %s is closing due to an unexpected error: %s", str(self.peer),
+           log.error("The connection to %s is closing due to an unexpected error: %s", str(self.peer),
                          err.getErrorMessage())
        if not err.check(RequestCanceledError):
            self.transport.loseConnection()
 
    def _handle_response(self, response):
        ds = []
-       logging.debug("Handling a response. Current expected responses: %s", str(self._response_deferreds))
+       log.debug("Handling a response. Current expected responses: %s", str(self._response_deferreds))
        for key, val in response.items():
            if key in self._response_deferreds:
                d = self._response_deferreds[key]
@@ -183,7 +186,7 @@ class ClientProtocol(Protocol):
        dl.addCallback(lambda _: self._ask_for_request())
 
    def _downloading_finished(self, arg):
-       logging.debug("The blob has finished downloading")
+       log.debug("The blob has finished downloading")
        self._blob_download_request = None
        self._downloading_blob = False
        return arg
@@ -194,7 +197,7 @@ class ClientProtocol(Protocol):
        # TODO: always be this way. it's done this way now because the client has no other way
        # TODO: of telling the server it wants the download to stop. It would be great if the
        # TODO: protocol had such a mechanism.
-       logging.info("Closing the connection to %s because the download of blob %s was canceled",
+       log.info("Closing the connection to %s because the download of blob %s was canceled",
                     str(self.peer), str(self._blob_download_request.blob))
        #self.transport.loseConnection()
        #return True
@@ -7,6 +7,9 @@ from lbrynet.core.client.ClientProtocol import ClientProtocolFactory
 from lbrynet.core.Error import InsufficientFundsError
 
 
+log = logging.getLogger(__name__)
+
+
 class PeerConnectionHandler(object):
     def __init__(self, request_creators, factory):
         self.request_creators = request_creators
@@ -42,7 +45,7 @@ class ConnectionManager(object):
        for peer in self._peer_connections.keys():
 
            def close_connection(p):
-               logging.info("Abruptly closing a connection to %s due to downloading being paused",
+               log.info("Abruptly closing a connection to %s due to downloading being paused",
                             str(p))
 
                if self._peer_connections[p].factory.p is not None:
@@ -66,10 +69,10 @@ class ConnectionManager(object):
 
    def get_next_request(self, peer, protocol):
 
-       logging.debug("Trying to get the next request for peer %s", str(peer))
+       log.debug("Trying to get the next request for peer %s", str(peer))
 
        if not peer in self._peer_connections:
-           logging.debug("The peer has already been told to shut down.")
+           log.debug("The peer has already been told to shut down.")
            return defer.succeed(False)
 
        def handle_error(err):
@@ -140,7 +143,7 @@ class ConnectionManager(object):
        from twisted.internet import reactor
 
        if peer is not None:
-           logging.debug("Trying to connect to %s", str(peer))
+           log.debug("Trying to connect to %s", str(peer))
            factory = ClientProtocolFactory(peer, self.rate_limiter, self)
            self._peer_connections[peer] = PeerConnectionHandler(self._primary_request_creators[:],
                                                                 factory)
@@ -152,9 +155,9 @@ class ConnectionManager(object):
        from twisted.internet import reactor
 
        def get_new_peers(request_creators):
-           logging.debug("Trying to get a new peer to connect to")
+           log.debug("Trying to get a new peer to connect to")
            if len(request_creators) > 0:
-               logging.debug("Got a creator to check: %s", str(request_creators[0]))
+               log.debug("Got a creator to check: %s", str(request_creators[0]))
                d = request_creators[0].get_new_peers()
                d.addCallback(lambda h: h if h is not None else get_new_peers(request_creators[1:]))
                return d
@@ -164,14 +167,14 @@ class ConnectionManager(object):
        def pick_best_peer(peers):
            # TODO: Eventually rank them based on past performance/reputation. For now
            # TODO: just pick the first to which we don't have an open connection
-           logging.debug("Got a list of peers to choose from: %s", str(peers))
+           log.debug("Got a list of peers to choose from: %s", str(peers))
            if peers is None:
                return None
            for peer in peers:
                if not peer in self._peer_connections:
-                   logging.debug("Got a good peer. Returning peer %s", str(peer))
+                   log.debug("Got a good peer. Returning peer %s", str(peer))
                    return peer
-           logging.debug("Couldn't find a good peer to connect to")
+           log.debug("Couldn't find a good peer to connect to")
            return None
 
        if len(self._peer_connections) < MAX_CONNECTIONS_PER_STREAM:
@@ -5,6 +5,9 @@ from zope.interface import implements
 from lbrynet import interfaces
 
 
+log = logging.getLogger(__name__)
+
+
 class DownloadManager(object):
     implements(interfaces.IDownloadManager)
 
@@ -24,7 +27,7 @@ class DownloadManager(object):
 
    def start_downloading(self):
        d = self.blob_info_finder.get_initial_blobs()
-       logging.debug("Requested the initial blobs from the info finder")
+       log.debug("Requested the initial blobs from the info finder")
        d.addCallback(self.add_blobs_to_download)
        d.addCallback(lambda _: self.resume_downloading())
        return d
@@ -33,7 +36,7 @@ class DownloadManager(object):
 
        def check_start(result, manager):
            if isinstance(result, failure.Failure):
-               logging.error("Failed to start the %s: %s", manager, result.getErrorMessage())
+               log.error("Failed to start the %s: %s", manager, result.getErrorMessage())
                return False
            return True
 
@@ -49,7 +52,7 @@ class DownloadManager(object):
 
        def check_stop(result, manager):
            if isinstance(result, failure.Failure):
-               logging.error("Failed to stop the %s: %s", manager. result.getErrorMessage())
+               log.error("Failed to stop the %s: %s", manager. result.getErrorMessage())
                return False
            return True
 
@@ -63,21 +66,21 @@ class DownloadManager(object):
 
    def add_blobs_to_download(self, blob_infos):
 
-       logging.debug("Adding %s to blobs", str(blob_infos))
+       log.debug("Adding %s to blobs", str(blob_infos))
 
        def add_blob_to_list(blob, blob_num):
            self.blobs[blob_num] = blob
-           logging.info("Added blob (hash: %s, number %s) to the list", str(blob.blob_hash), str(blob_num))
+           log.info("Added blob (hash: %s, number %s) to the list", str(blob.blob_hash), str(blob_num))
 
        def error_during_add(err):
-           logging.warning("An error occurred adding the blob to blobs. Error:%s", err.getErrorMessage())
+           log.warning("An error occurred adding the blob to blobs. Error:%s", err.getErrorMessage())
            return err
 
        ds = []
        for blob_info in blob_infos:
            if not blob_info.blob_num in self.blobs:
                self.blob_infos[blob_info.blob_num] = blob_info
-               logging.debug("Trying to get the blob associated with blob hash %s", str(blob_info.blob_hash))
+               log.debug("Trying to get the blob associated with blob hash %s", str(blob_info.blob_hash))
                d = self.blob_manager.get_blob(blob_info.blob_hash, self.upload_allowed, blob_info.length)
                d.addCallback(add_blob_to_list, blob_info.blob_num)
                d.addErrback(error_during_add)
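Note that check_stop in the -49,7 hunk above keeps a pre-existing typo on both sides of the diff: manager. result.getErrorMessage() has a period where a comma belongs, so Python parses it as the attribute lookup manager.result, which would raise at runtime when the errback fires, and the format string is left one argument short. The presumably intended call is:

    log.error("Failed to stop the %s: %s", manager, result.getErrorMessage())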
@@ -8,6 +8,9 @@ from lbrynet.core.client.DownloadManager import DownloadManager
 from twisted.internet import defer
 
 
+log = logging.getLogger(__name__)
+
+
 class SingleBlobMetadataHandler(object):
     implements(interfaces.IMetadataHandler)
 
@@ -18,7 +21,7 @@ class SingleBlobMetadataHandler(object):
    ######## IMetadataHandler #########
 
    def get_initial_blobs(self):
-       logging.debug("Returning the blob info")
+       log.debug("Returning the blob info")
        return defer.succeed([BlobInfo(self.blob_hash, 0, None)])
 
    def final_blob_num(self):
@@ -72,7 +75,7 @@ class SingleProgressManager(object):
 
        from twisted.internet import reactor
 
-       logging.debug("The blob %s has been downloaded. Calling the finished callback", str(blob))
+       log.debug("The blob %s has been downloaded. Calling the finished callback", str(blob))
        if self.finished is False:
            self.finished = True
            reactor.callLater(0, self.finished_callback, blob)
@ -4,6 +4,9 @@ from twisted.internet import defer
|
||||||
from zope.interface import implements
|
from zope.interface import implements
|
||||||
|
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class StreamProgressManager(object):
|
class StreamProgressManager(object):
|
||||||
implements(IProgressManager)
|
implements(IProgressManager)
|
||||||
|
|
||||||
|
@ -61,18 +64,18 @@ class StreamProgressManager(object):
|
||||||
return defer.succeed(None)
|
return defer.succeed(None)
|
||||||
|
|
||||||
def _finished_with_blob(self, blob_num):
|
def _finished_with_blob(self, blob_num):
|
||||||
logging.debug("In _finished_with_blob, blob_num = %s", str(blob_num))
|
log.debug("In _finished_with_blob, blob_num = %s", str(blob_num))
|
||||||
if self.delete_blob_after_finished is True:
|
if self.delete_blob_after_finished is True:
|
||||||
logging.debug("delete_blob_after_finished is True")
|
log.debug("delete_blob_after_finished is True")
|
||||||
blobs = self.download_manager.blobs
|
blobs = self.download_manager.blobs
|
||||||
if blob_num in blobs:
|
if blob_num in blobs:
|
||||||
logging.debug("Telling the blob manager, %s, to delete blob %s", str(self.blob_manager),
|
log.debug("Telling the blob manager, %s, to delete blob %s", str(self.blob_manager),
|
||||||
blobs[blob_num].blob_hash)
|
blobs[blob_num].blob_hash)
|
||||||
self.blob_manager.delete_blobs([blobs[blob_num].blob_hash])
|
self.blob_manager.delete_blobs([blobs[blob_num].blob_hash])
|
||||||
else:
|
else:
|
||||||
logging.debug("Blob number %s was not in blobs", str(blob_num))
|
log.debug("Blob number %s was not in blobs", str(blob_num))
|
||||||
else:
|
else:
|
||||||
logging.debug("delete_blob_after_finished is False")
|
log.debug("delete_blob_after_finished is False")
|
||||||
|
|
||||||
|
|
||||||
class FullStreamProgressManager(StreamProgressManager):
|
class FullStreamProgressManager(StreamProgressManager):
|
||||||
|
@@ -126,14 +129,14 @@ class FullStreamProgressManager(StreamProgressManager):
             current_blob_num = self.last_blob_outputted + 1

             if current_blob_num in blobs and blobs[current_blob_num].is_validated():
-                logging.info("Outputting blob %s", str(self.last_blob_outputted + 1))
+                log.info("Outputting blob %s", str(self.last_blob_outputted + 1))
                 self.provided_blob_nums.append(self.last_blob_outputted + 1)
                 d = self.download_manager.handle_blob(self.last_blob_outputted + 1)
                 d.addCallback(lambda _: finished_outputting_blob())
                 d.addCallback(lambda _: self._finished_with_blob(current_blob_num))

                 def log_error(err):
-                    logging.warning("Error occurred in the output loop. Error: %s", err.getErrorMessage())
+                    log.warning("Error occurred in the output loop. Error: %s", err.getErrorMessage())

                 d.addErrback(log_error)
             else:
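
The output loop above leans on Twisted's Deferred chaining: each addCallback runs only if the previous step succeeded, and the single addErrback at the end catches a failure raised anywhere earlier in the chain. A stripped-down sketch of that control flow (the step names are placeholders, not the real downloader API):

    from twisted.internet import defer

    def step_one(_):
        return "output written"

    def step_two(result):
        return result + ", blob cleaned up"

    def on_error(err):
        # Reached if step_one or step_two raises; err is a twisted Failure.
        print "error in the chain: %s" % err.getErrorMessage()

    d = defer.succeed(None)
    d.addCallback(step_one)
    d.addCallback(step_two)
    d.addErrback(on_error)
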

@@ -4,6 +4,9 @@ from zope.interface import implements
 from lbrynet.interfaces import IQueryHandlerFactory, IQueryHandler


+log = logging.getLogger(__name__)
+
+
 class BlobAvailabilityHandlerFactory(object):
     implements(IQueryHandlerFactory)
@@ -37,7 +40,7 @@ class BlobAvailabilityHandler(object):

     def handle_queries(self, queries):
         if self.query_identifiers[0] in queries:
-            logging.debug("Received the client's list of requested blobs")
+            log.debug("Received the client's list of requested blobs")
             d = self._get_available_blobs(queries[self.query_identifiers[0]])

             def set_field(available_blobs):

@@ -6,6 +6,9 @@ from zope.interface import implements
 from lbrynet.interfaces import IQueryHandlerFactory, IQueryHandler, IBlobSender


+log = logging.getLogger(__name__)
+
+
 class BlobRequestHandlerFactory(object):
     implements(IQueryHandlerFactory)
@@ -58,7 +61,7 @@ class BlobRequestHandler(object):
             response['blob_data_payment_rate'] = 'RATE_ACCEPTED'

         if self.query_identifiers[1] in queries:
-            logging.debug("Received the client's request to send a blob")
+            log.debug("Received the client's request to send a blob")
             response_fields = {}
             response['incoming_blob'] = response_fields
@@ -75,11 +78,11 @@ class BlobRequestHandler(object):
             if read_handle is not None:
                 self.currently_uploading = blob
                 self.read_handle = read_handle
-                logging.debug("Sending %s to client", str(blob))
+                log.debug("Sending %s to client", str(blob))
                 response_fields['blob_hash'] = blob.blob_hash
                 response_fields['length'] = blob.length
                 return response
-        logging.debug("We can not send %s", str(blob))
+        log.debug("We can not send %s", str(blob))
         response_fields['error'] = "BLOB_UNAVAILABLE"
         return response
@@ -129,13 +132,13 @@ class BlobRequestHandler(object):

         def start_transfer():
             self.file_sender = FileSender()
-            logging.info("Starting the file upload")
+            log.info("Starting the file upload")
             assert self.read_handle is not None, "self.read_handle was None when trying to start the transfer"
             d = self.file_sender.beginFileTransfer(self.read_handle, consumer, count_bytes)
             return d

         def set_expected_payment():
-            logging.info("Setting expected payment")
+            log.info("Setting expected payment")
             if self.blob_bytes_uploaded != 0 and self.blob_data_payment_rate is not None:
                 self.wallet.add_expected_payment(self.peer,
                                                  self.currently_uploading.length * 1.0 *
@@ -151,6 +154,6 @@ class BlobRequestHandler(object):
         self.currently_uploading = None
         self.file_sender = None
         if reason is not None and isinstance(reason, Failure):
-            logging.warning("Upload has failed. Reason: %s", reason.getErrorMessage())
+            log.warning("Upload has failed. Reason: %s", reason.getErrorMessage())

         return _send_file()

@@ -6,6 +6,9 @@ from zope.interface import implements
 from lbrynet.core.server.ServerRequestHandler import ServerRequestHandler


+log = logging.getLogger(__name__)
+
+
 class ServerProtocol(Protocol):
     """ServerProtocol needs to:
@@ -26,7 +29,7 @@ class ServerProtocol(Protocol):
     #Protocol stuff

     def connectionMade(self):
-        logging.debug("Got a connection")
+        log.debug("Got a connection")
         peer_info = self.transport.getPeer()
         self.peer = self.factory.peer_manager.get_peer(peer_info.host, peer_info.port)
         self.request_handler = ServerRequestHandler(self)
@@ -34,7 +37,7 @@ class ServerProtocol(Protocol):
         if enabled is True:
             query_handler = query_handler_factory.build_query_handler()
             query_handler.register_with_request_handler(self.request_handler, self.peer)
-        logging.debug("Setting the request handler")
+        log.debug("Setting the request handler")
         self.factory.rate_limiter.register_protocol(self)

     def connectionLost(self, reason=failure.Failure(error.ConnectionDone())):
|
@ -42,10 +45,10 @@ class ServerProtocol(Protocol):
|
||||||
self.request_handler.stopProducing()
|
self.request_handler.stopProducing()
|
||||||
self.factory.rate_limiter.unregister_protocol(self)
|
self.factory.rate_limiter.unregister_protocol(self)
|
||||||
if not reason.check(error.ConnectionDone):
|
if not reason.check(error.ConnectionDone):
|
||||||
logging.warning("Closing a connection. Reason: %s", reason.getErrorMessage())
|
log.warning("Closing a connection. Reason: %s", reason.getErrorMessage())
|
||||||
|
|
||||||
def dataReceived(self, data):
|
def dataReceived(self, data):
|
||||||
logging.debug("Receiving %s bytes of data from the transport", str(len(data)))
|
log.debug("Receiving %s bytes of data from the transport", str(len(data)))
|
||||||
self.factory.rate_limiter.report_dl_bytes(len(data))
|
self.factory.rate_limiter.report_dl_bytes(len(data))
|
||||||
if self.request_handler is not None:
|
if self.request_handler is not None:
|
||||||
self.request_handler.data_received(data)
|
self.request_handler.data_received(data)
|
||||||
|
@@ -53,7 +56,7 @@ class ServerProtocol(Protocol):
     #IConsumer stuff

     def registerProducer(self, producer, streaming):
-        logging.debug("Registering the producer")
+        log.debug("Registering the producer")
         assert streaming is True

     def unregisterProducer(self):
@@ -61,7 +64,7 @@ class ServerProtocol(Protocol):
         self.transport.loseConnection()

     def write(self, data):
-        logging.debug("Writing %s bytes of data to the transport", str(len(data)))
+        log.debug("Writing %s bytes of data to the transport", str(len(data)))
         self.transport.write(data)
         self.factory.rate_limiter.report_ul_bytes(len(data))

@@ -5,6 +5,9 @@ from zope.interface import implements
 from lbrynet.interfaces import IRequestHandler


+log = logging.getLogger(__name__)
+
+
 class ServerRequestHandler(object):
     """This class handles requests from clients. It can upload blobs and return request for information about
     more blobs that are associated with streams"""
@@ -52,7 +55,7 @@ class ServerRequestHandler(object):
         chunk = self.response_buff[:self.CHUNK_SIZE]
         self.response_buff = self.response_buff[self.CHUNK_SIZE:]
         if chunk != '':
-            logging.debug("writing %s bytes to the client", str(len(chunk)))
+            log.debug("writing %s bytes to the client", str(len(chunk)))
             self.consumer.write(chunk)
             reactor.callLater(0, self._produce_more)
@@ -76,7 +79,7 @@ class ServerRequestHandler(object):

         def get_more_data():
             if self.producer is not None:
-                logging.debug("Requesting more data from the producer")
+                log.debug("Requesting more data from the producer")
                 self.producer.resumeProducing()

         reactor.callLater(0, get_more_data)
@@ -84,8 +87,8 @@ class ServerRequestHandler(object):
     #From Protocol

     def data_received(self, data):
-        logging.debug("Received data")
-        logging.debug("%s", str(data))
+        log.debug("Received data")
+        log.debug("%s", str(data))
         if self.request_received is False:
             self.request_buff = self.request_buff + data
             msg = self.try_to_parse_request(self.request_buff)
@@ -96,10 +99,10 @@ class ServerRequestHandler(object):
                 d.addCallback(lambda _: self.blob_sender.send_blob_if_requested(self))
                 d.addCallbacks(lambda _: self.finished_response(), self.request_failure_handler)
             else:
-                logging.info("Request buff not a valid json message")
-                logging.info("Request buff: %s", str(self.request_buff))
+                log.info("Request buff not a valid json message")
+                log.info("Request buff: %s", str(self.request_buff))
         else:
-            logging.warning("The client sent data when we were uploading a file. This should not happen")
+            log.warning("The client sent data when we were uploading a file. This should not happen")

     ######### IRequestHandler #########
@@ -112,7 +115,7 @@ class ServerRequestHandler(object):
     #response handling

     def request_failure_handler(self, err):
-        logging.warning("An error occurred handling a request. Error: %s", err.getErrorMessage())
+        log.warning("An error occurred handling a request. Error: %s", err.getErrorMessage())
         self.stopProducing()
         return err
@@ -122,15 +125,15 @@ class ServerRequestHandler(object):

     def send_response(self, msg):
         m = json.dumps(msg)
-        logging.info("Sending a response of length %s", str(len(m)))
-        logging.debug("Response: %s", str(m))
+        log.info("Sending a response of length %s", str(len(m)))
+        log.debug("Response: %s", str(m))
         self.response_buff = self.response_buff + m
         self._produce_more()
         return True

     def handle_request(self, msg):
-        logging.debug("Handling a request")
-        logging.debug(str(msg))
+        log.debug("Handling a request")
+        log.debug(str(msg))

         def create_response_message(results):
             response = {}
@@ -140,11 +143,11 @@ class ServerRequestHandler(object):
             else:
                 # result is a Failure
                 return result
-            logging.debug("Finished making the response message. Response: %s", str(response))
+            log.debug("Finished making the response message. Response: %s", str(response))
             return response

         def log_errors(err):
-            logging.warning("An error occurred handling a client request. Error message: %s", err.getErrorMessage())
+            log.warning("An error occurred handling a client request. Error message: %s", err.getErrorMessage())
             return err

         def send_response(response):
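
For context on the _produce_more changes above: the handler drains its response buffer one CHUNK_SIZE slice per reactor turn, rescheduling itself with reactor.callLater(0, ...) so a large response never blocks the event loop. A self-contained sketch of the pattern (the consumer is any object with a write method; the CHUNK_SIZE value here is made up for illustration):

    from twisted.internet import reactor

    class ChunkedResponder(object):
        CHUNK_SIZE = 2 ** 14  # illustrative; the real value lives in the class above

        def __init__(self, consumer):
            self.consumer = consumer
            self.response_buff = ''

        def queue(self, data):
            self.response_buff = self.response_buff + data
            self._produce_more()

        def _produce_more(self):
            chunk = self.response_buff[:self.CHUNK_SIZE]
            self.response_buff = self.response_buff[self.CHUNK_SIZE:]
            if chunk != '':
                self.consumer.write(chunk)
                # yield to the reactor between chunks instead of looping here
                reactor.callLater(0, self._produce_more)
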

@@ -3,11 +3,14 @@ from twisted.internet import task, reactor
 import logging


+log = logging.getLogger(__name__)
+
+
 def rerun_if_locked(f):

     def rerun(err, *args, **kwargs):
         if err.check(sqlite3.OperationalError) and err.value.message == "database is locked":
-            logging.warning("database was locked. rerunning %s with args %s, kwargs %s",
-                            str(f), str(args), str(kwargs))
+            log.warning("database was locked. rerunning %s with args %s, kwargs %s",
+                        str(f), str(args), str(kwargs))
             return task.deferLater(reactor, 0, wrapper, *args, **kwargs)
         return err
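
The hunk above only shows the rerun errback; the wrapper it reschedules sits outside the context lines. A plausible shape for the whole decorator, assuming the conventional Twisted layout (the wrapper body is a guess, not part of this diff):

    import sqlite3
    from twisted.internet import task, reactor

    def rerun_if_locked(f):

        def rerun(err, *args, **kwargs):
            # SQLite raises OperationalError("database is locked") under
            # concurrent writers; retry on the next reactor turn.
            if err.check(sqlite3.OperationalError) and err.value.message == "database is locked":
                return task.deferLater(reactor, 0, wrapper, *args, **kwargs)
            return err

        def wrapper(*args, **kwargs):
            d = f(*args, **kwargs)
            d.addErrback(rerun, *args, **kwargs)
            return d

        return wrapper
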

@@ -5,6 +5,9 @@ from lbrynet.conf import BLOB_SIZE
 from lbrynet.core.BlobInfo import BlobInfo


+log = logging.getLogger(__name__)
+
+
 class CryptBlobInfo(BlobInfo):
     def __init__(self, blob_hash, blob_num, length, iv):
         BlobInfo.__init__(self, blob_hash, blob_num, length)
@@ -78,12 +81,12 @@ class CryptStreamBlobMaker(object):
         return done, num_bytes_to_write

     def close(self):
-        logging.debug("closing blob %s with plaintext len %s", str(self.blob_num), str(self.length))
+        log.debug("closing blob %s with plaintext len %s", str(self.blob_num), str(self.length))
         if self.length != 0:
             self._close_buffer()
         d = self.blob.close()
         d.addCallback(self._return_info)
-        logging.debug("called the finished_callback from CryptStreamBlobMaker.close")
+        log.debug("called the finished_callback from CryptStreamBlobMaker.close")
         return d

     def _write_buffer(self):

@@ -12,6 +12,9 @@ from lbrynet.core.StreamCreator import StreamCreator
 from lbrynet.cryptstream.CryptBlob import CryptStreamBlobMaker


+log = logging.getLogger(__name__)
+
+
 class CryptStreamCreator(StreamCreator):
     """Create a new stream with blobs encrypted by a symmetric cipher.
@@ -57,18 +60,18 @@ class CryptStreamCreator(StreamCreator):
         return defer.succeed(True)

     def _finalize(self):
-        logging.debug("_finalize has been called")
+        log.debug("_finalize has been called")
         self.blob_count += 1
         iv = self.iv_generator.next()
         final_blob_creator = self.blob_manager.get_blob_creator()
-        logging.debug("Created the finished_deferred")
+        log.debug("Created the finished_deferred")
         final_blob = self._get_blob_maker(iv, final_blob_creator)
-        logging.debug("Created the final blob")
-        logging.debug("Calling close on final blob")
+        log.debug("Created the final blob")
+        log.debug("Calling close on final blob")
         d = final_blob.close()
         d.addCallback(self._blob_finished)
         self.finished_deferreds.append(d)
-        logging.debug("called close on final blob, returning from make_final_blob")
+        log.debug("called close on final blob, returning from make_final_blob")
         return d

     def _write(self, data):

@@ -7,6 +7,9 @@ import logging
 import json


+log = logging.getLogger(__name__)
+
+
 known_dbs = ['lbryfile_desc.db', 'lbryfiles.db', 'valuable_blobs.db', 'blobs.db',
              'lbryfile_blob.db', 'lbryfile_info.db', 'settings.db', 'blind_settings.db',
              'blind_peers.db', 'blind_info.db', 'lbryfile_info.db', 'lbryfile_manager.db',
@@ -17,33 +20,33 @@ def do_move(from_dir, to_dir):
     for known_db in known_dbs:
         known_db_path = os.path.join(from_dir, known_db)
         if os.path.exists(known_db_path):
-            logging.debug("Moving %s to %s",
-                          os.path.abspath(known_db_path),
-                          os.path.abspath(os.path.join(to_dir, known_db)))
+            log.debug("Moving %s to %s",
+                      os.path.abspath(known_db_path),
+                      os.path.abspath(os.path.join(to_dir, known_db)))
             shutil.move(known_db_path, os.path.join(to_dir, known_db))
         else:
-            logging.debug("Did not find %s", os.path.abspath(known_db_path))
+            log.debug("Did not find %s", os.path.abspath(known_db_path))


 def do_migration(db_dir):
     old_dir = os.path.join(db_dir, "_0_to_1_old")
     new_dir = os.path.join(db_dir, "_0_to_1_new")
     try:
-        logging.info("Moving dbs from the real directory to %s", os.path.abspath(old_dir))
+        log.info("Moving dbs from the real directory to %s", os.path.abspath(old_dir))
         os.makedirs(old_dir)
         do_move(db_dir, old_dir)
     except:
-        logging.error("An error occurred moving the old db files.")
+        log.error("An error occurred moving the old db files.")
         raise
     try:
-        logging.info("Creating the new directory in %s", os.path.abspath(new_dir))
+        log.info("Creating the new directory in %s", os.path.abspath(new_dir))
         os.makedirs(new_dir)

     except:
-        logging.error("An error occurred creating the new directory.")
+        log.error("An error occurred creating the new directory.")
         raise
     try:
-        logging.info("Doing the migration")
+        log.info("Doing the migration")
         migrate_blob_db(old_dir, new_dir)
         migrate_lbryfile_db(old_dir, new_dir)
         migrate_livestream_db(old_dir, new_dir)
@@ -51,20 +54,20 @@ def do_migration(db_dir):
         migrate_lbryfile_manager_db(old_dir, new_dir)
         migrate_settings_db(old_dir, new_dir)
         migrate_repeater_db(old_dir, new_dir)
-        logging.info("Migration succeeded")
+        log.info("Migration succeeded")
     except:
-        logging.error("An error occurred during the migration. Restoring.")
+        log.error("An error occurred during the migration. Restoring.")
         do_move(old_dir, db_dir)
         raise
     try:
-        logging.info("Moving dbs in the new directory to the real directory")
+        log.info("Moving dbs in the new directory to the real directory")
         do_move(new_dir, db_dir)
         db_revision = open(os.path.join(db_dir, 'db_revision'), mode='w+')
         db_revision.write("1")
         db_revision.close()
         os.rmdir(new_dir)
     except:
-        logging.error("An error occurred moving the new db files.")
+        log.error("An error occurred moving the new db files.")
         raise
     return old_dir

@@ -22,6 +22,9 @@ from hashwatcher import HashWatcher
 import logging


+log = logging.getLogger(__name__)
+
+
 def rpcmethod(func):
     """ Decorator to expose Node methods as remote procedure calls
@@ -231,12 +234,12 @@ class Node(object):
         known_nodes = {}

         def log_error(err, n):
-            logging.error("error storing blob_hash %s at %s", binascii.hexlify(blob_hash), str(n))
-            logging.error(binascii.hexlify(err.getErrorMessage()))
-            logging.error(err.getTraceback())
+            log.error("error storing blob_hash %s at %s", binascii.hexlify(blob_hash), str(n))
+            log.error(binascii.hexlify(err.getErrorMessage()))
+            log.error(err.getTraceback())

         def log_success(res):
-            logging.debug("Response to store request: %s", str(res))
+            log.debug("Response to store request: %s", str(res))
             return res

         def announce_to_peer(responseTuple):
@@ -332,7 +335,7 @@ class Node(object):
         # originalPublisherID = self.id
         #
         # def log_error(err):
-        #     logging.error(err.getErrorMessage())
+        #     log.error(err.getErrorMessage())
         #
         # # Prepare a callback for doing "STORE" RPC calls
         # def executeStoreRPCs(nodes):
@@ -756,7 +759,7 @@ class Node(object):
         searchIteration()

         def log_error(err):
-            logging.error(err.getErrorMessage())
+            log.error(err.getErrorMessage())

         # Send parallel, asynchronous FIND_NODE RPCs to the shortlist of contacts
         def searchIteration():

@@ -6,6 +6,9 @@ import sys
 from lbrynet.core.utils import generate_id


+log = logging.getLogger(__name__)
+
+
 def print_usage():
     print "Usage:\n%s UDP_PORT KNOWN_NODE_IP KNOWN_NODE_PORT HASH"
@@ -13,15 +16,15 @@ def print_usage():
 def join_network(udp_port, known_nodes):
     lbryid = generate_id()

-    logging.info('Creating Node...')
+    log.info('Creating Node...')
     node = Node(udpPort=udp_port, lbryid=lbryid)

-    logging.info('Joining network...')
+    log.info('Joining network...')
     d = node.joinNetwork(known_nodes)

     def log_network_size():
-        logging.info("Approximate number of nodes in DHT: %s", str(node.getApproximateTotalDHTNodes()))
-        logging.info("Approximate number of blobs in DHT: %s", str(node.getApproximateTotalHashes()))
+        log.info("Approximate number of nodes in DHT: %s", str(node.getApproximateTotalDHTNodes()))
+        log.info("Approximate number of blobs in DHT: %s", str(node.getApproximateTotalHashes()))

     d.addCallback(lambda _: log_network_size())
@@ -36,7 +39,7 @@ def get_hosts(node, h):
         print "Hosts returned from the DHT: "
         print hosts

-    logging.info("Looking up %s", h)
+    log.info("Looking up %s", h)
     d = node.getPeersForBlob(h)
     d.addCallback(print_hosts)
     return d
@@ -48,9 +51,9 @@ def announce_hash(node, h):
     def log_results(results):
         for success, result in results:
             if success:
-                logging.info("Succeeded: %s", str(result))
+                log.info("Succeeded: %s", str(result))
             else:
-                logging.info("Failed: %s", str(result.getErrorMessage()))
+                log.info("Failed: %s", str(result.getErrorMessage()))

     d.addCallback(log_results)
     return d
@@ -80,11 +83,11 @@ def run_dht_script(dht_func):
     d.addCallback(run_dht_func)

     def log_err(err):
-        logging.error("An error occurred: %s", err.getTraceback())
+        log.error("An error occurred: %s", err.getTraceback())
         return err

     def shut_down():
-        logging.info("Shutting down")
+        log.info("Shutting down")
         reactor.stop()

     d.addErrback(log_err)
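
One consequence of moving the script above onto a named logger: a library module should stay silent by default, but a standalone script still has to install a handler itself or none of these log.info calls will be visible. A common minimal setup (not part of this commit):

    import logging

    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(name)s %(levelname)s: %(message)s")
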

@@ -1,13 +1,16 @@
 import logging
 import sqlite3
 import os
-from twisted.internet import threads, defer
+from twisted.internet import defer
 from twisted.python.failure import Failure
 from twisted.enterprise import adbapi
 from lbrynet.core.Error import DuplicateStreamHashError, NoSuchStreamHashError
 from lbrynet.core.sqlite_helpers import rerun_if_locked


+log = logging.getLogger(__name__)
+
+
 class DBLBRYFileMetadataManager(object):
     """Store and provide access to LBRY file metadata using leveldb files"""
@@ -45,7 +48,7 @@ class DBLBRYFileMetadataManager(object):
         return self._add_blobs_to_stream(stream_hash, blobs, ignore_duplicate_error=True)

     def get_blobs_for_stream(self, stream_hash, start_blob=None, end_blob=None, count=None, reverse=False):
-        logging.info("Getting blobs for a stream. Count is %s", str(count))
+        log.info("Getting blobs for a stream. Count is %s", str(count))

         def get_positions_of_start_and_end():
             if start_blob is not None:

@@ -6,11 +6,14 @@ from twisted.internet import defer
 from lbrynet.core.Error import DuplicateStreamHashError, InvalidStreamDescriptorError


+log = logging.getLogger(__name__)
+
+
 LBRYFileStreamType = "lbryfile"


 def save_sd_info(stream_info_manager, sd_info, ignore_duplicate=False):
-    logging.debug("Saving info for %s", str(sd_info['stream_name']))
+    log.debug("Saving info for %s", str(sd_info['stream_name']))
     hex_stream_name = sd_info['stream_name']
     key = sd_info['key']
     stream_hash = sd_info['stream_hash']
@@ -26,7 +29,7 @@ def save_sd_info(stream_info_manager, sd_info, ignore_duplicate=False):
         blob_num = blob['blob_num']
         iv = blob['iv']
         crypt_blobs.append(CryptBlobInfo(blob_hash, blob_num, length, iv))
-    logging.debug("Trying to save stream info for %s", str(hex_stream_name))
+    log.debug("Trying to save stream info for %s", str(hex_stream_name))
     d = stream_info_manager.save_stream(stream_hash, hex_stream_name, key,
                                         suggested_file_name, crypt_blobs)
@@ -80,7 +83,7 @@ class LBRYFileStreamDescriptorValidator(object):
         self.raw_info = raw_info

     def validate(self):
-        logging.debug("Trying to validate stream descriptor for %s", str(self.raw_info['stream_name']))
+        log.debug("Trying to validate stream descriptor for %s", str(self.raw_info['stream_name']))
         try:
             hex_stream_name = self.raw_info['stream_name']
             key = self.raw_info['key']

@@ -4,6 +4,9 @@ from lbrynet.cryptstream.CryptBlob import CryptBlobInfo
 from lbrynet.interfaces import IMetadataHandler


+log = logging.getLogger(__name__)
+
+
 class LBRYFileMetadataHandler(object):
     implements(IMetadataHandler)
@@ -31,6 +34,6 @@ class LBRYFileMetadataHandler(object):
             if blob_hash is not None:
                 infos.append(CryptBlobInfo(blob_hash, blob_num, length, iv))
             else:
-                logging.debug("Setting _final_blob_num to %s", str(blob_num - 1))
+                log.debug("Setting _final_blob_num to %s", str(blob_num - 1))
                 self._final_blob_num = blob_num - 1
         return infos

@@ -14,6 +14,9 @@ from twisted.protocols.basic import FileSender
 from lbrynet.lbryfilemanager.LBRYFileDownloader import ManagedLBRYFileDownloader


+log = logging.getLogger(__name__)
+
+
 class LBRYFileStreamCreator(CryptStreamCreator):
     """
     A CryptStreamCreator which adds itself and its additional metadata to an LBRYFileManager
@@ -30,7 +33,7 @@ class LBRYFileStreamCreator(CryptStreamCreator):
         self.blob_infos = []

     def _blob_finished(self, blob_info):
-        logging.debug("length: %s", str(blob_info.length))
+        log.debug("length: %s", str(blob_info.length))
         self.blob_infos.append(blob_info)

     def _save_lbry_file_info(self):
@@ -128,11 +131,11 @@ def create_lbry_file(session, lbry_file_manager, file_name, file_handle, key=Non
     """

     def stop_file(creator):
-        logging.debug("the file sender has triggered its deferred. stopping the stream writer")
+        log.debug("the file sender has triggered its deferred. stopping the stream writer")
         return creator.stop()

     def make_stream_desc_file(stream_hash):
-        logging.debug("creating the stream descriptor file")
+        log.debug("creating the stream descriptor file")
         descriptor_writer = PlainStreamDescriptorWriter(file_name + conf.CRYPTSD_FILE_EXTENSION)

         d = get_sd_info(lbry_file_manager.stream_info_manager, stream_hash, True)

@@ -3,7 +3,6 @@ Keep track of which LBRY Files are downloading and store their LBRY File specifi
 """

 import logging
-import json

 from twisted.enterprise import adbapi
@@ -12,12 +11,15 @@ from lbrynet.lbryfilemanager.LBRYFileDownloader import ManagedLBRYFileDownloader
 from lbrynet.lbryfilemanager.LBRYFileDownloader import ManagedLBRYFileDownloaderFactory
 from lbrynet.lbryfile.StreamDescriptor import LBRYFileStreamType
 from lbrynet.core.PaymentRateManager import PaymentRateManager
-from twisted.internet import threads, defer, task, reactor
+from twisted.internet import defer, task, reactor
 from twisted.python.failure import Failure
 from lbrynet.cryptstream.client.CryptStreamDownloader import AlreadyStoppedError, CurrentlyStoppingError
 from lbrynet.core.sqlite_helpers import rerun_if_locked


+log = logging.getLogger(__name__)
+
+
 class LBRYFileManager(object):
     """
     Keeps track of currently opened LBRY Files, their options, and their LBRY File specific metadata.
@@ -70,7 +72,7 @@ class LBRYFileManager(object):
         return self._set_lbry_file_payment_rate(stream_hash, new_rate)

     def change_lbry_file_status(self, stream_hash, status):
-        logging.debug("Changing status of %s to %s", stream_hash, status)
+        log.debug("Changing status of %s to %s", stream_hash, status)
         return self._change_file_status(stream_hash, status)

     def get_lbry_file_status_reports(self):
@@ -100,7 +102,7 @@ class LBRYFileManager(object):
             return d

         def log_error(err):
-            logging.error("An error occurred while starting a lbry file: %s", err.getErrorMessage())
+            log.error("An error occurred while starting a lbry file: %s", err.getErrorMessage())

         def start_lbry_files(stream_hashes_and_options):
             for stream_hash, options in stream_hashes_and_options:

@@ -12,6 +12,9 @@ from twisted.protocols.basic import FileSender
 from zope.interface import implements


+log = logging.getLogger(__name__)
+
+
 class LiveStreamCreator(CryptStreamCreator):
     def __init__(self, blob_manager, stream_info_manager, name=None, key=None, iv_generator=None,
                  delete_after_num=None, secret_pass_phrase=None):
@@ -30,8 +33,8 @@ class LiveStreamCreator(CryptStreamCreator):
         return d

     def _blob_finished(self, blob_info):
-        logging.debug("In blob_finished")
-        logging.debug("length: %s", str(blob_info.length))
+        log.debug("In blob_finished")
+        log.debug("length: %s", str(blob_info.length))
         sig_hash = get_lbry_hash_obj()
         sig_hash.update(self.stream_hash)
         if blob_info.length != 0:
@@ -48,11 +51,11 @@ class LiveStreamCreator(CryptStreamCreator):
         d = self.stream_info_manager.add_blobs_to_stream(self.stream_hash, [blob_info])

         def log_add_error(err):
-            logging.error("An error occurred adding a blob info to the stream info manager: %s", err.getErrorMessage())
+            log.error("An error occurred adding a blob info to the stream info manager: %s", err.getErrorMessage())
             return err

         d.addErrback(log_add_error)
-        logging.debug("returning from blob_finished")
+        log.debug("returning from blob_finished")
         return d

     def setup(self):

@@ -3,13 +3,16 @@ import logging
 from twisted.enterprise import adbapi
 import os
 import sqlite3
-from twisted.internet import threads, defer
+from twisted.internet import defer
 from twisted.python.failure import Failure
 from lbrynet.core.server.DHTHashAnnouncer import DHTHashSupplier
 from lbrynet.core.Error import DuplicateStreamHashError, NoSuchStreamHashError
 from lbrynet.core.sqlite_helpers import rerun_if_locked


+log = logging.getLogger(__name__)
+
+
 class DBLiveStreamMetadataManager(DHTHashSupplier):
     """This class stores all stream info in a leveldb database stored in the same directory as the blobfiles"""
@@ -58,7 +61,7 @@ class DBLiveStreamMetadataManager(DHTHashSupplier):
         return self._add_blobs_to_stream(stream_hash, blobs, ignore_duplicate_error=True)

     def get_blobs_for_stream(self, stream_hash, start_blob=None, end_blob=None, count=None, reverse=False):
-        logging.info("Getting blobs for a stream. Count is %s", str(count))
+        log.info("Getting blobs for a stream. Count is %s", str(count))

         def get_positions_of_start_and_end():
             if start_blob is not None:

@@ -8,11 +8,14 @@ from lbrynet.interfaces import IStreamDescriptorValidator
 from zope.interface import implements


+log = logging.getLogger(__name__)
+
+
 LiveStreamType = "lbrylive"


 def save_sd_info(stream_info_manager, sd_info, ignore_duplicate=False):
-    logging.debug("Saving info for %s", str(sd_info['stream_name']))
+    log.debug("Saving info for %s", str(sd_info['stream_name']))
     hex_stream_name = sd_info['stream_name']
     public_key = sd_info['public_key']
     key = sd_info['key']
@@ -30,7 +33,7 @@ def save_sd_info(stream_info_manager, sd_info, ignore_duplicate=False):
         iv = blob['iv']
         signature = blob['signature']
         crypt_blobs.append(LiveBlobInfo(blob_hash, blob_num, length, iv, revision, signature))
-    logging.debug("Trying to save stream info for %s", str(hex_stream_name))
+    log.debug("Trying to save stream info for %s", str(hex_stream_name))
     d = stream_info_manager.save_stream(stream_hash, public_key, hex_stream_name,
                                         key, crypt_blobs)
@@ -88,7 +91,7 @@ class LBRYLiveStreamDescriptorValidator(object):
         self.raw_info = raw_info

     def validate(self):
-        logging.debug("Trying to validate stream descriptor for %s", str(self.raw_info['stream_name']))
+        log.debug("Trying to validate stream descriptor for %s", str(self.raw_info['stream_name']))
         hex_stream_name = self.raw_info['stream_name']
         public_key = self.raw_info['public_key']
         key = self.raw_info['key']

@@ -8,8 +8,6 @@ from lbrynet.lbrylive.StreamDescriptor import save_sd_info
 from lbrynet.lbrylive.PaymentRateManager import LiveStreamPaymentRateManager
 from twisted.internet import defer, threads # , process
 from lbrynet.interfaces import IStreamDownloaderFactory
-from lbrynet.lbrylive.PaymentRateManager import BaseLiveStreamPaymentRateManager
-from lbrynet.conf import MIN_BLOB_INFO_PAYMENT_RATE
 from lbrynet.lbrylive.StreamDescriptor import LiveStreamType

@@ -12,6 +12,9 @@ from lbrynet.core.Error import InsufficientFundsError, InvalidResponseError, Req
 from lbrynet.core.Error import NoResponseError, ConnectionClosedBeforeResponseError


+log = logging.getLogger(__name__)
+
+
 class LiveStreamMetadataHandler(object):
     implements(IRequestCreator, IMetadataHandler)
@@ -79,7 +82,7 @@ class LiveStreamMetadataHandler(object):
         r = None
         if self._finished_discovery() is False:
             r = self.stream_hash
-        logging.debug("Info finder peer search response for stream %s: %s", str(self.stream_hash), str(r))
+        log.debug("Info finder peer search response for stream %s: %s", str(self.stream_hash), str(r))
         return defer.succeed(r)

     def _find_peers_for_hash(self, h):
@@ -101,7 +104,7 @@ class LiveStreamMetadataHandler(object):
             if blob_hash is not None:
                 infos.append(LiveBlobInfo(blob_hash, blob_num, length, iv, revision, signature))
             else:
-                logging.debug("Setting _final_blob_num to %s", str(blob_num - 1))
+                log.debug("Setting _final_blob_num to %s", str(blob_num - 1))
                 self._final_blob_num = blob_num - 1
         return infos
@@ -134,7 +137,7 @@ class LiveStreamMetadataHandler(object):
             further_blobs_request['count'] = count
         else:
             further_blobs_request['count'] = MAX_BLOB_INFOS_TO_REQUEST
-        logging.debug("Requesting %s blob infos from %s", str(further_blobs_request['count']), str(peer))
+        log.debug("Requesting %s blob infos from %s", str(further_blobs_request['count']), str(peer))
         r_dict = {'further_blobs': further_blobs_request}
         response_identifier = 'further_blobs'
         request = ClientPaidRequest(r_dict, response_identifier, further_blobs_request['count'])
@@ -142,7 +145,7 @@ class LiveStreamMetadataHandler(object):
         return None

     def _get_discovery_params(self):
-        logging.debug("In _get_discovery_params")
+        log.debug("In _get_discovery_params")
         stream_position = self.download_manager.stream_position()
         blobs = self.download_manager.blobs
         if blobs:
@@ -153,7 +156,7 @@ class LiveStreamMetadataHandler(object):
             if final_blob_num is not None:
                 last_blob_num = final_blob_num
         if self.download_whole is False:
-            logging.debug("download_whole is False")
+            log.debug("download_whole is False")
             if final_blob_num is not None:
                 for i in xrange(stream_position, final_blob_num + 1):
                     if not i in blobs:
@@ -171,7 +174,7 @@ class LiveStreamMetadataHandler(object):
                 return self.stream_hash, blobs[last_blob_num].blob_hash, 'end', -1 * self.max_before_skip_ahead
             else:
                 return self.stream_hash, None, 'end', -1 * self.max_before_skip_ahead
-        logging.debug("download_whole is True")
+        log.debug("download_whole is True")
         beginning = None
         end = None
         for i in xrange(stream_position, last_blob_num + 1):
@@ -187,17 +190,17 @@ class LiveStreamMetadataHandler(object):
                 break
         if beginning is None:
             if final_blob_num is not None:
-                logging.debug("Discovery is finished. stream_position: %s, last_blob_num + 1: %s", str(stream_position),
-                              str(last_blob_num + 1))
+                log.debug("Discovery is finished. stream_position: %s, last_blob_num + 1: %s", str(stream_position),
+                          str(last_blob_num + 1))
                 return None
             else:
-                logging.debug("Discovery is not finished. final blob num is unknown.")
+                log.debug("Discovery is not finished. final blob num is unknown.")
                 if last_blob_num != -1:
                     return self.stream_hash, blobs[last_blob_num].blob_hash, None, None
                 else:
                     return self.stream_hash, 'beginning', None, None
         else:
-            logging.info("Discovery is not finished. Not all blobs are known.")
+            log.info("Discovery is not finished. Not all blobs are known.")
             return self.stream_hash, beginning, end, None

     def _price_settled(self, protocol):
@@ -244,7 +247,7 @@ class LiveStreamMetadataHandler(object):
         if response == "RATE_ACCEPTED":
             return True
         else:
-            logging.info("Rate offer has been rejected by %s", str(peer))
+            log.info("Rate offer has been rejected by %s", str(peer))
             del self._protocol_prices[protocol]
             self._price_disagreements.append(peer)
             return True
@@ -263,8 +266,8 @@ class LiveStreamMetadataHandler(object):
         if not 'blob_infos' in response:
             return InvalidResponseError("Missing the required field 'blob_infos'")
         raw_blob_infos = response['blob_infos']
-        logging.info("Handling %s further blobs from %s", str(len(raw_blob_infos)), str(peer))
-        logging.debug("blobs: %s", str(raw_blob_infos))
+        log.info("Handling %s further blobs from %s", str(len(raw_blob_infos)), str(peer))
+        log.debug("blobs: %s", str(raw_blob_infos))
         for raw_blob_info in raw_blob_infos:
             length = raw_blob_info['length']
             if length != 0:
@@ -276,10 +279,10 @@ class LiveStreamMetadataHandler(object):
             iv = raw_blob_info['iv']
             signature = raw_blob_info['signature']
             blob_info = LiveBlobInfo(blob_hash, num, length, iv, revision, signature)
-            logging.debug("Learned about a potential blob: %s", str(blob_hash))
+            log.debug("Learned about a potential blob: %s", str(blob_hash))
             if self._verify_blob(blob_info):
                 if blob_hash is None:
-                    logging.info("Setting _final_blob_num to %s", str(num - 1))
+                    log.info("Setting _final_blob_num to %s", str(num - 1))
                     self._final_blob_num = num - 1
                 else:
                     blob_infos.append(blob_info)
@@ -289,7 +292,7 @@ class LiveStreamMetadataHandler(object):

         def add_blobs_to_download_manager():
             blob_nums = [b.blob_num for b in blob_infos]
-            logging.info("Adding the following blob nums to the download manager: %s", str(blob_nums))
+            log.info("Adding the following blob nums to the download manager: %s", str(blob_nums))
             self.download_manager.add_blobs_to_download(blob_infos)

         d.addCallback(lambda _: add_blobs_to_download_manager())
@@ -308,12 +311,12 @@ class LiveStreamMetadataHandler(object):
         return d

     def _verify_blob(self, blob):
-        logging.debug("Got an unverified blob to check:")
-        logging.debug("blob_hash: %s", blob.blob_hash)
-        logging.debug("blob_num: %s", str(blob.blob_num))
-        logging.debug("revision: %s", str(blob.revision))
-        logging.debug("iv: %s", blob.iv)
-        logging.debug("length: %s", str(blob.length))
+        log.debug("Got an unverified blob to check:")
+        log.debug("blob_hash: %s", blob.blob_hash)
+        log.debug("blob_num: %s", str(blob.blob_num))
+        log.debug("revision: %s", str(blob.revision))
+        log.debug("iv: %s", blob.iv)
+        log.debug("length: %s", str(blob.length))
         hashsum = get_lbry_hash_obj()
         hashsum.update(self.stream_hash)
         if blob.length != 0:
@ -322,12 +325,12 @@ class LiveStreamMetadataHandler(object):
|
||||||
hashsum.update(str(blob.revision))
|
hashsum.update(str(blob.revision))
|
||||||
hashsum.update(blob.iv)
|
hashsum.update(blob.iv)
|
||||||
hashsum.update(str(blob.length))
|
hashsum.update(str(blob.length))
|
||||||
logging.debug("hexdigest to be verified: %s", hashsum.hexdigest())
|
log.debug("hexdigest to be verified: %s", hashsum.hexdigest())
|
||||||
if verify_signature(hashsum.digest(), blob.signature, self.stream_pub_key):
|
if verify_signature(hashsum.digest(), blob.signature, self.stream_pub_key):
|
||||||
logging.debug("Blob info is valid")
|
log.debug("Blob info is valid")
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
logging.debug("The blob info is invalid")
|
log.debug("The blob info is invalid")
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def _request_failed(self, reason, peer):
|
def _request_failed(self, reason, peer):
|
||||||
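
The two hunks above show the shape of the signed-digest check in _verify_blob: the stream hash and the blob's fields are folded into a single hash object, and the digest is then checked against the blob's signature under the stream's public key. A standalone sketch of that flow, hedged: get_lbry_hash_obj() is assumed to be a SHA-384 factory, verify_signature is treated as an opaque callable, and the two context lines elided between the hunks are assumed to hash in the blob hash and blob number. Python 2-era code, like the surrounding diff, so str values can be fed to update() directly.

import hashlib

def check_blob_info(stream_hash, blob, stream_pub_key, verify_signature):
    # Rebuild the digest the same way _verify_blob does.
    hashsum = hashlib.sha384()          # assumed stand-in for get_lbry_hash_obj()
    hashsum.update(stream_hash)
    if blob.length != 0:
        hashsum.update(blob.blob_hash)  # assumed from the elided context lines
    hashsum.update(str(blob.blob_num))  # assumed from the elided context lines
    hashsum.update(str(blob.revision))
    hashsum.update(blob.iv)
    hashsum.update(str(blob.length))
    # A forged or corrupted blob info fails here instead of being fetched.
    return verify_signature(hashsum.digest(), blob.signature, stream_pub_key)
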
@@ -336,7 +339,7 @@ class LiveStreamMetadataHandler(object):
        if reason.check(NoResponseError):
            self._incompatible_peers.append(peer)
            return
-        logging.warning("Crypt stream info finder: a request failed. Reason: %s", reason.getErrorMessage())
+        log.warning("Crypt stream info finder: a request failed. Reason: %s", reason.getErrorMessage())
        self._update_local_score(peer, -5.0)
        peer.update_score(-10.0)
        if reason.check(ConnectionClosedBeforeResponseError):

@@ -3,6 +3,9 @@ from lbrynet.core.client.StreamProgressManager import StreamProgressManager
 from twisted.internet import defer


+log = logging.getLogger(__name__)
+
+
 class LiveStreamProgressManager(StreamProgressManager):
     def __init__(self, finished_callback, blob_manager, download_manager, delete_blob_after_finished=False,
                  download_whole=True, max_before_skip_ahead=5):
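
The three added lines are the pattern this commit repeats in every touched module. A minimal sketch of what the named logger buys, stdlib only: the logger's name follows the module path, records propagate up the dot-separated hierarchy to the root logger's handlers, and the library module itself configures nothing.

import logging

log = logging.getLogger(__name__)   # e.g. "lbrynet.lbrylive.LiveStreamProgressManager"

def do_work():
    # The record carries this module's name; where it ends up is decided by
    # whatever handlers (and filters) the application hung on the root logger.
    log.debug("starting work")
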
@@ -52,9 +55,9 @@ class LiveStreamProgressManager(StreamProgressManager):
            return

        blobs = self.download_manager.blobs
-        logging.info("In _output_loop. last_blob_outputted: %s", str(self.last_blob_outputted))
+        log.info("In _output_loop. last_blob_outputted: %s", str(self.last_blob_outputted))
        if blobs:
-            logging.debug("Newest blob number: %s", str(max(blobs.iterkeys())))
+            log.debug("Newest blob number: %s", str(max(blobs.iterkeys())))
        if self.outputting_d is None:
            self.outputting_d = defer.Deferred()

@@ -71,14 +74,14 @@ class LiveStreamProgressManager(StreamProgressManager):
            reactor.callLater(0, self._output_loop)

        if current_blob_num in blobs and blobs[current_blob_num].is_validated():
-            logging.info("Outputting blob %s", str(current_blob_num))
+            log.info("Outputting blob %s", str(current_blob_num))
            self.provided_blob_nums.append(current_blob_num)
            d = self.download_manager.handle_blob(current_blob_num)
            d.addCallback(lambda _: finished_outputting_blob())
            d.addCallback(lambda _: self._finished_with_blob(current_blob_num))
        elif blobs and max(blobs.iterkeys()) > self.last_blob_outputted + self.max_before_skip_ahead - 1:
            self.last_blob_outputted += 1
-            logging.info("Skipping blob number %s due to knowing about blob number %s",
+            log.info("Skipping blob number %s due to knowing about blob number %s",
                         str(self.last_blob_outputted), str(max(blobs.iterkeys())))
            self._finished_with_blob(current_blob_num)
            reactor.callLater(0, self._output_loop)
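
The elif in this hunk is the live-stream skip-ahead rule: once the newest known blob number runs max_before_skip_ahead or more past the last blob written out, the loop abandons the missing blob rather than stall playback. A sketch of just that predicate, with a hypothetical helper name:

def should_skip_ahead(last_blob_outputted, known_blob_nums, max_before_skip_ahead=5):
    # Same comparison as the elif above, pulled out for illustration.
    if not known_blob_nums:
        return False
    return max(known_blob_nums) > last_blob_outputted + max_before_skip_ahead - 1

# With the default window of 5: blob 4 was written last and blob 9 is already
# known, so 9 > 4 + 5 - 1 holds and the stream skips past the gap.
assert should_skip_ahead(4, {4, 6, 9}) is True
assert should_skip_ahead(4, {5, 6, 7}) is False
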
@@ -4,6 +4,9 @@ from zope.interface import implements
 from lbrynet.interfaces import IQueryHandlerFactory, IQueryHandler


+log = logging.getLogger(__name__)
+
+
 class CryptBlobInfoQueryHandlerFactory(object):
     implements(IQueryHandlerFactory)

@@ -54,7 +57,7 @@ class CryptBlobInfoQueryHandler(object):

        if self.query_identifiers[1] in queries:
            further_blobs_request = queries[self.query_identifiers[1]]
-            logging.debug("Received the client's request for additional blob information")
+            log.debug("Received the client's request for additional blob information")

            if self.blob_info_payment_rate is None:
                response['further_blobs'] = {'error': 'RATE_UNSET'}
@@ -62,7 +65,7 @@ class CryptBlobInfoQueryHandler(object):

        def count_and_charge(blob_infos):
            if len(blob_infos) != 0:
-                logging.info("Responding with %s infos", str(len(blob_infos)))
+                log.info("Responding with %s infos", str(len(blob_infos)))
                expected_payment = 1.0 * len(blob_infos) * self.blob_info_payment_rate / 1000.0
                self.wallet.add_expected_payment(self.peer, expected_payment)
                self.peer.update_stats('uploaded_crypt_blob_infos', len(blob_infos))
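
The expected_payment line in count_and_charge reads as a price per thousand infos (an inference from the /1000.0 scaling, not something the diff states). The arithmetic, pulled out with a hypothetical helper:

def expected_payment(num_infos, rate_per_thousand):
    # Mirrors count_and_charge: scale the per-thousand rate down to this batch.
    return 1.0 * num_infos * rate_per_thousand / 1000.0

# 250 infos at a rate of 0.5 per thousand come to 0.125 credits.
assert expected_payment(250, 0.5) == 0.125
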
@@ -9,6 +9,9 @@ from lbrynet.core.StreamDescriptor import download_sd_blob
 from twisted.internet import defer


+log = logging.getLogger(__name__)
+
+
 class InvalidChoiceError(Exception):
     pass

@@ -335,7 +338,7 @@ class AddStream(ControlHandler):

    def _handle_load_failed(self, err):
        self.loading_failed = True
-        logging.error("An exception occurred attempting to load the stream descriptor: %s", err.getTraceback())
+        log.error("An exception occurred attempting to load the stream descriptor: %s", err.getTraceback())
        return defer.succeed("Encountered a problem while loading the stream descriptor: %s\n"
                             "See console.log for further details.\n"
                             "Press enter to continue" % err.getErrorMessage())

@@ -36,6 +36,9 @@ from lbrynet.lbrynet_console.ControlHandlers import PeerStatsAndSettingsChooserF
 from lbrynet.core.LBRYcrdWallet import LBRYcrdWallet


+log = logging.getLogger(__name__)
+
+
 class LBRYConsole():
     """A class which can upload and download file streams to and from the network"""
     def __init__(self, peer_port, dht_node_port, known_dht_nodes, control_class, wallet_type, lbrycrd_conf,
@@ -132,10 +135,10 @@ class LBRYConsole():
            db_revision = open(os.path.join(self.conf_dir, "db_revision"), mode='w')
            db_revision.write(str(self.current_db_revision))
            db_revision.close()
-            logging.debug("Created the configuration directory: %s", str(self.conf_dir))
+            log.debug("Created the configuration directory: %s", str(self.conf_dir))
        if not os.path.exists(self.data_dir):
            os.makedirs(self.data_dir)
-            logging.debug("Created the data directory: %s", str(self.data_dir))
+            log.debug("Created the data directory: %s", str(self.data_dir))

    def _check_db_migration(self):
        old_revision = 0
@@ -494,8 +497,14 @@ def launch_lbry_console():


    log_format = "(%(asctime)s)[%(filename)s:%(lineno)s] %(funcName)s(): %(message)s"
-    logging.basicConfig(level=logging.DEBUG, filename=os.path.join(conf_dir, "console.log"),
-                        format=log_format)
+    formatter = logging.Formatter(log_format)
+
+    logger = logging.getLogger()
+    logger.setLevel(logging.DEBUG)
+    file_handler = logging.FileHandler(os.path.join(conf_dir, "console.log"))
+    file_handler.setFormatter(formatter)
+    file_handler.addFilter(logging.Filter("lbrynet"))
+    logger.addHandler(file_handler)

    console = LBRYConsole(peer_port, dht_node_port, bootstrap_nodes, StdIOControl, wallet_type=args.wallet_type,
                          lbrycrd_conf=lbrycrd_conf, use_upnp=args.use_upnp,
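
The basicConfig call gives way to an explicit handler so a name filter can be attached: logging.Filter("lbrynet") passes only records whose logger name is "lbrynet" or begins with "lbrynet.", which is exactly what the log = logging.getLogger(__name__) lines added elsewhere in this commit provide. A self-contained sketch of the effect:

import logging

handler = logging.StreamHandler()             # stand-in for the FileHandler above
handler.addFilter(logging.Filter("lbrynet"))

root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(handler)

logging.getLogger("lbrynet.core.BlobManager").debug("written out")   # passes the filter
logging.getLogger("twisted.internet").debug("dropped")               # filtered away
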

@@ -6,6 +6,9 @@ import os
 from twisted.internet import threads, defer


+log = logging.getLogger(__name__)
+
+
 class LBRYSettings(object):
     def __init__(self, db_dir):
         self.db_dir = db_dir
@@ -19,7 +22,7 @@ class LBRYSettings(object):
        return defer.succeed(True)

    def _open_db(self):
-        logging.debug("Opening %s as the settings database", str(os.path.join(self.db_dir, "settings.db")))
+        log.debug("Opening %s as the settings database", str(os.path.join(self.db_dir, "settings.db")))
        self.db = unqlite.UnQLite(os.path.join(self.db_dir, "settings.db"))
        return defer.succeed(True)

@@ -12,6 +12,9 @@ from twisted.python.failure import Failure
 from collections import defaultdict


+log = logging.getLogger(__name__)
+
+
 class BlindMetadataHandler(object):
     implements(IMetadataHandler, IRequestCreator)

@@ -195,7 +198,7 @@ class BlindMetadataHandler(object):
        if not 'valuable_blob_hashes' in response:
            return InvalidResponseError("Missing the required field 'valuable_blob_hashes'")
        hashes = response['valuable_blob_hashes']
-        logging.info("Handling %s valuable blob hashes from %s", str(len(hashes)), str(peer))
+        log.info("Handling %s valuable blob hashes from %s", str(len(hashes)), str(peer))
        expire_time = datetime.datetime.now() + datetime.timedelta(minutes=10)
        reference = None
        unique_hashes = set()
@@ -230,8 +233,8 @@ class BlindMetadataHandler(object):
        if not 'blob_lengths' in response:
            return InvalidResponseError("Missing the required field 'blob_lengths'")
        raw_blob_lengths = response['blob_lengths']
-        logging.info("Handling %s blob lengths from %s", str(len(raw_blob_lengths)), str(peer))
-        logging.debug("blobs: %s", str(raw_blob_lengths))
+        log.info("Handling %s blob lengths from %s", str(len(raw_blob_lengths)), str(peer))
+        log.debug("blobs: %s", str(raw_blob_lengths))
        infos = []
        unique_hashes = set()
        for blob_hash, length in raw_blob_lengths:
@@ -288,7 +291,7 @@ class BlindMetadataHandler(object):
        if reason.check(NoResponseError):
            self._incompatible_peers.append(peer)
            return
-        logging.warning("Valuable blob info requester: a request of type %s has failed. Reason: %s",
+        log.warning("Valuable blob info requester: a request of type %s has failed. Reason: %s",
                        str(request_type), str(reason.getErrorMessage()))
        self._update_local_score(peer, -10.0)
        peer.update_score(-5.0)

@@ -4,6 +4,9 @@ from twisted.internet import defer
 import logging


+log = logging.getLogger(__name__)
+
+
 class ValuableQueryHandler(object):
     implements(IQueryHandler)

@@ -98,7 +101,7 @@ class ValuableBlobHashQueryHandler(ValuableQueryHandler):
            for blob_hash, count in valuable_hashes:
                hashes_and_scores.append((blob_hash, 1.0 * count / 10.0))
            if len(hashes_and_scores) != 0:
-                logging.info("Responding to a valuable blob hashes request with %s blob hashes: %s",
+                log.info("Responding to a valuable blob hashes request with %s blob hashes: %s",
                             str(len(hashes_and_scores)))
                expected_payment = 1.0 * len(hashes_and_scores) * self.valuable_blob_hash_payment_rate / 1000.0
                self.wallet.add_expected_payment(self.peer, expected_payment)
@@ -190,7 +193,7 @@ class ValuableBlobLengthQueryHandler(ValuableQueryHandler):
            if success is True:
                lengths.append(response_pair)
            if len(lengths) > 0:
-                logging.info("Responding with %s blob lengths: %s", str(len(lengths)))
+                log.info("Responding with %s blob lengths: %s", str(len(lengths)))
                expected_payment = 1.0 * len(lengths) * self.blob_length_payment_rate / 1000.0
                self.wallet.add_expected_payment(self.peer, expected_payment)
                self.peer.update_stats('uploaded_valuable_blob_infos', len(lengths))

@@ -11,6 +11,9 @@ import os
 from twisted.internet import defer, reactor, tksupport, task


+log = logging.getLogger(__name__)
+
+
 class DownloaderApp(object):
     def __init__(self):
         self.master = None
@@ -27,7 +30,7 @@ class DownloaderApp(object):
        d.addCallback(lambda _: self._enable_lookup())

        def show_error_and_stop(err):
-            logging.error(err.getErrorMessage())
+            log.error(err.getErrorMessage())
            tkMessageBox.showerror(title="Start Error", message=err.getErrorMessage())
            return self.stop()

@@ -37,7 +40,7 @@ class DownloaderApp(object):
    def stop(self):

        def log_error(err):
-            logging.error(err.getErrorMessage())
+            log.error(err.getErrorMessage())

        if self.downloader is not None:
            d = self.downloader.stop()

@@ -22,6 +22,9 @@ import requests
 from twisted.internet import threads, defer, task


+log = logging.getLogger(__name__)
+
+
 class LBRYDownloader(object):
     def __init__(self):
         self.session = None
@@ -109,13 +112,13 @@ class LBRYDownloader(object):

        def get_configuration():
            if not os.path.exists("lbry.conf"):
-                logging.debug("Could not read lbry.conf")
+                log.debug("Could not read lbry.conf")
                return ""
            else:
                lbry_conf = open("lbry.conf")
-                logging.debug("Loading configuration options from lbry.conf")
+                log.debug("Loading configuration options from lbry.conf")
                lines = lbry_conf.readlines()
-                logging.debug("lbry.conf file contents:\n%s", str(lines))
+                log.debug("lbry.conf file contents:\n%s", str(lines))
                return lines

        d = threads.deferToThread(get_configuration)
@@ -143,7 +146,7 @@ class LBRYDownloader(object):
                        known_nodes.append((ip_address, int(port_string)))
                except (ValueError, AssertionError):
                    raise ValueError("Expected known nodes in format 192.168.1.1:4000,192.168.1.2:4001. Got %s" % str(field_value))
-                logging.debug("Setting known_dht_nodes to %s", str(known_nodes))
+                log.debug("Setting known_dht_nodes to %s", str(known_nodes))
                self.known_dht_nodes = known_nodes
            elif field_name == "run_server":
                if field_value.lower() == "true":
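
The raise message in this hunk's context implies the accepted known_dht_nodes syntax: comma-separated host:port pairs. A hedged sketch of that parsing, with a hypothetical helper standing in for the inline loop the diff only partially shows:

def parse_known_nodes(field_value):
    # "192.168.1.1:4000,192.168.1.2:4001" -> [("192.168.1.1", 4000), ("192.168.1.2", 4001)]
    known_nodes = []
    for pair in field_value.split(","):
        ip_address, port_string = pair.split(":")
        port = int(port_string)   # ValueError on junk, as the except clause above expects
        assert 0 <= port <= 65535
        known_nodes.append((ip_address, port))
    return known_nodes
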
@@ -152,25 +155,25 @@ class LBRYDownloader(object):
                    run_server = False
                else:
                    raise ValueError("run_server must be set to True or False. Got %s" % field_value)
-                logging.debug("Setting run_server to %s", str(run_server))
+                log.debug("Setting run_server to %s", str(run_server))
                self.run_server = run_server
            elif field_name == "db_dir":
-                logging.debug("Setting conf_dir to %s", str(field_value))
+                log.debug("Setting conf_dir to %s", str(field_value))
                self.conf_dir = field_value
            elif field_name == "data_dir":
-                logging.debug("Setting data_dir to %s", str(field_value))
+                log.debug("Setting data_dir to %s", str(field_value))
                self.data_dir = field_value
            elif field_name == "wallet_dir":
-                logging.debug("Setting wallet_dir to %s", str(field_value))
+                log.debug("Setting wallet_dir to %s", str(field_value))
                self.wallet_dir = field_value
            elif field_name == "wallet_conf":
-                logging.debug("Setting wallet_conf to %s", str(field_value))
+                log.debug("Setting wallet_conf to %s", str(field_value))
                self.wallet_conf = field_value
            elif field_name == "peer_port":
                try:
                    peer_port = int(field_value)
                    assert 0 <= peer_port <= 65535
-                    logging.debug("Setting peer_port to %s", str(peer_port))
+                    log.debug("Setting peer_port to %s", str(peer_port))
                    self.peer_port = peer_port
                except (ValueError, AssertionError):
                    raise ValueError("peer_port must be set to an integer between 1 and 65535. Got %s" % field_value)
@@ -178,7 +181,7 @@ class LBRYDownloader(object):
                try:
                    dht_port = int(field_value)
                    assert 0 <= dht_port <= 65535
-                    logging.debug("Setting dht_node_port to %s", str(dht_port))
+                    log.debug("Setting dht_node_port to %s", str(dht_port))
                    self.dht_node_port = dht_port
                except (ValueError, AssertionError):
                    raise ValueError("dht_port must be set to an integer between 1 and 65535. Got %s" % field_value)
@@ -189,13 +192,13 @@ class LBRYDownloader(object):
                    use_upnp = False
                else:
                    raise ValueError("use_upnp must be set to True or False. Got %s" % str(field_value))
-                logging.debug("Setting use_upnp to %s", str(use_upnp))
+                log.debug("Setting use_upnp to %s", str(use_upnp))
                self.use_upnp = use_upnp
            elif field_name == "default_blob_data_payment_rate":
                try:
                    rate = float(field_value)
                    assert rate >= 0.0
-                    logging.debug("Setting default_blob_data_payment_rate to %s", str(rate))
+                    log.debug("Setting default_blob_data_payment_rate to %s", str(rate))
                    self.default_blob_data_payment_rate = rate
                except (ValueError, AssertionError):
                    raise ValueError("default_blob_data_payment_rate must be a positive floating point number, e.g. 0.5. Got %s" % str(field_value))
@@ -206,10 +209,10 @@ class LBRYDownloader(object):
                    start_lbrycrdd = False
                else:
                    raise ValueError("start_lbrycrdd must be set to True or False. Got %s" % field_value)
-                logging.debug("Setting start_lbrycrdd to %s", str(start_lbrycrdd))
+                log.debug("Setting start_lbrycrdd to %s", str(start_lbrycrdd))
                self.start_lbrycrdd = start_lbrycrdd
            elif field_name == "download_directory":
-                logging.debug("Setting download_directory to %s", str(field_value))
+                log.debug("Setting download_directory to %s", str(field_value))
                self.download_directory = field_value
            elif field_name == "delete_blobs_on_stream_remove":
                if field_value.lower() == "true":
@@ -219,7 +222,7 @@ class LBRYDownloader(object):
                else:
                    raise ValueError("delete_blobs_on_stream_remove must be set to True or False")
            else:
-                logging.warning("Got unknown configuration field: %s", field_name)
+                log.warning("Got unknown configuration field: %s", field_name)

        d.addCallback(load_configuration)
        return d
@@ -230,10 +233,10 @@ class LBRYDownloader(object):
            db_revision = open(os.path.join(self.conf_dir, "db_revision"), mode='w')
            db_revision.write(str(self.current_db_revision))
            db_revision.close()
-            logging.debug("Created the configuration directory: %s", str(self.conf_dir))
+            log.debug("Created the configuration directory: %s", str(self.conf_dir))
        if not os.path.exists(self.data_dir):
            os.makedirs(self.data_dir)
-            logging.debug("Created the data directory: %s", str(self.data_dir))
+            log.debug("Created the data directory: %s", str(self.data_dir))
        if not os.path.exists(self.wallet_dir):
            os.makedirs(self.wallet_dir)
        if not os.path.exists(self.wallet_conf):
@@ -323,7 +326,7 @@ class LBRYDownloader(object):
            return 0.0

        def log_error(err):
-            logging.warning("unable to request free credits. %s", err.getErrorMessage())
+            log.warning("unable to request free credits. %s", err.getErrorMessage())
            return 0.0

        def request_credits(address):
@@ -468,7 +471,7 @@ class LBRYDownloader(object):

        def show_err(err):
            tkMessageBox.showerror(title="Download Error", message=err.getErrorMessage())
-            logging.error(err.getErrorMessage())
+            log.error(err.getErrorMessage())
            stream_frame.show_download_done(payment_rate_manager.points_paid)

        resolve_d.addErrback(lambda err: err.trap(defer.CancelledError, UnknownNameError,

@@ -9,7 +9,15 @@ import locale
 def start_downloader():

     log_format = "(%(asctime)s)[%(filename)s:%(lineno)s] %(funcName)s(): %(message)s"
-    logging.basicConfig(level=logging.DEBUG, format=log_format, filename="downloader.log")
+    formatter = logging.Formatter(log_format)
+
+    logger = logging.getLogger()
+    logger.setLevel(logging.DEBUG)
+    file_handler = logging.FileHandler("downloader.log")
+    file_handler.setFormatter(formatter)
+    file_handler.addFilter(logging.Filter("lbrynet"))
+    logger.addHandler(file_handler)

     sys.stdout = open("downloader.out.log", 'w')
     sys.stderr = open("downloader.err.log", 'w')
@@ -42,7 +42,6 @@ from lbrynet.lbrylive.client.LiveStreamDownloader import add_full_live_stream_do

 log_format = "%(funcName)s(): %(message)s"
 logging.basicConfig(level=logging.WARNING, format=log_format)
-logging.debug("test")


 class FakeNode(object):