commit 7240ff6b1cc93e20d6fca2e8726364d80bd041ee Author: Jimmy Kiselak Date: Thu Aug 20 11:27:15 2015 -0400 initial commit diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..0d20b6487 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +*.pyc diff --git a/INSTALL b/INSTALL new file mode 100644 index 000000000..30c63da12 --- /dev/null +++ b/INSTALL @@ -0,0 +1,34 @@ +Prerequisites +------------- + +To use the LBRYWallet, which enables spending and accepting LBRYcrds in exchange for data, the +LBRYcrd application (insert link to LBRYcrd website here) must be installed and running. If +this is not desired, the testing client can be used to simulate trading points, which is +built into LBRYnet. + +on Ubuntu: + +sudo apt-get install libgmp3-dev build-essential python-dev python-pip + +Getting the source +------------------ + +Don't you already have it? + +Setting up the environment +-------------------------- + +It's recommended that you use a virtualenv + +sudo apt-get install python-virtualenv +cd +virtualenv . +source bin/activate + +(to deactivate the virtualenv, enter 'deactivate') + +python setup.py install + +this will install all of the libraries and a few applications + +For running the file sharing application, see RUNNING \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..95dc562d9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2015, LBRY, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/README b/README new file mode 100644 index 000000000..d678d5801 --- /dev/null +++ b/README @@ -0,0 +1,57 @@ +LBRYnet +======= + +LBRYnet is a fully decentralized network for distributing data. It consists of peers uploading +and downloading data from other peers, possibly in exchange for payments, and a distributed hash +table, used by peers to discover other peers. + +Overview +-------- + +On LBRYnet, data is broken into chunks, and each chunk is specified by its sha384 hash sum. This +guarantees that peers can verify the correctness of each chunk without having to know anything +about its contents, and can confidently re-transmit the chunk to other peers. 
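+
+For illustration, verifying a chunk amounts to recomputing its hash sum and comparing it against
+the advertised one. The snippet below is a minimal sketch using Python's standard hashlib module;
+it is not the actual LBRYnet code, and the function name is purely illustrative:
+
+    import hashlib
+
+    def verify_chunk(data, expected_hash):
+        """Return True if data hashes to the expected hex-encoded sha384 sum."""
+        return hashlib.sha384(data).hexdigest() == expected_hash
+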
+Peers wishing to transmit chunks to other peers announce to the distributed hash table that they
+are associated with the sha384 hash sum in question. When a peer wants to download that chunk
+from the network, it asks the distributed hash table which peers are associated with that sha384
+hash sum. The distributed hash table can also be used more generally. It simply stores IP
+addresses and ports which are associated with 384-bit numbers, and can be used by any type of
+application to help peers find each other. For example, an application for which clients don't
+know all of the necessary chunks may use some identifier, chosen by the application, to find
+clients which do know all of the necessary chunks.
+
+Running
+-------
+
+LBRYnet comes with a file sharing application, called 'lbrynet-console', which breaks
+files into chunks, encrypts them with a symmetric key, computes their sha384 hash sums, generates
+a special file called a 'stream descriptor' containing the hash sums and some other file metadata,
+and makes the chunks available for download by other peers. A peer wishing to download the file
+must first obtain the 'stream descriptor' and then may open it with his 'lbrynet-console' client,
+download all of the chunks by locating peers with the chunks via the DHT, and then combine the
+chunks into the original file, according to the metadata included in the 'stream descriptor'.
+
+To install and use this client, see INSTALL and RUNNING
+
+Installation
+------------
+
+See INSTALL
+
+Developers
+----------
+
+Documentation: doc.lbry.io
+Source code: trac.lbry.io/browser
+
+To contribute to the development of LBRYnet or lbrynet-console, contact jimmy@lbry.io
+
+Support
+-------
+
+Send all support requests to jimmy@lbry.io
+
+License
+-------
+
+See LICENSE
\ No newline at end of file
diff --git a/RUNNING b/RUNNING
new file mode 100644
index 000000000..b6b64031f
--- /dev/null
+++ b/RUNNING
@@ -0,0 +1,52 @@
+To install LBRYnet and lbrynet-console, see INSTALL
+
+lbrynet-console is a console application which makes use of LBRYnet to share files.
+
+In particular, lbrynet-console splits files into encrypted chunks of data compatible with
+LBRYnet and groups all metadata into a 'stream descriptor file', which can be sent directly to
+others wishing to obtain the file, or can itself be turned into a chunk compatible with
+LBRYnet and downloaded via LBRYnet by anyone knowing its sha384 hashsum. lbrynet-console
+also acts as a client which reads a stream descriptor file, downloads the chunks of data
+specified by the hash sums found in the stream descriptor file, decrypts them according to
+metadata found in the stream, and reconstructs the original file. lbrynet-console features
+a server so that clients can connect to it and download the chunks and other data obtained
+from files created locally and files that have been downloaded from LBRYnet.
+
+lbrynet-console also has a plugin system. There are two plugins: a live stream proof of
+concept which is currently far behind the development of the rest of the application and
+therefore will not run, and a plugin which attempts to determine which chunks on the
+network should be downloaded in order for the application to turn a profit. It will run,
+but its usefulness is extremely limited.
+
+Passing '--help' to lbrynet-console will cause it to print out a quick help message
+describing other command line options to the application.
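+
+Conceptually, the chunking and stream descriptor generation described above boil down to
+splitting a file into fixed-size pieces and recording the sha384 hash sum of each piece. The
+sketch below is for illustration only and uses Python's standard hashlib and json modules; the
+real application also encrypts each chunk with a symmetric key before hashing, and the field
+names shown here are hypothetical rather than the actual stream descriptor format:
+
+    import hashlib
+    import json
+
+    CHUNK_SIZE = 2 ** 21  # 2 MiB, mirroring the BLOB_SIZE constant in lbrynet/conf.py
+
+    def make_descriptor(file_path):
+        # Read the file in fixed-size chunks and record each chunk's hash sum.
+        chunk_hashes = []
+        with open(file_path, 'rb') as f:
+            while True:
+                chunk = f.read(CHUNK_SIZE)
+                if not chunk:
+                    break
+                chunk_hashes.append(hashlib.sha384(chunk).hexdigest())
+        return json.dumps({'file_name': file_path, 'chunk_hashes': chunk_hashes})
+
+A peer holding such a descriptor can then fetch each chunk from the network, verify it against
+the recorded hash sum, and reassemble the original file.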
+ +Once the application has been started, the user is presented with a numbered list of +actions which looks something like this: + +... +[2] Toggle whether an LBRY File is running +[3] Create an LBRY File from file +[4] Publish a stream descriptor file to the DHT for an LBRY File +... + +To perform an action, type the desired number and then hit enter. For example, if you wish +to create an LBRY file from a file as described in the beginning of this document, type 3 and +hit enter. + +If the application needs more input in order to for the action to be taken, the application +will continue to print prompts for input until it has received what it needs. + +For example, when creating an LBRY file from a file, the application needs to know which file +it's supposed to use to create the LBRY file, so the user will be prompted for it: + +File name: + +The user should input the desired file name and hit enter, at which point the application +will go about splitting the file and making it available on the network. + +Some actions will produce sub-menus of actions, which work the same way. + +A more detailed user guide is available at doc.lbry.io + +Any issues may be reported to jimmy@lbry.io \ No newline at end of file diff --git a/ez_setup.py b/ez_setup.py new file mode 100644 index 000000000..1bcd3e94c --- /dev/null +++ b/ez_setup.py @@ -0,0 +1,332 @@ +#!/usr/bin/env python +"""Bootstrap setuptools installation + +To use setuptools in your package's setup.py, include this +file in the same directory and add this to the top of your setup.py:: + + from ez_setup import use_setuptools + use_setuptools() + +To require a specific version of setuptools, set a download +mirror, or use an alternate download directory, simply supply +the appropriate options to ``use_setuptools()``. + +This file can also be run as a script to install or upgrade setuptools. +""" +import os +import shutil +import sys +import tempfile +import zipfile +import optparse +import subprocess +import platform +import textwrap +import contextlib + +from distutils import log + +try: + from urllib.request import urlopen +except ImportError: + from urllib2 import urlopen + +try: + from site import USER_SITE +except ImportError: + USER_SITE = None + +DEFAULT_VERSION = "4.0.1" +DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/" + +def _python_cmd(*args): + """ + Return True if the command succeeded. 
+ """ + args = (sys.executable,) + args + return subprocess.call(args) == 0 + + +def _install(archive_filename, install_args=()): + with archive_context(archive_filename): + # installing + log.warn('Installing Setuptools') + if not _python_cmd('setup.py', 'install', *install_args): + log.warn('Something went wrong during the installation.') + log.warn('See the error message above.') + # exitcode will be 2 + return 2 + + +def _build_egg(egg, archive_filename, to_dir): + with archive_context(archive_filename): + # building an egg + log.warn('Building a Setuptools egg in %s', to_dir) + _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) + # returning the result + log.warn(egg) + if not os.path.exists(egg): + raise IOError('Could not build the egg.') + + +class ContextualZipFile(zipfile.ZipFile): + """ + Supplement ZipFile class to support context manager for Python 2.6 + """ + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def __new__(cls, *args, **kwargs): + """ + Construct a ZipFile or ContextualZipFile as appropriate + """ + if hasattr(zipfile.ZipFile, '__exit__'): + return zipfile.ZipFile(*args, **kwargs) + return super(ContextualZipFile, cls).__new__(cls) + + +@contextlib.contextmanager +def archive_context(filename): + # extracting the archive + tmpdir = tempfile.mkdtemp() + log.warn('Extracting in %s', tmpdir) + old_wd = os.getcwd() + try: + os.chdir(tmpdir) + with ContextualZipFile(filename) as archive: + archive.extractall() + + # going in the directory + subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) + os.chdir(subdir) + log.warn('Now working in %s', subdir) + yield + + finally: + os.chdir(old_wd) + shutil.rmtree(tmpdir) + + +def _do_download(version, download_base, to_dir, download_delay): + egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg' + % (version, sys.version_info[0], sys.version_info[1])) + if not os.path.exists(egg): + archive = download_setuptools(version, download_base, + to_dir, download_delay) + _build_egg(egg, archive, to_dir) + sys.path.insert(0, egg) + + # Remove previously-imported pkg_resources if present (see + # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details). + if 'pkg_resources' in sys.modules: + del sys.modules['pkg_resources'] + + import setuptools + setuptools.bootstrap_install_from = egg + + +def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, + to_dir=os.curdir, download_delay=15): + to_dir = os.path.abspath(to_dir) + rep_modules = 'pkg_resources', 'setuptools' + imported = set(sys.modules).intersection(rep_modules) + try: + import pkg_resources + except ImportError: + return _do_download(version, download_base, to_dir, download_delay) + try: + pkg_resources.require("setuptools>=" + version) + return + except pkg_resources.DistributionNotFound: + return _do_download(version, download_base, to_dir, download_delay) + except pkg_resources.VersionConflict as VC_err: + if imported: + msg = textwrap.dedent(""" + The required version of setuptools (>={version}) is not available, + and can't be installed while this script is running. Please + install a more recent version first, using + 'easy_install -U setuptools'. 
+ + (Currently using {VC_err.args[0]!r}) + """).format(VC_err=VC_err, version=version) + sys.stderr.write(msg) + sys.exit(2) + + # otherwise, reload ok + del pkg_resources, sys.modules['pkg_resources'] + return _do_download(version, download_base, to_dir, download_delay) + +def _clean_check(cmd, target): + """ + Run the command to download target. If the command fails, clean up before + re-raising the error. + """ + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + if os.access(target, os.F_OK): + os.unlink(target) + raise + +def download_file_powershell(url, target): + """ + Download the file at url to target using Powershell (which will validate + trust). Raise an exception if the command cannot complete. + """ + target = os.path.abspath(target) + ps_cmd = ( + "[System.Net.WebRequest]::DefaultWebProxy.Credentials = " + "[System.Net.CredentialCache]::DefaultCredentials; " + "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" + % vars() + ) + cmd = [ + 'powershell', + '-Command', + ps_cmd, + ] + _clean_check(cmd, target) + +def has_powershell(): + if platform.system() != 'Windows': + return False + cmd = ['powershell', '-Command', 'echo test'] + with open(os.path.devnull, 'wb') as devnull: + try: + subprocess.check_call(cmd, stdout=devnull, stderr=devnull) + except Exception: + return False + return True + +download_file_powershell.viable = has_powershell + +def download_file_curl(url, target): + cmd = ['curl', url, '--silent', '--output', target] + _clean_check(cmd, target) + +def has_curl(): + cmd = ['curl', '--version'] + with open(os.path.devnull, 'wb') as devnull: + try: + subprocess.check_call(cmd, stdout=devnull, stderr=devnull) + except Exception: + return False + return True + +download_file_curl.viable = has_curl + +def download_file_wget(url, target): + cmd = ['wget', url, '--quiet', '--output-document', target] + _clean_check(cmd, target) + +def has_wget(): + cmd = ['wget', '--version'] + with open(os.path.devnull, 'wb') as devnull: + try: + subprocess.check_call(cmd, stdout=devnull, stderr=devnull) + except Exception: + return False + return True + +download_file_wget.viable = has_wget + +def download_file_insecure(url, target): + """ + Use Python to download the file, even though it cannot authenticate the + connection. + """ + src = urlopen(url) + try: + # Read all the data in one block. + data = src.read() + finally: + src.close() + + # Write all the data in one block to avoid creating a partial file. + with open(target, "wb") as dst: + dst.write(data) + +download_file_insecure.viable = lambda: True + +def get_best_downloader(): + downloaders = ( + download_file_powershell, + download_file_curl, + download_file_wget, + download_file_insecure, + ) + viable_downloaders = (dl for dl in downloaders if dl.viable()) + return next(viable_downloaders, None) + +def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, + to_dir=os.curdir, delay=15, downloader_factory=get_best_downloader): + """ + Download setuptools from a specified location and return its filename + + `version` should be a valid setuptools version number that is available + as an egg for download under the `download_base` URL (which should end + with a '/'). `to_dir` is the directory where the egg will be downloaded. + `delay` is the number of seconds to pause before an actual download + attempt. + + ``downloader_factory`` should be a function taking no arguments and + returning a function for downloading a URL to a target. 
+ """ + # making sure we use the absolute path + to_dir = os.path.abspath(to_dir) + zip_name = "setuptools-%s.zip" % version + url = download_base + zip_name + saveto = os.path.join(to_dir, zip_name) + if not os.path.exists(saveto): # Avoid repeated downloads + log.warn("Downloading %s", url) + downloader = downloader_factory() + downloader(url, saveto) + return os.path.realpath(saveto) + +def _build_install_args(options): + """ + Build the arguments to 'python setup.py install' on the setuptools package + """ + return ['--user'] if options.user_install else [] + +def _parse_args(): + """ + Parse the command line for options + """ + parser = optparse.OptionParser() + parser.add_option( + '--user', dest='user_install', action='store_true', default=False, + help='install in user site package (requires Python 2.6 or later)') + parser.add_option( + '--download-base', dest='download_base', metavar="URL", + default=DEFAULT_URL, + help='alternative URL from where to download the setuptools package') + parser.add_option( + '--insecure', dest='downloader_factory', action='store_const', + const=lambda: download_file_insecure, default=get_best_downloader, + help='Use internal, non-validating downloader' + ) + parser.add_option( + '--version', help="Specify which version to download", + default=DEFAULT_VERSION, + ) + options, args = parser.parse_args() + # positional arguments are ignored + return options + +def main(): + """Install or upgrade setuptools and EasyInstall""" + options = _parse_args() + archive = download_setuptools( + version=options.version, + download_base=options.download_base, + downloader_factory=options.downloader_factory, + ) + return _install(archive, _build_install_args(options)) + +if __name__ == '__main__': + sys.exit(main()) diff --git a/lbrynet/__init__.py b/lbrynet/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lbrynet/conf.py b/lbrynet/conf.py new file mode 100644 index 000000000..b4d91b5b8 --- /dev/null +++ b/lbrynet/conf.py @@ -0,0 +1,25 @@ +""" +Some network wide and also application specific parameters +""" + + +import os + + +MAX_HANDSHAKE_SIZE = 2**16 +MAX_REQUEST_SIZE = 2**16 +MAX_BLOB_REQUEST_SIZE = 2**16 +MAX_RESPONSE_INFO_SIZE = 2**16 +MAX_BLOB_INFOS_TO_REQUEST = 20 +BLOBFILES_DIR = ".blobfiles" +BLOB_SIZE = 2**21 +MIN_BLOB_DATA_PAYMENT_RATE = .5 # points/megabyte +MIN_BLOB_INFO_PAYMENT_RATE = 2.0 # points/1000 infos +MIN_VALUABLE_BLOB_INFO_PAYMENT_RATE = 5.0 # points/1000 infos +MIN_VALUABLE_BLOB_HASH_PAYMENT_RATE = 5.0 # points/1000 infos +MAX_CONNECTIONS_PER_STREAM = 5 + +POINTTRADER_SERVER = 'http://ec2-54-187-192-68.us-west-2.compute.amazonaws.com:2424' +#POINTTRADER_SERVER = 'http://127.0.0.1:2424' + +CRYPTSD_FILE_EXTENSION = ".cryptsd" \ No newline at end of file diff --git a/lbrynet/core/BlobInfo.py b/lbrynet/core/BlobInfo.py new file mode 100644 index 000000000..d0e66c641 --- /dev/null +++ b/lbrynet/core/BlobInfo.py @@ -0,0 +1,18 @@ +class BlobInfo(object): + """ + This structure is used to represent the metadata of a blob. + + @ivar blob_hash: The sha384 hashsum of the blob's data. + @type blob_hash: string, hex-encoded + + @ivar blob_num: For streams, the position of the blob in the stream. + @type blob_num: integer + + @ivar length: The length of the blob in bytes. 
+ @type length: integer + """ + + def __init__(self, blob_hash, blob_num, length): + self.blob_hash = blob_hash + self.blob_num = blob_num + self.length = length \ No newline at end of file diff --git a/lbrynet/core/BlobManager.py b/lbrynet/core/BlobManager.py new file mode 100644 index 000000000..1449d35ca --- /dev/null +++ b/lbrynet/core/BlobManager.py @@ -0,0 +1,438 @@ +import logging +import os +import leveldb +import time +import json +from twisted.internet import threads, defer, reactor, task +from twisted.python.failure import Failure +from lbrynet.core.HashBlob import BlobFile, TempBlob, BlobFileCreator, TempBlobCreator +from lbrynet.core.server.DHTHashAnnouncer import DHTHashSupplier +from lbrynet.core.utils import is_valid_blobhash +from lbrynet.core.cryptoutils import get_lbry_hash_obj + + +class BlobManager(DHTHashSupplier): + """This class is subclassed by classes which keep track of which blobs are available + and which give access to new/existing blobs""" + def __init__(self, hash_announcer): + DHTHashSupplier.__init__(self, hash_announcer) + + def setup(self): + pass + + def get_blob(self, blob_hash, upload_allowed, length): + pass + + def get_blob_creator(self): + pass + + def _make_new_blob(self, blob_hash, upload_allowed, length): + pass + + def blob_completed(self, blob, next_announce_time=None): + pass + + def completed_blobs(self, blobs_to_check): + pass + + def hashes_to_announce(self): + pass + + def creator_finished(self, blob_creator): + pass + + def delete_blob(self, blob_hash): + pass + + def get_blob_length(self, blob_hash): + pass + + def check_consistency(self): + pass + + def blob_requested(self, blob_hash): + pass + + def blob_downloaded(self, blob_hash): + pass + + def blob_searched_on(self, blob_hash): + pass + + def blob_paid_for(self, blob_hash, amount): + pass + + +class DiskBlobManager(BlobManager): + """This class stores blobs on the hard disk""" + def __init__(self, hash_announcer, blob_dir, db_dir): + BlobManager.__init__(self, hash_announcer) + self.blob_dir = blob_dir + self.db_dir = db_dir + self.db = None + self.blob_type = BlobFile + self.blob_creator_type = BlobFileCreator + self.blobs = {} + self.blob_hashes_to_delete = {} # {blob_hash: being_deleted (True/False)} + self._next_manage_call = None + + def setup(self): + d = threads.deferToThread(self._open_db) + d.addCallback(lambda _: self._manage()) + return d + + def stop(self): + if self._next_manage_call is not None and self._next_manage_call.active(): + self._next_manage_call.cancel() + self._next_manage_call = None + self.db = None + return defer.succeed(True) + + def get_blob(self, blob_hash, upload_allowed, length=None): + """Return a blob identified by blob_hash, which may be a new blob or a blob that is already on the hard disk""" + # TODO: if blob.upload_allowed and upload_allowed is False, change upload_allowed in blob and on disk + if blob_hash in self.blobs: + return defer.succeed(self.blobs[blob_hash]) + return self._make_new_blob(blob_hash, upload_allowed, length) + + def get_blob_creator(self): + return self.blob_creator_type(self, self.blob_dir) + + def _make_new_blob(self, blob_hash, upload_allowed, length=None): + blob = self.blob_type(self.blob_dir, blob_hash, upload_allowed, length) + self.blobs[blob_hash] = blob + d = threads.deferToThread(self._completed_blobs, [blob_hash]) + + def check_completed(completed_blobs): + + def set_length(length): + blob.length = length + + if len(completed_blobs) == 1 and completed_blobs[0] == blob_hash: + blob.verified = True + inner_d = 
threads.deferToThread(self._get_blob_length, blob_hash) + inner_d.addCallback(set_length) + inner_d.addCallback(lambda _: blob) + else: + inner_d = defer.succeed(blob) + return inner_d + + d.addCallback(check_completed) + return d + + def blob_completed(self, blob, next_announce_time=None): + if next_announce_time is None: + next_announce_time = time.time() + return threads.deferToThread(self._add_completed_blob, blob.blob_hash, blob.length, + time.time(), next_announce_time) + + def completed_blobs(self, blobs_to_check): + return threads.deferToThread(self._completed_blobs, blobs_to_check) + + def hashes_to_announce(self): + next_announce_time = time.time() + self.hash_reannounce_time + return threads.deferToThread(self._get_blobs_to_announce, next_announce_time) + + def creator_finished(self, blob_creator): + logging.debug("blob_creator.blob_hash: %s", blob_creator.blob_hash) + assert blob_creator.blob_hash is not None + assert blob_creator.blob_hash not in self.blobs + assert blob_creator.length is not None + new_blob = self.blob_type(self.blob_dir, blob_creator.blob_hash, True, blob_creator.length) + new_blob.verified = True + self.blobs[blob_creator.blob_hash] = new_blob + if self.hash_announcer is not None: + self.hash_announcer.immediate_announce([blob_creator.blob_hash]) + next_announce_time = time.time() + self.hash_reannounce_time + d = self.blob_completed(new_blob, next_announce_time) + else: + d = self.blob_completed(new_blob) + return d + + def delete_blobs(self, blob_hashes): + for blob_hash in blob_hashes: + if not blob_hash in self.blob_hashes_to_delete: + self.blob_hashes_to_delete[blob_hash] = False + + def update_all_last_verified_dates(self, timestamp): + return threads.deferToThread(self._update_all_last_verified_dates, timestamp) + + def immediate_announce_all_blobs(self): + d = threads.deferToThread(self._get_all_verified_blob_hashes) + d.addCallback(self.hash_announcer.immediate_announce) + return d + + def get_blob_length(self, blob_hash): + return threads.deferToThread(self._get_blob_length, blob_hash) + + def check_consistency(self): + return threads.deferToThread(self._check_consistency) + + def _manage(self): + from twisted.internet import reactor + + d = self._delete_blobs_marked_for_deletion() + + def set_next_manage_call(): + self._next_manage_call = reactor.callLater(1, self._manage) + + d.addCallback(lambda _: set_next_manage_call()) + + def _delete_blobs_marked_for_deletion(self): + + def remove_from_list(b_h): + del self.blob_hashes_to_delete[b_h] + return b_h + + def set_not_deleting(err, b_h): + logging.warning("Failed to delete blob %s. 
Reason: %s", str(b_h), err.getErrorMessage()) + self.blob_hashes_to_delete[b_h] = False + return err + + def delete_from_db(result): + b_hs = [r[1] for r in result if r[0] is True] + if b_hs: + d = threads.deferToThread(self._delete_blobs_from_db, b_hs) + else: + d = defer.succeed(True) + + def log_error(err): + logging.warning("Failed to delete completed blobs from the db: %s", err.getErrorMessage()) + + d.addErrback(log_error) + return d + + def delete(blob, b_h): + d = blob.delete() + d.addCallbacks(lambda _: remove_from_list(b_h), set_not_deleting, errbackArgs=(b_h,)) + return d + + ds = [] + for blob_hash, being_deleted in self.blob_hashes_to_delete.items(): + if being_deleted is False: + self.blob_hashes_to_delete[blob_hash] = True + d = self.get_blob(blob_hash, True) + d.addCallbacks(delete, set_not_deleting, callbackArgs=(blob_hash,), errbackArgs=(blob_hash,)) + ds.append(d) + dl = defer.DeferredList(ds, consumeErrors=True) + dl.addCallback(delete_from_db) + return defer.DeferredList(ds) + + ######### database calls ######### + + def _open_db(self): + self.db = leveldb.LevelDB(os.path.join(self.db_dir, "blobs.db")) + + def _add_completed_blob(self, blob_hash, length, timestamp, next_announce_time=None): + logging.debug("Adding a completed blob. blob_hash=%s, length=%s", blob_hash, str(length)) + if next_announce_time is None: + next_announce_time = timestamp + self.db.Put(blob_hash, json.dumps((length, timestamp, next_announce_time)), sync=True) + + def _completed_blobs(self, blobs_to_check): + blobs = [] + for b in blobs_to_check: + if is_valid_blobhash(b): + try: + length, verified_time, next_announce_time = json.loads(self.db.Get(b)) + except KeyError: + continue + file_path = os.path.join(self.blob_dir, b) + if os.path.isfile(file_path): + if verified_time > os.path.getctime(file_path): + blobs.append(b) + return blobs + + def _get_blob_length(self, blob): + length, verified_time, next_announce_time = json.loads(self.db.Get(blob)) + return length + + def _update_blob_verified_timestamp(self, blob, timestamp): + length, old_verified_time, next_announce_time = json.loads(self.db.Get(blob)) + self.db.Put(blob, json.dumps((length, timestamp, next_announce_time)), sync=True) + + def _get_blobs_to_announce(self, next_announce_time): + # TODO: See if the following would be better for handling announce times: + # TODO: Have a separate db for them, and read the whole thing into memory + # TODO: on startup, and then write changes to db when they happen + blobs = [] + batch = leveldb.WriteBatch() + current_time = time.time() + for blob_hash, blob_info in self.db.RangeIter(): + length, verified_time, announce_time = json.loads(blob_info) + if announce_time < current_time: + batch.Put(blob_hash, json.dumps((length, verified_time, next_announce_time))) + blobs.append(blob_hash) + self.db.Write(batch, sync=True) + return blobs + + def _update_all_last_verified_dates(self, timestamp): + batch = leveldb.WriteBatch() + for blob_hash, blob_info in self.db.RangeIter(): + length, verified_time, announce_time = json.loads(blob_info) + batch.Put(blob_hash, json.dumps((length, timestamp, announce_time))) + self.db.Write(batch, sync=True) + + def _delete_blobs_from_db(self, blob_hashes): + batch = leveldb.WriteBatch() + for blob_hash in blob_hashes: + batch.Delete(blob_hash) + self.db.Write(batch, sync=True) + + def _check_consistency(self): + batch = leveldb.WriteBatch() + current_time = time.time() + for blob_hash, blob_info in self.db.RangeIter(): + length, verified_time, announce_time = 
json.loads(blob_info) + file_path = os.path.join(self.blob_dir, blob_hash) + if os.path.isfile(file_path): + if verified_time < os.path.getctime(file_path): + h = get_lbry_hash_obj() + len_so_far = 0 + f = open(file_path) + while True: + data = f.read(2**12) + if not data: + break + h.update(data) + len_so_far += len(data) + if len_so_far == length and h.hexdigest() == blob_hash: + batch.Put(blob_hash, json.dumps((length, current_time, announce_time))) + self.db.Write(batch, sync=True) + + def _get_all_verified_blob_hashes(self): + blob_hashes = [] + for blob_hash, blob_info in self.db.RangeIter(): + length, verified_time, announce_time = json.loads(blob_info) + file_path = os.path.join(self.blob_dir, blob_hash) + if os.path.isfile(file_path): + if verified_time > os.path.getctime(file_path): + blob_hashes.append(blob_hash) + return blob_hashes + + +class TempBlobManager(BlobManager): + """This class stores blobs in memory""" + def __init__(self, hash_announcer): + BlobManager.__init__(self, hash_announcer) + self.blob_type = TempBlob + self.blob_creator_type = TempBlobCreator + self.blobs = {} + self.blob_next_announces = {} + self.blob_hashes_to_delete = {} # {blob_hash: being_deleted (True/False)} + self._next_manage_call = None + + def setup(self): + self._manage() + return defer.succeed(True) + + def stop(self): + if self._next_manage_call is not None and self._next_manage_call.active(): + self._next_manage_call.cancel() + self._next_manage_call = None + + def get_blob(self, blob_hash, upload_allowed, length=None): + if blob_hash in self.blobs: + return defer.succeed(self.blobs[blob_hash]) + return self._make_new_blob(blob_hash, upload_allowed, length) + + def get_blob_creator(self): + return self.blob_creator_type(self) + + def _make_new_blob(self, blob_hash, upload_allowed, length=None): + blob = self.blob_type(blob_hash, upload_allowed, length) + self.blobs[blob_hash] = blob + return defer.succeed(blob) + + def blob_completed(self, blob, next_announce_time=None): + if next_announce_time is None: + next_announce_time = time.time() + self.blob_next_announces[blob.blob_hash] = next_announce_time + return defer.succeed(True) + + def completed_blobs(self, blobs_to_check): + blobs = [b.blob_hash for b in self.blobs.itervalues() if b.blob_hash in blobs_to_check and b.is_validated()] + return defer.succeed(blobs) + + def hashes_to_announce(self): + now = time.time() + blobs = [blob_hash for blob_hash, announce_time in self.blob_next_announces.iteritems() if announce_time < now] + next_announce_time = now + self.hash_reannounce_time + for b in blobs: + self.blob_next_announces[b] = next_announce_time + return defer.succeed(blobs) + + def creator_finished(self, blob_creator): + assert blob_creator.blob_hash is not None + assert blob_creator.blob_hash not in self.blobs + assert blob_creator.length is not None + new_blob = self.blob_type(blob_creator.blob_hash, True, blob_creator.length) + new_blob.verified = True + new_blob.data_buffer = blob_creator.data_buffer + new_blob.length = blob_creator.length + self.blobs[blob_creator.blob_hash] = new_blob + if self.hash_announcer is not None: + self.hash_announcer.immediate_announce([blob_creator.blob_hash]) + next_announce_time = time.time() + self.hash_reannounce_time + d = self.blob_completed(new_blob, next_announce_time) + else: + d = self.blob_completed(new_blob) + d.addCallback(lambda _: new_blob) + return d + + def delete_blobs(self, blob_hashes): + for blob_hash in blob_hashes: + if not blob_hash in self.blob_hashes_to_delete: + 
self.blob_hashes_to_delete[blob_hash] = False + + def get_blob_length(self, blob_hash): + if blob_hash in self.blobs: + if self.blobs[blob_hash].length is not None: + return defer.succeed(self.blobs[blob_hash].length) + return defer.fail(ValueError("No such blob hash is known")) + + def immediate_announce_all_blobs(self): + return self.hash_announcer.immediate_announce(self.blobs.iterkeys()) + + def _manage(self): + from twisted.internet import reactor + + d = self._delete_blobs_marked_for_deletion() + + def set_next_manage_call(): + logging.info("Setting the next manage call in %s", str(self)) + self._next_manage_call = reactor.callLater(1, self._manage) + + d.addCallback(lambda _: set_next_manage_call()) + + def _delete_blobs_marked_for_deletion(self): + + def remove_from_list(b_h): + del self.blob_hashes_to_delete[b_h] + logging.info("Deleted blob %s", blob_hash) + return b_h + + def set_not_deleting(err, b_h): + logging.warning("Failed to delete blob %s. Reason: %s", str(b_h), err.getErrorMessage()) + self.blob_hashes_to_delete[b_h] = False + return b_h + + ds = [] + for blob_hash, being_deleted in self.blob_hashes_to_delete.items(): + if being_deleted is False: + if blob_hash in self.blobs: + self.blob_hashes_to_delete[blob_hash] = True + logging.info("Found a blob marked for deletion: %s", blob_hash) + blob = self.blobs[blob_hash] + d = blob.delete() + + d.addCallbacks(lambda _: remove_from_list(blob_hash), set_not_deleting, + errbackArgs=(blob_hash,)) + + ds.append(d) + else: + remove_from_list(blob_hash) + d = defer.fail(Failure(ValueError("No such blob known"))) + logging.warning("Blob %s cannot be deleted because it is unknown") + ds.append(d) + return defer.DeferredList(ds) \ No newline at end of file diff --git a/lbrynet/core/DownloadOption.py b/lbrynet/core/DownloadOption.py new file mode 100644 index 000000000..0972cbe19 --- /dev/null +++ b/lbrynet/core/DownloadOption.py @@ -0,0 +1,6 @@ +class DownloadOption(object): + def __init__(self, option_types, long_description, short_description, default): + self.option_types = option_types + self.long_description = long_description + self.short_description = short_description + self.default = default \ No newline at end of file diff --git a/lbrynet/core/Error.py b/lbrynet/core/Error.py new file mode 100644 index 000000000..3780998cb --- /dev/null +++ b/lbrynet/core/Error.py @@ -0,0 +1,48 @@ +class PriceDisagreementError(Exception): + pass + + +class DuplicateStreamHashError(Exception): + pass + + +class DownloadCanceledError(Exception): + pass + + +class RequestCanceledError(Exception): + pass + + +class InsufficientFundsError(Exception): + pass + + +class ConnectionClosedBeforeResponseError(Exception): + pass + + +class UnknownNameError(Exception): + def __init__(self, name): + self.name = name + + +class InvalidStreamInfoError(Exception): + def __init__(self, name): + self.name = name + + +class MisbehavingPeerError(Exception): + pass + + +class InvalidDataError(MisbehavingPeerError): + pass + + +class NoResponseError(MisbehavingPeerError): + pass + + +class InvalidResponseError(MisbehavingPeerError): + pass \ No newline at end of file diff --git a/lbrynet/core/HashAnnouncer.py b/lbrynet/core/HashAnnouncer.py new file mode 100644 index 000000000..dd0efaf54 --- /dev/null +++ b/lbrynet/core/HashAnnouncer.py @@ -0,0 +1,15 @@ +class DummyHashAnnouncer(object): + def __init__(self, *args): + pass + + def run_manage_loop(self): + pass + + def stop(self): + pass + + def add_supplier(self, *args): + pass + + def immediate_announce(self, 
*args): + pass \ No newline at end of file diff --git a/lbrynet/core/HashBlob.py b/lbrynet/core/HashBlob.py new file mode 100644 index 000000000..896208621 --- /dev/null +++ b/lbrynet/core/HashBlob.py @@ -0,0 +1,391 @@ +from StringIO import StringIO +import logging +import os +import tempfile +import threading +import shutil +from twisted.internet import interfaces, defer, threads +from twisted.protocols.basic import FileSender +from twisted.python.failure import Failure +from zope.interface import implements +from lbrynet.conf import BLOB_SIZE +from lbrynet.core.Error import DownloadCanceledError, InvalidDataError +from lbrynet.core.cryptoutils import get_lbry_hash_obj + + +class HashBlobReader(object): + implements(interfaces.IConsumer) + + def __init__(self, write_func): + self.write_func = write_func + + def registerProducer(self, producer, streaming): + + from twisted.internet import reactor + + self.producer = producer + self.streaming = streaming + if self.streaming is False: + reactor.callLater(0, self.producer.resumeProducing) + + def unregisterProducer(self): + pass + + def write(self, data): + + from twisted.internet import reactor + + self.write_func(data) + if self.streaming is False: + reactor.callLater(0, self.producer.resumeProducing) + + +class HashBlobWriter(object): + def __init__(self, write_handle, length_getter, finished_cb): + self.write_handle = write_handle + self.length_getter = length_getter + self.finished_cb = finished_cb + self.hashsum = get_lbry_hash_obj() + self.len_so_far = 0 + + def write(self, data): + self.hashsum.update(data) + self.len_so_far += len(data) + if self.len_so_far > self.length_getter(): + self.finished_cb(self, Failure(InvalidDataError("Length so far is greater than the expected length." + " %s to %s" % (str(self.len_so_far), + str(self.length_getter()))))) + else: + self.write_handle.write(data) + if self.len_so_far == self.length_getter(): + self.finished_cb(self) + + def cancel(self, reason=None): + if reason is None: + reason = Failure(DownloadCanceledError()) + self.finished_cb(self, reason) + + +class HashBlob(object): + """A chunk of data available on the network which is specified by a hashsum""" + + def __init__(self, blob_hash, upload_allowed, length=None): + self.blob_hash = blob_hash + self.length = length + self.writers = {} # {Peer: writer, finished_deferred} + self.finished_deferred = None + self.verified = False + self.upload_allowed = upload_allowed + self.readers = 0 + + def set_length(self, length): + if self.length is not None and length == self.length: + return True + if self.length is None and 0 <= length <= BLOB_SIZE: + self.length = length + return True + logging.warning("Got an invalid length. 
Previous length: %s, Invalid length: %s", str(self.length), str(length)) + return False + + def get_length(self): + return self.length + + def is_validated(self): + if self.verified: + return True + else: + return False + + def is_downloading(self): + if self.writers: + return True + return False + + def read(self, write_func): + + def close_self(*args): + self.close_read_handle(file_handle) + return args[0] + + file_sender = FileSender() + reader = HashBlobReader(write_func) + file_handle = self.open_for_reading() + if file_handle is not None: + d = file_sender.beginFileTransfer(file_handle, reader) + d.addCallback(close_self) + else: + d = defer.fail(ValueError("Could not read the blob")) + return d + + def writer_finished(self, writer, err=None): + + def fire_finished_deferred(): + self.verified = True + for p, (w, finished_deferred) in self.writers.items(): + if w == writer: + finished_deferred.callback(self) + del self.writers[p] + return True + logging.warning("Somehow, the writer that was accepted as being valid was already removed. writer: %s", + str(writer)) + return False + + def errback_finished_deferred(err): + for p, (w, finished_deferred) in self.writers.items(): + if w == writer: + finished_deferred.errback(err) + del self.writers[p] + + def cancel_other_downloads(): + for p, (w, finished_deferred) in self.writers.items(): + w.cancel() + + if err is None: + if writer.len_so_far == self.length and writer.hashsum.hexdigest() == self.blob_hash: + if self.verified is False: + d = self._save_verified_blob(writer) + d.addCallbacks(lambda _: fire_finished_deferred(), errback_finished_deferred) + d.addCallback(lambda _: cancel_other_downloads()) + else: + errback_finished_deferred(Failure(DownloadCanceledError())) + d = defer.succeed(True) + else: + err_string = "length vs expected: {0}, {1}, hash vs expected: {2}, {3}" + err_string = err_string.format(self.length, writer.len_so_far, self.blob_hash, + writer.hashsum.hexdigest()) + errback_finished_deferred(Failure(InvalidDataError(err_string))) + d = defer.succeed(True) + else: + errback_finished_deferred(err) + d = defer.succeed(True) + + d.addBoth(lambda _: self._close_writer(writer)) + return d + + def open_for_writing(self, peer): + pass + + def open_for_reading(self): + pass + + def delete(self): + pass + + def close_read_handle(self, file_handle): + pass + + def _close_writer(self, writer): + pass + + def _save_verified_blob(self, writer): + pass + + def __str__(self): + return self.blob_hash[:16] + + def __repr__(self): + return str(self) + + +class BlobFile(HashBlob): + """A HashBlob which will be saved to the hard disk of the downloader""" + + def __init__(self, blob_dir, *args): + HashBlob.__init__(self, *args) + self.blob_dir = blob_dir + self.file_path = os.path.join(blob_dir, self.blob_hash) + self.setting_verified_blob_lock = threading.Lock() + self.moved_verified_blob = False + + def open_for_writing(self, peer): + if not peer in self.writers: + logging.debug("Opening %s to be written by %s", str(self), str(peer)) + write_file = tempfile.NamedTemporaryFile(delete=False, dir=self.blob_dir) + finished_deferred = defer.Deferred() + writer = HashBlobWriter(write_file, self.get_length, self.writer_finished) + + self.writers[peer] = (writer, finished_deferred) + return finished_deferred, writer.write, writer.cancel + logging.warning("Tried to download the same file twice simultaneously from the same peer") + return None, None, None + + def open_for_reading(self): + if self.verified is True: + file_handle = None + try: + 
file_handle = open(self.file_path, 'rb') + self.readers += 1 + return file_handle + except IOError: + self.close_read_handle(file_handle) + return None + + def delete(self): + if not self.writers and not self.readers: + self.verified = False + self.moved_verified_blob = False + + def delete_from_file_system(): + if os.path.isfile(self.file_path): + os.remove(self.file_path) + + d = threads.deferToThread(delete_from_file_system) + + def log_error(err): + logging.warning("An error occurred deleting %s: %s", str(self.file_path), err.getErrorMessage()) + return err + + d.addErrback(log_error) + return d + else: + return defer.fail(Failure(ValueError("File is currently being read or written and cannot be deleted"))) + + def close_read_handle(self, file_handle): + if file_handle is not None: + file_handle.close() + self.readers -= 1 + + def _close_writer(self, writer): + if writer.write_handle is not None: + logging.debug("Closing %s", str(self)) + name = writer.write_handle.name + writer.write_handle.close() + threads.deferToThread(os.remove, name) + writer.write_handle = None + + def _save_verified_blob(self, writer): + + def move_file(): + with self.setting_verified_blob_lock: + if self.moved_verified_blob is False: + temp_file_name = writer.write_handle.name + writer.write_handle.close() + shutil.move(temp_file_name, self.file_path) + writer.write_handle = None + self.moved_verified_blob = True + return True + else: + raise DownloadCanceledError() + + return threads.deferToThread(move_file) + + +class TempBlob(HashBlob): + """A HashBlob which will only exist in memory""" + def __init__(self, *args): + HashBlob.__init__(self, *args) + self.data_buffer = "" + + def open_for_writing(self, peer): + if not peer in self.writers: + temp_buffer = StringIO() + finished_deferred = defer.Deferred() + writer = HashBlobWriter(temp_buffer, self.get_length, self.writer_finished) + + self.writers[peer] = (writer, finished_deferred) + return finished_deferred, writer.write, writer.cancel + return None, None, None + + def open_for_reading(self): + if self.verified is True: + return StringIO(self.data_buffer) + return None + + def delete(self): + if not self.writers and not self.readers: + self.verified = False + self.data_buffer = '' + return defer.succeed(True) + else: + return defer.fail(Failure(ValueError("Blob is currently being read or written and cannot be deleted"))) + + def close_read_handle(self, file_handle): + file_handle.close() + + def _close_writer(self, writer): + if writer.write_handle is not None: + writer.write_handle.close() + writer.write_handle = None + + def _save_verified_blob(self, writer): + if not self.data_buffer: + self.data_buffer = writer.write_handle.getvalue() + writer.write_handle.close() + writer.write_handle = None + return defer.succeed(True) + else: + return defer.fail(Failure(DownloadCanceledError())) + + +class HashBlobCreator(object): + def __init__(self, blob_manager): + self.blob_manager = blob_manager + self.hashsum = get_lbry_hash_obj() + self.len_so_far = 0 + self.blob_hash = None + self.length = None + + def open(self): + pass + + def close(self): + self.length = self.len_so_far + if self.length == 0: + self.blob_hash = None + else: + self.blob_hash = self.hashsum.hexdigest() + d = self._close() + + if self.blob_hash is not None: + d.addCallback(lambda _: self.blob_manager.creator_finished(self)) + d.addCallback(lambda _: self.blob_hash) + else: + d.addCallback(lambda _: None) + return d + + def write(self, data): + self.hashsum.update(data) + self.len_so_far += 
len(data) + self._write(data) + + def _close(self): + pass + + def _write(self, data): + pass + + +class BlobFileCreator(HashBlobCreator): + def __init__(self, blob_manager, blob_dir): + HashBlobCreator.__init__(self, blob_manager) + self.blob_dir = blob_dir + self.out_file = tempfile.NamedTemporaryFile(delete=False, dir=self.blob_dir) + + def _close(self): + temp_file_name = self.out_file.name + self.out_file.close() + + def change_file_name(): + shutil.move(temp_file_name, os.path.join(self.blob_dir, self.blob_hash)) + return True + + if self.blob_hash is not None: + d = threads.deferToThread(change_file_name) + else: + d = defer.succeed(True) + return d + + def _write(self, data): + self.out_file.write(data) + + +class TempBlobCreator(HashBlobCreator): + def __init__(self, blob_manager): + HashBlobCreator.__init__(self, blob_manager) + self.data_buffer = '' + + def _close(self): + return defer.succeed(True) + + def _write(self, data): + self.data_buffer += data \ No newline at end of file diff --git a/lbrynet/core/LBRYcrdWallet.py b/lbrynet/core/LBRYcrdWallet.py new file mode 100644 index 000000000..22326b315 --- /dev/null +++ b/lbrynet/core/LBRYcrdWallet.py @@ -0,0 +1,468 @@ +from lbrynet.interfaces import IRequestCreator, IQueryHandlerFactory, IQueryHandler, ILBRYWallet +from lbrynet.core.client.ClientRequest import ClientRequest +from lbrynet.core.Error import UnknownNameError, InvalidStreamInfoError, RequestCanceledError +from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException +from twisted.internet import threads, reactor, defer, task +from twisted.python.failure import Failure +from collections import defaultdict, deque +from zope.interface import implements +from decimal import Decimal +import datetime +import logging +import json +import subprocess +import socket +import time +import os + + +class ReservedPoints(object): + def __init__(self, identifier, amount): + self.identifier = identifier + self.amount = amount + + +class LBRYcrdWallet(object): + """This class implements the LBRYWallet interface for the LBRYcrd payment system""" + implements(ILBRYWallet) + + def __init__(self, rpc_user, rpc_pass, rpc_url, rpc_port, start_lbrycrdd=False, + wallet_dir=None, wallet_conf=None): + self.rpc_conn_string = "http://%s:%s@%s:%s" % (rpc_user, rpc_pass, rpc_url, str(rpc_port)) + self.next_manage_call = None + self.wallet_balance = Decimal(0.0) + self.total_reserved_points = Decimal(0.0) + self.peer_addresses = {} # {Peer: string} + self.queued_payments = defaultdict(Decimal) # {address(string): amount(Decimal)} + self.expected_balances = defaultdict(Decimal) # {address(string): amount(Decimal)} + self.current_address_given_to_peer = {} # {Peer: address(string)} + self.expected_balance_at_time = deque() # (Peer, address(string), amount(Decimal), time(datetime), count(int), + # incremental_amount(float)) + self.max_expected_payment_time = datetime.timedelta(minutes=3) + self.stopped = True + self.start_lbrycrdd = start_lbrycrdd + self.started_lbrycrdd = False + self.wallet_dir = wallet_dir + self.wallet_conf = wallet_conf + self.lbrycrdd = None + self.manage_running = False + + def start(self): + + def make_connection(): + if self.start_lbrycrdd is True: + self._start_daemon() + logging.info("Trying to connect to %s", self.rpc_conn_string) + self.rpc_conn = AuthServiceProxy(self.rpc_conn_string) + logging.info("Connected!") + + def start_manage(): + self.stopped = False + self.manage() + return True + + d = threads.deferToThread(make_connection) + d.addCallback(lambda _: 
start_manage()) + return d + + def stop(self): + self.stopped = True + # If self.next_manage_call is None, then manage is currently running or else + # start has not been called, so set stopped and do nothing else. + if self.next_manage_call is not None: + self.next_manage_call.cancel() + self.next_manage_call = None + + d = self.manage() + if self.start_lbrycrdd is True: + d.addBoth(lambda _: self._stop_daemon()) + return d + + def manage(self): + logging.info("Doing manage") + self.next_manage_call = None + have_set_manage_running = [False] + + def check_if_manage_running(): + + d = defer.Deferred() + + def fire_if_not_running(): + if self.manage_running is False: + self.manage_running = True + have_set_manage_running[0] = True + d.callback(True) + else: + task.deferLater(reactor, 1, fire_if_not_running) + + fire_if_not_running() + return d + + d = check_if_manage_running() + + d.addCallback(lambda _: self._check_expected_balances()) + + d.addCallback(lambda _: self._send_payments()) + + d.addCallback(lambda _: threads.deferToThread(self._get_wallet_balance)) + + def set_wallet_balance(balance): + self.wallet_balance = balance + + d.addCallback(set_wallet_balance) + + def set_next_manage_call(): + if not self.stopped: + self.next_manage_call = reactor.callLater(60, self.manage) + + d.addCallback(lambda _: set_next_manage_call()) + + def log_error(err): + logging.error("Something went wrong during manage. Error message: %s", err.getErrorMessage()) + return err + + d.addErrback(log_error) + + def set_manage_not_running(arg): + if have_set_manage_running[0] is True: + self.manage_running = False + return arg + + d.addBoth(set_manage_not_running) + return d + + def get_info_exchanger(self): + return LBRYcrdAddressRequester(self) + + def get_wallet_info_query_handler_factory(self): + return LBRYcrdAddressQueryHandlerFactory(self) + + def get_balance(self): + d = threads.deferToThread(self._get_wallet_balance) + return d + + def reserve_points(self, peer, amount): + """ + Ensure a certain amount of points are available to be sent as payment, before the service is rendered + + @param peer: The peer to which the payment will ultimately be sent + + @param amount: The amount of points to reserve + + @return: A ReservedPoints object which is given to send_points once the service has been rendered + """ + rounded_amount = Decimal(str(round(amount, 8))) + #if peer in self.peer_addresses: + if self.wallet_balance >= self.total_reserved_points + rounded_amount: + self.total_reserved_points += rounded_amount + return ReservedPoints(peer, rounded_amount) + return None + + def cancel_point_reservation(self, reserved_points): + """ + Return all of the points that were reserved previously for some ReservedPoints object + + @param reserved_points: ReservedPoints previously returned by reserve_points + + @return: None + """ + self.total_reserved_points -= reserved_points.amount + + def send_points(self, reserved_points, amount): + """ + Schedule a payment to be sent to a peer + + @param reserved_points: ReservedPoints object previously returned by reserve_points + + @param amount: amount of points to actually send, must be less than or equal to the + amount reserved in reserved_points + + @return: Deferred which fires when the payment has been scheduled + """ + rounded_amount = Decimal(str(round(amount, 8))) + peer = reserved_points.identifier + assert(rounded_amount <= reserved_points.amount) + assert(peer in self.peer_addresses) + self.queued_payments[self.peer_addresses[peer]] += rounded_amount + # make 
any unused points available + self.total_reserved_points -= (reserved_points.amount - rounded_amount) + logging.info("ordering that %s points be sent to %s", str(rounded_amount), + str(self.peer_addresses[peer])) + peer.update_stats('points_sent', amount) + return defer.succeed(True) + + def add_expected_payment(self, peer, amount): + """Increase the number of points expected to be paid by a peer""" + rounded_amount = Decimal(str(round(amount, 8))) + assert(peer in self.current_address_given_to_peer) + address = self.current_address_given_to_peer[peer] + logging.info("expecting a payment at address %s in the amount of %s", str(address), str(rounded_amount)) + self.expected_balances[address] += rounded_amount + expected_balance = self.expected_balances[address] + expected_time = datetime.datetime.now() + self.max_expected_payment_time + self.expected_balance_at_time.append((peer, address, expected_balance, expected_time, 0, amount)) + peer.update_stats('expected_points', amount) + + def update_peer_address(self, peer, address): + self.peer_addresses[peer] = address + + def get_new_address_for_peer(self, peer): + def set_address_for_peer(address): + self.current_address_given_to_peer[peer] = address + return address + d = threads.deferToThread(self._get_new_address) + d.addCallback(set_address_for_peer) + return d + + def get_stream_info_for_name(self, name): + + def get_stream_info_from_value(result): + r_dict = {} + if 'value' in result: + value = result['value'] + try: + value_dict = json.loads(value) + except ValueError: + return Failure(InvalidStreamInfoError(name)) + if 'stream_hash' in value_dict: + r_dict['stream_hash'] = value_dict['stream_hash'] + if 'name' in value_dict: + r_dict['name'] = value_dict['name'] + if 'description' in value_dict: + r_dict['description'] = value_dict['description'] + return r_dict + return Failure(UnknownNameError(name)) + + d = threads.deferToThread(self._get_value_for_name, name) + d.addCallback(get_stream_info_from_value) + return d + + def claim_name(self, stream_hash, name, amount): + value = json.dumps({"stream_hash": stream_hash}) + d = threads.deferToThread(self._claim_name, name, value, amount) + return d + + def get_available_balance(self): + return float(self.wallet_balance - self.total_reserved_points) + + def get_new_address(self): + return threads.deferToThread(self._get_new_address) + + def _start_daemon(self): + + if os.name == "nt": + si = subprocess.STARTUPINFO + si.dwFlags = subprocess.STARTF_USESHOWWINDOW + si.wShowWindow = subprocess.SW_HIDE + self.lbrycrdd = subprocess.Popen(["lbrycrdd.exe", "-datadir=%s" % self.wallet_dir, + "-conf=%s" % self.wallet_conf], startupinfo=si) + else: + self.lbrycrdd = subprocess.Popen(["./lbrycrdd", "-datadir=%s" % self.wallet_dir, + "-conf=%s" % self.wallet_conf]) + self.started_lbrycrdd = True + + tries = 0 + while tries < 5: + try: + rpc_conn = AuthServiceProxy(self.rpc_conn_string) + rpc_conn.getinfo() + break + except (socket.error, JSONRPCException): + tries += 1 + logging.warning("Failed to connect to lbrycrdd.") + if tries < 5: + time.sleep(2 ** tries) + logging.warning("Trying again in %d seconds", 2 ** tries) + else: + logging.warning("Giving up.") + else: + self.lbrycrdd.terminate() + raise ValueError("Couldn't open lbrycrdd") + + def _stop_daemon(self): + if self.lbrycrdd is not None and self.started_lbrycrdd is True: + d = threads.deferToThread(self._rpc_stop) + return d + return defer.succeed(True) + + def _check_expected_balances(self): + now = datetime.datetime.now() + 
balances_to_check = [] + try: + while self.expected_balance_at_time[0][3] < now: + balances_to_check.append(self.expected_balance_at_time.popleft()) + except IndexError: + pass + ds = [] + for balance_to_check in balances_to_check: + d = threads.deferToThread(self._check_expected_balance, balance_to_check) + ds.append(d) + dl = defer.DeferredList(ds) + + def handle_checks(results): + from future_builtins import zip + for balance, (success, result) in zip(balances_to_check, results): + peer = balance[0] + if success is True: + if result is False: + if balance[4] <= 1: # first or second strike, give them another chance + new_expected_balance = (balance[0], + balance[1], + balance[2], + datetime.datetime.now() + self.max_expected_payment_time, + balance[4] + 1, + balance[5]) + self.expected_balance_at_time.append(new_expected_balance) + peer.update_score(-5.0) + else: + peer.update_score(-50.0) + else: + if balance[4] == 0: + peer.update_score(balance[5]) + peer.update_stats('points_received', balance[5]) + else: + logging.warning("Something went wrong checking a balance. Peer: %s, account: %s," + "expected balance: %s, expected time: %s, count: %s, error: %s", + str(balance[0]), str(balance[1]), str(balance[2]), str(balance[3]), + str(balance[4]), str(result.getErrorMessage())) + + dl.addCallback(handle_checks) + return dl + + def _check_expected_balance(self, expected_balance): + rpc_conn = AuthServiceProxy(self.rpc_conn_string) + logging.info("Checking balance of address %s", str(expected_balance[1])) + balance = rpc_conn.getreceivedbyaddress(expected_balance[1]) + logging.debug("received balance: %s", str(balance)) + logging.debug("expected balance: %s", str(expected_balance[2])) + return balance >= expected_balance[2] + + def _send_payments(self): + logging.info("Trying to send payments, if there are any to be sent") + + def do_send(payments): + rpc_conn = AuthServiceProxy(self.rpc_conn_string) + rpc_conn.sendmany("", payments) + + payments_to_send = {} + for address, points in self.queued_payments.items(): + logging.info("Should be sending %s points to %s", str(points), str(address)) + payments_to_send[address] = float(points) + self.total_reserved_points -= points + self.wallet_balance -= points + del self.queued_payments[address] + if payments_to_send: + logging.info("Creating a transaction with outputs %s", str(payments_to_send)) + return threads.deferToThread(do_send, payments_to_send) + logging.info("There were no payments to send") + return defer.succeed(True) + + def _get_wallet_balance(self): + rpc_conn = AuthServiceProxy(self.rpc_conn_string) + return rpc_conn.getbalance("") + + def _get_new_address(self): + rpc_conn = AuthServiceProxy(self.rpc_conn_string) + return rpc_conn.getnewaddress() + + def _get_value_for_name(self, name): + rpc_conn = AuthServiceProxy(self.rpc_conn_string) + return rpc_conn.getvalueforname(name) + + def _claim_name(self, name, value, amount): + rpc_conn = AuthServiceProxy(self.rpc_conn_string) + return str(rpc_conn.claimname(name, value, amount)) + + def _rpc_stop(self): + rpc_conn = AuthServiceProxy(self.rpc_conn_string) + rpc_conn.stop() + self.lbrycrdd.wait() + + +class LBRYcrdAddressRequester(object): + implements([IRequestCreator]) + + def __init__(self, wallet): + self.wallet = wallet + self._protocols = [] + + ######### IRequestCreator ######### + + def send_next_request(self, peer, protocol): + + if not protocol in self._protocols: + r = ClientRequest({'lbrycrd_address': True}, 'lbrycrd_address') + d = protocol.add_request(r) + 
d.addCallback(self._handle_address_response, peer, r, protocol) + d.addErrback(self._request_failed, peer) + self._protocols.append(protocol) + return defer.succeed(True) + else: + return defer.succeed(False) + + ######### internal calls ######### + + def _handle_address_response(self, response_dict, peer, request, protocol): + assert request.response_identifier in response_dict, \ + "Expected %s in dict but did not get it" % request.response_identifier + assert protocol in self._protocols, "Responding protocol is not in our list of protocols" + address = response_dict[request.response_identifier] + self.wallet.update_peer_address(peer, address) + + def _request_failed(self, err, peer): + if not err.check(RequestCanceledError): + logging.warning("A peer failed to send a valid public key response. Error: %s, peer: %s", + err.getErrorMessage(), str(peer)) + #return err + + +class LBRYcrdAddressQueryHandlerFactory(object): + implements(IQueryHandlerFactory) + + def __init__(self, wallet): + self.wallet = wallet + + ######### IQueryHandlerFactory ######### + + def build_query_handler(self): + q_h = LBRYcrdAddressQueryHandler(self.wallet) + return q_h + + def get_primary_query_identifier(self): + return 'lbrycrd_address' + + def get_description(self): + return "LBRYcrd Address - an address for receiving payments via LBRYcrd" + + +class LBRYcrdAddressQueryHandler(object): + implements(IQueryHandler) + + def __init__(self, wallet): + self.wallet = wallet + self.query_identifiers = ['lbrycrd_address'] + self.address = None + self.peer = None + + ######### IQueryHandler ######### + + def register_with_request_handler(self, request_handler, peer): + self.peer = peer + request_handler.register_query_handler(self, self.query_identifiers) + + def handle_queries(self, queries): + + def create_response(address): + self.address = address + fields = {'lbrycrd_address': address} + return fields + + if self.query_identifiers[0] in queries: + d = self.wallet.get_new_address_for_peer(self.peer) + d.addCallback(create_response) + return d + if self.address is None: + logging.warning("Expected a request for an address, but did not receive one") + return defer.fail(Failure(ValueError("Expected but did not receive an address request"))) + else: + return defer.succeed({}) \ No newline at end of file diff --git a/lbrynet/core/PTCWallet.py b/lbrynet/core/PTCWallet.py new file mode 100644 index 000000000..01fdd0ea8 --- /dev/null +++ b/lbrynet/core/PTCWallet.py @@ -0,0 +1,315 @@ +from collections import defaultdict +import logging +import leveldb +import os +import time +from Crypto.Hash import SHA512 +from Crypto.PublicKey import RSA +from lbrynet.core.client.ClientRequest import ClientRequest +from lbrynet.core.Error import RequestCanceledError +from lbrynet.interfaces import IRequestCreator, IQueryHandlerFactory, IQueryHandler, ILBRYWallet +from lbrynet.pointtraderclient import pointtraderclient +from twisted.internet import defer, threads +from zope.interface import implements +from twisted.python.failure import Failure +from lbrynet.core.LBRYcrdWallet import ReservedPoints + + +class PTCWallet(object): + """This class sends payments to peers and also ensures that expected payments are received. 
+ This class is only intended to be used for testing.""" + implements(ILBRYWallet) + + def __init__(self, db_dir): + self.db_dir = db_dir + self.db = None + self.private_key = None + self.encoded_public_key = None + self.peer_pub_keys = {} + self.queued_payments = defaultdict(int) + self.expected_payments = defaultdict(list) + self.received_payments = defaultdict(list) + self.next_manage_call = None + self.payment_check_window = 3 * 60 # 3 minutes + self.new_payments_expected_time = time.time() - self.payment_check_window + self.known_transactions = [] + self.total_reserved_points = 0.0 + self.wallet_balance = 0.0 + + def manage(self): + """Send payments, ensure expected payments are received""" + + from twisted.internet import reactor + + if time.time() < self.new_payments_expected_time + self.payment_check_window: + d1 = self._get_new_payments() + else: + d1 = defer.succeed(None) + d1.addCallback(lambda _: self._check_good_standing()) + d2 = self._send_queued_points() + self.next_manage_call = reactor.callLater(60, self.manage) + dl = defer.DeferredList([d1, d2]) + dl.addCallback(lambda _: self.get_balance()) + + def set_balance(balance): + self.wallet_balance = balance + + dl.addCallback(set_balance) + return dl + + def stop(self): + if self.next_manage_call is not None: + self.next_manage_call.cancel() + self.next_manage_call = None + d = self.manage() + self.next_manage_call.cancel() + self.next_manage_call = None + self.db = None + return d + + def start(self): + + def save_key(success, private_key): + if success is True: + threads.deferToThread(self.save_private_key, private_key.exportKey()) + return True + return False + + def register_private_key(private_key): + self.private_key = private_key + self.encoded_public_key = self.private_key.publickey().exportKey() + d_r = pointtraderclient.register_new_account(private_key) + d_r.addCallback(save_key, private_key) + return d_r + + def ensure_private_key_exists(encoded_private_key): + if encoded_private_key is not None: + self.private_key = RSA.importKey(encoded_private_key) + self.encoded_public_key = self.private_key.publickey().exportKey() + return True + else: + create_d = threads.deferToThread(RSA.generate, 4096) + create_d.addCallback(register_private_key) + return create_d + + def start_manage(): + self.manage() + return True + d = threads.deferToThread(self._open_db) + d.addCallback(lambda _: threads.deferToThread(self.get_wallet_private_key)) + d.addCallback(ensure_private_key_exists) + d.addCallback(lambda _: start_manage()) + return d + + def get_info_exchanger(self): + return PointTraderKeyExchanger(self) + + def get_wallet_info_query_handler_factory(self): + return PointTraderKeyQueryHandlerFactory(self) + + def reserve_points(self, peer, amount): + """ + Ensure a certain amount of points are available to be sent as payment, before the service is rendered + + @param peer: The peer to which the payment will ultimately be sent + + @param amount: The amount of points to reserve + + @return: A ReservedPoints object which is given to send_points once the service has been rendered + """ + if self.wallet_balance >= self.total_reserved_points + amount: + self.total_reserved_points += amount + return ReservedPoints(peer, amount) + return None + + def cancel_point_reservation(self, reserved_points): + """ + Return all of the points that were reserved previously for some ReservedPoints object + + @param reserved_points: ReservedPoints previously returned by reserve_points + + @return: None + """ + self.total_reserved_points -= 
reserved_points.amount + + def send_points(self, reserved_points, amount): + """ + Schedule a payment to be sent to a peer + + @param reserved_points: ReservedPoints object previously returned by reserve_points + + @param amount: amount of points to actually send, must be less than or equal to the + amount reserved in reserved_points + + @return: Deferred which fires when the payment has been scheduled + """ + self.queued_payments[reserved_points.identifier] += amount + # make any unused points available + self.total_reserved_points -= reserved_points.amount - amount + reserved_points.identifier.update_stats('points_sent', amount) + d = defer.succeed(True) + return d + + def _send_queued_points(self): + ds = [] + for peer, points in self.queued_payments.items(): + if peer in self.peer_pub_keys: + d = pointtraderclient.send_points(self.private_key, self.peer_pub_keys[peer], points) + self.wallet_balance -= points + self.total_reserved_points -= points + ds.append(d) + del self.queued_payments[peer] + else: + logging.warning("Don't have a payment address for peer %s. Can't send %s points.", + str(peer), str(points)) + return defer.DeferredList(ds) + + def get_balance(self): + """Return the balance of this wallet""" + d = pointtraderclient.get_balance(self.private_key) + return d + + def add_expected_payment(self, peer, amount): + """Increase the number of points expected to be paid by a peer""" + self.expected_payments[peer].append((amount, time.time())) + self.new_payments_expected_time = time.time() + peer.update_stats('expected_points', amount) + + def set_public_key_for_peer(self, peer, pub_key): + self.peer_pub_keys[peer] = pub_key + + def _get_new_payments(self): + + def add_new_transactions(transactions): + for transaction in transactions: + if transaction[1] == self.encoded_public_key: + t_hash = SHA512.new() + t_hash.update(transaction[0]) + t_hash.update(transaction[1]) + t_hash.update(str(transaction[2])) + t_hash.update(transaction[3]) + if t_hash.hexdigest() not in self.known_transactions: + self.known_transactions.append(t_hash.hexdigest()) + self._add_received_payment(transaction[0], transaction[2]) + + d = pointtraderclient.get_recent_transactions(self.private_key) + d.addCallback(add_new_transactions) + return d + + def _add_received_payment(self, encoded_other_public_key, amount): + self.received_payments[encoded_other_public_key].append((amount, time.time())) + + def _check_good_standing(self): + for peer, expected_payments in self.expected_payments.iteritems(): + expected_cutoff = time.time() - 90 + min_expected_balance = sum([a[0] for a in expected_payments if a[1] < expected_cutoff]) + received_balance = 0 + if self.peer_pub_keys[peer] in self.received_payments: + received_balance = sum([a[0] for a in self.received_payments[self.peer_pub_keys[peer]]]) + if min_expected_balance > received_balance: + logging.warning("Account in bad standing: %s (pub_key: %s), expected amount = %s, received_amount = %s", + str(peer), self.peer_pub_keys[peer], str(min_expected_balance), str(received_balance)) + + def _open_db(self): + self.db = leveldb.LevelDB(os.path.join(self.db_dir, "ptcwallet.db")) + + def save_private_key(self, private_key): + self.db.Put("private_key", private_key) + + def get_wallet_private_key(self): + try: + return self.db.Get("private_key") + except KeyError: + return None + + +class PointTraderKeyExchanger(object): + implements([IRequestCreator]) + + def __init__(self, wallet): + self.wallet = wallet + self._protocols = [] + + ######### IRequestCreator ######### 
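+    # The first request on each new protocol sends this wallet's encoded public key and asks
+    # the peer for its own; once the keys have been exchanged, queued points can be sent to
+    # that key through the point trader service.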
+ + def send_next_request(self, peer, protocol): + if not protocol in self._protocols: + r = ClientRequest({'public_key': self.wallet.encoded_public_key}, + 'public_key') + d = protocol.add_request(r) + d.addCallback(self._handle_exchange_response, peer, r, protocol) + d.addErrback(self._request_failed, peer) + self._protocols.append(protocol) + return defer.succeed(True) + else: + return defer.succeed(False) + + ######### internal calls ######### + + def _handle_exchange_response(self, response_dict, peer, request, protocol): + assert request.response_identifier in response_dict, \ + "Expected %s in dict but did not get it" % request.response_identifier + assert protocol in self._protocols, "Responding protocol is not in our list of protocols" + peer_pub_key = response_dict[request.response_identifier] + self.wallet.set_public_key_for_peer(peer, peer_pub_key) + return True + + def _request_failed(self, err, peer): + if not err.check(RequestCanceledError): + logging.warning("A peer failed to send a valid public key response. Error: %s, peer: %s", + err.getErrorMessage(), str(peer)) + #return err + + +class PointTraderKeyQueryHandlerFactory(object): + implements(IQueryHandlerFactory) + + def __init__(self, wallet): + self.wallet = wallet + + ######### IQueryHandlerFactory ######### + + def build_query_handler(self): + q_h = PointTraderKeyQueryHandler(self.wallet) + return q_h + + def get_primary_query_identifier(self): + return 'public_key' + + def get_description(self): + return "Point Trader Address - an address for receiving payments on the point trader testing network" + + +class PointTraderKeyQueryHandler(object): + implements(IQueryHandler) + + def __init__(self, wallet): + self.wallet = wallet + self.query_identifiers = ['public_key'] + self.public_key = None + self.peer = None + + ######### IQueryHandler ######### + + def register_with_request_handler(self, request_handler, peer): + self.peer = peer + request_handler.register_query_handler(self, self.query_identifiers) + + def handle_queries(self, queries): + if self.query_identifiers[0] in queries: + new_encoded_pub_key = queries[self.query_identifiers[0]] + try: + RSA.importKey(new_encoded_pub_key) + except (ValueError, TypeError, IndexError): + logging.warning("Client sent an invalid public key.") + return defer.fail(Failure(ValueError("Client sent an invalid public key"))) + self.public_key = new_encoded_pub_key + self.wallet.set_public_key_for_peer(self.peer, self.public_key) + logging.debug("Received the client's public key: %s", str(self.public_key)) + fields = {'public_key': self.wallet.encoded_public_key} + return defer.succeed(fields) + if self.public_key is None: + logging.warning("Expected a public key, but did not receive one") + return defer.fail(Failure(ValueError("Expected but did not receive a public key"))) + else: + return defer.succeed({}) \ No newline at end of file diff --git a/lbrynet/core/PaymentRateManager.py b/lbrynet/core/PaymentRateManager.py new file mode 100644 index 000000000..a18882ac0 --- /dev/null +++ b/lbrynet/core/PaymentRateManager.py @@ -0,0 +1,29 @@ +class BasePaymentRateManager(object): + def __init__(self, rate): + self.min_blob_data_payment_rate = rate + + +class PaymentRateManager(object): + def __init__(self, base, rate=None): + """ + @param base: a BasePaymentRateManager + + @param rate: the min blob data payment rate + """ + self.base = base + self.min_blob_data_payment_rate = rate + self.points_paid = 0.0 + + def get_rate_blob_data(self, peer): + return 
self.get_effective_min_blob_data_payment_rate() + + def accept_rate_blob_data(self, peer, payment_rate): + return payment_rate >= self.get_effective_min_blob_data_payment_rate() + + def get_effective_min_blob_data_payment_rate(self): + if self.min_blob_data_payment_rate is None: + return self.base.min_blob_data_payment_rate + return self.min_blob_data_payment_rate + + def record_points_paid(self, amount): + self.points_paid += amount \ No newline at end of file diff --git a/lbrynet/core/Peer.py b/lbrynet/core/Peer.py new file mode 100644 index 000000000..c3b4a76ec --- /dev/null +++ b/lbrynet/core/Peer.py @@ -0,0 +1,36 @@ +from collections import defaultdict +import datetime + + +class Peer(object): + def __init__(self, host, port): + self.host = host + self.port = port + self.attempt_connection_at = None + self.down_count = 0 + self.score = 0 + self.stats = defaultdict(float) # {string stat_type, float count} + + def is_available(self): + if (self.attempt_connection_at is None or + datetime.datetime.today() > self.attempt_connection_at): + return True + return False + + def report_up(self): + self.down_count = 0 + self.attempt_connection_at = None + + def report_down(self): + self.down_count += 1 + timeout_time = datetime.timedelta(seconds=60 * self.down_count) + self.attempt_connection_at = datetime.datetime.today() + timeout_time + + def update_score(self, score_change): + self.score += score_change + + def update_stats(self, stat_type, count): + self.stats[stat_type] += count + + def __str__(self): + return self.host + ":" + str(self.port) \ No newline at end of file diff --git a/lbrynet/core/PeerFinder.py b/lbrynet/core/PeerFinder.py new file mode 100644 index 000000000..461d1804e --- /dev/null +++ b/lbrynet/core/PeerFinder.py @@ -0,0 +1,19 @@ +from twisted.internet import defer + + +class DummyPeerFinder(object): + """This class finds peers which have announced to the DHT that they have certain blobs""" + def __init__(self): + pass + + def run_manage_loop(self): + pass + + def stop(self): + pass + + def find_peers_for_blob(self, blob_hash): + return defer.succeed([]) + + def get_most_popular_hashes(self, num_to_return): + return [] \ No newline at end of file diff --git a/lbrynet/core/PeerManager.py b/lbrynet/core/PeerManager.py new file mode 100644 index 000000000..bf83d3e1f --- /dev/null +++ b/lbrynet/core/PeerManager.py @@ -0,0 +1,14 @@ +from lbrynet.core.Peer import Peer + + +class PeerManager(object): + def __init__(self): + self.peers = [] + + def get_peer(self, host, port): + for peer in self.peers: + if peer.host == host and peer.port == port: + return peer + peer = Peer(host, port) + self.peers.append(peer) + return peer \ No newline at end of file diff --git a/lbrynet/core/RateLimiter.py b/lbrynet/core/RateLimiter.py new file mode 100644 index 000000000..0038cceaf --- /dev/null +++ b/lbrynet/core/RateLimiter.py @@ -0,0 +1,206 @@ +from zope.interface import implements +from lbrynet.interfaces import IRateLimiter + + +class DummyRateLimiter(object): + def __init__(self): + self.dl_bytes_this_second = 0 + self.ul_bytes_this_second = 0 + self.total_dl_bytes = 0 + self.total_ul_bytes = 0 + self.target_dl = 0 + self.target_ul = 0 + self.ul_delay = 0.00 + self.dl_delay = 0.00 + self.next_tick = None + + def tick(self): + + from twisted.internet import reactor + + self.dl_bytes_this_second = 0 + self.ul_bytes_this_second = 0 + self.next_tick = reactor.callLater(1.0, self.tick) + + def stop(self): + if self.next_tick is not None: + self.next_tick.cancel() + self.next_tick = None + + 
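+    # The dummy limiter only counts bytes: set_dl_limit and set_ul_limit below are stubs, and
+    # the wait-time methods return the fixed zero delays, so it can stand in for the real
+    # RateLimiter where no throttling is wanted.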
def set_dl_limit(self, limit): + pass + + def set_ul_limit(self, limit): + pass + + def ul_wait_time(self): + return self.ul_delay + + def dl_wait_time(self): + return self.dl_delay + + def report_dl_bytes(self, num_bytes): + self.dl_bytes_this_second += num_bytes + self.total_dl_bytes += num_bytes + + def report_ul_bytes(self, num_bytes): + self.ul_bytes_this_second += num_bytes + self.total_ul_bytes += num_bytes + + +class RateLimiter(object): + """This class ensures that upload and download rates don't exceed specified maximums""" + + implements(IRateLimiter) + + #called by main application + + def __init__(self, max_dl_bytes=None, max_ul_bytes=None): + self.max_dl_bytes = max_dl_bytes + self.max_ul_bytes = max_ul_bytes + self.dl_bytes_this_second = 0 + self.ul_bytes_this_second = 0 + self.total_dl_bytes = 0 + self.total_ul_bytes = 0 + self.next_tick = None + self.next_unthrottle_dl = None + self.next_unthrottle_ul = None + + self.next_dl_check = None + self.next_ul_check = None + + self.dl_check_interval = 1.0 + self.ul_check_interval = 1.0 + + self.dl_throttled = False + self.ul_throttled = False + + self.protocols = [] + + def tick(self): + + from twisted.internet import reactor + + # happens once per second + if self.next_dl_check is not None: + self.next_dl_check.cancel() + self.next_dl_check = None + if self.next_ul_check is not None: + self.next_ul_check.cancel() + self.next_ul_check = None + if self.max_dl_bytes is not None: + if self.dl_bytes_this_second == 0: + self.dl_check_interval = 1.0 + else: + self.dl_check_interval = min(1.0, self.dl_check_interval * + self.max_dl_bytes / self.dl_bytes_this_second) + self.next_dl_check = reactor.callLater(self.dl_check_interval, self.check_dl) + if self.max_ul_bytes is not None: + if self.ul_bytes_this_second == 0: + self.ul_check_interval = 1.0 + else: + self.ul_check_interval = min(1.0, self.ul_check_interval * + self.max_ul_bytes / self.ul_bytes_this_second) + self.next_ul_check = reactor.callLater(self.ul_check_interval, self.check_ul) + self.dl_bytes_this_second = 0 + self.ul_bytes_this_second = 0 + self.unthrottle_dl() + self.unthrottle_ul() + self.next_tick = reactor.callLater(1.0, self.tick) + + def stop(self): + if self.next_tick is not None: + self.next_tick.cancel() + self.next_tick = None + if self.next_dl_check is not None: + self.next_dl_check.cancel() + self.next_dl_check = None + if self.next_ul_check is not None: + self.next_ul_check.cancel() + self.next_ul_check = None + + def set_dl_limit(self, limit): + self.max_dl_bytes = limit + + def set_ul_limit(self, limit): + self.max_ul_bytes = limit + + #throttling + + def check_dl(self): + + from twisted.internet import reactor + + self.next_dl_check = None + + if self.dl_bytes_this_second > self.max_dl_bytes: + self.throttle_dl() + else: + self.next_dl_check = reactor.callLater(self.dl_check_interval, self.check_dl) + self.dl_check_interval = min(self.dl_check_interval * 2, 1.0) + + def check_ul(self): + + from twisted.internet import reactor + + self.next_ul_check = None + + if self.ul_bytes_this_second > self.max_ul_bytes: + self.throttle_ul() + else: + self.next_ul_check = reactor.callLater(self.ul_check_interval, self.check_ul) + self.ul_check_interval = min(self.ul_check_interval * 2, 1.0) + + def throttle_dl(self): + if self.dl_throttled is False: + for protocol in self.protocols: + protocol.throttle_download() + self.dl_throttled = True + + def throttle_ul(self): + if self.ul_throttled is False: + for protocol in self.protocols: + protocol.throttle_upload() + 
self.ul_throttled = True + + def unthrottle_dl(self): + if self.dl_throttled is True: + for protocol in self.protocols: + protocol.unthrottle_download() + self.dl_throttled = False + + def unthrottle_ul(self): + if self.ul_throttled is True: + for protocol in self.protocols: + protocol.unthrottle_upload() + self.ul_throttled = False + + #deprecated + + def ul_wait_time(self): + return 0 + + def dl_wait_time(self): + return 0 + + #called by protocols + + def report_dl_bytes(self, num_bytes): + self.dl_bytes_this_second += num_bytes + self.total_dl_bytes += num_bytes + + def report_ul_bytes(self, num_bytes): + self.ul_bytes_this_second += num_bytes + self.total_ul_bytes += num_bytes + + def register_protocol(self, protocol): + if protocol not in self.protocols: + self.protocols.append(protocol) + if self.dl_throttled is True: + protocol.throttle_download() + if self.ul_throttled is True: + protocol.throttle_upload() + + def unregister_protocol(self, protocol): + if protocol in self.protocols: + self.protocols.remove(protocol) \ No newline at end of file diff --git a/lbrynet/core/Session.py b/lbrynet/core/Session.py new file mode 100644 index 000000000..9bff46fde --- /dev/null +++ b/lbrynet/core/Session.py @@ -0,0 +1,245 @@ +import logging +import miniupnpc +from lbrynet.core.PTCWallet import PTCWallet +from lbrynet.core.BlobManager import DiskBlobManager, TempBlobManager +from lbrynet.dht import node +from lbrynet.core.PeerManager import PeerManager +from lbrynet.core.RateLimiter import RateLimiter +from lbrynet.core.client.DHTPeerFinder import DHTPeerFinder +from lbrynet.core.HashAnnouncer import DummyHashAnnouncer +from lbrynet.core.server.DHTHashAnnouncer import DHTHashAnnouncer +from lbrynet.core.utils import generate_id +from lbrynet.core.PaymentRateManager import BasePaymentRateManager +from twisted.internet import threads, defer + + +class LBRYSession(object): + """This class manages all important services common to any application that uses the network: + the hash announcer, which informs other peers that this peer is associated with some hash. Usually, + this means this peer has a blob identified by the hash in question, but it can be used for other + purposes. + the peer finder, which finds peers that are associated with some hash. + the blob manager, which keeps track of which blobs have been downloaded and provides access to them, + the rate limiter, which attempts to ensure download and upload rates stay below a set maximum, + and upnp, which opens holes in compatible firewalls so that remote peers can connect to this peer.""" + def __init__(self, blob_data_payment_rate, db_dir=None, lbryid=None, peer_manager=None, dht_node_port=None, + known_dht_nodes=None, peer_finder=None, hash_announcer=None, + blob_dir=None, blob_manager=None, peer_port=None, use_upnp=True, + rate_limiter=None, wallet=None): + """ + @param blob_data_payment_rate: The default payment rate for blob data + + @param db_dir: The directory in which levelDB files should be stored + + @param lbryid: The unique ID of this node + + @param peer_manager: An object which keeps track of all known peers. If None, a PeerManager will be created + + @param dht_node_port: The port on which the dht node should listen for incoming connections + + @param known_dht_nodes: A list of nodes which the dht node should use to bootstrap into the dht + + @param peer_finder: An object which is used to look up peers that are associated with some hash. 
If None, + a DHTPeerFinder will be used, which looks for peers in the distributed hash table. + + @param hash_announcer: An object which announces to other peers that this peer is associated with some hash. + If None, and peer_port is not None, a DHTHashAnnouncer will be used. If None and + peer_port is None, a DummyHashAnnouncer will be used, which will not actually announce + anything. + + @param blob_dir: The directory in which blobs will be stored. If None and blob_manager is None, blobs will + be stored in memory only. + + @param blob_manager: An object which keeps track of downloaded blobs and provides access to them. If None, + and blob_dir is not None, a DiskBlobManager will be used, with the given blob_dir. + If None and blob_dir is None, a TempBlobManager will be used, which stores blobs in + memory only. + + @param peer_port: The port on which other peers should connect to this peer + + @param use_upnp: Whether or not to try to open a hole in the firewall so that outside peers can connect to + this peer's peer_port and dht_node_port + + @param rate_limiter: An object which keeps track of the amount of data transferred to and from this peer, + and can limit that rate if desired + + @param wallet: An object which will be used to keep track of expected payments and which will pay peers. + If None, a wallet which uses the Point Trader system will be used, which is meant for testing + only + + @return: + """ + self.db_dir = db_dir + + self.lbryid = lbryid + + self.peer_manager = peer_manager + + self.dht_node_port = dht_node_port + self.known_dht_nodes = known_dht_nodes + if self.known_dht_nodes is None: + self.known_dht_nodes = [] + self.peer_finder = peer_finder + self.hash_announcer = hash_announcer + + self.blob_dir = blob_dir + self.blob_manager = blob_manager + + self.peer_port = peer_port + + self.use_upnp = use_upnp + + self.rate_limiter = rate_limiter + + self.external_ip = '127.0.0.1' + self.upnp_handler = None + self.upnp_redirects_set = False + + self.wallet = wallet + + self.dht_node = None + + self.base_payment_rate_manager = BasePaymentRateManager(blob_data_payment_rate) + + def setup(self): + """Create the blob directory and database if necessary, start all desired services""" + + logging.debug("Setting up the lbry session") + + if self.lbryid is None: + self.lbryid = generate_id() + + if self.wallet is None: + self.wallet = PTCWallet(self.db_dir) + + if self.peer_manager is None: + self.peer_manager = PeerManager() + + if self.use_upnp is True: + d = self._try_upnp() + else: + d = defer.succeed(True) + + if self.peer_finder is None: + d.addCallback(lambda _: self._setup_dht()) + else: + if self.hash_announcer is None and self.peer_port is not None: + logging.warning("The server has no way to advertise its available blobs.") + self.hash_announcer = DummyHashAnnouncer() + + d.addCallback(lambda _: self._setup_other_components()) + return d + + def shut_down(self): + """Stop all services""" + ds = [] + if self.dht_node is not None: + ds.append(defer.maybeDeferred(self.dht_node.stop)) + ds.append(defer.maybeDeferred(self.rate_limiter.stop)) + ds.append(defer.maybeDeferred(self.peer_finder.stop)) + ds.append(defer.maybeDeferred(self.hash_announcer.stop)) + ds.append(defer.maybeDeferred(self.wallet.stop)) + ds.append(defer.maybeDeferred(self.blob_manager.stop)) + if self.upnp_redirects_set is True: + ds.append(defer.maybeDeferred(self._unset_upnp)) + return defer.DeferredList(ds) + + def _try_upnp(self): + + logging.debug("In _try_upnp") + + def threaded_try_upnp(): + if 
self.use_upnp is False: + logging.debug("Not using upnp") + return False + u = miniupnpc.UPnP() + num_devices_found = u.discover() + if num_devices_found > 0: + self.upnp_handler = u + u.selectigd() + external_ip = u.externalipaddress() + if external_ip != '0.0.0.0': + self.external_ip = external_ip + if self.peer_port is not None: + u.addportmapping(self.peer_port, 'TCP', u.lanaddr, self.peer_port, 'LBRY peer port', '') + if self.dht_node_port is not None: + u.addportmapping(self.dht_node_port, 'UDP', u.lanaddr, self.dht_node_port, 'LBRY DHT port', '') + self.upnp_redirects_set = True + return True + return False + + def upnp_failed(err): + logging.warning("UPnP failed. Reason: %s", err.getErrorMessage()) + return False + + d = threads.deferToThread(threaded_try_upnp) + d.addErrback(upnp_failed) + return d + + def _setup_dht(self): + + from twisted.internet import reactor + + logging.debug("Starting the dht") + + def match_port(h, p): + return h, p + + def join_resolved_addresses(result): + addresses = [] + for success, value in result: + if success is True: + addresses.append(value) + return addresses + + def start_dht(addresses): + self.dht_node.joinNetwork(addresses) + self.peer_finder.run_manage_loop() + self.hash_announcer.run_manage_loop() + return True + + ds = [] + for host, port in self.known_dht_nodes: + d = reactor.resolve(host) + d.addCallback(match_port, port) + ds.append(d) + + self.dht_node = node.Node(udpPort=self.dht_node_port, lbryid=self.lbryid, + externalIP=self.external_ip) + self.peer_finder = DHTPeerFinder(self.dht_node, self.peer_manager) + if self.hash_announcer is None: + self.hash_announcer = DHTHashAnnouncer(self.dht_node, self.peer_port) + + dl = defer.DeferredList(ds) + dl.addCallback(join_resolved_addresses) + dl.addCallback(start_dht) + return dl + + def _setup_other_components(self): + logging.debug("Setting up the rest of the components") + + if self.rate_limiter is None: + self.rate_limiter = RateLimiter() + + if self.blob_manager is None: + if self.blob_dir is None: + self.blob_manager = TempBlobManager(self.hash_announcer) + else: + self.blob_manager = DiskBlobManager(self.hash_announcer, self.blob_dir, self.db_dir) + + self.rate_limiter.tick() + d1 = self.blob_manager.setup() + d2 = self.wallet.start() + return defer.DeferredList([d1, d2], fireOnOneErrback=True) + + def _unset_upnp(self): + + def threaded_unset_upnp(): + u = self.upnp_handler + if self.peer_port is not None: + u.deleteportmapping(self.peer_port, 'TCP') + if self.dht_node_port is not None: + u.deleteportmapping(self.dht_node_port, 'UDP') + self.upnp_redirects_set = False + + return threads.deferToThread(threaded_unset_upnp) \ No newline at end of file diff --git a/lbrynet/core/StreamCreator.py b/lbrynet/core/StreamCreator.py new file mode 100644 index 000000000..948cfa0f9 --- /dev/null +++ b/lbrynet/core/StreamCreator.py @@ -0,0 +1,73 @@ +import logging +from twisted.internet import interfaces, defer +from zope.interface import implements + + +class StreamCreator(object): + """Classes which derive from this class create a 'stream', which can be any + collection of associated blobs and associated metadata. 
These classes + use the IConsumer interface to get data from an IProducer and transform + the data into a 'stream'""" + + implements(interfaces.IConsumer) + + def __init__(self, name): + """ + @param name: the name of the stream + """ + self.name = name + self.stopped = True + self.producer = None + self.streaming = None + self.blob_count = -1 + self.current_blob = None + self.finished_deferreds = [] + + def _blob_finished(self, blob_info): + pass + + def registerProducer(self, producer, streaming): + + from twisted.internet import reactor + + self.producer = producer + self.streaming = streaming + self.stopped = False + if streaming is False: + reactor.callLater(0, self.producer.resumeProducing) + + def unregisterProducer(self): + self.stopped = True + self.producer = None + + def stop(self): + """Stop creating the stream. Create the terminating zero-length blob.""" + logging.debug("stop has been called for StreamCreator") + self.stopped = True + if self.current_blob is not None: + current_blob = self.current_blob + d = current_blob.close() + d.addCallback(self._blob_finished) + self.finished_deferreds.append(d) + self.current_blob = None + self._finalize() + dl = defer.DeferredList(self.finished_deferreds) + dl.addCallback(lambda _: self._finished()) + return dl + + def _finalize(self): + pass + + def _finished(self): + pass + + def write(self, data): + + from twisted.internet import reactor + + self._write(data) + if self.stopped is False and self.streaming is False: + reactor.callLater(0, self.producer.resumeProducing) + + def _write(self, data): + pass \ No newline at end of file diff --git a/lbrynet/core/StreamDescriptor.py b/lbrynet/core/StreamDescriptor.py new file mode 100644 index 000000000..1a33dbb64 --- /dev/null +++ b/lbrynet/core/StreamDescriptor.py @@ -0,0 +1,195 @@ +from collections import defaultdict +import json +import logging +from twisted.internet import threads +from lbrynet.core.client.StandaloneBlobDownloader import StandaloneBlobDownloader + + +class StreamDescriptorReader(object): + """Classes which derive from this class read a stream descriptor file return + a dictionary containing the fields in the file""" + def __init__(self): + pass + + def _get_raw_data(self): + """This method must be overridden by subclasses. 
It should return a deferred + which fires with the raw data in the stream descriptor""" + pass + + def get_info(self): + """Return the fields contained in the file""" + d = self._get_raw_data() + d.addCallback(json.loads) + return d + + +class PlainStreamDescriptorReader(StreamDescriptorReader): + """Read a stream descriptor file which is not a blob but a regular file""" + def __init__(self, stream_descriptor_filename): + StreamDescriptorReader.__init__(self) + self.stream_descriptor_filename = stream_descriptor_filename + + def _get_raw_data(self): + + def get_data(): + with open(self.stream_descriptor_filename) as file_handle: + raw_data = file_handle.read() + return raw_data + + return threads.deferToThread(get_data) + + +class BlobStreamDescriptorReader(StreamDescriptorReader): + """Read a stream descriptor file which is a blob""" + def __init__(self, blob): + StreamDescriptorReader.__init__(self) + self.blob = blob + + def _get_raw_data(self): + + def get_data(): + f = self.blob.open_for_reading() + if f is not None: + raw_data = f.read() + self.blob.close_read_handle(f) + return raw_data + else: + raise ValueError("Could not open the blob for reading") + + return threads.deferToThread(get_data) + + +class StreamDescriptorWriter(object): + """Classes which derive from this class write fields from a dictionary + of fields to a stream descriptor""" + def __init__(self): + pass + + def create_descriptor(self, sd_info): + return self._write_stream_descriptor(json.dumps(sd_info)) + + def _write_stream_descriptor(self, raw_data): + """This method must be overridden by subclasses to write raw data to the stream descriptor""" + pass + + +class PlainStreamDescriptorWriter(StreamDescriptorWriter): + def __init__(self, sd_file_name): + StreamDescriptorWriter.__init__(self) + self.sd_file_name = sd_file_name + + def _write_stream_descriptor(self, raw_data): + + def write_file(): + logging.debug("Writing the sd file to disk") + with open(self.sd_file_name, 'w') as sd_file: + sd_file.write(raw_data) + return self.sd_file_name + + return threads.deferToThread(write_file) + + +class BlobStreamDescriptorWriter(StreamDescriptorWriter): + def __init__(self, blob_manager): + StreamDescriptorWriter.__init__(self) + + self.blob_manager = blob_manager + + def _write_stream_descriptor(self, raw_data): + logging.debug("Creating the new blob for the stream descriptor") + blob_creator = self.blob_manager.get_blob_creator() + blob_creator.write(raw_data) + logging.debug("Wrote the data to the new blob") + return blob_creator.close() + + +class StreamDescriptorIdentifier(object): + """Tries to determine the type of stream described by the stream descriptor using the + 'stream_type' field. Keeps a list of StreamDescriptorValidators and StreamDownloaderFactorys + and returns the appropriate ones based on the type of the stream descriptor given + """ + def __init__(self): + self._sd_info_validators = {} # {stream_type: IStreamDescriptorValidator} + self._stream_downloader_factories = defaultdict(list) # {stream_type: [IStreamDownloaderFactory]} + + def add_stream_info_validator(self, stream_type, sd_info_validator): + """ + This is how the StreamDescriptorIdentifier learns about new types of stream descriptors. + + There can only be one StreamDescriptorValidator for each type of stream. + + @param stream_type: A string representing the type of stream descriptor. This must be unique to + this stream descriptor. + + @param sd_info_validator: A class implementing the IStreamDescriptorValidator interface. 
This class's + constructor will be passed the raw metadata in the stream descriptor file and its 'validate' method + will then be called. If the validation step fails, an exception will be thrown, preventing the stream + descriptor from being further processed. + + @return: None + """ + self._sd_info_validators[stream_type] = sd_info_validator + + def add_stream_downloader_factory(self, stream_type, factory): + """ + Register a stream downloader factory with the StreamDescriptorIdentifier. + + This is how the StreamDescriptorIdentifier determines what factories may be used to process different stream + descriptor files. There must be at least one factory for each type of stream added via + "add_stream_info_validator". + + @param stream_type: A string representing the type of stream descriptor which the factory knows how to process. + + @param factory: An object implementing the IStreamDownloaderFactory interface. + + @return: None + """ + self._stream_downloader_factories[stream_type].append(factory) + + def get_info_and_factories_for_sd_file(self, sd_path): + + sd_reader = PlainStreamDescriptorReader(sd_path) + d = sd_reader.get_info() + d.addCallback(self._return_info_and_factories) + return d + + def get_info_and_factories_for_sd_blob(self, sd_blob): + sd_reader = BlobStreamDescriptorReader(sd_blob) + d = sd_reader.get_info() + d.addCallback(self._return_info_and_factories) + return d + + def _get_factories(self, stream_type): + assert stream_type in self._stream_downloader_factories, "Unrecognized stream type: " + str(stream_type) + return self._stream_downloader_factories[stream_type] + + def _get_validator(self, stream_type): + assert stream_type in self._sd_info_validators, "Unrecognized stream type: " + str(stream_type) + return self._sd_info_validators[stream_type] + + def _return_info_and_factories(self, sd_info): + assert 'stream_type' in sd_info, 'Invalid stream descriptor. No stream_type parameter.' + stream_type = sd_info['stream_type'] + factories = self._get_factories(stream_type) + validator = self._get_validator(stream_type)(sd_info) + d = validator.validate() + + d.addCallback(lambda _: (validator, factories)) + return d + + +def download_sd_blob(session, blob_hash, payment_rate_manager): + """ + Downloads a single blob from the network + + @param session: + + @param blob_hash: + + @param payment_rate_manager: + + @return: An object of type HashBlob + """ + downloader = StandaloneBlobDownloader(blob_hash, session.blob_manager, session.peer_finder, + session.rate_limiter, payment_rate_manager, session.wallet) + return downloader.download() \ No newline at end of file diff --git a/lbrynet/core/__init__.py b/lbrynet/core/__init__.py new file mode 100644 index 000000000..2dff7714f --- /dev/null +++ b/lbrynet/core/__init__.py @@ -0,0 +1,7 @@ +""" +Classes and functions which can be used by any application wishing to make use of the LBRY network. + +This includes classes for connecting to other peers and downloading blobs from them, listening for +connections from peers and responding to their requests, managing locally stored blobs, sending +and receiving payments, and locating peers in the DHT. 
+""" \ No newline at end of file diff --git a/lbrynet/core/client/BlobRequester.py b/lbrynet/core/client/BlobRequester.py new file mode 100644 index 000000000..c90d70d9e --- /dev/null +++ b/lbrynet/core/client/BlobRequester.py @@ -0,0 +1,307 @@ +from collections import defaultdict +import logging +from twisted.internet import defer +from twisted.python.failure import Failure +from zope.interface import implements +from lbrynet.core.Error import PriceDisagreementError, DownloadCanceledError, InsufficientFundsError +from lbrynet.core.Error import InvalidResponseError, RequestCanceledError, NoResponseError +from lbrynet.core.client.ClientRequest import ClientRequest, ClientBlobRequest +from lbrynet.interfaces import IRequestCreator + + +class BlobRequester(object): + implements(IRequestCreator) + + def __init__(self, blob_manager, peer_finder, payment_rate_manager, wallet, download_manager): + self.blob_manager = blob_manager + self.peer_finder = peer_finder + self.payment_rate_manager = payment_rate_manager + self.wallet = wallet + self.download_manager = download_manager + self._peers = defaultdict(int) # {Peer: score} + self._available_blobs = defaultdict(list) # {Peer: [blob_hash]} + self._unavailable_blobs = defaultdict(list) # {Peer: [blob_hash]}} + self._protocol_prices = {} # {ClientProtocol: price} + self._price_disagreements = [] # [Peer] + self._incompatible_peers = [] + + ######## IRequestCreator ######### + + def send_next_request(self, peer, protocol): + sent_request = False + if self._blobs_to_download() and self._should_send_request_to(peer): + a_r = self._get_availability_request(peer) + d_r = self._get_download_request(peer) + p_r = None + + if a_r is not None or d_r is not None: + p_r = self._get_price_request(peer, protocol) + + if a_r is not None: + d1 = protocol.add_request(a_r) + d1.addCallback(self._handle_availability, peer, a_r) + d1.addErrback(self._request_failed, "availability request", peer) + sent_request = True + if d_r is not None: + reserved_points = self._reserve_points(peer, protocol, d_r.max_pay_units) + if reserved_points is not None: + # Note: The following three callbacks will be called when the blob has been + # fully downloaded or canceled + d_r.finished_deferred.addCallbacks(self._download_succeeded, self._download_failed, + callbackArgs=(peer, d_r.blob), + errbackArgs=(peer,)) + d_r.finished_deferred.addBoth(self._pay_or_cancel_payment, protocol, reserved_points, d_r.blob) + d_r.finished_deferred.addErrback(self._handle_download_error, peer, d_r.blob) + + d2 = protocol.add_blob_request(d_r) + # Note: The following two callbacks will be called as soon as the peer sends its + # response, which will be before the blob has finished downloading, but may be + # after the blob has been canceled. For example, + # 1) client sends request to Peer A + # 2) the blob is finished downloading from peer B, and therefore this one is canceled + # 3) client receives response from Peer A + # Therefore, these callbacks shouldn't rely on there being a blob about to be + # downloaded. 
+ d2.addCallback(self._handle_incoming_blob, peer, d_r) + d2.addErrback(self._request_failed, "download request", peer) + + sent_request = True + else: + d_r.cancel(InsufficientFundsError()) + return defer.fail(InsufficientFundsError()) + if sent_request is True: + if p_r is not None: + d3 = protocol.add_request(p_r) + d3.addCallback(self._handle_price_response, peer, p_r, protocol) + d3.addErrback(self._request_failed, "price request", peer) + return defer.succeed(sent_request) + + def get_new_peers(self): + d = self._get_hash_for_peer_search() + d.addCallback(self._find_peers_for_hash) + return d + + ######### internal calls ######### + + def _download_succeeded(self, arg, peer, blob): + logging.info("Blob %s has been successfully downloaded from %s", str(blob), str(peer)) + self._update_local_score(peer, 5.0) + peer.update_stats('blobs_downloaded', 1) + peer.update_score(5.0) + self.blob_manager.blob_completed(blob) + return arg + + def _download_failed(self, reason, peer): + if not reason.check(DownloadCanceledError, PriceDisagreementError): + self._update_local_score(peer, -10.0) + return reason + + def _pay_or_cancel_payment(self, arg, protocol, reserved_points, blob): + if blob.length != 0 and (not isinstance(arg, Failure) or arg.check(DownloadCanceledError)): + self._pay_peer(protocol, blob.length, reserved_points) + else: + self._cancel_points(reserved_points) + return arg + + def _handle_download_error(self, err, peer, blob_to_download): + if not err.check(DownloadCanceledError, PriceDisagreementError, RequestCanceledError): + logging.warning("An error occurred while downloading %s from %s. Error: %s", + blob_to_download.blob_hash, str(peer), err.getTraceback()) + if err.check(PriceDisagreementError): + # Don't kill the whole connection just because a price couldn't be agreed upon. + # Other information might be desired by other request creators at a better rate. 
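+            # Returning True instead of the failure marks the error as handled in the errback
+            # chain, so the connection survives and only the price negotiation is abandoned.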
+ return True + return err + + def _get_hash_for_peer_search(self): + r = None + blobs_to_download = self._blobs_to_download() + if blobs_to_download: + blobs_without_sources = self._blobs_without_sources() + if not blobs_without_sources: + blob_hash = blobs_to_download[0].blob_hash + else: + blob_hash = blobs_without_sources[0].blob_hash + r = blob_hash + logging.debug("Blob requester peer search response: %s", str(r)) + return defer.succeed(r) + + def _find_peers_for_hash(self, h): + if h is None: + return None + else: + d = self.peer_finder.find_peers_for_blob(h) + + def choose_best_peers(peers): + bad_peers = self._get_bad_peers() + return [p for p in peers if not p in bad_peers] + + d.addCallback(choose_best_peers) + return d + + def _should_send_request_to(self, peer): + if self._peers[peer] < -5.0: + return False + if peer in self._price_disagreements: + return False + if peer in self._incompatible_peers: + return False + return True + + def _get_bad_peers(self): + return [p for p in self._peers.iterkeys() if not self._should_send_request_to(p)] + + def _hash_available(self, blob_hash): + for peer in self._available_blobs: + if blob_hash in self._available_blobs[peer]: + return True + return False + + def _hash_available_on(self, blob_hash, peer): + if blob_hash in self._available_blobs[peer]: + return True + return False + + def _blobs_to_download(self): + needed_blobs = self.download_manager.needed_blobs() + return sorted(needed_blobs, key=lambda b: b.is_downloading()) + + def _blobs_without_sources(self): + return [b for b in self.download_manager.needed_blobs() if not self._hash_available(b.blob_hash)] + + def _get_availability_request(self, peer): + all_needed = [b.blob_hash for b in self._blobs_to_download() if not b.blob_hash in self._available_blobs[peer]] + # sort them so that the peer will be asked first for blobs it hasn't said it doesn't have + to_request = sorted(all_needed, key=lambda b: b in self._unavailable_blobs[peer])[:20] + if to_request: + r_dict = {'requested_blobs': to_request} + response_identifier = 'available_blobs' + request = ClientRequest(r_dict, response_identifier) + return request + return None + + def _get_download_request(self, peer): + request = None + to_download = [b for b in self._blobs_to_download() if self._hash_available_on(b.blob_hash, peer)] + while to_download and request is None: + blob_to_download = to_download[0] + to_download = to_download[1:] + if not blob_to_download.is_validated(): + d, write_func, cancel_func = blob_to_download.open_for_writing(peer) + + def counting_write_func(data): + peer.update_stats('blob_bytes_downloaded', len(data)) + return write_func(data) + + if d is not None: + + request_dict = {'requested_blob': blob_to_download.blob_hash} + response_identifier = 'incoming_blob' + + request = ClientBlobRequest(request_dict, response_identifier, counting_write_func, d, + cancel_func, blob_to_download) + + logging.info("Requesting blob %s from %s", str(blob_to_download), str(peer)) + return request + + def _price_settled(self, protocol): + if protocol in self._protocol_prices: + return True + return False + + def _get_price_request(self, peer, protocol): + request = None + if not protocol in self._protocol_prices: + self._protocol_prices[protocol] = self.payment_rate_manager.get_rate_blob_data(peer) + request_dict = {'blob_data_payment_rate': self._protocol_prices[protocol]} + request = ClientRequest(request_dict, 'blob_data_payment_rate') + return request + + def _update_local_score(self, peer, amount): + 
self._peers[peer] += amount + + def _reserve_points(self, peer, protocol, max_bytes): + assert protocol in self._protocol_prices + points_to_reserve = 1.0 * max_bytes * self._protocol_prices[protocol] / 2**20 + return self.wallet.reserve_points(peer, points_to_reserve) + + def _pay_peer(self, protocol, num_bytes, reserved_points): + assert num_bytes != 0 + assert protocol in self._protocol_prices + point_amount = 1.0 * num_bytes * self._protocol_prices[protocol] / 2**20 + self.wallet.send_points(reserved_points, point_amount) + self.payment_rate_manager.record_points_paid(point_amount) + + def _cancel_points(self, reserved_points): + self.wallet.cancel_point_reservation(reserved_points) + + def _handle_availability(self, response_dict, peer, request): + if not request.response_identifier in response_dict: + raise InvalidResponseError("response identifier not in response") + logging.debug("Received a response to the availability request") + blob_hashes = response_dict[request.response_identifier] + for blob_hash in blob_hashes: + if blob_hash in request.request_dict['requested_blobs']: + logging.debug("The server has indicated it has the following blob available: %s", blob_hash) + self._available_blobs[peer].append(blob_hash) + if blob_hash in self._unavailable_blobs[peer]: + self._unavailable_blobs[peer].remove(blob_hash) + request.request_dict['requested_blobs'].remove(blob_hash) + for blob_hash in request.request_dict['requested_blobs']: + self._unavailable_blobs[peer].append(blob_hash) + return True + + def _handle_incoming_blob(self, response_dict, peer, request): + if not request.response_identifier in response_dict: + return InvalidResponseError("response identifier not in response") + if not type(response_dict[request.response_identifier]) == dict: + return InvalidResponseError("response not a dict. got %s" % + (type(response_dict[request.response_identifier]),)) + response = response_dict[request.response_identifier] + if 'error' in response: + # This means we're not getting our blob for some reason + if response['error'] == "RATE_UNSET": + # Stop the download with an error that won't penalize the peer + request.cancel(PriceDisagreementError()) + else: + # The peer has done something bad so we should get out of here + return InvalidResponseError("Got an unknown error from the peer: %s" % + (response['error'],)) + else: + if not 'blob_hash' in response: + return InvalidResponseError("Missing the required field 'blob_hash'") + if not response['blob_hash'] == request.request_dict['requested_blob']: + return InvalidResponseError("Incoming blob does not match expected. Incoming: %s. 
Expected: %s" % + (response['blob_hash'], request.request_dict['requested_blob'])) + if not 'length' in response: + return InvalidResponseError("Missing the required field 'length'") + if not request.blob.set_length(response['length']): + return InvalidResponseError("Could not set the length of the blob") + return True + + def _handle_price_response(self, response_dict, peer, request, protocol): + if not request.response_identifier in response_dict: + return InvalidResponseError("response identifier not in response") + assert protocol in self._protocol_prices + response = response_dict[request.response_identifier] + if response == "RATE_ACCEPTED": + return True + else: + del self._protocol_prices[protocol] + self._price_disagreements.append(peer) + return True + + def _request_failed(self, reason, request_type, peer): + if reason.check(RequestCanceledError): + return + if reason.check(NoResponseError): + self._incompatible_peers.append(peer) + return + logging.warning("Blob requester: a request of type '%s' failed. Reason: %s, Error type: %s", + str(request_type), reason.getErrorMessage(), reason.type) + self._update_local_score(peer, -10.0) + if isinstance(reason, InvalidResponseError): + peer.update_score(-10.0) + else: + peer.update_score(-2.0) + return reason \ No newline at end of file diff --git a/lbrynet/core/client/ClientProtocol.py b/lbrynet/core/client/ClientProtocol.py new file mode 100644 index 000000000..5a9a2387b --- /dev/null +++ b/lbrynet/core/client/ClientProtocol.py @@ -0,0 +1,235 @@ +import json +import logging +from twisted.internet import error, defer, reactor +from twisted.internet.protocol import Protocol, ClientFactory +from twisted.python import failure +from lbrynet.conf import MAX_RESPONSE_INFO_SIZE as MAX_RESPONSE_SIZE +from lbrynet.core.Error import ConnectionClosedBeforeResponseError, NoResponseError +from lbrynet.core.Error import DownloadCanceledError, MisbehavingPeerError +from lbrynet.core.Error import RequestCanceledError +from lbrynet.interfaces import IRequestSender, IRateLimited +from zope.interface import implements + + +class ClientProtocol(Protocol): + implements(IRequestSender, IRateLimited) + + ######### Protocol ######### + + def connectionMade(self): + self._connection_manager = self.factory.connection_manager + self._rate_limiter = self.factory.rate_limiter + self.peer = self.factory.peer + self._response_deferreds = {} + self._response_buff = '' + self._downloading_blob = False + self._blob_download_request = None + self._next_request = {} + self.connection_closed = False + self.connection_closing = False + + self.peer.report_up() + + self._ask_for_request() + + def dataReceived(self, data): + self._rate_limiter.report_dl_bytes(len(data)) + if self._downloading_blob is True: + self._blob_download_request.write(data) + else: + self._response_buff += data + if len(self._response_buff) > MAX_RESPONSE_SIZE: + logging.warning("Response is too large. 
Size %s", len(self._response_buff)) + self.transport.loseConnection() + response, extra_data = self._get_valid_response(self._response_buff) + if response is not None: + self._response_buff = '' + self._handle_response(response) + if self._downloading_blob is True and len(extra_data) != 0: + self._blob_download_request.write(extra_data) + + def connectionLost(self, reason): + self.connection_closed = True + if reason.check(error.ConnectionDone): + err = failure.Failure(ConnectionClosedBeforeResponseError()) + else: + err = reason + #if self._response_deferreds: + # logging.warning("Lost connection with active response deferreds. %s", str(self._response_deferreds)) + for key, d in self._response_deferreds.items(): + del self._response_deferreds[key] + d.errback(err) + if self._blob_download_request is not None: + self._blob_download_request.cancel(err) + self._connection_manager.protocol_disconnected(self.peer, self) + + ######### IRequestSender ######### + + def add_request(self, request): + if request.response_identifier in self._response_deferreds: + return defer.fail(failure.Failure(ValueError("There is already a request for that response active"))) + self._next_request.update(request.request_dict) + d = defer.Deferred() + logging.debug("Adding a request. Request: %s", str(request)) + self._response_deferreds[request.response_identifier] = d + return d + + def add_blob_request(self, blob_request): + if self._blob_download_request is None: + d = self.add_request(blob_request) + self._blob_download_request = blob_request + blob_request.finished_deferred.addCallbacks(self._downloading_finished, + self._downloading_failed) + blob_request.finished_deferred.addErrback(self._handle_response_error) + return d + else: + return defer.fail(failure.Failure(ValueError("There is already a blob download request active"))) + + def cancel_requests(self): + self.connection_closing = True + ds = [] + err = failure.Failure(RequestCanceledError()) + for key, d in self._response_deferreds.items(): + del self._response_deferreds[key] + d.errback(err) + ds.append(d) + if self._blob_download_request is not None: + self._blob_download_request.cancel(err) + ds.append(self._blob_download_request.finished_deferred) + self._blob_download_request = None + return defer.DeferredList(ds) + + ######### Internal request handling ######### + + def _handle_request_error(self, err): + logging.error("An unexpected error occurred creating or sending a request to %s. Error message: %s", + str(self.peer), err.getTraceback()) + self.transport.loseConnection() + + def _ask_for_request(self): + + if self.connection_closed is True or self.connection_closing is True: + return + + def send_request_or_close(do_request): + if do_request is True: + request_msg, self._next_request = self._next_request, {} + self._send_request_message(request_msg) + else: + # The connection manager has indicated that this connection should be terminated + logging.info("Closing the connection to %s due to having no further requests to send", str(self.peer)) + self.transport.loseConnection() + + d = self._connection_manager.get_next_request(self.peer, self) + d.addCallback(send_request_or_close) + d.addErrback(self._handle_request_error) + + def _send_request_message(self, request_msg): + # TODO: compare this message to the last one. If they're the same, + # TODO: incrementally delay this message. 
+ m = json.dumps(request_msg) + self.transport.write(m) + + def _get_valid_response(self, response_msg): + extra_data = None + response = None + curr_pos = 0 + while 1: + next_close_paren = response_msg.find('}', curr_pos) + if next_close_paren != -1: + curr_pos = next_close_paren + 1 + try: + response = json.loads(response_msg[:curr_pos]) + except ValueError: + pass + else: + extra_data = response_msg[curr_pos:] + break + else: + break + return response, extra_data + + def _handle_response_error(self, err): + # If an error gets to this point, log it and kill the connection. + if not err.check(MisbehavingPeerError, ConnectionClosedBeforeResponseError, DownloadCanceledError, + RequestCanceledError): + logging.error("The connection to %s is closing due to an unexpected error: %s", str(self.peer), + err.getErrorMessage()) + if not err.check(RequestCanceledError): + self.transport.loseConnection() + + def _handle_response(self, response): + ds = [] + logging.debug("Handling a response. Current expected responses: %s", str(self._response_deferreds)) + for key, val in response.items(): + if key in self._response_deferreds: + d = self._response_deferreds[key] + del self._response_deferreds[key] + d.callback({key: val}) + ds.append(d) + for k, d in self._response_deferreds.items(): + del self._response_deferreds[k] + d.errback(failure.Failure(NoResponseError())) + ds.append(d) + + if self._blob_download_request is not None: + self._downloading_blob = True + d = self._blob_download_request.finished_deferred + d.addErrback(self._handle_response_error) + ds.append(d) + + dl = defer.DeferredList(ds) + + dl.addCallback(lambda _: self._ask_for_request()) + + def _downloading_finished(self, arg): + logging.debug("The blob has finished downloading") + self._blob_download_request = None + self._downloading_blob = False + return arg + + def _downloading_failed(self, err): + if err.check(DownloadCanceledError): + # TODO: (wish-list) it seems silly to close the connection over this, and it shouldn't + # TODO: always be this way. it's done this way now because the client has no other way + # TODO: of telling the server it wants the download to stop. It would be great if the + # TODO: protocol had such a mechanism. 
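            # Note on current behaviour: returning err below keeps the deferred in its
            # errback state, so _handle_response_error (added in add_blob_request) still
            # runs; it skips the "unexpected error" log for DownloadCanceledError but
            # does call transport.loseConnection(), so the connection is closed even
            # though the explicit loseConnection() a few lines down is commented out.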
+ logging.info("Closing the connection to %s because the download of blob %s was canceled", + str(self.peer), str(self._blob_download_request.blob)) + #self.transport.loseConnection() + #return True + return err + + ######### IRateLimited ######### + + def throttle_upload(self): + pass + + def unthrottle_upload(self): + pass + + def throttle_download(self): + self.transport.pauseProducing() + + def unthrottle_download(self): + self.transport.resumeProducing() + + +class ClientProtocolFactory(ClientFactory): + protocol = ClientProtocol + + def __init__(self, peer, rate_limiter, connection_manager): + self.peer = peer + self.rate_limiter = rate_limiter + self.connection_manager = connection_manager + self.p = None + + def clientConnectionFailed(self, connector, reason): + self.peer.report_down() + self.connection_manager.protocol_disconnected(self.peer, connector) + + def buildProtocol(self, addr): + p = self.protocol() + p.factory = self + self.p = p + return p \ No newline at end of file diff --git a/lbrynet/core/client/ClientRequest.py b/lbrynet/core/client/ClientRequest.py new file mode 100644 index 000000000..b5630f729 --- /dev/null +++ b/lbrynet/core/client/ClientRequest.py @@ -0,0 +1,27 @@ +from lbrynet.conf import BLOB_SIZE + + +class ClientRequest(object): + def __init__(self, request_dict, response_identifier=None): + self.request_dict = request_dict + self.response_identifier = response_identifier + + +class ClientPaidRequest(ClientRequest): + def __init__(self, request_dict, response_identifier, max_pay_units): + ClientRequest.__init__(self, request_dict, response_identifier) + self.max_pay_units = max_pay_units + + +class ClientBlobRequest(ClientPaidRequest): + def __init__(self, request_dict, response_identifier, write_func, finished_deferred, + cancel_func, blob): + if blob.length is None: + max_pay_units = BLOB_SIZE + else: + max_pay_units = blob.length + ClientPaidRequest.__init__(self, request_dict, response_identifier, max_pay_units) + self.write = write_func + self.finished_deferred = finished_deferred + self.cancel = cancel_func + self.blob = blob \ No newline at end of file diff --git a/lbrynet/core/client/ConnectionManager.py b/lbrynet/core/client/ConnectionManager.py new file mode 100644 index 000000000..6f8ce06b9 --- /dev/null +++ b/lbrynet/core/client/ConnectionManager.py @@ -0,0 +1,177 @@ +import logging +from twisted.internet import defer +from zope.interface import implements +from lbrynet import interfaces +from lbrynet.conf import MAX_CONNECTIONS_PER_STREAM +from lbrynet.core.client.ClientProtocol import ClientProtocolFactory +from lbrynet.core.Error import InsufficientFundsError + + +class ConnectionManager(object): + implements(interfaces.IConnectionManager) + + def __init__(self, downloader, rate_limiter, primary_request_creators, secondary_request_creators): + self.downloader = downloader + self.rate_limiter = rate_limiter + self.primary_request_creators = primary_request_creators + self.secondary_request_creators = secondary_request_creators + self.peer_connections = {} # {Peer: {'connection': connection, + # 'request_creators': [IRequestCreator if using this connection]}} + self.connections_closing = {} # {Peer: deferred (fired when the connection is closed)} + self.next_manage_call = None + + def start(self): + from twisted.internet import reactor + + if self.next_manage_call is not None and self.next_manage_call.active() is True: + self.next_manage_call.cancel() + self.next_manage_call = reactor.callLater(0, self._manage) + return defer.succeed(True) + 
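The start/stop methods above and the _manage method further down follow the same self-rescheduling reactor.callLater pattern used by DHTPeerFinder, DHTHashAnnouncer and StreamProgressManager later in this commit. A minimal distilled sketch of that pattern (the PeriodicManager name is hypothetical and not part of the commit) would look roughly like this:

from twisted.internet import defer, reactor

class PeriodicManager(object):
    """Illustrative only: a self-rescheduling management loop."""
    def __init__(self, interval=1):
        self.interval = interval
        self.next_call = None

    def start(self):
        # cancel any pending call, then run one pass immediately
        if self.next_call is not None and self.next_call.active():
            self.next_call.cancel()
        self.next_call = reactor.callLater(0, self._manage)
        return defer.succeed(True)

    def _manage(self):
        # ... one round of housekeeping, then reschedule ...
        self.next_call = reactor.callLater(self.interval, self._manage)

    def stop(self):
        # cancel the pending call so the loop stops cleanly
        if self.next_call is not None and self.next_call.active():
            self.next_call.cancel()
        self.next_call = None
        return defer.succeed(True)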
+ def stop(self): + if self.next_manage_call is not None and self.next_manage_call.active() is True: + self.next_manage_call.cancel() + self.next_manage_call = None + closing_deferreds = [] + for peer in self.peer_connections.keys(): + + def close_connection(p): + logging.info("Abruptly closing a connection to %s due to downloading being paused", + str(p)) + + if self.peer_connections[p]['factory'].p is not None: + d = self.peer_connections[p]['factory'].p.cancel_requests() + else: + d = defer.succeed(True) + + def disconnect_peer(): + self.peer_connections[p]['connection'].disconnect() + if p in self.peer_connections: + del self.peer_connections[p] + d = defer.Deferred() + self.connections_closing[p] = d + return d + + d.addBoth(lambda _: disconnect_peer()) + return d + + closing_deferreds.append(close_connection(peer)) + return defer.DeferredList(closing_deferreds) + + def get_next_request(self, peer, protocol): + + logging.debug("Trying to get the next request for peer %s", str(peer)) + + if not peer in self.peer_connections: + logging.debug("The peer has already been told to shut down.") + return defer.succeed(False) + + def handle_error(err): + if err.check(InsufficientFundsError): + self.downloader.insufficient_funds() + return False + else: + return err + + def check_if_request_sent(request_sent, request_creator): + if request_sent is False: + if request_creator in self.peer_connections[peer]['request_creators']: + self.peer_connections[peer]['request_creators'].remove(request_creator) + else: + if not request_creator in self.peer_connections[peer]['request_creators']: + self.peer_connections[peer]['request_creators'].append(request_creator) + return request_sent + + def check_requests(requests): + have_request = True in [r[1] for r in requests if r[0] is True] + return have_request + + def get_secondary_requests_if_necessary(have_request): + if have_request is True: + ds = [] + for s_r_c in self.secondary_request_creators: + d = s_r_c.send_next_request(peer, protocol) + ds.append(d) + dl = defer.DeferredList(ds) + else: + dl = defer.succeed(None) + dl.addCallback(lambda _: have_request) + return dl + + ds = [] + + for p_r_c in self.primary_request_creators: + d = p_r_c.send_next_request(peer, protocol) + d.addErrback(handle_error) + d.addCallback(check_if_request_sent, p_r_c) + ds.append(d) + + dl = defer.DeferredList(ds, fireOnOneErrback=True) + dl.addCallback(check_requests) + dl.addCallback(get_secondary_requests_if_necessary) + return dl + + def protocol_disconnected(self, peer, protocol): + if peer in self.peer_connections: + del self.peer_connections[peer] + if peer in self.connections_closing: + d = self.connections_closing[peer] + del self.connections_closing[peer] + d.callback(True) + + def _rank_request_creator_connections(self): + """ + @return: an ordered list of our request creators, ranked according to which has the least number of + connections open that it likes + """ + def count_peers(request_creator): + return len([p for p in self.peer_connections.itervalues() if request_creator in p['request_creators']]) + + return sorted(self.primary_request_creators, key=count_peers) + + def _connect_to_peer(self, peer): + + from twisted.internet import reactor + + if peer is not None: + logging.debug("Trying to connect to %s", str(peer)) + factory = ClientProtocolFactory(peer, self.rate_limiter, self) + connection = reactor.connectTCP(peer.host, peer.port, factory) + self.peer_connections[peer] = {'connection': connection, + 'request_creators': 
self.primary_request_creators[:], + 'factory': factory} + + def _manage(self): + + from twisted.internet import reactor + + def get_new_peers(request_creators): + logging.debug("Trying to get a new peer to connect to") + if len(request_creators) > 0: + logging.debug("Got a creator to check: %s", str(request_creators[0])) + d = request_creators[0].get_new_peers() + d.addCallback(lambda h: h if h is not None else get_new_peers(request_creators[1:])) + return d + else: + return defer.succeed(None) + + def pick_best_peer(peers): + # TODO: Eventually rank them based on past performance/reputation. For now + # TODO: just pick the first to which we don't have an open connection + logging.debug("Got a list of peers to choose from: %s", str(peers)) + if peers is None: + return None + for peer in peers: + if not peer in self.peer_connections: + logging.debug("Got a good peer. Returning peer %s", str(peer)) + return peer + logging.debug("Couldn't find a good peer to connect to") + return None + + if len(self.peer_connections) < MAX_CONNECTIONS_PER_STREAM: + ordered_request_creators = self._rank_request_creator_connections() + d = get_new_peers(ordered_request_creators) + d.addCallback(pick_best_peer) + d.addCallback(self._connect_to_peer) + + self.next_manage_call = reactor.callLater(1, self._manage) \ No newline at end of file diff --git a/lbrynet/core/client/DHTPeerFinder.py b/lbrynet/core/client/DHTPeerFinder.py new file mode 100644 index 000000000..4ee3266f4 --- /dev/null +++ b/lbrynet/core/client/DHTPeerFinder.py @@ -0,0 +1,47 @@ +import binascii +from zope.interface import implements +from lbrynet.interfaces import IPeerFinder + + +class DHTPeerFinder(object): + """This class finds peers which have announced to the DHT that they have certain blobs""" + implements(IPeerFinder) + + def __init__(self, dht_node, peer_manager): + self.dht_node = dht_node + self.peer_manager = peer_manager + self.peers = [] + self.next_manage_call = None + + def run_manage_loop(self): + + from twisted.internet import reactor + + self._manage_peers() + self.next_manage_call = reactor.callLater(60, self.run_manage_loop) + + def stop(self): + if self.next_manage_call is not None and self.next_manage_call.active(): + self.next_manage_call.cancel() + self.next_manage_call = None + + def _manage_peers(self): + pass + + def find_peers_for_blob(self, blob_hash): + bin_hash = binascii.unhexlify(blob_hash) + + def filter_peers(peer_list): + good_peers = [] + for host, port in peer_list: + peer = self.peer_manager.get_peer(host, port) + if peer.is_available() is True: + good_peers.append(peer) + return good_peers + + d = self.dht_node.getPeersForBlob(bin_hash) + d.addCallback(filter_peers) + return d + + def get_most_popular_hashes(self, num_to_return): + return self.dht_node.get_most_popular_hashes(num_to_return) \ No newline at end of file diff --git a/lbrynet/core/client/DownloadManager.py b/lbrynet/core/client/DownloadManager.py new file mode 100644 index 000000000..2ddd22bdb --- /dev/null +++ b/lbrynet/core/client/DownloadManager.py @@ -0,0 +1,115 @@ +import logging +from twisted.internet import defer +from twisted.python import failure +from zope.interface import implements +from lbrynet import interfaces + + +class DownloadManager(object): + implements(interfaces.IDownloadManager) + + def __init__(self, blob_manager, upload_allowed): + self.blob_manager = blob_manager + self.upload_allowed = upload_allowed + self.blob_requester = None + self.blob_info_finder = None + self.progress_manager = None + self.blob_handler = 
None + self.connection_manager = None + + self.blobs = {} + self.blob_infos = {} + + ######### IDownloadManager ######### + + def start_downloading(self): + d = self.blob_info_finder.get_initial_blobs() + logging.debug("Requested the initial blobs from the info finder") + d.addCallback(self.add_blobs_to_download) + d.addCallback(lambda _: self.resume_downloading()) + return d + + def resume_downloading(self): + + def check_start(result, manager): + if isinstance(result, failure.Failure): + logging.error("Failed to start the %s: %s", manager, result.getErrorMessage()) + return False + return True + + d1 = self.progress_manager.start() + d1.addBoth(check_start, "progress manager") + d2 = self.connection_manager.start() + d2.addBoth(check_start, "connection manager") + dl = defer.DeferredList([d1, d2]) + dl.addCallback(lambda xs: False not in xs) + return dl + + def stop_downloading(self): + + def check_stop(result, manager): + if isinstance(result, failure.Failure): + logging.error("Failed to stop the %s: %s", manager. result.getErrorMessage()) + return False + return True + + d1 = self.progress_manager.stop() + d1.addBoth(check_stop, "progress manager") + d2 = self.connection_manager.stop() + d2.addBoth(check_stop, "connection manager") + dl = defer.DeferredList([d1, d2]) + dl.addCallback(lambda xs: False not in xs) + return dl + + def add_blobs_to_download(self, blob_infos): + + logging.debug("Adding %s to blobs", str(blob_infos)) + + def add_blob_to_list(blob, blob_num): + self.blobs[blob_num] = blob + logging.info("Added blob (hash: %s, number %s) to the list", str(blob.blob_hash), str(blob_num)) + + def error_during_add(err): + logging.warning("An error occurred adding the blob to blobs. Error:%s", err.getErrorMessage()) + return err + + ds = [] + for blob_info in blob_infos: + if not blob_info.blob_num in self.blobs: + self.blob_infos[blob_info.blob_num] = blob_info + logging.debug("Trying to get the blob associated with blob hash %s", str(blob_info.blob_hash)) + d = self.blob_manager.get_blob(blob_info.blob_hash, self.upload_allowed, blob_info.length) + d.addCallback(add_blob_to_list, blob_info.blob_num) + d.addErrback(error_during_add) + ds.append(d) + + dl = defer.DeferredList(ds) + return dl + + def stream_position(self): + return self.progress_manager.stream_position() + + def needed_blobs(self): + return self.progress_manager.needed_blobs() + + def final_blob_num(self): + return self.blob_info_finder.final_blob_num() + + def handle_blob(self, blob_num): + return self.blob_handler.handle_blob(self.blobs[blob_num], self.blob_infos[blob_num]) + + def calculate_total_bytes(self): + return sum([bi.length for bi in self.blob_infos.itervalues()]) + + def calculate_bytes_left_to_output(self): + if not self.blobs: + return self.calculate_total_bytes() + else: + to_be_outputted = [b for n, b in self.blobs.iteritems() if n >= self.progress_manager.last_blob_outputted] + return sum([b.length for b in to_be_outputted if b.length is not None]) + + def calculate_bytes_left_to_download(self): + if not self.blobs: + return self.calculate_total_bytes() + else: + return sum([b.length for b in self.needed_blobs() if b.length is not None]) \ No newline at end of file diff --git a/lbrynet/core/client/StandaloneBlobDownloader.py b/lbrynet/core/client/StandaloneBlobDownloader.py new file mode 100644 index 000000000..208b0020f --- /dev/null +++ b/lbrynet/core/client/StandaloneBlobDownloader.py @@ -0,0 +1,133 @@ +import logging +from zope.interface import implements +from lbrynet import interfaces +from 
lbrynet.core.BlobInfo import BlobInfo +from lbrynet.core.client.BlobRequester import BlobRequester +from lbrynet.core.client.ConnectionManager import ConnectionManager +from lbrynet.core.client.DownloadManager import DownloadManager +from twisted.internet import defer + + +class SingleBlobMetadataHandler(object): + implements(interfaces.IMetadataHandler) + + def __init__(self, blob_hash, download_manager): + self.blob_hash = blob_hash + self.download_manager = download_manager + + ######## IMetadataHandler ######### + + def get_initial_blobs(self): + logging.debug("Returning the blob info") + return defer.succeed([BlobInfo(self.blob_hash, 0, None)]) + + def final_blob_num(self): + return 0 + + +class SingleProgressManager(object): + def __init__(self, finished_callback, download_manager): + self.finished_callback = finished_callback + self.finished = False + self.download_manager = download_manager + self._next_check_if_finished = None + + def start(self): + + from twisted.internet import reactor + + assert self._next_check_if_finished is None + self._next_check_if_finished = reactor.callLater(0, self._check_if_finished) + return defer.succeed(True) + + def stop(self): + if self._next_check_if_finished is not None: + self._next_check_if_finished.cancel() + self._next_check_if_finished = None + return defer.succeed(True) + + def _check_if_finished(self): + + from twisted.internet import reactor + + self._next_check_if_finished = None + if self.finished is False: + if self.stream_position() == 1: + self.blob_downloaded(self.download_manager.blobs[0], 0) + else: + self._next_check_if_finished = reactor.callLater(1, self._check_if_finished) + + def stream_position(self): + blobs = self.download_manager.blobs + if blobs and blobs[0].is_validated(): + return 1 + return 0 + + def needed_blobs(self): + blobs = self.download_manager.blobs + assert len(blobs) == 1 + return [b for b in blobs.itervalues() if not b.is_validated()] + + def blob_downloaded(self, blob, blob_num): + + from twisted.internet import reactor + + logging.debug("The blob %s has been downloaded. 
Calling the finished callback", str(blob)) + if self.finished is False: + self.finished = True + reactor.callLater(0, self.finished_callback, blob) + + +class DummyBlobHandler(object): + def __init__(self): + pass + + def handle_blob(self, blob, blob_info): + pass + + +class StandaloneBlobDownloader(object): + + def __init__(self, blob_hash, blob_manager, peer_finder, rate_limiter, payment_rate_manager, wallet): + self.blob_hash = blob_hash + self.blob_manager = blob_manager + self.peer_finder = peer_finder + self.rate_limiter = rate_limiter + self.payment_rate_manager = payment_rate_manager + self.wallet = wallet + self.download_manager = None + self.finished_deferred = None + + def download(self): + def cancel_download(d): + self.stop() + + self.finished_deferred = defer.Deferred(canceller=cancel_download) + self.download_manager = DownloadManager(self.blob_manager, True) + self.download_manager.blob_requester = BlobRequester(self.blob_manager, self.peer_finder, + self.payment_rate_manager, self.wallet, + self.download_manager) + self.download_manager.blob_info_finder = SingleBlobMetadataHandler(self.blob_hash, + self.download_manager) + self.download_manager.progress_manager = SingleProgressManager(self._blob_downloaded, + self.download_manager) + self.download_manager.blob_handler = DummyBlobHandler() + self.download_manager.wallet_info_exchanger = self.wallet.get_info_exchanger() + self.download_manager.connection_manager = ConnectionManager( + self, self.rate_limiter, + [self.download_manager.blob_requester], + [self.download_manager.wallet_info_exchanger] + ) + d = self.download_manager.start_downloading() + d.addCallback(lambda _: self.finished_deferred) + return d + + def stop(self): + return self.download_manager.stop_downloading() + + def _blob_downloaded(self, blob): + self.stop() + self.finished_deferred.callback(blob) + + def insufficient_funds(self): + return self.stop() \ No newline at end of file diff --git a/lbrynet/core/client/StreamProgressManager.py b/lbrynet/core/client/StreamProgressManager.py new file mode 100644 index 000000000..42ff9233b --- /dev/null +++ b/lbrynet/core/client/StreamProgressManager.py @@ -0,0 +1,141 @@ +import logging +from lbrynet.interfaces import IProgressManager +from twisted.internet import defer +from zope.interface import implements + + +class StreamProgressManager(object): + implements(IProgressManager) + + def __init__(self, finished_callback, blob_manager, download_manager, delete_blob_after_finished=False): + self.finished_callback = finished_callback + self.blob_manager = blob_manager + self.delete_blob_after_finished = delete_blob_after_finished + self.download_manager = download_manager + self.provided_blob_nums = [] + self.last_blob_outputted = -1 + self.stopped = True + self._next_try_to_output_call = None + self.outputting_d = None + + ######### IProgressManager ######### + + def start(self): + + from twisted.internet import reactor + + self.stopped = False + self._next_try_to_output_call = reactor.callLater(0, self._try_to_output) + return defer.succeed(True) + + def stop(self): + self.stopped = True + if self._next_try_to_output_call is not None and self._next_try_to_output_call.active(): + self._next_try_to_output_call.cancel() + self._next_try_to_output_call = None + return self._stop_outputting() + + def blob_downloaded(self, blob, blob_num): + if self.outputting_d is None: + self._output_loop() + + ######### internal ######### + + def _finished_outputting(self): + self.finished_callback(True) + + def _try_to_output(self): + 
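        # Polls roughly once per second: reschedule this check, then kick off an
        # output pass via _output_loop() if one is not already in progress.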
+ from twisted.internet import reactor + + self._next_try_to_output_call = reactor.callLater(1, self._try_to_output) + if self.outputting_d is None: + self._output_loop() + + def _output_loop(self): + pass + + def _stop_outputting(self): + if self.outputting_d is not None: + return self.outputting_d + return defer.succeed(None) + + def _finished_with_blob(self, blob_num): + logging.debug("In _finished_with_blob, blob_num = %s", str(blob_num)) + if self.delete_blob_after_finished is True: + logging.debug("delete_blob_after_finished is True") + blobs = self.download_manager.blobs + if blob_num in blobs: + logging.debug("Telling the blob manager, %s, to delete blob %s", str(self.blob_manager), + blobs[blob_num].blob_hash) + self.blob_manager.delete_blobs([blobs[blob_num].blob_hash]) + else: + logging.debug("Blob number %s was not in blobs", str(blob_num)) + else: + logging.debug("delete_blob_after_finished is False") + + +class FullStreamProgressManager(StreamProgressManager): + def __init__(self, finished_callback, blob_manager, download_manager, delete_blob_after_finished=False): + StreamProgressManager.__init__(self, finished_callback, blob_manager, download_manager, + delete_blob_after_finished) + self.outputting_d = None + + ######### IProgressManager ######### + + def stream_position(self): + blobs = self.download_manager.blobs + if not blobs: + return 0 + else: + for i in xrange(max(blobs.iterkeys())): + if not i in blobs or (not blobs[i].is_validated() and not i in self.provided_blob_nums): + return i + return max(blobs.iterkeys()) + 1 + + def needed_blobs(self): + blobs = self.download_manager.blobs + return [b for n, b in blobs.iteritems() if not b.is_validated() and not n in self.provided_blob_nums] + + ######### internal ######### + + def _output_loop(self): + + from twisted.internet import reactor + + if self.stopped: + if self.outputting_d is not None: + self.outputting_d.callback(True) + self.outputting_d = None + return + + if self.outputting_d is None: + self.outputting_d = defer.Deferred() + blobs = self.download_manager.blobs + + def finished_outputting_blob(): + self.last_blob_outputted += 1 + final_blob_num = self.download_manager.final_blob_num() + if final_blob_num is not None and final_blob_num == self.last_blob_outputted: + self._finished_outputting() + self.outputting_d.callback(True) + self.outputting_d = None + else: + reactor.callLater(0, self._output_loop) + + current_blob_num = self.last_blob_outputted + 1 + + if current_blob_num in blobs and blobs[current_blob_num].is_validated(): + logging.info("Outputting blob %s", str(self.last_blob_outputted + 1)) + self.provided_blob_nums.append(self.last_blob_outputted + 1) + d = self.download_manager.handle_blob(self.last_blob_outputted + 1) + d.addCallback(lambda _: finished_outputting_blob()) + d.addCallback(lambda _: self._finished_with_blob(current_blob_num)) + + def log_error(err): + logging.warning("Error occurred in the output loop. 
Error: %s", err.getErrorMessage()) + + d.addErrback(log_error) + else: + self.outputting_d.callback(True) + self.outputting_d = None \ No newline at end of file diff --git a/lbrynet/core/client/__init__.py b/lbrynet/core/client/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lbrynet/core/cryptoutils.py b/lbrynet/core/cryptoutils.py new file mode 100644 index 000000000..d77013c92 --- /dev/null +++ b/lbrynet/core/cryptoutils.py @@ -0,0 +1,18 @@ +from Crypto.Hash import SHA384 +import seccure + + +def get_lbry_hash_obj(): + return SHA384.new() + + +def get_pub_key(pass_phrase): + return str(seccure.passphrase_to_pubkey(pass_phrase, curve="brainpoolp384r1")) + + +def sign_with_pass_phrase(m, pass_phrase): + return seccure.sign(m, pass_phrase, curve="brainpoolp384r1") + + +def verify_signature(m, signature, pub_key): + return seccure.verify(m, signature, pub_key, curve="brainpoolp384r1") \ No newline at end of file diff --git a/lbrynet/core/server/BlobAvailabilityHandler.py b/lbrynet/core/server/BlobAvailabilityHandler.py new file mode 100644 index 000000000..0361e21cb --- /dev/null +++ b/lbrynet/core/server/BlobAvailabilityHandler.py @@ -0,0 +1,55 @@ +import logging +from twisted.internet import defer +from zope.interface import implements +from lbrynet.interfaces import IQueryHandlerFactory, IQueryHandler + + +class BlobAvailabilityHandlerFactory(object): + implements(IQueryHandlerFactory) + + def __init__(self, blob_manager): + self.blob_manager = blob_manager + + ######### IQueryHandlerFactory ######### + + def build_query_handler(self): + q_h = BlobAvailabilityHandler(self.blob_manager) + return q_h + + def get_primary_query_identifier(self): + return 'requested_blobs' + + def get_description(self): + return "Blob Availability - blobs that are available to be uploaded" + + +class BlobAvailabilityHandler(object): + implements(IQueryHandler) + + def __init__(self, blob_manager): + self.blob_manager = blob_manager + self.query_identifiers = ['requested_blobs'] + + ######### IQueryHandler ######### + + def register_with_request_handler(self, request_handler, peer): + request_handler.register_query_handler(self, self.query_identifiers) + + def handle_queries(self, queries): + if self.query_identifiers[0] in queries: + logging.debug("Received the client's list of requested blobs") + d = self._get_available_blobs(queries[self.query_identifiers[0]]) + + def set_field(available_blobs): + return {'available_blobs': available_blobs} + + d.addCallback(set_field) + return d + return defer.succeed({}) + + ######### internal ######### + + def _get_available_blobs(self, requested_blobs): + d = self.blob_manager.completed_blobs(requested_blobs) + + return d \ No newline at end of file diff --git a/lbrynet/core/server/BlobRequestHandler.py b/lbrynet/core/server/BlobRequestHandler.py new file mode 100644 index 000000000..ee61fdb37 --- /dev/null +++ b/lbrynet/core/server/BlobRequestHandler.py @@ -0,0 +1,156 @@ +import logging +from twisted.internet import defer +from twisted.protocols.basic import FileSender +from twisted.python.failure import Failure +from zope.interface import implements +from lbrynet.interfaces import IQueryHandlerFactory, IQueryHandler, IBlobSender + + +class BlobRequestHandlerFactory(object): + implements(IQueryHandlerFactory) + + def __init__(self, blob_manager, wallet, payment_rate_manager): + self.blob_manager = blob_manager + self.wallet = wallet + self.payment_rate_manager = payment_rate_manager + + ######### IQueryHandlerFactory ######### + + def 
build_query_handler(self): + q_h = BlobRequestHandler(self.blob_manager, self.wallet, self.payment_rate_manager) + return q_h + + def get_primary_query_identifier(self): + return 'requested_blob' + + def get_description(self): + return "Blob Uploader - uploads blobs" + + +class BlobRequestHandler(object): + implements(IQueryHandler, IBlobSender) + + def __init__(self, blob_manager, wallet, payment_rate_manager): + self.blob_manager = blob_manager + self.payment_rate_manager = payment_rate_manager + self.wallet = wallet + self.query_identifiers = ['blob_data_payment_rate', 'requested_blob'] + self.peer = None + self.blob_data_payment_rate = None + self.read_handle = None + self.currently_uploading = None + self.file_sender = None + self.blob_bytes_uploaded = 0 + + ######### IQueryHandler ######### + + def register_with_request_handler(self, request_handler, peer): + self.peer = peer + request_handler.register_query_handler(self, self.query_identifiers) + request_handler.register_blob_sender(self) + + def handle_queries(self, queries): + response = {} + if self.query_identifiers[0] in queries: + if not self.handle_blob_data_payment_rate(queries[self.query_identifiers[0]]): + response['blob_data_payment_rate'] = "RATE_TOO_LOW" + else: + response['blob_data_payment_rate'] = 'RATE_ACCEPTED' + + if self.query_identifiers[1] in queries: + logging.debug("Received the client's request to send a blob") + response_fields = {} + response['incoming_blob'] = response_fields + + if self.blob_data_payment_rate is None: + response_fields['error'] = "RATE_UNSET" + return defer.succeed(response) + else: + + d = self.blob_manager.get_blob(queries[self.query_identifiers[1]], True) + + def open_blob_for_reading(blob): + if blob.is_validated(): + read_handle = blob.open_for_reading() + if read_handle is not None: + self.currently_uploading = blob + self.read_handle = read_handle + logging.debug("Sending %s to client", str(blob)) + response_fields['blob_hash'] = blob.blob_hash + response_fields['length'] = blob.length + return response + logging.debug("We can not send %s", str(blob)) + response_fields['error'] = "BLOB_UNAVAILABLE" + return response + + d.addCallback(open_blob_for_reading) + + return d + else: + return defer.succeed(response) + + ######### IBlobSender ######### + + def send_blob_if_requested(self, consumer): + if self.currently_uploading is not None: + return self.send_file(consumer) + return defer.succeed(True) + + def cancel_send(self, err): + if self.currently_uploading is not None: + self.currently_uploading.close_read_handle(self.read_handle) + self.read_handle = None + self.currently_uploading = None + return err + + ######### internal ######### + + def handle_blob_data_payment_rate(self, requested_payment_rate): + if not self.payment_rate_manager.accept_rate_blob_data(self.peer, requested_payment_rate): + return False + else: + self.blob_data_payment_rate = requested_payment_rate + return True + + def send_file(self, consumer): + + def _send_file(): + inner_d = start_transfer() + # TODO: if the transfer fails, check if it's because the connection was cut off. 
+ # TODO: if so, perhaps bill the client + inner_d.addCallback(lambda _: set_expected_payment()) + inner_d.addBoth(set_not_uploading) + return inner_d + + def count_bytes(data): + self.blob_bytes_uploaded += len(data) + self.peer.update_stats('blob_bytes_uploaded', len(data)) + return data + + def start_transfer(): + self.file_sender = FileSender() + logging.info("Starting the file upload") + assert self.read_handle is not None, "self.read_handle was None when trying to start the transfer" + d = self.file_sender.beginFileTransfer(self.read_handle, consumer, count_bytes) + return d + + def set_expected_payment(): + logging.info("Setting expected payment") + if self.blob_bytes_uploaded != 0 and self.blob_data_payment_rate is not None: + self.wallet.add_expected_payment(self.peer, + self.currently_uploading.length * 1.0 * + self.blob_data_payment_rate / 2**20) + self.blob_bytes_uploaded = 0 + self.peer.update_stats('blobs_uploaded', 1) + return None + + def set_not_uploading(reason=None): + if self.currently_uploading is not None: + self.currently_uploading.close_read_handle(self.read_handle) + self.read_handle = None + self.currently_uploading = None + self.file_sender = None + if reason is not None and isinstance(reason, Failure): + logging.warning("Upload has failed. Reason: %s", reason.getErrorMessage()) + + return _send_file() \ No newline at end of file diff --git a/lbrynet/core/server/DHTHashAnnouncer.py b/lbrynet/core/server/DHTHashAnnouncer.py new file mode 100644 index 000000000..4df03bc76 --- /dev/null +++ b/lbrynet/core/server/DHTHashAnnouncer.py @@ -0,0 +1,81 @@ +import binascii +from twisted.internet import defer, task, reactor +import collections + + +class DHTHashAnnouncer(object): + """This class announces to the DHT that this peer has certain blobs""" + def __init__(self, dht_node, peer_port): + self.dht_node = dht_node + self.peer_port = peer_port + self.suppliers = [] + self.next_manage_call = None + self.hash_queue = collections.deque() + self._concurrent_announcers = 0 + + def run_manage_loop(self): + + from twisted.internet import reactor + + if self.peer_port is not None: + self._announce_available_hashes() + self.next_manage_call = reactor.callLater(60, self.run_manage_loop) + + def stop(self): + if self.next_manage_call is not None: + self.next_manage_call.cancel() + self.next_manage_call = None + + def add_supplier(self, supplier): + self.suppliers.append(supplier) + + def immediate_announce(self, blob_hashes): + if self.peer_port is not None: + return self._announce_hashes(blob_hashes) + else: + return defer.succeed(False) + + def _announce_available_hashes(self): + ds = [] + for supplier in self.suppliers: + d = supplier.hashes_to_announce() + d.addCallback(self._announce_hashes) + ds.append(d) + dl = defer.DeferredList(ds) + return dl + + def _announce_hashes(self, hashes): + + ds = [] + + for h in hashes: + announce_deferred = defer.Deferred() + ds.append(announce_deferred) + self.hash_queue.append((h, announce_deferred)) + + def announce(): + if len(self.hash_queue): + h, announce_deferred = self.hash_queue.popleft() + d = self.dht_node.announceHaveBlob(binascii.unhexlify(h), self.peer_port) + d.chainDeferred(announce_deferred) + d.addBoth(lambda _: reactor.callLater(0, announce)) + else: + self._concurrent_announcers -= 1 + + for i in range(self._concurrent_announcers, 5): + # TODO: maybe make the 5 configurable + self._concurrent_announcers += 1 + announce() + return defer.DeferredList(ds) + + +class DHTHashSupplier(object): + """Classes derived from this 
class give hashes to a hash announcer""" + def __init__(self, announcer): + if announcer is not None: + announcer.add_supplier(self) + self.hash_announcer = announcer + self.hash_reannounce_time = 60 * 60 # 1 hour + + def hashes_to_announce(self): + pass \ No newline at end of file diff --git a/lbrynet/core/server/ServerProtocol.py b/lbrynet/core/server/ServerProtocol.py new file mode 100644 index 000000000..106734bc9 --- /dev/null +++ b/lbrynet/core/server/ServerProtocol.py @@ -0,0 +1,91 @@ +import logging +from twisted.internet import interfaces, error +from twisted.internet.protocol import Protocol, ServerFactory +from twisted.python import failure +from zope.interface import implements +from lbrynet.core.server.ServerRequestHandler import ServerRequestHandler + + +class ServerProtocol(Protocol): + """ServerProtocol needs to: + + 1) Receive requests from its transport + 2) Pass those requests on to its request handler + 3) Tell the request handler to pause/resume producing + 4) Tell its transport to pause/resume producing + 5) Hang up when the request handler is done producing + 6) Tell the request handler to stop producing if the connection is lost + 7) Upon creation, register with the rate limiter + 8) Upon connection loss, unregister with the rate limiter + 9) Report all uploaded and downloaded bytes to the rate limiter + 10) Pause/resume production when told by the rate limiter + """ + + implements(interfaces.IConsumer) + + #Protocol stuff + + def connectionMade(self): + logging.debug("Got a connection") + peer_info = self.transport.getPeer() + self.peer = self.factory.peer_manager.get_peer(peer_info.host, peer_info.port) + self.request_handler = ServerRequestHandler(self) + for query_handler_factory, enabled in self.factory.query_handler_factories.iteritems(): + if enabled is True: + query_handler = query_handler_factory.build_query_handler() + query_handler.register_with_request_handler(self.request_handler, self.peer) + logging.debug("Setting the request handler") + self.factory.rate_limiter.register_protocol(self) + + def connectionLost(self, reason=failure.Failure(error.ConnectionDone())): + if self.request_handler is not None: + self.request_handler.stopProducing() + self.factory.rate_limiter.unregister_protocol(self) + if not reason.check(error.ConnectionDone): + logging.warning("Closing a connection. 
Reason: %s", reason.getErrorMessage()) + + def dataReceived(self, data): + logging.debug("Receiving %s bytes of data from the transport", str(len(data))) + self.factory.rate_limiter.report_dl_bytes(len(data)) + if self.request_handler is not None: + self.request_handler.data_received(data) + + #IConsumer stuff + + def registerProducer(self, producer, streaming): + logging.debug("Registering the producer") + assert streaming is True + + def unregisterProducer(self): + self.request_handler = None + self.transport.loseConnection() + + def write(self, data): + logging.debug("Writing %s bytes of data to the transport", str(len(data))) + self.transport.write(data) + self.factory.rate_limiter.report_ul_bytes(len(data)) + + #Rate limiter stuff + + def throttle_upload(self): + if self.request_handler is not None: + self.request_handler.pauseProducing() + + def unthrottle_upload(self): + if self.request_handler is not None: + self.request_handler.resumeProducing() + + def throttle_download(self): + self.transport.pauseProducing() + + def unthrottle_download(self): + self.transport.resumeProducing() + + +class ServerProtocolFactory(ServerFactory): + protocol = ServerProtocol + + def __init__(self, rate_limiter, query_handler_factories, peer_manager): + self.rate_limiter = rate_limiter + self.query_handler_factories = query_handler_factories + self.peer_manager = peer_manager \ No newline at end of file diff --git a/lbrynet/core/server/ServerRequestHandler.py b/lbrynet/core/server/ServerRequestHandler.py new file mode 100644 index 000000000..2cf534129 --- /dev/null +++ b/lbrynet/core/server/ServerRequestHandler.py @@ -0,0 +1,171 @@ +import json +import logging +from twisted.internet import interfaces, defer +from zope.interface import implements +from lbrynet.interfaces import IRequestHandler + + +class ServerRequestHandler(object): + """This class handles requests from clients. 
It can upload blobs and return request for information about + more blobs that are associated with streams""" + + implements(interfaces.IPushProducer, interfaces.IConsumer, IRequestHandler) + + def __init__(self, consumer): + self.consumer = consumer + self.production_paused = False + self.request_buff = '' + self.response_buff = '' + self.producer = None + self.request_received = False + self.CHUNK_SIZE = 2**14 + self.query_handlers = {} # {IQueryHandler: [query_identifiers]} + self.blob_sender = None + self.consumer.registerProducer(self, True) + + #IPushProducer stuff + + def pauseProducing(self): + self.production_paused = True + + def stopProducing(self): + if self.producer is not None: + self.producer.stopProducing() + self.producer = None + self.production_paused = True + self.consumer.unregisterProducer() + + def resumeProducing(self): + + from twisted.internet import reactor + + self.production_paused = False + self._produce_more() + if self.producer is not None: + reactor.callLater(0, self.producer.resumeProducing) + + def _produce_more(self): + + from twisted.internet import reactor + + if self.production_paused is False: + chunk = self.response_buff[:self.CHUNK_SIZE] + self.response_buff = self.response_buff[self.CHUNK_SIZE:] + if chunk != '': + logging.debug("writing %s bytes to the client", str(len(chunk))) + self.consumer.write(chunk) + reactor.callLater(0, self._produce_more) + + #IConsumer stuff + + def registerProducer(self, producer, streaming): + #assert self.file_sender == producer + self.producer = producer + assert streaming is False + producer.resumeProducing() + + def unregisterProducer(self): + self.producer = None + + def write(self, data): + + from twisted.internet import reactor + + self.response_buff = self.response_buff + data + self._produce_more() + + def get_more_data(): + if self.producer is not None: + logging.debug("Requesting more data from the producer") + self.producer.resumeProducing() + + reactor.callLater(0, get_more_data) + + #From Protocol + + def data_received(self, data): + logging.debug("Received data") + logging.debug("%s", str(data)) + if self.request_received is False: + self.request_buff = self.request_buff + data + msg = self.try_to_parse_request(self.request_buff) + if msg is not None: + self.request_buff = '' + d = self.handle_request(msg) + if self.blob_sender is not None: + d.addCallback(lambda _: self.blob_sender.send_blob_if_requested(self)) + d.addCallbacks(lambda _: self.finished_response(), self.request_failure_handler) + else: + logging.info("Request buff not a valid json message") + logging.info("Request buff: %s", str(self.request_buff)) + else: + logging.warning("The client sent data when we were uploading a file. This should not happen") + + ######### IRequestHandler ######### + + def register_query_handler(self, query_handler, query_identifiers): + self.query_handlers[query_handler] = query_identifiers + + def register_blob_sender(self, blob_sender): + self.blob_sender = blob_sender + + #response handling + + def request_failure_handler(self, err): + logging.warning("An error occurred handling a request. 
Error: %s", err.getErrorMessage()) + self.stopProducing() + return err + + def finished_response(self): + self.request_received = False + self._produce_more() + + def send_response(self, msg): + m = json.dumps(msg) + logging.info("Sending a response of length %s", str(len(m))) + logging.debug("Response: %s", str(m)) + self.response_buff = self.response_buff + m + self._produce_more() + return True + + def handle_request(self, msg): + logging.debug("Handling a request") + logging.debug(str(msg)) + + def create_response_message(results): + response = {} + for success, result in results: + if success is True: + response.update(result) + else: + # result is a Failure + return result + logging.debug("Finished making the response message. Response: %s", str(response)) + return response + + def log_errors(err): + logging.warning("An error occurred handling a client request. Error message: %s", err.getErrorMessage()) + return err + + def send_response(response): + self.send_response(response) + return True + + ds = [] + for query_handler, query_identifiers in self.query_handlers.iteritems(): + queries = {q_i: msg[q_i] for q_i in query_identifiers if q_i in msg} + d = query_handler.handle_queries(queries) + d.addErrback(log_errors) + ds.append(d) + + dl = defer.DeferredList(ds) + dl.addCallback(create_response_message) + dl.addCallback(send_response) + return dl + + def try_to_parse_request(self, request_buff): + try: + msg = json.loads(request_buff) + return msg + except ValueError: + return None \ No newline at end of file diff --git a/lbrynet/core/server/__init__.py b/lbrynet/core/server/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lbrynet/core/utils.py b/lbrynet/core/utils.py new file mode 100644 index 000000000..89b57fb0c --- /dev/null +++ b/lbrynet/core/utils.py @@ -0,0 +1,28 @@ +from lbrynet.core.cryptoutils import get_lbry_hash_obj +import random + + +blobhash_length = get_lbry_hash_obj().digest_size * 2 # digest_size is in bytes, and blob hashes are hex encoded + + +def generate_id(num=None): + h = get_lbry_hash_obj() + if num is not None: + h.update(str(num)) + else: + h.update(str(random.getrandbits(512))) + return h.digest() + + +def is_valid_blobhash(blobhash): + """ + @param blobhash: string, the blobhash to check + + @return: Whether the blobhash is the correct length and contains only valid characters (0-9, a-f) + """ + if len(blobhash) != blobhash_length: + return False + for l in blobhash: + if l not in "0123456789abcdef": + return False + return True \ No newline at end of file diff --git a/lbrynet/create_network.py b/lbrynet/create_network.py new file mode 100644 index 000000000..611ae5eb0 --- /dev/null +++ b/lbrynet/create_network.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive +# + +# Thanks to Paul Cannon for IP-address resolution functions (taken from aspn.activestate.com) + +import argparse +import os, sys, time, signal + +amount = 0 + + +def destroyNetwork(nodes): + print 'Destroying Kademlia network...' 
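    # Progress bar bookkeeping: i*50/amount maps the node index onto a 50-character
    # bar, and '\r[%-50s] %d/%d' redraws it in place on each iteration.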
+ i = 0 + for node in nodes: + i += 1 + hashAmount = i*50/amount + hashbar = '#'*hashAmount + output = '\r[%-50s] %d/%d' % (hashbar, i, amount) + sys.stdout.write(output) + time.sleep(0.15) + os.kill(node, signal.SIGTERM) + print + + +def main(): + + parser = argparse.ArgumentParser(description="Launch a network of dht nodes") + + parser.add_argument("amount_of_nodes", + help="The number of nodes to create", + type=int) + parser.add_argument("--nic_ip_address", + help="The network interface on which these nodes will listen for connections " + "from each other and from other nodes. If omitted, an attempt will be " + "made to automatically determine the system's IP address, but this may " + "result in the nodes being reachable only from this system") + + args = parser.parse_args() + + global amount + amount = args.amount_of_nodes + if args.nic_ip_address: + ipAddress = args.nic_ip_address + else: + import socket + ipAddress = socket.gethostbyname(socket.gethostname()) + print 'Network interface IP address omitted; using %s...' % ipAddress + + startPort = 4000 + port = startPort+1 + nodes = [] + print 'Creating Kademlia network...' + try: + nodes.append(os.spawnlp(os.P_NOWAIT, 'lbrynet-launch-node', 'lbrynet-launch-node', str(startPort))) + for i in range(amount-1): + time.sleep(0.15) + hashAmount = i*50/amount + hashbar = '#'*hashAmount + output = '\r[%-50s] %d/%d' % (hashbar, i, amount) + sys.stdout.write(output) + nodes.append(os.spawnlp(os.P_NOWAIT, 'lbrynet-launch-node', 'lbrynet-launch-node', str(port), ipAddress, str(startPort))) + port += 1 + except KeyboardInterrupt: + '\nNetwork creation cancelled.' + destroyNetwork(nodes) + sys.exit(1) + + print '\n\n---------------\nNetwork running\n---------------\n' + try: + while 1: + time.sleep(1) + except KeyboardInterrupt: + pass + finally: + destroyNetwork(nodes) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/lbrynet/cryptstream/CryptBlob.py b/lbrynet/cryptstream/CryptBlob.py new file mode 100644 index 000000000..558980a19 --- /dev/null +++ b/lbrynet/cryptstream/CryptBlob.py @@ -0,0 +1,106 @@ +import binascii +import logging +from Crypto.Cipher import AES +from lbrynet.conf import BLOB_SIZE +from lbrynet.core.BlobInfo import BlobInfo + + +class CryptBlobInfo(BlobInfo): + def __init__(self, blob_hash, blob_num, length, iv): + BlobInfo.__init__(self, blob_hash, blob_num, length) + self.iv = iv + + +class StreamBlobDecryptor(object): + def __init__(self, blob, key, iv, length): + self.blob = blob + self.key = key + self.iv = iv + self.length = length + self.buff = b'' + self.len_read = 0 + self.cipher = AES.new(self.key, AES.MODE_CBC, self.iv) + + def decrypt(self, write_func): + + def remove_padding(data): + pad_len = ord(data[-1]) + data, padding = data[:-1 * pad_len], data[-1 * pad_len:] + for c in padding: + assert ord(c) == pad_len + return data + + def write_bytes(): + if self.len_read < self.length: + num_bytes_to_decrypt = (len(self.buff) // self.cipher.block_size) * self.cipher.block_size + data_to_decrypt, self.buff = self.buff[:num_bytes_to_decrypt], self.buff[num_bytes_to_decrypt:] + write_func(self.cipher.decrypt(data_to_decrypt)) + + def finish_decrypt(): + assert len(self.buff) % self.cipher.block_size == 0 + data_to_decrypt, self.buff = self.buff, b'' + write_func(remove_padding(self.cipher.decrypt(data_to_decrypt))) + + def decrypt_bytes(data): + self.buff += data + self.len_read += len(data) + write_bytes() + #write_func(remove_padding(self.cipher.decrypt(self.buff))) + + d = 
self.blob.read(decrypt_bytes) + d.addCallback(lambda _: finish_decrypt()) + return d + + +class CryptStreamBlobMaker(object): + """This class encrypts data and writes it to a new blob""" + def __init__(self, key, iv, blob_num, blob): + self.key = key + self.iv = iv + self.blob_num = blob_num + self.blob = blob + self.cipher = AES.new(self.key, AES.MODE_CBC, self.iv) + self.buff = b'' + self.length = 0 + + def write(self, data): + max_bytes_to_write = BLOB_SIZE - self.length - 1 + done = False + if max_bytes_to_write <= len(data): + num_bytes_to_write = max_bytes_to_write + done = True + else: + num_bytes_to_write = len(data) + self.length += num_bytes_to_write + data_to_write = data[:num_bytes_to_write] + self.buff += data_to_write + self._write_buffer() + return done, num_bytes_to_write + + def close(self): + logging.debug("closing blob %s with plaintext len %s", str(self.blob_num), str(self.length)) + if self.length != 0: + self._close_buffer() + d = self.blob.close() + d.addCallback(self._return_info) + logging.debug("called the finished_callback from CryptStreamBlobMaker.close") + return d + + def _write_buffer(self): + num_bytes_to_encrypt = (len(self.buff) // AES.block_size) * AES.block_size + data_to_encrypt, self.buff = self.buff[:num_bytes_to_encrypt], self.buff[num_bytes_to_encrypt:] + encrypted_data = self.cipher.encrypt(data_to_encrypt) + self.blob.write(encrypted_data) + + def _close_buffer(self): + data_to_encrypt, self.buff = self.buff, b'' + assert len(data_to_encrypt) < AES.block_size + pad_len = AES.block_size - len(data_to_encrypt) + padded_data = data_to_encrypt + chr(pad_len) * pad_len + self.length += pad_len + assert len(padded_data) == AES.block_size + encrypted_data = self.cipher.encrypt(padded_data) + self.blob.write(encrypted_data) + + def _return_info(self, blob_hash): + return CryptBlobInfo(blob_hash, self.blob_num, self.length, binascii.hexlify(self.iv)) \ No newline at end of file diff --git a/lbrynet/cryptstream/CryptStreamCreator.py b/lbrynet/cryptstream/CryptStreamCreator.py new file mode 100644 index 000000000..561fb85ac --- /dev/null +++ b/lbrynet/cryptstream/CryptStreamCreator.py @@ -0,0 +1,94 @@ +""" +Utility for creating Crypt Streams, which are encrypted blobs and associated metadata. +""" + +import logging + +from Crypto import Random +from Crypto.Cipher import AES + +from twisted.internet import defer +from lbrynet.core.StreamCreator import StreamCreator +from lbrynet.cryptstream.CryptBlob import CryptStreamBlobMaker + + +class CryptStreamCreator(StreamCreator): + """Create a new stream with blobs encrypted by a symmetric cipher. + + Each blob is encrypted with the same key, but each blob has its own initialization vector + which is associated with the blob when the blob is associated with the stream.""" + def __init__(self, blob_manager, name=None, key=None, iv_generator=None): + """ + @param blob_manager: Object that stores and provides access to blobs. + @type blob_manager: BlobManager + + @param name: the name of the stream, which will be presented to the user + @type name: string + + @param key: the raw AES key which will be used to encrypt the blobs. If None, a random key will + be generated. + @type key: string + + @param iv_generator: a generator which yields initialization vectors for the blobs. Will be called + once for each blob. 
+ @type iv_generator: a generator function which yields strings + + @return: None + """ + StreamCreator.__init__(self, name) + self.blob_manager = blob_manager + self.key = key + if iv_generator is None: + self.iv_generator = self.random_iv_generator() + else: + self.iv_generator = iv_generator + + @staticmethod + def random_iv_generator(): + while 1: + yield Random.new().read(AES.block_size) + + def setup(self): + """Create the symmetric key if it wasn't provided""" + + if self.key is None: + self.key = Random.new().read(AES.block_size) + + return defer.succeed(True) + + def _finalize(self): + logging.debug("_finalize has been called") + self.blob_count += 1 + iv = self.iv_generator.next() + final_blob_creator = self.blob_manager.get_blob_creator() + logging.debug("Created the finished_deferred") + final_blob = self._get_blob_maker(iv, final_blob_creator) + logging.debug("Created the final blob") + logging.debug("Calling close on final blob") + d = final_blob.close() + d.addCallback(self._blob_finished) + self.finished_deferreds.append(d) + logging.debug("called close on final blob, returning from make_final_blob") + return d + + def _write(self, data): + + def close_blob(blob): + d = blob.close() + d.addCallback(self._blob_finished) + self.finished_deferreds.append(d) + + while len(data) > 0: + if self.current_blob is None: + next_blob_creator = self.blob_manager.get_blob_creator() + self.blob_count += 1 + iv = self.iv_generator.next() + self.current_blob = self._get_blob_maker(iv, next_blob_creator) + done, num_bytes_written = self.current_blob.write(data) + data = data[num_bytes_written:] + if done is True: + close_blob(self.current_blob) + self.current_blob = None + + def _get_blob_maker(self, iv, blob_creator): + return CryptStreamBlobMaker(self.key, iv, self.blob_count, blob_creator) \ No newline at end of file diff --git a/lbrynet/cryptstream/__init__.py b/lbrynet/cryptstream/__init__.py new file mode 100644 index 000000000..df825f573 --- /dev/null +++ b/lbrynet/cryptstream/__init__.py @@ -0,0 +1,8 @@ +""" +Classes and functions for dealing with Crypt Streams. + +Crypt Streams are encrypted blobs and metadata tying those blobs together. At least some of the +metadata is generally stored in a Stream Descriptor File, for example containing a public key +used to bind blobs to the stream and a symmetric key used to encrypt the blobs. The list of blobs +may or may not be present. 
+""" \ No newline at end of file diff --git a/lbrynet/cryptstream/client/CryptBlobHandler.py b/lbrynet/cryptstream/client/CryptBlobHandler.py new file mode 100644 index 000000000..0860d8dca --- /dev/null +++ b/lbrynet/cryptstream/client/CryptBlobHandler.py @@ -0,0 +1,19 @@ +import binascii +from zope.interface import implements +from lbrynet.cryptstream.CryptBlob import StreamBlobDecryptor +from lbrynet.interfaces import IBlobHandler + + +class CryptBlobHandler(object): + implements(IBlobHandler) + + def __init__(self, key, write_func): + self.key = key + self.write_func = write_func + + ######## IBlobHandler ######### + + def handle_blob(self, blob, blob_info): + blob_decryptor = StreamBlobDecryptor(blob, self.key, binascii.unhexlify(blob_info.iv), blob_info.length) + d = blob_decryptor.decrypt(self.write_func) + return d \ No newline at end of file diff --git a/lbrynet/cryptstream/client/CryptStreamDownloader.py b/lbrynet/cryptstream/client/CryptStreamDownloader.py new file mode 100644 index 000000000..b39818ef9 --- /dev/null +++ b/lbrynet/cryptstream/client/CryptStreamDownloader.py @@ -0,0 +1,213 @@ +from zope.interface import implements +from lbrynet.interfaces import IStreamDownloader +from lbrynet.core.client.BlobRequester import BlobRequester +from lbrynet.core.client.ConnectionManager import ConnectionManager +from lbrynet.core.client.DownloadManager import DownloadManager +from lbrynet.core.client.StreamProgressManager import FullStreamProgressManager +from lbrynet.cryptstream.client.CryptBlobHandler import CryptBlobHandler +from twisted.internet import defer +from twisted.python.failure import Failure + + +class StartFailedError(Exception): + pass + + +class AlreadyRunningError(Exception): + pass + + +class AlreadyStoppedError(Exception): + pass + + +class CurrentlyStoppingError(Exception): + pass + + +class CurrentlyStartingError(Exception): + pass + + +class CryptStreamDownloader(object): + + implements(IStreamDownloader) + + def __init__(self, peer_finder, rate_limiter, blob_manager, + payment_rate_manager, wallet, upload_allowed): + """ + Initialize a CryptStreamDownloader + + @param peer_finder: An object which implements the IPeerFinder interface. Used to look up peers by a hashsum. 
+ + @param rate_limiter: An object which implements the IRateLimiter interface + + @param blob_manager: A BlobManager object + + @param payment_rate_manager: A PaymentRateManager object + + @param wallet: An object which implements the ILBRYWallet interface + + @return: + """ + + self.peer_finder = peer_finder + self.rate_limiter = rate_limiter + self.blob_manager = blob_manager + self.payment_rate_manager = payment_rate_manager + self.wallet = wallet + self.upload_allowed = upload_allowed + + self.key = None + self.stream_name = None + + self.completed = False + self.stopped = True + self.stopping = False + self.starting = False + + self.download_manager = None + self.finished_deferred = None + + self.points_paid = 0.0 + + def toggle_running(self): + if self.stopped is True: + return self.start() + else: + return self.stop() + + def start(self): + + def set_finished_deferred(): + self.finished_deferred = defer.Deferred() + return self.finished_deferred + + if self.starting is True: + raise CurrentlyStartingError() + if self.stopping is True: + raise CurrentlyStoppingError() + if self.stopped is False: + raise AlreadyRunningError() + assert self.download_manager is None + self.starting = True + self.completed = False + d = self._start() + d.addCallback(lambda _: set_finished_deferred()) + return d + + def stop(self): + + def check_if_stop_succeeded(success): + self.stopping = False + if success is True: + self.stopped = True + self._remove_download_manager() + return success + + if self.stopped is True: + raise AlreadyStoppedError() + if self.stopping is True: + raise CurrentlyStoppingError() + assert self.download_manager is not None + self.stopping = True + d = self.download_manager.stop_downloading() + self._fire_completed_deferred() + d.addCallback(check_if_stop_succeeded) + return d + + def _start_failed(self): + + def set_stopped(): + self.stopped = True + self.stopping = False + self.starting = False + + if self.download_manager is not None: + d = self.download_manager.stop_downloading() + d.addCallback(lambda _: self._remove_download_manager()) + else: + d = defer.succeed(True) + d.addCallback(lambda _: set_stopped()) + d.addCallback(lambda _: Failure(StartFailedError())) + return d + + def _start(self): + + def check_start_succeeded(success): + if success: + self.starting = False + self.stopped = False + self.completed = False + return True + else: + return self._start_failed() + + self.download_manager = self._get_download_manager() + d = self.download_manager.start_downloading() + d.addCallbacks(check_start_succeeded) + return d + + def _get_download_manager(self): + download_manager = DownloadManager(self.blob_manager, self.upload_allowed) + download_manager.blob_info_finder = self._get_metadata_handler(download_manager) + download_manager.blob_requester = self._get_blob_requester(download_manager) + download_manager.progress_manager = self._get_progress_manager(download_manager) + download_manager.blob_handler = self._get_blob_handler(download_manager) + download_manager.wallet_info_exchanger = self.wallet.get_info_exchanger() + download_manager.connection_manager = self._get_connection_manager(download_manager) + #return DownloadManager(self.blob_manager, self.blob_requester, self.metadata_handler, + # self.progress_manager, self.blob_handler, self.connection_manager) + return download_manager + + def _remove_download_manager(self): + self.download_manager.blob_info_finder = None + self.download_manager.blob_requester = None + self.download_manager.progress_manager = None + 
self.download_manager.blob_handler = None + self.download_manager.wallet_info_exchanger = None + self.download_manager.connection_manager = None + self.download_manager = None + + def _get_primary_request_creators(self, download_manager): + return [download_manager.blob_requester] + + def _get_secondary_request_creators(self, download_manager): + return [download_manager.wallet_info_exchanger] + + def _get_metadata_handler(self, download_manager): + pass + + def _get_blob_requester(self, download_manager): + return BlobRequester(self.blob_manager, self.peer_finder, self.payment_rate_manager, self.wallet, + download_manager) + + def _get_progress_manager(self, download_manager): + return FullStreamProgressManager(self._finished_downloading, self.blob_manager, download_manager) + + def _get_write_func(self): + pass + + def _get_blob_handler(self, download_manager): + return CryptBlobHandler(self.key, self._get_write_func()) + + def _get_connection_manager(self, download_manager): + return ConnectionManager(self, self.rate_limiter, + self._get_primary_request_creators(download_manager), + self._get_secondary_request_creators(download_manager)) + + def _fire_completed_deferred(self): + self.finished_deferred, d = None, self.finished_deferred + if d is not None: + d.callback(self._get_finished_deferred_callback_value()) + + def _get_finished_deferred_callback_value(self): + return None + + def _finished_downloading(self, finished): + if finished is True: + self.completed = True + return self.stop() + + def insufficient_funds(self): + return self.stop() \ No newline at end of file diff --git a/lbrynet/cryptstream/client/__init__.py b/lbrynet/cryptstream/client/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lbrynet/dht/AUTHORS b/lbrynet/dht/AUTHORS new file mode 100644 index 000000000..9a8063643 --- /dev/null +++ b/lbrynet/dht/AUTHORS @@ -0,0 +1,7 @@ +Francois Aucamp + +Thanks goes to the following people for providing patches/suggestions/tests: + +Neil Kleynhans +Haiyang Ma +Bryan McAlister diff --git a/lbrynet/dht/COPYING b/lbrynet/dht/COPYING new file mode 100644 index 000000000..fc8a5de7e --- /dev/null +++ b/lbrynet/dht/COPYING @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". 
+ + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. 
A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/lbrynet/dht/__init__.py b/lbrynet/dht/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lbrynet/dht/constants.py b/lbrynet/dht/constants.py new file mode 100644 index 000000000..853856a8a --- /dev/null +++ b/lbrynet/dht/constants.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. 
+# See the COPYING file included in this archive +# +# The docstrings in this module contain epytext markup; API documentation +# may be created by processing this file with epydoc: http://epydoc.sf.net + +""" This module defines the charaterizing constants of the Kademlia network + +C{checkRefreshInterval} and C{udpDatagramMaxSize} are implementation-specific +constants, and do not affect general Kademlia operation. +""" + +######### KADEMLIA CONSTANTS ########### + +#: Small number Representing the degree of parallelism in network calls +alpha = 3 + +#: Maximum number of contacts stored in a bucket; this should be an even number +k = 8 + +#: Timeout for network operations (in seconds) +rpcTimeout = 5 + +# Delay between iterations of iterative node lookups (for loose parallelism) (in seconds) +iterativeLookupDelay = rpcTimeout / 2 + +#: If a k-bucket has not been used for this amount of time, refresh it (in seconds) +refreshTimeout = 3600 # 1 hour +#: The interval at which nodes replicate (republish/refresh) data they are holding +replicateInterval = refreshTimeout +# The time it takes for data to expire in the network; the original publisher of the data +# will also republish the data at this time if it is still valid +dataExpireTimeout = 86400 # 24 hours + +tokenSecretChangeInterval = 300 # 5 minutes + +peer_request_timeout = 10 + +######## IMPLEMENTATION-SPECIFIC CONSTANTS ########### + +#: The interval in which the node should check its whether any buckets need refreshing, +#: or whether any data needs to be republished (in seconds) +checkRefreshInterval = refreshTimeout/5 + +#: Max size of a single UDP datagram, in bytes. If a message is larger than this, it will +#: be spread accross several UDP packets. +udpDatagramMaxSize = 8192 # 8 KB + +key_bits = 384 \ No newline at end of file diff --git a/lbrynet/dht/contact.py b/lbrynet/dht/contact.py new file mode 100644 index 000000000..c68a4207a --- /dev/null +++ b/lbrynet/dht/contact.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive +# +# The docstrings in this module contain epytext markup; API documentation +# may be created by processing this file with epydoc: http://epydoc.sf.net + + +class Contact(object): + """ Encapsulation for remote contact + + This class contains information on a single remote contact, and also + provides a direct RPC API to the remote node which it represents + """ + def __init__(self, id, ipAddress, udpPort, networkProtocol, firstComm=0): + self.id = id + self.address = ipAddress + self.port = udpPort + self._networkProtocol = networkProtocol + self.commTime = firstComm + + def __eq__(self, other): + if isinstance(other, Contact): + return self.id == other.id + elif isinstance(other, str): + return self.id == other + else: + return False + + def __ne__(self, other): + if isinstance(other, Contact): + return self.id != other.id + elif isinstance(other, str): + return self.id != other + else: + return True + + def compact_ip(self): + compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), self.address.split('.'), bytearray()) + return str(compact_ip) + + def __str__(self): + return '<%s.%s object; IP address: %s, UDP port: %d>' % (self.__module__, self.__class__.__name__, self.address, self.port) + + def __getattr__(self, name): + """ This override allows the host node to call a method of the remote + node (i.e. 
this contact) as if it was a local function. + + For instance, if C{remoteNode} is a instance of C{Contact}, the + following will result in C{remoteNode}'s C{test()} method to be + called with argument C{123}:: + remoteNode.test(123) + + Such a RPC method call will return a Deferred, which will callback + when the contact responds with the result (or an error occurs). + This happens via this contact's C{_networkProtocol} object (i.e. the + host Node's C{_protocol} object). + """ + def _sendRPC(*args, **kwargs): + return self._networkProtocol.sendRPC(self, name, args, **kwargs) + return _sendRPC diff --git a/lbrynet/dht/datastore.py b/lbrynet/dht/datastore.py new file mode 100644 index 000000000..04d74fdc7 --- /dev/null +++ b/lbrynet/dht/datastore.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive +# +# The docstrings in this module contain epytext markup; API documentation +# may be created by processing this file with epydoc: http://epydoc.sf.net + +import UserDict +#import sqlite3 +import cPickle as pickle +import time +import os +import constants + + + +class DataStore(UserDict.DictMixin): + """ Interface for classes implementing physical storage (for data + published via the "STORE" RPC) for the Kademlia DHT + + @note: This provides an interface for a dict-like object + """ + def keys(self): + """ Return a list of the keys in this data store """ + +# def lastPublished(self, key): +# """ Get the time the C{(key, value)} pair identified by C{key} +# was last published """ + +# def originalPublisherID(self, key): +# """ Get the original publisher of the data's node ID +# +# @param key: The key that identifies the stored data +# @type key: str +# +# @return: Return the node ID of the original publisher of the +# C{(key, value)} pair identified by C{key}. 
+# """ + +# def originalPublishTime(self, key): +# """ Get the time the C{(key, value)} pair identified by C{key} +# was originally published """ + +# def setItem(self, key, value, lastPublished, originallyPublished, originalPublisherID): +# """ Set the value of the (key, value) pair identified by C{key}; +# this should set the "last published" value for the (key, value) +# pair to the current time +# """ + + def addPeerToBlob(self, key, value, lastPublished, originallyPublished, originalPublisherID): + pass + +# def __getitem__(self, key): +# """ Get the value identified by C{key} """ + +# def __setitem__(self, key, value): +# """ Convenience wrapper to C{setItem}; this accepts a tuple in the +# format: (value, lastPublished, originallyPublished, originalPublisherID) """ +# self.setItem(key, *value) + +# def __delitem__(self, key): +# """ Delete the specified key (and its value) """ + +class DictDataStore(DataStore): + """ A datastore using an in-memory Python dictionary """ + def __init__(self): + # Dictionary format: + # { : (, , ) } + self._dict = {} + + def keys(self): + """ Return a list of the keys in this data store """ + return self._dict.keys() + +# def lastPublished(self, key): +# """ Get the time the C{(key, value)} pair identified by C{key} +# was last published """ +# return self._dict[key][1] + +# def originalPublisherID(self, key): +# """ Get the original publisher of the data's node ID +# +# @param key: The key that identifies the stored data +# @type key: str +# +# @return: Return the node ID of the original publisher of the +# C{(key, value)} pair identified by C{key}. +# """ +# return self._dict[key][3] + +# def originalPublishTime(self, key): +# """ Get the time the C{(key, value)} pair identified by C{key} +# was originally published """ +# return self._dict[key][2] + + def removeExpiredPeers(self): + now = int(time.time()) + def notExpired(peer): + if (now - peer[2]) > constants.dataExpireTimeout: + return False + return True + for key in self._dict.keys(): + unexpired_peers = filter(notExpired, self._dict[key]) + self._dict[key] = unexpired_peers + + def hasPeersForBlob(self, key): + if key in self._dict and len(self._dict[key]) > 0: + return True + return False + + def addPeerToBlob(self, key, value, lastPublished, originallyPublished, originalPublisherID): + if key in self._dict: + self._dict[key].append((value, lastPublished, originallyPublished, originalPublisherID)) + else: + self._dict[key] = [(value, lastPublished, originallyPublished, originalPublisherID)] + + def getPeersForBlob(self, key): + if key in self._dict: + return [val[0] for val in self._dict[key]] + +# def setItem(self, key, value, lastPublished, originallyPublished, originalPublisherID): +# """ Set the value of the (key, value) pair identified by C{key}; +# this should set the "last published" value for the (key, value) +# pair to the current time +# """ +# self._dict[key] = (value, lastPublished, originallyPublished, originalPublisherID) + +# def __getitem__(self, key): +# """ Get the value identified by C{key} """ +# return self._dict[key][0] + +# def __delitem__(self, key): +# """ Delete the specified key (and its value) """ +# del self._dict[key] + + +#class SQLiteDataStore(DataStore): +# """ Example of a SQLite database-based datastore +# """ +# def __init__(self, dbFile=':memory:'): +# """ +# @param dbFile: The name of the file containing the SQLite database; if +# unspecified, an in-memory database is used. 
+# @type dbFile: str +# """ +# createDB = not os.path.exists(dbFile) +# self._db = sqlite3.connect(dbFile) +# self._db.isolation_level = None +# self._db.text_factory = str +# if createDB: +# self._db.execute('CREATE TABLE data(key, value, lastPublished, originallyPublished, originalPublisherID)') +# self._cursor = self._db.cursor() + +# def keys(self): +# """ Return a list of the keys in this data store """ +# keys = [] +# try: +# self._cursor.execute("SELECT key FROM data") +# for row in self._cursor: +# keys.append(row[0].decode('hex')) +# finally: +# return keys + +# def lastPublished(self, key): +# """ Get the time the C{(key, value)} pair identified by C{key} +# was last published """ +# return int(self._dbQuery(key, 'lastPublished')) + +# def originalPublisherID(self, key): +# """ Get the original publisher of the data's node ID + +# @param key: The key that identifies the stored data +# @type key: str + +# @return: Return the node ID of the original publisher of the +# C{(key, value)} pair identified by C{key}. +# """ +# return self._dbQuery(key, 'originalPublisherID') + +# def originalPublishTime(self, key): +# """ Get the time the C{(key, value)} pair identified by C{key} +# was originally published """ +# return int(self._dbQuery(key, 'originallyPublished')) + +# def setItem(self, key, value, lastPublished, originallyPublished, originalPublisherID): +# # Encode the key so that it doesn't corrupt the database +# encodedKey = key.encode('hex') +# self._cursor.execute("select key from data where key=:reqKey", {'reqKey': encodedKey}) +# if self._cursor.fetchone() == None: +# self._cursor.execute('INSERT INTO data(key, value, lastPublished, originallyPublished, originalPublisherID) VALUES (?, ?, ?, ?, ?)', (encodedKey, buffer(pickle.dumps(value, pickle.HIGHEST_PROTOCOL)), lastPublished, originallyPublished, originalPublisherID)) +# else: +# self._cursor.execute('UPDATE data SET value=?, lastPublished=?, originallyPublished=?, originalPublisherID=? WHERE key=?', (buffer(pickle.dumps(value, pickle.HIGHEST_PROTOCOL)), lastPublished, originallyPublished, originalPublisherID, encodedKey)) + +# def _dbQuery(self, key, columnName, unpickle=False): +# try: +# self._cursor.execute("SELECT %s FROM data WHERE key=:reqKey" % columnName, {'reqKey': key.encode('hex')}) +# row = self._cursor.fetchone() +# value = str(row[0]) +# except TypeError: +# raise KeyError, key +# else: +# if unpickle: +# return pickle.loads(value) +# else: +# return value + +# def __getitem__(self, key): +# return self._dbQuery(key, 'value', unpickle=True) + +# def __delitem__(self, key): +# self._cursor.execute("DELETE FROM data WHERE key=:reqKey", {'reqKey': key.encode('hex')}) diff --git a/lbrynet/dht/encoding.py b/lbrynet/dht/encoding.py new file mode 100644 index 000000000..a57747628 --- /dev/null +++ b/lbrynet/dht/encoding.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive +# +# The docstrings in this module contain epytext markup; API documentation +# may be created by processing this file with epydoc: http://epydoc.sf.net + +class DecodeError(Exception): + """ Should be raised by an C{Encoding} implementation if decode operation + fails + """ + +class Encoding(object): + """ Interface for RPC message encoders/decoders + + All encoding implementations used with this library should inherit and + implement this. 
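For orientation, a round trip through the Bencode subclass defined further down in this module might look like this (the payload values are purely illustrative)::

    encoder = Bencode()
    payload = {'port': 3333, 'lbryid': 'some-node-id'}
    wire = encoder.encode(payload)   # -> 'd6:lbryid12:some-node-id4:porti3333ee'
    assert encoder.decode(wire) == payload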
+ """ + def encode(self, data): + """ Encode the specified data + + @param data: The data to encode + This method has to support encoding of the following + types: C{str}, C{int} and C{long} + Any additional data types may be supported as long as the + implementing class's C{decode()} method can successfully + decode them. + + @return: The encoded data + @rtype: str + """ + def decode(self, data): + """ Decode the specified data string + + @param data: The data (byte string) to decode. + @type data: str + + @return: The decoded data (in its correct type) + """ + +class Bencode(Encoding): + """ Implementation of a Bencode-based algorithm (Bencode is the encoding + algorithm used by Bittorrent). + + @note: This algorithm differs from the "official" Bencode algorithm in + that it can encode/decode floating point values in addition to + integers. + """ + + def encode(self, data): + """ Encoder implementation of the Bencode algorithm + + @param data: The data to encode + @type data: int, long, tuple, list, dict or str + + @return: The encoded data + @rtype: str + """ + if type(data) in (int, long): + return 'i%de' % data + elif type(data) == str: + return '%d:%s' % (len(data), data) + elif type(data) in (list, tuple): + encodedListItems = '' + for item in data: + encodedListItems += self.encode(item) + return 'l%se' % encodedListItems + elif type(data) == dict: + encodedDictItems = '' + keys = data.keys() + keys.sort() + for key in keys: + encodedDictItems += self.encode(key) + encodedDictItems += self.encode(data[key]) + return 'd%se' % encodedDictItems + elif type(data) == float: + # This (float data type) is a non-standard extension to the original Bencode algorithm + return 'f%fe' % data + elif data == None: + # This (None/NULL data type) is a non-standard extension to the original Bencode algorithm + return 'n' + else: + print data + raise TypeError, "Cannot bencode '%s' object" % type(data) + + def decode(self, data): + """ Decoder implementation of the Bencode algorithm + + @param data: The encoded data + @type data: str + + @note: This is a convenience wrapper for the recursive decoding + algorithm, C{_decodeRecursive} + + @return: The decoded data, as a native Python type + @rtype: int, list, dict or str + """ + if len(data) == 0: + raise DecodeError, 'Cannot decode empty string' + return self._decodeRecursive(data)[0] + + @staticmethod + def _decodeRecursive(data, startIndex=0): + """ Actual implementation of the recursive Bencode algorithm + + Do not call this; use C{decode()} instead + """ + if data[startIndex] == 'i': + endPos = data[startIndex:].find('e')+startIndex + return (int(data[startIndex+1:endPos]), endPos+1) + elif data[startIndex] == 'l': + startIndex += 1 + decodedList = [] + while data[startIndex] != 'e': + listData, startIndex = Bencode._decodeRecursive(data, startIndex) + decodedList.append(listData) + return (decodedList, startIndex+1) + elif data[startIndex] == 'd': + startIndex += 1 + decodedDict = {} + while data[startIndex] != 'e': + key, startIndex = Bencode._decodeRecursive(data, startIndex) + value, startIndex = Bencode._decodeRecursive(data, startIndex) + decodedDict[key] = value + return (decodedDict, startIndex) + elif data[startIndex] == 'f': + # This (float data type) is a non-standard extension to the original Bencode algorithm + endPos = data[startIndex:].find('e')+startIndex + return (float(data[startIndex+1:endPos]), endPos+1) + elif data[startIndex] == 'n': + # This (None/NULL data type) is a non-standard extension to the original Bencode algorithm 
+ return (None, startIndex+1) + else: + splitPos = data[startIndex:].find(':')+startIndex + try: + length = int(data[startIndex:splitPos]) + except ValueError, e: + raise DecodeError, e + startIndex = splitPos+1 + endPos = startIndex+length + bytes = data[startIndex:endPos] + return (bytes, endPos) diff --git a/lbrynet/dht/hashwatcher.py b/lbrynet/dht/hashwatcher.py new file mode 100644 index 000000000..fbe37202a --- /dev/null +++ b/lbrynet/dht/hashwatcher.py @@ -0,0 +1,35 @@ + +from collections import Counter +import datetime + + +class HashWatcher(): + def __init__(self, ttl=600): + self.ttl = 600 + self.hashes = [] + self.next_tick = None + + def tick(self): + + from twisted.internet import reactor + + self._remove_old_hashes() + self.next_tick = reactor.callLater(10, self.tick) + + def stop(self): + if self.next_tick is not None: + self.next_tick.cancel() + self.next_tick = None + + def add_requested_hash(self, hashsum, from_ip): + matching_hashes = [h for h in self.hashes if h[0] == hashsum and h[2] == from_ip] + if len(matching_hashes) == 0: + self.hashes.append((hashsum, datetime.datetime.now(), from_ip)) + + def most_popular_hashes(self, num_to_return=10): + hash_counter = Counter([h[0] for h in self.hashes]) + return hash_counter.most_common(num_to_return) + + def _remove_old_hashes(self): + remove_time = datetime.datetime.now() - datetime.timedelta(minutes=10) + self.hashes = [h for h in self.hashes if h[1] < remove_time] \ No newline at end of file diff --git a/lbrynet/dht/kbucket.py b/lbrynet/dht/kbucket.py new file mode 100644 index 000000000..a29904d75 --- /dev/null +++ b/lbrynet/dht/kbucket.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive +# +# The docstrings in this module contain epytext markup; API documentation +# may be created by processing this file with epydoc: http://epydoc.sf.net + +import constants + +class BucketFull(Exception): + """ Raised when the bucket is full """ + + +class KBucket(object): + """ Description - later + """ + def __init__(self, rangeMin, rangeMax): + """ + @param rangeMin: The lower boundary for the range in the n-bit ID + space covered by this k-bucket + @param rangeMax: The upper boundary for the range in the ID space + covered by this k-bucket + """ + self.lastAccessed = 0 + self.rangeMin = rangeMin + self.rangeMax = rangeMax + self._contacts = list() + + def addContact(self, contact): + """ Add contact to _contact list in the right order. This will move the + contact to the end of the k-bucket if it is already present. + + @raise kademlia.kbucket.BucketFull: Raised when the bucket is full and + the contact isn't in the bucket + already + + @param contact: The contact to add + @type contact: kademlia.contact.Contact + """ + if contact in self._contacts: + # Move the existing contact to the end of the list + # - using the new contact to allow add-on data (e.g. 
optimization-specific stuff) to pe updated as well + self._contacts.remove(contact) + self._contacts.append(contact) + elif len(self._contacts) < constants.k: + self._contacts.append(contact) + else: + raise BucketFull("No space in bucket to insert contact") + + def getContact(self, contactID): + """ Get the contact specified node ID""" + index = self._contacts.index(contactID) + return self._contacts[index] + + def getContacts(self, count=-1, excludeContact=None): + """ Returns a list containing up to the first count number of contacts + + @param count: The amount of contacts to return (if 0 or less, return + all contacts) + @type count: int + @param excludeContact: A contact to exclude; if this contact is in + the list of returned values, it will be + discarded before returning. If a C{str} is + passed as this argument, it must be the + contact's ID. + @type excludeContact: kademlia.contact.Contact or str + + + @raise IndexError: If the number of requested contacts is too large + + @return: Return up to the first count number of contacts in a list + If no contacts are present an empty is returned + @rtype: list + """ + # Return all contacts in bucket + if count <= 0: + count = len(self._contacts) + + # Get current contact number + currentLen = len(self._contacts) + + # If count greater than k - return only k contacts + if count > constants.k: + count = constants.k + + # Check if count value in range and, + # if count number of contacts are available + if not currentLen: + contactList = list() + + # length of list less than requested amount + elif currentLen < count: + contactList = self._contacts[0:currentLen] + # enough contacts in list + else: + contactList = self._contacts[0:count] + + if excludeContact in contactList: + contactList.remove(excludeContact) + + return contactList + + def removeContact(self, contact): + """ Remove given contact from list + + @param contact: The contact to remove, or a string containing the + contact's node ID + @type contact: kademlia.contact.Contact or str + + @raise ValueError: The specified contact is not in this bucket + """ + self._contacts.remove(contact) + + def keyInRange(self, key): + """ Tests whether the specified key (i.e. node ID) is in the range + of the n-bit ID space covered by this k-bucket (in otherwords, it + returns whether or not the specified key should be placed in this + k-bucket) + + @param key: The key to test + @type key: str or int + + @return: C{True} if the key is in this k-bucket's range, or C{False} + if not. + @rtype: bool + """ + if isinstance(key, str): + key = long(key.encode('hex'), 16) + return self.rangeMin <= key < self.rangeMax + + def __len__(self): + return len(self._contacts) diff --git a/lbrynet/dht/msgformat.py b/lbrynet/dht/msgformat.py new file mode 100644 index 000000000..c0c5ce852 --- /dev/null +++ b/lbrynet/dht/msgformat.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive +# +# The docstrings in this module contain epytext markup; API documentation +# may be created by processing this file with epydoc: http://epydoc.sf.net + +import msgtypes + +class MessageTranslator(object): + """ Interface for RPC message translators/formatters + + Classes inheriting from this should provide a translation services between + the classes used internally by this Kademlia implementation and the actual + data that is transmitted between nodes. 
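As a rough sketch, translating an outgoing findValue request with the DefaultFormat class defined below could look like this (node_id and blob_hash are hypothetical placeholders)::

    translator = DefaultFormat()
    request = msgtypes.RequestMessage(node_id, 'findValue', (blob_hash,))
    primitive = translator.toPrimitive(request)
    # primitive is a plain dict keyed by the header indices defined on DefaultFormat:
    # {0: 0, 1: <auto-generated rpc id>, 2: node_id, 3: 'findValue', 4: (blob_hash,)}
    # The Encoding layer (e.g. Bencode) then serializes it for the UDP datagram.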
+ """ + def fromPrimitive(self, msgPrimitive): + """ Create an RPC Message from a message's string representation + + @param msgPrimitive: The unencoded primitive representation of a message + @type msgPrimitive: str, int, list or dict + + @return: The translated message object + @rtype: entangled.kademlia.msgtypes.Message + """ + + def toPrimitive(self, message): + """ Create a string representation of a message + + @param message: The message object + @type message: msgtypes.Message + + @return: The message's primitive representation in a particular + messaging format + @rtype: str, int, list or dict + """ + +class DefaultFormat(MessageTranslator): + """ The default on-the-wire message format for this library """ + typeRequest, typeResponse, typeError = range(3) + headerType, headerMsgID, headerNodeID, headerPayload, headerArgs = range(5) + + def fromPrimitive(self, msgPrimitive): + msgType = msgPrimitive[self.headerType] + if msgType == self.typeRequest: + msg = msgtypes.RequestMessage(msgPrimitive[self.headerNodeID], msgPrimitive[self.headerPayload], msgPrimitive[self.headerArgs], msgPrimitive[self.headerMsgID]) + elif msgType == self.typeResponse: + msg = msgtypes.ResponseMessage(msgPrimitive[self.headerMsgID], msgPrimitive[self.headerNodeID], msgPrimitive[self.headerPayload]) + elif msgType == self.typeError: + msg = msgtypes.ErrorMessage(msgPrimitive[self.headerMsgID], msgPrimitive[self.headerNodeID], msgPrimitive[self.headerPayload], msgPrimitive[self.headerArgs]) + else: + # Unknown message, no payload + msg = msgtypes.Message(msgPrimitive[self.headerMsgID], msgPrimitive[self.headerNodeID]) + return msg + + def toPrimitive(self, message): + msg = {self.headerMsgID: message.id, + self.headerNodeID: message.nodeID} + if isinstance(message, msgtypes.RequestMessage): + msg[self.headerType] = self.typeRequest + msg[self.headerPayload] = message.request + msg[self.headerArgs] = message.args + elif isinstance(message, msgtypes.ErrorMessage): + msg[self.headerType] = self.typeError + msg[self.headerPayload] = message.exceptionType + msg[self.headerArgs] = message.response + elif isinstance(message, msgtypes.ResponseMessage): + msg[self.headerType] = self.typeResponse + msg[self.headerPayload] = message.response + return msg diff --git a/lbrynet/dht/msgtypes.py b/lbrynet/dht/msgtypes.py new file mode 100644 index 000000000..de97fd239 --- /dev/null +++ b/lbrynet/dht/msgtypes.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. 
+# See the COPYING file included in this archive +# +# The docstrings in this module contain epytext markup; API documentation +# may be created by processing this file with epydoc: http://epydoc.sf.net + +import hashlib +import random + +class Message(object): + """ Base class for messages - all "unknown" messages use this class """ + def __init__(self, rpcID, nodeID): + self.id = rpcID + self.nodeID = nodeID + + +class RequestMessage(Message): + """ Message containing an RPC request """ + def __init__(self, nodeID, method, methodArgs, rpcID=None): + if rpcID == None: + hash = hashlib.sha384() + hash.update(str(random.getrandbits(255))) + rpcID = hash.digest() + Message.__init__(self, rpcID, nodeID) + self.request = method + self.args = methodArgs + + +class ResponseMessage(Message): + """ Message containing the result from a successful RPC request """ + def __init__(self, rpcID, nodeID, response): + Message.__init__(self, rpcID, nodeID) + self.response = response + + +class ErrorMessage(ResponseMessage): + """ Message containing the error from an unsuccessful RPC request """ + def __init__(self, rpcID, nodeID, exceptionType, errorMessage): + ResponseMessage.__init__(self, rpcID, nodeID, errorMessage) + if isinstance(exceptionType, type): + self.exceptionType = '%s.%s' % (exceptionType.__module__, exceptionType.__name__) + else: + self.exceptionType = exceptionType diff --git a/lbrynet/dht/node.py b/lbrynet/dht/node.py new file mode 100644 index 000000000..04a71c08c --- /dev/null +++ b/lbrynet/dht/node.py @@ -0,0 +1,1011 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive +# +# The docstrings in this module contain epytext markup; API documentation +# may be created by processing this file with epydoc: http://epydoc.sf.net + +import hashlib, random, struct, time, math, binascii +import argparse +from twisted.internet import defer +import constants +import routingtable +import datastore +import protocol +import twisted.internet.reactor +import twisted.internet.threads +import twisted.python.log +from contact import Contact +from hashwatcher import HashWatcher +import logging + + +def rpcmethod(func): + """ Decorator to expose Node methods as remote procedure calls + + Apply this decorator to methods in the Node class (or a subclass) in order + to make them remotely callable via the DHT's RPC mechanism. + """ + func.rpcmethod = True + return func + +class Node(object): + """ Local node in the Kademlia network + + This class represents a single local node in a Kademlia network; in other + words, this class encapsulates an Entangled-using application's "presence" + in a Kademlia network. + + In Entangled, all interactions with the Kademlia network by a client + application is performed via this class (or a subclass). + """ + def __init__(self, id=None, udpPort=4000, dataStore=None, routingTableClass=None, networkProtocol=None, lbryid=None, externalIP=None): + """ + @param dataStore: The data store to use. This must be class inheriting + from the C{DataStore} interface (or providing the + same API). How the data store manages its data + internally is up to the implementation of that data + store. + @type dataStore: entangled.kademlia.datastore.DataStore + @param routingTable: The routing table class to use. 
Since there exists + some ambiguity as to how the routing table should be + implemented in Kademlia, a different routing table + may be used, as long as the appropriate API is + exposed. This should be a class, not an object, + in order to allow the Node to pass an + auto-generated node ID to the routingtable object + upon instantiation (if necessary). + @type routingTable: entangled.kademlia.routingtable.RoutingTable + @param networkProtocol: The network protocol to use. This can be + overridden from the default to (for example) + change the format of the physical RPC messages + being transmitted. + @type networkProtocol: entangled.kademlia.protocol.KademliaProtocol + """ + if id != None: + self.id = id + else: + self.id = self._generateID() + self.lbryid = lbryid + self.port = udpPort + self._listeningPort = None # object implementing Twisted IListeningPort + # This will contain a deferred created when joining the network, to enable publishing/retrieving information from + # the DHT as soon as the node is part of the network (add callbacks to this deferred if scheduling such operations + # before the node has finished joining the network) + self._joinDeferred = None + self.next_refresh_call = None + self.next_change_token_call = None + # Create k-buckets (for storing contacts) + #self._buckets = [] + #for i in range(160): + # self._buckets.append(kbucket.KBucket()) + if routingTableClass == None: + self._routingTable = routingtable.OptimizedTreeRoutingTable(self.id) + else: + self._routingTable = routingTableClass(self.id) + + # Initialize this node's network access mechanisms + if networkProtocol == None: + self._protocol = protocol.KademliaProtocol(self) + else: + self._protocol = networkProtocol + # Initialize the data storage mechanism used by this node + self.token_secret = self._generateID() + self.old_token_secret = None + self.change_token() + if dataStore == None: + self._dataStore = datastore.DictDataStore() + else: + self._dataStore = dataStore + # Try to restore the node's state... + if 'nodeState' in self._dataStore: + state = self._dataStore['nodeState'] + self.id = state['id'] + for contactTriple in state['closestNodes']: + contact = Contact(contactTriple[0], contactTriple[1], contactTriple[2], self._protocol) + self._routingTable.addContact(contact) + self.externalIP = externalIP + self.hash_watcher = HashWatcher() + + def __del__(self): + #self._persistState() + if self._listeningPort is not None: + self._listeningPort.stopListening() + + def stop(self): + #cancel callLaters: + if self.next_refresh_call is not None: + self.next_refresh_call.cancel() + self.next_refresh_call = None + if self.next_change_token_call is not None: + self.next_change_token_call.cancel() + self.next_change_token_call = None + if self._listeningPort is not None: + self._listeningPort.stopListening() + self.hash_watcher.stop() + + + def joinNetwork(self, knownNodeAddresses=None): + """ Causes the Node to join the Kademlia network; normally, this + should be called before any other DHT operations. 
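As a minimal, hypothetical bootstrap sketch (the bootstrap address and id are placeholders, and the import path assumes this repository's package layout)::

    import hashlib
    from twisted.internet import reactor
    from lbrynet.dht.node import Node

    lbry_id = hashlib.sha384('example peer').digest()   # 48-byte id, illustrative only
    node = Node(udpPort=4444, lbryid=lbry_id)
    d = node.joinNetwork([('198.51.100.7', 4444)])       # a known node already on the network
    d.addCallback(lambda _: node.printContacts())
    reactor.run()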
+ + @param knownNodeAddresses: A sequence of tuples containing IP address + information for existing nodes on the + Kademlia network, in the format: + C{(, (udp port>)} + @type knownNodeAddresses: tuple + """ + # Prepare the underlying Kademlia protocol + if self.port is not None: + self._listeningPort = twisted.internet.reactor.listenUDP(self.port, self._protocol) #IGNORE:E1101 + # Create temporary contact information for the list of addresses of known nodes + if knownNodeAddresses != None: + bootstrapContacts = [] + for address, port in knownNodeAddresses: + contact = Contact(self._generateID(), address, port, self._protocol) + bootstrapContacts.append(contact) + else: + bootstrapContacts = None + # Initiate the Kademlia joining sequence - perform a search for this node's own ID + self._joinDeferred = self._iterativeFind(self.id, bootstrapContacts) +# #TODO: Refresh all k-buckets further away than this node's closest neighbour +# def getBucketAfterNeighbour(*args): +# for i in range(160): +# if len(self._buckets[i]) > 0: +# return i+1 +# return 160 +# df.addCallback(getBucketAfterNeighbour) +# df.addCallback(self._refreshKBuckets) + #protocol.reactor.callLater(10, self.printContacts) + #self._joinDeferred.addCallback(self._persistState) + #self._joinDeferred.addCallback(self.printContacts) + # Start refreshing k-buckets periodically, if necessary + self.next_refresh_call = twisted.internet.reactor.callLater(constants.checkRefreshInterval, self._refreshNode) #IGNORE:E1101 + self.hash_watcher.tick() + return self._joinDeferred + + def printContacts(self, *args): + print '\n\nNODE CONTACTS\n===============' + for i in range(len(self._routingTable._buckets)): + for contact in self._routingTable._buckets[i]._contacts: + print contact + print '==================================' + #twisted.internet.reactor.callLater(10, self.printContacts) + + def getApproximateTotalDHTNodes(self): + # get the deepest bucket and the number of contacts in that bucket and multiply it + # by the number of equivalently deep buckets in the whole DHT to get a really bad + # estimate! 
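        # For example, if the deepest bucket spans 1/1024 of the 384-bit key space
        # (so the factor computed below is 1024) and holds 6 contacts, the estimate
        # comes out to 6 * 1024 = 6144 nodes -- illustrative numbers only.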
+ bucket = self._routingTable._buckets[self._routingTable._kbucketIndex(self.id)] + num_in_bucket = len(bucket._contacts) + factor = (2**constants.key_bits) / (bucket.rangeMax - bucket.rangeMin) + return num_in_bucket * factor + + def getApproximateTotalHashes(self): + # Divide the number of hashes we know about by k to get a really, really, really + # bad estimate of the average number of hashes per node, then multiply by the + # approximate number of nodes to get a horrendous estimate of the total number + # of hashes in the DHT + num_in_data_store = len(self._dataStore._dict) + if num_in_data_store == 0: + return 0 + return num_in_data_store * self.getApproximateTotalDHTNodes() / 8 + + def announceHaveBlob(self, key, port): + return self.iterativeAnnounceHaveBlob(key, {'port': port, 'lbryid': self.lbryid}) + + def getPeersForBlob(self, blob_hash): + + def expand_and_filter(result): + expanded_peers = [] + if type(result) == dict: + if blob_hash in result: + for peer in result[blob_hash]: + #print peer + if self.lbryid != peer[6:]: + host = ".".join([str(ord(d)) for d in peer[:4]]) + if host == "127.0.0.1": + if "from_peer" in result: + if result["from_peer"] != "self": + host = result["from_peer"] + port, = struct.unpack('>H', peer[4:6]) + expanded_peers.append((host, port)) + return expanded_peers + + def find_failed(err): + #print "An exception occurred in the DHT" + #print err.getErrorMessage() + return [] + + d = self.iterativeFindValue(blob_hash) + d.addCallbacks(expand_and_filter, find_failed) + return d + + def get_most_popular_hashes(self, num_to_return): + return self.hash_watcher.most_popular_hashes(num_to_return) + + def iterativeAnnounceHaveBlob(self, blob_hash, value): + + known_nodes = {} + + def log_error(err, n): + logging.error("error storing blob_hash %s at %s", binascii.hexlify(blob_hash), str(n)) + logging.error(binascii.hexlify(err.getErrorMessage())) + logging.error(err.getTraceback()) + + def log_success(res): + logging.debug("Response to store request: %s", str(res)) + return res + + def announce_to_peer(responseTuple): + """ @type responseMsg: kademlia.msgtypes.ResponseMessage """ + # The "raw response" tuple contains the response message, and the originating address info + responseMsg = responseTuple[0] + originAddress = responseTuple[1] # tuple: (ip adress, udp port) + # Make sure the responding node is valid, and abort the operation if it isn't + if not responseMsg.nodeID in known_nodes: + return responseMsg.nodeID + + n = known_nodes[responseMsg.nodeID] + + result = responseMsg.response + if 'token' in result: + #print "Printing result...", result + value['token'] = result['token'] + d = n.store(blob_hash, value, self.id, 0) + d.addCallback(log_success) + d.addErrback(log_error, n) + else: + d = defer.succeed(False) + #else: + # print "result:", result + # print "No token where it should be" + return d + + def requestPeers(contacts): + if self.externalIP is not None and len(contacts) >= constants.k: + if self._routingTable.distance(blob_hash, self.id) < self._routingTable.distance(blob_hash, contacts[-1].id): + contacts.pop() + self.store(blob_hash, value, self_store=True, originalPublisherID=self.id) + elif self.externalIP is not None: + #print "attempting to self-store" + self.store(blob_hash, value, self_store=True, originalPublisherID=self.id) + ds = [] + for contact in contacts: + known_nodes[contact.id] = contact + rpcMethod = getattr(contact, "findValue") + df = rpcMethod(blob_hash, rawResponse=True) + df.addCallback(announce_to_peer) + 
df.addErrback(log_error, contact) + ds.append(df) + return defer.DeferredList(ds) + + d = self.iterativeFindNode(blob_hash) + d.addCallbacks(requestPeers) + return d + + def change_token(self): + self.old_token_secret = self.token_secret + self.token_secret = self._generateID() + self.next_change_token_call = twisted.internet.reactor.callLater(constants.tokenSecretChangeInterval, self.change_token) + + def make_token(self, compact_ip): + h = hashlib.new('sha384') + h.update(self.token_secret + compact_ip) + return h.digest() + + def verify_token(self, token, compact_ip): + h = hashlib.new('sha384') + h.update(self.token_secret + compact_ip) + if not token == h.digest(): + h = hashlib.new('sha384') + h.update(self.old_token_secret + compact_ip) + if not token == h.digest(): + #print 'invalid token found' + return False + return True + + # def iterativeStore(self, key, value, originalPublisherID=None, age=0): + # """ This is deprecated. Use iterativeAnnounceHaveBlob instead. + # + # The Kademlia store operation + # + # Call this to store/republish data in the DHT. + # + # @param key: The hashtable key of the data + # @type key: str + # @param value: The actual data (the value associated with C{key}) + # @type value: str + # @param originalPublisherID: The node ID of the node that is the + # B{original} publisher of the data + # @type originalPublisherID: str + # @param age: The relative age of the data (time in seconds since it was + # originally published). Note that the original publish time + # isn't actually given, to compensate for clock skew between + # different nodes. + # @type age: int + # """ + # #print ' iterativeStore called' + # if originalPublisherID == None: + # originalPublisherID = self.id + # + # def log_error(err): + # logging.error(err.getErrorMessage()) + # + # # Prepare a callback for doing "STORE" RPC calls + # def executeStoreRPCs(nodes): + # #print ' .....execStoreRPCs called' + # if len(nodes) >= constants.k: + # # If this node itself is closer to the key than the last (furthest) node in the list, + # # we should store the value at ourselves as well + # if self._routingTable.distance(key, self.id) < self._routingTable.distance(key, nodes[-1].id): + # nodes.pop() + # self.store(key, value, originalPublisherID=originalPublisherID, age=age) + # else: + # self.store(key, value, originalPublisherID=originalPublisherID, age=age) + # for contact in nodes: + # d = contact.store(key, value, originalPublisherID, age) + # d.addErrback(log_error) + # return nodes + # # Find k nodes closest to the key... + # df = self.iterativeFindNode(key) + # # ...and send them STORE RPCs as soon as they've been found + # df.addCallback(executeStoreRPCs) + # return df + + def iterativeFindNode(self, key): + """ The basic Kademlia node lookup operation + + Call this to find a remote node in the P2P overlay network. + + @param key: the n-bit key (i.e. the node or value ID) to search for + @type key: str + + @return: This immediately returns a deferred object, which will return + a list of k "closest" contacts (C{kademlia.contact.Contact} + objects) to the specified key as soon as the operation is + finished. + @rtype: twisted.internet.defer.Deferred + """ + return self._iterativeFind(key) + + def iterativeFindValue(self, key): + """ The Kademlia search operation (deterministic) + + Call this to retrieve data from the DHT. + + @param key: the n-bit key (i.e. 
the value ID) to search for + @type key: str + + @return: This immediately returns a deferred object, which will return + either one of two things: + - If the value was found, it will return a Python + dictionary containing the searched-for key (the C{key} + parameter passed to this method), and its associated + value, in the format: + C{key: data_value} + - If the value was not found, it will return a list of k + "closest" contacts (C{kademlia.contact.Contact} objects) + to the specified key + @rtype: twisted.internet.defer.Deferred + """ + # Prepare a callback for this operation + outerDf = defer.Deferred() + def checkResult(result): + if type(result) == dict: + # We have found the value; now see who was the closest contact without it... +# if 'closestNodeNoValue' in result: + # ...and store the key/value pair +# contact = result['closestNodeNoValue'] +# contact.store(key, result[key]) + outerDf.callback(result) + else: + # The value wasn't found, but a list of contacts was returned + # Now, see if we have the value (it might seem wasteful to search on the network + # first, but it ensures that all values are properly propagated through the + # network + #if key in self._dataStore: + if self._dataStore.hasPeersForBlob(key): + # Ok, we have the value locally, so use that + peers = self._dataStore.getPeersForBlob(key) + # Send this value to the closest node without it + #if len(result) > 0: + # contact = result[0] + # contact.store(key, value) + outerDf.callback({key: peers, "from_peer": 'self'}) + else: + # Ok, value does not exist in DHT at all + outerDf.callback(result) + + # Execute the search + df = self._iterativeFind(key, rpc='findValue') + df.addCallback(checkResult) + return outerDf + + def addContact(self, contact): + """ Add/update the given contact; simple wrapper for the same method + in this object's RoutingTable object + + @param contact: The contact to add to this node's k-buckets + @type contact: kademlia.contact.Contact + """ + self._routingTable.addContact(contact) + + def removeContact(self, contactID): + """ Remove the contact with the specified node ID from this node's + table of known nodes. 
This is a simple wrapper for the same method + in this object's RoutingTable object + + @param contactID: The node ID of the contact to remove + @type contactID: str + """ + self._routingTable.removeContact(contactID) + + def findContact(self, contactID): + """ Find a entangled.kademlia.contact.Contact object for the specified + cotact ID + + @param contactID: The contact ID of the required Contact object + @type contactID: str + + @return: Contact object of remote node with the specified node ID, + or None if the contact was not found + @rtype: twisted.internet.defer.Deferred + """ + try: + contact = self._routingTable.getContact(contactID) + df = defer.Deferred() + df.callback(contact) + except ValueError: + def parseResults(nodes): + if contactID in nodes: + contact = nodes[nodes.index(contactID)] + return contact + else: + return None + df = self.iterativeFindNode(contactID) + df.addCallback(parseResults) + return df + + @rpcmethod + def ping(self): + """ Used to verify contact between two Kademlia nodes + + @rtype: str + """ + return 'pong' + + @rpcmethod + def store(self, key, value, originalPublisherID=None, self_store=False, **kwargs): + """ Store the received data in this node's local hash table + + @param key: The hashtable key of the data + @type key: str + @param value: The actual data (the value associated with C{key}) + @type value: str + @param originalPublisherID: The node ID of the node that is the + B{original} publisher of the data + @type originalPublisherID: str + @param age: The relative age of the data (time in seconds since it was + originally published). Note that the original publish time + isn't actually given, to compensate for clock skew between + different nodes. + @type age: int + + @rtype: str + + @todo: Since the data (value) may be large, passing it around as a buffer + (which is the case currently) might not be a good idea... will have + to fix this (perhaps use a stream from the Protocol class?) + """ + # Get the sender's ID (if any) + if originalPublisherID == None: + if '_rpcNodeID' in kwargs: + originalPublisherID = kwargs['_rpcNodeID'] + else: + raise TypeError, 'No NodeID given. Therefore we can\'t store this node' + + if self_store is True and self.externalIP is not None: + contact = Contact(self.id, self.externalIP, self.port, None, None) + compact_ip = contact.compact_ip() + elif '_rpcNodeContact' in kwargs: + contact = kwargs['_rpcNodeContact'] + #print contact.address + compact_ip = contact.compact_ip() + #print compact_ip + else: + return 'Not OK' + #raise TypeError, 'No contact info available' + + if ((self_store is False) and + (not 'token' in value or not self.verify_token(value['token'], compact_ip))): + #if not 'token' in value: + # print "Couldn't find token in value" + #elif not self.verify_token(value['token'], contact.compact_ip()): + # print "Token is invalid" + raise ValueError('Invalid or missing token') + + if 'port' in value: + port = int(value['port']) + if 0 <= port <= 65536: + compact_port = str(struct.pack('>H', port)) + else: + raise TypeError, 'Invalid port' + else: + raise TypeError, 'No port available' + + if 'lbryid' in value: + if len(value['lbryid']) > constants.key_bits: + raise ValueError, 'Invalid lbryid' + else: + compact_address = compact_ip + compact_port + value['lbryid'] + else: + raise TypeError, 'No lbryid given' + + #if originalPublisherID == None: + #if rpcSenderID != None: + # originalPublisherID = rpcSenderID + #else: + # raise TypeError, 'No publisher specifed, and RPC caller ID not available. 
Data requires an original publisher.' + #if self_store is True: + # print "got this far" + now = int(time.time()) + originallyPublished = now# - age + #print compact_address + self._dataStore.addPeerToBlob(key, compact_address, now, originallyPublished, originalPublisherID) + #if self_store is True: + # print "looks like it was successful maybe" + return 'OK' + + @rpcmethod + def findNode(self, key, **kwargs): + """ Finds a number of known nodes closest to the node/value with the + specified key. + + @param key: the n-bit key (i.e. the node or value ID) to search for + @type key: str + + @return: A list of contact triples closest to the specified key. + This method will return C{k} (or C{count}, if specified) + contacts if at all possible; it will only return fewer if the + node is returning all of the contacts that it knows of. + @rtype: list + """ + # Get the sender's ID (if any) + if '_rpcNodeID' in kwargs: + rpcSenderID = kwargs['_rpcNodeID'] + else: + rpcSenderID = None + contacts = self._routingTable.findCloseNodes(key, constants.k, rpcSenderID) + contactTriples = [] + for contact in contacts: + contactTriples.append( (contact.id, contact.address, contact.port) ) + return contactTriples + + @rpcmethod + def findValue(self, key, **kwargs): + """ Return the value associated with the specified key if present in + this node's data, otherwise execute FIND_NODE for the key + + @param key: The hashtable key of the data to return + @type key: str + + @return: A dictionary containing the requested key/value pair, + or a list of contact triples closest to the requested key. + @rtype: dict or list + """ + if self._dataStore.hasPeersForBlob(key): + rval = {key: self._dataStore.getPeersForBlob(key)} + else: + contactTriples = self.findNode(key, **kwargs) + rval = {'contacts': contactTriples} + if '_rpcNodeContact' in kwargs: + contact = kwargs['_rpcNodeContact'] + compact_ip = contact.compact_ip() + rval['token'] = self.make_token(compact_ip) + self.hash_watcher.add_requested_hash(key, compact_ip) + return rval + +# def _distance(self, keyOne, keyTwo): +# """ Calculate the XOR result between two string variables +# +# @return: XOR result of two long variables +# @rtype: long +# """ +# valKeyOne = long(keyOne.encode('hex'), 16) +# valKeyTwo = long(keyTwo.encode('hex'), 16) +# return valKeyOne ^ valKeyTwo + + def _generateID(self): + """ Generates an n-bit pseudo-random identifier + + @return: A globally unique n-bit pseudo-random identifier + @rtype: str + """ + hash = hashlib.sha384() + hash.update(str(random.getrandbits(255))) + return hash.digest() + + def _iterativeFind(self, key, startupShortlist=None, rpc='findNode'): + """ The basic Kademlia iterative lookup operation (for nodes/values) + + This builds a list of k "closest" contacts through iterative use of + the "FIND_NODE" RPC, or if C{findValue} is set to C{True}, using the + "FIND_VALUE" RPC, in which case the value (if found) may be returned + instead of a list of contacts + + @param key: the n-bit key (i.e. the node or value ID) to search for + @type key: str + @param startupShortlist: A list of contacts to use as the starting + shortlist for this search; this is normally + only used when the node joins the network + @type startupShortlist: list + @param rpc: The name of the RPC to issue to remote nodes during the + Kademlia lookup operation (e.g. this sets whether this + algorithm should search for a data value (if + rpc='findValue') or not. 
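# The store() and findValue() RPCs above exchange peers as compact strings: four bytes of IPv4
# address, a two-byte big-endian port, then the announcing node's lbryid. A small pack/unpack
# sketch (Python 2 byte strings; compact_ip() is assumed to be the inet_aton form of the
# contact's address):
import socket
import struct

def pack_compact_peer(ip, port, lbryid):
    # e.g. pack_compact_peer('127.0.0.1', 3333, '\x01' * 48)
    return socket.inet_aton(ip) + struct.pack('>H', port) + lbryid

def unpack_compact_peer(compact):
    ip = socket.inet_ntoa(compact[:4])
    port = struct.unpack('>H', compact[4:6])[0]
    return ip, port, compact[6:]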
It can thus be used to perform + other operations that piggy-back on the basic Kademlia + lookup operation (Entangled's "delete" RPC, for instance). + @type rpc: str + + @return: If C{findValue} is C{True}, the algorithm will stop as soon + as a data value for C{key} is found, and return a dictionary + containing the key and the found value. Otherwise, it will + return a list of the k closest nodes to the specified key + @rtype: twisted.internet.defer.Deferred + """ + if rpc != 'findNode': + findValue = True + else: + findValue = False + shortlist = [] + if startupShortlist == None: + shortlist = self._routingTable.findCloseNodes(key, constants.alpha) + if key != self.id: + # Update the "last accessed" timestamp for the appropriate k-bucket + self._routingTable.touchKBucket(key) + if len(shortlist) == 0: + # This node doesn't know of any other nodes + fakeDf = defer.Deferred() + fakeDf.callback([]) + return fakeDf + else: + # This is used during the bootstrap process; node ID's are most probably fake + shortlist = startupShortlist + + # List of active queries; len() indicates number of active probes + # - using lists for these variables, because Python doesn't allow binding a new value to a name in an enclosing (non-global) scope + activeProbes = [] + # List of contact IDs that have already been queried + alreadyContacted = [] + # Probes that were active during the previous iteration + # A list of found and known-to-be-active remote nodes + activeContacts = [] + # This should only contain one entry; the next scheduled iteration call + pendingIterationCalls = [] + prevClosestNode = [None] + findValueResult = {} + slowNodeCount = [0] + + def extendShortlist(responseTuple): + """ @type responseMsg: kademlia.msgtypes.ResponseMessage """ + # The "raw response" tuple contains the response message, and the originating address info + responseMsg = responseTuple[0] + originAddress = responseTuple[1] # tuple: (ip adress, udp port) + # Make sure the responding node is valid, and abort the operation if it isn't + if responseMsg.nodeID in activeContacts or responseMsg.nodeID == self.id: + return responseMsg.nodeID + + # Mark this node as active + if responseMsg.nodeID in shortlist: + # Get the contact information from the shortlist... 
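# The one-element lists set up above (prevClosestNode, slowNodeCount, and friends) are the
# usual Python 2 workaround for the missing 'nonlocal' keyword: the nested callbacks below may
# mutate list contents, but could not rebind a plain name in the enclosing scope. A minimal
# illustration of the idiom:
def make_counter():
    count = [0]              # a bare 'count = 0' could not be reassigned from inside bump()
    def bump():
        count[0] += 1
        return count[0]
    return bump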
+ aContact = shortlist[shortlist.index(responseMsg.nodeID)] + else: + # If it's not in the shortlist; we probably used a fake ID to reach it + # - reconstruct the contact, using the real node ID this time + aContact = Contact(responseMsg.nodeID, originAddress[0], originAddress[1], self._protocol) + activeContacts.append(aContact) + # This makes sure "bootstrap"-nodes with "fake" IDs don't get queried twice + if responseMsg.nodeID not in alreadyContacted: + alreadyContacted.append(responseMsg.nodeID) + # Now grow extend the (unverified) shortlist with the returned contacts + result = responseMsg.response + #TODO: some validation on the result (for guarding against attacks) + # If we are looking for a value, first see if this result is the value + # we are looking for before treating it as a list of contact triples + if findValue is True and key in result and not 'contacts' in result: + # We have found the value + findValueResult[key] = result[key] + findValueResult['from_peer'] = aContact.address + else: + if findValue is True: + # We are looking for a value, and the remote node didn't have it + # - mark it as the closest "empty" node, if it is + if 'closestNodeNoValue' in findValueResult: + if self._routingTable.distance(key, responseMsg.nodeID) < self._routingTable.distance(key, activeContacts[0].id): + findValueResult['closestNodeNoValue'] = aContact + else: + findValueResult['closestNodeNoValue'] = aContact + contactTriples = result['contacts'] + else: + contactTriples = result + for contactTriple in contactTriples: + if isinstance(contactTriple, (list, tuple)) and len(contactTriple) == 3: + testContact = Contact(contactTriple[0], contactTriple[1], contactTriple[2], self._protocol) + if testContact not in shortlist: + shortlist.append(testContact) + return responseMsg.nodeID + + def removeFromShortlist(failure): + """ @type failure: twisted.python.failure.Failure """ + failure.trap(protocol.TimeoutError) + deadContactID = failure.getErrorMessage() + if deadContactID in shortlist: + shortlist.remove(deadContactID) + return deadContactID + + def cancelActiveProbe(contactID): + activeProbes.pop() + if len(activeProbes) <= constants.alpha/2 and len(pendingIterationCalls): + # Force the iteration + pendingIterationCalls[0].cancel() + del pendingIterationCalls[0] + #print 'forcing iteration =================' + searchIteration() + + def log_error(err): + logging.error(err.getErrorMessage()) + + # Send parallel, asynchronous FIND_NODE RPCs to the shortlist of contacts + def searchIteration(): + #print '==> searchiteration' + slowNodeCount[0] = len(activeProbes) + # Sort the discovered active nodes from closest to furthest + activeContacts.sort(lambda firstContact, secondContact, targetKey=key: cmp(self._routingTable.distance(firstContact.id, targetKey), self._routingTable.distance(secondContact.id, targetKey))) + # This makes sure a returning probe doesn't force calling this function by mistake + while len(pendingIterationCalls): + del pendingIterationCalls[0] + # See if should continue the search + if key in findValueResult: + #print '++++++++++++++ DONE (findValue found) +++++++++++++++\n\n' + outerDf.callback(findValueResult) + return + elif len(activeContacts) and findValue == False: + if (len(activeContacts) >= constants.k) or (activeContacts[0] == prevClosestNode[0] and len(activeProbes) == slowNodeCount[0]): + # TODO: Re-send the FIND_NODEs to all of the k closest nodes not already queried + # Ok, we're done; either we have accumulated k active contacts or no improvement in closestNode 
has been noted + #if len(activeContacts) >= constants.k: + # print '++++++++++++++ DONE (test for k active contacts) +++++++++++++++\n\n' + #else: + # print '++++++++++++++ DONE (test for closest node) +++++++++++++++\n\n' + outerDf.callback(activeContacts) + return + # The search continues... + if len(activeContacts): + prevClosestNode[0] = activeContacts[0] + contactedNow = 0 + shortlist.sort(lambda firstContact, secondContact, targetKey=key: cmp(self._routingTable.distance(firstContact.id, targetKey), self._routingTable.distance(secondContact.id, targetKey))) + # Store the current shortList length before contacting other nodes + prevShortlistLength = len(shortlist) + for contact in shortlist: + if contact.id not in alreadyContacted: + activeProbes.append(contact.id) + rpcMethod = getattr(contact, rpc) + df = rpcMethod(key, rawResponse=True) + df.addCallback(extendShortlist) + df.addErrback(removeFromShortlist) + df.addCallback(cancelActiveProbe) + df.addErrback(log_error) + alreadyContacted.append(contact.id) + contactedNow += 1 + if contactedNow == constants.alpha: + break + if len(activeProbes) > slowNodeCount[0] \ + or (len(shortlist) < constants.k and len(activeContacts) < len(shortlist) and len(activeProbes) > 0): + #print '----------- scheduling next call -------------' + # Schedule the next iteration if there are any active calls (Kademlia uses loose parallelism) + call = twisted.internet.reactor.callLater(constants.iterativeLookupDelay, searchIteration) #IGNORE:E1101 + pendingIterationCalls.append(call) + # Check for a quick contact response that made an update to the shortList + elif prevShortlistLength < len(shortlist): + # Ensure that the closest contacts are taken from the updated shortList + searchIteration() + else: + #print '++++++++++++++ DONE (logically) +++++++++++++\n\n' + # If no probes were sent, there will not be any improvement, so we're done + outerDf.callback(activeContacts) + + outerDf = defer.Deferred() + # Start the iterations + searchIteration() + return outerDf + +# def _kbucketIndex(self, key): +# """ Calculate the index of the k-bucket which is responsible for the +# specified key +# +# @param key: The key for which to find the appropriate k-bucket index +# @type key: str +# +# @return: The index of the k-bucket responsible for the specified key +# @rtype: int +# """ +# distance = self._distance(self.id, key) +# bucketIndex = int(math.log(distance, 2)) +# return bucketIndex + +# def _randomIDInBucketRange(self, bucketIndex): +# """ Returns a random ID in the specified k-bucket's range +# +# @param bucketIndex: The index of the k-bucket to use +# @type bucketIndex: int +# """ +# def makeIDString(distance): +# id = hex(distance)[2:] +# if id[-1] == 'L': +# id = id[:-1] +# if len(id) % 2 != 0: +# id = '0' + id +# id = id.decode('hex') +# id = (20 - len(id))*'\x00' + id +# return id +# min = math.pow(2, bucketIndex) +# max = math.pow(2, bucketIndex+1) +# distance = random.randrange(min, max) +# distanceStr = makeIDString(distance) +# randomID = makeIDString(self._distance(distanceStr, self.id)) +# return randomID + +# def _refreshKBuckets(self, startIndex=0, force=False): +# """ Refreshes all k-buckets that need refreshing, starting at the +# k-bucket with the specified index +# +# @param startIndex: The index of the bucket to start refreshing at; +# this bucket and those further away from it will +# be refreshed. For example, when joining the +# network, this node will set this to the index of +# the bucket after the one containing it's closest +# neighbour. 
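# searchIteration() above orders contacts with a cmp-style comparison function. An equivalent,
# self-contained form of the same ordering uses a key function over the routing table's XOR
# metric (the 'distance' argument here stands in for self._routingTable.distance):
def sort_by_distance(contacts, target_key, distance):
    """Return the contacts ordered closest-first by XOR distance to target_key."""
    return sorted(contacts, key=lambda contact: distance(contact.id, target_key))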
+# @type startIndex: index +# @param force: If this is C{True}, all buckets (in the specified range) +# will be refreshed, regardless of the time they were last +# accessed. +# @type force: bool +# """ +# #print '_refreshKbuckets called with index:',startIndex +# bucketIndex = [] +# bucketIndex.append(startIndex + 1) +# outerDf = defer.Deferred() +# def refreshNextKBucket(dfResult=None): +# #print ' refreshNexKbucket called; bucketindex is', bucketIndex[0] +# bucketIndex[0] += 1 +# while bucketIndex[0] < 160: +# if force or (int(time.time()) - self._buckets[bucketIndex[0]].lastAccessed >= constants.refreshTimeout): +# searchID = self._randomIDInBucketRange(bucketIndex[0]) +# self._buckets[bucketIndex[0]].lastAccessed = int(time.time()) +# #print ' refreshing bucket',bucketIndex[0] +# df = self.iterativeFindNode(searchID) +# df.addCallback(refreshNextKBucket) +# return +# else: +# bucketIndex[0] += 1 +# # If this is reached, we have refreshed all the buckets +# #print ' all buckets refreshed; initiating outer deferred callback' +# outerDf.callback(None) +# #print '_refreshKbuckets starting cycle' +# refreshNextKBucket() +# #print '_refreshKbuckets returning' +# return outerDf + + #def _persistState(self, *args): + # state = {'id': self.id, + # 'closestNodes': self.findNode(self.id)} + # now = int(time.time()) + # self._dataStore.setItem('nodeState', state, now, now, self.id) + + def _refreshNode(self): + """ Periodically called to perform k-bucket refreshes and data + replication/republishing as necessary """ + #print 'refreshNode called' + df = self._refreshRoutingTable() + #df.addCallback(self._republishData) + df.addCallback(self._removeExpiredPeers) + df.addCallback(self._scheduleNextNodeRefresh) + + def _refreshRoutingTable(self): + nodeIDs = self._routingTable.getRefreshList(0, False) + outerDf = defer.Deferred() + def searchForNextNodeID(dfResult=None): + if len(nodeIDs) > 0: + searchID = nodeIDs.pop() + df = self.iterativeFindNode(searchID) + df.addCallback(searchForNextNodeID) + else: + # If this is reached, we have finished refreshing the routing table + outerDf.callback(None) + # Start the refreshing cycle + searchForNextNodeID() + return outerDf + + #def _republishData(self, *args): + # #print '---republishData() called' + # df = twisted.internet.threads.deferToThread(self._threadedRepublishData) + # return df + + def _scheduleNextNodeRefresh(self, *args): + #print '==== sheduling next refresh' + self.next_refresh_call = twisted.internet.reactor.callLater(constants.checkRefreshInterval, self._refreshNode) + + def _removeExpiredPeers(self, *args):#args put here because _refreshRoutingTable does outerDF.callback(None) + df = twisted.internet.threads.deferToThread(self._dataStore.removeExpiredPeers) + return df + + #def _threadedRepublishData(self, *args): + # """ Republishes and expires any stored data (i.e. 
stored + # C{(key, value pairs)} that need to be republished/expired + # + # This method should run in a deferred thread + # """ + # #print '== republishData called, node:',ord(self.id[0]) + # expiredKeys = [] + # for key in self._dataStore: + # # Filter internal variables stored in the datastore + # if key == 'nodeState': + # continue + # now = int(time.time()) + # originalPublisherID = self._dataStore.originalPublisherID(key) + # age = now - self._dataStore.originalPublishTime(key) + # #print ' node:',ord(self.id[0]),'key:',ord(key[0]),'orig publishing time:',self._dataStore.originalPublishTime(key),'now:',now,'age:',age,'lastPublished age:',now - self._dataStore.lastPublished(key),'original pubID:', ord(originalPublisherID[0]) + # if originalPublisherID == self.id: + # # This node is the original publisher; it has to republish + # # the data before it expires (24 hours in basic Kademlia) + # if age >= constants.dataExpireTimeout: + # #print ' REPUBLISHING key:', key + # #self.iterativeStore(key, self._dataStore[key]) + # twisted.internet.reactor.callFromThread(self.iterativeStore, key, self._dataStore[key]) + # else: + # # This node needs to replicate the data at set intervals, + # # until it expires, without changing the metadata associated with it + # # First, check if the data has expired + # if age >= constants.dataExpireTimeout: + # # This key/value pair has expired (and it has not been republished by the original publishing node + # # - remove it + # expiredKeys.append(key) + # elif now - self._dataStore.lastPublished(key) >= constants.replicateInterval: + # # ...data has not yet expired, and we need to replicate it + # #print ' replicating key:', key,'age:',age + # #self.iterativeStore(key=key, value=self._dataStore[key], originalPublisherID=originalPublisherID, age=age) + # twisted.internet.reactor.callFromThread(self.iterativeStore, key=key, value=self._dataStore[key], originalPublisherID=originalPublisherID, age=age) + # for key in expiredKeys: + # #print ' expiring key:', key + # del self._dataStore[key] + # #print 'done with threadedDataRefresh()' + + +def main(): + + parser = argparse.ArgumentParser(description="Launch a dht node") + parser.add_argument("udp_port", help="The UDP port on which the node will listen", + type=int) + parser.add_argument("known_node_ip", + help="The IP of a known node to be used to bootstrap into the network", + nargs='?') + parser.add_argument("known_node_port", + help="The port of a known node to be used to bootstrap into the network", + nargs='?', default=4000, type=int) + + args = parser.parse_args() + + if args.known_node_ip: + known_nodes = [(args.known_node_ip, args.known_node_port)] + else: + known_nodes = [] + + node = Node(udpPort=args.udp_port) + node.joinNetwork(known_nodes) + twisted.internet.reactor.run() + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/lbrynet/dht/protocol.py b/lbrynet/dht/protocol.py new file mode 100644 index 000000000..1a9f2c262 --- /dev/null +++ b/lbrynet/dht/protocol.py @@ -0,0 +1,305 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. 
+# See the COPYING file included in this archive +# +# The docstrings in this module contain epytext markup; API documentation +# may be created by processing this file with epydoc: http://epydoc.sf.net + +import time + +from twisted.internet import protocol, defer +from twisted.python import failure +import twisted.internet.reactor + +import constants +import encoding +import msgtypes +import msgformat +from contact import Contact + +reactor = twisted.internet.reactor + +class TimeoutError(Exception): + """ Raised when a RPC times out """ + +class KademliaProtocol(protocol.DatagramProtocol): + """ Implements all low-level network-related functions of a Kademlia node """ + msgSizeLimit = constants.udpDatagramMaxSize-26 + maxToSendDelay = 10**-3#0.05 + minToSendDelay = 10**-5#0.01 + + def __init__(self, node, msgEncoder=encoding.Bencode(), msgTranslator=msgformat.DefaultFormat()): + self._node = node + self._encoder = msgEncoder + self._translator = msgTranslator + self._sentMessages = {} + self._partialMessages = {} + self._partialMessagesProgress = {} + self._next = 0 + self._callLaterList = {} + + def sendRPC(self, contact, method, args, rawResponse=False): + """ Sends an RPC to the specified contact + + @param contact: The contact (remote node) to send the RPC to + @type contact: kademlia.contacts.Contact + @param method: The name of remote method to invoke + @type method: str + @param args: A list of (non-keyword) arguments to pass to the remote + method, in the correct order + @type args: tuple + @param rawResponse: If this is set to C{True}, the caller of this RPC + will receive a tuple containing the actual response + message object and the originating address tuple as + a result; in other words, it will not be + interpreted by this class. Unless something special + needs to be done with the metadata associated with + the message, this should remain C{False}. + @type rawResponse: bool + + @return: This immediately returns a deferred object, which will return + the result of the RPC call, or raise the relevant exception + if the remote node raised one. If C{rawResponse} is set to + C{True}, however, it will always return the actual response + message (which may be a C{ResponseMessage} or an + C{ErrorMessage}). 
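# Node._iterativeFind() issues RPCs by looking the method up on a Contact and calling it
# (contact.findNode(key), contact.ping(), ...); those calls ultimately land in sendRPC() above.
# A small sketch of one such call, treating a timed-out peer as contributing no contacts
# (contact and key are hypothetical; TimeoutError is the exception defined at the top of this
# module):
def probe_contact(contact, key):
    def on_timeout(failure):
        failure.trap(TimeoutError)   # re-raise anything that is not an RPC timeout
        return []                    # an unreachable peer simply contributes nothing
    d = contact.findNode(key)        # returns the deferred created by sendRPC()
    d.addErrback(on_timeout)
    return d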
+ @rtype: twisted.internet.defer.Deferred + """ + msg = msgtypes.RequestMessage(self._node.id, method, args) + msgPrimitive = self._translator.toPrimitive(msg) + encodedMsg = self._encoder.encode(msgPrimitive) + + df = defer.Deferred() + if rawResponse: + df._rpcRawResponse = True + + # Set the RPC timeout timer + timeoutCall = reactor.callLater(constants.rpcTimeout, self._msgTimeout, msg.id) #IGNORE:E1101 + # Transmit the data + self._send(encodedMsg, msg.id, (contact.address, contact.port)) + self._sentMessages[msg.id] = (contact.id, df, timeoutCall) + return df + + def datagramReceived(self, datagram, address): + """ Handles and parses incoming RPC messages (and responses) + + @note: This is automatically called by Twisted when the protocol + receives a UDP datagram + """ + if datagram[0] == '\x00' and datagram[25] == '\x00': + totalPackets = (ord(datagram[1]) << 8) | ord(datagram[2]) + msgID = datagram[5:25] + seqNumber = (ord(datagram[3]) << 8) | ord(datagram[4]) + if msgID not in self._partialMessages: + self._partialMessages[msgID] = {} + self._partialMessages[msgID][seqNumber] = datagram[26:] + if len(self._partialMessages[msgID]) == totalPackets: + keys = self._partialMessages[msgID].keys() + keys.sort() + data = '' + for key in keys: + data += self._partialMessages[msgID][key] + datagram = data + del self._partialMessages[msgID] + else: + return + try: + msgPrimitive = self._encoder.decode(datagram) + except encoding.DecodeError: + # We received some rubbish here + return + + message = self._translator.fromPrimitive(msgPrimitive) + remoteContact = Contact(message.nodeID, address[0], address[1], self) + + # Refresh the remote node's details in the local node's k-buckets + self._node.addContact(remoteContact) + + if isinstance(message, msgtypes.RequestMessage): + # This is an RPC method request + self._handleRPC(remoteContact, message.id, message.request, message.args) + elif isinstance(message, msgtypes.ResponseMessage): + # Find the message that triggered this response + if self._sentMessages.has_key(message.id): + # Cancel timeout timer for this RPC + df, timeoutCall = self._sentMessages[message.id][1:3] + timeoutCall.cancel() + del self._sentMessages[message.id] + + if hasattr(df, '_rpcRawResponse'): + # The RPC requested that the raw response message and originating address be returned; do not interpret it + df.callback((message, address)) + elif isinstance(message, msgtypes.ErrorMessage): + # The RPC request raised a remote exception; raise it locally + if message.exceptionType.startswith('exceptions.'): + exceptionClassName = message.exceptionType[11:] + else: + localModuleHierarchy = self.__module__.split('.') + remoteHierarchy = message.exceptionType.split('.') + #strip the remote hierarchy + while remoteHierarchy[0] == localModuleHierarchy[0]: + remoteHierarchy.pop(0) + localModuleHierarchy.pop(0) + exceptionClassName = '.'.join(remoteHierarchy) + remoteException = None + try: + exec 'remoteException = %s("%s")' % (exceptionClassName, message.response) + except Exception: + # We could not recreate the exception; create a generic one + remoteException = Exception(message.response) + df.errback(remoteException) + else: + # We got a result from the RPC + df.callback(message.response) + else: + # If the original message isn't found, it must have timed out + #TODO: we should probably do something with this... 
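# The reassembly branch above reads a fixed 26-byte header from every partial datagram. The
# same layout, restated as a standalone pack/unpack pair (Python 2 byte strings):
import struct

def pack_fragment(total_packets, seq_number, rpc_id, payload):
    # 0x00 | total packets (2 bytes) | sequence number (2 bytes) | 20-byte RPC id | 0x00 | data
    assert len(rpc_id) == 20
    return '\x00' + struct.pack('>HH', total_packets, seq_number) + rpc_id + '\x00' + payload

def unpack_fragment(datagram):
    total_packets, seq_number = struct.unpack('>HH', datagram[1:5])
    return total_packets, seq_number, datagram[5:25], datagram[26:]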
+ pass + + def _send(self, data, rpcID, address): + """ Transmit the specified data over UDP, breaking it up into several + packets if necessary + + If the data is spread over multiple UDP datagrams, the packets have the + following structure:: + | | | | | |||||||||||| 0x00 | + |Transmision|Total number|Sequence number| RPC ID |Header end| + | type ID | of packets |of this packet | | indicator| + | (1 byte) | (2 bytes) | (2 bytes) |(20 bytes)| (1 byte) | + | | | | | |||||||||||| | + + @note: The header used for breaking up large data segments will + possibly be moved out of the KademliaProtocol class in the + future, into something similar to a message translator/encoder + class (see C{kademlia.msgformat} and C{kademlia.encoding}). + """ + if len(data) > self.msgSizeLimit: + # We have to spread the data over multiple UDP datagrams, and provide sequencing information + # 1st byte is transmission type id, bytes 2 & 3 are the total number of packets in this transmission, bytes 4 & 5 are the sequence number for this specific packet + totalPackets = len(data) / self.msgSizeLimit + if len(data) % self.msgSizeLimit > 0: + totalPackets += 1 + encTotalPackets = chr(totalPackets >> 8) + chr(totalPackets & 0xff) + seqNumber = 0 + startPos = 0 + while seqNumber < totalPackets: + #reactor.iterate() #IGNORE:E1101 + packetData = data[startPos:startPos+self.msgSizeLimit] + encSeqNumber = chr(seqNumber >> 8) + chr(seqNumber & 0xff) + txData = '\x00%s%s%s\x00%s' % (encTotalPackets, encSeqNumber, rpcID, packetData) + self._sendNext(txData, address) + + startPos += self.msgSizeLimit + seqNumber += 1 + else: + self._sendNext(data, address) + + def _sendNext(self, txData, address): + """ Send the next UDP packet """ + ts = time.time() + delay = 0 + if ts >= self._next: + delay = self.minToSendDelay + self._next = ts + self.minToSendDelay + else: + delay = (self._next-ts) + self.maxToSendDelay + self._next += self.maxToSendDelay + if self.transport: + laterCall = reactor.callLater(delay, self.transport.write, txData, address) + for key in self._callLaterList.keys(): + if key <= ts: + del self._callLaterList[key] + self._callLaterList[self._next] = laterCall + + def _sendResponse(self, contact, rpcID, response): + """ Send a RPC response to the specified contact + """ + msg = msgtypes.ResponseMessage(rpcID, self._node.id, response) + msgPrimitive = self._translator.toPrimitive(msg) + encodedMsg = self._encoder.encode(msgPrimitive) + self._send(encodedMsg, rpcID, (contact.address, contact.port)) + + def _sendError(self, contact, rpcID, exceptionType, exceptionMessage): + """ Send an RPC error message to the specified contact + """ + msg = msgtypes.ErrorMessage(rpcID, self._node.id, exceptionType, exceptionMessage) + msgPrimitive = self._translator.toPrimitive(msg) + encodedMsg = self._encoder.encode(msgPrimitive) + self._send(encodedMsg, rpcID, (contact.address, contact.port)) + + def _handleRPC(self, senderContact, rpcID, method, args): + """ Executes a local function in response to an RPC request """ + # Set up the deferred callchain + def handleError(f): + self._sendError(senderContact, rpcID, f.type, f.getErrorMessage()) + + def handleResult(result): + self._sendResponse(senderContact, rpcID, result) + + df = defer.Deferred() + df.addCallback(handleResult) + df.addErrback(handleError) + + # Execute the RPC + func = getattr(self._node, method, None) + if callable(func) and hasattr(func, 'rpcmethod'): + # Call the exposed Node method and return the result to the deferred callback chain + try: + ##try: + ## # Try 
to pass the sender's node id to the function... + result = func(*args, **{'_rpcNodeID': senderContact.id, '_rpcNodeContact': senderContact}) + ##except TypeError: + ## # ...or simply call it if that fails + ## result = func(*args) + except Exception, e: + df.errback(failure.Failure(e)) + else: + df.callback(result) + else: + # No such exposed method + df.errback( failure.Failure( AttributeError('Invalid method: %s' % method) ) ) + + def _msgTimeout(self, messageID): + """ Called when an RPC request message times out """ + # Find the message that timed out + if self._sentMessages.has_key(messageID): + remoteContactID, df = self._sentMessages[messageID][0:2] + if self._partialMessages.has_key(messageID): + # We are still receiving this message + # See if any progress has been made; if not, kill the message + if self._partialMessagesProgress.has_key(messageID): + if len(self._partialMessagesProgress[messageID]) == len(self._partialMessages[messageID]): + # No progress has been made + del self._partialMessagesProgress[messageID] + del self._partialMessages[messageID] + df.errback(failure.Failure(TimeoutError(remoteContactID))) + return + # Reset the RPC timeout timer + timeoutCall = reactor.callLater(constants.rpcTimeout, self._msgTimeout, messageID) #IGNORE:E1101 + self._sentMessages[messageID] = (remoteContactID, df, timeoutCall) + return + del self._sentMessages[messageID] + # The message's destination node is now considered to be dead; + # raise an (asynchronous) TimeoutError exception and update the host node + self._node.removeContact(remoteContactID) + df.errback(failure.Failure(TimeoutError(remoteContactID))) + else: + # This should never be reached + print "ERROR: deferred timed out, but is not present in sent messages list!" + + def stopProtocol(self): + """ Called when the transport is disconnected. + + Will only be called once, after all ports are disconnected. + """ + for key in self._callLaterList.keys(): + try: + if key > time.time(): + self._callLaterList[key].cancel() + except Exception, e: + print e + del self._callLaterList[key] + #TODO: test: do we really need the reactor.iterate() call? + reactor.iterate() diff --git a/lbrynet/dht/routingtable.py b/lbrynet/dht/routingtable.py new file mode 100644 index 000000000..0a237cea9 --- /dev/null +++ b/lbrynet/dht/routingtable.py @@ -0,0 +1,422 @@ +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive +# +# The docstrings in this module contain epytext markup; API documentation +# may be created by processing this file with epydoc: http://epydoc.sf.net + +import time, random + +import constants +import kbucket +from protocol import TimeoutError + +class RoutingTable(object): + """ Interface for RPC message translators/formatters + + Classes inheriting from this should provide a suitable routing table for + a parent Node object (i.e. 
the local entity in the Kademlia network) + """ + def __init__(self, parentNodeID): + """ + @param parentNodeID: The n-bit node ID of the node to which this + routing table belongs + @type parentNodeID: str + """ + def addContact(self, contact): + """ Add the given contact to the correct k-bucket; if it already + exists, its status will be updated + + @param contact: The contact to add to this node's k-buckets + @type contact: kademlia.contact.Contact + """ + + def distance(self, keyOne, keyTwo): + """ Calculate the XOR result between two string variables + + @return: XOR result of two long variables + @rtype: long + """ + valKeyOne = long(keyOne.encode('hex'), 16) + valKeyTwo = long(keyTwo.encode('hex'), 16) + return valKeyOne ^ valKeyTwo + + def findCloseNodes(self, key, count, _rpcNodeID=None): + """ Finds a number of known nodes closest to the node/value with the + specified key. + + @param key: the n-bit key (i.e. the node or value ID) to search for + @type key: str + @param count: the amount of contacts to return + @type count: int + @param _rpcNodeID: Used during RPC, this is be the sender's Node ID + Whatever ID is passed in the paramater will get + excluded from the list of returned contacts. + @type _rpcNodeID: str + + @return: A list of node contacts (C{kademlia.contact.Contact instances}) + closest to the specified key. + This method will return C{k} (or C{count}, if specified) + contacts if at all possible; it will only return fewer if the + node is returning all of the contacts that it knows of. + @rtype: list + """ + def getContact(self, contactID): + """ Returns the (known) contact with the specified node ID + + @raise ValueError: No contact with the specified contact ID is known + by this node + """ + def getRefreshList(self, startIndex=0, force=False): + """ Finds all k-buckets that need refreshing, starting at the + k-bucket with the specified index, and returns IDs to be searched for + in order to refresh those k-buckets + + @param startIndex: The index of the bucket to start refreshing at; + this bucket and those further away from it will + be refreshed. For example, when joining the + network, this node will set this to the index of + the bucket after the one containing it's closest + neighbour. + @type startIndex: index + @param force: If this is C{True}, all buckets (in the specified range) + will be refreshed, regardless of the time they were last + accessed. + @type force: bool + + @return: A list of node ID's that the parent node should search for + in order to refresh the routing Table + @rtype: list + """ + def removeContact(self, contactID): + """ Remove the contact with the specified node ID from the routing + table + + @param contactID: The node ID of the contact to remove + @type contactID: str + """ + def touchKBucket(self, key): + """ Update the "last accessed" timestamp of the k-bucket which covers + the range containing the specified key in the key/ID space + + @param key: A key in the range of the target k-bucket + @type key: str + """ + + +class TreeRoutingTable(RoutingTable): + """ This class implements a routing table used by a Node class. + + The Kademlia routing table is a binary tree whose leaves are k-buckets, + where each k-bucket contains nodes with some common prefix of their IDs. + This prefix is the k-bucket's position in the binary tree; it therefore + covers some range of ID values, and together all of the k-buckets cover + the entire n-bit ID (or key) space (with no overlap). 
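# A tiny worked example of the XOR metric defined in RoutingTable.distance() above, using
# two-byte keys for readability (real keys here are 48-byte sha384 digests):
def xor_distance(key_one, key_two):
    return long(key_one.encode('hex'), 16) ^ long(key_two.encode('hex'), 16)

# xor_distance('\x12\x34', '\x12\x30') == 0x0004   (shared prefix -> small distance)
# xor_distance('\x12\x34', '\x92\x34') == 0x8000   (differing high bit -> large distance)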
+ + @note: In this implementation, nodes in the tree (the k-buckets) are + added dynamically, as needed; this technique is described in the 13-page + version of the Kademlia paper, in section 2.4. It does, however, use the + C{PING} RPC-based k-bucket eviction algorithm described in section 2.2 of + that paper. + """ + def __init__(self, parentNodeID): + """ + @param parentNodeID: The n-bit node ID of the node to which this + routing table belongs + @type parentNodeID: str + """ + # Create the initial (single) k-bucket covering the range of the entire n-bit ID space + self._buckets = [kbucket.KBucket(rangeMin=0, rangeMax=2**constants.key_bits)] + self._parentNodeID = parentNodeID + + def addContact(self, contact): + """ Add the given contact to the correct k-bucket; if it already + exists, its status will be updated + + @param contact: The contact to add to this node's k-buckets + @type contact: kademlia.contact.Contact + """ + if contact.id == self._parentNodeID: + return + + bucketIndex = self._kbucketIndex(contact.id) + try: + self._buckets[bucketIndex].addContact(contact) + except kbucket.BucketFull: + # The bucket is full; see if it can be split (by checking if its range includes the host node's id) + if self._buckets[bucketIndex].keyInRange(self._parentNodeID): + self._splitBucket(bucketIndex) + # Retry the insertion attempt + self.addContact(contact) + else: + # We can't split the k-bucket + # NOTE: + # In section 2.4 of the 13-page version of the Kademlia paper, it is specified that + # in this case, the new contact should simply be dropped. However, in section 2.2, + # it states that the head contact in the k-bucket (i.e. the least-recently seen node) + # should be pinged - if it does not reply, it should be dropped, and the new contact + # added to the tail of the k-bucket. This implementation follows section 2.2 regarding + # this point. + headContact = self._buckets[bucketIndex]._contacts[0] + + def replaceContact(failure): + """ Callback for the deferred PING RPC to see if the head + node in the k-bucket is still responding + + @type failure: twisted.python.failure.Failure + """ + failure.trap(TimeoutError) + print '==replacing contact==' + # Remove the old contact... + deadContactID = failure.getErrorMessage() + try: + self._buckets[bucketIndex].removeContact(deadContactID) + except ValueError: + # The contact has already been removed (probably due to a timeout) + pass + # ...and add the new one at the tail of the bucket + self.addContact(contact) + + # Ping the least-recently seen contact in this k-bucket + headContact = self._buckets[bucketIndex]._contacts[0] + df = headContact.ping() + # If there's an error (i.e. timeout), remove the head contact, and append the new one + df.addErrback(replaceContact) + + def findCloseNodes(self, key, count, _rpcNodeID=None): + """ Finds a number of known nodes closest to the node/value with the + specified key. + + @param key: the n-bit key (i.e. the node or value ID) to search for + @type key: str + @param count: the amount of contacts to return + @type count: int + @param _rpcNodeID: Used during RPC, this is be the sender's Node ID + Whatever ID is passed in the paramater will get + excluded from the list of returned contacts. + @type _rpcNodeID: str + + @return: A list of node contacts (C{kademlia.contact.Contact instances}) + closest to the specified key. + This method will return C{k} (or C{count}, if specified) + contacts if at all possible; it will only return fewer if the + node is returning all of the contacts that it knows of. 
+ @rtype: list + """ + #if key == self.id: + # bucketIndex = 0 #TODO: maybe not allow this to continue? + #else: + bucketIndex = self._kbucketIndex(key) + closestNodes = self._buckets[bucketIndex].getContacts(constants.k, _rpcNodeID) + # This method must return k contacts (even if we have the node with the specified key as node ID), + # unless there is less than k remote nodes in the routing table + i = 1 + canGoLower = bucketIndex-i >= 0 + canGoHigher = bucketIndex+i < len(self._buckets) + # Fill up the node list to k nodes, starting with the closest neighbouring nodes known + while len(closestNodes) < constants.k and (canGoLower or canGoHigher): + #TODO: this may need to be optimized + if canGoLower: + closestNodes.extend(self._buckets[bucketIndex-i].getContacts(constants.k - len(closestNodes), _rpcNodeID)) + canGoLower = bucketIndex-(i+1) >= 0 + if canGoHigher: + closestNodes.extend(self._buckets[bucketIndex+i].getContacts(constants.k - len(closestNodes), _rpcNodeID)) + canGoHigher = bucketIndex+(i+1) < len(self._buckets) + i += 1 + return closestNodes + + def getContact(self, contactID): + """ Returns the (known) contact with the specified node ID + + @raise ValueError: No contact with the specified contact ID is known + by this node + """ + bucketIndex = self._kbucketIndex(contactID) + try: + contact = self._buckets[bucketIndex].getContact(contactID) + except ValueError: + raise + else: + return contact + + def getRefreshList(self, startIndex=0, force=False): + """ Finds all k-buckets that need refreshing, starting at the + k-bucket with the specified index, and returns IDs to be searched for + in order to refresh those k-buckets + + @param startIndex: The index of the bucket to start refreshing at; + this bucket and those further away from it will + be refreshed. For example, when joining the + network, this node will set this to the index of + the bucket after the one containing it's closest + neighbour. + @type startIndex: index + @param force: If this is C{True}, all buckets (in the specified range) + will be refreshed, regardless of the time they were last + accessed. 
+ @type force: bool + + @return: A list of node ID's that the parent node should search for + in order to refresh the routing Table + @rtype: list + """ + bucketIndex = startIndex + refreshIDs = [] + for bucket in self._buckets[startIndex:]: + if force or (int(time.time()) - bucket.lastAccessed >= constants.refreshTimeout): + searchID = self._randomIDInBucketRange(bucketIndex) + refreshIDs.append(searchID) + bucketIndex += 1 + return refreshIDs + + def removeContact(self, contactID): + """ Remove the contact with the specified node ID from the routing + table + + @param contactID: The node ID of the contact to remove + @type contactID: str + """ + bucketIndex = self._kbucketIndex(contactID) + try: + self._buckets[bucketIndex].removeContact(contactID) + except ValueError: + #print 'removeContact(): Contact not in routing table' + return + + def touchKBucket(self, key): + """ Update the "last accessed" timestamp of the k-bucket which covers + the range containing the specified key in the key/ID space + + @param key: A key in the range of the target k-bucket + @type key: str + """ + bucketIndex = self._kbucketIndex(key) + self._buckets[bucketIndex].lastAccessed = int(time.time()) + + def _kbucketIndex(self, key): + """ Calculate the index of the k-bucket which is responsible for the + specified key (or ID) + + @param key: The key for which to find the appropriate k-bucket index + @type key: str + + @return: The index of the k-bucket responsible for the specified key + @rtype: int + """ + valKey = long(key.encode('hex'), 16) + i = 0 + for bucket in self._buckets: + if bucket.keyInRange(valKey): + return i + else: + i += 1 + return i + + def _randomIDInBucketRange(self, bucketIndex): + """ Returns a random ID in the specified k-bucket's range + + @param bucketIndex: The index of the k-bucket to use + @type bucketIndex: int + """ + idValue = random.randrange(self._buckets[bucketIndex].rangeMin, self._buckets[bucketIndex].rangeMax) + randomID = hex(idValue)[2:] + if randomID[-1] == 'L': + randomID = randomID[:-1] + if len(randomID) % 2 != 0: + randomID = '0' + randomID + randomID = randomID.decode('hex') + randomID = (constants.key_bits/8 - len(randomID))*'\x00' + randomID + return randomID + + def _splitBucket(self, oldBucketIndex): + """ Splits the specified k-bucket into two new buckets which together + cover the same range in the key/ID space + + @param oldBucketIndex: The index of k-bucket to split (in this table's + list of k-buckets) + @type oldBucketIndex: int + """ + # Resize the range of the current (old) k-bucket + oldBucket = self._buckets[oldBucketIndex] + splitPoint = oldBucket.rangeMax - (oldBucket.rangeMax - oldBucket.rangeMin)/2 + # Create a new k-bucket to cover the range split off from the old bucket + newBucket = kbucket.KBucket(splitPoint, oldBucket.rangeMax) + oldBucket.rangeMax = splitPoint + # Now, add the new bucket into the routing table tree + self._buckets.insert(oldBucketIndex + 1, newBucket) + # Finally, copy all nodes that belong to the new k-bucket into it... + for contact in oldBucket._contacts: + if newBucket.keyInRange(contact.id): + newBucket.addContact(contact) + # ...and remove them from the old bucket + for contact in newBucket._contacts: + oldBucket.removeContact(contact) + +class OptimizedTreeRoutingTable(TreeRoutingTable): + """ A version of the "tree"-type routing table specified by Kademlia, + along with contact accounting optimizations specified in section 4.1 of + of the 13-page version of the Kademlia paper. 
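# The split arithmetic used by _splitBucket() above, restated with small numbers instead of the
# real [0, 2**constants.key_bits) key space:
def split_range(range_min, range_max):
    """Mirror the bucket split: the old bucket keeps the lower half, the new one the upper."""
    split_point = range_max - (range_max - range_min) // 2
    return (range_min, split_point), (split_point, range_max)

# split_range(0, 16) == ((0, 8), (8, 16)); contacts whose ids fall in [8, 16) are copied into
# the new bucket and removed from the old one.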
+ """ + def __init__(self, parentNodeID): + TreeRoutingTable.__init__(self, parentNodeID) + # Cache containing nodes eligible to replace stale k-bucket entries + self._replacementCache = {} + + def addContact(self, contact): + """ Add the given contact to the correct k-bucket; if it already + exists, its status will be updated + + @param contact: The contact to add to this node's k-buckets + @type contact: kademlia.contact.Contact + """ + if contact.id == self._parentNodeID: + return + + # Initialize/reset the "successively failed RPC" counter + contact.failedRPCs = 0 + + bucketIndex = self._kbucketIndex(contact.id) + try: + self._buckets[bucketIndex].addContact(contact) + except kbucket.BucketFull: + # The bucket is full; see if it can be split (by checking if its range includes the host node's id) + if self._buckets[bucketIndex].keyInRange(self._parentNodeID): + self._splitBucket(bucketIndex) + # Retry the insertion attempt + self.addContact(contact) + else: + # We can't split the k-bucket + # NOTE: This implementation follows section 4.1 of the 13 page version + # of the Kademlia paper (optimized contact accounting without PINGs + #- results in much less network traffic, at the expense of some memory) + + # Put the new contact in our replacement cache for the corresponding k-bucket (or update it's position if it exists already) + if not self._replacementCache.has_key(bucketIndex): + self._replacementCache[bucketIndex] = [] + if contact in self._replacementCache[bucketIndex]: + self._replacementCache[bucketIndex].remove(contact) + #TODO: Using k to limit the size of the contact replacement cache - maybe define a seperate value for this in constants.py? + elif len(self._replacementCache) >= constants.k: + self._replacementCache.pop(0) + self._replacementCache[bucketIndex].append(contact) + + def removeContact(self, contactID): + """ Remove the contact with the specified node ID from the routing + table + + @param contactID: The node ID of the contact to remove + @type contactID: str + """ + bucketIndex = self._kbucketIndex(contactID) + try: + contact = self._buckets[bucketIndex].getContact(contactID) + except ValueError: + #print 'removeContact(): Contact not in routing table' + return + contact.failedRPCs += 1 + if contact.failedRPCs >= 5: + self._buckets[bucketIndex].removeContact(contactID) + # Replace this stale contact with one from our replacemnent cache, if we have any + if self._replacementCache.has_key(bucketIndex): + if len(self._replacementCache[bucketIndex]) > 0: + self._buckets[bucketIndex].addContact( self._replacementCache[bucketIndex].pop() ) diff --git a/lbrynet/dht_scripts.py b/lbrynet/dht_scripts.py new file mode 100644 index 000000000..952f54bc8 --- /dev/null +++ b/lbrynet/dht_scripts.py @@ -0,0 +1,100 @@ +from lbrynet.dht.node import Node +import binascii +from twisted.internet import reactor, task +import logging +import sys +from lbrynet.core.utils import generate_id + + +def print_usage(): + print "Usage:\n%s UDP_PORT KNOWN_NODE_IP KNOWN_NODE_PORT HASH" + + +def join_network(udp_port, known_nodes): + lbryid = generate_id() + + logging.info('Creating Node...') + node = Node(udpPort=udp_port, lbryid=lbryid) + + logging.info('Joining network...') + d = node.joinNetwork(known_nodes) + + def log_network_size(): + logging.info("Approximate number of nodes in DHT: %s", str(node.getApproximateTotalDHTNodes())) + logging.info("Approximate number of blobs in DHT: %s", str(node.getApproximateTotalHashes())) + + d.addCallback(lambda _: log_network_size()) + + 
d.addCallback(lambda _: node) + + return d + + +def get_hosts(node, h): + + def print_hosts(hosts): + print "Hosts returned from the DHT: " + print hosts + + logging.info("Looking up %s", h) + d = node.getPeersForBlob(h) + d.addCallback(print_hosts) + return d + + +def announce_hash(node, h): + d = node.announceHaveBlob(h, 34567) + + def log_results(results): + for success, result in results: + if success: + logging.info("Succeeded: %s", str(result)) + else: + logging.info("Failed: %s", str(result.getErrorMessage())) + + d.addCallback(log_results) + return d + + +def get_args(): + if len(sys.argv) < 5: + print_usage() + sys.exit(1) + udp_port = int(sys.argv[1]) + known_nodes = [(sys.argv[2], int(sys.argv[3]))] + h = binascii.unhexlify(sys.argv[4]) + return udp_port, known_nodes, h + + +def run_dht_script(dht_func): + log_format = "(%(asctime)s)[%(filename)s:%(lineno)s] %(funcName)s(): %(message)s" + logging.basicConfig(level=logging.DEBUG, format=log_format) + + udp_port, known_nodes, h = get_args() + + d = task.deferLater(reactor, 0, join_network, udp_port, known_nodes) + + def run_dht_func(node): + return dht_func(node, h) + + d.addCallback(run_dht_func) + + def log_err(err): + logging.error("An error occurred: %s", err.getTraceback()) + return err + + def shut_down(): + logging.info("Shutting down") + reactor.stop() + + d.addErrback(log_err) + d.addBoth(lambda _: shut_down()) + reactor.run() + + +def get_hosts_for_hash_in_dht(): + run_dht_script(get_hosts) + + +def announce_hash_to_dht(): + run_dht_script(announce_hash) \ No newline at end of file diff --git a/lbrynet/dhttest.py b/lbrynet/dhttest.py new file mode 100644 index 000000000..fc3e9a8b9 --- /dev/null +++ b/lbrynet/dhttest.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python +# +# This is a basic single-node example of how to use the Entangled DHT. It creates a Node and (optionally) joins an existing DHT. +# It then does a Kademlia store and find, and then it deletes the stored value (non-Kademlia method). +# +# No tuple space functionality is demonstrated by this script. +# +# To test it properly, start a multi-node Kademlia DHT with the "create_network.py" +# script and point this node to that, e.g.: +# $python create_network.py 10 127.0.0.1 +# +# $python basic_example.py 5000 127.0.0.1 4000 +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive +# + +# Thanks to Paul Cannon for IP-address resolution functions (taken from aspn.activestate.com) + + + +import os, sys, time, signal, hashlib, random +import twisted.internet.reactor +from lbrynet.dht.node import Node +#from entangled.kademlia.datastore import SQLiteDataStore + +# The Entangled DHT node; instantiated in the main() method +node = None + +# The key to use for this example when storing/retrieving data +hash = hashlib.sha384() +hash.update("key") +KEY = hash.digest() +# The value to store +VALUE = random.randint(10000, 20000) +import binascii +lbryid = KEY + + +def storeValue(key, value): + """ Stores the specified value in the DHT using the specified key """ + global node + print '\nStoring value; Key: %s, Value: %s' % (key, value) + # Store the value in the DHT. This method returns a Twisted Deferred result, which we then add callbacks to + deferredResult = node.announceHaveHash(key, value) + # Add our callback; this method is called when the operation completes... 
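# run_dht_script() in dht_scripts.py above accepts any function of (node, binary_hash) that
# returns a deferred, so other one-off DHT probes can reuse the same bootstrap and shutdown
# plumbing. A hypothetical example:
import logging

def count_peers(node, h):
    d = node.getPeersForBlob(h)
    d.addCallback(lambda hosts: logging.info("%d host(s) have announced this blob", len(hosts)))
    return d

# run_dht_script(count_peers)   # reads UDP_PORT KNOWN_NODE_IP KNOWN_NODE_PORT HASH from sys.argv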
+ deferredResult.addCallback(storeValueCallback) + # ...and for error handling, add an "error callback" as well. + # For this example script, I use a generic error handler; usually you would need something more specific + deferredResult.addErrback(genericErrorCallback) + + +def storeValueCallback(*args, **kwargs): + """ Callback function that is invoked when the storeValue() operation succeeds """ + print 'Value has been stored in the DHT' + # Now that the value has been stored, schedule that the value is read again after 2.5 seconds + print 'Scheduling retrieval in 2.5 seconds...' + twisted.internet.reactor.callLater(2.5, getValue) + + +def genericErrorCallback(failure): + """ Callback function that is invoked if an error occurs during any of the DHT operations """ + print 'An error has occurred:', failure.getErrorMessage() + twisted.internet.reactor.callLater(0, stop) + +def getValue(): + """ Retrieves the value of the specified key (KEY) from the DHT """ + global node, KEY + # Get the value for the specified key (immediately returns a Twisted deferred result) + print '\nRetrieving value from DHT for key "%s"...' % binascii.unhexlify("f7d9dc4de674eaa2c5a022eb95bc0d33ec2e75c6") + deferredResult = node.iterativeFindValue(binascii.unhexlify("f7d9dc4de674eaa2c5a022eb95bc0d33ec2e75c6")) + #deferredResult = node.iterativeFindValue(KEY) + # Add a callback to this result; this will be called as soon as the operation has completed + deferredResult.addCallback(getValueCallback) + # As before, add the generic error callback + deferredResult.addErrback(genericErrorCallback) + + +def getValueCallback(result): + """ Callback function that is invoked when the getValue() operation succeeds """ + # Check if the key was found (result is a dict of format {key: value}) or not (in which case a list of "closest" Kademlia contacts would be returned instead") + print "Got the value" + print result + #if type(result) == dict: + # for v in result[binascii.unhexlify("5292fa9c426621f02419f5050900392bdff5036c")]: + # print "v:", v + # print "v[6:", v[6:] + # print "lbryid:",lbryid + # print "lbryid == v[6:]:", lbryid == v[6:] + # print 'Value successfully retrieved: %s' % result[KEY] + + #else: + # print 'Value not found' + # Either way, schedule a "delete" operation for the key + #print 'Scheduling removal in 2.5 seconds...' + #twisted.internet.reactor.callLater(2.5, deleteValue) + print 'Scheduling shutdown in 2.5 seconds...' + twisted.internet.reactor.callLater(2.5, stop) + + +def stop(): + """ Stops the Twisted reactor, and thus the script """ + print '\nStopping Kademlia node and terminating script...' + twisted.internet.reactor.stop() + +if __name__ == '__main__': + + import sys, os + if len(sys.argv) < 2: + print 'Usage:\n%s UDP_PORT [KNOWN_NODE_IP KNOWN_NODE_PORT]' % sys.argv[0] + print 'or:\n%s UDP_PORT [FILE_WITH_KNOWN_NODES]' % sys.argv[0] + print '\nIf a file is specified, it should containg one IP address and UDP port\nper line, seperated by a space.' + sys.exit(1) + try: + int(sys.argv[1]) + except ValueError: + print '\nUDP_PORT must be an integer value.\n' + print 'Usage:\n%s UDP_PORT [KNOWN_NODE_IP KNOWN_NODE_PORT]' % sys.argv[0] + print 'or:\n%s UDP_PORT [FILE_WITH_KNOWN_NODES]' % sys.argv[0] + print '\nIf a file is specified, it should contain one IP address and UDP port\nper line, seperated by a space.' 
+ sys.exit(1) + + if len(sys.argv) == 4: + knownNodes = [(sys.argv[2], int(sys.argv[3]))] + elif len(sys.argv) == 3: + knownNodes = [] + f = open(sys.argv[2], 'r') + lines = f.readlines() + f.close() + for line in lines: + ipAddress, udpPort = line.split() + knownNodes.append((ipAddress, int(udpPort))) + else: + knownNodes = None + print '\nNOTE: You have not specified any remote DHT node(s) to connect to' + print 'It will thus not be aware of any existing DHT, but will still function as a self-contained DHT (until another node contacts it).' + print 'Run this script without any arguments for info.\n' + + # Set up SQLite-based data store (you could use an in-memory store instead, for example) + #if os.path.isfile('/tmp/dbFile%s.db' % sys.argv[1]): + # os.remove('/tmp/dbFile%s.db' % sys.argv[1]) + #dataStore = SQLiteDataStore(dbFile = '/tmp/dbFile%s.db' % sys.argv[1]) + # Create the Entangled node. It extends the functionality of a basic Kademlia node (but is fully backwards-compatible with a Kademlia-only network) + # If you wish to have a pure Kademlia network, use the entangled.kademlia.node.Node class instead + print 'Creating Node...' + #node = EntangledNode( udpPort=int(sys.argv[1]), dataStore=dataStore ) + node = Node( udpPort=int(sys.argv[1]), lbryid=lbryid) + + # Schedule the node to join the Kademlia/Entangled DHT + node.joinNetwork(knownNodes) + # Schedule the "storeValue() call to be invoked after 2.5 seconds, using KEY and VALUE as arguments + #twisted.internet.reactor.callLater(2.5, storeValue, KEY, VALUE) + twisted.internet.reactor.callLater(2.5, getValue) + # Start the Twisted reactor - this fires up all networking, and allows the scheduled join operation to take place + print 'Twisted reactor started (script will commence in 2.5 seconds)' + twisted.internet.reactor.run() + diff --git a/lbrynet/interfaces.py b/lbrynet/interfaces.py new file mode 100644 index 000000000..6eae6af0e --- /dev/null +++ b/lbrynet/interfaces.py @@ -0,0 +1,633 @@ +""" +Interfaces which are implemented by various classes within LBRYnet. +""" +from zope.interface import Interface + + +class IPeerFinder(Interface): + """ + Used to find peers by sha384 hashes which they claim to be associated with. + """ + def find_peers_for_blob(self, blob_hash): + """ + Look for peers claiming to be associated with a sha384 hashsum. + + @param blob_hash: The sha384 hashsum to use to look up peers. + @type blob_hash: string, hex encoded + + @return: a Deferred object which fires with a list of Peer objects + @rtype: Deferred which fires with [Peer] + """ + + +class IRequestSender(Interface): + """ + Used to connect to a peer, send requests to it, and return the responses to those requests. + """ + def add_request(self, request): + """ + Add a request to the next message that will be sent to the peer + + @param request: a request to be sent to the peer in the next message + @type request: ClientRequest + + @return: Deferred object which will callback with the response to this request, a dict + @rtype: Deferred which fires with dict + """ + + def add_blob_request(self, blob_request): + """ + Add a request for a blob to the next message that will be sent to the peer. + + This will cause the protocol to call blob_request.write(data) for all incoming + data, after the response message has been parsed out, until blob_request.finished_deferred fires. 
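# A minimal sketch of an IPeerFinder implementation as described above. A real finder queries
# the DHT for the hex-encoded sha384 hash; this toy version always reports the same,
# pre-configured peers (all names besides the interface are hypothetical):
from twisted.internet import defer
from zope.interface import implementer

@implementer(IPeerFinder)
class StaticPeerFinder(object):
    def __init__(self, peers):
        self._peers = peers

    def find_peers_for_blob(self, blob_hash):
        return defer.succeed(list(self._peers))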
+ + @param blob_request: the request for the blob + @type blob_request: ClientBlobRequest + + @return: Deferred object which will callback with the response to this request + @rtype: Deferred which fires with dict + """ + + +class IRequestCreator(Interface): + """ + Send requests, via an IRequestSender, to peers. + """ + + def send_next_request(self, peer, protocol): + """ + Create a Request object for the peer and then give the protocol that request. + + @param peer: the Peer object which the request will be sent to. + @type peer: Peer + + @param protocol: the protocol to pass the request to. + @type protocol: object which implements IRequestSender + + @return: Deferred object which will callback with True or False depending on whether a Request was sent + @rtype: Deferred which fires with boolean + """ + + def get_new_peers(self): + """ + Get some new peers which the request creator wants to send requests to. + + @return: Deferred object which will callback with [Peer] + @rtype: Deferred which fires with [Peer] + """ + + +class IMetadataHandler(Interface): + """ + Get metadata for the IDownloadManager. + """ + def get_initial_blobs(self): + """ + Return metadata about blobs that are known to be associated with the stream at the time that the + stream is set up. + + @return: Deferred object which will call back with a list of BlobInfo objects + @rtype: Deferred which fires with [BlobInfo] + """ + + def final_blob_num(self): + """ + If the last blob in the stream is known, return its blob_num. Otherwise, return None. + + @return: integer representing the final blob num in the stream, or None + @rtype: integer or None + """ + + +class IDownloadManager(Interface): + """ + Manage the downloading of an associated group of blobs, referred to as a stream. + + These objects keep track of metadata about the stream, are responsible for starting and stopping + other components, and handle communication between other components. + """ + + def start_downloading(self): + """ + Load the initial metadata about the stream and then start the other components. + + @return: Deferred which fires when the other components have been started. + @rtype: Deferred which fires with boolean + """ + + def resume_downloading(self): + """ + Start the other components after they have been stopped. + + @return: Deferred which fires when the other components have been started. + @rtype: Deferred which fires with boolean + """ + + def pause_downloading(self): + """ + Stop the other components. + + @return: Deferred which fires when the other components have been stopped. + @rtype: Deferred which fires with boolean + """ + + def add_blobs_to_download(self, blobs): + """ + Add blobs to the list of blobs that should be downloaded + + @param blobs: list of BlobInfos that are associated with the stream being downloaded + @type blobs: [BlobInfo] + + @return: DeferredList which fires with the result of adding each previously unknown BlobInfo + to the list of known BlobInfos. + @rtype: DeferredList which fires with [(boolean, Failure/None)] + """ + + def stream_position(self): + """ + Returns the blob_num of the next blob needed in the stream. + + If the stream already has all of the blobs it needs, then this will return the blob_num + of the last blob in the stream plus 1. + + @return: the blob_num of the next blob needed, or the last blob_num + 1. + @rtype: integer + """ + + def needed_blobs(self): + """ + Returns a list of BlobInfos representing all of the blobs that the stream still needs to download. 
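As a hedged sketch of how these zope.interface definitions are meant to be implemented, a trivial in-memory IPeerFinder might look as follows; the StaticPeerFinder name and its dict-backed lookup are invented for illustration and are not part of the codebase.

from twisted.internet import defer
from zope.interface import implements
from lbrynet.interfaces import IPeerFinder


class StaticPeerFinder(object):
    """Illustrative IPeerFinder backed by a fixed mapping of hex blob hashes
    to lists of Peer objects."""
    implements(IPeerFinder)

    def __init__(self, peers_by_blob_hash):
        self._peers = peers_by_blob_hash

    def find_peers_for_blob(self, blob_hash):
        # The interface requires a Deferred even though this lookup is
        # synchronous.
        return defer.succeed(self._peers.get(blob_hash, []))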
+ + @return: the list of BlobInfos representing blobs that the stream still needs to download. + @rtype: [BlobInfo] + """ + + def final_blob_num(self): + """ + If the last blob in the stream is known, return its blob_num. If not, return None. + + @return: The blob_num of the last blob in the stream, or None if it is unknown. + @rtype: integer or None + """ + + def handle_blob(self, blob_num): + """ + This function is called when the next blob in the stream is ready to be handled, whatever that may mean. + + @param blob_num: The blob_num of the blob that is ready to be handled. + @type blob_num: integer + + @return: A Deferred which fires when the blob has been 'handled' + @rtype: Deferred which can fire with anything + """ + + +class IConnectionManager(Interface): + """ + Connects to peers so that IRequestCreators can send their requests. + """ + def get_next_request(self, peer, protocol): + """ + Ask all IRequestCreators belonging to this object to create a Request for peer and give it to protocol + + @param peer: the peer which the request will be sent to. + @type peer: Peer + + @param protocol: the protocol which the request should be sent to by the IRequestCreator. + @type protocol: IRequestSender + + @return: Deferred object which will callback with True or False depending on whether the IRequestSender + should send the request or hang up + @rtype: Deferred which fires with boolean + """ + + def protocol_disconnected(self, peer, protocol): + """ + Inform the IConnectionManager that the protocol has been disconnected + + @param peer: The peer which the connection was to. + @type peer: Peer + + @param protocol: The protocol which was disconnected. + @type protocol: Protocol + + @return: None + """ + + +class IProgressManager(Interface): + """ + Responsible for keeping track of the progress of the download. + + Specifically, it is their responsibility to decide which blobs need to be downloaded and keep track of + the progress of the download + """ + def stream_position(self): + """ + Returns the blob_num of the next blob needed in the stream. + + If the stream already has all of the blobs it needs, then this will return the blob_num + of the last blob in the stream plus 1. + + @return: the blob_num of the next blob needed, or the last blob_num + 1. + @rtype: integer + """ + + def needed_blobs(self): + """ + Returns a list of BlobInfos representing all of the blobs that the stream still needs to download. + + @return: the list of BlobInfos representing blobs that the stream still needs to download. + @rtype: [BlobInfo] + """ + + def blob_downloaded(self, blob, blob_info): + """ + Mark that a blob has been downloaded and does not need to be downloaded again + + @param blob: the blob that has been downloaded. + @type blob: Blob + + @param blob_info: the metadata of the blob that has been downloaded. + @type blob_info: BlobInfo + + @return: None + """ + + +class IBlobHandler(Interface): + """ + Responsible for doing whatever should be done with blobs that have been downloaded. + """ + def blob_downloaded(self, blob, blob_info): + """ + Do whatever the downloader is supposed to do when a blob has been downloaded + + @param blob: The downloaded blob + @type blob: Blob + + @param blob_info: The metadata of the downloaded blob + @type blob_info: BlobInfo + + @return: A Deferred which fires when the blob has been handled. + @rtype: Deferred which can fire with anything + """ + + +class IRateLimited(Interface): + """ + Have the ability to be throttled (temporarily stopped). 
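To make the blob-handling contract concrete, here is a hedged sketch of an IBlobHandler that merely records what it is given; RecordingBlobHandler and its attribute are invented, and a real handler would decrypt or persist the blob.

from twisted.internet import defer
from zope.interface import implements
from lbrynet.interfaces import IBlobHandler


class RecordingBlobHandler(object):
    """Illustrative IBlobHandler that only remembers the blob numbers it has
    been handed."""
    implements(IBlobHandler)

    def __init__(self):
        self.handled_blob_nums = []

    def blob_downloaded(self, blob, blob_info):
        # A real handler would do something useful with the blob; this sketch
        # just records it and reports success via a Deferred, as required.
        self.handled_blob_nums.append(blob_info.blob_num)
        return defer.succeed(True)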
+ """ + def throttle_upload(self): + """ + Stop uploading data until unthrottle_upload is called. + + @return: None + """ + + def throttle_download(self): + """ + Stop downloading data until unthrottle_upload is called. + + @return: None + """ + + def unthrottle_upload(self): + """ + Resume uploading data at will until throttle_upload is called. + + @return: None + """ + + def unthrottle_downlad(self): + """ + Resume downloading data at will until throttle_download is called. + + @return: None + """ + + +class IRateLimiter(Interface): + """ + Can keep track of download and upload rates and can throttle objects which implement the + IRateLimited interface. + """ + def report_dl_bytes(self, num_bytes): + """ + Inform the IRateLimiter that num_bytes have been downloaded. + + @param num_bytes: the number of bytes that have been downloaded + @type num_bytes: integer + + @return: None + """ + + def report_ul_bytes(self, num_bytes): + """ + Inform the IRateLimiter that num_bytes have been uploaded. + + @param num_bytes: the number of bytes that have been uploaded + @type num_bytes: integer + + @return: None + """ + + def register_protocol(self, protocol): + """ + Register an IRateLimited object with the IRateLimiter so that the IRateLimiter can throttle it + + @param protocol: An object implementing the interface IRateLimited + @type protocol: Object implementing IRateLimited + + @return: None + """ + + def unregister_protocol(self, protocol): + """ + Unregister an IRateLimited object so that it won't be throttled any more. + + @param protocol: An object implementing the interface IRateLimited, which was previously registered with this + IRateLimiter via "register_protocol" + @type protocol: Object implementing IRateLimited + + @return: None + """ + + +class IRequestHandler(Interface): + """ + Pass client queries on to IQueryHandlers + """ + def register_query_handler(self, query_handler, query_identifiers): + """ + Register a query handler, which will be passed any queries that + match any of the identifiers in query_identifiers + + @param query_handler: the object which will handle queries matching the given query_identifiers + @type query_handler: Object implementing IQueryHandler + + @param query_identifiers: A list of strings representing the query identifiers + for queries that should be passed to this handler + @type query_identifiers: [string] + + @return: None + """ + + def register_blob_sender(self, blob_sender): + """ + Register a blob sender which will be called after the response has + finished to see if it wants to send a blob + + @param blob_sender: the object which will upload the blob to the client. + @type blob_sender: IBlobSender + + @return: None + """ + + +class IBlobSender(Interface): + """ + Upload blobs to clients. + """ + def send_blob_if_requested(self, consumer): + """ + If a blob has been requested, write it to 'write' func of the consumer and then + callback the returned deferred when it has all been written + + @param consumer: the object implementing IConsumer which the file will be written to + @type consumer: object which implements IConsumer + + @return: Deferred which will fire when the blob sender is done, which will be + immediately if no blob should be sent. + @rtype: Deferred which fires with anything + """ + + +class IQueryHandler(Interface): + """ + Respond to requests from clients. 
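A hedged sketch of how an IRateLimiter and the IRateLimited objects registered with it could interact; the SimpleRateLimiter name, the byte threshold, and the explicit reset() call (standing in for a periodic timer) are all invented for illustration.

from zope.interface import implements
from lbrynet.interfaces import IRateLimiter


class SimpleRateLimiter(object):
    """Illustrative IRateLimiter: throttle every registered protocol once a
    fixed number of bytes has moved, unthrottle them on reset()."""
    implements(IRateLimiter)

    def __init__(self, max_bytes_per_period=1024 * 1024):
        self.max_bytes = max_bytes_per_period
        self.dl_bytes = 0
        self.ul_bytes = 0
        self.protocols = []

    def report_dl_bytes(self, num_bytes):
        self.dl_bytes += num_bytes
        self._check_throttle()

    def report_ul_bytes(self, num_bytes):
        self.ul_bytes += num_bytes
        self._check_throttle()

    def register_protocol(self, protocol):
        self.protocols.append(protocol)

    def unregister_protocol(self, protocol):
        if protocol in self.protocols:
            self.protocols.remove(protocol)

    def _check_throttle(self):
        if self.dl_bytes + self.ul_bytes >= self.max_bytes:
            for protocol in self.protocols:
                protocol.throttle_download()
                protocol.throttle_upload()

    def reset(self):
        # A real limiter would call this from a periodic task (e.g. once per second).
        self.dl_bytes = 0
        self.ul_bytes = 0
        for protocol in self.protocols:
            protocol.unthrottle_upload()
            protocol.unthrottle_downlad()  # sic: named this way in IRateLimited above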
+ """ + def register_with_request_handler(self, request_handler, peer): + """ + Register with the request handler to receive queries + + @param request_handler: the object implementing IRequestHandler to register with + @type request_handler: object implementing IRequestHandler + + @param peer: the Peer which this query handler will be answering requests from + @type peer: Peer + + @return: None + """ + + def handle_queries(self, queries): + """ + Return responses to queries from the client. + + @param queries: a dict representing the query_identifiers:queries that should be handled + @type queries: {string: dict} + + @return: a Deferred object which will callback with a dict of query responses + @rtype: Deferred which fires with {string: dict} + """ + + +class IQueryHandlerFactory(Interface): + """ + Construct IQueryHandlers to handle queries from each new client that connects. + """ + def build_query_handler(self): + """ + Create an object that implements the IQueryHandler interface + + @return: object that implements IQueryHandler + """ + + +class IStreamDownloaderFactory(Interface): + """ + Construct IStreamDownloaders and provide options that will be passed to those IStreamDownloaders. + """ + def get_downloader_options(self, sd_validator, payment_rate_manager): + """ + Return the list of options that can be used to modify IStreamDownloader behavior + + @param sd_validator: object containing stream metadata, which the options may depend on + @type sd_validator: object which implements IStreamDescriptorValidator interface + + @param payment_rate_manager: The payment rate manager currently in effect for the downloader + @type payment_rate_manager: PaymentRateManager + + @return: [(option_description, default)] + @rtype: [(string, string)] + """ + + def make_downloader(self, sd_validator, options, payment_rate_manager): + """ + Create an object that implements the IStreamDownloader interface + + @param sd_validator: object containing stream metadata which will be given to the IStreamDownloader + @type sd_validator: object which implements IStreamDescriptorValidator interface + + @param options: a list of strings that will be used by the IStreamDownloaderFactory to + construct the IStreamDownloader. the options are in the same order as they were given + by get_downloader_options. + @type options: [string] + + @param payment_rate_manager: the PaymentRateManager which the IStreamDownloader should use. + @type payment_rate_manager: PaymentRateManager + + @return: a Deferred which fires with the downloader object + @rtype: Deferred which fires with IStreamDownloader + """ + + def get_description(self): + """ + Return a string detailing what this downloader does with streams + + @return: short description of what the IStreamDownloader does. + @rtype: string + """ + + +class IStreamDownloader(Interface): + """ + Use metadata and data from the network for some useful purpose. + """ + def start(self): + """ + start downloading the stream + + @return: a Deferred which fires when the stream is finished downloading, or errbacks when the stream is + cancelled. + @rtype: Deferred which fires with anything + """ + + def insufficient_funds(self): + """ + this function informs the stream downloader that funds are too low to finish downloading. + + @return: None + """ + + +class IStreamDescriptorValidator(Interface): + """ + Pull metadata out of Stream Descriptor Files and perform some + validation on the metadata. 
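As a hedged sketch of the query-handling interfaces, the pair below answers a made-up 'ping' query identifier; none of these names are part of the real protocol.

from twisted.internet import defer
from zope.interface import implements
from lbrynet.interfaces import IQueryHandler, IQueryHandlerFactory


class PingQueryHandler(object):
    """Illustrative IQueryHandler answering an invented 'ping' query."""
    implements(IQueryHandler)
    query_identifiers = ['ping']

    def register_with_request_handler(self, request_handler, peer):
        self.peer = peer
        request_handler.register_query_handler(self, self.query_identifiers)

    def handle_queries(self, queries):
        response = {}
        if 'ping' in queries:
            response['ping'] = 'pong'
        return defer.succeed(response)


class PingQueryHandlerFactory(object):
    """Builds a fresh PingQueryHandler for each connecting client."""
    implements(IQueryHandlerFactory)

    def build_query_handler(self):
        return PingQueryHandler()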
+ """ + def validate(self): + """ + @return: whether the stream descriptor passes validation checks + @rtype: boolean + """ + + def info_to_show(self): + """ + @return: A list of tuples representing metadata that should be presented to the user before starting the + download + @rtype: [(string, string)] + """ + + +class ILBRYWallet(Interface): + """ + Send and receive payments. + + To send a payment, a payment reservation must be obtained first. This guarantees that a payment + isn't promised if it can't be paid. When the service in question is rendered, the payment + reservation must be given to the ILBRYWallet along with the final price. The reservation can also + be canceled. + """ + def stop(self): + """ + Send out any unsent payments, close any connections, and stop checking for incoming payments. + + @return: None + """ + + def start(self): + """ + Set up any connections and start checking for incoming payments + + @return: None + """ + def get_info_exchanger(self): + """ + Get the object that will be used to find the payment addresses of peers. + + @return: The object that will be used to find the payment addresses of peers. + @rtype: An object implementing IRequestCreator + """ + + def get_wallet_info_query_handler_factory(self): + """ + Get the object that will be used to give our payment address to peers. + + This must return an object implementing IQueryHandlerFactory. It will be used to + create IQueryHandler objects that will be registered with an IRequestHandler. + + @return: The object that will be used to give our payment address to peers. + @rtype: An object implementing IQueryHandlerFactory + """ + + def reserve_points(self, peer, amount): + """ + Ensure a certain amount of points are available to be sent as payment, before the service is rendered + + @param peer: The peer to which the payment will ultimately be sent + @type peer: Peer + + @param amount: The amount of points to reserve + @type amount: float + + @return: A ReservedPoints object which is given to send_points once the service has been rendered + @rtype: ReservedPoints + """ + + def cancel_point_reservation(self, reserved_points): + """ + Return all of the points that were reserved previously for some ReservedPoints object + + @param reserved_points: ReservedPoints previously returned by reserve_points + @type reserved_points: ReservedPoints + + @return: None + """ + + def send_points(self, reserved_points, amount): + """ + Schedule a payment to be sent to a peer + + @param reserved_points: ReservedPoints object previously returned by reserve_points. 
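A hedged sketch of the reservation flow this interface describes: reserve an upper bound before the service is rendered, then either send the final (smaller or equal) amount or cancel. The pay_for_service helper and its arguments are invented for illustration.

def pay_for_service(wallet, peer, max_price, actual_price):
    # Reserve the most we might owe before the service is rendered...
    reservation = wallet.reserve_points(peer, max_price)
    try:
        # ... render the service here ...
        # ...then schedule the actual payment, which must not exceed the reservation.
        return wallet.send_points(reservation, actual_price)
    except Exception:
        # If the service fell through, release the reserved points instead.
        wallet.cancel_point_reservation(reservation)
        raise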
+ @type reserved_points: ReservedPoints + + @param amount: amount of points to actually send, must be less than or equal to the + amount reserved in reserved_points + @type amount: float + + @return: Deferred which fires when the payment has been scheduled + @rtype: Deferred which fires with anything + """ + + def get_balance(self): + """ + Return the balance of this wallet + + @return: Deferred which fires with the balance of the wallet + @rtype: Deferred which fires with float + """ + + def add_expected_payment(self, peer, amount): + """ + Increase the number of points expected to be paid by a peer + + @param peer: the peer which is expected to pay the points + @type peer: Peer + + @param amount: the amount of points expected to be paid + @type amount: float + + @return: None + """ \ No newline at end of file diff --git a/lbrynet/lbryfile/LBRYFileMetadataManager.py b/lbrynet/lbryfile/LBRYFileMetadataManager.py new file mode 100644 index 000000000..270d1f0e3 --- /dev/null +++ b/lbrynet/lbryfile/LBRYFileMetadataManager.py @@ -0,0 +1,268 @@ +import logging +import leveldb +import json +import os +from twisted.internet import threads, defer +from lbrynet.core.Error import DuplicateStreamHashError + + +class DBLBRYFileMetadataManager(object): + """Store and provide access to LBRY file metadata using leveldb files""" + + def __init__(self, db_dir): + self.db_dir = db_dir + self.stream_info_db = None + self.stream_blob_db = None + self.stream_desc_db = None + + def setup(self): + return threads.deferToThread(self._open_db) + + def stop(self): + self.stream_info_db = None + self.stream_blob_db = None + self.stream_desc_db = None + return defer.succeed(True) + + def get_all_streams(self): + return threads.deferToThread(self._get_all_streams) + + def save_stream(self, stream_hash, file_name, key, suggested_file_name, blobs): + d = threads.deferToThread(self._store_stream, stream_hash, file_name, key, suggested_file_name) + d.addCallback(lambda _: self.add_blobs_to_stream(stream_hash, blobs)) + return d + + def get_stream_info(self, stream_hash): + return threads.deferToThread(self._get_stream_info, stream_hash) + + def check_if_stream_exists(self, stream_hash): + return threads.deferToThread(self._check_if_stream_exists, stream_hash) + + def delete_stream(self, stream_hash): + return threads.deferToThread(self._delete_stream, stream_hash) + + def add_blobs_to_stream(self, stream_hash, blobs): + + def add_blobs(): + self._add_blobs_to_stream(stream_hash, blobs, ignore_duplicate_error=True) + + return threads.deferToThread(add_blobs) + + def get_blobs_for_stream(self, stream_hash, start_blob=None, end_blob=None, count=None, reverse=False): + logging.info("Getting blobs for a stream. 
Count is %s", str(count)) + + def get_positions_of_start_and_end(): + if start_blob is not None: + start_num = self._get_blob_num_by_hash(stream_hash, start_blob) + else: + start_num = None + if end_blob is not None: + end_num = self._get_blob_num_by_hash(stream_hash, end_blob) + else: + end_num = None + return start_num, end_num + + def get_blob_infos(nums): + start_num, end_num = nums + return threads.deferToThread(self._get_further_blob_infos, stream_hash, start_num, end_num, + count, reverse) + + d = threads.deferToThread(get_positions_of_start_and_end) + d.addCallback(get_blob_infos) + return d + + def get_stream_of_blob(self, blob_hash): + return threads.deferToThread(self._get_stream_of_blobhash, blob_hash) + + def save_sd_blob_hash_to_stream(self, stream_hash, sd_blob_hash): + return threads.deferToThread(self._save_sd_blob_hash_to_stream, stream_hash, sd_blob_hash) + + def get_sd_blob_hashes_for_stream(self, stream_hash): + return threads.deferToThread(self._get_sd_blob_hashes_for_stream, stream_hash) + + def _open_db(self): + self.stream_info_db = leveldb.LevelDB(os.path.join(self.db_dir, "lbryfile_info.db")) + self.stream_blob_db = leveldb.LevelDB(os.path.join(self.db_dir, "lbryfile_blob.db")) + self.stream_desc_db = leveldb.LevelDB(os.path.join(self.db_dir, "lbryfile_desc.db")) + + def _delete_stream(self, stream_hash): + desc_batch = leveldb.WriteBatch() + for sd_blob_hash, s_h in self.stream_desc_db.RangeIter(): + if stream_hash == s_h: + desc_batch.Delete(sd_blob_hash) + self.stream_desc_db.Write(desc_batch, sync=True) + + blob_batch = leveldb.WriteBatch() + for blob_hash_stream_hash, blob_info in self.stream_blob_db.RangeIter(): + b_h, s_h = json.loads(blob_hash_stream_hash) + if stream_hash == s_h: + blob_batch.Delete(blob_hash_stream_hash) + self.stream_blob_db.Write(blob_batch, sync=True) + + stream_batch = leveldb.WriteBatch() + for s_h, stream_info in self.stream_info_db.RangeIter(): + if stream_hash == s_h: + stream_batch.Delete(s_h) + self.stream_info_db.Write(stream_batch, sync=True) + + def _store_stream(self, stream_hash, name, key, suggested_file_name): + try: + self.stream_info_db.Get(stream_hash) + raise DuplicateStreamHashError("Stream hash %s already exists" % stream_hash) + except KeyError: + pass + self.stream_info_db.Put(stream_hash, json.dumps((key, name, suggested_file_name)), sync=True) + + def _get_all_streams(self): + return [stream_hash for stream_hash, stream_info in self.stream_info_db.RangeIter()] + + def _get_stream_info(self, stream_hash): + return json.loads(self.stream_info_db.Get(stream_hash))[:3] + + def _check_if_stream_exists(self, stream_hash): + try: + self.stream_info_db.Get(stream_hash) + return True + except KeyError: + return False + + def _get_blob_num_by_hash(self, stream_hash, blob_hash): + blob_hash_stream_hash = json.dumps((blob_hash, stream_hash)) + return json.loads(self.stream_blob_db.Get(blob_hash_stream_hash))[0] + + def _get_further_blob_infos(self, stream_hash, start_num, end_num, count=None, reverse=False): + blob_infos = [] + for blob_hash_stream_hash, blob_info in self.stream_blob_db.RangeIter(): + b_h, s_h = json.loads(blob_hash_stream_hash) + if stream_hash == s_h: + position, iv, length = json.loads(blob_info) + if (start_num is None) or (position > start_num): + if (end_num is None) or (position < end_num): + blob_infos.append((b_h, position, iv, length)) + blob_infos.sort(key=lambda i: i[1], reverse=reverse) + if count is not None: + blob_infos = blob_infos[:count] + return blob_infos + + def 
_add_blobs_to_stream(self, stream_hash, blob_infos, ignore_duplicate_error=False): + batch = leveldb.WriteBatch() + for blob_info in blob_infos: + blob_hash_stream_hash = json.dumps((blob_info.blob_hash, stream_hash)) + try: + self.stream_blob_db.Get(blob_hash_stream_hash) + if ignore_duplicate_error is False: + raise KeyError() # TODO: change this to DuplicateStreamBlobError? + continue + except KeyError: + pass + batch.Put(blob_hash_stream_hash, + json.dumps((blob_info.blob_num, + blob_info.iv, + blob_info.length))) + self.stream_blob_db.Write(batch, sync=True) + + def _get_stream_of_blobhash(self, blob_hash): + for blob_hash_stream_hash, blob_info in self.stream_blob_db.RangeIter(): + b_h, s_h = json.loads(blob_hash_stream_hash) + if blob_hash == b_h: + return s_h + return None + + def _save_sd_blob_hash_to_stream(self, stream_hash, sd_blob_hash): + self.stream_desc_db.Put(sd_blob_hash, stream_hash) + + def _get_sd_blob_hashes_for_stream(self, stream_hash): + return [sd_blob_hash for sd_blob_hash, s_h in self.stream_desc_db.RangeIter() if stream_hash == s_h] + + +class TempLBRYFileMetadataManager(object): + def __init__(self): + self.streams = {} + self.stream_blobs = {} + self.sd_files = {} + + def setup(self): + return defer.succeed(True) + + def stop(self): + return defer.succeed(True) + + def get_all_streams(self): + return defer.succeed(self.streams.keys()) + + def save_stream(self, stream_hash, file_name, key, suggested_file_name, blobs): + self.streams[stream_hash] = {'suggested_file_name': suggested_file_name, + 'stream_name': file_name, + 'key': key} + d = self.add_blobs_to_stream(stream_hash, blobs) + d.addCallback(lambda _: stream_hash) + return d + + def get_stream_info(self, stream_hash): + if stream_hash in self.streams: + stream_info = self.streams[stream_hash] + return defer.succeed([stream_info['key'], stream_info['stream_name'], + stream_info['suggested_file_name']]) + return defer.succeed(None) + + def delete_stream(self, stream_hash): + if stream_hash in self.streams: + del self.streams[stream_hash] + for (s_h, b_h) in self.stream_blobs.keys(): + if s_h == stream_hash: + del self.stream_blobs[(s_h, b_h)] + return defer.succeed(True) + + def add_blobs_to_stream(self, stream_hash, blobs): + assert stream_hash in self.streams, "Can't add blobs to a stream that isn't known" + for blob in blobs: + info = {} + info['blob_num'] = blob.blob_num + info['length'] = blob.length + info['iv'] = blob.iv + self.stream_blobs[(stream_hash, blob.blob_hash)] = info + return defer.succeed(True) + + def get_blobs_for_stream(self, stream_hash, start_blob=None, end_blob=None, count=None, reverse=False): + + if start_blob is not None: + start_num = self._get_blob_num_by_hash(stream_hash, start_blob) + else: + start_num = None + if end_blob is not None: + end_num = self._get_blob_num_by_hash(stream_hash, end_blob) + else: + end_num = None + return self._get_further_blob_infos(stream_hash, start_num, end_num, count, reverse) + + def get_stream_of_blob(self, blob_hash): + for (s_h, b_h) in self.stream_blobs.iterkeys(): + if b_h == blob_hash: + return defer.succeed(s_h) + return defer.succeed(None) + + def _get_further_blob_infos(self, stream_hash, start_num, end_num, count=None, reverse=False): + blob_infos = [] + for (s_h, b_h), info in self.stream_blobs.iteritems(): + if stream_hash == s_h: + position = info['blob_num'] + length = info['length'] + iv = info['iv'] + if (start_num is None) or (position > start_num): + if (end_num is None) or (position < end_num): + blob_infos.append((b_h, 
position, iv, length)) + blob_infos.sort(key=lambda i: i[1], reverse=reverse) + if count is not None: + blob_infos = blob_infos[:count] + return defer.succeed(blob_infos) + + def _get_blob_num_by_hash(self, stream_hash, blob_hash): + if (stream_hash, blob_hash) in self.stream_blobs: + return defer.succeed(self.stream_blobs[(stream_hash, blob_hash)]['blob_num']) + + def save_sd_blob_hash_to_stream(self, stream_hash, sd_blob_hash): + self.sd_files[sd_blob_hash] = stream_hash + return defer.succeed(True) + + def get_sd_blob_hashes_for_stream(self, stream_hash): + return defer.succeed([sd_hash for sd_hash, s_h in self.sd_files.iteritems() if stream_hash == s_h]) \ No newline at end of file diff --git a/lbrynet/lbryfile/StreamDescriptor.py b/lbrynet/lbryfile/StreamDescriptor.py new file mode 100644 index 000000000..c1244c086 --- /dev/null +++ b/lbrynet/lbryfile/StreamDescriptor.py @@ -0,0 +1,138 @@ +import binascii +import logging +from lbrynet.core.cryptoutils import get_lbry_hash_obj +from lbrynet.cryptstream.CryptBlob import CryptBlobInfo +from twisted.internet import defer +from lbrynet.core.Error import DuplicateStreamHashError + + +LBRYFileStreamType = "lbryfile" + + +def save_sd_info(stream_info_manager, sd_info, ignore_duplicate=False): + logging.debug("Saving info for %s", str(sd_info['stream_name'])) + hex_stream_name = sd_info['stream_name'] + key = sd_info['key'] + stream_hash = sd_info['stream_hash'] + raw_blobs = sd_info['blobs'] + suggested_file_name = sd_info['suggested_file_name'] + crypt_blobs = [] + for blob in raw_blobs: + length = blob['length'] + if length != 0: + blob_hash = blob['blob_hash'] + else: + blob_hash = None + blob_num = blob['blob_num'] + iv = blob['iv'] + crypt_blobs.append(CryptBlobInfo(blob_hash, blob_num, length, iv)) + logging.debug("Trying to save stream info for %s", str(hex_stream_name)) + d = stream_info_manager.save_stream(stream_hash, hex_stream_name, key, + suggested_file_name, crypt_blobs) + + def check_if_duplicate(err): + if ignore_duplicate is True: + err.trap(DuplicateStreamHashError) + + d.addErrback(check_if_duplicate) + + d.addCallback(lambda _: stream_hash) + return d + + +def get_sd_info(stream_info_manager, stream_hash, include_blobs): + d = stream_info_manager.get_stream_info(stream_hash) + + def format_info(stream_info): + fields = {} + fields['stream_type'] = LBRYFileStreamType + fields['stream_name'] = stream_info[1] + fields['key'] = stream_info[0] + fields['suggested_file_name'] = stream_info[2] + fields['stream_hash'] = stream_hash + + def format_blobs(blobs): + formatted_blobs = [] + for blob_hash, blob_num, iv, length in blobs: + blob = {} + if length != 0: + blob['blob_hash'] = blob_hash + blob['blob_num'] = blob_num + blob['iv'] = iv + blob['length'] = length + formatted_blobs.append(blob) + fields['blobs'] = formatted_blobs + return fields + + if include_blobs is True: + d = stream_info_manager.get_blobs_for_stream(stream_hash) + else: + d = defer.succeed([]) + d.addCallback(format_blobs) + return d + + d.addCallback(format_info) + return d + + +class LBRYFileStreamDescriptorValidator(object): + def __init__(self, raw_info): + self.raw_info = raw_info + + def validate(self): + logging.debug("Trying to validate stream descriptor for %s", str(self.raw_info['stream_name'])) + try: + hex_stream_name = self.raw_info['stream_name'] + key = self.raw_info['key'] + hex_suggested_file_name = self.raw_info['suggested_file_name'] + stream_hash = self.raw_info['stream_hash'] + blobs = self.raw_info['blobs'] + except KeyError as e: + raise 
ValueError("Invalid stream descriptor. Missing '%s'" % (e.args[0])) + for c in hex_suggested_file_name: + if c not in '0123456789abcdef': + raise ValueError("Invalid stream descriptor: " + "suggested file name is not a hex-encoded string") + h = get_lbry_hash_obj() + h.update(hex_stream_name) + h.update(key) + h.update(hex_suggested_file_name) + + def get_blob_hashsum(b): + length = b['length'] + if length != 0: + blob_hash = b['blob_hash'] + else: + blob_hash = None + blob_num = b['blob_num'] + iv = b['iv'] + blob_hashsum = get_lbry_hash_obj() + if length != 0: + blob_hashsum.update(blob_hash) + blob_hashsum.update(str(blob_num)) + blob_hashsum.update(iv) + blob_hashsum.update(str(length)) + return blob_hashsum.digest() + + blobs_hashsum = get_lbry_hash_obj() + for blob in blobs: + blobs_hashsum.update(get_blob_hashsum(blob)) + if blobs[-1]['length'] != 0: + raise ValueError("Improperly formed stream descriptor. Must end with a zero-length blob.") + h.update(blobs_hashsum.digest()) + if h.hexdigest() != stream_hash: + raise ValueError("Stream hash does not match stream metadata") + return defer.succeed(True) + + def info_to_show(self): + info = [] + info.append(("stream_name", binascii.unhexlify(self.raw_info.get("stream_name")))) + size_so_far = 0 + for blob_info in self.raw_info.get("blobs", []): + size_so_far += int(blob_info['length']) + info.append(("stream_size", str(size_so_far))) + suggested_file_name = self.raw_info.get("suggested_file_name", None) + if suggested_file_name is not None: + suggested_file_name = binascii.unhexlify(suggested_file_name) + info.append(("suggested_file_name", suggested_file_name)) + return info \ No newline at end of file diff --git a/lbrynet/lbryfile/__init__.py b/lbrynet/lbryfile/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lbrynet/lbryfile/client/LBRYFileDownloader.py b/lbrynet/lbryfile/client/LBRYFileDownloader.py new file mode 100644 index 000000000..9613ca76f --- /dev/null +++ b/lbrynet/lbryfile/client/LBRYFileDownloader.py @@ -0,0 +1,284 @@ +import subprocess +import binascii + +from zope.interface import implements + +from lbrynet.core.DownloadOption import DownloadOption +from lbrynet.lbryfile.StreamDescriptor import save_sd_info +from lbrynet.cryptstream.client.CryptStreamDownloader import CryptStreamDownloader +from lbrynet.core.client.StreamProgressManager import FullStreamProgressManager +from lbrynet.interfaces import IStreamDownloaderFactory +from lbrynet.lbryfile.client.LBRYFileMetadataHandler import LBRYFileMetadataHandler +import os +from twisted.internet import defer, threads, reactor + + +class LBRYFileDownloader(CryptStreamDownloader): + """Classes which inherit from this class download LBRY files""" + + def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, + stream_info_manager, payment_rate_manager, wallet, upload_allowed): + CryptStreamDownloader.__init__(self, peer_finder, rate_limiter, blob_manager, + payment_rate_manager, wallet, upload_allowed) + self.stream_hash = stream_hash + self.stream_info_manager = stream_info_manager + self.suggested_file_name = None + self._calculated_total_bytes = None + + def set_stream_info(self): + if self.key is None: + d = self.stream_info_manager.get_stream_info(self.stream_hash) + + def set_stream_info(stream_info): + key, stream_name, suggested_file_name = stream_info + self.key = binascii.unhexlify(key) + self.stream_name = binascii.unhexlify(stream_name) + self.suggested_file_name = binascii.unhexlify(suggested_file_name) + + 
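# For illustration only: the sd_info dict that save_sd_info() and get_sd_info()
# above pass around has roughly the following shape; every value shown here is
# made up.
#
# {
#     'stream_type': 'lbryfile',
#     'stream_name': '6d792e747874',          # hex-encoded file name
#     'key': '6162636465666768',              # hex-encoded AES key
#     'suggested_file_name': '6d792e747874',  # hex-encoded
#     'stream_hash': '<sha384 hex digest binding the fields above and the blobs>',
#     'blobs': [
#         {'blob_num': 0, 'blob_hash': '<sha384 hex>', 'iv': '<iv>', 'length': <bytes>},
#         # streams end with a zero-length blob that carries no 'blob_hash'
#         {'blob_num': 1, 'iv': '<iv>', 'length': 0},
#     ],
# }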
d.addCallback(set_stream_info) + return d + else: + return defer.succeed(True) + + def stop(self): + d = self._close_output() + d.addCallback(lambda _: CryptStreamDownloader.stop(self)) + return d + + def _get_progress_manager(self, download_manager): + return FullStreamProgressManager(self._finished_downloading, self.blob_manager, download_manager) + + def _start(self): + d = self._setup_output() + d.addCallback(lambda _: CryptStreamDownloader._start(self)) + return d + + def _setup_output(self): + pass + + def _close_output(self): + pass + + def get_total_bytes(self): + if self._calculated_total_bytes is None or self._calculated_total_bytes == 0: + if self.download_manager is None: + return 0 + else: + self._calculated_total_bytes = self.download_manager.calculate_total_bytes() + return self._calculated_total_bytes + + def get_bytes_left_to_output(self): + if self.download_manager is not None: + return self.download_manager.calculate_bytes_left_to_output() + else: + return 0 + + def get_bytes_left_to_download(self): + if self.download_manager is not None: + return self.download_manager.calculate_bytes_left_to_download() + else: + return 0 + + def _get_metadata_handler(self, download_manager): + return LBRYFileMetadataHandler(self.stream_hash, self.stream_info_manager, download_manager) + + +class LBRYFileDownloaderFactory(object): + implements(IStreamDownloaderFactory) + + def __init__(self, peer_finder, rate_limiter, blob_manager, stream_info_manager, + wallet): + self.peer_finder = peer_finder + self.rate_limiter = rate_limiter + self.blob_manager = blob_manager + self.stream_info_manager = stream_info_manager + self.wallet = wallet + + def get_downloader_options(self, sd_validator, payment_rate_manager): + options = [ + DownloadOption( + [float, None], + "rate which will be paid for data (None means use application default)", + "data payment rate", + None + ), + DownloadOption( + [bool], + "allow reuploading data downloaded for this file", + "allow upload", + True + ), + ] + return options + + def make_downloader(self, sd_validator, options, payment_rate_manager, **kwargs): + if options[0] is not None: + payment_rate_manager.float(options[0]) + upload_allowed = options[1] + + def create_downloader(stream_hash): + downloader = self._make_downloader(stream_hash, payment_rate_manager, sd_validator.raw_info, + upload_allowed) + d = downloader.set_stream_info() + d.addCallback(lambda _: downloader) + return d + + d = save_sd_info(self.stream_info_manager, sd_validator.raw_info) + + d.addCallback(create_downloader) + return d + + def _make_downloader(self, stream_hash, payment_rate_manager, stream_info, upload_allowed): + pass + + +class LBRYFileSaver(LBRYFileDownloader): + def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, stream_info_manager, + payment_rate_manager, wallet, download_directory, upload_allowed, file_name=None): + LBRYFileDownloader.__init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, + stream_info_manager, payment_rate_manager, wallet, upload_allowed) + self.download_directory = download_directory + self.file_name = file_name + self.file_handle = None + + def set_stream_info(self): + d = LBRYFileDownloader.set_stream_info(self) + + def set_file_name(): + if self.file_name is None: + if self.suggested_file_name: + self.file_name = os.path.basename(self.suggested_file_name) + else: + self.file_name = os.path.basename(self.stream_name) + + d.addCallback(lambda _: set_file_name()) + return d + + def stop(self): + d = 
LBRYFileDownloader.stop(self) + d.addCallback(lambda _: self._delete_from_info_manager()) + return d + + def _get_progress_manager(self, download_manager): + return FullStreamProgressManager(self._finished_downloading, self.blob_manager, download_manager, + delete_blob_after_finished=True) + + def _setup_output(self): + def open_file(): + if self.file_handle is None: + file_name = self.file_name + if not file_name: + file_name = "_" + if os.path.exists(os.path.join(self.download_directory, file_name)): + ext_num = 1 + while os.path.exists(os.path.join(self.download_directory, + file_name + "_" + str(ext_num))): + ext_num += 1 + file_name = file_name + "_" + str(ext_num) + self.file_handle = open(os.path.join(self.download_directory, file_name), 'wb') + return threads.deferToThread(open_file) + + def _close_output(self): + self.file_handle, file_handle = None, self.file_handle + + def close_file(): + if file_handle is not None: + name = file_handle.name + file_handle.close() + if self.completed is False: + os.remove(name) + + return threads.deferToThread(close_file) + + def _get_write_func(self): + def write_func(data): + if self.stopped is False and self.file_handle is not None: + self.file_handle.write(data) + return write_func + + def _delete_from_info_manager(self): + return self.stream_info_manager.delete_stream(self.stream_hash) + + +class LBRYFileSaverFactory(LBRYFileDownloaderFactory): + def __init__(self, peer_finder, rate_limiter, blob_manager, stream_info_manager, + wallet, download_directory): + LBRYFileDownloaderFactory.__init__(self, peer_finder, rate_limiter, blob_manager, + stream_info_manager, wallet) + self.download_directory = download_directory + + def _make_downloader(self, stream_hash, payment_rate_manager, stream_info, upload_allowed): + return LBRYFileSaver(stream_hash, self.peer_finder, self.rate_limiter, self.blob_manager, + self.stream_info_manager, payment_rate_manager, self.wallet, + self.download_directory, upload_allowed) + + def get_description(self): + return "Save" + + +class LBRYFileOpener(LBRYFileDownloader): + def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, stream_info_manager, + payment_rate_manager, wallet, upload_allowed): + LBRYFileDownloader.__init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, + stream_info_manager, payment_rate_manager, wallet, upload_allowed) + self.process = None + self.process_log = None + + def stop(self): + d = LBRYFileDownloader.stop(self) + d.addCallback(lambda _: self._delete_from_info_manager()) + return d + + def _get_progress_manager(self, download_manager): + return FullStreamProgressManager(self._finished_downloading, self.blob_manager, download_manager, + delete_blob_after_finished=True) + + def _setup_output(self): + def start_process(): + if os.name == "nt": + paths = [r'C:\Program Files\VideoLAN\VLC\vlc.exe', + r'C:\Program Files (x86)\VideoLAN\VLC\vlc.exe'] + for p in paths: + if os.path.exists(p): + vlc_path = p + break + else: + raise ValueError("You must install VLC media player to stream files") + else: + vlc_path = 'vlc' + self.process_log = open("vlc.out", 'a') + try: + self.process = subprocess.Popen([vlc_path, '-'], stdin=subprocess.PIPE, + stdout=self.process_log, stderr=self.process_log) + except OSError: + raise ValueError("VLC media player could not be opened") + + d = threads.deferToThread(start_process) + return d + + def _close_output(self): + if self.process is not None: + self.process.stdin.close() + self.process = None + return defer.succeed(True) + + def 
_get_write_func(self): + def write_func(data): + if self.stopped is False and self.process is not None: + try: + self.process.stdin.write(data) + except IOError: + reactor.callLater(0, self.stop) + return write_func + + def _delete_from_info_manager(self): + return self.stream_info_manager.delete_stream(self.stream_hash) + + +class LBRYFileOpenerFactory(LBRYFileDownloaderFactory): + def _make_downloader(self, stream_hash, payment_rate_manager, stream_info, upload_allowed): + return LBRYFileOpener(stream_hash, self.peer_finder, self.rate_limiter, self.blob_manager, + self.stream_info_manager, payment_rate_manager, self.wallet, upload_allowed) + + def get_description(self): + return "Stream" \ No newline at end of file diff --git a/lbrynet/lbryfile/client/LBRYFileMetadataHandler.py b/lbrynet/lbryfile/client/LBRYFileMetadataHandler.py new file mode 100644 index 000000000..fe1acf921 --- /dev/null +++ b/lbrynet/lbryfile/client/LBRYFileMetadataHandler.py @@ -0,0 +1,36 @@ +import logging +from zope.interface import implements +from lbrynet.cryptstream.CryptBlob import CryptBlobInfo +from lbrynet.interfaces import IMetadataHandler + + +class LBRYFileMetadataHandler(object): + implements(IMetadataHandler) + + def __init__(self, stream_hash, stream_info_manager, download_manager): + self.stream_hash = stream_hash + self.stream_info_manager = stream_info_manager + self.download_manager = download_manager + self._final_blob_num = None + + ######### IMetadataHandler ######### + + def get_initial_blobs(self): + d = self.stream_info_manager.get_blobs_for_stream(self.stream_hash) + d.addCallback(self._format_initial_blobs_for_download_manager) + return d + + def final_blob_num(self): + return self._final_blob_num + + ######### internal calls ######### + + def _format_initial_blobs_for_download_manager(self, blob_infos): + infos = [] + for blob_hash, blob_num, iv, length in blob_infos: + if blob_hash is not None: + infos.append(CryptBlobInfo(blob_hash, blob_num, length, iv)) + else: + logging.debug("Setting _final_blob_num to %s", str(blob_num - 1)) + self._final_blob_num = blob_num - 1 + return infos \ No newline at end of file diff --git a/lbrynet/lbryfile/client/__init__.py b/lbrynet/lbryfile/client/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lbrynet/lbryfilemanager/LBRYFileCreator.py b/lbrynet/lbryfilemanager/LBRYFileCreator.py new file mode 100644 index 000000000..444d55cf1 --- /dev/null +++ b/lbrynet/lbryfilemanager/LBRYFileCreator.py @@ -0,0 +1,159 @@ +""" +Utilities for turning plain files into LBRY Files. 
+""" + +import binascii +import logging +import os +from lbrynet.core.StreamDescriptor import PlainStreamDescriptorWriter +from lbrynet.cryptstream.CryptStreamCreator import CryptStreamCreator +from lbrynet import conf +from lbrynet.lbryfile.StreamDescriptor import get_sd_info +from lbrynet.core.cryptoutils import get_lbry_hash_obj +from twisted.protocols.basic import FileSender +from lbrynet.lbryfilemanager.LBRYFileDownloader import ManagedLBRYFileDownloader + + +class LBRYFileStreamCreator(CryptStreamCreator): + """ + A CryptStreamCreator which adds itself and its additional metadata to an LBRYFileManager + """ + def __init__(self, blob_manager, lbry_file_manager, name=None, + key=None, iv_generator=None, suggested_file_name=None): + CryptStreamCreator.__init__(self, blob_manager, name, key, iv_generator) + self.lbry_file_manager = lbry_file_manager + if suggested_file_name is None: + self.suggested_file_name = name + else: + self.suggested_file_name = suggested_file_name + self.stream_hash = None + self.blob_infos = [] + + def _blob_finished(self, blob_info): + logging.debug("length: %s", str(blob_info.length)) + self.blob_infos.append(blob_info) + + def _save_lbry_file_info(self): + stream_info_manager = self.lbry_file_manager.stream_info_manager + d = stream_info_manager.save_stream(self.stream_hash, binascii.hexlify(self.name), + binascii.hexlify(self.key), + binascii.hexlify(self.suggested_file_name), + self.blob_infos) + return d + + def setup(self): + d = CryptStreamCreator.setup(self) + d.addCallback(lambda _: self.stream_hash) + + return d + + def _get_blobs_hashsum(self): + blobs_hashsum = get_lbry_hash_obj() + for blob_info in sorted(self.blob_infos, key=lambda b_i: b_i.blob_num): + length = blob_info.length + if length != 0: + blob_hash = blob_info.blob_hash + else: + blob_hash = None + blob_num = blob_info.blob_num + iv = blob_info.iv + blob_hashsum = get_lbry_hash_obj() + if length != 0: + blob_hashsum.update(blob_hash) + blob_hashsum.update(str(blob_num)) + blob_hashsum.update(iv) + blob_hashsum.update(str(length)) + blobs_hashsum.update(blob_hashsum.digest()) + return blobs_hashsum.digest() + + def _make_stream_hash(self): + hashsum = get_lbry_hash_obj() + hashsum.update(binascii.hexlify(self.name)) + hashsum.update(binascii.hexlify(self.key)) + hashsum.update(binascii.hexlify(self.suggested_file_name)) + hashsum.update(self._get_blobs_hashsum()) + self.stream_hash = hashsum.hexdigest() + + def _finished(self): + self._make_stream_hash() + d = self._save_lbry_file_info() + d.addCallback(lambda _: self.lbry_file_manager.change_lbry_file_status( + self.stream_hash, ManagedLBRYFileDownloader.STATUS_FINISHED + )) + return d + + +def create_lbry_file(session, lbry_file_manager, file_name, file_handle, key=None, + iv_generator=None, suggested_file_name=None): + """ + Turn a plain file into an LBRY File. + + An LBRY File is a collection of encrypted blobs of data and the metadata that binds them + together which, when decrypted and put back together according to the metadata, results + in the original file. + + The stream parameters that aren't specified are generated, the file is read and broken + into chunks and encrypted, and then a stream descriptor file with the stream parameters + and other metadata is written to disk. + + @param session: An LBRYSession object. + @type session: LBRYSession + + @param lbry_file_manager: The LBRYFileManager object this LBRY File will be added to. + @type lbry_file_manager: LBRYFileManager + + @param file_name: The path to the plain file. 
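As a hedged illustration of how the stream hash is derived (mirroring _make_stream_hash() above and the validator's recomputation in StreamDescriptor.py), the standalone helper below recomputes it from already hex-encoded fields; compute_stream_hash is not part of the codebase.

from lbrynet.core.cryptoutils import get_lbry_hash_obj


def compute_stream_hash(hex_stream_name, hex_key, hex_suggested_file_name, blob_infos):
    """blob_infos: iterable of (blob_hash, blob_num, iv, length) tuples,
    sorted by blob_num and ending with the zero-length terminator blob."""
    h = get_lbry_hash_obj()
    h.update(hex_stream_name)
    h.update(hex_key)
    h.update(hex_suggested_file_name)
    blobs_hashsum = get_lbry_hash_obj()
    for blob_hash, blob_num, iv, length in blob_infos:
        blob_hashsum = get_lbry_hash_obj()
        if length != 0:
            blob_hashsum.update(blob_hash)
        blob_hashsum.update(str(blob_num))
        blob_hashsum.update(iv)
        blob_hashsum.update(str(length))
        blobs_hashsum.update(blob_hashsum.digest())
    h.update(blobs_hashsum.digest())
    return h.hexdigest()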
+ @type file_name: string + + @param file_handle: The file-like object to read + @type file_handle: any file-like object which can be read by twisted.protocols.basic.FileSender + + @param secret_pass_phrase: A string that will be used to generate the public key. If None, a + random string will be used. + @type secret_pass_phrase: string + + @param key: the raw AES key which will be used to encrypt the blobs. If None, a random key will + be generated. + @type key: string + + @param iv_generator: a generator which yields initialization vectors for the blobs. Will be called + once for each blob. + @type iv_generator: a generator function which yields strings + + @param suggested_file_name: what the file should be called when the LBRY File is saved to disk. + @type suggested_file_name: string + + @return: a Deferred which fires with the stream_hash of the LBRY File + @rtype: Deferred which fires with hex-encoded string + """ + + def stop_file(creator): + logging.debug("the file sender has triggered its deferred. stopping the stream writer") + return creator.stop() + + def make_stream_desc_file(stream_hash): + logging.debug("creating the stream descriptor file") + descriptor_writer = PlainStreamDescriptorWriter(file_name + conf.CRYPTSD_FILE_EXTENSION) + + d = get_sd_info(lbry_file_manager.stream_info_manager, stream_hash, True) + + d.addCallback(descriptor_writer.create_descriptor) + + return d + + base_file_name = os.path.basename(file_name) + + lbry_file_creator = LBRYFileStreamCreator(session.blob_manager, lbry_file_manager, base_file_name, + key, iv_generator, suggested_file_name) + + def start_stream(): + file_sender = FileSender() + d = file_sender.beginFileTransfer(file_handle, lbry_file_creator) + d.addCallback(lambda _: stop_file(lbry_file_creator)) + d.addCallback(lambda _: make_stream_desc_file(lbry_file_creator.stream_hash)) + d.addCallback(lambda _: lbry_file_creator.stream_hash) + return d + + d = lbry_file_creator.setup() + d.addCallback(lambda _: start_stream()) + return d \ No newline at end of file diff --git a/lbrynet/lbryfilemanager/LBRYFileDownloader.py b/lbrynet/lbryfilemanager/LBRYFileDownloader.py new file mode 100644 index 000000000..edb6b3f7b --- /dev/null +++ b/lbrynet/lbryfilemanager/LBRYFileDownloader.py @@ -0,0 +1,149 @@ +""" +Download LBRY Files from LBRYnet and save them to disk. 
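A hedged sketch of driving create_lbry_file() above from application code once a session and an LBRYFileManager exist; publish_file and its print-out are illustrative only.

from lbrynet.lbryfilemanager.LBRYFileCreator import create_lbry_file


def publish_file(session, lbry_file_manager, path):
    file_handle = open(path, 'rb')

    def done(stream_hash):
        # create_lbry_file fires its Deferred with the new stream's hash.
        file_handle.close()
        print 'Created LBRY File with stream hash %s' % stream_hash
        return stream_hash

    d = create_lbry_file(session, lbry_file_manager, path, file_handle)
    d.addCallback(done)
    return d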
+""" + +from lbrynet.core.DownloadOption import DownloadOption +from zope.interface import implements +from lbrynet.core.client.StreamProgressManager import FullStreamProgressManager +from lbrynet.lbryfile.client.LBRYFileDownloader import LBRYFileSaver, LBRYFileDownloader +from lbrynet.lbryfilemanager.LBRYFileStatusReport import LBRYFileStatusReport +from lbrynet.interfaces import IStreamDownloaderFactory +from lbrynet.lbryfile.StreamDescriptor import save_sd_info +from twisted.internet import defer + + +class ManagedLBRYFileDownloader(LBRYFileSaver): + + STATUS_RUNNING = "running" + STATUS_STOPPED = "stopped" + STATUS_FINISHED = "finished" + + def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, stream_info_manager, + lbry_file_manager, payment_rate_manager, wallet, download_directory, upload_allowed, + file_name=None): + LBRYFileSaver.__init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, + stream_info_manager, payment_rate_manager, wallet, download_directory, + upload_allowed) + self.lbry_file_manager = lbry_file_manager + self.file_name = file_name + self.file_handle = None + self.saving_status = False + + def restore(self): + d = self.lbry_file_manager.get_lbry_file_status(self.stream_hash) + + def restore_status(status): + if status == ManagedLBRYFileDownloader.STATUS_RUNNING: + return self.start() + elif status == ManagedLBRYFileDownloader.STATUS_STOPPED: + return defer.succeed(False) + elif status == ManagedLBRYFileDownloader.STATUS_FINISHED: + self.completed = True + return defer.succeed(True) + + d.addCallback(restore_status) + return d + + def stop(self, change_status=True): + + def set_saving_status_done(): + self.saving_status = False + + d = LBRYFileDownloader.stop(self) # LBRYFileSaver deletes metadata when it's stopped. We don't want that here. 
+ if change_status is True: + self.saving_status = True + d.addCallback(lambda _: self._save_status()) + d.addCallback(lambda _: set_saving_status_done()) + return d + + def status(self): + def find_completed_blobhashes(blobs): + blobhashes = [b[0] for b in blobs if b[0] is not None] + + def get_num_completed(completed_blobs): + return len(completed_blobs), len(blobhashes) + + inner_d = self.blob_manager.completed_blobs(blobhashes) + inner_d.addCallback(get_num_completed) + return inner_d + + def make_full_status(progress): + num_completed = progress[0] + num_known = progress[1] + if self.completed is True: + s = "completed" + elif self.stopped is True: + s = "stopped" + else: + s = "running" + status = LBRYFileStatusReport(self.file_name, num_completed, num_known, s) + return status + + d = self.stream_info_manager.get_blobs_for_stream(self.stream_hash) + d.addCallback(find_completed_blobhashes) + d.addCallback(make_full_status) + return d + + def _start(self): + + d = LBRYFileSaver._start(self) + + d.addCallback(lambda _: self._save_status()) + + return d + + def _get_finished_deferred_callback_value(self): + if self.completed is True: + return "Download successful" + else: + return "Download stopped" + + def _save_status(self): + if self.completed is True: + s = ManagedLBRYFileDownloader.STATUS_FINISHED + elif self.stopped is True: + s = ManagedLBRYFileDownloader.STATUS_STOPPED + else: + s = ManagedLBRYFileDownloader.STATUS_RUNNING + return self.lbry_file_manager.change_lbry_file_status(self.stream_hash, s) + + def _get_progress_manager(self, download_manager): + return FullStreamProgressManager(self._finished_downloading, self.blob_manager, download_manager) + + +class ManagedLBRYFileDownloaderFactory(object): + implements(IStreamDownloaderFactory) + + def __init__(self, lbry_file_manager): + self.lbry_file_manager = lbry_file_manager + + def get_downloader_options(self, sd_validator, payment_rate_manager): + options = [ + DownloadOption( + [float, None], + "rate which will be paid for data (None means use application default)", + "data payment rate", + None + ), + DownloadOption( + [bool], + "allow reuploading data downloaded for this file", + "allow upload", + True + ), + ] + return options + + def make_downloader(self, sd_validator, options, payment_rate_manager): + data_rate = options[0] + upload_allowed = options[1] + + d = save_sd_info(self.lbry_file_manager.stream_info_manager, sd_validator.raw_info) + d.addCallback(lambda stream_hash: self.lbry_file_manager.add_lbry_file(stream_hash, + payment_rate_manager, + data_rate, + upload_allowed)) + return d + + def get_description(self): + return "Save the file to disk" \ No newline at end of file diff --git a/lbrynet/lbryfilemanager/LBRYFileManager.py b/lbrynet/lbryfilemanager/LBRYFileManager.py new file mode 100644 index 000000000..365081b93 --- /dev/null +++ b/lbrynet/lbryfilemanager/LBRYFileManager.py @@ -0,0 +1,255 @@ +""" +Keep track of which LBRY Files are downloading and store their LBRY File specific metadata +""" + +import logging +import json + +import leveldb + +from lbrynet.lbryfile.StreamDescriptor import LBRYFileStreamDescriptorValidator +import os +from lbrynet.lbryfilemanager.LBRYFileDownloader import ManagedLBRYFileDownloader +from lbrynet.lbryfilemanager.LBRYFileDownloader import ManagedLBRYFileDownloaderFactory +from lbrynet.lbryfile.StreamDescriptor import LBRYFileStreamType +from lbrynet.core.PaymentRateManager import PaymentRateManager +from twisted.internet import threads, defer, task, reactor +from 
twisted.python.failure import Failure +from lbrynet.cryptstream.client.CryptStreamDownloader import AlreadyStoppedError, CurrentlyStoppingError + + +class LBRYFileManager(object): + """ + Keeps track of currently opened LBRY Files, their options, and their LBRY File specific metadata. + """ + SETTING = "s" + LBRYFILE_STATUS = "t" + LBRYFILE_OPTIONS = "o" + + def __init__(self, session, stream_info_manager, sd_identifier): + self.session = session + self.stream_info_manager = stream_info_manager + self.sd_identifier = sd_identifier + self.lbry_files = [] + self.db = None + self.download_directory = os.getcwd() + + def setup(self): + d = threads.deferToThread(self._open_db) + d.addCallback(lambda _: self._add_to_sd_identifier()) + d.addCallback(lambda _: self._start_lbry_files()) + return d + + def get_all_lbry_file_stream_hashes_and_options(self): + d = threads.deferToThread(self._get_all_lbry_file_stream_hashes) + + def get_options(stream_hashes): + ds = [] + + def get_options_for_stream_hash(stream_hash): + d = self.get_lbry_file_options(stream_hash) + d.addCallback(lambda options: (stream_hash, options)) + return d + + for stream_hash in stream_hashes: + ds.append(get_options_for_stream_hash(stream_hash)) + dl = defer.DeferredList(ds) + dl.addCallback(lambda results: [r[1] for r in results if r[0]]) + return dl + + d.addCallback(get_options) + return d + + def get_lbry_file_status(self, stream_hash): + return threads.deferToThread(self._get_lbry_file_status, stream_hash) + + def save_lbry_file_options(self, stream_hash, blob_data_rate): + return threads.deferToThread(self._save_lbry_file_options, stream_hash, blob_data_rate) + + def get_lbry_file_options(self, stream_hash): + return threads.deferToThread(self._get_lbry_file_options, stream_hash) + + def delete_lbry_file_options(self, stream_hash): + return threads.deferToThread(self._delete_lbry_file_options, stream_hash) + + def set_lbry_file_data_payment_rate(self, stream_hash, new_rate): + return threads.deferToThread(self._set_lbry_file_payment_rate, stream_hash, new_rate) + + def change_lbry_file_status(self, stream_hash, status): + logging.debug("Changing status of %s to %s", stream_hash, status) + return threads.deferToThread(self._change_file_status, stream_hash, status) + + def delete_lbry_file_status(self, stream_hash): + return threads.deferToThread(self._delete_lbry_file_status, stream_hash) + + def get_lbry_file_status_reports(self): + ds = [] + + for lbry_file in self.lbry_files: + ds.append(lbry_file.status()) + + dl = defer.DeferredList(ds) + + def filter_failures(status_reports): + return [status_report for success, status_report in status_reports if success is True] + + dl.addCallback(filter_failures) + return dl + + def _add_to_sd_identifier(self): + downloader_factory = ManagedLBRYFileDownloaderFactory(self) + self.sd_identifier.add_stream_info_validator(LBRYFileStreamType, LBRYFileStreamDescriptorValidator) + self.sd_identifier.add_stream_downloader_factory(LBRYFileStreamType, downloader_factory) + + def _start_lbry_files(self): + + def set_options_and_restore(stream_hash, options): + payment_rate_manager = PaymentRateManager(self.session.base_payment_rate_manager) + d = self.add_lbry_file(stream_hash, payment_rate_manager, blob_data_rate=options[0]) + d.addCallback(lambda downloader: downloader.restore()) + return d + + def log_error(err): + logging.error("An error occurred while starting a lbry file: %s", err.getErrorMessage()) + + def start_lbry_files(stream_hashes_and_options): + for stream_hash, options in 
stream_hashes_and_options: + d = set_options_and_restore(stream_hash, options) + d.addErrback(log_error) + return True + + d = self.get_all_lbry_file_stream_hashes_and_options() + d.addCallback(start_lbry_files) + return d + + def add_lbry_file(self, stream_hash, payment_rate_manager, blob_data_rate=None, upload_allowed=True): + payment_rate_manager.min_blob_data_payment_rate = blob_data_rate + lbry_file_downloader = ManagedLBRYFileDownloader(stream_hash, self.session.peer_finder, + self.session.rate_limiter, self.session.blob_manager, + self.stream_info_manager, self, + payment_rate_manager, self.session.wallet, + self.download_directory, + upload_allowed) + self.lbry_files.append(lbry_file_downloader) + d = self.save_lbry_file_options(stream_hash, blob_data_rate) + d.addCallback(lambda _: lbry_file_downloader.set_stream_info()) + d.addCallback(lambda _: lbry_file_downloader) + return d + + def delete_lbry_file(self, stream_hash): + for l in self.lbry_files: + if l.stream_hash == stream_hash: + lbry_file = l + break + else: + return defer.fail(Failure(ValueError("Could not find an LBRY file with the given stream hash, " + + stream_hash))) + + def wait_for_finished(count=2): + if count <= 0 or lbry_file.saving_status is False: + return True + else: + return task.deferLater(reactor, 1, wait_for_finished, count=count - 1) + + def ignore_stopped(err): + err.trap(AlreadyStoppedError, CurrentlyStoppingError) + return wait_for_finished() + + d = lbry_file.stop() + d.addErrback(ignore_stopped) + + def remove_from_list(): + self.lbry_files.remove(lbry_file) + + d.addCallback(lambda _: remove_from_list()) + d.addCallback(lambda _: self.delete_lbry_file_options(stream_hash)) + d.addCallback(lambda _: self.delete_lbry_file_status(stream_hash)) + return d + + def toggle_lbry_file_running(self, stream_hash): + """Toggle whether a stream reader is currently running""" + for l in self.lbry_files: + if l.stream_hash == stream_hash: + return l.toggle_running() + else: + return defer.fail(Failure(ValueError("Could not find an LBRY file with the given stream hash, " + + stream_hash))) + + def get_stream_hash_from_name(self, lbry_file_name): + for l in self.lbry_files: + if l.file_name == lbry_file_name: + return l.stream_hash + return None + + def stop(self): + ds = [] + + def wait_for_finished(lbry_file, count=2): + if count <= 0 or lbry_file.saving_status is False: + return True + else: + return task.deferLater(reactor, 1, wait_for_finished, lbry_file, count=count - 1) + + def ignore_stopped(err, lbry_file): + err.trap(AlreadyStoppedError, CurrentlyStoppingError) + return wait_for_finished(lbry_file) + + for lbry_file in self.lbry_files: + d = lbry_file.stop(change_status=False) + d.addErrback(ignore_stopped, lbry_file) + ds.append(d) + dl = defer.DeferredList(ds) + + def close_db(): + self.db = None + + dl.addCallback(lambda _: close_db()) + return dl + + ######### database calls ######### + + def _open_db(self): + self.db = leveldb.LevelDB(os.path.join(self.session.db_dir, "lbryfiles.db")) + + def _save_payment_rate(self, rate_type, rate): + if rate is not None: + self.db.Put(json.dumps((self.SETTING, rate_type)), json.dumps(rate), sync=True) + else: + self.db.Delete(json.dumps((self.SETTING, rate_type)), sync=True) + + def _save_lbry_file_options(self, stream_hash, blob_data_rate): + self.db.Put(json.dumps((self.LBRYFILE_OPTIONS, stream_hash)), json.dumps((blob_data_rate,)), + sync=True) + + def _get_lbry_file_options(self, stream_hash): + try: + return 
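# For orientation, the leveldb rows written by the option/status helpers in this
# class use JSON-encoded tuples as keys (values below are illustrative only):
#   json.dumps(("o", stream_hash)) -> json.dumps((blob_data_rate,))   # LBRYFILE_OPTIONS
#   json.dumps(("t", stream_hash)) -> status string                   # LBRYFILE_STATUS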
json.loads(self.db.Get(json.dumps((self.LBRYFILE_OPTIONS, stream_hash)))) + except KeyError: + return None, None + + def _delete_lbry_file_options(self, stream_hash): + self.db.Delete(json.dumps((self.LBRYFILE_OPTIONS, stream_hash)), sync=True) + + def _set_lbry_file_payment_rate(self, stream_hash, new_rate): + + self.db.Put(json.dumps((self.LBRYFILE_OPTIONS, stream_hash)), json.dumps((new_rate, )), sync=True) + + def _get_all_lbry_file_stream_hashes(self): + hashes = [] + for k, v in self.db.RangeIter(): + key_type, stream_hash = json.loads(k) + if key_type == self.LBRYFILE_STATUS: + hashes.append(stream_hash) + return hashes + + def _change_file_status(self, stream_hash, new_status): + self.db.Put(json.dumps((self.LBRYFILE_STATUS, stream_hash)), new_status, sync=True) + + def _get_lbry_file_status(self, stream_hash): + try: + return self.db.Get(json.dumps((self.LBRYFILE_STATUS, stream_hash))) + except KeyError: + return ManagedLBRYFileDownloader.STATUS_STOPPED + + def _delete_lbry_file_status(self, stream_hash): + self.db.Delete(json.dumps((self.LBRYFILE_STATUS, stream_hash)), sync=True) \ No newline at end of file diff --git a/lbrynet/lbryfilemanager/LBRYFileStatusReport.py b/lbrynet/lbryfilemanager/LBRYFileStatusReport.py new file mode 100644 index 000000000..d434e1283 --- /dev/null +++ b/lbrynet/lbryfilemanager/LBRYFileStatusReport.py @@ -0,0 +1,6 @@ +class LBRYFileStatusReport(object): + def __init__(self, name, num_completed, num_known, running_status): + self.name = name + self.num_completed = num_completed + self.num_known = num_known + self.running_status = running_status \ No newline at end of file diff --git a/lbrynet/lbryfilemanager/__init__.py b/lbrynet/lbryfilemanager/__init__.py new file mode 100644 index 000000000..6f2017173 --- /dev/null +++ b/lbrynet/lbryfilemanager/__init__.py @@ -0,0 +1,7 @@ +""" +Classes and functions used to create and download LBRY Files. + +LBRY Files are Crypt Streams created from any regular file. The whole file is read +at the time that the LBRY File is created, so all constituent blobs are known and +included in the stream descriptor file. 
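Because every blob is known up front, the constituent chunk hashes can be gathered in a
single pass over the file. A minimal sketch of that idea, using only hashlib (the chunk
size and helper name are illustrative, not values taken from this codebase):

    import hashlib

    def chunk_hashes(path, chunk_size=2 * 1024 * 1024):
        # Read the whole file up front and record the sha384 hash of each chunk,
        # mirroring how all of an LBRY File's blobs are known at creation time.
        hashes = []
        with open(path, 'rb') as f:
            while True:
                chunk = f.read(chunk_size)
                if not chunk:
                    break
                hashes.append(hashlib.sha384(chunk).hexdigest())
        return hashes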
+""" \ No newline at end of file diff --git a/lbrynet/lbrylive/LBRYStdinUploader.py b/lbrynet/lbrylive/LBRYStdinUploader.py new file mode 100644 index 000000000..03570a975 --- /dev/null +++ b/lbrynet/lbrylive/LBRYStdinUploader.py @@ -0,0 +1,117 @@ +import logging +import sys +from lbrynet.lbrylive.LiveStreamCreator import StdOutLiveStreamCreator +from lbrynet.core.BlobManager import TempBlobManager +from lbrynet.core.Session import LBRYSession +from lbrynet.core.server.BlobAvailabilityHandler import BlobAvailabilityHandlerFactory +from lbrynet.core.server.BlobRequestHandler import BlobRequestHandlerFactory +from lbrynet.core.server.ServerProtocol import ServerProtocolFactory +from lbrynet.lbrylive.PaymentRateManager import BaseLiveStreamPaymentRateManager +from lbrynet.lbrylive.LiveStreamMetadataManager import DBLiveStreamMetadataManager +from lbrynet.lbrylive.server.LiveBlobInfoQueryHandler import CryptBlobInfoQueryHandlerFactory +from lbrynet.dht.node import Node +from twisted.internet import defer, task + + +class LBRYStdinUploader(): + """This class reads from standard in, creates a stream, and makes it available on the network.""" + def __init__(self, peer_port, dht_node_port, known_dht_nodes): + """ + @param peer_port: the network port on which to listen for peers + + @param dht_node_port: the network port on which to listen for nodes in the DHT + + @param known_dht_nodes: a list of (ip_address, dht_port) which will be used to join the DHT network + """ + self.peer_port = peer_port + self.lbry_server_port = None + self.session = LBRYSession(blob_manager_class=TempBlobManager, + stream_info_manager_class=DBLiveStreamMetadataManager, + dht_node_class=Node, dht_node_port=dht_node_port, + known_dht_nodes=known_dht_nodes, peer_port=self.peer_port, + use_upnp=False) + self.payment_rate_manager = BaseLiveStreamPaymentRateManager() + + def start(self): + """Initialize the session and start listening on the peer port""" + d = self.session.setup() + d.addCallback(lambda _: self._start()) + + return d + + def _start(self): + self._start_server() + return True + + def _start_server(self): + query_handler_factories = [ + CryptBlobInfoQueryHandlerFactory(self.stream_info_manager, self.session.wallet, + self.payment_rate_manager), + BlobAvailabilityHandlerFactory(self.session.blob_manager), + BlobRequestHandlerFactory(self.session.blob_manager, self.session.wallet, + self.payment_rate_manager), + self.session.wallet.get_wallet_info_query_handler_factory() + ] + + self.server_factory = ServerProtocolFactory(self.session.rate_limiter, + query_handler_factories, + self.session.peer_manager) + from twisted.internet import reactor + self.lbry_server_port = reactor.listenTCP(self.peer_port, self.server_factory) + + def start_live_stream(self, stream_name): + """Create the stream and start reading from stdin + + @param stream_name: a string, the suggested name of this stream + """ + stream_creator_helper = StdOutLiveStreamCreator(stream_name, self.session.blob_manager, + self.stream_info_manager) + d = stream_creator_helper.create_and_publish_stream_descriptor() + + def print_sd_hash(sd_hash): + print "Stream descriptor hash:", sd_hash + + d.addCallback(print_sd_hash) + d.addCallback(lambda _: stream_creator_helper.start_streaming()) + return d + + def shut_down(self): + """End the session and stop listening on the server port""" + d = self.session.shut_down() + d.addCallback(lambda _: self._shut_down()) + return d + + def _shut_down(self): + if self.lbry_server_port is not None: + d = 
defer.maybeDeferred(self.lbry_server_port.stopListening) + else: + d = defer.succeed(True) + return d + + +def launch_stdin_uploader(): + + from twisted.internet import reactor + + logging.basicConfig(level=logging.WARNING, filename="ul.log") + if len(sys.argv) == 4: + uploader = LBRYStdinUploader(int(sys.argv[2]), int(sys.argv[3]), []) + elif len(sys.argv) == 6: + uploader = LBRYStdinUploader(int(sys.argv[2]), int(sys.argv[3]), [(sys.argv[4], int(sys.argv[5]))]) + else: + print "Usage: lbrynet-stdin-uploader " \ + " [ ]" + sys.exit(1) + + def start_stdin_uploader(): + return uploader.start_live_stream(sys.argv[1]) + + def shut_down(): + logging.debug("Telling the reactor to stop in 60 seconds") + reactor.callLater(60, reactor.stop) + + d = task.deferLater(reactor, 0, uploader.start) + d.addCallback(lambda _: start_stdin_uploader()) + d.addCallback(lambda _: shut_down()) + reactor.addSystemEventTrigger('before', 'shutdown', uploader.shut_down) + reactor.run() \ No newline at end of file diff --git a/lbrynet/lbrylive/LBRYStdoutDownloader.py b/lbrynet/lbrylive/LBRYStdoutDownloader.py new file mode 100644 index 000000000..0c1fe8c60 --- /dev/null +++ b/lbrynet/lbrylive/LBRYStdoutDownloader.py @@ -0,0 +1,96 @@ +import logging +import sys + +from lbrynet.lbrynet_console.plugins.LBRYLive.LBRYLiveStreamDownloader import LBRYLiveStreamDownloader +from lbrynet.core.BlobManager import TempBlobManager +from lbrynet.core.Session import LBRYSession +from lbrynet.core.client.StandaloneBlobDownloader import StandaloneBlobDownloader +from lbrynet.core.StreamDescriptor import BlobStreamDescriptorReader +from lbrynet.lbrylive.PaymentRateManager import BaseLiveStreamPaymentRateManager +from lbrynet.lbrylive.LiveStreamMetadataManager import DBLiveStreamMetadataManager +from lbrynet.lbrylive.StreamDescriptor import save_sd_info +from lbrynet.dht.node import Node +from twisted.internet import task + + +class LBRYStdoutDownloader(): + """This class downloads a live stream from the network and outputs it to standard out.""" + def __init__(self, dht_node_port, known_dht_nodes): + """ + @param dht_node_port: the network port on which to listen for DHT node requests + + @param known_dht_nodes: a list of (ip_address, dht_port) which will be used to join the DHT network + + """ + self.session = LBRYSession(blob_manager_class=TempBlobManager, + stream_info_manager_class=DBLiveStreamMetadataManager, + dht_node_class=Node, dht_node_port=dht_node_port, known_dht_nodes=known_dht_nodes, + use_upnp=False) + self.payment_rate_manager = BaseLiveStreamPaymentRateManager() + + def start(self): + """Initialize the session""" + d = self.session.setup() + return d + + def read_sd_file(self, sd_blob): + reader = BlobStreamDescriptorReader(sd_blob) + return save_sd_info(self.stream_info_manager, reader, ignore_duplicate=True) + + def download_sd_file_from_hash(self, sd_hash): + downloader = StandaloneBlobDownloader(sd_hash, self.session.blob_manager, + self.session.peer_finder, self.session.rate_limiter, + self.session.wallet) + d = downloader.download() + return d + + def start_download(self, sd_hash): + """Start downloading the stream from the network and outputting it to standard out""" + d = self.download_sd_file_from_hash(sd_hash) + d.addCallbacks(self.read_sd_file) + + def start_stream(stream_hash): + consumer = LBRYLiveStreamDownloader(stream_hash, self.session.peer_finder, + self.session.rate_limiter, self.session.blob_manager, + self.stream_info_manager, self.payment_rate_manager, + self.session.wallet) + return 
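# How the two console entry points pair up, with the argument order inferred from the
# sys.argv handling in launch_stdin_uploader and launch_stdout_downloader (names in
# angle brackets are placeholders, not literal values):
#
#   lbrynet-stdin-uploader <stream name> <peer port> <dht node port> [<dht ip> <dht port>] < some_file
#       ... prints "Stream descriptor hash: <sd hash>" once the stream is published
#   lbrynet-stdout-downloader <sd hash> <dht node port> [<dht ip> <dht port>]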
consumer.start() + + d.addCallback(start_stream) + return d + + def shut_down(self): + """End the session""" + d = self.session.shut_down() + return d + + +def launch_stdout_downloader(): + + from twisted.internet import reactor + + logging.basicConfig(level=logging.WARNING, filename="dl.log") + if len(sys.argv) == 3: + downloader = LBRYStdoutDownloader(int(sys.argv[2]), []) + elif len(sys.argv) == 5: + downloader = LBRYStdoutDownloader(int(sys.argv[2]), [(sys.argv[3], int(sys.argv[4]))]) + else: + print "Usage: lbrynet-stdout-downloader " \ + " [ ]" + sys.exit(1) + + def start_stdout_downloader(): + return downloader.start_download(sys.argv[1]) + + def print_error(err): + logging.warning(err.getErrorMessage()) + + def shut_down(): + reactor.stop() + + d = task.deferLater(reactor, 0, downloader.start) + d.addCallback(lambda _: start_stdout_downloader()) + d.addErrback(print_error) + d.addCallback(lambda _: shut_down()) + reactor.addSystemEventTrigger('before', 'shutdown', downloader.shut_down) + reactor.run() \ No newline at end of file diff --git a/lbrynet/lbrylive/LiveBlob.py b/lbrynet/lbrylive/LiveBlob.py new file mode 100644 index 000000000..30d7efc3a --- /dev/null +++ b/lbrynet/lbrylive/LiveBlob.py @@ -0,0 +1,23 @@ +from lbrynet.cryptstream.CryptBlob import CryptStreamBlobMaker, CryptBlobInfo +import binascii + + +class LiveBlobInfo(CryptBlobInfo): + def __init__(self, blob_hash, blob_num, length, iv, revision, signature): + CryptBlobInfo.__init__(self, blob_hash, blob_num, length, iv) + self.revision = revision + self.signature = signature + + +class LiveStreamBlobMaker(CryptStreamBlobMaker): + def __init__(self, key, iv, blob_num, blob): + CryptStreamBlobMaker.__init__(self, key, iv, blob_num, blob) + # The following is a placeholder for a currently unimplemented feature. + # In the future it may be possible for the live stream creator to overwrite a blob + # with a newer revision. 
If that happens, the 0 will be incremented to the + # actual revision count + self.revision = 0 + + def _return_info(self, blob_hash): + return LiveBlobInfo(blob_hash, self.blob_num, self.length, binascii.hexlify(self.iv), + self.revision, None) \ No newline at end of file diff --git a/lbrynet/lbrylive/LiveStreamCreator.py b/lbrynet/lbrylive/LiveStreamCreator.py new file mode 100644 index 000000000..6b78d27ac --- /dev/null +++ b/lbrynet/lbrylive/LiveStreamCreator.py @@ -0,0 +1,189 @@ +from lbrynet.core.StreamDescriptor import BlobStreamDescriptorWriter +from lbrynet.lbrylive.StreamDescriptor import get_sd_info, LiveStreamType, LBRYLiveStreamDescriptorValidator +from lbrynet.cryptstream.CryptStreamCreator import CryptStreamCreator +from lbrynet.lbrylive.LiveBlob import LiveStreamBlobMaker +from lbrynet.lbrylive.PaymentRateManager import BaseLiveStreamPaymentRateManager +from lbrynet.core.cryptoutils import get_lbry_hash_obj, get_pub_key, sign_with_pass_phrase +from Crypto import Random +import binascii +import logging +from lbrynet.conf import CRYPTSD_FILE_EXTENSION +from lbrynet.conf import MIN_BLOB_INFO_PAYMENT_RATE +from lbrynet.lbrylive.client.LiveStreamDownloader import FullLiveStreamDownloaderFactory +from twisted.internet import interfaces, defer +from twisted.protocols.basic import FileSender +from zope.interface import implements + + +class LiveStreamCreator(CryptStreamCreator): + def __init__(self, blob_manager, stream_info_manager, name=None, key=None, iv_generator=None, + delete_after_num=None, secret_pass_phrase=None): + CryptStreamCreator.__init__(self, blob_manager, name, key, iv_generator) + self.stream_hash = None + self.stream_info_manager = stream_info_manager + self.delete_after_num = delete_after_num + self.secret_pass_phrase = secret_pass_phrase + self.file_extension = CRYPTSD_FILE_EXTENSION + self.finished_blob_hashes = {} + + def _save_stream(self): + d = self.stream_info_manager.save_stream(self.stream_hash, get_pub_key(self.secret_pass_phrase), + binascii.hexlify(self.name), binascii.hexlify(self.key), + []) + return d + + def _blob_finished(self, blob_info): + logging.debug("In blob_finished") + logging.debug("length: %s", str(blob_info.length)) + sig_hash = get_lbry_hash_obj() + sig_hash.update(self.stream_hash) + if blob_info.length != 0: + sig_hash.update(blob_info.blob_hash) + sig_hash.update(str(blob_info.blob_num)) + sig_hash.update(str(blob_info.revision)) + sig_hash.update(blob_info.iv) + sig_hash.update(str(blob_info.length)) + signature = sign_with_pass_phrase(sig_hash.digest(), self.secret_pass_phrase) + blob_info.signature = signature + self.finished_blob_hashes[blob_info.blob_num] = blob_info.blob_hash + if self.delete_after_num is not None: + self._delete_old_blobs(blob_info.blob_num) + d = self.stream_info_manager.add_blobs_to_stream(self.stream_hash, [blob_info]) + + def log_add_error(err): + logging.error("An error occurred adding a blob info to the stream info manager: %s", err.getErrorMessage()) + return err + + d.addErrback(log_add_error) + logging.debug("returning from blob_finished") + return d + + def setup(self): + """Create the secret pass phrase if it wasn't provided, compute the stream hash, + save the stream to the stream info manager, and return the stream hash + """ + if self.secret_pass_phrase is None: + self.secret_pass_phrase = Random.new().read(512) + + d = CryptStreamCreator.setup(self) + + def make_stream_hash(): + hashsum = get_lbry_hash_obj() + hashsum.update(binascii.hexlify(self.name)) + 
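# The per-blob signature digest built in _blob_finished above uses the same layout that
# is re-checked by LBRYLiveStreamDescriptorValidator.check_blob_signatures and by
# LiveStreamMetadataHandler._verify_blob. A standalone sketch of that shared layout
# (the helper name is illustrative and assumes the lbrynet package is importable):
from lbrynet.core.cryptoutils import get_lbry_hash_obj

def live_blob_digest(stream_hash, blob_hash, blob_num, revision, iv, length):
    # Empty (stream-terminating) blobs carry no blob hash, so it is skipped.
    h = get_lbry_hash_obj()
    h.update(stream_hash)
    if length != 0:
        h.update(blob_hash)
    h.update(str(blob_num))
    h.update(str(revision))
    h.update(iv)
    h.update(str(length))
    return h.digest()  # this digest is what sign_with_pass_phrase / verify_signature operate on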
hashsum.update(get_pub_key(self.secret_pass_phrase)) + hashsum.update(binascii.hexlify(self.key)) + self.stream_hash = hashsum.hexdigest() + return self.stream_hash + + d.addCallback(lambda _: make_stream_hash()) + d.addCallback(lambda _: self._save_stream()) + d.addCallback(lambda _: self.stream_hash) + return d + + def publish_stream_descriptor(self): + descriptor_writer = BlobStreamDescriptorWriter(self.blob_manager) + d = get_sd_info(self.stream_info_manager, self.stream_hash, False) + d.addCallback(descriptor_writer.create_descriptor) + return d + + def _delete_old_blobs(self, newest_blob_num): + assert self.delete_after_num is not None, "_delete_old_blobs called with delete_after_num=None" + oldest_to_keep = newest_blob_num - self.delete_after_num + 1 + nums_to_delete = [num for num in self.finished_blob_hashes.iterkeys() if num < oldest_to_keep] + for num in nums_to_delete: + self.blob_manager.delete_blobs([self.finished_blob_hashes[num]]) + del self.finished_blob_hashes[num] + + def _get_blob_maker(self, iv, blob_creator): + return LiveStreamBlobMaker(self.key, iv, self.blob_count, blob_creator) + + +class StdOutLiveStreamCreator(LiveStreamCreator): + def __init__(self, stream_name, blob_manager, stream_info_manager): + LiveStreamCreator.__init__(self, blob_manager, stream_info_manager, stream_name, + delete_after_num=20) + + def start_streaming(self): + stdin_producer = StdinStreamProducer(self) + d = stdin_producer.begin_producing() + + def stop_stream(): + d = self.stop() + return d + + d.addCallback(lambda _: stop_stream()) + return d + + +class FileLiveStreamCreator(LiveStreamCreator): + def __init__(self, blob_manager, stream_info_manager, file_name, file_handle, + secret_pass_phrase=None, key=None, iv_generator=None, stream_name=None): + if stream_name is None: + stream_name = file_name + LiveStreamCreator.__init__(self, blob_manager, stream_info_manager, stream_name, + secret_pass_phrase, key, iv_generator) + self.file_name = file_name + self.file_handle = file_handle + + def start_streaming(self): + file_sender = FileSender() + d = file_sender.beginFileTransfer(self.file_handle, self) + + def stop_stream(): + d = self.stop() + return d + + d.addCallback(lambda _: stop_stream()) + return d + + +class StdinStreamProducer(object): + """This class reads data from standard in and sends it to a stream creator""" + + implements(interfaces.IPushProducer) + + def __init__(self, consumer): + self.consumer = consumer + self.reader = None + self.finished_deferred = None + + def begin_producing(self): + + self.finished_deferred = defer.Deferred() + self.consumer.registerProducer(self, True) + #self.reader = process.ProcessReader(reactor, self, 'read', 0) + self.resumeProducing() + return self.finished_deferred + + def resumeProducing(self): + if self.reader is not None: + self.reader.resumeProducing() + + def stopProducing(self): + if self.reader is not None: + self.reader.stopReading() + self.consumer.unregisterProducer() + self.finished_deferred.callback(True) + + def pauseProducing(self): + if self.reader is not None: + self.reader.pauseProducing() + + def childDataReceived(self, fd, data): + self.consumer.write(data) + + def childConnectionLost(self, fd, reason): + self.stopProducing() + + +def add_live_stream_to_sd_identifier(session, stream_info_manager, sd_identifier): + downloader_factory = FullLiveStreamDownloaderFactory(session.peer_finder, + session.rate_limiter, + session.blob_manager, + stream_info_manager, + session.wallet, + BaseLiveStreamPaymentRateManager( + 
MIN_BLOB_INFO_PAYMENT_RATE + )) + sd_identifier.add_stream_info_validator(LiveStreamType, LBRYLiveStreamDescriptorValidator) + sd_identifier.add_stream_downloader_factory(LiveStreamType, downloader_factory) \ No newline at end of file diff --git a/lbrynet/lbrylive/LiveStreamMetadataManager.py b/lbrynet/lbrylive/LiveStreamMetadataManager.py new file mode 100644 index 000000000..703197493 --- /dev/null +++ b/lbrynet/lbrylive/LiveStreamMetadataManager.py @@ -0,0 +1,328 @@ +import time +import logging +import leveldb +import json +import os +from twisted.internet import threads, defer +from lbrynet.core.server.DHTHashAnnouncer import DHTHashSupplier +from lbrynet.core.Error import DuplicateStreamHashError + + +class DBLiveStreamMetadataManager(DHTHashSupplier): + """This class stores all stream info in a leveldb database stored in the same directory as the blobfiles""" + + def __init__(self, db_dir, hash_announcer): + DHTHashSupplier.__init__(self, hash_announcer) + self.db_dir = db_dir + self.stream_info_db = None + self.stream_blob_db = None + self.stream_desc_db = None + + def setup(self): + return threads.deferToThread(self._open_db) + + def stop(self): + self.stream_info_db = None + self.stream_blob_db = None + self.stream_desc_db = None + return defer.succeed(True) + + def get_all_streams(self): + return threads.deferToThread(self._get_all_streams) + + def save_stream(self, stream_hash, pub_key, file_name, key, blobs): + next_announce_time = time.time() + self.hash_reannounce_time + d = threads.deferToThread(self._store_stream, stream_hash, pub_key, file_name, key, + next_announce_time=next_announce_time) + + def save_blobs(): + return self.add_blobs_to_stream(stream_hash, blobs) + + def announce_have_stream(): + if self.hash_announcer is not None: + self.hash_announcer.immediate_announce([stream_hash]) + return stream_hash + + d.addCallback(lambda _: save_blobs()) + d.addCallback(lambda _: announce_have_stream()) + return d + + def get_stream_info(self, stream_hash): + return threads.deferToThread(self._get_stream_info, stream_hash) + + def check_if_stream_exists(self, stream_hash): + return threads.deferToThread(self._check_if_stream_exists, stream_hash) + + def delete_stream(self, stream_hash): + return threads.deferToThread(self._delete_stream, stream_hash) + + def add_blobs_to_stream(self, stream_hash, blobs): + + def add_blobs(): + self._add_blobs_to_stream(stream_hash, blobs, ignore_duplicate_error=True) + + return threads.deferToThread(add_blobs) + + def get_blobs_for_stream(self, stream_hash, start_blob=None, end_blob=None, count=None, reverse=False): + logging.info("Getting blobs for a stream. 
Count is %s", str(count)) + + def get_positions_of_start_and_end(): + if start_blob is not None: + start_num = self._get_blob_num_by_hash(stream_hash, start_blob) + else: + start_num = None + if end_blob is not None: + end_num = self._get_blob_num_by_hash(stream_hash, end_blob) + else: + end_num = None + return start_num, end_num + + def get_blob_infos(nums): + start_num, end_num = nums + return threads.deferToThread(self._get_further_blob_infos, stream_hash, start_num, end_num, + count, reverse) + + d = threads.deferToThread(get_positions_of_start_and_end) + d.addCallback(get_blob_infos) + return d + + def get_stream_of_blob(self, blob_hash): + return threads.deferToThread(self._get_stream_of_blobhash, blob_hash) + + def save_sd_blob_hash_to_stream(self, stream_hash, sd_blob_hash): + return threads.deferToThread(self._save_sd_blob_hash_to_stream, stream_hash, sd_blob_hash) + + def get_sd_blob_hashes_for_stream(self, stream_hash): + return threads.deferToThread(self._get_sd_blob_hashes_for_stream, stream_hash) + + def hashes_to_announce(self): + next_announce_time = time.time() + self.hash_reannounce_time + return threads.deferToThread(self._get_streams_to_announce, next_announce_time) + + ######### database calls ######### + + def _open_db(self): + self.stream_info_db = leveldb.LevelDB(os.path.join(self.db_dir, "stream_info.db")) + self.stream_blob_db = leveldb.LevelDB(os.path.join(self.db_dir, "stream_blob.db")) + self.stream_desc_db = leveldb.LevelDB(os.path.join(self.db_dir, "stream_desc.db")) + + def _delete_stream(self, stream_hash): + desc_batch = leveldb.WriteBatch() + for sd_blob_hash, s_h in self.stream_desc_db.RangeIter(): + if stream_hash == s_h: + desc_batch.Delete(sd_blob_hash) + self.stream_desc_db.Write(desc_batch, sync=True) + + blob_batch = leveldb.WriteBatch() + for blob_hash_stream_hash, blob_info in self.stream_blob_db.RangeIter(): + b_h, s_h = json.loads(blob_hash_stream_hash) + if stream_hash == s_h: + blob_batch.Delete(blob_hash_stream_hash) + self.stream_blob_db.Write(blob_batch, sync=True) + + stream_batch = leveldb.WriteBatch() + for s_h, stream_info in self.stream_info_db.RangeIter(): + if stream_hash == s_h: + stream_batch.Delete(s_h) + self.stream_info_db.Write(stream_batch, sync=True) + + def _store_stream(self, stream_hash, public_key, name, key, next_announce_time=None): + try: + self.stream_info_db.Get(stream_hash) + raise DuplicateStreamHashError("Stream hash %s already exists" % stream_hash) + except KeyError: + pass + self.stream_info_db.Put(stream_hash, json.dumps((public_key, key, name, next_announce_time)), sync=True) + + def _get_all_streams(self): + return [stream_hash for stream_hash, stream_info in self.stream_info_db.RangeIter()] + + def _get_stream_info(self, stream_hash): + return json.loads(self.stream_info_db.Get(stream_hash))[:3] + + def _check_if_stream_exists(self, stream_hash): + try: + self.stream_info_db.Get(stream_hash) + return True + except KeyError: + return False + + def _get_streams_to_announce(self, next_announce_time): + # TODO: See if the following would be better for handling announce times: + # TODO: Have a separate db for them, and read the whole thing into memory + # TODO: on startup, and then write changes to db when they happen + stream_hashes = [] + batch = leveldb.WriteBatch() + current_time = time.time() + for stream_hash, stream_info in self.stream_info_db.RangeIter(): + public_key, key, name, announce_time = json.loads(stream_info) + if announce_time < current_time: + batch.Put(stream_hash, json.dumps((public_key, 
key, name, next_announce_time))) + stream_hashes.append(stream_hash) + self.stream_info_db.Write(batch, sync=True) + return stream_hashes + + def _get_blob_num_by_hash(self, stream_hash, blob_hash): + blob_hash_stream_hash = json.dumps((blob_hash, stream_hash)) + return json.loads(self.stream_blob_db.Get(blob_hash_stream_hash))[0] + + def _get_further_blob_infos(self, stream_hash, start_num, end_num, count=None, reverse=False): + blob_infos = [] + for blob_hash_stream_hash, blob_info in self.stream_blob_db.RangeIter(): + b_h, s_h = json.loads(blob_hash_stream_hash) + if stream_hash == s_h: + position, revision, iv, length, signature = json.loads(blob_info) + if (start_num is None) or (position > start_num): + if (end_num is None) or (position < end_num): + blob_infos.append((b_h, position, revision, iv, length, signature)) + blob_infos.sort(key=lambda i: i[1], reverse=reverse) + if count is not None: + blob_infos = blob_infos[:count] + return blob_infos + + def _add_blobs_to_stream(self, stream_hash, blob_infos, ignore_duplicate_error=False): + batch = leveldb.WriteBatch() + for blob_info in blob_infos: + blob_hash_stream_hash = json.dumps((blob_info.blob_hash, stream_hash)) + try: + self.stream_blob_db.Get(blob_hash_stream_hash) + if ignore_duplicate_error is False: + raise KeyError() # TODO: change this to DuplicateStreamBlobError? + continue + except KeyError: + pass + batch.Put(blob_hash_stream_hash, + json.dumps((blob_info.blob_num, + blob_info.revision, + blob_info.iv, + blob_info.length, + blob_info.signature))) + self.stream_blob_db.Write(batch, sync=True) + + def _get_stream_of_blobhash(self, blob_hash): + for blob_hash_stream_hash, blob_info in self.stream_blob_db.RangeIter(): + b_h, s_h = json.loads(blob_hash_stream_hash) + if blob_hash == b_h: + return s_h + return None + + def _save_sd_blob_hash_to_stream(self, stream_hash, sd_blob_hash): + self.stream_desc_db.Put(sd_blob_hash, stream_hash) + + def _get_sd_blob_hashes_for_stream(self, stream_hash): + return [sd_blob_hash for sd_blob_hash, s_h in self.stream_desc_db.RangeIter() if stream_hash == s_h] + + +class TempLiveStreamMetadataManager(DHTHashSupplier): + + def __init__(self, hash_announcer): + DHTHashSupplier.__init__(self, hash_announcer) + self.streams = {} + self.stream_blobs = {} + self.stream_desc = {} + + def setup(self): + return defer.succeed(True) + + def stop(self): + return defer.succeed(True) + + def get_all_streams(self): + return defer.succeed(self.streams.keys()) + + def save_stream(self, stream_hash, pub_key, file_name, key, blobs): + next_announce_time = time.time() + self.hash_reannounce_time + self.streams[stream_hash] = {'public_key': pub_key, 'stream_name': file_name, + 'key': key, 'next_announce_time': next_announce_time} + d = self.add_blobs_to_stream(stream_hash, blobs) + + def announce_have_stream(): + if self.hash_announcer is not None: + self.hash_announcer.immediate_announce([stream_hash]) + return stream_hash + + d.addCallback(lambda _: announce_have_stream()) + return d + + def get_stream_info(self, stream_hash): + if stream_hash in self.streams: + stream_info = self.streams[stream_hash] + return defer.succeed([stream_info['public_key'], stream_info['key'], stream_info['stream_name']]) + return defer.succeed(None) + + def delete_stream(self, stream_hash): + if stream_hash in self.streams: + del self.streams[stream_hash] + for (s_h, b_h) in self.stream_blobs.keys(): + if s_h == stream_hash: + del self.stream_blobs[(s_h, b_h)] + return defer.succeed(True) + + def add_blobs_to_stream(self, 
stream_hash, blobs): + assert stream_hash in self.streams, "Can't add blobs to a stream that isn't known" + for blob in blobs: + info = {} + info['blob_num'] = blob.blob_num + info['length'] = blob.length + info['iv'] = blob.iv + info['revision'] = blob.revision + info['signature'] = blob.signature + self.stream_blobs[(stream_hash, blob.blob_hash)] = info + return defer.succeed(True) + + def get_blobs_for_stream(self, stream_hash, start_blob=None, end_blob=None, count=None, reverse=False): + + if start_blob is not None: + start_num = self._get_blob_num_by_hash(stream_hash, start_blob) + else: + start_num = None + if end_blob is not None: + end_num = self._get_blob_num_by_hash(stream_hash, end_blob) + else: + end_num = None + return self._get_further_blob_infos(stream_hash, start_num, end_num, count, reverse) + + def get_stream_of_blob(self, blob_hash): + for (s_h, b_h) in self.stream_blobs.iterkeys(): + if b_h == blob_hash: + return defer.succeed(s_h) + return defer.succeed(None) + + def _get_further_blob_infos(self, stream_hash, start_num, end_num, count=None, reverse=False): + blob_infos = [] + for (s_h, b_h), info in self.stream_blobs.iteritems(): + if stream_hash == s_h: + position = info['blob_num'] + length = info['length'] + iv = info['iv'] + revision = info['revision'] + signature = info['signature'] + if (start_num is None) or (position > start_num): + if (end_num is None) or (position < end_num): + blob_infos.append((b_h, position, revision, iv, length, signature)) + blob_infos.sort(key=lambda i: i[1], reverse=reverse) + if count is not None: + blob_infos = blob_infos[:count] + return defer.succeed(blob_infos) + + def _get_blob_num_by_hash(self, stream_hash, blob_hash): + if (stream_hash, blob_hash) in self.stream_blobs: + return self.stream_blobs[(stream_hash, blob_hash)]['blob_num'] + + def save_sd_blob_hash_to_stream(self, stream_hash, sd_blob_hash): + self.stream_desc[sd_blob_hash] = stream_hash + return defer.succeed(True) + + def get_sd_blob_hashes_for_stream(self, stream_hash): + return defer.succeed([sd_hash for sd_hash, s_h in self.stream_desc.iteritems() if s_h == stream_hash]) + + def hashes_to_announce(self): + next_announce_time = time.time() + self.hash_reannounce_time + stream_hashes = [] + current_time = time.time() + for stream_hash, stream_info in self.streams.iteritems(): + announce_time = stream_info['announce_time'] + if announce_time < current_time: + self.streams[stream_hash]['announce_time'] = next_announce_time + stream_hashes.append(stream_hash) + return stream_hashes \ No newline at end of file diff --git a/lbrynet/lbrylive/PaymentRateManager.py b/lbrynet/lbrylive/PaymentRateManager.py new file mode 100644 index 000000000..77ff09030 --- /dev/null +++ b/lbrynet/lbrylive/PaymentRateManager.py @@ -0,0 +1,45 @@ +class BaseLiveStreamPaymentRateManager(object): + def __init__(self, blob_info_rate, blob_data_rate=None): + self.min_live_blob_info_payment_rate = blob_info_rate + self.min_blob_data_payment_rate = blob_data_rate + + +class LiveStreamPaymentRateManager(object): + def __init__(self, base_live_stream_payment_rate_manager, payment_rate_manager, + blob_info_rate=None, blob_data_rate=None): + self._base_live_stream_payment_rate_manager = base_live_stream_payment_rate_manager + self._payment_rate_manager = payment_rate_manager + self.min_live_blob_info_payment_rate = blob_info_rate + self.min_blob_data_payment_rate = blob_data_rate + self.points_paid = 0.0 + + def get_rate_live_blob_info(self, peer): + return 
self.get_effective_min_live_blob_info_payment_rate() + + def accept_rate_live_blob_info(self, peer, payment_rate): + return payment_rate >= self.get_effective_min_live_blob_info_payment_rate() + + def get_rate_blob_data(self, peer): + return self.get_effective_min_blob_data_payment_rate() + + def accept_rate_blob_data(self, peer, payment_rate): + return payment_rate >= self.get_effective_min_blob_data_payment_rate() + + def get_effective_min_blob_data_payment_rate(self): + rate = self.min_blob_data_payment_rate + if rate is None: + rate = self._payment_rate_manager.min_blob_data_payment_rate + if rate is None: + rate = self._base_live_stream_payment_rate_manager.min_blob_data_payment_rate + if rate is None: + rate = self._payment_rate_manager.get_effective_min_blob_data_payment_rate() + return rate + + def get_effective_min_live_blob_info_payment_rate(self): + rate = self.min_live_blob_info_payment_rate + if rate is None: + rate = self._base_live_stream_payment_rate_manager.min_live_blob_info_payment_rate + return rate + + def record_points_paid(self, amount): + self.points_paid += amount \ No newline at end of file diff --git a/lbrynet/lbrylive/StreamDescriptor.py b/lbrynet/lbrylive/StreamDescriptor.py new file mode 100644 index 000000000..441c26ea4 --- /dev/null +++ b/lbrynet/lbrylive/StreamDescriptor.py @@ -0,0 +1,131 @@ +import binascii +import logging +from lbrynet.core.cryptoutils import get_lbry_hash_obj, verify_signature +from twisted.internet import defer, threads +from lbrynet.core.Error import DuplicateStreamHashError +from lbrynet.lbrylive.LiveBlob import LiveBlobInfo +from lbrynet.interfaces import IStreamDescriptorValidator +from zope.interface import implements + + +LiveStreamType = "lbrylive" + + +def save_sd_info(stream_info_manager, sd_info, ignore_duplicate=False): + logging.debug("Saving info for %s", str(sd_info['stream_name'])) + hex_stream_name = sd_info['stream_name'] + public_key = sd_info['public_key'] + key = sd_info['key'] + stream_hash = sd_info['stream_hash'] + raw_blobs = sd_info['blobs'] + crypt_blobs = [] + for blob in raw_blobs: + length = blob['length'] + if length != 0: + blob_hash = blob['blob_hash'] + else: + blob_hash = None + blob_num = blob['blob_num'] + revision = blob['revision'] + iv = blob['iv'] + signature = blob['signature'] + crypt_blobs.append(LiveBlobInfo(blob_hash, blob_num, length, iv, revision, signature)) + logging.debug("Trying to save stream info for %s", str(hex_stream_name)) + d = stream_info_manager.save_stream(stream_hash, public_key, hex_stream_name, + key, crypt_blobs) + + def check_if_duplicate(err): + if ignore_duplicate is True: + err.trap(DuplicateStreamHashError) + + d.addErrback(check_if_duplicate) + + d.addCallback(lambda _: stream_hash) + return d + + +def get_sd_info(stream_info_manager, stream_hash, include_blobs): + d = stream_info_manager.get_stream_info(stream_hash) + + def format_info(stream_info): + fields = {} + fields['stream_type'] = LiveStreamType + fields['stream_name'] = stream_info[2] + fields['public_key'] = stream_info[0] + fields['key'] = stream_info[1] + fields['stream_hash'] = stream_hash + + def format_blobs(blobs): + formatted_blobs = [] + for blob_hash, blob_num, revision, iv, length, signature in blobs: + blob = {} + if length != 0: + blob['blob_hash'] = blob_hash + blob['blob_num'] = blob_num + blob['revision'] = revision + blob['iv'] = iv + blob['length'] = length + blob['signature'] = signature + formatted_blobs.append(blob) + fields['blobs'] = formatted_blobs + return fields + + if 
include_blobs is True: + d = stream_info_manager.get_blobs_for_stream(stream_hash) + else: + d = defer.succeed([]) + d.addCallback(format_blobs) + return d + + d.addCallback(format_info) + return d + + +class LBRYLiveStreamDescriptorValidator(object): + implements(IStreamDescriptorValidator) + + def __init__(self, raw_info): + self.raw_info = raw_info + + def validate(self): + logging.debug("Trying to validate stream descriptor for %s", str(self.raw_info['stream_name'])) + hex_stream_name = self.raw_info['stream_name'] + public_key = self.raw_info['public_key'] + key = self.raw_info['key'] + stream_hash = self.raw_info['stream_hash'] + h = get_lbry_hash_obj() + h.update(hex_stream_name) + h.update(public_key) + h.update(key) + if h.hexdigest() != stream_hash: + raise ValueError("Stream hash does not match stream metadata") + blobs = self.raw_info['blobs'] + + def check_blob_signatures(): + for blob in blobs: + length = blob['length'] + if length != 0: + blob_hash = blob['blob_hash'] + else: + blob_hash = None + blob_num = blob['blob_num'] + revision = blob['revision'] + iv = blob['iv'] + signature = blob['signature'] + hashsum = get_lbry_hash_obj() + hashsum.update(stream_hash) + if length != 0: + hashsum.update(blob_hash) + hashsum.update(str(blob_num)) + hashsum.update(str(revision)) + hashsum.update(iv) + hashsum.update(str(length)) + if not verify_signature(hashsum.digest(), signature, public_key): + raise ValueError("Invalid signature in stream descriptor") + + return threads.deferToThread(check_blob_signatures) + + def info_to_show(self): + info = [] + info.append(("stream_name", binascii.unhexlify(self.raw_info.get("stream_name")))) + return info \ No newline at end of file diff --git a/lbrynet/lbrylive/__init__.py b/lbrynet/lbrylive/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lbrynet/lbrylive/client/LiveStreamDownloader.py b/lbrynet/lbrylive/client/LiveStreamDownloader.py new file mode 100644 index 000000000..da7efae0f --- /dev/null +++ b/lbrynet/lbrylive/client/LiveStreamDownloader.py @@ -0,0 +1,180 @@ +import binascii +from lbrynet.core.DownloadOption import DownloadOption +from lbrynet.cryptstream.client.CryptStreamDownloader import CryptStreamDownloader +from zope.interface import implements +from lbrynet.lbrylive.client.LiveStreamMetadataHandler import LiveStreamMetadataHandler +from lbrynet.lbrylive.client.LiveStreamProgressManager import LiveStreamProgressManager +import os +from lbrynet.lbrylive.StreamDescriptor import save_sd_info +from lbrynet.lbrylive.PaymentRateManager import LiveStreamPaymentRateManager +from twisted.internet import defer, threads # , process +from lbrynet.interfaces import IStreamDownloaderFactory + + +class LiveStreamDownloader(CryptStreamDownloader): + + def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, stream_info_manager, + payment_rate_manager, wallet, upload_allowed): + CryptStreamDownloader.__init__(self, peer_finder, rate_limiter, blob_manager, + payment_rate_manager, wallet, upload_allowed) + self.stream_hash = stream_hash + self.stream_info_manager = stream_info_manager + self.public_key = None + + def set_stream_info(self): + if self.public_key is None and self.key is None: + + d = self.stream_info_manager.get_stream_info(self.stream_hash) + + def set_stream_info(stream_info): + public_key, key, stream_name = stream_info + self.public_key = public_key + self.key = binascii.unhexlify(key) + self.stream_name = binascii.unhexlify(stream_name) + + d.addCallback(set_stream_info) + return d + 
else: + return defer.succeed(True) + + +class LBRYLiveStreamDownloader(LiveStreamDownloader): + def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, stream_info_manager, + payment_rate_manager, wallet, upload_allowed): + LiveStreamDownloader.__init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, + stream_info_manager, payment_rate_manager, wallet, upload_allowed) + + #self.writer = process.ProcessWriter(reactor, self, 'write', 1) + + def _get_metadata_handler(self, download_manager): + return LiveStreamMetadataHandler(self.stream_hash, self.stream_info_manager, + self.peer_finder, self.public_key, False, + self.payment_rate_manager, self.wallet, download_manager, 10) + + def _get_progress_manager(self, download_manager): + return LiveStreamProgressManager(self._finished_downloading, self.blob_manager, download_manager, + delete_blob_after_finished=True, download_whole=False, + max_before_skip_ahead=10) + + def _get_write_func(self): + def write_func(data): + if self.stopped is False: + #self.writer.write(data) + pass + return write_func + + +class FullLiveStreamDownloader(LiveStreamDownloader): + def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, stream_info_manager, + payment_rate_manager, wallet, upload_allowed): + LiveStreamDownloader.__init__(self, stream_hash, peer_finder, rate_limiter, + blob_manager, stream_info_manager, payment_rate_manager, + wallet, upload_allowed) + self.file_handle = None + self.file_name = None + + def set_stream_info(self): + d = LiveStreamDownloader.set_stream_info(self) + + def set_file_name_if_unset(): + if not self.file_name: + if not self.stream_name: + self.stream_name = "_" + self.file_name = os.path.basename(self.stream_name) + + d.addCallback(lambda _: set_file_name_if_unset()) + return d + + def stop(self): + d = self._close_file() + d.addBoth(lambda _: LiveStreamDownloader.stop(self)) + return d + + def _start(self): + if self.file_handle is None: + d = self._open_file() + else: + d = defer.succeed(True) + d.addCallback(lambda _: LiveStreamDownloader._start(self)) + return d + + def _open_file(self): + def open_file(): + self.file_handle = open(self.file_name, 'wb') + return threads.deferToThread(open_file) + + def _get_metadata_handler(self, download_manager): + return LiveStreamMetadataHandler(self.stream_hash, self.stream_info_manager, + self.peer_finder, self.public_key, True, + self.payment_rate_manager, self.wallet, download_manager) + + def _get_primary_request_creators(self, download_manager): + return [download_manager.blob_requester, download_manager.blob_info_finder] + + def _get_write_func(self): + def write_func(data): + if self.stopped is False: + self.file_handle.write(data) + return write_func + + def _close_file(self): + def close_file(): + if self.file_handle is not None: + self.file_handle.close() + self.file_handle = None + return threads.deferToThread(close_file) + + +class FullLiveStreamDownloaderFactory(object): + + implements(IStreamDownloaderFactory) + + def __init__(self, peer_finder, rate_limiter, blob_manager, stream_info_manager, wallet, + default_payment_rate_manager): + self.peer_finder = peer_finder + self.rate_limiter = rate_limiter + self.blob_manager = blob_manager + self.stream_info_manager = stream_info_manager + self.wallet = wallet + self.default_payment_rate_manager = default_payment_rate_manager + + def get_downloader_options(self, sd_validator, payment_rate_manager): + options = [ + DownloadOption( + [float, None], + "rate which will be paid for data 
(None means use application default)", + "data payment rate", + None + ), + DownloadOption( + [float, None], + "rate which will be paid for metadata (None means use application default)", + "metadata payment rate", + None + ), + DownloadOption( + [bool], + "allow reuploading data downloaded for this file", + "allow upload", + True + ), + ] + return options + + def make_downloader(self, sd_validator, options, payment_rate_manager): + # TODO: check options for payment rate manager parameters + payment_rate_manager = LiveStreamPaymentRateManager(self.default_payment_rate_manager, + payment_rate_manager) + d = save_sd_info(self.stream_info_manager, sd_validator.raw_info) + + def create_downloader(stream_hash): + stream_downloader = FullLiveStreamDownloader(stream_hash, self.peer_finder, self.rate_limiter, + self.blob_manager, self.stream_info_manager, + payment_rate_manager, self.wallet, True) + # TODO: change upload_allowed=True above to something better + d = stream_downloader.set_stream_info() + d.addCallback(lambda _: stream_downloader) + return d + + d.addCallback(create_downloader) + return d \ No newline at end of file diff --git a/lbrynet/lbrylive/client/LiveStreamMetadataHandler.py b/lbrynet/lbrylive/client/LiveStreamMetadataHandler.py new file mode 100644 index 000000000..2167bd719 --- /dev/null +++ b/lbrynet/lbrylive/client/LiveStreamMetadataHandler.py @@ -0,0 +1,342 @@ +from collections import defaultdict +import logging +from zope.interface import implements +from twisted.internet import defer +from twisted.python.failure import Failure +from lbrynet.conf import MAX_BLOB_INFOS_TO_REQUEST +from lbrynet.core.client.ClientRequest import ClientRequest, ClientPaidRequest +from lbrynet.lbrylive.LiveBlob import LiveBlobInfo +from lbrynet.core.cryptoutils import get_lbry_hash_obj, verify_signature +from lbrynet.interfaces import IRequestCreator, IMetadataHandler +from lbrynet.core.Error import InsufficientFundsError, InvalidResponseError, RequestCanceledError +from lbrynet.core.Error import NoResponseError + + +class LiveStreamMetadataHandler(object): + implements(IRequestCreator, IMetadataHandler) + + def __init__(self, stream_hash, stream_info_manager, peer_finder, stream_pub_key, download_whole, + payment_rate_manager, wallet, download_manager, max_before_skip_ahead=None): + self.stream_hash = stream_hash + self.stream_info_manager = stream_info_manager + self.payment_rate_manager = payment_rate_manager + self.wallet = wallet + self.peer_finder = peer_finder + self.stream_pub_key = stream_pub_key + self.download_whole = download_whole + self.max_before_skip_ahead = max_before_skip_ahead + if self.download_whole is False: + assert self.max_before_skip_ahead is not None, \ + "If download whole is False, max_before_skip_ahead must be set" + self.download_manager = download_manager + self._peers = defaultdict(int) # {Peer: score} + self._protocol_prices = {} + self._final_blob_num = None + self._price_disagreements = [] # [Peer] + self._incompatible_peers = [] # [Peer] + + ######### IMetadataHandler ######### + + def get_initial_blobs(self): + d = self.stream_info_manager.get_blobs_for_stream(self.stream_hash) + d.addCallback(self._format_initial_blobs_for_download_manager) + return d + + def final_blob_num(self): + return self._final_blob_num + + ######## IRequestCreator ######### + + def send_next_request(self, peer, protocol): + if self._finished_discovery() is False and self._should_send_request_to(peer) is True: + p_r = None + if not self._price_settled(protocol): + p_r = 
self._get_price_request(peer, protocol) + d_r = self._get_discover_request(peer) + reserved_points = self._reserve_points(peer, protocol, d_r.max_pay_units) + if reserved_points is not None: + d1 = protocol.add_request(d_r) + d1.addCallback(self._handle_discover_response, peer, d_r) + d1.addBoth(self._pay_or_cancel_payment, protocol, reserved_points) + d1.addErrback(self._request_failed, peer) + if p_r is not None: + d2 = protocol.add_request(p_r) + d2.addCallback(self._handle_price_response, peer, p_r, protocol) + d2.addErrback(self._request_failed, peer) + return defer.succeed(True) + else: + return defer.fail(InsufficientFundsError()) + return defer.succeed(False) + + def get_new_peers(self): + d = self._get_hash_for_peer_search() + d.addCallback(self._find_peers_for_hash) + return d + + ######### internal calls ######### + + def _get_hash_for_peer_search(self): + r = None + if self._finished_discovery() is False: + r = self.stream_hash + logging.debug("Info finder peer search response for stream %s: %s", str(self.stream_hash), str(r)) + return defer.succeed(r) + + def _find_peers_for_hash(self, h): + if h is None: + return None + else: + d = self.peer_finder.find_peers_for_blob(h) + + def choose_best_peers(peers): + bad_peers = self._get_bad_peers() + return [p for p in peers if not p in bad_peers] + + d.addCallback(choose_best_peers) + return d + + def _format_initial_blobs_for_download_manager(self, blob_infos): + infos = [] + for blob_hash, blob_num, revision, iv, length, signature in blob_infos: + if blob_hash is not None: + infos.append(LiveBlobInfo(blob_hash, blob_num, length, iv, revision, signature)) + else: + logging.debug("Setting _final_blob_num to %s", str(blob_num - 1)) + self._final_blob_num = blob_num - 1 + return infos + + def _should_send_request_to(self, peer): + if self._peers[peer] < -5.0: + return False + if peer in self._price_disagreements: + return False + return True + + def _get_bad_peers(self): + return [p for p in self._peers.iterkeys() if not self._should_send_request_to(p)] + + def _finished_discovery(self): + if self._get_discovery_params() is None: + return True + return False + + def _get_discover_request(self, peer): + discovery_params = self._get_discovery_params() + if discovery_params: + further_blobs_request = {} + reference, start, end, count = discovery_params + further_blobs_request['reference'] = reference + if start is not None: + further_blobs_request['start'] = start + if end is not None: + further_blobs_request['end'] = end + if count is not None: + further_blobs_request['count'] = count + else: + further_blobs_request['count'] = MAX_BLOB_INFOS_TO_REQUEST + logging.debug("Requesting %s blob infos from %s", str(further_blobs_request['count']), str(peer)) + r_dict = {'further_blobs': further_blobs_request} + response_identifier = 'further_blobs' + request = ClientPaidRequest(r_dict, response_identifier, further_blobs_request['count']) + return request + return None + + def _get_discovery_params(self): + logging.debug("In _get_discovery_params") + stream_position = self.download_manager.stream_position() + blobs = self.download_manager.blobs + if blobs: + last_blob_num = max(blobs.iterkeys()) + else: + last_blob_num = -1 + final_blob_num = self.final_blob_num() + if final_blob_num is not None: + last_blob_num = final_blob_num + if self.download_whole is False: + logging.debug("download_whole is False") + if final_blob_num is not None: + for i in xrange(stream_position, final_blob_num + 1): + if not i in blobs: + count = 
min(self.max_before_skip_ahead, (final_blob_num - i + 1)) + return self.stream_hash, None, 'end', count + return None + else: + if blobs: + for i in xrange(stream_position, last_blob_num + 1): + if not i in blobs: + if i == 0: + return self.stream_hash, 'beginning', 'end', -1 * self.max_before_skip_ahead + else: + return self.stream_hash, blobs[i-1].blob_hash, 'end', -1 * self.max_before_skip_ahead + return self.stream_hash, blobs[last_blob_num].blob_hash, 'end', -1 * self.max_before_skip_ahead + else: + return self.stream_hash, None, 'end', -1 * self.max_before_skip_ahead + logging.debug("download_whole is True") + beginning = None + end = None + for i in xrange(stream_position, last_blob_num + 1): + if not i in blobs: + if beginning is None: + if i == 0: + beginning = 'beginning' + else: + beginning = blobs[i-1].blob_hash + else: + if beginning is not None: + end = blobs[i].blob_hash + break + if beginning is None: + if final_blob_num is not None: + logging.debug("Discovery is finished. stream_position: %s, last_blob_num + 1: %s", str(stream_position), + str(last_blob_num + 1)) + return None + else: + logging.debug("Discovery is not finished. final blob num is unknown.") + if last_blob_num != -1: + return self.stream_hash, blobs[last_blob_num].blob_hash, None, None + else: + return self.stream_hash, 'beginning', None, None + else: + logging.info("Discovery is not finished. Not all blobs are known.") + return self.stream_hash, beginning, end, None + + def _price_settled(self, protocol): + if protocol in self._protocol_prices: + return True + return False + + def _update_local_score(self, peer, amount): + self._peers[peer] += amount + + def _reserve_points(self, peer, protocol, max_infos): + assert protocol in self._protocol_prices + point_amount = 1.0 * max_infos * self._protocol_prices[protocol] / 1000.0 + return self.wallet.reserve_points(peer, point_amount) + + def _pay_or_cancel_payment(self, arg, protocol, reserved_points): + if isinstance(arg, Failure) or arg == 0: + self._cancel_points(reserved_points) + else: + self._pay_peer(protocol, arg, reserved_points) + return arg + + def _pay_peer(self, protocol, num_infos, reserved_points): + assert num_infos != 0 + assert protocol in self._protocol_prices + point_amount = 1.0 * num_infos * self._protocol_prices[protocol] / 1000.0 + self.wallet.send_points(reserved_points, point_amount) + self.payment_rate_manager.record_points_paid(point_amount) + + def _cancel_points(self, reserved_points): + return self.wallet.cancel_point_reservation(reserved_points) + + def _get_price_request(self, peer, protocol): + self._protocol_prices[protocol] = self.payment_rate_manager.get_rate_live_blob_info(peer) + request_dict = {'blob_info_payment_rate': self._protocol_prices[protocol]} + request = ClientRequest(request_dict, 'blob_info_payment_rate') + return request + + def _handle_price_response(self, response_dict, peer, request, protocol): + if not request.response_identifier in response_dict: + return InvalidResponseError("response identifier not in response") + assert protocol in self._protocol_prices + response = response_dict[request.response_identifier] + if response == "RATE_ACCEPTED": + return True + else: + logging.info("Rate offer has been rejected by %s", str(peer)) + del self._protocol_prices[protocol] + self._price_disagreements.append(peer) + return True + + def _handle_discover_response(self, response_dict, peer, request): + if not request.response_identifier in response_dict: + return InvalidResponseError("response identifier not in 
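# A worked example of the per-1000 pricing used by _reserve_points and _pay_peer above
# (the rate and blob-info counts here are made up for illustration, not defaults):
rate = 0.5                               # points per 1000 blob infos, as negotiated
reserved = 1.0 * 100 * rate / 1000.0     # escrow enough for 100 infos -> 0.05 points
paid = 1.0 * 40 * rate / 1000.0          # peer actually sent 40 infos  -> 0.02 points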
response") + response = response_dict[request.response_identifier] + blob_infos = [] + if 'error' in response: + if response['error'] == 'RATE_UNSET': + return defer.succeed(0) + else: + return InvalidResponseError("Got an unknown error from the peer: %s" % + (response['error'],)) + if not 'blob_infos' in response: + return InvalidResponseError("Missing the required field 'blob_infos'") + raw_blob_infos = response['blob_infos'] + logging.info("Handling %s further blobs from %s", str(len(raw_blob_infos)), str(peer)) + logging.debug("blobs: %s", str(raw_blob_infos)) + for raw_blob_info in raw_blob_infos: + length = raw_blob_info['length'] + if length != 0: + blob_hash = raw_blob_info['blob_hash'] + else: + blob_hash = None + num = raw_blob_info['blob_num'] + revision = raw_blob_info['revision'] + iv = raw_blob_info['iv'] + signature = raw_blob_info['signature'] + blob_info = LiveBlobInfo(blob_hash, num, length, iv, revision, signature) + logging.debug("Learned about a potential blob: %s", str(blob_hash)) + if self._verify_blob(blob_info): + if blob_hash is None: + logging.info("Setting _final_blob_num to %s", str(num - 1)) + self._final_blob_num = num - 1 + else: + blob_infos.append(blob_info) + else: + raise ValueError("Peer sent an invalid blob info") + d = self.stream_info_manager.add_blobs_to_stream(self.stream_hash, blob_infos) + + def add_blobs_to_download_manager(): + blob_nums = [b.blob_num for b in blob_infos] + logging.info("Adding the following blob nums to the download manager: %s", str(blob_nums)) + self.download_manager.add_blobs_to_download(blob_infos) + + d.addCallback(lambda _: add_blobs_to_download_manager()) + + def pay_or_penalize_peer(): + if len(blob_infos): + self._update_local_score(peer, len(blob_infos)) + peer.update_stats('downloaded_crypt_blob_infos', len(blob_infos)) + peer.update_score(len(blob_infos)) + else: + self._update_local_score(peer, -.0001) + return len(blob_infos) + + d.addCallback(lambda _: pay_or_penalize_peer()) + + return d + + def _verify_blob(self, blob): + logging.debug("Got an unverified blob to check:") + logging.debug("blob_hash: %s", blob.blob_hash) + logging.debug("blob_num: %s", str(blob.blob_num)) + logging.debug("revision: %s", str(blob.revision)) + logging.debug("iv: %s", blob.iv) + logging.debug("length: %s", str(blob.length)) + hashsum = get_lbry_hash_obj() + hashsum.update(self.stream_hash) + if blob.length != 0: + hashsum.update(blob.blob_hash) + hashsum.update(str(blob.blob_num)) + hashsum.update(str(blob.revision)) + hashsum.update(blob.iv) + hashsum.update(str(blob.length)) + logging.debug("hexdigest to be verified: %s", hashsum.hexdigest()) + if verify_signature(hashsum.digest(), blob.signature, self.stream_pub_key): + logging.debug("Blob info is valid") + return True + else: + logging.debug("The blob info is invalid") + return False + + def _request_failed(self, reason, peer): + if reason.check(RequestCanceledError): + return + if reason.check(NoResponseError): + self._incompatible_peers.append(peer) + return + logging.warning("Crypt stream info finder: a request failed. 
Reason: %s", reason.getErrorMessage()) + self._update_local_score(peer, -5.0) + peer.update_score(-10.0) + return reason \ No newline at end of file diff --git a/lbrynet/lbrylive/client/LiveStreamProgressManager.py b/lbrynet/lbrylive/client/LiveStreamProgressManager.py new file mode 100644 index 000000000..057f3fc7f --- /dev/null +++ b/lbrynet/lbrylive/client/LiveStreamProgressManager.py @@ -0,0 +1,87 @@ +import logging +from lbrynet.core.client.StreamProgressManager import StreamProgressManager +from twisted.internet import defer + + +class LiveStreamProgressManager(StreamProgressManager): + def __init__(self, finished_callback, blob_manager, download_manager, delete_blob_after_finished=False, + download_whole=True, max_before_skip_ahead=5): + self.download_whole = download_whole + self.max_before_skip_ahead = max_before_skip_ahead + StreamProgressManager.__init__(self, finished_callback, blob_manager, download_manager, + delete_blob_after_finished) + + ######### IProgressManager ######### + + def stream_position(self): + blobs = self.download_manager.blobs + if not blobs: + return 0 + else: + newest_known_blobnum = max(blobs.iterkeys()) + position = newest_known_blobnum + oldest_relevant_blob_num = (max(0, newest_known_blobnum - self.max_before_skip_ahead + 1)) + for i in xrange(newest_known_blobnum, oldest_relevant_blob_num - 1, -1): + if i in blobs and (not blobs[i].is_validated() and not i in self.provided_blob_nums): + position = i + return position + + def needed_blobs(self): + blobs = self.download_manager.blobs + stream_position = self.stream_position() + if blobs: + newest_known_blobnum = max(blobs.iterkeys()) + else: + newest_known_blobnum = -1 + blobs_needed = [] + for i in xrange(stream_position, newest_known_blobnum + 1): + if i in blobs and not blobs[i].is_validated() and not i in self.provided_blob_nums: + blobs_needed.append(blobs[i]) + return blobs_needed + + ######### internal ######### + + def _output_loop(self): + + from twisted.internet import reactor + + if self.stopped is True: + if self.outputting_d is not None: + self.outputting_d.callback(True) + self.outputting_d = None + return + + blobs = self.download_manager.blobs + logging.info("In _output_loop. 
last_blob_outputted: %s", str(self.last_blob_outputted)) + if blobs: + logging.debug("Newest blob number: %s", str(max(blobs.iterkeys()))) + if self.outputting_d is None: + self.outputting_d = defer.Deferred() + + current_blob_num = self.last_blob_outputted + 1 + + def finished_outputting_blob(): + self.last_blob_outputted += 1 + final_blob_num = self.download_manager.final_blob_num() + if final_blob_num is not None and final_blob_num == self.last_blob_outputted: + self._finished_outputting() + self.outputting_d.callback(True) + self.outputting_d = None + else: + reactor.callLater(0, self._output_loop) + + if current_blob_num in blobs and blobs[current_blob_num].is_validated(): + logging.info("Outputting blob %s", str(current_blob_num)) + self.provided_blob_nums.append(current_blob_num) + d = self.download_manager.handle_blob(current_blob_num) + d.addCallback(lambda _: finished_outputting_blob()) + d.addCallback(lambda _: self._finished_with_blob(current_blob_num)) + elif blobs and max(blobs.iterkeys()) > self.last_blob_outputted + self.max_before_skip_ahead - 1: + self.last_blob_outputted += 1 + logging.info("Skipping blob number %s due to knowing about blob number %s", + str(self.last_blob_outputted), str(max(blobs.iterkeys()))) + self._finished_with_blob(current_blob_num) + reactor.callLater(0, self._output_loop) + else: + self.outputting_d.callback(True) + self.outputting_d = None \ No newline at end of file diff --git a/lbrynet/lbrylive/client/__init__.py b/lbrynet/lbrylive/client/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lbrynet/lbrylive/server/LiveBlobInfoQueryHandler.py b/lbrynet/lbrylive/server/LiveBlobInfoQueryHandler.py new file mode 100644 index 000000000..0308df168 --- /dev/null +++ b/lbrynet/lbrylive/server/LiveBlobInfoQueryHandler.py @@ -0,0 +1,180 @@ +import logging +from twisted.internet import defer +from zope.interface import implements +from lbrynet.interfaces import IQueryHandlerFactory, IQueryHandler + + +class CryptBlobInfoQueryHandlerFactory(object): + implements(IQueryHandlerFactory) + + def __init__(self, stream_info_manager, wallet, payment_rate_manager): + self.stream_info_manager = stream_info_manager + self.wallet = wallet + self.payment_rate_manager = payment_rate_manager + + ######### IQueryHandlerFactory ######### + + def build_query_handler(self): + q_h = CryptBlobInfoQueryHandler(self.stream_info_manager, self.wallet, self.payment_rate_manager) + return q_h + + def get_primary_query_identifier(self): + return 'further_blobs' + + def get_description(self): + return ("Stream Blob Information - blob hashes that are associated with streams," + " and the blobs' associated metadata") + + +class CryptBlobInfoQueryHandler(object): + implements(IQueryHandler) + + def __init__(self, stream_info_manager, wallet, payment_rate_manager): + self.stream_info_manager = stream_info_manager + self.wallet = wallet + self.payment_rate_manager = payment_rate_manager + self.query_identifiers = ['blob_info_payment_rate', 'further_blobs'] + self.blob_info_payment_rate = None + self.peer = None + + ######### IQueryHandler ######### + + def register_with_request_handler(self, request_handler, peer): + self.peer = peer + request_handler.register_query_handler(self, self.query_identifiers) + + def handle_queries(self, queries): + response = {} + + if self.query_identifiers[0] in queries: + if not self.handle_blob_info_payment_rate(queries[self.query_identifiers[0]]): + return defer.succeed({'blob_info_payment_rate': 'RATE_TOO_LOW'}) + else: + 
response['blob_info_payment_rate'] = "RATE_ACCEPTED" + + if self.query_identifiers[1] in queries: + further_blobs_request = queries[self.query_identifiers[1]] + logging.debug("Received the client's request for additional blob information") + + if self.blob_info_payment_rate is None: + response['further_blobs'] = {'error': 'RATE_UNSET'} + return defer.succeed(response) + + def count_and_charge(blob_infos): + if len(blob_infos) != 0: + logging.info("Responding with %s infos", str(len(blob_infos))) + expected_payment = 1.0 * len(blob_infos) * self.blob_info_payment_rate / 1000.0 + self.wallet.add_expected_payment(self.peer, expected_payment) + self.peer.update_stats('uploaded_crypt_blob_infos', len(blob_infos)) + return blob_infos + + def set_field(further_blobs): + response['further_blobs'] = {'blob_infos': further_blobs} + return response + + def get_further_blobs(stream_hash): + if stream_hash is None: + response['further_blobs'] = {'error': 'REFERENCE_HASH_UNKNOWN'} + return defer.succeed(response) + start = further_blobs_request.get("start") + end = further_blobs_request.get("end") + count = further_blobs_request.get("count") + if count is not None: + try: + count = int(count) + except ValueError: + response['further_blobs'] = {'error': 'COUNT_NON_INTEGER'} + return defer.succeed(response) + + if len([x for x in [start, end, count] if x is not None]) < 2: + response['further_blobs'] = {'error': 'TOO_FEW_PARAMETERS'} + return defer.succeed(response) + + inner_d = self.get_further_blobs(stream_hash, start, end, count) + + inner_d.addCallback(count_and_charge) + inner_d.addCallback(self.format_blob_infos) + inner_d.addCallback(set_field) + return inner_d + + if 'reference' in further_blobs_request: + d = self.get_stream_hash_from_reference(further_blobs_request['reference']) + d.addCallback(get_further_blobs) + return d + else: + response['further_blobs'] = {'error': 'NO_REFERENCE_SENT'} + return defer.succeed(response) + else: + return defer.succeed({}) + + ######### internal ######### + + def handle_blob_info_payment_rate(self, requested_payment_rate): + if not self.payment_rate_manager.accept_rate_live_blob_info(self.peer, requested_payment_rate): + return False + else: + self.blob_info_payment_rate = requested_payment_rate + return True + + def format_blob_infos(self, blobs): + blob_infos = [] + for blob_hash, blob_num, revision, iv, length, signature in blobs: + blob_info = {} + if length != 0: + blob_info['blob_hash'] = blob_hash + blob_info['blob_num'] = blob_num + blob_info['revision'] = revision + blob_info['iv'] = iv + blob_info['length'] = length + blob_info['signature'] = signature + blob_infos.append(blob_info) + return blob_infos + + def get_stream_hash_from_reference(self, reference): + d = self.stream_info_manager.check_if_stream_exists(reference) + + def check_if_stream_found(result): + if result is True: + return reference + else: + return self.stream_info_manager.get_stream_of_blob(reference) + + d.addCallback(check_if_stream_found) + return d + + def get_further_blobs(self, stream_hash, start, end, count): + ds = [] + if start is not None and start != "beginning": + ds.append(self.stream_info_manager.get_stream_of_blob(start)) + if end is not None and end != 'end': + ds.append(self.stream_info_manager.get_stream_of_blob(end)) + dl = defer.DeferredList(ds, fireOnOneErrback=True) + + def ensure_streams_match(results): + for success, stream_of_blob in results: + if stream_of_blob != stream_hash: + raise ValueError("Blob does not match stream") + return True + + def 
get_blob_infos(): + reverse = False + count_to_use = count + if start is None: + reverse = True + elif end is not None and count_to_use is not None and count_to_use < 0: + reverse = True + if count_to_use is not None and count_to_use < 0: + count_to_use *= -1 + if start == "beginning" or start is None: + s = None + else: + s = start + if end == "end" or end is None: + e = None + else: + e = end + return self.stream_info_manager.get_blobs_for_stream(stream_hash, s, e, count_to_use, reverse) + + dl.addCallback(ensure_streams_match) + dl.addCallback(lambda _: get_blob_infos()) + return dl \ No newline at end of file diff --git a/lbrynet/lbrylive/server/__init__.py b/lbrynet/lbrylive/server/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lbrynet/lbrynet_console/ConsoleControl.py b/lbrynet/lbrynet_console/ConsoleControl.py new file mode 100644 index 000000000..5d7566516 --- /dev/null +++ b/lbrynet/lbrynet_console/ConsoleControl.py @@ -0,0 +1,60 @@ +from twisted.protocols import basic +from twisted.internet import defer + + +class ConsoleControl(basic.LineReceiver): + from os import linesep as delimiter + + def __init__(self, control_handlers): + self.control_handlers = {} + self.categories = {} + categories = set([category for category, handler in control_handlers]) + prompt_number = 0 + for category in categories: + self.categories[prompt_number] = category + for handler in [handler for cat, handler in control_handlers if cat == category]: + self.control_handlers[prompt_number] = handler + prompt_number += 1 + self.current_handler = None + + def connectionMade(self): + self.show_prompt() + + def lineReceived(self, line): + + def show_response(response): + if response is not None: + self.sendLine(response) + + def show_error(err): + self.sendLine(err.getTraceback()) + + if self.current_handler is None: + try: + num = int(line) + except ValueError: + num = None + if num in self.control_handlers: + self.current_handler = self.control_handlers[num].get_handler() + line = None + if self.current_handler is not None: + try: + r = self.current_handler.handle_line(line) + done, ds = r[0], [d for d in r[1:] if d is not None] + except Exception as e: + done = True + ds = [defer.fail(e)] + if done is True: + self.current_handler = None + map(lambda d: d.addCallbacks(show_response, show_error), ds) + if self.current_handler is None: + self.show_prompt() + + def show_prompt(self): + self.sendLine("Options:") + for num, handler in self.control_handlers.iteritems(): + if num in self.categories: + self.sendLine("") + self.sendLine(self.categories[num]) + self.sendLine("") + self.sendLine("[" + str(num) + "] " + handler.get_prompt_description()) \ No newline at end of file diff --git a/lbrynet/lbrynet_console/ControlHandlers.py b/lbrynet/lbrynet_console/ControlHandlers.py new file mode 100644 index 000000000..92ffa5d29 --- /dev/null +++ b/lbrynet/lbrynet_console/ControlHandlers.py @@ -0,0 +1,1236 @@ +import logging +from zope.interface import implements +from lbrynet.core.StreamDescriptor import PlainStreamDescriptorWriter, BlobStreamDescriptorWriter +from lbrynet.core.PaymentRateManager import PaymentRateManager +from lbrynet.lbryfilemanager.LBRYFileCreator import create_lbry_file +from lbrynet.lbryfile.StreamDescriptor import get_sd_info +from lbrynet.lbrynet_console.interfaces import IControlHandler, IControlHandlerFactory +from lbrynet.core.StreamDescriptor import download_sd_blob +from twisted.internet import defer + + +class InvalidChoiceError(Exception): + pass + + +class 
ControlHandlerFactory(object): + implements(IControlHandlerFactory) + + control_handler_class = None + + def get_prompt_description(self): + return self.control_handler_class.prompt_description + + def __init__(self, *args): + self.args = args + + def get_handler(self): + args = self.args + return self.control_handler_class(*args) + + +class ControlHandler(object): + implements(IControlHandler) + + prompt_description = None + + +class RecursiveControlHandler(ControlHandler): + + def __init__(self, exit_after_one_done=False, reset_after_each_done=False): + self.current_handler = None + self.exit_after_one_done = exit_after_one_done + self.reset_after_each_done = reset_after_each_done + self._set_control_handlers() + + def _get_control_handler_factories(self): + pass + + def _set_control_handlers(self): + self.control_handlers = {i + 1: handler for i, handler in enumerate(self._get_control_handler_factories())} + + def handle_line(self, line): + if self.current_handler is None: + if line is None: + num = None + else: + try: + num = int(line) + except ValueError: + num = None + if num == 0: + return True, None + if num in self.control_handlers: + self.current_handler = self.control_handlers[num].get_handler() + line = None + ds = [] + if self.current_handler is not None: + r = self.current_handler.handle_line(line) + done, ds = r[0], list(r[1:]) + if done is True: + self.current_handler = None + if self.exit_after_one_done is True: + return r + if self.reset_after_each_done: + self._set_control_handlers() + if self.current_handler is None: + ds += [self.get_prompt()] + return (False,) + tuple(ds) + + def get_prompt(self): + prompt_string = "Options:\n" + prompt_string += "[0] Exit this menu\n" + for num, handler in self.control_handlers.iteritems(): + prompt_string += "[" + str(num) + "] " + handler.get_prompt_description() + "\n" + return defer.succeed(prompt_string) + + +class ModifyPaymentRate(ControlHandler): + + def __init__(self): + self._prompt_choices = {'cancel': (self._cancel, "Don't change anything")} + + def handle_line(self, line): + if line is None: + return False, defer.succeed(self._get_prompt_string()) + elif line.lower() in self._prompt_choices: + return self._prompt_choices[line.lower()][0]() + else: + try: + rate = float(line) + except ValueError: + return True, defer.succeed("Rate must be a number") + d = self._set_rate(rate) + d.addCallback(lambda _: "Successfully set the rate") + return True, d + + def _cancel(self): + return True, defer.succeed("No change was made") + + def _set_rate(self, rate): + pass + + def _get_current_status(self): + pass + + def _get_prompt_string(self): + prompt_string = self._get_current_status() + "\n" + for prompt_choice, (func, help_string) in self._prompt_choices.iteritems(): + prompt_string += prompt_choice + ": " + help_string + "\n" + prompt_string += "To change the current rate, enter the desired rate\n" + prompt_string += "Then hit enter\n" + return prompt_string + + +class ApplicationStatus(ControlHandler): + prompt_description = "Application Status" + + def __init__(self, rate_limiter, dht_node): + self.rate_limiter = rate_limiter + self.dht_node = dht_node + + def handle_line(self, line): + assert line is None, "Application status should not be passed any arguments" + status = "Total bytes uploaded: " + str(self.rate_limiter.total_ul_bytes) + "\n" + status += "Total bytes downloaded: " + str(self.rate_limiter.total_dl_bytes) + "\n" + if self.dht_node is not None: + status += "Approximate number of nodes in DHT: " + 
str(self.dht_node.getApproximateTotalDHTNodes()) + "\n" + status += "Approximate number of blobs in DHT: " + str(self.dht_node.getApproximateTotalHashes()) + "\n" + return True, defer.succeed(status) + + +class ApplicationStatusFactory(ControlHandlerFactory): + control_handler_class = ApplicationStatus + + +class GetWalletBalances(ControlHandler): + prompt_description = "Show wallet point balances" + + def __init__(self, wallet): + self.wallet = wallet + + def handle_line(self, line): + assert line is None, "Show wallet balances should not be passed any arguments" + return True, self._get_wallet_balances() + + def _get_wallet_balances(self): + d = self.wallet.get_balance() + + def format_balance(balance): + balance_string = "id: 1\n" + balance_string += "balance: " + str(balance) + "\n" + return balance_string + + d.addCallback(format_balance) + return d + + +class GetWalletBalancesFactory(ControlHandlerFactory): + control_handler_class = GetWalletBalances + + +class ShutDown(ControlHandler): + prompt_description = "Shut down" + + def __init__(self, lbry_service): + self.lbry_service = lbry_service + + def handle_line(self, line): + assert line is None, "Shut down should not be passed any arguments" + return True, self._shut_down() + + def _shut_down(self): + d = self.lbry_service.shut_down() + + def stop_reactor(): + from twisted.internet import reactor + reactor.stop() + + d.addBoth(lambda _: stop_reactor()) + + d.addCallback(lambda _: "Shut down successfully") + return d + + +class ShutDownFactory(ControlHandlerFactory): + control_handler_class = ShutDown + + +class LBRYFileStatus(ControlHandler): + prompt_description = "Print status information for all LBRY Files" + + def __init__(self, lbry_file_manager): + self.lbry_file_manager = lbry_file_manager + + def handle_line(self, line): + assert line is None, "print status should not be passed any arguments" + d = self.lbry_file_manager.get_lbry_file_status_reports() + d.addCallback(self.format_statuses) + return True, d + + def format_statuses(self, status_reports): + status_strings = [] + for status_report in status_reports: + s = status_report.name + " status: " + status_report.running_status + "\n" + s += str(status_report.num_completed) + " completed out of " + str(status_report.num_known) + "\n" + status_strings.append(s) + return ''.join(status_strings) + + +class LBRYFileStatusFactory(ControlHandlerFactory): + control_handler_class = LBRYFileStatus + + +class AddStream(ControlHandler): + prompt_description = None + line_prompt = None + cancel_prompt = "Trying to locate the stream descriptor. Type \"cancel\" to cancel." + canceled_message = "Canceled locating the stream descriptor" + line_prompt2 = "Modify options? (y/n)" + line_prompt3 = "Start download? 
(y/n)" + + def __init__(self, sd_identifier, base_payment_rate_manager): + self.sd_identifier = sd_identifier + self.loading_info_and_factories_deferred = None + self.factories = None + self.factory = None + self.info_validator = None + self.options_left = [] + self.options_chosen = [] + self.current_option = None + self.downloader = None + self.got_options_response = False + self.loading_failed = False + self.payment_rate_manager = PaymentRateManager(base_payment_rate_manager) + + def handle_line(self, line): + if line is None: + return False, defer.succeed(self.line_prompt) + if self.loading_failed is True: + return True, None + if self.loading_info_and_factories_deferred is not None: + if line.lower() == "cancel": + self.loading_info_and_factories_deferred.cancel() + self.loading_info_and_factories_deferred = None + return True, None + else: + return False, defer.succeed(self.cancel_prompt) + if self.factories is None: + self.loading_info_and_factories_deferred = self._load_info_and_factories(line) + cancel_prompt_d = defer.succeed(self.cancel_prompt) + self.loading_info_and_factories_deferred.addCallback(self._choose_factory) + self.loading_info_and_factories_deferred.addErrback(self._handle_load_canceled) + self.loading_info_and_factories_deferred.addErrback(self._handle_load_failed) + return False, cancel_prompt_d, self.loading_info_and_factories_deferred + if self.factory is None: + try: + choice = int(line) + except ValueError: + return False, defer.succeed(self._show_factory_choices()) + if choice in xrange(len(self.factories)): + self.factory = self.factories[choice] + return False, defer.succeed(self._show_info_and_options()) + else: + return False, defer.succeed(self._show_factory_choices()) + if self.got_options_response is False: + self.got_options_response = True + if line == 'y' or line == 'Y': + if self.options_left: + return False, defer.succeed(self._get_next_option_prompt()) + self.options_chosen = [option.default for option in self.options_left] + self.options_left = [] + return False, defer.succeed(self.line_prompt3) + if self.current_option is not None: + try: + choice = self._get_choice_from_input(line) + except InvalidChoiceError: + return False, defer.succeed(self._get_next_option_prompt(invalid_response=True)) + self.options_chosen.append(choice) + self.options_left = self.options_left[1:] + if self.options_left: + return False, defer.succeed(self._get_next_option_prompt()) + else: + self.current_option = None + return False, defer.succeed(self.line_prompt3) + if line == 'y' or line == 'Y': + d = self._start_download() + else: + d = defer.succeed("Download cancelled") + return True, d + + def _get_choice_from_input(self, line): + if line == "": + return self.current_option.default + for option_type in self.current_option.option_types: + if option_type == float: + try: + return float(line) + except ValueError: + pass + if option_type is None: + if line.lower() == "none": + return None + if option_type == bool: + if line.lower() == "true" or line.lower() == "t": + return True + if line.lower() == "false" or line.lower() == "f": + return False + raise InvalidChoiceError(line) + + def _load_info_and_factories(self, sd_file): + return defer.fail(NotImplementedError()) + + def _handle_load_canceled(self, err): + err.trap(defer.CancelledError) + return defer.succeed(self.canceled_message) + + def _handle_load_failed(self, err): + self.loading_failed = True + logging.error("An exception occurred attempting to load the stream descriptor: %s", err.getTraceback()) + return 
defer.succeed("Encountered a problem while loading the stream descriptor: %s\n" + "See console.log for further details.\n" + "Press enter to continue" % err.getErrorMessage()) + + def _choose_factory(self, info_and_factories): + self.loading_info_and_factories_deferred = None + self.info_validator, self.factories = info_and_factories + if len(self.factories) == 1: + self.factory = self.factories[0] + return self._show_info_and_options() + return self._show_factory_choices() + + def _show_factory_choices(self): + prompt = "Choose what to do with the file:\n" + for i, factory in enumerate(self.factories): + prompt += "[" + str(i) + "] " + factory.get_description() + '\n' + return str(prompt) + + def _show_info_and_options(self): + self.options_left = self.factory.get_downloader_options(self.info_validator, + self.payment_rate_manager) + prompt = "Stream info:\n" + for info_line in self.info_validator.info_to_show(): + prompt += info_line[0] + ": " + info_line[1] + "\n" + prompt += "\nOptions:\n" + for option in self.options_left: + prompt += option.long_description + ": " + str(option.default) + "\n" + prompt += "\nModify options? (y/n)" + return str(prompt) + + def _get_option_type_description(self, option_type): + if option_type == float: + return "floating point number (e.g. 1.0)" + if option_type == bool: + return "True or False" + if option_type is None: + return "None" + + def _get_next_option_prompt(self, invalid_response=False): + assert len(self.options_left), "Something went wrong. There were no options left" + choice = self.options_left[0] + choice_string = "" + if invalid_response is True: + choice_string += "Invalid response entered. Try again.\n" + choice_string += choice.long_description + "\n" + choice_string += "Valid inputs:\n" + for option_type in choice.option_types: + choice_string += "\t" + self._get_option_type_description(option_type) + "\n" + choice_string += "Leave blank for default (" + str(choice.default) + ")\n" + choice_string += "Enter choice:" + self.current_option = choice + return choice_string + + def _start_download(self): + d = self._make_downloader() + d.addCallback(lambda stream_downloader: stream_downloader.start()) + return d + + def _make_downloader(self): + return self.factory.make_downloader(self.info_validator, self.options_chosen, + self.payment_rate_manager) + + +class AddStreamFromSD(AddStream): + prompt_description = "Add a stream from a stream descriptor file" + line_prompt = "Stream descriptor file name:" + + def _load_info_and_factories(self, sd_file): + return self.sd_identifier.get_info_and_factories_for_sd_file(sd_file) + + +class AddStreamFromSDFactory(ControlHandlerFactory): + control_handler_class = AddStreamFromSD + + +class AddStreamFromHash(AddStream): + prompt_description = "Add a stream from a hash" + line_prompt = "Stream descriptor hash:" + + def __init__(self, sd_identifier, session): + AddStream.__init__(self, sd_identifier, session.base_payment_rate_manager) + self.session = session + + def _load_info_and_factories(self, sd_hash): + d = download_sd_blob(self.session, sd_hash, self.payment_rate_manager) + d.addCallback(self.sd_identifier.get_info_and_factories_for_sd_blob) + return d + + +class AddStreamFromHashFactory(ControlHandlerFactory): + control_handler_class = AddStreamFromHash + + +class AddStreamFromLBRYcrdName(AddStreamFromHash): + prompt_description = "Add a stream from a short name" + line_prompt = "Short name:" + + def __init__(self, sd_identifier, session, name_resolver): + AddStreamFromHash.__init__(self, 
sd_identifier, session) + self.name_resolver = name_resolver + + def _load_info_and_factories(self, name): + d = self._resolve_name(name) + d.addCallback(lambda stream_hash: AddStreamFromHash._load_info_and_factories(self, stream_hash)) + return d + + def _resolve_name(self, name): + def get_name_from_info(stream_info): + return stream_info['stream_hash'] + d = self.name_resolver.get_stream_info_for_name(name) + d.addCallback(get_name_from_info) + return d + + +class AddStreamFromLBRYcrdNameFactory(ControlHandlerFactory): + control_handler_class = AddStreamFromLBRYcrdName + + +class LBRYFileChooser(RecursiveControlHandler): + + def __init__(self, lbry_file_manager, factory_class, *args, **kwargs): + """ + @param lbry_file_manager: + + @param factory_class: + + @param args: all arguments that will be passed to the factory + + @param kwargs: all arguments that will be passed to the superclass' __init__ + + @return: + """ + self.lbry_file_manager = lbry_file_manager + self.factory_class = factory_class + self.args = args + RecursiveControlHandler.__init__(self, **kwargs) + + def _get_control_handler_factories(self): + control_handler_factories = [] + for lbry_file in self.lbry_file_manager.lbry_files: + control_handler_factories.append(self.factory_class(lbry_file, *self.args)) + return control_handler_factories + + +class LBRYFileChooserFactory(ControlHandlerFactory): + def get_prompt_description(self): + lbry_file = self.args[0] + return lbry_file.file_name + + +class DeleteLBRYFileChooser(LBRYFileChooser): + prompt_description = "Delete LBRY File" + + def __init__(self, stream_info_manager, blob_manager, lbry_file_manager): + LBRYFileChooser.__init__(self, lbry_file_manager, DeleteLBRYFileFactory, stream_info_manager, + blob_manager, lbry_file_manager, exit_after_one_done=True) + + +class DeleteLBRYFileChooserFactory(ControlHandlerFactory): + control_handler_class = DeleteLBRYFileChooser + + +class DeleteLBRYFile(ControlHandler): + prompt_description = "Delete LBRY File" + line_prompt = "Also delete data? 
(y/n):" + + def __init__(self, lbry_file, stream_info_manager, blob_manager, lbry_file_manager): + self.lbry_file = lbry_file + self.stream_info_manager = stream_info_manager + self.blob_manager = blob_manager + self.lbry_file_manager = lbry_file_manager + + def handle_line(self, line): + if line is None: + return False, defer.succeed(self.line_prompt) + delete_data = False + if line == 'y' or line == 'Y': + delete_data = True + d = self._delete_lbry_file(delete_data) + d.addCallback(lambda _: "Successfully deleted " + str(self.lbry_file.stream_name)) + return True, d + + def _delete_lbry_file(self, delete_data): + d = self.lbry_file_manager.delete_lbry_file(self.lbry_file.stream_hash) + + def finish_deletion(): + if delete_data is True: + d = self._delete_data() + else: + d = defer.succeed(True) + d.addCallback(lambda _: self._delete_stream_data()) + return d + + d.addCallback(lambda _: finish_deletion()) + return d + + def _delete_data(self): + d1 = self.stream_info_manager.get_blobs_for_stream(self.lbry_file.stream_hash) + + def get_blob_hashes(blob_infos): + return [b[0] for b in blob_infos if b[0] is not None] + + d1.addCallback(get_blob_hashes) + d2 = self.stream_info_manager.get_sd_blob_hashes_for_stream(self.lbry_file.stream_hash) + + def combine_blob_hashes(results): + blob_hashes = [] + for success, result in results: + if success is True: + blob_hashes.extend(result) + return blob_hashes + + def delete_blobs(blob_hashes): + self.blob_manager.delete_blobs(blob_hashes) + return True + + dl = defer.DeferredList([d1, d2], fireOnOneErrback=True) + dl.addCallback(combine_blob_hashes) + dl.addCallback(delete_blobs) + return dl + + def _delete_stream_data(self): + return self.stream_info_manager.delete_stream(self.lbry_file.stream_hash) + + +class DeleteLBRYFileFactory(LBRYFileChooserFactory): + control_handler_class = DeleteLBRYFile + + +class ToggleLBRYFileRunningChooser(LBRYFileChooser): + prompt_description = "Toggle whether an LBRY File is running" + + def __init__(self, lbry_file_manager): + LBRYFileChooser.__init__(self, lbry_file_manager, ToggleLBRYFileRunningFactory, lbry_file_manager, + exit_after_one_done=True) + + +class ToggleLBRYFileRunningChooserFactory(ControlHandlerFactory): + control_handler_class = ToggleLBRYFileRunningChooser + + +class ToggleLBRYFileRunning(ControlHandler): + prompt_description = "Toggle whether an LBRY File is running" + + def __init__(self, lbry_file, lbry_file_manager): + self.lbry_file = lbry_file + self.lbry_file_manager = lbry_file_manager + + def handle_line(self, line): + d = self.lbry_file_manager.toggle_lbry_file_running(self.lbry_file.stream_hash) + return True, d + + +class ToggleLBRYFileRunningFactory(LBRYFileChooserFactory): + control_handler_class = ToggleLBRYFileRunning + + +class CreateLBRYFile(ControlHandler): + prompt_description = "Create an LBRY File from file" + line_prompt = "File name: " + + def __init__(self, session, lbry_file_manager): + self.session = session + self.lbry_file_manager = lbry_file_manager + + def handle_line(self, line): + if line is None: + return False, defer.succeed(self.line_prompt) + else: + d = create_lbry_file(self.session, self.lbry_file_manager, line, open(line)) + d.addCallback(self.add_to_lbry_files) + d.addCallback(lambda _: "Successfully created " + str(line)) + return True, d + + def add_to_lbry_files(self, stream_hash): + prm = PaymentRateManager(self.session.base_payment_rate_manager) + d = self.lbry_file_manager.add_lbry_file(stream_hash, prm) + d.addCallback(lambda lbry_file_downloader: 
lbry_file_downloader.restore()) + return d + + +class CreateLBRYFileFactory(ControlHandlerFactory): + control_handler_class = CreateLBRYFile + + +class PublishStreamDescriptorChooser(LBRYFileChooser): + prompt_description = "Publish a stream descriptor file to the DHT for an LBRY File" + + def __init__(self, stream_info_manager, blob_manager, lbry_file_manager): + LBRYFileChooser.__init__(self, lbry_file_manager, PublishStreamDescriptorFactory, stream_info_manager, + blob_manager, lbry_file_manager, exit_after_one_done=True) + + +class PublishStreamDescriptorChooserFactory(ControlHandlerFactory): + control_handler_class = PublishStreamDescriptorChooser + + +class PublishStreamDescriptor(ControlHandler): + prompt_description = "Publish a stream descriptor file to the DHT for an LBRY File" + + def __init__(self, lbry_file, stream_info_manager, blob_manager, lbry_file_manager): + self.lbry_file = lbry_file + self.stream_info_manager = stream_info_manager + self.blob_manager = blob_manager + self.lbry_file_manager = lbry_file_manager + + def handle_line(self, line): + return True, self._publish_sd_blob() + + def _publish_sd_blob(self): + descriptor_writer = BlobStreamDescriptorWriter(self.blob_manager) + + d = get_sd_info(self.lbry_file_manager.stream_info_manager, self.lbry_file.stream_hash, True) + d.addCallback(descriptor_writer.create_descriptor) + + def add_sd_blob_to_stream(sd_blob_hash): + d = self.stream_info_manager.save_sd_blob_hash_to_stream(self.lbry_file.stream_hash, sd_blob_hash) + d.addCallback(lambda _: sd_blob_hash) + return d + + d.addCallback(add_sd_blob_to_stream) + return d + + +class PublishStreamDescriptorFactory(LBRYFileChooserFactory): + control_handler_class = PublishStreamDescriptor + + +class ShowPublishedSDHashesChooser(LBRYFileChooser): + prompt_description = "Show published stream descriptors for an LBRY File" + + def __init__(self, stream_info_manager, lbry_file_manager): + LBRYFileChooser.__init__(self, lbry_file_manager, ShowPublishedSDHashesFactory, stream_info_manager, + lbry_file_manager) + + +class ShowPublishedSDHashesChooserFactory(ControlHandlerFactory): + control_handler_class = ShowPublishedSDHashesChooser + + +class ShowPublishedSDHashes(ControlHandler): + prompt_description = "Show published stream descriptors for an LBRY File" + + def __init__(self, lbry_file, stream_info_manager, lbry_file_manager): + self.lbry_file = lbry_file + self.stream_info_manager = stream_info_manager + self.lbry_file_manager = lbry_file_manager + + def handle_line(self, line): + return True, self._show_sd_hashes() + + def _show_sd_hashes(self): + d = self.stream_info_manager.get_sd_blob_hashes_for_stream(self.lbry_file.stream_hash) + + def format_blob_hashes(sd_blob_hashes): + return "\n".join([str(b) for b in sd_blob_hashes]) + + d.addCallback(format_blob_hashes) + return d + + +class ShowPublishedSDHashesFactory(LBRYFileChooserFactory): + control_handler_class = ShowPublishedSDHashes + + +class CreatePlainStreamDescriptorChooser(LBRYFileChooser): + prompt_description = "Create a plain stream descriptor file for an LBRY File" + + def __init__(self, lbry_file_manager): + LBRYFileChooser.__init__(self, lbry_file_manager, CreatePlainStreamDescriptorFactory, lbry_file_manager, + exit_after_one_done=True) + + +class CreatePlainStreamDescriptorChooserFactory(ControlHandlerFactory): + control_handler_class = CreatePlainStreamDescriptorChooser + + +class CreatePlainStreamDescriptor(ControlHandler): + prompt_description = "Create a plain stream descriptor file for an LBRY File" + 
line_prompt = "Stream Descriptor file name (blank for default):" + + def __init__(self, lbry_file, lbry_file_manager): + self.lbry_file = lbry_file + self.lbry_file_manager = lbry_file_manager + self.sd_file_name = None + + def handle_line(self, line): + if line is None: + return False, defer.succeed(self.line_prompt) + self.sd_file_name = line + return True, self._create_sd() + + def _create_sd(self): + if not self.sd_file_name: + self.sd_file_name = None + descriptor_writer = PlainStreamDescriptorWriter(self.sd_file_name) + d = get_sd_info(self.lbry_file_manager.stream_info_manager, self.lbry_file.stream_hash, True) + d.addCallback(descriptor_writer.create_descriptor) + return d + + +class CreatePlainStreamDescriptorFactory(LBRYFileChooserFactory): + control_handler_class = CreatePlainStreamDescriptor + + +class ShowLBRYFileStreamHashChooser(LBRYFileChooser): + prompt_description = "Show an LBRY File's stream hash (not usually what you want)" + + def __init__(self, lbry_file_manager): + LBRYFileChooser.__init__(self, lbry_file_manager, ShowLBRYFileStreamHashFactory) + + +class ShowLBRYFileStreamHashChooserFactory(ControlHandlerFactory): + control_handler_class = ShowLBRYFileStreamHashChooser + + +class ShowLBRYFileStreamHash(ControlHandler): + prompt_description = "Show an LBRY File's stream hash (not usually what you want)" + + def __init__(self, lbry_file): + self.lbry_file = lbry_file + + def handle_line(self, line): + return True, defer.succeed(str(self.lbry_file.stream_hash)) + + +class ShowLBRYFileStreamHashFactory(LBRYFileChooserFactory): + control_handler_class = ShowLBRYFileStreamHash + + +class ModifyLBRYFileDataPaymentRate(ModifyPaymentRate): + prompt_description = "Modify LBRY File data payment rate" + + def __init__(self, lbry_file, lbry_file_manager): + ModifyPaymentRate.__init__(self) + self._prompt_choices['unset'] = (self._unset, "Use the default LBRY file data rate") + self.lbry_file = lbry_file + self.lbry_file_manager = lbry_file_manager + self.payment_rate_manager = lbry_file.payment_rate_manager + + def _unset(self): + d = self._set_rate(None) + d.addCallback(lambda _: "Using the default LBRY file data rate") + return True, d + + def _set_rate(self, rate): + self.payment_rate_manager.min_blob_data_payment_rate = rate + return self.lbry_file_manager.set_lbry_file_data_payment_rate(self.lbry_file.stream_hash, rate) + + def _get_current_status(self): + status = "The LBRY file's current data payment rate is " + effective_rate = self.payment_rate_manager.get_effective_min_blob_data_payment_rate() + if self.payment_rate_manager.min_blob_data_payment_rate is None: + status += "set to use the default LBRY file data payment rate, " + status += str(effective_rate) + return status + + +class ModifyLBRYFileDataPaymentRateFactory(ControlHandlerFactory): + control_handler_class = ModifyLBRYFileDataPaymentRate + + +class ModifyLBRYFileOptionsChooser(LBRYFileChooser): + prompt_description = "Modify an LBRY File's options" + + def __init__(self, lbry_file_manager): + LBRYFileChooser.__init__(self, lbry_file_manager, ModifyLBRYFileOptionsFactory, lbry_file_manager) + + +class ModifyLBRYFileOptionsChooserFactory(ControlHandlerFactory): + control_handler_class = ModifyLBRYFileOptionsChooser + + +class ModifyLBRYFileOptions(RecursiveControlHandler): + prompt_description = "Modify an LBRY File's options" + + def __init__(self, lbry_file, lbry_file_manager): + self.lbry_file = lbry_file + self.lbry_file_manager = lbry_file_manager + RecursiveControlHandler.__init__(self) + + def 
_get_control_handler_factories(self): + factories = [] + factories.append(ModifyLBRYFileDataPaymentRateFactory(self.lbry_file, self.lbry_file_manager)) + return factories + + +class ModifyLBRYFileOptionsFactory(LBRYFileChooserFactory): + control_handler_class = ModifyLBRYFileOptions + + +class ClaimName(ControlHandler): + prompt_description = "Associate a short name with a stream descriptor hash" + line_prompt = "Stream descriptor hash:" + line_prompt_2 = "Short name:" + line_prompt_3 = "Amount:" + + def __init__(self, name_resolver): + self.name_resolver = name_resolver + self.sd_hash = None + self.short_name = None + self.amount = None + + def handle_line(self, line): + if line is None: + return False, defer.succeed(self.line_prompt) + if self.sd_hash is None: + self.sd_hash = line + return False, defer.succeed(self.line_prompt_2) + if self.short_name is None: + self.short_name = line + return False, defer.succeed(self.line_prompt_3) + self.amount = line + return True, self._claim_name() + + def _claim_name(self): + return self.name_resolver.claim_name(self.sd_hash, self.short_name, float(self.amount)) + + +class ClaimNameFactory(ControlHandlerFactory): + control_handler_class = ClaimName + + +class ModifyDefaultDataPaymentRate(ModifyPaymentRate): + prompt_description = "Modify default data payment rate" + + def __init__(self, payment_rate_manager, settings): + ModifyPaymentRate.__init__(self) + self.settings = settings + self.payment_rate_manager = payment_rate_manager + + def _set_rate(self, rate): + self.payment_rate_manager.min_blob_data_payment_rate = rate + return self.settings.save_default_data_payment_rate(rate) + + def _get_current_status(self): + status = "The current default data payment rate is " + status += str(self.payment_rate_manager.min_blob_data_payment_rate) + return status + + +class ModifyDefaultDataPaymentRateFactory(ControlHandlerFactory): + control_handler_class = ModifyDefaultDataPaymentRate + + +class ForceCheckBlobFileConsistency(ControlHandler): + prompt_description = "Verify consistency of stored blobs" + + def __init__(self, blob_manager): + self.blob_manager = blob_manager + + def handle_line(self, line): + assert line is None, "Check consistency should not be passed any arguments" + return True, self._check_consistency() + + def _check_consistency(self): + d = self.blob_manager.check_consistency() + d.addCallback(lambda _: "Finished checking stored blobs") + return d + + +class ForceCheckBlobFileConsistencyFactory(ControlHandlerFactory): + control_handler_class = ForceCheckBlobFileConsistency + + +class ModifyApplicationDefaults(RecursiveControlHandler): + prompt_description = "Modify application settings" + + def __init__(self, lbry_service): + self.lbry_service = lbry_service + RecursiveControlHandler.__init__(self) + + def _get_control_handler_factories(self): + return [ModifyDefaultDataPaymentRateFactory(self.lbry_service.session.base_payment_rate_manager, + self.lbry_service.settings), + ForceCheckBlobFileConsistencyFactory(self.lbry_service.session.blob_manager)] + + +class ModifyApplicationDefaultsFactory(ControlHandlerFactory): + control_handler_class = ModifyApplicationDefaults + + +class ShowServerStatus(ControlHandler): + prompt_description = "Show the status of the server" + + def __init__(self, lbry_service): + self.lbry_service = lbry_service + + def handle_line(self, line): + assert line is None, "Show server status should not be passed any arguments" + return True, self._get_status() + + def _get_status(self): + status_string = "Server 
status:\n" + status_string += "Port: " + str(self.lbry_service.peer_port) + "\n" + status_string += "Running: " + str(self.lbry_service.lbry_server_port is not None) + "\n" + if self.lbry_service.blob_request_payment_rate_manager is not None: + rate = self.lbry_service.blob_request_payment_rate_manager.get_effective_min_blob_data_payment_rate() + status_string += "Min blob data payment rate: " + if self.lbry_service.blob_request_payment_rate_manager.min_blob_data_payment_rate is None: + status_string += "Using application default (" + str(rate) + ")\n" + else: + status_string += str(rate) + status_string += "\n" + #status_string += "Min crypt info payment rate: " + #status_string += str(self.lbry_service._server_payment_rate_manager.get_min_live_blob_info_payment_rate()) + #status_string += "\n" + return defer.succeed(status_string) + + +class ShowServerStatusFactory(ControlHandlerFactory): + control_handler_class = ShowServerStatus + + +class StartServer(ControlHandler): + prompt_description = "Start the server" + + def __init__(self, lbry_service): + self.lbry_service = lbry_service + + def handle_line(self, line): + assert line is None, "Start server should not be passed any arguments" + d = self.lbry_service.start_server() + d.addCallback(lambda _: self.lbry_service.settings.save_server_running_status(running=True)) + d.addCallback(lambda _: "Successfully started the server") + return True, d + + +class StartServerFactory(ControlHandlerFactory): + control_handler_class = StartServer + + +class StopServer(ControlHandler): + prompt_description = "Stop the server" + + def __init__(self, lbry_service): + self.lbry_service = lbry_service + + def handle_line(self, line): + assert line is None, "Stop server should not be passed any arguments" + d = self.lbry_service.stop_server() + d.addCallback(lambda _: self.lbry_service.settings.save_server_running_status(running=False)) + d.addCallback(lambda _: "Successfully stopped the server") + return True, d + + +class StopServerFactory(ControlHandlerFactory): + control_handler_class = StopServer + + +class ModifyServerDataPaymentRate(ModifyPaymentRate): + prompt_description = "Modify server data payment rate" + + def __init__(self, payment_rate_manager, settings): + ModifyPaymentRate.__init__(self) + self._prompt_choices['unset'] = (self._unset, "Use the application default data rate") + self.settings = settings + self.payment_rate_manager = payment_rate_manager + + def _unset(self): + d = self._set_rate(None) + d.addCallback(lambda _: "Using the application default data rate") + return True, d + + def _set_rate(self, rate): + self.payment_rate_manager.min_blob_data_payment_rate = rate + return self.settings.save_server_data_payment_rate(rate) + + def _get_current_status(self): + effective_rate = self.payment_rate_manager.get_effective_min_blob_data_payment_rate() + status = "The current server data payment rate is " + if self.payment_rate_manager.min_blob_data_payment_rate is None: + status += "set to use the application default, " + status += str(effective_rate) + return status + + +class ModifyServerDataPaymentRateFactory(ControlHandlerFactory): + control_handler_class = ModifyServerDataPaymentRate + + +# class ModifyServerCryptInfoPaymentRate(ModifyPaymentRate): +# prompt_description = "Modify server live stream metadata payment rate" +# +# def __init__(self, payment_rate_manager, settings): +# ModifyPaymentRate.__init__(self) +# self._prompt_choices['unset'] = (self._unset, +# "Use the application default live stream metadata rate") +# 
self.settings = settings +# self.payment_rate_manager = payment_rate_manager +# +# def _unset(self): +# d = self._set_rate(None) +# d.addCallback(lambda _: "Using the application default live stream metadata rate") +# return True, d +# +# def _set_rate(self, rate): +# self.payment_rate_manager.min_live_blob_info_payment_rate = rate +# return self.settings.save_server_crypt_info_payment_rate(rate) +# +# def _get_current_status(self): +# effective_rate = self.payment_rate_manager.get_effective_min_live_blob_info_payment_rate() +# status = "The current server live stream metadata payment rate is " +# if self.payment_rate_manager.get_min_blob_data_payment_rate() is None: +# status += "set to use the application default, " +# status += str(effective_rate) +# else: +# status += str(effective_rate) +# return status +# +# +# class ModifyServerCryptInfoPaymentRateFactory(ControlHandlerFactory): +# control_handler_class = ModifyServerCryptInfoPaymentRate + + +class DisableQueryHandler(ControlHandler): + def __init__(self, query_handlers, query_handler, settings): + self.query_handlers = query_handlers + self.query_handler = query_handler + self.settings = settings + + def handle_line(self, line): + assert line is None, "DisableQueryHandler should not be passed any arguments" + self.query_handlers[self.query_handler] = False + d = self.settings.disable_query_handler(self.query_handler.get_primary_query_identifier()) + d.addCallback(lambda _: "Disabled the query handler") + return True, d + + +class DisableQueryHandlerFactory(ControlHandlerFactory): + control_handler_class = DisableQueryHandler + + def get_prompt_description(self): + query_handler = self.args[1] + return "Disable " + str(query_handler.get_description()) + + +class EnableQueryHandler(ControlHandler): + def __init__(self, query_handlers, query_handler, settings): + self.query_handlers = query_handlers + self.query_handler = query_handler + self.settings = settings + + def handle_line(self, line): + assert line is None, "EnableQueryHandler should not be passed any arguments" + self.query_handlers[self.query_handler] = True + d = self.settings.enable_query_handler(self.query_handler.get_primary_query_identifier()) + d.addCallback(lambda _: "Enabled the query handler") + return True, d + + +class EnableQueryHandlerFactory(ControlHandlerFactory): + control_handler_class = EnableQueryHandler + + def get_prompt_description(self): + query_handler = self.args[1] + return "Enable " + str(query_handler.get_description()) + + +class ModifyServerEnabledQueries(RecursiveControlHandler): + prompt_description = "Modify which queries the server will respond to" + + def __init__(self, query_handlers, settings): + self.query_handlers = query_handlers + self.settings = settings + RecursiveControlHandler.__init__(self, reset_after_each_done=True) + + def _get_control_handler_factories(self): + factories = [] + for query_handler, enabled in self.query_handlers.iteritems(): + if enabled: + factories.append(DisableQueryHandlerFactory(self.query_handlers, query_handler, self.settings)) + else: + factories.append(EnableQueryHandlerFactory(self.query_handlers, query_handler, self.settings)) + return factories + + +class ModifyServerEnabledQueriesFactory(ControlHandlerFactory): + control_handler_class = ModifyServerEnabledQueries + + +class ImmediateAnnounceAllBlobs(ControlHandler): + prompt_description = "Immediately announce all blob hashes to the DHT" + + def __init__(self, blob_manager): + self.blob_manager = blob_manager + + def handle_line(self, line): + 
assert line is None, "Immediate Announce should not be passed any arguments" + d = self.blob_manager.immediate_announce_all_blobs() + d.addCallback(lambda _: "Done announcing") + return True, d + + +class ImmediateAnnounceAllBlobsFactory(ControlHandlerFactory): + control_handler_class = ImmediateAnnounceAllBlobs + + +class ModifyServerSettings(RecursiveControlHandler): + prompt_description = "Modify server settings" + + def __init__(self, lbry_service): + self.lbry_service = lbry_service + RecursiveControlHandler.__init__(self, reset_after_each_done=True) + + def _get_control_handler_factories(self): + factories = [] + if self.lbry_service.lbry_server_port is not None: + factories.append(StopServerFactory(self.lbry_service)) + else: + factories.append(StartServerFactory(self.lbry_service)) + factories.append( + ModifyServerDataPaymentRateFactory( + self.lbry_service.blob_request_payment_rate_manager, + self.lbry_service.settings + ) + ) + #factories.append(ModifyServerCryptInfoPaymentRateFactory(self.lbry_service._server_payment_rate_manager, + # self.lbry_service.settings)) + factories.append(ModifyServerEnabledQueriesFactory(self.lbry_service.query_handlers, + self.lbry_service.settings)) + factories.append(ImmediateAnnounceAllBlobsFactory(self.lbry_service.session.blob_manager)) + return factories + + +class ModifyServerSettingsFactory(ControlHandlerFactory): + control_handler_class = ModifyServerSettings + + +class PeerChooser(RecursiveControlHandler): + + def __init__(self, peer_manager, factory_class, *args, **kwargs): + """ + @param peer_manager: + + @param factory_class: + + @param args: all arguments that will be passed to the factory + + @param kwargs: all arguments that will be passed to the superclass' __init__ + + @return: + """ + self.peer_manager = peer_manager + self.factory_class = factory_class + self.args = args + RecursiveControlHandler.__init__(self, **kwargs) + + def _get_control_handler_factories(self): + control_handler_factories = [] + for peer in self.peer_manager.peers: + control_handler_factories.append(self.factory_class(peer, *self.args)) + return control_handler_factories + + +class PeerChooserFactory(ControlHandlerFactory): + def get_prompt_description(self): + peer = self.args[0] + return str(peer) + + +class ShowPeerStats(ControlHandler): + prompt_description = "Show the peer's stats" + + def __init__(self, peer): + self.peer = peer + + def handle_line(self, line): + return True, defer.succeed(self._get_peer_stats_string()) + + def _get_peer_stats_string(self): + stats = "Statistics for " + str(self.peer) + '\n' + stats += " current_score: " + str(self.peer.score) + '\n' + stats += " is_available: " + str(self.peer.is_available()) + '\n' + for stat_type, amount in self.peer.stats.iteritems(): + stats += " " + stat_type + ": " + str(amount) + "\n" + return stats + + +class ShowPeerStatsFactory(ControlHandlerFactory): + control_handler_class = ShowPeerStats + + +class PeerStatsAndSettings(RecursiveControlHandler): + def __init__(self, peer): + self.peer = peer + RecursiveControlHandler.__init__(self, reset_after_each_done=True) + + def _get_control_handler_factories(self): + factories = [] + factories.append(ShowPeerStatsFactory(self.peer)) + return factories + + +class PeerStatsAndSettingsFactory(PeerChooserFactory): + control_handler_class = PeerStatsAndSettings + + +class PeerStatsAndSettingsChooser(PeerChooser): + prompt_description = "View peer stats and modify peer settings" + + def __init__(self, peer_manager): + PeerChooser.__init__(self, 
peer_manager, PeerStatsAndSettingsFactory) + + +class PeerStatsAndSettingsChooserFactory(ControlHandlerFactory): + control_handler_class = PeerStatsAndSettingsChooser \ No newline at end of file diff --git a/lbrynet/lbrynet_console/LBRYConsole.py b/lbrynet/lbrynet_console/LBRYConsole.py new file mode 100644 index 000000000..0aa9cb386 --- /dev/null +++ b/lbrynet/lbrynet_console/LBRYConsole.py @@ -0,0 +1,407 @@ +import logging +from lbrynet.core.Session import LBRYSession +import os.path +import argparse +from yapsy.PluginManager import PluginManager +from twisted.internet import defer, threads, stdio, task +from lbrynet.lbrynet_console.ConsoleControl import ConsoleControl +from lbrynet.lbrynet_console.LBRYSettings import LBRYSettings +from lbrynet.lbryfilemanager.LBRYFileManager import LBRYFileManager +from lbrynet.conf import MIN_BLOB_DATA_PAYMENT_RATE # , MIN_BLOB_INFO_PAYMENT_RATE +from lbrynet.core.utils import generate_id +from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier +from lbrynet.core.PaymentRateManager import PaymentRateManager +from lbrynet.core.server.BlobAvailabilityHandler import BlobAvailabilityHandlerFactory +from lbrynet.core.server.BlobRequestHandler import BlobRequestHandlerFactory +from lbrynet.core.server.ServerProtocol import ServerProtocolFactory +from lbrynet.core.PTCWallet import PTCWallet +from lbrynet.lbryfile.client.LBRYFileDownloader import LBRYFileOpenerFactory +from lbrynet.lbryfile.StreamDescriptor import LBRYFileStreamType +from lbrynet.lbryfile.LBRYFileMetadataManager import DBLBRYFileMetadataManager, TempLBRYFileMetadataManager +#from lbrynet.lbrylive.PaymentRateManager import LiveStreamPaymentRateManager +from lbrynet.lbrynet_console.ControlHandlers import ApplicationStatusFactory, GetWalletBalancesFactory, ShutDownFactory +from lbrynet.lbrynet_console.ControlHandlers import LBRYFileStatusFactory, DeleteLBRYFileChooserFactory +from lbrynet.lbrynet_console.ControlHandlers import ToggleLBRYFileRunningChooserFactory +from lbrynet.lbrynet_console.ControlHandlers import ModifyApplicationDefaultsFactory +from lbrynet.lbrynet_console.ControlHandlers import CreateLBRYFileFactory, PublishStreamDescriptorChooserFactory +from lbrynet.lbrynet_console.ControlHandlers import ShowPublishedSDHashesChooserFactory +from lbrynet.lbrynet_console.ControlHandlers import CreatePlainStreamDescriptorChooserFactory +from lbrynet.lbrynet_console.ControlHandlers import ShowLBRYFileStreamHashChooserFactory, AddStreamFromHashFactory +from lbrynet.lbrynet_console.ControlHandlers import AddStreamFromSDFactory, AddStreamFromLBRYcrdNameFactory +from lbrynet.lbrynet_console.ControlHandlers import ClaimNameFactory +from lbrynet.lbrynet_console.ControlHandlers import ShowServerStatusFactory, ModifyServerSettingsFactory +from lbrynet.lbrynet_console.ControlHandlers import ModifyLBRYFileOptionsChooserFactory +from lbrynet.lbrynet_console.ControlHandlers import PeerStatsAndSettingsChooserFactory +from lbrynet.core.LBRYcrdWallet import LBRYcrdWallet + + +class LBRYConsole(): + """A class which can upload and download file streams to and from the network""" + def __init__(self, peer_port, dht_node_port, known_dht_nodes, control_class, wallet_type, lbrycrd_rpc_port, + use_upnp, conf_dir, data_dir): + """ + @param peer_port: the network port on which to listen for peers + + @param dht_node_port: the network port on which to listen for dht node requests + + @param known_dht_nodes: a list of (ip_address, dht_port) which will be used to join the DHT network + """ + self.peer_port 
= peer_port + self.dht_node_port = dht_node_port + self.known_dht_nodes = known_dht_nodes + self.wallet_type = wallet_type + self.wallet_rpc_port = lbrycrd_rpc_port + self.use_upnp = use_upnp + self.lbry_server_port = None + self.control_class = control_class + self.session = None + self.lbry_file_metadata_manager = None + self.lbry_file_manager = None + self.conf_dir = conf_dir + self.data_dir = data_dir + self.plugin_manager = PluginManager() + self.plugin_manager.setPluginPlaces([ + os.path.join(self.conf_dir, "plugins"), + os.path.join(os.path.dirname(__file__), "plugins"), + ]) + self.control_handlers = [] + self.query_handlers = {} + + self.settings = LBRYSettings(self.conf_dir) + self.blob_request_payment_rate_manager = None + self.lbryid = None + self.sd_identifier = StreamDescriptorIdentifier() + + def start(self): + """Initialize the session and restore everything to its saved state""" + d = threads.deferToThread(self._create_directory) + d.addCallback(lambda _: self._get_settings()) + d.addCallback(lambda _: self._get_session()) + d.addCallback(lambda _: self._setup_lbry_file_manager()) + d.addCallback(lambda _: self._setup_lbry_file_opener()) + d.addCallback(lambda _: self._setup_control_handlers()) + d.addCallback(lambda _: self._setup_query_handlers()) + d.addCallback(lambda _: self._load_plugins()) + d.addCallback(lambda _: self._setup_server()) + d.addCallback(lambda _: self._start_controller()) + return d + + def shut_down(self): + """Stop the session, all currently running streams, and stop the server""" + d = self.session.shut_down() + d.addCallback(lambda _: self._shut_down()) + return d + + def add_control_handlers(self, control_handlers): + for control_handler in control_handlers: + self.control_handlers.append(control_handler) + + def add_query_handlers(self, query_handlers): + + def _set_query_handlers(statuses): + from future_builtins import zip + for handler, (success, status) in zip(query_handlers, statuses): + if success is True: + self.query_handlers[handler] = status + + ds = [] + for handler in query_handlers: + ds.append(self.settings.get_query_handler_status(handler.get_primary_query_identifier())) + dl = defer.DeferredList(ds) + dl.addCallback(_set_query_handlers) + return dl + + def _create_directory(self): + if not os.path.exists(self.conf_dir): + os.makedirs(self.conf_dir) + logging.debug("Created the configuration directory: %s", str(self.conf_dir)) + if not os.path.exists(self.data_dir): + os.makedirs(self.data_dir) + logging.debug("Created the data directory: %s", str(self.data_dir)) + + def _get_settings(self): + d = self.settings.start() + d.addCallback(lambda _: self.settings.get_lbryid()) + d.addCallback(self.set_lbryid) + return d + + def set_lbryid(self, lbryid): + if lbryid is None: + return self._make_lbryid() + else: + self.lbryid = lbryid + + def _make_lbryid(self): + self.lbryid = generate_id() + d = self.settings.save_lbryid(self.lbryid) + return d + + def _get_session(self): + d = self.settings.get_default_data_payment_rate() + + def create_session(default_data_payment_rate): + if default_data_payment_rate is None: + default_data_payment_rate = MIN_BLOB_DATA_PAYMENT_RATE + if self.wallet_type == "lbrycrd": + wallet = LBRYcrdWallet("rpcuser", "rpcpassword", "127.0.0.1", self.wallet_rpc_port) + else: + wallet = PTCWallet(self.conf_dir) + self.session = LBRYSession(default_data_payment_rate, db_dir=self.conf_dir, lbryid=self.lbryid, + blob_dir=self.data_dir, dht_node_port=self.dht_node_port, + known_dht_nodes=self.known_dht_nodes, 
peer_port=self.peer_port, + use_upnp=self.use_upnp, wallet=wallet) + + d.addCallback(create_session) + + d.addCallback(lambda _: self.session.setup()) + + return d + + def _setup_lbry_file_manager(self): + self.lbry_file_metadata_manager = DBLBRYFileMetadataManager(self.conf_dir) + d = self.lbry_file_metadata_manager.setup() + + def set_lbry_file_manager(): + self.lbry_file_manager = LBRYFileManager(self.session, self.lbry_file_metadata_manager, self.sd_identifier) + return self.lbry_file_manager.setup() + + d.addCallback(lambda _: set_lbry_file_manager()) + + return d + + def _setup_lbry_file_opener(self): + stream_info_manager = TempLBRYFileMetadataManager() + downloader_factory = LBRYFileOpenerFactory(self.session.peer_finder, self.session.rate_limiter, + self.session.blob_manager, stream_info_manager, + self.session.wallet) + self.sd_identifier.add_stream_downloader_factory(LBRYFileStreamType, downloader_factory) + return defer.succeed(True) + + def _setup_control_handlers(self): + handlers = [ + ('General', + ApplicationStatusFactory(self.session.rate_limiter, self.session.dht_node)), + ('General', + GetWalletBalancesFactory(self.session.wallet)), + ('General', + ModifyApplicationDefaultsFactory(self)), + ('General', + ShutDownFactory(self)), + ('General', + PeerStatsAndSettingsChooserFactory(self.session.peer_manager)), + ('lbryfile', + LBRYFileStatusFactory(self.lbry_file_manager)), + ('Stream Downloading', + AddStreamFromSDFactory(self.sd_identifier, self.session.base_payment_rate_manager)), + ('lbryfile', + DeleteLBRYFileChooserFactory(self.lbry_file_metadata_manager, self.session.blob_manager, + self.lbry_file_manager)), + ('lbryfile', + ToggleLBRYFileRunningChooserFactory(self.lbry_file_manager)), + ('lbryfile', + CreateLBRYFileFactory(self.session, self.lbry_file_manager)), + ('lbryfile', + PublishStreamDescriptorChooserFactory(self.lbry_file_metadata_manager, + self.session.blob_manager, + self.lbry_file_manager)), + ('lbryfile', + ShowPublishedSDHashesChooserFactory(self.lbry_file_metadata_manager, + self.lbry_file_manager)), + ('lbryfile', + CreatePlainStreamDescriptorChooserFactory(self.lbry_file_manager)), + ('lbryfile', + ShowLBRYFileStreamHashChooserFactory(self.lbry_file_manager)), + ('lbryfile', + ModifyLBRYFileOptionsChooserFactory(self.lbry_file_manager)), + ('Stream Downloading', + AddStreamFromHashFactory(self.sd_identifier, self.session)) + ] + self.add_control_handlers(handlers) + if self.wallet_type == 'lbrycrd': + lbrycrd_handlers = [ + ('Stream Downloading', + AddStreamFromLBRYcrdNameFactory(self.sd_identifier, self.session, + self.session.wallet)), + ('General', + ClaimNameFactory(self.session.wallet)), + ] + self.add_control_handlers(lbrycrd_handlers) + if self.peer_port is not None: + server_handlers = [ + ('Server', + ShowServerStatusFactory(self)), + ('Server', + ModifyServerSettingsFactory(self)), + ] + self.add_control_handlers(server_handlers) + + def _setup_query_handlers(self): + handlers = [ + #CryptBlobInfoQueryHandlerFactory(self.lbry_file_metadata_manager, self.session.wallet, + # self._server_payment_rate_manager), + BlobAvailabilityHandlerFactory(self.session.blob_manager), + #BlobRequestHandlerFactory(self.session.blob_manager, self.session.wallet, + # self._server_payment_rate_manager), + self.session.wallet.get_wallet_info_query_handler_factory(), + ] + + def get_blob_request_handler_factory(rate): + self.blob_request_payment_rate_manager = PaymentRateManager( + self.session.base_payment_rate_manager, rate + ) + 
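# --- Illustrative sketch (not part of this commit) ---------------------------
# LBRYConsole assembles itself as one long Deferred chain: every setup step is
# a method returning a Deferred, and start() strings them together with
# addCallback(lambda _: next_step()), keeping blocking work (directory
# creation, database opens) in the thread pool.  Below is a self-contained,
# hedged rendition of that pattern using only Twisted; the step names
# (make_dirs, load_rate, build_handler) are invented for demonstration and do
# not exist in the codebase.
from twisted.internet import defer, threads, reactor

def make_dirs():
    # blocking filesystem work belongs in the thread pool, as in
    # _create_directory above
    return True

def load_rate(_):
    # stand-in for a settings getter such as get_server_data_payment_rate(),
    # which also returns a Deferred
    return defer.succeed(0.5)

def build_handler(rate):
    # once the persisted rate is known, construct the object that needs it
    return "handler configured with rate %s" % rate

def start():
    d = threads.deferToThread(make_dirs)
    d.addCallback(load_rate)
    d.addCallback(build_handler)
    d.addCallback(lambda msg: reactor.stop())
    return d

if __name__ == "__main__":
    reactor.callWhenRunning(start)
    reactor.run()
# -----------------------------------------------------------------------------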
handlers.append(BlobRequestHandlerFactory(self.session.blob_manager, self.session.wallet, + self.blob_request_payment_rate_manager)) + + d1 = self.settings.get_server_data_payment_rate() + d1.addCallback(get_blob_request_handler_factory) + + dl = defer.DeferredList([d1]) + dl.addCallback(lambda _: self.add_query_handlers(handlers)) + return dl + + def _load_plugins(self): + d = threads.deferToThread(self.plugin_manager.collectPlugins) + + def setup_plugins(): + ds = [] + for plugin in self.plugin_manager.getAllPlugins(): + ds.append(plugin.plugin_object.setup(self)) + return defer.DeferredList(ds) + + d.addCallback(lambda _: setup_plugins()) + return d + + def _setup_server(self): + + def restore_running_status(running): + if running is True: + return self.start_server() + return defer.succeed(True) + + dl = self.settings.get_server_running_status() + dl.addCallback(restore_running_status) + return dl + + def start_server(self): + + if self.peer_port is not None: + + server_factory = ServerProtocolFactory(self.session.rate_limiter, + self.query_handlers, + self.session.peer_manager) + from twisted.internet import reactor + self.lbry_server_port = reactor.listenTCP(self.peer_port, server_factory) + return defer.succeed(True) + + def stop_server(self): + if self.lbry_server_port is not None: + self.lbry_server_port, p = None, self.lbry_server_port + return defer.maybeDeferred(p.stopListening) + else: + return defer.succeed(True) + + def _start_controller(self): + self.control_class(self.control_handlers) + return defer.succeed(True) + + def _shut_down(self): + self.plugin_manager = None + d1 = self.lbry_file_metadata_manager.stop() + d1.addCallback(lambda _: self.lbry_file_manager.stop()) + d2 = self.stop_server() + dl = defer.DeferredList([d1, d2]) + return dl + + +class StdIOControl(): + def __init__(self, control_handlers): + stdio.StandardIO(ConsoleControl(control_handlers)) + + +def launch_lbry_console(): + + from twisted.internet import reactor + + parser = argparse.ArgumentParser(description="Launch a lbrynet console") + parser.add_argument("--no_listen_peer", + help="Don't listen for incoming data connections.", + action="store_true") + parser.add_argument("--peer_port", + help="The port on which the console will listen for incoming data connections.", + type=int, default=3333) + parser.add_argument("--no_listen_dht", + help="Don't listen for incoming DHT connections.", + action="store_true") + parser.add_argument("--dht_node_port", + help="The port on which the console will listen for DHT connections.", + type=int, default=4444) + parser.add_argument("--wallet_type", + help="Either 'lbrycrd' or 'ptc'.", + type=str, default="lbrycrd") + parser.add_argument("--lbrycrd_wallet_rpc_port", + help="The rpc port on which the LBRYcrd wallet is listening", + type=int, default=8332) + parser.add_argument("--no_dht_bootstrap", + help="Don't try to connect to the DHT", + action="store_true") + parser.add_argument("--dht_bootstrap_host", + help="The hostname of a known DHT node, to be used to bootstrap into the DHT. " + "Must be used with --dht_bootstrap_port", + type=str, default='104.236.42.182') + parser.add_argument("--dht_bootstrap_port", + help="The port of a known DHT node, to be used to bootstrap into the DHT. 
Must " + "be used with --dht_bootstrap_host", + type=int, default=4000) + parser.add_argument("--use_upnp", + help="Try to use UPnP to enable incoming connections through the firewall", + action="store_true") + parser.add_argument("--conf_dir", + help=("The full path to the directory in which to store configuration " + "options and user added plugins. Default: ~/.lbrynet"), + type=str) + parser.add_argument("--data_dir", + help=("The full path to the directory in which to store data chunks " + "downloaded from lbrynet. Default: /blobfiles"), + type=str) + + args = parser.parse_args() + + if args.no_dht_bootstrap: + bootstrap_nodes = [] + else: + bootstrap_nodes = [(args.dht_bootstrap_host, args.dht_bootstrap_port)] + + if args.no_listen_peer: + peer_port = None + else: + peer_port = args.peer_port + + if args.no_listen_dht: + dht_node_port = None + else: + dht_node_port = args.dht_node_port + + if not args.conf_dir: + conf_dir = os.path.join(os.path.expanduser("~"), ".lbrynet") + else: + conf_dir = args.conf_dir + if not os.path.exists(conf_dir): + os.mkdir(conf_dir) + if not args.data_dir: + data_dir = os.path.join(conf_dir, "blobfiles") + else: + data_dir = args.data_dir + if not os.path.exists(data_dir): + os.mkdir(data_dir) + + log_format = "(%(asctime)s)[%(filename)s:%(lineno)s] %(funcName)s(): %(message)s" + logging.basicConfig(level=logging.DEBUG, filename=os.path.join(conf_dir, "console.log"), + format=log_format) + + console = LBRYConsole(peer_port, dht_node_port, bootstrap_nodes, StdIOControl, wallet_type=args.wallet_type, + lbrycrd_rpc_port=args.lbrycrd_wallet_rpc_port, use_upnp=args.use_upnp, + conf_dir=conf_dir, data_dir=data_dir) + + d = task.deferLater(reactor, 0, console.start) + reactor.addSystemEventTrigger('before', 'shutdown', console.shut_down) + reactor.run() \ No newline at end of file diff --git a/lbrynet/lbrynet_console/LBRYPlugin.py b/lbrynet/lbrynet_console/LBRYPlugin.py new file mode 100644 index 000000000..2a22f6bfc --- /dev/null +++ b/lbrynet/lbrynet_console/LBRYPlugin.py @@ -0,0 +1,10 @@ +from yapsy.IPlugin import IPlugin + + +class LBRYPlugin(IPlugin): + + def __init__(self): + IPlugin.__init__(self) + + def setup(self, lbry_console): + raise NotImplementedError \ No newline at end of file diff --git a/lbrynet/lbrynet_console/LBRYSettings.py b/lbrynet/lbrynet_console/LBRYSettings.py new file mode 100644 index 000000000..a1909044f --- /dev/null +++ b/lbrynet/lbrynet_console/LBRYSettings.py @@ -0,0 +1,116 @@ +import binascii +import json +import leveldb +import logging +import os +from twisted.internet import threads, defer + + +class LBRYSettings(object): + def __init__(self, db_dir): + self.db_dir = db_dir + self.db = None + + def start(self): + return threads.deferToThread(self._open_db) + + def stop(self): + self.db = None + return defer.succeed(True) + + def _open_db(self): + logging.debug("Opening %s as the settings database", str(os.path.join(self.db_dir, "settings.db"))) + self.db = leveldb.LevelDB(os.path.join(self.db_dir, "settings.db")) + + def save_lbryid(self, lbryid): + + def save_lbryid(): + self.db.Put("lbryid", binascii.hexlify(lbryid), sync=True) + + return threads.deferToThread(save_lbryid) + + def get_lbryid(self): + + def get_lbryid(): + try: + return binascii.unhexlify(self.db.Get("lbryid")) + except KeyError: + return None + + return threads.deferToThread(get_lbryid) + + def get_server_running_status(self): + + def get_status(): + try: + return json.loads(self.db.Get("server_running")) + except KeyError: + return True + + return 
threads.deferToThread(get_status) + + def save_server_running_status(self, running): + + def save_status(): + self.db.Put("server_running", json.dumps(running), sync=True) + + return threads.deferToThread(save_status) + + def get_default_data_payment_rate(self): + return self._get_payment_rate("default_data_payment_rate") + + def save_default_data_payment_rate(self, rate): + return self._save_payment_rate("default_data_payment_rate", rate) + + def get_server_data_payment_rate(self): + return self._get_payment_rate("server_data_payment_rate") + + def save_server_data_payment_rate(self, rate): + return self._save_payment_rate("server_data_payment_rate", rate) + + def get_server_crypt_info_payment_rate(self): + return self._get_payment_rate("server_crypt_info_payment_rate") + + def save_server_crypt_info_payment_rate(self, rate): + return self._save_payment_rate("server_crypt_info_payment_rate", rate) + + def _get_payment_rate(self, rate_type): + + def get_rate(): + try: + return json.loads(self.db.Get(rate_type)) + except KeyError: + return None + + return threads.deferToThread(get_rate) + + def _save_payment_rate(self, rate_type, rate): + + def save_rate(): + if rate is not None: + self.db.Put(rate_type, json.dumps(rate), sync=True) + else: + self.db.Delete(rate_type, sync=True) + + return threads.deferToThread(save_rate) + + def get_query_handler_status(self, query_identifier): + + def get_status(): + try: + return json.loads(self.db.Get(json.dumps(('q_h', query_identifier)))) + except KeyError: + return True + + return threads.deferToThread(get_status) + + def enable_query_handler(self, query_identifier): + return self._set_query_handler_status(query_identifier, True) + + def disable_query_handler(self, query_identifier): + return self._set_query_handler_status(query_identifier, False) + + def _set_query_handler_status(self, query_identifier, status): + def set_status(): + self.db.Put(json.dumps(('q_h', query_identifier)), json.dumps(status), sync=True) + return threads.deferToThread(set_status) \ No newline at end of file diff --git a/lbrynet/lbrynet_console/__init__.py b/lbrynet/lbrynet_console/__init__.py new file mode 100644 index 000000000..5cbf600c2 --- /dev/null +++ b/lbrynet/lbrynet_console/__init__.py @@ -0,0 +1,8 @@ +""" +A plugin-enabled console application for interacting with the LBRY network called lbrynet-console. + +lbrynet-console can be used to download and upload LBRY Files and includes plugins for streaming +LBRY Files to an external application and to download unknown chunks of data for the purpose of +re-uploading them. It gives the user some control over how much will be paid for data and +metadata and also what types of queries from clients. 
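# --- Illustrative sketch (not part of this commit) ---------------------------
# Plugins are discovered by yapsy from the "plugins" directories configured in
# LBRYConsole and must subclass LBRYPlugin, whose setup(lbry_console) hook is
# invoked by _load_plugins().  A minimal plugin might look roughly like the
# following; GreeterHandler, GreeterFactory, and GreeterPlugin are invented
# names, and it is assumed a ControlHandler may be built with no constructor
# arguments, mirroring the handler/factory conventions elsewhere in this
# commit.
from twisted.internet import defer
from lbrynet.lbrynet_console import LBRYPlugin
from lbrynet.lbrynet_console.ControlHandlers import ControlHandler, ControlHandlerFactory


class GreeterHandler(ControlHandler):
    prompt_description = "Print a greeting"

    def handle_line(self, line):
        # finish immediately with the text to display to the user
        return True, defer.succeed("hello from a plugin")


class GreeterFactory(ControlHandlerFactory):
    control_handler_class = GreeterHandler


class GreeterPlugin(LBRYPlugin.LBRYPlugin):

    def setup(self, lbry_console):
        # register one menu entry under its own category
        lbry_console.add_control_handlers([("Greeter", GreeterFactory())])
        return defer.succeed(True)

# The accompanying yapsy metadata file (e.g. greeter.yapsy-plugin, a
# hypothetical name) would follow the same format as
# blindrepeater.yapsy-plugin later in this commit:
#
#   [Core]
#   Name = Greeter
#   Module = Greeter
# -----------------------------------------------------------------------------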
+""" \ No newline at end of file diff --git a/lbrynet/lbrynet_console/interfaces.py b/lbrynet/lbrynet_console/interfaces.py new file mode 100644 index 000000000..54bc33b64 --- /dev/null +++ b/lbrynet/lbrynet_console/interfaces.py @@ -0,0 +1,14 @@ +from zope.interface import Interface + + +class IControlHandlerFactory(Interface): + def get_prompt_description(self): + pass + + def get_handler(self): + pass + + +class IControlHandler(Interface): + def handle_line(self, line): + pass \ No newline at end of file diff --git a/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindBlobHandler.py b/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindBlobHandler.py new file mode 100644 index 000000000..dd0f981f5 --- /dev/null +++ b/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindBlobHandler.py @@ -0,0 +1,15 @@ +from zope.interface import implements +from lbrynet.interfaces import IBlobHandler +from twisted.internet import defer + + +class BlindBlobHandler(object): + implements(IBlobHandler) + + def __init__(self): + pass + + ######### IBlobHandler ######### + + def handle_blob(self, blob, blob_info): + return defer.succeed(True) \ No newline at end of file diff --git a/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindInfoManager.py b/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindInfoManager.py new file mode 100644 index 000000000..7815789c0 --- /dev/null +++ b/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindInfoManager.py @@ -0,0 +1,62 @@ +from twisted.internet import threads, defer +from ValuableBlobInfo import ValuableBlobInfo +from db_keys import BLOB_INFO_TYPE +import json +import leveldb + + +class BlindInfoManager(object): + + def __init__(self, db, peer_manager): + self.db = db + self.peer_manager = peer_manager + + def setup(self): + return defer.succeed(True) + + def stop(self): + self.db = None + return defer.succeed(True) + + def get_all_blob_infos(self): + d = threads.deferToThread(self._get_all_blob_infos) + + def make_blob_infos(blob_data): + blob_infos = [] + for blob in blob_data: + blob_hash, length, reference, peer_host, peer_port, peer_score = blob + peer = self.peer_manager.get_peer(peer_host, peer_port) + blob_info = ValuableBlobInfo(blob_hash, length, reference, peer, peer_score) + blob_infos.append(blob_info) + return blob_infos + d.addCallback(make_blob_infos) + return d + + def save_blob_infos(self, blob_infos): + blobs = [] + for blob_info in blob_infos: + blob_hash = blob_info.blob_hash + length = blob_info.length + reference = blob_info.reference + peer_host = blob_info.peer.host + peer_port = blob_info.peer.port + peer_score = blob_info.peer_score + blobs.append((blob_hash, length, reference, peer_host, peer_port, peer_score)) + return threads.deferToThread(self._save_blob_infos, blobs) + + def _get_all_blob_infos(self): + blob_infos = [] + for key, blob_info in self.db.RangeIter(): + key_type, blob_hash = json.loads(key) + if key_type == BLOB_INFO_TYPE: + blob_infos.append([blob_hash] + json.loads(blob_info)) + return blob_infos + + def _save_blob_infos(self, blobs): + batch = leveldb.WriteBatch() + for blob in blobs: + try: + self.db.Get(json.dumps((BLOB_INFO_TYPE, blob[0]))) + except KeyError: + batch.Put(json.dumps((BLOB_INFO_TYPE, blob[0])), json.dumps(blob[1:])) + self.db.Write(batch, sync=True) \ No newline at end of file diff --git a/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindMetadataHandler.py b/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindMetadataHandler.py new file mode 100644 index 000000000..5697981b0 --- /dev/null +++ 
b/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindMetadataHandler.py @@ -0,0 +1,317 @@ +from zope.interface import implements +from lbrynet.interfaces import IMetadataHandler, IRequestCreator +from lbrynet.core.client.ClientRequest import ClientRequest, ClientPaidRequest +from lbrynet.core.Error import InsufficientFundsError, InvalidResponseError, RequestCanceledError +from lbrynet.core.Error import NoResponseError +from ValuableBlobInfo import ValuableBlobInfo +import datetime +import logging +import random +from twisted.internet import defer +from twisted.python.failure import Failure +from collections import defaultdict + + +class BlindMetadataHandler(object): + implements(IMetadataHandler, IRequestCreator) + + def __init__(self, info_manager, peers, peer_finder, approved_peers, payment_rate_manager, wallet, + download_manager): + self.info_manager = info_manager + self.payment_rate_manager = payment_rate_manager + self.wallet = wallet + self.download_manager = download_manager + self._peers = peers # {Peer: score} + self.peer_finder = peer_finder + self.approved_peers = approved_peers + self._valuable_protocol_prices = {} + self._info_protocol_prices = {} + self._price_disagreements = [] # [Peer] + self._incompatible_peers = [] # [Peer] + self._last_blob_hashes_from_peers = {} # {Peer: (blob_hash, expire_time)} + self._valuable_hashes = {} # {blob_hash: (peer score, reference, peer)} + self._blob_infos = {} # {blob_hash: ValuableBlobInfo} + self._peer_search_results = defaultdict(list) # {peer: [blob_hash]} + + ######### IMetadataHandler ######### + + def get_initial_blobs(self): + d = self.info_manager.get_all_blob_infos() + return d + + def final_blob_num(self): + return None + + ######### IRequestCreator ######### + + def send_next_request(self, peer, protocol): + # Basic idea: + # If the peer has been sending us blob hashes to download recently (10 minutes?), + # send back an example of one (the most recent?) so that it can + # keep sending us more like it. 
Otherwise, just ask for + # valuable blobs + sent_request = False + if self._should_send_request_to(peer): + v_r = self._get_valuable_blob_request(peer) + if v_r is not None: + v_p_r = self._get_valuable_price_request(peer, protocol) + reserved_points = self._reserve_points_valuable(peer, protocol, v_r.max_pay_units) + if reserved_points is not None: + d1 = protocol.add_request(v_r) + d1.addCallback(self._handle_valuable_blob_response, peer, v_r) + d1.addBoth(self._pay_or_cancel_payment, protocol, reserved_points, + self._info_protocol_prices) + d1.addErrback(self._request_failed, "valuable blob request", peer) + sent_request = True + if v_p_r is not None: + d2 = protocol.add_request(v_p_r) + d2.addCallback(self._handle_valuable_price_response, peer, v_p_r, protocol) + d2.addErrback(self._request_failed, "valuable price request", peer) + else: + return defer.fail(InsufficientFundsError()) + i_r = self._get_info_request(peer) + if i_r is not None: + i_p_r = self._get_info_price_request(peer, protocol) + reserved_points = self._reserve_points_info(peer, protocol, i_r.max_pay_units) + if reserved_points is not None: + d3 = protocol.add_request(i_r) + d3.addCallback(self._handle_info_response, peer, i_r, protocol, reserved_points) + d3.addBoth(self._pay_or_cancel_payment, protocol, reserved_points, + self._valuable_protocol_prices) + d3.addErrback(self._request_failed, "info request", peer, reserved_points) + sent_request = True + if i_p_r is not None: + d4 = protocol.add_request(i_p_r) + d4.addCallback(self._handle_info_price_response, peer, i_p_r, protocol) + d4.addErrback(self._request_failed, "info price request", peer) + else: + return defer.fail(InsufficientFundsError()) + return defer.succeed(sent_request) + + def get_new_peers(self): + peers = None + if self._peer_search_results: + peers = self._peer_search_results.keys() + elif len(self.approved_peers) != 0: + peers = random.sample(self.approved_peers, len(self.approved_peers)) + return defer.succeed(peers) + + ######### internal ######### + + def _should_send_request_to(self, peer): + if peer in self._incompatible_peers: + return False + if self._peers[peer] >= 0: + return True + return False + + def _get_valuable_blob_request(self, peer): + blob_hash = None + if peer in self._last_blob_hashes_from_peers: + h, expire_time = self._last_blob_hashes_from_peers[peer] + if datetime.datetime.now() > expire_time: + del self._last_blob_hashes_from_peers[peer] + else: + blob_hash = h + r_dict = {'valuable_blob_hashes': {'reference': blob_hash, 'max_blob_hashes': 20}} + response_identifier = 'valuable_blob_hashes' + request = ClientPaidRequest(r_dict, response_identifier, 20) + return request + + def _get_valuable_price_request(self, peer, protocol): + request = None + if not protocol in self._valuable_protocol_prices: + self._valuable_protocol_prices[protocol] = self.payment_rate_manager.get_rate_valuable_blob_hash(peer) + request_dict = {'valuable_blob_payment_rate': self._valuable_protocol_prices[protocol]} + request = ClientRequest(request_dict, 'valuable_blob_payment_rate') + return request + + def _get_info_request(self, peer): + if peer in self._peer_search_results: + blob_hashes = self._peer_search_results[peer] + del self._peer_search_results[peer] + references = [] + for blob_hash in blob_hashes: + if blob_hash in self._valuable_hashes: + references.append(self._valuable_hashes[blob_hash][1]) + hashes_to_search = [h for h, (s, r, p) in self._valuable_hashes.iteritems() if r in references] + if hashes_to_search: + r_dict = 
{'blob_length': {'blob_hashes': hashes_to_search}} + response_identifier = 'blob_length' + request = ClientPaidRequest(r_dict, response_identifier, len(hashes_to_search)) + return request + if not self._peer_search_results: + self._search_for_peers() + return None + + def _get_info_price_request(self, peer, protocol): + request = None + if not protocol in self._info_protocol_prices: + self._info_protocol_prices[protocol] = self.payment_rate_manager.get_rate_valuable_blob_info(peer) + request_dict = {'blob_length_payment_rate': self._info_protocol_prices[protocol]} + request = ClientRequest(request_dict, 'blob_length_payment_rate') + return request + + def _update_local_score(self, peer, amount): + self._peers[peer] += amount + + def _reserve_points_valuable(self, peer, protocol, max_units): + return self._reserve_points(peer, protocol, max_units, self._valuable_protocol_prices) + + def _reserve_points_info(self, peer, protocol, max_units): + return self._reserve_points(peer, protocol, max_units, self._info_protocol_prices) + + def _reserve_points(self, peer, protocol, max_units, prices): + assert protocol in prices + points_to_reserve = 1.0 * max_units * prices[protocol] / 1000.0 + return self.wallet.reserve_points(peer, points_to_reserve) + + def _pay_or_cancel_payment(self, arg, protocol, reserved_points, protocol_prices): + if isinstance(arg, Failure) or arg == 0: + self._cancel_points(reserved_points) + else: + self._pay_peer(protocol, arg, reserved_points, protocol_prices) + return arg + + def _pay_peer(self, protocol, num_units, reserved_points, prices): + assert num_units != 0 + assert protocol in prices + point_amount = 1.0 * num_units * prices[protocol] / 1000.0 + self.wallet.send_points(reserved_points, point_amount) + + def _cancel_points(self, reserved_points): + self.wallet.cancel_point_reservation(reserved_points) + + def _handle_valuable_blob_response(self, response_dict, peer, request): + if not request.response_identifier in response_dict: + return InvalidResponseError("response identifier not in response") + response = response_dict[request.response_identifier] + if 'error' in response: + if response['error'] == "RATE_UNSET": + return 0 + else: + return InvalidResponseError("Got an unknown error from the peer: %s" % + (response['error'],)) + if not 'valuable_blob_hashes' in response: + return InvalidResponseError("Missing the required field 'valuable_blob_hashes'") + hashes = response['valuable_blob_hashes'] + logging.info("Handling %s valuable blob hashes from %s", str(len(hashes)), str(peer)) + expire_time = datetime.datetime.now() + datetime.timedelta(minutes=10) + reference = None + unique_hashes = set() + if 'reference' in response: + reference = response['reference'] + for blob_hash, peer_score in hashes: + if reference is None: + reference = blob_hash + self._last_blob_hashes_from_peers[peer] = (blob_hash, expire_time) + if not (blob_hash in self._valuable_hashes or blob_hash in self._blob_infos): + self._valuable_hashes[blob_hash] = (peer_score, reference, peer) + unique_hashes.add(blob_hash) + + if len(unique_hashes): + self._update_local_score(peer, len(unique_hashes)) + peer.update_stats('downloaded_valuable_blob_hashes', len(unique_hashes)) + peer.update_score(len(unique_hashes)) + else: + self._update_local_score(peer, -.0001) + return len(unique_hashes) + + def _handle_info_response(self, response_dict, peer, request): + if not request.response_identifier in response_dict: + return InvalidResponseError("response identifier not in response") + response = 
response_dict[request.response_identifier] + if 'error' in response: + if response['error'] == 'RATE_UNSET': + return 0 + else: + return InvalidResponseError("Got an unknown error from the peer: %s" % + (response['error'],)) + if not 'blob_lengths' in response: + return InvalidResponseError("Missing the required field 'blob_lengths'") + raw_blob_lengths = response['blob_lengths'] + logging.info("Handling %s blob lengths from %s", str(len(raw_blob_lengths)), str(peer)) + logging.debug("blobs: %s", str(raw_blob_lengths)) + infos = [] + unique_hashes = set() + for blob_hash, length in raw_blob_lengths: + if blob_hash in self._valuable_hashes: + peer_score, reference, peer = self._valuable_hashes[blob_hash] + del self._valuable_hashes[blob_hash] + infos.append(ValuableBlobInfo(blob_hash, length, reference, peer, peer_score)) + unique_hashes.add(blob_hash) + elif blob_hash in request.request_dict['blob_length']['blob_hashes']: + unique_hashes.add(blob_hash) + d = self.info_manager.save_blob_infos(infos) + d.addCallback(lambda _: self.download_manager.add_blobs_to_download(infos)) + + def pay_or_penalize_peer(): + if len(unique_hashes): + self._update_local_score(peer, len(unique_hashes)) + peer.update_stats('downloaded_valuable_blob_infos', len(unique_hashes)) + peer.update_score(len(unique_hashes)) + else: + self._update_local_score(peer, -.0001) + return len(unique_hashes) + + d.addCallback(lambda _: pay_or_penalize_peer()) + + return d + + def _handle_valuable_price_response(self, response_dict, peer, request, protocol): + if not request.response_identifier in response_dict: + return InvalidResponseError("response identifier not in response") + assert protocol in self._valuable_protocol_prices + response = response_dict[request.response_identifier] + if response == "RATE_ACCEPTED": + return True + else: + del self._valuable_protocol_prices[protocol] + self._price_disagreements.append(peer) + return True + + def _handle_info_price_response(self, response_dict, peer, request, protocol): + if not request.response_identifier in response_dict: + return InvalidResponseError("response identifier not in response") + assert protocol in self._info_protocol_prices + response = response_dict[request.response_identifier] + if response == "RATE_ACCEPTED": + return True + else: + del self._info_protocol_prices[protocol] + self._price_disagreements.append(peer) + return True + + def _request_failed(self, reason, request_type, peer): + if reason.check(RequestCanceledError): + return + if reason.check(NoResponseError): + self._incompatible_peers.append(peer) + return + logging.warning("Valuable blob info requester: a request of type %s has failed. 
Reason: %s", + str(request_type), str(reason.getErrorMessage())) + self._update_local_score(peer, -10.0) + peer.update_score(-5.0) + return reason + + def _search_for_peers(self): + references_with_sources = set() + for h_list in self._peer_search_results.itervalues(): + for h in h_list: + if h in self._valuable_hashes: + references_with_sources.add(self._valuable_hashes[h][1]) + hash_to_search = None + used_references = [] + for h, (s, r, p) in self._valuable_hashes.iteritems(): + if not r in used_references: + used_references.append(r) + hash_to_search = h + if not r in references_with_sources: + break + if hash_to_search: + d = self.peer_finder.find_peers_for_blob(hash_to_search) + d.addCallback(self._set_peer_search_results, hash_to_search) + + def _set_peer_search_results(self, peers, searched_hash): + for peer in peers: + self._peer_search_results[peer].append(searched_hash) \ No newline at end of file diff --git a/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindProgressManager.py b/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindProgressManager.py new file mode 100644 index 000000000..e603dae35 --- /dev/null +++ b/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindProgressManager.py @@ -0,0 +1,93 @@ +from zope.interface import implements +from lbrynet.interfaces import IProgressManager +from twisted.internet import defer + + +class BlindProgressManager(object): + implements(IProgressManager) + + def __init__(self, blob_manager, peers, max_space, blob_scorers, download_manager): + self.blob_manager = blob_manager + self.peers = peers + self.max_space = max_space + self.blob_scorers = blob_scorers + self.download_manager = download_manager + self.paused = True + self.blobs_to_download = [] + self._next_manage_downloaded_blobs = None + + def set_max_space(self, max_space): + self.max_space = max_space + + ######### IProgressManager ######### + + def start(self): + from twisted.internet import reactor + + self.paused = False + self._next_manage_downloaded_blobs = reactor.callLater(0, self._manage_downloaded_blobs) + return defer.succeed(True) + + def stop(self): + self.paused = True + if self._next_manage_downloaded_blobs is not None and self._next_manage_downloaded_blobs.active(): + self._next_manage_downloaded_blobs.cancel() + self._next_manage_downloaded_blobs = None + return defer.succeed(True) + + def stream_position(self): + return 0 + + def needed_blobs(self): + needed_blobs = [b for b in self.blobs_to_download if not b.is_validated()] + return sorted(needed_blobs, key=lambda x: x.is_downloading(), reverse=True)[:20] + + ######### internal ######### + + def _manage_downloaded_blobs(self): + + self._next_manage_downloaded_blobs = None + + from twisted.internet import reactor + + blobs = self.download_manager.blobs + blob_infos = self.download_manager.blob_infos + + blob_hashes = [b.blob_hash for b in blobs] + + blobs_to_score = [(blobs[blob_hash], blob_infos[blob_hash]) for blob_hash in blob_hashes] + + scores = self._score_blobs(blobs_to_score) + + from future_builtins import zip + + scored_blobs = zip(blobs_to_score, scores) + ranked_blobs = sorted(scored_blobs, key=lambda x: x[1], reverse=True) + + space_so_far = 0 + blobs_to_delete = [] + blobs_to_download = [] + + for (blob, blob_info), score in ranked_blobs: + space_so_far += blob.blob_length + if blob.is_validated() and space_so_far >= self.max_space: + blobs_to_delete.append(blob) + elif not blob.is_validated() and space_so_far < self.max_space: + blobs_to_download.append(blob) + + 
self.blob_manager.delete_blobs(blobs_to_delete) + self.blobs_to_download = blobs_to_download + + self._next_manage_downloaded_blobs = reactor.callLater(30, self._manage_downloaded_blobs) + + def _score_blobs(self, blobs): + scores = [] + for blob, blob_info in blobs: + summands = [] + multiplicands = [] + for blob_scorer in self.blob_scorers: + s, m = blob_scorer.score_blob(blob, blob_info) + summands.append(s) + multiplicands.append(m) + scores.append(1.0 * sum(summands) * reduce(lambda x, y: x * y, multiplicands, 1)) + return scores \ No newline at end of file diff --git a/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindRepeater.py b/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindRepeater.py new file mode 100644 index 000000000..284114017 --- /dev/null +++ b/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindRepeater.py @@ -0,0 +1,113 @@ +from twisted.internet import defer +from twisted.python.failure import Failure +from lbrynet.core.client.BlobRequester import BlobRequester +from lbrynet.core.client.ConnectionManager import ConnectionManager +from lbrynet.core.client.DownloadManager import DownloadManager +from BlindMetadataHandler import BlindMetadataHandler +from BlindProgressManager import BlindProgressManager +from BlindBlobHandler import BlindBlobHandler +from collections import defaultdict +from interfaces import IBlobScorer +from zope.interface import implements + + +class PeerScoreBasedScorer(object): + implements(IBlobScorer) + + def __init__(self): + pass + + def score_blob(self, blob, blob_info): + return blob_info.peer_score, 1 + + +class LengthBasedScorer(object): + implements(IBlobScorer) + + def __init__(self): + pass + + def score_blob(self, blob, blob_info): + return 0, 1.0 * blob.get_length() / 2**21 + + +class BlindRepeater(object): + def __init__(self, peer_finder, rate_limiter, blob_manager, info_manager, wallet, payment_rate_manager): + self.peer_finder = peer_finder + self.rate_limiter = rate_limiter + self.blob_manager = blob_manager + self.info_manager = info_manager + self.wallet = wallet + self.payment_rate_manager = payment_rate_manager + self.download_manager = None + self.progress_manager = None + self.max_space = 0 + self.peers = defaultdict(int) + self.approved_peers = set() + self.stopped = True + + def setup(self): + return defer.succeed(True) + + def start(self): + if self.stopped is True: + return self._start() + else: + return defer.fail(Failure(ValueError("The repeater is already running"))) + + def stop(self): + if self.stopped is False: + return self._stop() + else: + return defer.fail(Failure(ValueError("The repeater is not running"))) + + def status(self): + if self.stopped is True: + return "stopped" + else: + return "running" + + def set_max_space(self, max_space): + self.max_space = max_space + if self.progress_manager is not None: + self.progress_manager.set_max_space(self.max_space) + + def add_approved_peer(self, peer): + self.approved_peers.add(peer) + + def remove_approved_peer(self, peer): + self.approved_peers.remove(peer) + + def _start(self): + self.download_manager = DownloadManager(self.blob_manager, True) + info_finder = BlindMetadataHandler(self.info_manager, self.peers, self.peer_finder, + self.approved_peers, self.payment_rate_manager, + self.wallet, self.download_manager) + self.download_manager.blob_info_finder = info_finder + blob_requester = BlobRequester(self.blob_manager, self.peer_finder, self.payment_rate_manager, + self.wallet, self.download_manager) + self.download_manager.blob_requester = blob_requester + 
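# --- Illustrative sketch (not part of this commit) ---------------------------
# Each IBlobScorer contributes a (summand, multiplicand) pair and the final
# score is sum(summands) * product(multiplicands): PeerScoreBasedScorer above
# feeds the peer's score into the additive part, while LengthBasedScorer
# scales the result by blob length in units of 2 MiB.  A self-contained
# rendition of that combination rule, with plain functions standing in for the
# scorer classes (names invented for demonstration):
from functools import reduce  # the original code relies on Python 2's builtin
from operator import mul

def peer_score_scorer(blob):
    return blob["peer_score"], 1                 # additive contribution only

def length_scorer(blob):
    return 0, 1.0 * blob["length"] / 2 ** 21     # multiplicative contribution only

def score_blob(blob, scorers):
    pairs = [scorer(blob) for scorer in scorers]
    summands = [s for s, _ in pairs]
    multiplicands = [m for _, m in pairs]
    return 1.0 * sum(summands) * reduce(mul, multiplicands, 1)

if __name__ == "__main__":
    blob = {"peer_score": 4.0, "length": 2 ** 21}
    print(score_blob(blob, [peer_score_scorer, length_scorer]))  # prints 4.0
# -----------------------------------------------------------------------------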
self.progress_manager = BlindProgressManager(self.blob_manager, self.peers, self.max_space, + [PeerScoreBasedScorer(), LengthBasedScorer()], + self.download_manager) + self.download_manager.progress_manager = self.progress_manager + self.download_manager.blob_handler = BlindBlobHandler() + wallet_info_exchanger = self.wallet.get_info_exchanger() + self.download_manager.wallet_info_exchanger = wallet_info_exchanger + connection_manager = ConnectionManager(self, self.rate_limiter, [info_finder, blob_requester], + [wallet_info_exchanger]) + self.download_manager.connection_manager = connection_manager + d = defer.maybeDeferred(self.download_manager.start_downloading) + d.addCallback(lambda _: self._update_status(stopped=False)) + return d + + def _stop(self): + d = defer.maybeDeferred(self.download_manager.stop_downloading) + d.addCallback(lambda _: self._update_status(stopped=True)) + return d + + def _update_status(self, stopped=True): + self.stopped = stopped + + def insufficient_funds(self): + return self.stop() \ No newline at end of file diff --git a/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindRepeaterControlHandlers.py b/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindRepeaterControlHandlers.py new file mode 100644 index 000000000..6ee7bd254 --- /dev/null +++ b/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindRepeaterControlHandlers.py @@ -0,0 +1,318 @@ +from lbrynet.lbrynet_console.ControlHandlers import ControlHandler, ControlHandlerFactory +from lbrynet.lbrynet_console.ControlHandlers import RecursiveControlHandler, ModifyPaymentRate +from twisted.internet import defer + + +class StartRepeater(ControlHandler): + prompt_description = "Start the blind repeater" + + def __init__(self, repeater, settings): + self.repeater = repeater + self.settings = settings + + def handle_line(self, line): + assert line is None, "Start repeater should not be passed any arguments" + d = self.settings.save_repeater_status(running=True) + d.addCallback(lambda _: self.repeater.start()) + d.addCallback(lambda _: "Started the repeater") + return True, d + + +class StartRepeaterFactory(ControlHandlerFactory): + control_handler_class = StartRepeater + + +class StopRepeater(ControlHandler): + prompt_description = "Stop the blind repeater" + + def __init__(self, repeater, settings): + self.repeater = repeater + self.settings = settings + + def handle_line(self, line): + assert line is None, "Stop repeater should not be passed any arguments" + d = self.settings.save_repeater_status(running=False) + d.addCallback(lambda _: self.repeater.stop()) + d.addCallback(lambda _: "Stopped the repeater") + return True, d + + +class StopRepeaterFactory(ControlHandlerFactory): + control_handler_class = StopRepeater + + +class UpdateMaxSpace(ControlHandler): + prompt_description = "Set the maximum space to be used by the blind repeater" + line_prompt = "Maximum space (in bytes):" + + def __init__(self, repeater, settings): + self.repeater = repeater + self.settings = settings + + def handle_line(self, line): + if line is None: + return False, defer.succeed(self.line_prompt) + return True, self._set_max_space(line) + + def _set_max_space(self, line): + max_space = int(line) + d = self.settings.save_max_space(max_space) + d.addCallback(lambda _: self.repeater.set_max_space(max_space)) + d.addCallback(lambda _: "Set the maximum space to " + str(max_space) + " bytes") + return d + + +class UpdateMaxSpaceFactory(ControlHandlerFactory): + control_handler_class = UpdateMaxSpace + + +class AddApprovedPeer(ControlHandler): 
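# --- Illustrative sketch (not part of this commit) ---------------------------
# The console handlers above (StartRepeater, StopRepeater, UpdateMaxSpace)
# follow a common handle_line convention, inferred from this commit: the
# method returns a (done, deferred) pair, where done=False means "prompt the
# user again with the text the Deferred fires with" and done=True means the
# Deferred fires with the final message to display.  A minimal single-prompt
# handler in that style (EchoBytes and its factory are invented names and
# assume the handler can be built with no arguments):
from twisted.internet import defer
from lbrynet.lbrynet_console.ControlHandlers import ControlHandler, ControlHandlerFactory


class EchoBytes(ControlHandler):
    prompt_description = "Echo a number of bytes back to the user"
    line_prompt = "Number of bytes:"

    def handle_line(self, line):
        if line is None:
            # first call: ask for input and stay in this handler
            return False, defer.succeed(self.line_prompt)
        # second call: finish with a result message
        return True, defer.succeed("you entered %d bytes" % int(line))


class EchoBytesFactory(ControlHandlerFactory):
    control_handler_class = EchoBytes
# -----------------------------------------------------------------------------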
+ prompt_description = "Add a peer to the approved list of peers to check for valuable blob hashes" + host_prompt = "Peer host in dotted quad (e.g. 127.0.0.1)" + port_prompt = "Peer port (e.g. 4444)" + + def __init__(self, repeater, peer_manager, settings): + self.repeater = repeater + self.peer_manager = peer_manager + self.settings = settings + self.host_to_add = None + + def handle_line(self, line): + if line is None: + return False, defer.succeed(self.host_prompt) + elif self.host_to_add is None: + self.host_to_add = line + return False, defer.succeed(self.port_prompt) + else: + self.host_to_add, host = None, self.host_to_add + return True, self._add_peer(host, line) + + def _add_peer(self, host, port): + peer = self.peer_manager.get_peer(host, int(port)) + d = self.settings.save_approved_peer(host, int(port)) + d.addCallback(lambda _: self.repeater.add_approved_peer(peer)) + d.addCallback(lambda _: "Successfully added peer") + return d + + +class AddApprovedPeerFactory(ControlHandlerFactory): + control_handler_class = AddApprovedPeer + + +class ApprovedPeerChooser(RecursiveControlHandler): + + def __init__(self, repeater, factory_class, *args, **kwargs): + self.repeater = repeater + self.factory_class = factory_class + self.args = args + RecursiveControlHandler.__init__(self, **kwargs) + + def _get_control_handler_factories(self): + control_handler_factories = [] + for peer in self.repeater.approved_peers: + control_handler_factories.append(self.factory_class(peer, *self.args)) + return control_handler_factories + + +class ApprovedPeerChooserFactory(ControlHandlerFactory): + def get_prompt_description(self): + peer = self.args[0] + return str(peer) + + +class DeleteApprovedPeerChooser(ApprovedPeerChooser): + prompt_description = "Remove a peer from the approved list of peers to check for valuable blob hashes" + + def __init__(self, repeater, settings): + ApprovedPeerChooser.__init__(self, repeater, DeleteApprovedPeerFactory, repeater, settings, + exit_after_one_done=True) + + +class DeleteApprovedPeerChooserFactory(ControlHandlerFactory): + control_handler_class = DeleteApprovedPeerChooser + + +class DeleteApprovedPeer(ControlHandler): + prompt_description = "Remove a peer from the approved list of peers to check for valuable blob hashes" + + def __init__(self, peer, repeater, settings): + self.repeater = repeater + self.settings = settings + self.peer_to_remove = peer + + def handle_line(self, line): + return True, self._remove_peer() + + def _remove_peer(self): + d = self.settings.remove_approved_peer(self.peer_to_remove.host, int(self.peer_to_remove.port)) + d.addCallback(lambda _: self.repeater.remove_approved_peer(self.peer_to_remove)) + d.addCallback(lambda _: "Successfully removed peer") + return d + + +class DeleteApprovedPeerFactory(ApprovedPeerChooserFactory): + control_handler_class = DeleteApprovedPeer + + +class ShowApprovedPeers(ControlHandler): + prompt_description = "Show the list of peers approved to be checked for valuable blob hashes" + + def __init__(self, repeater): + self.repeater = repeater + + def handle_line(self, line): + assert line is None, "Show approved peers should not be passed any arguments" + return True, self._show_peers() + + def _show_peers(self): + peer_string = "Approved peers:\n" + for peer in self.repeater.approved_peers: + peer_string += str(peer) + "\n" + return defer.succeed(peer_string) + + +class ShowApprovedPeersFactory(ControlHandlerFactory): + control_handler_class = ShowApprovedPeers + + +class RepeaterStatus(ControlHandler): + 
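# --- Illustrative sketch (not part of this commit) ---------------------------
# ApprovedPeerChooser above shows how a RecursiveControlHandler turns a
# runtime collection into a sub-menu: _get_control_handler_factories() yields
# one factory per item, so a freshly created chooser reflects the repeater's
# current approved-peer set.  A hedged sketch of the same idea for an
# arbitrary collection; WidgetChooser is an invented name, and the
# RecursiveControlHandler API is assumed to match its use above:
from lbrynet.lbrynet_console.ControlHandlers import RecursiveControlHandler


class WidgetChooser(RecursiveControlHandler):
    prompt_description = "Inspect one of the currently known widgets"

    def __init__(self, widgets, factory_class):
        self.widgets = widgets              # any collection that changes over time
        self.factory_class = factory_class  # one factory is built per widget
        RecursiveControlHandler.__init__(self)

    def _get_control_handler_factories(self):
        # one menu entry per widget, built when the chooser is created
        return [self.factory_class(widget) for widget in self.widgets]
# -----------------------------------------------------------------------------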
prompt_description = "Show the repeater's status" + + def __init__(self, repeater): + self.repeater = repeater + + def handle_line(self, line): + assert line is None, "Show repeater status should not be passed any arguments" + return True, defer.maybeDeferred(self._get_status) + + def _get_status(self): + status_string = "Repeater status: " + self.repeater.status() + "\n" + + if self.repeater.stopped is False: + max_space = self.repeater.progress_manager.max_space + space_used = 0 + for blob in self.repeater.download_manager.blobs: + if blob.is_validated(): + space_used += blob.get_length() + + status_string += "Maximum space: " + str(max_space) + " bytes\n" + status_string += "Space used: " + str(space_used) + " bytes\n" + return defer.succeed(status_string) + + +class RepeaterStatusFactory(ControlHandlerFactory): + control_handler_class = RepeaterStatus + + +class ModifyDataPaymentRate(ModifyPaymentRate): + prompt_description = "Modify Blind Repeater data payment rate" + + def __init__(self, repeater, settings): + ModifyPaymentRate.__init__(self) + self._prompt_choices['unset'] = (self._unset, "Use the application default data rate") + self.payment_rate_manager = repeater.payment_rate_manager + self.settings = settings + + def _unset(self): + self._set_rate(None) + return True, defer.succeed("Using the application default data rate") + + def _set_rate(self, rate): + + def set_data_payment_rate(rate): + self.payment_rate_manager.min_blob_data_payment_rate = rate + + d = self.settings.save_data_payment_rate(rate) + d.addCallback(lambda _: set_data_payment_rate(rate)) + return d + + def _get_current_status(self): + effective_rate = self.payment_rate_manager.get_effective_min_blob_data_payment_rate() + if self.payment_rate_manager.min_blob_data_payment_rate is None: + status = "The current data payment rate is set to use the application default, " + status += str(effective_rate) + else: + status = "The current data payment rate is " + status += str(effective_rate) + return status + + +class ModifyDataPaymentRateFactory(ControlHandlerFactory): + control_handler_class = ModifyDataPaymentRate + + +class ModifyInfoPaymentRate(ModifyPaymentRate): + prompt_description = "Modify Blind Repeater valuable info payment rate" + + def __init__(self, repeater, settings): + ModifyPaymentRate.__init__(self) + self.payment_rate_manager = repeater.payment_rate_manager + self.settings = settings + + def _set_rate(self, rate): + + def set_info_payment_rate(rate): + self.payment_rate_manager.min_valuable_blob_info_payment_rate = rate + + d = self.settings.save_valuable_info_payment_rate(rate) + d.addCallback(lambda _: set_info_payment_rate(rate)) + return d + + def _get_current_status(self): + status = "The current valuable blob info payment rate is " + status += str(self.payment_rate_manager.min_valuable_blob_info_payment_rate) + return status + + +class ModifyInfoPaymentRateFactory(ControlHandlerFactory): + control_handler_class = ModifyInfoPaymentRate + + +class ModifyHashPaymentRate(ModifyPaymentRate): + prompt_description = "Modify Blind Repeater valuable hash payment rate" + + def __init__(self, repeater, settings): + ModifyPaymentRate.__init__(self) + self.payment_rate_manager = repeater.payment_rate_manager + self.settings = settings + + def _set_rate(self, rate): + + def set_hash_payment_rate(rate): + self.payment_rate_manager.min_valuable_blob_hash_payment_rate = rate + + d = self.settings.save_valuable_hash_payment_rate(rate) + d.addCallback(lambda _: set_hash_payment_rate(rate)) + return d + + def 
_get_current_status(self): + status = "The current valuable blob hash payment rate is " + status += str(self.payment_rate_manager.min_valuable_blob_hash_payment_rate) + return status + + +class ModifyHashPaymentRateFactory(ControlHandlerFactory): + control_handler_class = ModifyHashPaymentRate + + +class ModifyRepeaterOptions(RecursiveControlHandler): + prompt_description = "Modify Blind Repeater options" + + def __init__(self, repeater, lbry_session, settings): + self.repeater = repeater + self.lbry_session = lbry_session + self.settings = settings + RecursiveControlHandler.__init__(self) + + def _get_control_handler_factories(self): + return [ModifyDataPaymentRateFactory(self.repeater, self.settings), + ModifyInfoPaymentRateFactory(self.repeater, self.settings), + ModifyHashPaymentRateFactory(self.repeater, self.settings), + UpdateMaxSpaceFactory(self.repeater, self.settings), + AddApprovedPeerFactory(self.repeater, self.lbry_session.peer_manager, self.settings), + DeleteApprovedPeerChooserFactory(self.repeater, self.settings), + ] + + +class ModifyRepeaterOptionsFactory(ControlHandlerFactory): + control_handler_class = ModifyRepeaterOptions \ No newline at end of file diff --git a/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindRepeaterSettings.py b/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindRepeaterSettings.py new file mode 100644 index 000000000..c7f4353d8 --- /dev/null +++ b/lbrynet/lbrynet_console/plugins/BlindRepeater/BlindRepeaterSettings.py @@ -0,0 +1,94 @@ +from db_keys import SETTING_TYPE, PEER_TYPE +from twisted.internet import threads +import json + + +class BlindRepeaterSettings(object): + + def __init__(self, db): + self.db = db + + def save_repeater_status(self, running): + def save_status(): + self.db.Put(json.dumps((SETTING_TYPE, "running")), json.dumps(running), sync=True) + + return threads.deferToThread(save_status) + + def get_repeater_saved_status(self): + def get_status(): + try: + return json.loads(self.db.Get(json.dumps((SETTING_TYPE, "running")))) + except KeyError: + return False + + return threads.deferToThread(get_status) + + def save_max_space(self, max_space): + def save_space(): + self.db.Put(json.dumps((SETTING_TYPE, "max_space")), str(max_space), sync=True) + + return threads.deferToThread(save_space) + + def get_saved_max_space(self): + def get_space(): + try: + return int(self.db.Get(json.dumps((SETTING_TYPE, "max_space")))) + except KeyError: + return 0 + + return threads.deferToThread(get_space) + + def save_approved_peer(self, host, port): + def add_peer(): + peer_string = json.dumps((PEER_TYPE, (host, port))) + self.db.Put(peer_string, "", sync=True) + + return threads.deferToThread(add_peer) + + def remove_approved_peer(self, host, port): + def remove_peer(): + peer_string = json.dumps((PEER_TYPE, (host, port))) + self.db.Delete(peer_string, sync=True) + + return threads.deferToThread(remove_peer) + + def get_approved_peers(self): + def get_peers(): + peers = [] + for k, v in self.db.RangeIter(): + key_type, peer_info = json.loads(k) + if key_type == PEER_TYPE: + peers.append(peer_info) + return peers + + return threads.deferToThread(get_peers) + + def get_data_payment_rate(self): + return threads.deferToThread(self._get_rate, "data_payment_rate") + + def save_data_payment_rate(self, rate): + return threads.deferToThread(self._save_rate, "data_payment_rate", rate) + + def get_valuable_info_payment_rate(self): + return threads.deferToThread(self._get_rate, "valuable_info_rate") + + def save_valuable_info_payment_rate(self, rate): + 
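# --- Illustrative sketch (not part of this commit) ---------------------------
# Both LBRYSettings and BlindRepeaterSettings persist values the same way:
# JSON-encode the value, Put/Get/Delete it in a leveldb database, and wrap the
# blocking leveldb call in deferToThread so the reactor is never blocked.  A
# stripped-down standalone version of that pattern; the "demo_settings.db"
# path and the "demo_rate" key are invented for demonstration, and the same
# py-leveldb module used in this commit is assumed:
import json
import leveldb
from twisted.internet import threads


class TinySettings(object):
    def __init__(self, path):
        self.db = leveldb.LevelDB(path)

    def save_rate(self, rate):
        def put():
            self.db.Put("demo_rate", json.dumps(rate), sync=True)
        return threads.deferToThread(put)

    def get_rate(self):
        def get():
            try:
                return json.loads(self.db.Get("demo_rate"))
            except KeyError:
                return None  # unset settings fall back to application defaults
        return threads.deferToThread(get)

# settings = TinySettings("demo_settings.db")
# d = settings.save_rate(0.5)
# d.addCallback(lambda _: settings.get_rate())
# d.addCallback(lambda rate: ...)  # fires with 0.5
# -----------------------------------------------------------------------------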
return threads.deferToThread(self._save_rate, "valuable_info_rate", rate) + + def get_valuable_hash_payment_rate(self): + return threads.deferToThread(self._get_rate, "valuable_hash_rate") + + def save_valuable_hash_payment_rate(self, rate): + return threads.deferToThread(self._save_rate, "valuable_hash_rate", rate) + + def _get_rate(self, rate_type): + try: + return json.loads(self.db.Get(json.dumps((SETTING_TYPE, rate_type)))) + except KeyError: + return None + + def _save_rate(self, rate_type, rate): + if rate is not None: + self.db.Put(json.dumps((SETTING_TYPE, rate_type)), json.dumps(rate), sync=True) + else: + self.db.Delete(json.dumps((SETTING_TYPE, rate_type)), sync=True) \ No newline at end of file diff --git a/lbrynet/lbrynet_console/plugins/BlindRepeater/PaymentRateManager.py b/lbrynet/lbrynet_console/plugins/BlindRepeater/PaymentRateManager.py new file mode 100644 index 000000000..9f149f0c2 --- /dev/null +++ b/lbrynet/lbrynet_console/plugins/BlindRepeater/PaymentRateManager.py @@ -0,0 +1,20 @@ +from lbrynet.core.PaymentRateManager import PaymentRateManager + + +class BlindRepeaterPaymentRateManager(PaymentRateManager): + def __init__(self, base, valuable_info_rate, valuable_hash_rate, blob_data_rate=None): + PaymentRateManager.__init__(self, base, blob_data_rate) + self.min_valuable_blob_info_payment_rate = valuable_info_rate + self.min_valuable_blob_hash_payment_rate = valuable_hash_rate + + def get_rate_valuable_blob_info(self, peer): + return self.min_valuable_blob_info_payment_rate + + def accept_rate_valuable_blob_info(self, peer, payment_rate): + return payment_rate >= self.min_valuable_blob_info_payment_rate + + def get_rate_valuable_blob_hash(self, peer): + return self.min_valuable_blob_hash_payment_rate + + def accept_rate_valuable_blob_hash(self, peer, payment_rate): + return payment_rate >= self.min_valuable_blob_hash_payment_rate \ No newline at end of file diff --git a/lbrynet/lbrynet_console/plugins/BlindRepeater/ValuableBlobInfo.py b/lbrynet/lbrynet_console/plugins/BlindRepeater/ValuableBlobInfo.py new file mode 100644 index 000000000..453365c0a --- /dev/null +++ b/lbrynet/lbrynet_console/plugins/BlindRepeater/ValuableBlobInfo.py @@ -0,0 +1,9 @@ +from lbrynet.core.BlobInfo import BlobInfo + + +class ValuableBlobInfo(BlobInfo): + def __init__(self, blob_hash, length, reference, peer, peer_score): + BlobInfo.__init__(self, blob_hash, blob_hash, length) + self.reference = reference + self.peer = peer + self.peer_score = peer_score diff --git a/lbrynet/lbrynet_console/plugins/BlindRepeater/ValuableBlobQueryHandler.py b/lbrynet/lbrynet_console/plugins/BlindRepeater/ValuableBlobQueryHandler.py new file mode 100644 index 000000000..3d96eb23e --- /dev/null +++ b/lbrynet/lbrynet_console/plugins/BlindRepeater/ValuableBlobQueryHandler.py @@ -0,0 +1,199 @@ +from lbrynet.interfaces import IQueryHandlerFactory, IQueryHandler +from zope.interface import implements +from twisted.internet import defer +import logging + + +class ValuableQueryHandler(object): + implements(IQueryHandler) + + def __init__(self, wallet, payment_rate_manager): + self.wallet = wallet + self.payment_rate_manager = payment_rate_manager + self.peer = None + self.payment_rate = None + self.query_identifiers = [] + + ######### IQueryHandler ######### + + def register_with_request_handler(self, request_handler, peer): + self.peer = peer + request_handler.register_query_handler(self, self.query_identifiers) + + def handle_queries(self, queries): + pass + + +class ValuableBlobHashQueryHandlerFactory(object): + 
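# --- Illustrative sketch (not part of this commit) ---------------------------
# The payment-rate handshake implied by BlindRepeaterPaymentRateManager above:
# the requesting side offers whatever rate its own rate manager quotes, and
# the serving side accepts the offer only if it is at least the serving side's
# configured minimum (the accept_rate_* methods are simple >= comparisons).
# A condensed standalone illustration of that exchange; all names below are
# invented for demonstration:

SERVER_MIN_INFO_RATE = 2.0  # stands in for min_valuable_blob_info_payment_rate

def client_offer(client_rate):
    # the client simply sends the rate its payment rate manager returns
    return {"blob_length_payment_rate": client_rate}

def server_respond(offer, min_rate=SERVER_MIN_INFO_RATE):
    offered = offer["blob_length_payment_rate"]
    if offered >= min_rate:
        return {"blob_length_payment_rate": "RATE_ACCEPTED"}
    return {"blob_length_payment_rate": "RATE_TOO_LOW"}

# server_respond(client_offer(3.0)) -> RATE_ACCEPTED; paid queries may follow
# server_respond(client_offer(1.0)) -> RATE_TOO_LOW; on the requesting side the
#                                      peer is recorded as a price disagreement
# -----------------------------------------------------------------------------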
implements(IQueryHandlerFactory) + + def __init__(self, peer_finder, wallet, payment_rate_manager): + self.peer_finder = peer_finder + self.wallet = wallet + self.payment_rate_manager = payment_rate_manager + + ######### IQueryHandlerFactory ######### + + def build_query_handler(self): + q_h = ValuableBlobHashQueryHandler(self.wallet, self.payment_rate_manager, self.peer_finder) + return q_h + + def get_primary_query_identifier(self): + return 'valuable_blob_hashes' + + def get_description(self): + return "Valuable Hashes - Hashes of blobs that it may be valuable to repeat" + + +class ValuableBlobHashQueryHandler(ValuableQueryHandler): + implements(IQueryHandler) + + def __init__(self, wallet, payment_rate_manager, peer_finder): + ValuableQueryHandler.__init__(self, wallet, payment_rate_manager) + self.peer_finder = peer_finder + self.query_identifiers = ['valuable_blob_hashes', 'valuable_blob_payment_rate'] + self.valuable_blob_hash_payment_rate = None + self.blob_length_payment_rate = None + + ######### IQueryHandler ######### + + def handle_queries(self, queries): + response = {} + + def set_fields(fields): + response.update(fields) + + if self.query_identifiers[1] in queries: + d = self._handle_valuable_blob_payment_rate(queries[self.query_identifiers[1]]) + d.addCallback(set_fields) + else: + d = defer.succeed(True) + + if self.query_identifiers[0] in queries: + d.addCallback(lambda _: self._handle_valuable_blob_hashes(queries[self.query_identifiers[0]])) + d.addCallback(set_fields) + + d.addCallback(lambda _: response) + return d + + ######### internal ######### + + def _handle_valuable_blob_payment_rate(self, requested_payment_rate): + if not self.payment_rate_manager.accept_rate_valuable_blob_hash(self.peer, "VALUABLE_BLOB_HASH", + requested_payment_rate): + r = "RATE_TOO_LOW" + else: + self.valuable_blob_hash_payment_rate = requested_payment_rate + r = "RATE_ACCEPTED" + return defer.succeed({'valuable_blob_payment_rate': r}) + + def _handle_valuable_blob_hashes(self, request): + # TODO: eventually, look at the request and respond appropriately given the 'reference' field + if self.valuable_blob_hash_payment_rate is not None: + max_hashes = 20 + if 'max_blob_hashes' in request: + max_hashes = int(request['max_blob_hash']) + valuable_hashes = self.peer_finder.get_most_popular_blobs(max_hashes) + hashes_and_scores = [] + for blob_hash, count in valuable_hashes: + hashes_and_scores.append((blob_hash, 1.0 * count / 10.0)) + if len(hashes_and_scores) != 0: + logging.info("Responding to a valuable blob hashes request with %s blob hashes: %s", + str(len(hashes_and_scores))) + expected_payment = 1.0 * len(hashes_and_scores) * self.valuable_blob_hash_payment_rate / 1000.0 + self.wallet.add_expected_payment(self.peer, expected_payment) + self.peer.update_stats('uploaded_valuable_blob_hashes', len(hashes_and_scores)) + return defer.succeed({'valuable_blob_hashes': {'blob_hashes': hashes_and_scores}}) + return defer.succeed({'valuable_blob_hashes': {'error': "RATE_UNSET"}}) + + +class ValuableBlobLengthQueryHandlerFactory(object): + implements(IQueryHandlerFactory) + + def __init__(self, wallet, payment_rate_manager, blob_manager): + self.blob_manager = blob_manager + self.wallet = wallet + self.payment_rate_manager = payment_rate_manager + + ######### IQueryHandlerFactory ######### + + def build_query_handler(self): + q_h = ValuableBlobLengthQueryHandler(self.wallet, self.payment_rate_manager, self.blob_manager) + return q_h + + def get_primary_query_identifier(self): + return 'blob_length' + 
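# --- Illustrative sketch (not part of this commit) ---------------------------
# handle_queries() above always has the same shape: inspect the incoming
# queries dict, deal with the payment-rate field first if the peer sent one,
# then answer the primary query, merging each partial answer into a single
# response dict that the returned Deferred fires with.  A schematic version of
# that composition, detached from any real query handler (the field names
# "demo_payment_rate" and "demo_query" are invented):
from twisted.internet import defer

def handle_queries(queries):
    response = {}

    def set_fields(fields):
        response.update(fields)

    if "demo_payment_rate" in queries:
        d = defer.succeed({"demo_payment_rate": "RATE_ACCEPTED"})
        d.addCallback(set_fields)
    else:
        d = defer.succeed(True)

    if "demo_query" in queries:
        d.addCallback(lambda _: defer.succeed({"demo_query": ["answer"]}))
        d.addCallback(set_fields)

    d.addCallback(lambda _: response)
    return d

# handle_queries({"demo_payment_rate": 1.0, "demo_query": None}) fires with
# {"demo_payment_rate": "RATE_ACCEPTED", "demo_query": ["answer"]}
# -----------------------------------------------------------------------------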
+ def get_description(self): + return "Valuable Blob Lengths - Lengths of blobs that it may be valuable to repeat" + + +class ValuableBlobLengthQueryHandler(ValuableQueryHandler): + + def __init__(self, wallet, payment_rate_manager, blob_manager): + ValuableQueryHandler.__init__(self, wallet, payment_rate_manager) + self.blob_manager = blob_manager + self.query_identifiers = ['blob_length', 'blob_length_payment_rate'] + self.valuable_blob_hash_payment_rate = None + self.blob_length_payment_rate = None + + ######## IQueryHandler ######### + + def handle_queries(self, queries): + response = {} + + def set_fields(fields): + response.update(fields) + + if self.query_identifiers[1] in queries: + d = self._handle_blob_length_payment_rate(queries[self.query_identifiers[1]]) + d.addCallback(set_fields) + else: + d = defer.succeed(True) + + if self.query_identifiers[0] in queries: + d.addCallback(lambda _: self._handle_blob_length(queries[self.query_identifiers[0]])) + d.addCallback(set_fields) + + d.addCallback(lambda _: response) + return d + + ######### internal ######### + + def _handle_blob_length_payment_rate(self, requested_payment_rate): + if not self.payment_rate_manager.accept_rate_valuable_blob_info(self.peer, "VALUABLE_BLOB_INFO", + requested_payment_rate): + r = "RATE_TOO_LOW" + else: + self.blob_length_payment_rate = requested_payment_rate + r = "RATE_ACCEPTED" + return defer.succeed({'blob_length_payment_rate': r}) + + def _handle_blob_length(self, request): + if self.blob_length_payment_rate is not None: + assert 'blob_hashes' in request + ds = [] + + def make_response_pair(length, blob_hash): + return blob_hash, length + + for blob_hash in request['blob_hashes']: + d = self.blob_manager.get_blob_length(blob_hash) + d.addCallback(make_response_pair, blob_hash) + ds.append(d) + + dl = defer.DeferredList(ds) + + def make_response(response_pairs): + lengths = [] + for success, response_pair in response_pairs: + if success is True: + lengths.append(response_pair) + if len(lengths) > 0: + logging.info("Responding with %s blob lengths: %s", str(len(lengths))) + expected_payment = 1.0 * len(lengths) * self.blob_length_payment_rate / 1000.0 + self.wallet.add_expected_payment(self.peer, expected_payment) + self.peer.update_stats('uploaded_valuable_blob_infos', len(lengths)) + return {'blob_length': {'blob_lengths': lengths}} + + dl.addCallback(make_response) \ No newline at end of file diff --git a/lbrynet/lbrynet_console/plugins/BlindRepeater/__init__.py b/lbrynet/lbrynet_console/plugins/BlindRepeater/__init__.py new file mode 100644 index 000000000..f78730573 --- /dev/null +++ b/lbrynet/lbrynet_console/plugins/BlindRepeater/__init__.py @@ -0,0 +1,128 @@ +import leveldb +import os +from lbrynet.lbrynet_console import LBRYPlugin +from twisted.internet import defer, threads +from lbrynet.conf import MIN_VALUABLE_BLOB_HASH_PAYMENT_RATE, MIN_VALUABLE_BLOB_INFO_PAYMENT_RATE +from BlindRepeater import BlindRepeater +from BlindInfoManager import BlindInfoManager +from BlindRepeaterSettings import BlindRepeaterSettings +from BlindRepeaterControlHandlers import StartRepeaterFactory, StopRepeaterFactory, UpdateMaxSpaceFactory +from BlindRepeaterControlHandlers import AddApprovedPeerFactory, DeleteApprovedPeerFactory, RepeaterStatusFactory +from BlindRepeaterControlHandlers import ShowApprovedPeersFactory, ModifyRepeaterOptionsFactory +from ValuableBlobQueryHandler import ValuableBlobLengthQueryHandlerFactory +from ValuableBlobQueryHandler import ValuableBlobHashQueryHandlerFactory + +from 
PaymentRateManager import BlindRepeaterPaymentRateManager + + +class BlindRepeaterPlugin(LBRYPlugin.LBRYPlugin): + + def __init__(self): + LBRYPlugin.LBRYPlugin.__init__(self) + self.blind_info_manager = None + self.valuable_blob_length_query_handler = None + self.valuable_blob_hash_query_handler = None + self.repeater = None + self.control_handlers = None + self.payment_rate_manager = None + self.settings = None + self.db = None + + def setup(self, lbry_console): + lbry_session = lbry_console.session + d = threads.deferToThread(self._setup_db, lbry_session.db_dir) + d.addCallback(lambda _: self._setup_settings()) + d.addCallback(lambda _: self._get_payment_rate_manager(lbry_session.base_payment_rate_manager)) + d.addCallback(lambda _: self._setup_blind_info_manager(lbry_session.peer_manager)) + d.addCallback(lambda _: self._setup_blind_repeater(lbry_session)) + d.addCallback(lambda _: self._setup_valuable_blob_query_handler(lbry_session)) + d.addCallback(lambda _: self._create_control_handlers(lbry_session)) + d.addCallback(lambda _: self._restore_repeater_status(lbry_session)) + d.addCallback(lambda _: self._add_to_lbry_console(lbry_console)) + return d + + def _setup_db(self, db_dir): + self.db = leveldb.LevelDB(os.path.join(db_dir, "valuable_blobs.db")) + + def _setup_settings(self): + self.settings = BlindRepeaterSettings(self.db) + + def _get_payment_rate_manager(self, default_payment_rate_manager): + d1 = self.settings.get_data_payment_rate() + d2 = self.settings.get_valuable_info_payment_rate() + d3 = self.settings.get_valuable_hash_payment_rate() + + dl = defer.DeferredList([d1, d2, d3]) + + def get_payment_rate_manager(rates): + data_rate = rates[0][1] if rates[0][0] is True else None + info_rate = rates[1][1] if rates[1][0] is True else None + info_rate = info_rate if info_rate is not None else MIN_VALUABLE_BLOB_INFO_PAYMENT_RATE + hash_rate = rates[2][1] if rates[2][0] is True else None + hash_rate = hash_rate if hash_rate is not None else MIN_VALUABLE_BLOB_HASH_PAYMENT_RATE + self.payment_rate_manager = BlindRepeaterPaymentRateManager(default_payment_rate_manager, + info_rate, hash_rate, + blob_data_rate=data_rate) + + dl.addCallback(get_payment_rate_manager) + return dl + + def _setup_blind_info_manager(self, peer_manager): + self.blind_info_manager = BlindInfoManager(self.db, peer_manager) + return self.blind_info_manager.setup() + + def _setup_valuable_blob_query_handler(self, lbry_session): + self.valuable_blob_length_query_handler = ValuableBlobLengthQueryHandlerFactory(lbry_session.blob_manager, + lbry_session.wallet, + self.payment_rate_manager) + self.valuable_blob_hash_query_handler = ValuableBlobHashQueryHandlerFactory(lbry_session.peer_finder, + lbry_session.wallet, + self.payment_rate_manager) + + def _setup_blind_repeater(self, lbry_session): + self.repeater = BlindRepeater(lbry_session.peer_finder, lbry_session.rate_limiter, + lbry_session.blob_manager, self.blind_info_manager, + lbry_session.wallet, self.payment_rate_manager) + return self.repeater.setup() + + def _restore_repeater_status(self, lbry_session): + d = self.settings.get_saved_max_space() + + def set_max_space(max_space): + self.repeater.set_max_space(max_space) + + d.addCallback(set_max_space) + + d.addCallback(lambda _: self.settings.get_approved_peers()) + + def set_approved_peers(peers): + for host, port in peers: + peer = lbry_session.peer_manager.get_peer(host, int(port)) + self.repeater.add_approved_peer(peer) + + d.addCallback(set_approved_peers) + + d.addCallback(lambda _: 
self.settings.get_repeater_saved_status()) + + def restore_running(running): + if running: + return self.repeater.start() + else: + return defer.succeed(True) + + d.addCallback(restore_running) + return d + + def _create_control_handlers(self, lbry_session): + category = "Blind Repeater" + control_handlers = [StartRepeaterFactory(self.repeater, self.settings), + StopRepeaterFactory(self.repeater, self.settings), + RepeaterStatusFactory(self.repeater), + ShowApprovedPeersFactory(self.repeater), + ModifyRepeaterOptionsFactory(self.repeater, lbry_session, self.settings)] + self.control_handlers = zip([category] * len(control_handlers), control_handlers) + + def _add_to_lbry_console(self, lbry_console): + lbry_console.add_control_handlers(self.control_handlers) + lbry_console.add_query_handlers([self.valuable_blob_length_query_handler, + self.valuable_blob_hash_query_handler]) \ No newline at end of file diff --git a/lbrynet/lbrynet_console/plugins/BlindRepeater/db_keys.py b/lbrynet/lbrynet_console/plugins/BlindRepeater/db_keys.py new file mode 100644 index 000000000..1b87e5ba7 --- /dev/null +++ b/lbrynet/lbrynet_console/plugins/BlindRepeater/db_keys.py @@ -0,0 +1,3 @@ +BLOB_INFO_TYPE = 'b' +SETTING_TYPE = 's' +PEER_TYPE = 'p' \ No newline at end of file diff --git a/lbrynet/lbrynet_console/plugins/BlindRepeater/interfaces.py b/lbrynet/lbrynet_console/plugins/BlindRepeater/interfaces.py new file mode 100644 index 000000000..812051e9b --- /dev/null +++ b/lbrynet/lbrynet_console/plugins/BlindRepeater/interfaces.py @@ -0,0 +1,6 @@ +from zope.interface import Interface + + +class IBlobScorer(Interface): + def score_blob(self, blob, blob_info): + pass \ No newline at end of file diff --git a/lbrynet/lbrynet_console/plugins/__init__.py b/lbrynet/lbrynet_console/plugins/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lbrynet/lbrynet_console/plugins/blindrepeater.yapsy-plugin b/lbrynet/lbrynet_console/plugins/blindrepeater.yapsy-plugin new file mode 100644 index 000000000..1092f3518 --- /dev/null +++ b/lbrynet/lbrynet_console/plugins/blindrepeater.yapsy-plugin @@ -0,0 +1,8 @@ +[Core] +Name = BlindRepeater +Module = BlindRepeater + +[Documentation] +Author = LBRY +Version = 0.1 +Description = A client which blindly downloads data it judges valuable so that it can be re-uploaded for profit \ No newline at end of file diff --git a/lbrynet/lbrynet_downloader_gui/Ic_arrow_drop_down_48px.svg.LICENSE b/lbrynet/lbrynet_downloader_gui/Ic_arrow_drop_down_48px.svg.LICENSE new file mode 100644 index 000000000..69face7d0 --- /dev/null +++ b/lbrynet/lbrynet_downloader_gui/Ic_arrow_drop_down_48px.svg.LICENSE @@ -0,0 +1,393 @@ +Attribution 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. 
+ +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution 4.0 International Public License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution 4.0 International Public License ("Public License"). To the +extent this Public License may be interpreted as a contract, You are +granted the Licensed Rights in consideration of Your acceptance of +these terms and conditions, and the Licensor grants You such rights in +consideration of benefits the Licensor receives from making the +Licensed Material available under these terms and conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. 
Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + d. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + j. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + k. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. 
If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. 
For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. 
\ No newline at end of file diff --git a/lbrynet/lbrynet_downloader_gui/__init__.py b/lbrynet/lbrynet_downloader_gui/__init__.py new file mode 100644 index 000000000..f58b75595 --- /dev/null +++ b/lbrynet/lbrynet_downloader_gui/__init__.py @@ -0,0 +1 @@ +"""A gui application for downloading LBRY files from LBRYnet""" \ No newline at end of file diff --git a/lbrynet/lbrynet_downloader_gui/close.gif b/lbrynet/lbrynet_downloader_gui/close.gif new file mode 100644 index 000000000..9c9e74a2f Binary files /dev/null and b/lbrynet/lbrynet_downloader_gui/close.gif differ diff --git a/lbrynet/lbrynet_downloader_gui/close1.png b/lbrynet/lbrynet_downloader_gui/close1.png new file mode 100644 index 000000000..6c7cae01d Binary files /dev/null and b/lbrynet/lbrynet_downloader_gui/close1.png differ diff --git a/lbrynet/lbrynet_downloader_gui/close2.gif b/lbrynet/lbrynet_downloader_gui/close2.gif new file mode 100644 index 000000000..5d1cf62e5 Binary files /dev/null and b/lbrynet/lbrynet_downloader_gui/close2.gif differ diff --git a/lbrynet/lbrynet_downloader_gui/downloader.py b/lbrynet/lbrynet_downloader_gui/downloader.py new file mode 100644 index 000000000..f13bf7593 --- /dev/null +++ b/lbrynet/lbrynet_downloader_gui/downloader.py @@ -0,0 +1,874 @@ +import Tkinter as tk +import ttk +import tkFont +import tkMessageBox +import logging +from lbrynet.lbryfile.client.LBRYFileDownloader import LBRYFileSaverFactory, LBRYFileOpenerFactory +from twisted.internet import tksupport, reactor, defer, task, threads +import sys +import os +import locale +import binascii +from Crypto import Random +from lbrynet.conf import MIN_BLOB_DATA_PAYMENT_RATE +from lbrynet.core.Session import LBRYSession +from lbrynet.core.LBRYcrdWallet import LBRYcrdWallet +from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier +from lbrynet.core.PaymentRateManager import PaymentRateManager +from lbrynet.lbryfile.LBRYFileMetadataManager import TempLBRYFileMetadataManager +from lbrynet.core import StreamDescriptor +from lbrynet.lbryfile.StreamDescriptor import LBRYFileStreamType, LBRYFileStreamDescriptorValidator +import requests + + +class LBRYDownloader(object): + def __init__(self): + self.session = None + self.known_dht_nodes = [('104.236.42.182', 4000)] + self.conf_dir = os.path.join(os.path.expanduser("~"), ".lbrydownloader") + self.data_dir = os.path.join(self.conf_dir, "blobfiles") + self.wallet_dir = os.path.join(os.path.expanduser("~"), ".lbrycrd") + self.wallet_conf = os.path.join(self.wallet_dir, "lbrycrd.conf") + self.first_run = False + if os.name == "nt": + from lbrynet.winhelpers.knownpaths import get_path, FOLDERID, UserHandle + self.download_directory = get_path(FOLDERID.Downloads, UserHandle.current) + else: + self.download_directory = os.getcwd() + self.wallet_user = None + self.wallet_password = None + self.sd_identifier = StreamDescriptorIdentifier() + self.wallet_rpc_port = 8332 + self.download_deferreds = [] + self.stream_frames = [] + + def start(self): + d = threads.deferToThread(self._create_directory) + d.addCallback(lambda _: self._get_session()) + d.addCallback(lambda _: self._setup_stream_info_manager()) + d.addCallback(lambda _: self._setup_stream_identifier()) + return d + + def stop(self): + dl = defer.DeferredList(self.download_deferreds) + for stream_frame in self.stream_frames: + stream_frame.cancel_func() + if self.session is not None: + dl.addBoth(lambda _: self.session.shut_down()) + return dl + + def get_new_address(self): + return self.session.wallet.get_new_address() + + def 
_create_directory(self): + if not os.path.exists(self.conf_dir): + os.makedirs(self.conf_dir) + logging.debug("Created the configuration directory: %s", str(self.conf_dir)) + if not os.path.exists(self.data_dir): + os.makedirs(self.data_dir) + logging.debug("Created the data directory: %s", str(self.data_dir)) + if not os.path.exists(self.wallet_dir): + os.makedirs(self.wallet_dir) + if not os.path.exists(self.wallet_conf): + lbrycrd_conf = open(self.wallet_conf, mode='w') + self.wallet_user = "rpcuser" + lbrycrd_conf.write("rpcuser=%s\n" % self.wallet_user) + self.wallet_password = binascii.hexlify(Random.new().read(20)) + lbrycrd_conf.write("rpcpassword=%s\n" % self.wallet_password) + lbrycrd_conf.write("server=1\n") + lbrycrd_conf.close() + self.first_run = True + else: + lbrycrd_conf = open(self.wallet_conf) + for l in lbrycrd_conf: + if l.startswith("rpcuser="): + self.wallet_user = l[8:-1] + if l.startswith("rpcpassword="): + self.wallet_password = l[12:-1] + if l.startswith("rpcport="): + self.wallet_rpc_port = int(l[8:-1]) + + def _get_session(self): + wallet = LBRYcrdWallet(self.wallet_user, self.wallet_password, "127.0.0.1", self.wallet_rpc_port, + start_lbrycrdd=True, wallet_dir=self.wallet_dir, wallet_conf=self.wallet_conf) + self.session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=self.conf_dir, blob_dir=self.data_dir, + use_upnp=False, wallet=wallet, + known_dht_nodes=self.known_dht_nodes, dht_node_port=4446) + return self.session.setup() + + def _setup_stream_info_manager(self): + self.stream_info_manager = TempLBRYFileMetadataManager() + return defer.succeed(True) + + def _setup_stream_identifier(self): + self.sd_identifier.add_stream_info_validator(LBRYFileStreamType, LBRYFileStreamDescriptorValidator) + file_saver_factory = LBRYFileSaverFactory(self.session.peer_finder, self.session.rate_limiter, + self.session.blob_manager, self.stream_info_manager, + self.session.wallet, self.download_directory) + self.sd_identifier.add_stream_downloader_factory(LBRYFileStreamType, file_saver_factory) + file_opener_factory = LBRYFileOpenerFactory(self.session.peer_finder, self.session.rate_limiter, + self.session.blob_manager, self.stream_info_manager, + self.session.wallet) + self.sd_identifier.add_stream_downloader_factory(LBRYFileStreamType, file_opener_factory) + + def do_first_run(self): + if self.first_run is True: + d = self.session.wallet.get_new_address() + + def send_request(url, data): + r = requests.post(url, json=data) + if r.status_code == 200: + return r.json()['credits_sent'] + return 0.0 + + def log_error(err): + logging.warning("unable to request free credits. 
%s", err.getErrorMessage()) + return 0.0 + + def request_credits(address): + url = "http://credreq.lbry.io/requestcredits" + data = {"address": address} + d = threads.deferToThread(send_request, url, data) + d.addErrback(log_error) + return d + + d.addCallback(request_credits) + return d + return defer.succeed(0.0) + + def _resolve_name(self, uri): + return self.session.wallet.get_stream_info_for_name(uri) + + def download_stream(self, stream_frame, uri): + resolve_d = self._resolve_name(uri) + + stream_frame.show_metadata_status("resolving name...") + + stream_frame.cancel_func = resolve_d.cancel + payment_rate_manager = PaymentRateManager(self.session.base_payment_rate_manager) + + def update_stream_name(value): + if 'name' in value: + stream_frame.show_name(value['name']) + if 'description' in value: + stream_frame.show_description(value['description']) + return value + + def get_sd_hash(value): + if 'stream_hash' in value: + return value['stream_hash'] + raise ValueError("Invalid stream") + + def get_sd_blob(sd_hash): + stream_frame.show_metadata_status("name resolved, fetching metadata...") + get_sd_d = StreamDescriptor.download_sd_blob(self.session, sd_hash, + payment_rate_manager) + get_sd_d.addCallback(self.sd_identifier.get_info_and_factories_for_sd_blob) + get_sd_d.addCallbacks(choose_download_factory, bad_sd_blob) + return get_sd_d + + def get_info_from_validator(info_validator): + stream_name = None + stream_size = None + for field, val in info_validator.info_to_show(): + if field == "suggested_file_name": + stream_name = val + elif field == "stream_name" and stream_name is None: + stream_name = val + elif field == "stream_size": + stream_size = int(val) + if stream_size is None: + stream_size = "unknown" + if stream_name is None: + stream_name = "unknown" + return stream_name, stream_size + + def choose_download_factory(info_and_factories): + info_validator, factories = info_and_factories + stream_name, stream_size = get_info_from_validator(info_validator) + if isinstance(stream_size, (int, long)): + price = payment_rate_manager.get_effective_min_blob_data_payment_rate() + estimated_cost = stream_size * 1.0 / 2**20 * price + else: + estimated_cost = "unknown" + + stream_frame.show_stream_metadata(stream_name, stream_size, estimated_cost) + + get_downloader_d = defer.Deferred() + + def create_downloader(f): + + def fire_get_downloader_d(downloader): + if not get_downloader_d.called: + get_downloader_d.callback(downloader) + + stream_frame.disable_download_buttons() + download_options = [o.default for o in f.get_downloader_options(info_validator, payment_rate_manager)] + d = f.make_downloader(info_validator, download_options, + payment_rate_manager) + d.addCallback(fire_get_downloader_d) + + for factory in factories: + + def choose_factory(f=factory): + create_downloader(f) + + stream_frame.add_download_factory(factory, choose_factory) + + get_downloader_d.addCallback(start_download) + + return get_downloader_d + + def show_stream_status(downloader): + total_bytes = downloader.get_total_bytes() + bytes_left_to_download = downloader.get_bytes_left_to_download() + bytes_left_to_output = downloader.get_bytes_left_to_output() + points_paid = payment_rate_manager.points_paid + payment_rate = payment_rate_manager.get_effective_min_blob_data_payment_rate() + points_remaining = 1.0 * bytes_left_to_download * payment_rate / 2**20 + stream_frame.show_progress(total_bytes, bytes_left_to_download, bytes_left_to_output, + points_paid, points_remaining) + + def show_finished(arg, downloader): 
+ show_stream_status(downloader) + stream_frame.show_download_done(payment_rate_manager.points_paid) + return arg + + def start_download(downloader): + l = task.LoopingCall(show_stream_status, downloader) + l.start(1) + d = downloader.start() + stream_frame.cancel_func = downloader.stop + + def stop_looping_call(arg): + l.stop() + stream_frame.cancel_func = resolve_d.cancel + return arg + + d.addBoth(stop_looping_call) + d.addCallback(show_finished, downloader) + return d + + def lookup_failed(err): + stream_frame.show_metadata_status("name lookup failed") + return err + + def bad_sd_blob(err): + stream_frame.show_metadata_status("Unknown type or badly formed metadata") + return err + + resolve_d.addCallback(update_stream_name) + resolve_d.addCallback(get_sd_hash) + resolve_d.addCallbacks(get_sd_blob, lookup_failed) + + def show_err(err): + tkMessageBox.showerror(title="Download Error", message=err.getErrorMessage()) + logging.error(err.getErrorMessage()) + stream_frame.show_download_done(payment_rate_manager.points_paid) + + resolve_d.addErrback(lambda err: err.trap(defer.CancelledError)) + resolve_d.addErrback(show_err) + self._add_download_deferred(resolve_d, stream_frame) + + def _add_download_deferred(self, d, stream_frame): + self.download_deferreds.append(d) + self.stream_frames.append(stream_frame) + + def remove_from_list(): + self.download_deferreds.remove(d) + self.stream_frames.remove(stream_frame) + + d.addBoth(lambda _: remove_from_list()) + + +class StreamFrame(object): + def __init__(self, app, uri): + self.app = app + self.uri = uri + self.cancel_func = None + + self.stream_frame = ttk.Frame(self.app.streams_frame, style="B.TFrame") + + self.stream_frame.pack(fill=tk.X, side=tk.BOTTOM, pady=(30, 0)) + + self.stream_frame_header = ttk.Frame(self.stream_frame, style="C.TFrame") + self.stream_frame_header.grid(sticky=tk.E + tk.W) + + self.uri_font = tkFont.Font(size=8) + self.uri_label = ttk.Label( + self.stream_frame_header, text=self.uri, font=self.uri_font, foreground="#666666" + ) + self.uri_label.grid(row=0, column=0, sticky=tk.W) + + if os.name == "nt": + close_cursor = "" + else: + close_cursor = "hand1" + + close_file_name = "close2.gif" + try: + close_file = os.path.join(os.path.dirname(__file__), close_file_name) + except NameError: + close_file = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "lbrynet", + "lbrynet_downloader_gui", close_file_name) + + self.close_picture = tk.PhotoImage( + file=close_file + ) + self.close_button = ttk.Button( + self.stream_frame_header, command=self.cancel, style="Stop.TButton", cursor=close_cursor + ) + self.close_button.config(image=self.close_picture) + self.close_button.grid(row=0, column=1, sticky=tk.E + tk.N) + + self.stream_frame_header.grid_columnconfigure(0, weight=1) + + self.stream_frame.grid_columnconfigure(0, weight=1) + + self.stream_frame_body = ttk.Frame(self.stream_frame, style="C.TFrame") + self.stream_frame_body.grid(row=1, column=0, sticky=tk.E + tk.W) + + self.name_frame = ttk.Frame(self.stream_frame_body, style="D.TFrame") + self.name_frame.grid(sticky=tk.W + tk.E) + self.name_frame.grid_columnconfigure(0, weight=1) + + self.stream_frame_body.grid_columnconfigure(0, weight=1) + + self.info_frame = ttk.Frame(self.stream_frame_body, style="D.TFrame") + self.info_frame.grid(sticky=tk.W + tk.E, row=1) + self.info_frame.grid_columnconfigure(0, weight=1) + + self.metadata_frame = ttk.Frame(self.info_frame, style="E.TFrame") + self.metadata_frame.grid(sticky=tk.W + tk.E) + 
self.metadata_frame.grid_columnconfigure(0, weight=1) + + self.outer_button_frame = ttk.Frame(self.stream_frame_body, style="D.TFrame") + self.outer_button_frame.grid(sticky=tk.W + tk.E, row=2) + + self.button_frame = ttk.Frame(self.outer_button_frame, style="E.TFrame") + self.button_frame.pack(side=tk.TOP) + + self.status_label = None + self.name_label = None + self.bytes_downloaded_label = None + self.bytes_outputted_label = None + + self.download_buttons = [] + self.name_font = None + self.description_label = None + self.file_name_frame = None + self.cost_frame = None + self.cost_description = None + self.remaining_cost_description = None + self.cost_label = None + self.remaining_cost_label = None + + def cancel(self): + if self.cancel_func is not None: + self.cancel_func() + self.stream_frame.destroy() + self.app.stream_removed() + + def show_name(self, name): + self.name_font = tkFont.Font(size=16) + self.name_label = ttk.Label( + self.name_frame, text=name, font=self.name_font + ) + self.name_label.grid(row=0, column=0, sticky=tk.W) + + def show_description(self, description): + if os.name == "nt": + wraplength = 580 + else: + wraplength = 600 + self.description_label = ttk.Label( + self.name_frame, text=description, wraplength=wraplength + ) + self.description_label.grid(row=1, column=0, sticky=tk.W) + + def show_metadata_status(self, value): + if self.status_label is None: + self.status_label = ttk.Label( + self.metadata_frame, text=value + ) + self.status_label.grid() + self.metadata_frame.grid_columnconfigure(0, weight=1) + else: + self.status_label.config(text=value) + + @staticmethod + def get_formatted_stream_size(stream_size): + if isinstance(stream_size, (int, long)): + if stream_size >= 2**40: + units = "TB" + factor = 2**40 + elif stream_size >= 2**30: + units = "GB" + factor = 2**30 + elif stream_size >= 2**20: + units = "MB" + factor = 2**20 + elif stream_size >= 2**10: + units = "KB" + factor = 2**10 + else: + return str(stream_size) + " B" + return "%.1f %s" % (round((stream_size * 1.0 / factor), 1), units) + return stream_size + + def show_stream_metadata(self, stream_name, stream_size, estimated_cost): + if self.status_label is not None: + self.status_label.destroy() + + self.file_name_frame = ttk.Frame(self.metadata_frame, style="F.TFrame") + self.file_name_frame.grid(row=0, column=0, sticky=tk.W) + self.metadata_frame.grid_columnconfigure(0, weight=1, uniform="metadata") + + file_size_label = ttk.Label( + self.file_name_frame, + text=self.get_formatted_stream_size(stream_size) + ) + file_size_label.grid(row=0, column=2) + + file_name_label = ttk.Label( + self.file_name_frame, + text=" - " + stream_name, + ) + file_name_label.grid(row=0, column=3) + + self.outer_button_frame = ttk.Frame(self.stream_frame_body, style="D.TFrame") + self.outer_button_frame.grid(sticky=tk.W + tk.E, row=2) + + self.cost_frame = ttk.Frame(self.outer_button_frame, style="F.TFrame") + self.cost_frame.grid(row=0, column=0, sticky=tk.W+tk.N, pady=(0, 12)) + + self.cost_label = ttk.Label( + self.cost_frame, + text=locale.format_string("%.2f LBC", (round(estimated_cost, 2),), grouping=True), + foreground="red" + ) + self.cost_label.grid(row=0, column=1, padx=(1, 0)) + + self.button_frame = ttk.Frame(self.outer_button_frame, style="E.TFrame") + self.button_frame.grid(row=0, column=1) + + self.outer_button_frame.grid_columnconfigure(0, weight=1, uniform="buttons") + self.outer_button_frame.grid_columnconfigure(1, weight=2, uniform="buttons1") + self.outer_button_frame.grid_columnconfigure(2, 
weight=1, uniform="buttons") + + def add_download_factory(self, factory, download_func): + if os.name == "nt": + button_cursor = "" + else: + button_cursor = "hand1" + download_button = ttk.Button( + self.button_frame, text=factory.get_description(), command=download_func, + style='LBRY.TButton', cursor=button_cursor + ) + self.download_buttons.append(download_button) + download_button.grid(row=0, column=len(self.download_buttons) - 1, padx=5, pady=(1, 2)) + + def disable_download_buttons(self): + for download_button in self.download_buttons: + download_button.config(state=tk.DISABLED) + + def remove_download_buttons(self): + for download_button in self.download_buttons: + download_button.destroy() + self.download_buttons = [] + + def show_progress(self, total_bytes, bytes_left_to_download, bytes_left_to_output, points_paid, + points_remaining): + if self.bytes_outputted_label is None: + self.remove_download_buttons() + self.button_frame.destroy() + self.outer_button_frame.grid_columnconfigure(2, weight=0, uniform="") + + self.bytes_outputted_label = ttk.Label( + self.file_name_frame, + text="" + ) + self.bytes_outputted_label.grid(row=0, column=0) + + self.bytes_downloaded_label = ttk.Label( + self.file_name_frame, + text="" + ) + self.bytes_downloaded_label.grid(row=0, column=1) + + if self.bytes_outputted_label.winfo_exists(): + self.bytes_outputted_label.config( + text=self.get_formatted_stream_size(total_bytes - bytes_left_to_output) + " / " + ) + if self.bytes_downloaded_label.winfo_exists(): + self.bytes_downloaded_label.config( + text=self.get_formatted_stream_size(total_bytes - bytes_left_to_download) + " / " + ) + if self.cost_label.winfo_exists(): + total_points = points_remaining + points_paid + self.cost_label.config(text=locale.format_string("%.2f/%.2f LBC", + (round(points_paid, 2), round(total_points, 2)), + grouping=True)) + + def show_download_done(self, total_points_paid): + if self.bytes_outputted_label is not None and self.bytes_outputted_label.winfo_exists(): + self.bytes_outputted_label.destroy() + if self.bytes_downloaded_label is not None and self.bytes_downloaded_label.winfo_exists(): + self.bytes_downloaded_label.destroy() + if self.cost_label is not None and self.cost_label.winfo_exists(): + self.cost_label.config(text=locale.format_string("%.2f LBC", + (round(total_points_paid, 2),), + grouping=True)) + + +class AddressWindow(object): + def __init__(self, root, address): + self.root = root + self.address = address + + def show(self): + window = tk.Toplevel(self.root, background="#FFFFFF") + window.transient(self.root) + window.wm_title("New address") + window.protocol("WM_DELETE_WINDOW", window.destroy) + window.resizable(0, 0) + + text_box = tk.Text(window, width=35, height=1, relief=tk.FLAT, borderwidth=0, + highlightthickness=0) + text_box.insert(tk.END, self.address) + text_box.grid(row=0, padx=10, pady=5, columnspan=2) + text_box.config(state='normal') + + def copy_to_clipboard(): + self.root.clipboard_clear() + self.root.clipboard_append(text_box.get('1.0', 'end-1c')) + + def copy_command(): + text_box.event_generate("") + + copy_menu = tk.Menu( + self.root, tearoff=0 + ) + copy_menu.add_command(label=" Copy ", command=copy_command) + + def popup(event): + if text_box.tag_ranges("sel"): + copy_menu.tk_popup(event.x_root, event.y_root) + + text_box.bind("", popup) + + copy_button = ttk.Button( + window, text="Copy", command=copy_to_clipboard, style="LBRY.TButton" + ) + copy_button.grid(row=1, column=0, pady=(0, 5), padx=5, sticky=tk.E) + + done_button = 
ttk.Button( + window, text="OK", command=window.destroy, style="LBRY.TButton" + ) + done_button.grid(row=1, column=1, pady=(0, 5), padx=5, sticky=tk.W) + window.focus_set() + + +class WelcomeWindow(object): + def __init__(self, root, points_sent): + self.root = root + self.points_sent = points_sent + + def show(self): + window = tk.Toplevel(self.root, background="#FFFFFF") + window.transient(self.root) + window.wm_title("Welcome to LBRY") + window.protocol("WM_DELETE_WINDOW", window.destroy) + window.resizable(0, 0) + + text_box = tk.Text(window, width=45, height=3, relief=tk.FLAT, borderwidth=0, + highlightthickness=0) + + points_string = locale.format_string("%.2f LBC", (round(self.points_sent, 2),), + grouping=True) + + text_box.insert(tk.END, "Thank you for using LBRY! You have been\n" + "given %s for free because we love\n" + "you. Please give them 60 seconds to show up." % points_string) + text_box.grid(row=0, padx=10, pady=5, columnspan=2) + text_box.config(state='normal') + + window.focus_set() + + +class App(object): + def __init__(self): + self.master = None + self.downloader = None + self.wallet_balance_check = None + self.streams_frame = None + + def start(self): + + d = defer.maybeDeferred(self._start_root) + d.addCallback(lambda _: self._draw_main()) + d.addCallback(lambda _: self._start_downloader()) + d.addCallback(lambda _: self._start_checking_wallet_balance()) + d.addCallback(lambda _: self._enable_lookup()) + + def show_error_and_stop(err): + logging.error(err.getErrorMessage()) + tkMessageBox.showerror(title="Start Error", message=err.getErrorMessage()) + return self.stop() + + d.addErrback(show_error_and_stop) + return d + + def stop(self): + + def log_error(err): + logging.error(err.getErrorMessage()) + + if self.downloader is not None: + d = self.downloader.stop() + else: + d = defer.succeed(True) + d.addErrback(log_error) + d.addCallback(lambda _: self._stop_checking_wallet_balance()) + d.addErrback(log_error) + d.addCallback(lambda _: reactor.stop()) + d.addErrback(log_error) + return d + + def _start_root(self): + if os.name == "nt": + button_foreground = "#104639" + lookup_button_padding = 10 + else: + button_foreground = "#FFFFFF" + lookup_button_padding = 11 + + root = tk.Tk() + root.resizable(0, 0) + root.wm_title("LBRY") + + tksupport.install(root) + + if os.name == "nt": + root.iconbitmap(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), + "lbrynet", "lbrynet_downloader_gui", "lbry-dark-icon.ico")) + else: + root.wm_iconbitmap("@" + os.path.join(os.path.dirname(__file__), "lbry-dark-icon.xbm")) + + root.button_font = tkFont.Font(size=9) + + ttk.Style().configure(".", background="#FFFFFF") + ttk.Style().configure("LBRY.TButton", background="#104639", foreground=button_foreground, + borderwidth=1, relief="solid", font=root.button_font) + ttk.Style().map("LBRY.TButton", + background=[('pressed', "#104639"), + ('active', "#104639")]) + #ttk.Style().configure("LBRY.TButton.border", background="#808080") + ttk.Style().configure("Lookup.LBRY.TButton", padding=lookup_button_padding) + ttk.Style().configure("Stop.TButton", padding=1, background="#FFFFFF", relief="flat", borderwidth=0) + ttk.Style().configure("TEntry", padding=11) + #ttk.Style().configure("A.TFrame", background="red") + #ttk.Style().configure("B.TFrame", background="green") + #ttk.Style().configure("B2.TFrame", background="#80FF80") + #ttk.Style().configure("C.TFrame", background="orange") + #ttk.Style().configure("D.TFrame", background="blue") + #ttk.Style().configure("E.TFrame", 
background="yellow") + #ttk.Style().configure("F.TFrame", background="#808080") + #ttk.Style().configure("LBRY.TProgressbar", background="#104639", orient="horizontal", thickness=5) + #ttk.Style().configure("LBRY.TProgressbar") + #ttk.Style().layout("Horizontal.LBRY.TProgressbar", ttk.Style().layout("Horizontal.TProgressbar")) + + root.configure(background="#FFFFFF") + + root.protocol("WM_DELETE_WINDOW", self.stop) + + self.master = root + + def _draw_main(self): + self.frame = ttk.Frame(self.master, style="A.TFrame") + self.frame.grid(padx=20, pady=20) + + logo_file_name = "lbry-dark-242x80.gif" + try: + logo_file = os.path.join(os.path.dirname(__file__), logo_file_name) + except NameError: + logo_file = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "lbrynet", + "lbrynet_downloader_gui", logo_file_name) + + self.logo_picture = tk.PhotoImage(file=logo_file) + + self.logo_frame = ttk.Frame(self.frame, style="B.TFrame") + self.logo_frame.grid(pady=5, sticky=tk.W + tk.E) + + self.dummy_frame = ttk.Frame(self.logo_frame, style="C.TFrame") # keeps the logo in the middle + self.dummy_frame.grid(row=0, column=1, padx=5) + + self.logo_label = ttk.Label(self.logo_frame, image=self.logo_picture) + self.logo_label.grid(row=0, column=1, padx=5) + + self.wallet_balance_frame = ttk.Frame(self.logo_frame, style="C.TFrame") + self.wallet_balance_frame.grid(sticky=tk.E + tk.N, row=0, column=2) + + self.logo_frame.grid_columnconfigure(0, weight=1, uniform="a") + self.logo_frame.grid_columnconfigure(1, weight=2, uniform="b") + self.logo_frame.grid_columnconfigure(2, weight=1, uniform="a") + + self.wallet_balance = ttk.Label( + self.wallet_balance_frame, + text=" -- LBC" + ) + self.wallet_balance.grid(row=0, column=0) + + dropdown_file_name = "drop_down.gif" + try: + dropdown_file = os.path.join(os.path.dirname(__file__), dropdown_file_name) + except NameError: + dropdown_file = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "lbrynet", + "lbrynet_downloader_gui", dropdown_file_name) + + self.dropdown_picture = tk.PhotoImage( + file=dropdown_file + ) + + def get_new_address(): + def show_address(address): + w = AddressWindow(self.master, address) + w.show() + d = defer.maybeDeferred(self.downloader.get_new_address) + d.addCallback(show_address) + + def show_error(err): + tkMessageBox.showerror(title="Failed to get new address", message=err.getErrorMessage()) + + d.addErrback(show_error) + + self.wallet_menu = tk.Menu( + self.master, tearoff=0 + ) + self.wallet_menu.add_command(label="Get new LBRYcrd address", command=get_new_address) + + if os.name == "nt": + button_cursor = "" + else: + button_cursor = "hand1" + + self.wallet_menu_button = ttk.Button(self.wallet_balance_frame, image=self.dropdown_picture, + style="Stop.TButton", cursor=button_cursor) + self.wallet_menu_button.grid(row=0, column=1, padx=(5, 0)) + + def popup(event): + self.wallet_menu.tk_popup(event.x_root, event.y_root) + + self.wallet_menu_button.bind("", popup) + + self.uri_frame = ttk.Frame(self.frame, style="B.TFrame") + self.uri_frame.grid() + + self.uri_label = ttk.Label( + self.uri_frame, text="lbry://" + ) + self.uri_label.grid(row=0, column=0, sticky=tk.E, pady=2) + + self.entry_font = tkFont.Font(size=11) + + self.uri_entry = ttk.Entry(self.uri_frame, width=50, foreground="#222222", font=self.entry_font) + self.uri_entry.grid(row=0, column=1, padx=2, pady=2) + + def copy_command(): + self.uri_entry.event_generate('') + + def cut_command(): + self.uri_entry.event_generate('') + + def paste_command(): + 
self.uri_entry.event_generate('') + + def popup(event): + selection_menu = tk.Menu( + self.master, tearoff=0 + ) + if self.uri_entry.selection_present(): + selection_menu.add_command(label=" Cut ", command=cut_command) + selection_menu.add_command(label=" Copy ", command=copy_command) + selection_menu.add_command(label=" Paste ", command=paste_command) + selection_menu.tk_popup(event.x_root, event.y_root) + + self.uri_entry.bind("", popup) + + self.uri_button = ttk.Button( + self.uri_frame, text="Go", command=self._open_stream, + style='Lookup.LBRY.TButton', cursor=button_cursor + ) + self.uri_button.grid(row=0, column=2, pady=2, padx=0) + + def _start_downloader(self): + self.downloader = LBRYDownloader() + d = self.downloader.start() + d.addCallback(lambda _: self.downloader.do_first_run()) + d.addCallback(self._show_welcome_message) + return d + + def _show_welcome_message(self, points_sent): + if points_sent != 0.0: + w = WelcomeWindow(self.master, points_sent) + w.show() + + def stream_removed(self): + if self.streams_frame is not None: + if len(self.streams_frame.winfo_children()) == 0: + self.streams_frame.destroy() + self.streams_frame = None + + def _start_checking_wallet_balance(self): + + def set_balance(balance): + self.wallet_balance.configure(text=locale.format_string("%.2f LBC", (round(balance, 2),), + grouping=True)) + + def update_balance(): + balance = self.downloader.session.wallet.get_available_balance() + set_balance(balance) + + def start_looping_call(): + self.wallet_balance_check = task.LoopingCall(update_balance) + self.wallet_balance_check.start(5) + + d = self.downloader.session.wallet.get_balance() + d.addCallback(set_balance) + d.addCallback(lambda _: start_looping_call()) + + def _stop_checking_wallet_balance(self): + if self.wallet_balance_check is not None: + self.wallet_balance_check.stop() + + def _enable_lookup(self): + self.uri_entry.bind('', self._open_stream) + + def _open_stream(self, event=None): + if self.streams_frame is None: + self.streams_frame = ttk.Frame(self.frame, style="B2.TFrame") + self.streams_frame.grid(sticky=tk.E + tk.W) + uri = self.uri_entry.get() + self.uri_entry.delete(0, tk.END) + stream_frame = StreamFrame(self, "lbry://" + uri) + + self.downloader.download_stream(stream_frame, uri) + + +def start_downloader(): + + log_format = "(%(asctime)s)[%(filename)s:%(lineno)s] %(funcName)s(): %(message)s" + logging.basicConfig(level=logging.DEBUG, format=log_format, filename="downloader.log") + sys.stdout = open("downloader.out.log", 'w') + sys.stderr = open("downloader.err.log", 'w') + + locale.setlocale(locale.LC_ALL, '') + + app = App() + + d = task.deferLater(reactor, 0, app.start) + + reactor.run() + +if __name__ == "__main__": + start_downloader() \ No newline at end of file diff --git a/lbrynet/lbrynet_downloader_gui/drop_down.gif b/lbrynet/lbrynet_downloader_gui/drop_down.gif new file mode 100644 index 000000000..a61f65c6a Binary files /dev/null and b/lbrynet/lbrynet_downloader_gui/drop_down.gif differ diff --git a/lbrynet/lbrynet_downloader_gui/lbry-dark-242x80.gif b/lbrynet/lbrynet_downloader_gui/lbry-dark-242x80.gif new file mode 100755 index 000000000..65ce976d4 Binary files /dev/null and b/lbrynet/lbrynet_downloader_gui/lbry-dark-242x80.gif differ diff --git a/lbrynet/lbrynet_downloader_gui/lbry-dark-icon.ico b/lbrynet/lbrynet_downloader_gui/lbry-dark-icon.ico new file mode 100755 index 000000000..91cd30d56 Binary files /dev/null and b/lbrynet/lbrynet_downloader_gui/lbry-dark-icon.ico differ diff --git 
a/lbrynet/lbrynet_downloader_gui/lbry-dark-icon.xbm b/lbrynet/lbrynet_downloader_gui/lbry-dark-icon.xbm new file mode 100644 index 000000000..75b81527c --- /dev/null +++ b/lbrynet/lbrynet_downloader_gui/lbry-dark-icon.xbm @@ -0,0 +1,14 @@ +#define lbry_dark_icon2_width 32 +#define lbry_dark_icon2_height 32 +static unsigned char lbry_dark_icon2_bits[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x01, 0x00, + 0x00, 0xc0, 0x07, 0x00, 0x00, 0x70, 0x1c, 0x00, 0x00, 0x1c, 0xf0, 0x00, + 0x00, 0x0e, 0xc0, 0x03, 0x80, 0x03, 0x00, 0x0f, 0xe0, 0x00, 0x00, 0x3c, + 0x70, 0x00, 0x00, 0x70, 0x1c, 0x00, 0x00, 0x60, 0x06, 0x00, 0x00, 0x38, + 0x03, 0x00, 0x00, 0x0e, 0x13, 0x00, 0x00, 0x07, 0x71, 0x00, 0xc0, 0xf1, + 0xe3, 0x01, 0x60, 0x70, 0x03, 0x07, 0x38, 0x7c, 0x07, 0x1e, 0x0e, 0x0e, + 0x3c, 0x70, 0x87, 0x03, 0xf0, 0xe0, 0xc1, 0x00, 0xc0, 0x03, 0x70, 0x00, + 0x00, 0x0f, 0x1c, 0x00, 0x00, 0x38, 0x0e, 0x00, 0x00, 0xf0, 0x03, 0x00, + 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; diff --git a/lbrynet/node_rpc_cli.py b/lbrynet/node_rpc_cli.py new file mode 100644 index 000000000..d53560d98 --- /dev/null +++ b/lbrynet/node_rpc_cli.py @@ -0,0 +1,42 @@ +""" +CLI for sending rpc commands to a DHT node +""" + + +from twisted.internet import reactor +from txjsonrpc.web.jsonrpc import Proxy +import argparse +import sys + + +def print_value(value): + print value + + +def print_error(err): + print err.getErrorMessage() + + +def shut_down(): + reactor.stop() + + +def main(): + parser = argparse.ArgumentParser(description="Send an rpc command to a dht node") + parser.add_argument("rpc_command", + help="The rpc command to send to the dht node") + parser.add_argument("--node_host", + help="The host of the node to connect to", + default="127.0.0.1") + parser.add_argument("--node_port", + help="The port of the node to connect to", + default="8888") + + args = parser.parse_args() + connect_string = 'http://%s:%s' % (args.node_host, args.node_port) + proxy = Proxy(connect_string) + + d = proxy.callRemote(args.rpc_command) + d.addCallbacks(print_value, print_error) + d.addBoth(lambda _: shut_down()) + reactor.run() \ No newline at end of file diff --git a/lbrynet/pointtraderclient/__init__.py b/lbrynet/pointtraderclient/__init__.py new file mode 100644 index 000000000..f6337cb30 --- /dev/null +++ b/lbrynet/pointtraderclient/__init__.py @@ -0,0 +1,10 @@ +""" +A client library for sending and receiving payments on the point trader network. + +The point trader network is a simple payment system used solely for testing lbrynet-console. A user +creates a public key, registers it with the point trader server, and receives free points for +registering. The public key is used to spend points, and also used as an address to which points +are sent. To spend points, the public key signs a message containing the amount and the destination +public key and sends it to the point trader server. To check for payments, the recipient sends a +signed message asking the point trader server for its balance. 
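[Editor's sketch, not part of the original commit] A minimal illustration of how an
application might use the client library defined below; register_new_account, send_points
and get_balance all return Twisted Deferreds, and the key size and amount here are
arbitrary:

    from Crypto.PublicKey import RSA
    from twisted.internet import reactor

    from lbrynet.pointtraderclient.pointtraderclient import (
        register_new_account, send_points, get_balance)

    sender = RSA.generate(4096)     # blocking here for brevity; the test code below defers this to a thread
    recipient = RSA.generate(4096)

    def show_balance(balance):
        print "sender balance:", balance

    d = register_new_account(sender)
    d.addCallback(lambda _: register_new_account(recipient))
    d.addCallback(lambda _: send_points(sender, recipient.publickey().exportKey(), 25))
    d.addCallback(lambda _: get_balance(sender))
    d.addCallback(show_balance)
    d.addBoth(lambda _: reactor.stop())
    reactor.run()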
+""" \ No newline at end of file diff --git a/lbrynet/pointtraderclient/pointtraderclient.py b/lbrynet/pointtraderclient/pointtraderclient.py new file mode 100644 index 000000000..0499a8b9e --- /dev/null +++ b/lbrynet/pointtraderclient/pointtraderclient.py @@ -0,0 +1,228 @@ +from lbrynet.conf import POINTTRADER_SERVER + +from twisted.web.client import Agent, FileBodyProducer, Headers, ResponseDone +from twisted.internet import threads, defer, protocol +from Crypto.Hash import SHA +from Crypto.PublicKey import RSA +from Crypto.Signature import PKCS1_PSS +from StringIO import StringIO +import time +import json +import binascii + + +class BeginningPrinter(protocol.Protocol): + def __init__(self, finished): + self.finished = finished + self.data = "" + + def dataReceived(self, bytes): + self.data = self.data + bytes + + def connectionLost(self, reason): + if reason.check(ResponseDone) is not None: + self.finished.callback(str(self.data)) + else: + self.finished.errback(reason) + + +def read_body(response): + d = defer.Deferred() + response.deliverBody(BeginningPrinter(d)) + return d + + +def get_body(response): + if response.code != 200: + print "\n\n\n\nbad error code\n\n\n\n" + raise ValueError(response.phrase) + else: + return read_body(response) + + +def get_body_from_request(path, data): + + from twisted.internet import reactor + + jsondata = FileBodyProducer(StringIO(json.dumps(data))) + agent = Agent(reactor) + d = agent.request('POST', POINTTRADER_SERVER + path, Headers({'Content-Type': ['application/json']}), jsondata) + d.addCallback(get_body) + return d + + +def print_response(response): + pass + + +def print_error(err): + print err.getTraceback() + return err + + +def register_new_account(private_key): + data = {} + data['pub_key'] = private_key.publickey().exportKey() + + def get_success_from_body(body): + r = json.loads(body) + if not 'success' in r or r['success'] is False: + return False + return True + + d = get_body_from_request('/register/', data) + + d.addCallback(get_success_from_body) + return d + + +def send_points(private_key, recipient_public_key, amount): + encoded_public_key = private_key.publickey().exportKey() + timestamp = time.time() + h = SHA.new() + h.update(encoded_public_key) + h.update(recipient_public_key) + h.update(str(amount)) + h.update(str(timestamp)) + signer = PKCS1_PSS.new(private_key) + signature = binascii.hexlify(signer.sign(h)) + + data = {} + data['sender_pub_key'] = encoded_public_key + data['recipient_pub_key'] = recipient_public_key + data['amount'] = amount + data['timestamp'] = timestamp + data['signature'] = signature + + def get_success_from_body(body): + r = json.loads(body) + if not 'success' in r or r['success'] is False: + return False + return True + + d = get_body_from_request('/send-points/', data) + + d.addCallback(get_success_from_body) + + return d + + +def get_recent_transactions(private_key): + encoded_public_key = private_key.publickey().exportKey() + timestamp = time.time() + h = SHA.new() + h.update(encoded_public_key) + h.update(str(timestamp)) + signer = PKCS1_PSS.new(private_key) + signature = binascii.hexlify(signer.sign(h)) + + data = {} + data['pub_key'] = encoded_public_key + data['timestamp'] = timestamp + data['signature'] = signature + data['end_time'] = 0 + data['start_time'] = 120 + + def get_transactions_from_body(body): + r = json.loads(body) + if "transactions" not in r: + raise ValueError("Invalid response: no 'transactions' field") + else: + return r['transactions'] + + d = 
get_body_from_request('/get-transactions/', data) + + d.addCallback(get_transactions_from_body) + + return d + + +def get_balance(private_key): + encoded_public_key = private_key.publickey().exportKey() + timestamp = time.time() + h = SHA.new() + h.update(encoded_public_key) + h.update(str(timestamp)) + signer = PKCS1_PSS.new(private_key) + signature = binascii.hexlify(signer.sign(h)) + + data = {} + data['pub_key'] = encoded_public_key + data['timestamp'] = timestamp + data['signature'] = signature + + def get_balance_from_body(body): + r = json.loads(body) + if not 'balance' in r: + raise ValueError("Invalid response: no 'balance' field") + else: + return float(r['balance']) + + d = get_body_from_request('/get-balance/', data) + + d.addCallback(get_balance_from_body) + + return d + + +def run_full_test(): + + keys = [] + + def save_key(private_key): + keys.append(private_key) + return private_key + + def check_balances_and_transactions(unused, bal1, bal2, num_transactions): + + def assert_balance_is(actual, expected): + assert abs(actual - expected) < .05 + print "correct balance. actual:", str(actual), "expected:", str(expected) + return True + + def assert_transaction_length_is(transactions, expected_length): + assert len(transactions) == expected_length + print "correct transaction length" + return True + + d1 = get_balance(keys[0]) + d1.addCallback(assert_balance_is, bal1) + + d2 = get_balance(keys[1]) + d2.addCallback(assert_balance_is, bal2) + + d3 = get_recent_transactions(keys[0]) + d3.addCallback(assert_transaction_length_is, num_transactions) + + d4 = get_recent_transactions(keys[1]) + d4.addCallback(assert_transaction_length_is, num_transactions) + + dl = defer.DeferredList([d1, d2, d3, d4]) + return dl + + def do_transfer(unused, amount): + d = send_points(keys[0], keys[1].publickey().exportKey(), amount) + return d + + d1 = threads.deferToThread(RSA.generate, 4096) + d1.addCallback(save_key) + d1.addCallback(register_new_account) + d2 = threads.deferToThread(RSA.generate, 4096) + d2.addCallback(save_key) + d2.addCallback(register_new_account) + dlist = defer.DeferredList([d1, d2]) + dlist.addCallback(check_balances_and_transactions, 1000, 1000, 0) + dlist.addCallback(do_transfer, 50) + dlist.addCallback(check_balances_and_transactions, 950, 1050, 1) + dlist.addCallback(do_transfer, 75) + dlist.addCallback(check_balances_and_transactions, 875, 1125, 2) + dlist.addErrback(print_error) + + +if __name__ == "__main__": + + from twisted.internet import reactor + + reactor.callLater(1, run_full_test) + reactor.callLater(25, reactor.stop) + reactor.run() \ No newline at end of file diff --git a/lbrynet/rpc_node.py b/lbrynet/rpc_node.py new file mode 100644 index 000000000..0c27e4595 --- /dev/null +++ b/lbrynet/rpc_node.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive +# + +# Thanks to Paul Cannon for IP-address resolution functions (taken from aspn.activestate.com) + + +""" +Launch a DHT node which can respond to RPC commands. 
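+
+For illustration, assuming the console scripts declared in setup.py are installed, a
+node could be started and then queried roughly as follows (host name and ports are
+examples only):
+
+    lbrynet-launch-rpc-node 4000 8888 seed.example.com 4000
+    lbrynet-rpc-node-cli total_dht_nodes --node_port 8888
+    lbrynet-rpc-node-cli stop --node_port 8888
+
+Here 4000 is the UDP port used for DHT traffic, 8888 is the TCP port listening for
+JSON-RPC commands, and the available commands correspond to the jsonrpc_* methods of
+RPCNode below (total_dht_nodes, total_dht_hashes, stop).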
+""" + + +import argparse +from lbrynet.dht.node import Node +from txjsonrpc.web import jsonrpc +from twisted.web import server +from twisted.internet import reactor, defer + + +class RPCNode(jsonrpc.JSONRPC): + def __init__(self, node, shut_down_cb): + jsonrpc.JSONRPC.__init__(self) + self.node = node + self.shut_down_cb = shut_down_cb + + def jsonrpc_total_dht_nodes(self): + return self.node.getApproximateTotalDHTNodes() + + def jsonrpc_total_dht_hashes(self): + return self.node.getApproximateTotalHashes() + + def jsonrpc_stop(self): + self.shut_down_cb() + return "fine" + + +def main(): + + parser = argparse.ArgumentParser(description="Launch a dht node which responds to rpc commands") + + parser.add_argument("node_port", + help="The UDP port on which the node will listen for connections from other dht nodes", + type=int) + parser.add_argument("rpc_port", + help="The TCP port on which the node will listen for rpc commands", + type=int) + parser.add_argument("dht_bootstrap_host", + help="The IP of a DHT node to be used to bootstrap into the network", + nargs='?') + parser.add_argument("dht_bootstrap_port", + help="The port of a DHT node to be used to bootstrap into the network", + nargs='?', default=4000, type=int) + parser.add_argument("--rpc_ip_address", + help="The network interface on which to listen for rpc connections", + default="127.0.0.1") + + args = parser.parse_args() + + def start_rpc(): + rpc_node = RPCNode(node, shut_down) + reactor.listenTCP(args.rpc_port, server.Site(rpc_node), interface=args.rpc_ip_address) + + def shut_down(): + d = defer.maybeDeferred(node.stop) + d.addBoth(lambda _: reactor.stop()) + return d + + known_nodes = [] + if args.dht_bootstrap_host: + known_nodes.append((args.dht_bootstrap_host, args.dht_bootstrap_port)) + + node = Node(udpPort=args.node_port) + node.joinNetwork(known_nodes) + d = node._joinDeferred + d.addCallback(lambda _: start_rpc()) + reactor.run() + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/lbrynet/winhelpers/__init__.py b/lbrynet/winhelpers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/lbrynet/winhelpers/knownpaths-LICENSE.txt b/lbrynet/winhelpers/knownpaths-LICENSE.txt new file mode 100644 index 000000000..5f9023892 --- /dev/null +++ b/lbrynet/winhelpers/knownpaths-LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Michael Kropat + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/lbrynet/winhelpers/knownpaths.py b/lbrynet/winhelpers/knownpaths.py new file mode 100644 index 000000000..267e5eb7f --- /dev/null +++ b/lbrynet/winhelpers/knownpaths.py @@ -0,0 +1,165 @@ +from __future__ import print_function +import ctypes, sys +from ctypes import windll, wintypes +from uuid import UUID + +class GUID(ctypes.Structure): # [1] + _fields_ = [ + ("Data1", wintypes.DWORD), + ("Data2", wintypes.WORD), + ("Data3", wintypes.WORD), + ("Data4", wintypes.BYTE * 8) + ] + + def __init__(self, uuid_): + ctypes.Structure.__init__(self) + self.Data1, self.Data2, self.Data3, self.Data4[0], self.Data4[1], rest = uuid_.fields + for i in range(2, 8): + self.Data4[i] = rest>>(8 - i - 1)*8 & 0xff + +class FOLDERID: # [2] + AccountPictures = UUID('{008ca0b1-55b4-4c56-b8a8-4de4b299d3be}') + AdminTools = UUID('{724EF170-A42D-4FEF-9F26-B60E846FBA4F}') + ApplicationShortcuts = UUID('{A3918781-E5F2-4890-B3D9-A7E54332328C}') + CameraRoll = UUID('{AB5FB87B-7CE2-4F83-915D-550846C9537B}') + CDBurning = UUID('{9E52AB10-F80D-49DF-ACB8-4330F5687855}') + CommonAdminTools = UUID('{D0384E7D-BAC3-4797-8F14-CBA229B392B5}') + CommonOEMLinks = UUID('{C1BAE2D0-10DF-4334-BEDD-7AA20B227A9D}') + CommonPrograms = UUID('{0139D44E-6AFE-49F2-8690-3DAFCAE6FFB8}') + CommonStartMenu = UUID('{A4115719-D62E-491D-AA7C-E74B8BE3B067}') + CommonStartup = UUID('{82A5EA35-D9CD-47C5-9629-E15D2F714E6E}') + CommonTemplates = UUID('{B94237E7-57AC-4347-9151-B08C6C32D1F7}') + Contacts = UUID('{56784854-C6CB-462b-8169-88E350ACB882}') + Cookies = UUID('{2B0F765D-C0E9-4171-908E-08A611B84FF6}') + Desktop = UUID('{B4BFCC3A-DB2C-424C-B029-7FE99A87C641}') + DeviceMetadataStore = UUID('{5CE4A5E9-E4EB-479D-B89F-130C02886155}') + Documents = UUID('{FDD39AD0-238F-46AF-ADB4-6C85480369C7}') + DocumentsLibrary = UUID('{7B0DB17D-9CD2-4A93-9733-46CC89022E7C}') + Downloads = UUID('{374DE290-123F-4565-9164-39C4925E467B}') + Favorites = UUID('{1777F761-68AD-4D8A-87BD-30B759FA33DD}') + Fonts = UUID('{FD228CB7-AE11-4AE3-864C-16F3910AB8FE}') + GameTasks = UUID('{054FAE61-4DD8-4787-80B6-090220C4B700}') + History = UUID('{D9DC8A3B-B784-432E-A781-5A1130A75963}') + ImplicitAppShortcuts = UUID('{BCB5256F-79F6-4CEE-B725-DC34E402FD46}') + InternetCache = UUID('{352481E8-33BE-4251-BA85-6007CAEDCF9D}') + Libraries = UUID('{1B3EA5DC-B587-4786-B4EF-BD1DC332AEAE}') + Links = UUID('{bfb9d5e0-c6a9-404c-b2b2-ae6db6af4968}') + LocalAppData = UUID('{F1B32785-6FBA-4FCF-9D55-7B8E7F157091}') + LocalAppDataLow = UUID('{A520A1A4-1780-4FF6-BD18-167343C5AF16}') + LocalizedResourcesDir = UUID('{2A00375E-224C-49DE-B8D1-440DF7EF3DDC}') + Music = UUID('{4BD8D571-6D19-48D3-BE97-422220080E43}') + MusicLibrary = UUID('{2112AB0A-C86A-4FFE-A368-0DE96E47012E}') + NetHood = UUID('{C5ABBF53-E17F-4121-8900-86626FC2C973}') + OriginalImages = UUID('{2C36C0AA-5812-4b87-BFD0-4CD0DFB19B39}') + PhotoAlbums = UUID('{69D2CF90-FC33-4FB7-9A0C-EBB0F0FCB43C}') + PicturesLibrary = UUID('{A990AE9F-A03B-4E80-94BC-9912D7504104}') + Pictures = UUID('{33E28130-4E1E-4676-835A-98395C3BC3BB}') + Playlists = UUID('{DE92C1C7-837F-4F69-A3BB-86E631204A23}') + PrintHood = UUID('{9274BD8D-CFD1-41C3-B35E-B13F55A758F4}') + Profile = UUID('{5E6C858F-0E22-4760-9AFE-EA3317B67173}') + ProgramData = UUID('{62AB5D82-FDC1-4DC3-A9DD-070D1D495D97}') + ProgramFiles = UUID('{905e63b6-c1bf-494e-b29c-65b732d3d21a}') + ProgramFilesX64 = UUID('{6D809377-6AF0-444b-8957-A3773F02200E}') + ProgramFilesX86 = UUID('{7C5A40EF-A0FB-4BFC-874A-C0F2E0B9FA8E}') + ProgramFilesCommon = UUID('{F7F1ED05-9F6D-47A2-AAAE-29D317C6F066}') + 
ProgramFilesCommonX64 = UUID('{6365D5A7-0F0D-45E5-87F6-0DA56B6A4F7D}') + ProgramFilesCommonX86 = UUID('{DE974D24-D9C6-4D3E-BF91-F4455120B917}') + Programs = UUID('{A77F5D77-2E2B-44C3-A6A2-ABA601054A51}') + Public = UUID('{DFDF76A2-C82A-4D63-906A-5644AC457385}') + PublicDesktop = UUID('{C4AA340D-F20F-4863-AFEF-F87EF2E6BA25}') + PublicDocuments = UUID('{ED4824AF-DCE4-45A8-81E2-FC7965083634}') + PublicDownloads = UUID('{3D644C9B-1FB8-4f30-9B45-F670235F79C0}') + PublicGameTasks = UUID('{DEBF2536-E1A8-4c59-B6A2-414586476AEA}') + PublicLibraries = UUID('{48DAF80B-E6CF-4F4E-B800-0E69D84EE384}') + PublicMusic = UUID('{3214FAB5-9757-4298-BB61-92A9DEAA44FF}') + PublicPictures = UUID('{B6EBFB86-6907-413C-9AF7-4FC2ABF07CC5}') + PublicRingtones = UUID('{E555AB60-153B-4D17-9F04-A5FE99FC15EC}') + PublicUserTiles = UUID('{0482af6c-08f1-4c34-8c90-e17ec98b1e17}') + PublicVideos = UUID('{2400183A-6185-49FB-A2D8-4A392A602BA3}') + QuickLaunch = UUID('{52a4f021-7b75-48a9-9f6b-4b87a210bc8f}') + Recent = UUID('{AE50C081-EBD2-438A-8655-8A092E34987A}') + RecordedTVLibrary = UUID('{1A6FDBA2-F42D-4358-A798-B74D745926C5}') + ResourceDir = UUID('{8AD10C31-2ADB-4296-A8F7-E4701232C972}') + Ringtones = UUID('{C870044B-F49E-4126-A9C3-B52A1FF411E8}') + RoamingAppData = UUID('{3EB685DB-65F9-4CF6-A03A-E3EF65729F3D}') + RoamedTileImages = UUID('{AAA8D5A5-F1D6-4259-BAA8-78E7EF60835E}') + RoamingTiles = UUID('{00BCFC5A-ED94-4e48-96A1-3F6217F21990}') + SampleMusic = UUID('{B250C668-F57D-4EE1-A63C-290EE7D1AA1F}') + SamplePictures = UUID('{C4900540-2379-4C75-844B-64E6FAF8716B}') + SamplePlaylists = UUID('{15CA69B3-30EE-49C1-ACE1-6B5EC372AFB5}') + SampleVideos = UUID('{859EAD94-2E85-48AD-A71A-0969CB56A6CD}') + SavedGames = UUID('{4C5C32FF-BB9D-43b0-B5B4-2D72E54EAAA4}') + SavedSearches = UUID('{7d1d3a04-debb-4115-95cf-2f29da2920da}') + Screenshots = UUID('{b7bede81-df94-4682-a7d8-57a52620b86f}') + SearchHistory = UUID('{0D4C3DB6-03A3-462F-A0E6-08924C41B5D4}') + SearchTemplates = UUID('{7E636BFE-DFA9-4D5E-B456-D7B39851D8A9}') + SendTo = UUID('{8983036C-27C0-404B-8F08-102D10DCFD74}') + SidebarDefaultParts = UUID('{7B396E54-9EC5-4300-BE0A-2482EBAE1A26}') + SidebarParts = UUID('{A75D362E-50FC-4fb7-AC2C-A8BEAA314493}') + SkyDrive = UUID('{A52BBA46-E9E1-435f-B3D9-28DAA648C0F6}') + SkyDriveCameraRoll = UUID('{767E6811-49CB-4273-87C2-20F355E1085B}') + SkyDriveDocuments = UUID('{24D89E24-2F19-4534-9DDE-6A6671FBB8FE}') + SkyDrivePictures = UUID('{339719B5-8C47-4894-94C2-D8F77ADD44A6}') + StartMenu = UUID('{625B53C3-AB48-4EC1-BA1F-A1EF4146FC19}') + Startup = UUID('{B97D20BB-F46A-4C97-BA10-5E3608430854}') + System = UUID('{1AC14E77-02E7-4E5D-B744-2EB1AE5198B7}') + SystemX86 = UUID('{D65231B0-B2F1-4857-A4CE-A8E7C6EA7D27}') + Templates = UUID('{A63293E8-664E-48DB-A079-DF759E0509F7}') + UserPinned = UUID('{9E3995AB-1F9C-4F13-B827-48B24B6C7174}') + UserProfiles = UUID('{0762D272-C50A-4BB0-A382-697DCD729B80}') + UserProgramFiles = UUID('{5CD7AEE2-2219-4A67-B85D-6C9CE15660CB}') + UserProgramFilesCommon = UUID('{BCBD3057-CA5C-4622-B42D-BC56DB0AE516}') + Videos = UUID('{18989B1D-99B5-455B-841C-AB7C74E4DDFC}') + VideosLibrary = UUID('{491E922F-5643-4AF4-A7EB-4E7A138D8174}') + Windows = UUID('{F38BF404-1D43-42F2-9305-67DE0B28FC23}') + +class UserHandle: # [3] + current = wintypes.HANDLE(0) + common = wintypes.HANDLE(-1) + +_CoTaskMemFree = windll.ole32.CoTaskMemFree # [4] +_CoTaskMemFree.restype= None +_CoTaskMemFree.argtypes = [ctypes.c_void_p] + +_SHGetKnownFolderPath = windll.shell32.SHGetKnownFolderPath # [5] [3] +_SHGetKnownFolderPath.argtypes = [ + 
ctypes.POINTER(GUID), wintypes.DWORD, wintypes.HANDLE, ctypes.POINTER(ctypes.c_wchar_p) +] + +class PathNotFoundException(Exception): pass + +def get_path(folderid, user_handle=UserHandle.common): + fid = GUID(folderid) + pPath = ctypes.c_wchar_p() + S_OK = 0 + if _SHGetKnownFolderPath(ctypes.byref(fid), 0, user_handle, ctypes.byref(pPath)) != S_OK: + raise PathNotFoundException() + path = pPath.value + _CoTaskMemFree(pPath) + return path + +if __name__ == '__main__': + if len(sys.argv) < 2 or sys.argv[1] in ['-?', '/?']: + print('python knownpaths.py FOLDERID {current|common}') + sys.exit(0) + + try: + folderid = getattr(FOLDERID, sys.argv[1]) + except AttributeError: + print('Unknown folder id "%s"' % sys.argv[1], file=sys.stderr) + sys.exit(1) + + try: + if len(sys.argv) == 2: + print(get_path(folderid)) + else: + print(get_path(folderid, getattr(UserHandle, sys.argv[2]))) + except PathNotFoundException: + print('Folder not found "%s"' % ' '.join(sys.argv[1:]), file=sys.stderr) + sys.exit(1) + +# [1] http://msdn.microsoft.com/en-us/library/windows/desktop/aa373931.aspx +# [2] http://msdn.microsoft.com/en-us/library/windows/desktop/dd378457.aspx +# [3] http://msdn.microsoft.com/en-us/library/windows/desktop/bb762188.aspx +# [4] http://msdn.microsoft.com/en-us/library/windows/desktop/ms680722.aspx +# [5] http://www.themacaque.com/?p=954 diff --git a/setup.py b/setup.py new file mode 100644 index 000000000..5d5e63bd3 --- /dev/null +++ b/setup.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python + +import ez_setup +ez_setup.use_setuptools() + +from setuptools import setup, find_packages + +setup(name='lbrynet', + version='0.0.4', + packages=find_packages(), + install_requires=['pycrypto', 'twisted', 'miniupnpc', 'yapsy', 'seccure', 'python-bitcoinrpc', 'leveldb', 'txJSON-RPC', 'requests'], + entry_points={ + 'console_scripts': [ + 'lbrynet-console = lbrynet.lbrynet_console.LBRYConsole:launch_lbry_console', + 'lbrynet-stdin-uploader = lbrynet.lbrynet_console.LBRYStdinUploader:launch_stdin_uploader', + 'lbrynet-stdout-downloader = lbrynet.lbrynet_console.LBRYStdoutDownloader:launch_stdout_downloader', + 'lbrynet-create-network = lbrynet.create_network:main', + 'lbrynet-launch-node = lbrynet.dht.node:main', + 'lbrynet-launch-rpc-node = lbrynet.rpc_node:main', + 'lbrynet-rpc-node-cli = lbrynet.node_rpc_cli:main', + 'lbrynet-gui = lbrynet.lbrynet_downloader_gui.downloader:start_downloader', + 'lbrynet-lookup-hosts-for-hash = lbrynet.dht_scripts:get_hosts_for_hash_in_dht', + 'lbrynet-announce_hash_to_dht = lbrynet.dht_scripts:announce_hash_to_dht', + ] + }, + data_files=[ + ('lbrynet/lbrynet_console/plugins', + [ + 'lbrynet/lbrynet_console/plugins/blindrepeater.yapsy-plugin', + ] + ), + ('lbrynet/lbrynet_downloader_gui', + [ + 'lbrynet/lbrynet_downloader_gui/close2.gif', + 'lbrynet/lbrynet_downloader_gui/lbry-dark-242x80.gif', + 'lbrynet/lbrynet_downloader_gui/lbry-dark-icon.xbm', + 'lbrynet/lbrynet_downloader_gui/lbry-dark-icon.ico', + 'lbrynet/lbrynet_downloader_gui/drop_down.gif', + ] + ) + ] + ) diff --git a/tests/dht/runalltests.py b/tests/dht/runalltests.py new file mode 100755 index 000000000..74f2a1e3f --- /dev/null +++ b/tests/dht/runalltests.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. 
+# See the COPYING file included in this archive + +""" Wrapper script to run all included test scripts """ + +import os, sys +import unittest + +def runTests(): + testRunner = unittest.TextTestRunner() + testRunner.run(additional_tests()) + +def additional_tests(): + """ Used directly by setuptools to run unittests """ + sys.path.insert(0, os.path.dirname(__file__)) + suite = unittest.TestSuite() + tests = os.listdir(os.path.dirname(__file__)) + tests = [n[:-3] for n in tests if n.startswith('test') and n.endswith('.py')] + for test in tests: + m = __import__(test) + if hasattr(m, 'suite'): + suite.addTest(m.suite()) + sys.path.pop(0) + return suite + + +if __name__ == '__main__': + # Add parent folder to sys path so it's easier to use + sys.path.insert(0,os.path.abspath('..')) + runTests() diff --git a/tests/dht/testContact.py b/tests/dht/testContact.py new file mode 100644 index 000000000..6c475cf28 --- /dev/null +++ b/tests/dht/testContact.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive + +import unittest + +import lbrynet.dht.contact + +class ContactOperatorsTest(unittest.TestCase): + """ Basic tests case for boolean operators on the Contact class """ + def setUp(self): + self.firstContact = lbrynet.dht.contact.Contact('firstContactID', '127.0.0.1', 1000, None, 1) + self.secondContact = lbrynet.dht.contact.Contact('2ndContactID', '192.168.0.1', 1000, None, 32) + self.secondContactCopy = lbrynet.dht.contact.Contact('2ndContactID', '192.168.0.1', 1000, None, 32) + self.firstContactDifferentValues = lbrynet.dht.contact.Contact('firstContactID', '192.168.1.20', 1000, None, 50) + + def testBoolean(self): + """ Test "equals" and "not equals" comparisons """ + self.failIfEqual(self.firstContact, self.secondContact, 'Contacts with different IDs should not be equal.') + self.failUnlessEqual(self.firstContact, self.firstContactDifferentValues, 'Contacts with same IDs should be equal, even if their other values differ.') + self.failUnlessEqual(self.secondContact, self.secondContactCopy, 'Different copies of the same Contact instance should be equal') + + def testStringComparisons(self): + """ Test comparisons of Contact objects with str types """ + self.failUnlessEqual('firstContactID', self.firstContact, 'The node ID string must be equal to the contact object') + self.failIfEqual('some random string', self.firstContact, "The tested string should not be equal to the contact object (not equal to it's ID)") + + def testIllogicalComparisons(self): + """ Test comparisons with non-Contact and non-str types """ + for item in (123, [1,2,3], {'key': 'value'}): + self.failIfEqual(self.firstContact, item, '"eq" operator: Contact object should not be equal to %s type' % type(item).__name__) + self.failUnless(self.firstContact != item, '"ne" operator: Contact object should not be equal to %s type' % type(item).__name__) + + def testCompactIP(self): + self.assertEqual(self.firstContact.compact_ip(), '\x7f\x00\x00\x01') + self.assertEqual(self.secondContact.compact_ip(), '\xc0\xa8\x00\x01') + +def suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(ContactOperatorsTest)) + return suite + +if __name__ == '__main__': + # If this module is executed from the commandline, run all its tests + unittest.TextTestRunner().run(suite()) diff --git a/tests/dht/testDatastore.py b/tests/dht/testDatastore.py new file 
mode 100644 index 000000000..6c3496871 --- /dev/null +++ b/tests/dht/testDatastore.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive + +import unittest +import time +import datetime +import random + +import lbrynet.dht.datastore +import lbrynet.dht.constants + +import hashlib + +class DictDataStoreTest(unittest.TestCase): + """ Basic tests case for the reference DataStore API and implementation """ + def setUp(self): + #if not hasattr(self, 'ds'): + self.ds = lbrynet.dht.datastore.DictDataStore() + h = hashlib.sha1() + h.update('g') + hashKey = h.digest() + h2 = hashlib.sha1() + h2.update('dried') + hashKey2 = h2.digest() + h3 = hashlib.sha1() + h3.update('Boozoo Bajou - 09 - S.I.P.mp3') + hashKey3 = h3.digest() + #self.cases = (('a', 'hello there\nthis is a test'), + # ('b', unicode('jasdklfjklsdj;f2352352ljklzsdlkjkasf\ndsjklafsd')), + # ('e', 123), + # ('f', [('this', 'is', 1), {'complex': 'data entry'}]), + # ('aMuchLongerKeyThanAnyOfThePreviousOnes', 'some data'), + # (hashKey, 'some data'), + # (hashKey2, 'abcdefghijklmnopqrstuvwxz'), + # (hashKey3, '1 2 3 4 5 6 7 8 9 0')) + self.cases = ((hashKey, 'test1test1test1test1test1t'), + (hashKey, 'test2'), + (hashKey, 'test3test3test3test3test3test3test3test3'), + (hashKey2, 'test4'), + (hashKey3, 'test5'), + (hashKey3, 'test6')) + + def testReadWrite(self): + # Test write ability + for key, value in self.cases: + try: + now = int(time.time()) + self.ds.addPeerToBlob(key, value, now, now, 'node1') + except Exception: + import traceback + self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5))) + + # Verify writing (test query ability) + for key, value in self.cases: + try: + self.failUnless(self.ds.hasPeersForBlob(key), 'Key "%s" not found in DataStore! DataStore key dump: %s' % (key, self.ds.keys())) + except Exception: + import traceback + self.fail('Failed verifying that the following key exists: "%s"\n The error was: %s:' % (key, traceback.format_exc(5))) + + # Read back the data + for key, value in self.cases: + self.failUnless(value in self.ds.getPeersForBlob(key), 'DataStore returned invalid data! Expected "%s", got "%s"' % (value, self.ds.getPeersForBlob(key))) + + def testNonExistentKeys(self): + for key, value in self.cases: + self.failIf(key in self.ds.keys(), 'DataStore reports it has non-existent key: "%s"' % key) + + def testExpires(self): + now = int(time.time()) + + h1 = hashlib.sha1() + h1.update('test1') + key1 = h1.digest() + h2 = hashlib.sha1() + h2.update('test2') + key2 = h2.digest() + td = lbrynet.dht.constants.dataExpireTimeout - 100 + td2 = td + td + self.ds.addPeerToBlob(h1, 'val1', now - td, now - td, '1') + self.ds.addPeerToBlob(h1, 'val2', now - td2, now - td2, '2') + self.ds.addPeerToBlob(h2, 'val3', now - td2, now - td2, '3') + self.ds.addPeerToBlob(h2, 'val4', now, now, '4') + self.ds.removeExpiredPeers() + self.failUnless('val1' in self.ds.getPeersForBlob(h1), 'DataStore deleted an unexpired value! Value %s, publish time %s, current time %s' % ('val1', str(now - td), str(now))) + self.failIf('val2' in self.ds.getPeersForBlob(h1), 'DataStore failed to delete an expired value! Value %s, publish time %s, current time %s' % ('val2', str(now - td2), str(now))) + self.failIf('val3' in self.ds.getPeersForBlob(h2), 'DataStore failed to delete an expired value! 
Value %s, publish time %s, current time %s' % ('val3', str(now - td2), str(now))) + self.failUnless('val4' in self.ds.getPeersForBlob(h2), 'DataStore deleted an unexpired value! Value %s, publish time %s, current time %s' % ('val4', str(now), str(now))) + +# def testReplace(self): +# # First write with fake values +# now = int(time.time()) +# for key, value in self.cases: +# try: +# self.ds.setItem(key, 'abc', now, now, 'node1') +# except Exception: +# import traceback +# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5))) +# +# # write this stuff a second time, with the real values +# for key, value in self.cases: +# try: +# self.ds.setItem(key, value, now, now, 'node1') +# except Exception: +# import traceback +# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5))) +# +# self.failUnlessEqual(len(self.ds.keys()), len(self.cases), 'Values did not get overwritten properly; expected %d keys, got %d' % (len(self.cases), len(self.ds.keys()))) +# # Read back the data +# for key, value in self.cases: +# self.failUnlessEqual(self.ds[key], value, 'DataStore returned invalid data! Expected "%s", got "%s"' % (value, self.ds[key])) + +# def testDelete(self): +# # First some values +# now = int(time.time()) +# for key, value in self.cases: +# try: +# self.ds.setItem(key, 'abc', now, now, 'node1') +# except Exception: +# import traceback +# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5))) +# +# self.failUnlessEqual(len(self.ds.keys()), len(self.cases), 'Values did not get stored properly; expected %d keys, got %d' % (len(self.cases), len(self.ds.keys()))) +# +# # Delete an item from the data +# key, value == self.cases[0] +# del self.ds[key] +# self.failUnlessEqual(len(self.ds.keys()), len(self.cases)-1, 'Value was not deleted; expected %d keys, got %d' % (len(self.cases)-1, len(self.ds.keys()))) +# self.failIf(key in self.ds.keys(), 'Key was not deleted: %s' % key) + +# def testMetaData(self): +# now = int(time.time()) +# age = random.randint(10,3600) +# originallyPublished = [] +# for i in range(len(self.cases)): +# originallyPublished.append(now - age) +# # First some values with metadata +# i = 0 +# for key, value in self.cases: +# try: +# self.ds.setItem(key, 'abc', now, originallyPublished[i], 'node%d' % i) +# i += 1 +# except Exception: +# import traceback +# self.fail('Failed writing the following data: key: "%s", data: "%s"\n The error was: %s:' % (key, value, traceback.format_exc(5))) +# +# # Read back the meta-data +# i = 0 +# for key, value in self.cases: +# dsLastPublished = self.ds.lastPublished(key) +# dsOriginallyPublished = self.ds.originalPublishTime(key) +# dsOriginalPublisherID = self.ds.originalPublisherID(key) +# self.failUnless(type(dsLastPublished) == int, 'DataStore returned invalid type for "last published" time! Expected "int", got %s' % type(dsLastPublished)) +# self.failUnless(type(dsOriginallyPublished) == int, 'DataStore returned invalid type for "originally published" time! Expected "int", got %s' % type(dsOriginallyPublished)) +# self.failUnless(type(dsOriginalPublisherID) == str, 'DataStore returned invalid type for "original publisher ID"; Expected "str", got %s' % type(dsOriginalPublisherID)) +# self.failUnlessEqual(dsLastPublished, now, 'DataStore returned invalid "last published" time! 
Expected "%d", got "%d"' % (now, dsLastPublished)) +# self.failUnlessEqual(dsOriginallyPublished, originallyPublished[i], 'DataStore returned invalid "originally published" time! Expected "%d", got "%d"' % (originallyPublished[i], dsOriginallyPublished)) +# self.failUnlessEqual(dsOriginalPublisherID, 'node%d' % i, 'DataStore returned invalid "original publisher ID"; Expected "%s", got "%s"' % ('node%d' % i, dsOriginalPublisherID)) +# i += 1 + + +#class SQLiteDataStoreTest(DictDataStoreTest): +# def setUp(self): +# self.ds = entangled.kademlia.datastore.SQLiteDataStore() +# DictDataStoreTest.setUp(self) + + +def suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(DictDataStoreTest)) + #suite.addTest(unittest.makeSuite(SQLiteDataStoreTest)) + return suite + + +if __name__ == '__main__': + # If this module is executed from the commandline, run all its tests + unittest.TextTestRunner().run(suite()) diff --git a/tests/dht/testEncoding.py b/tests/dht/testEncoding.py new file mode 100644 index 000000000..25f56cd6f --- /dev/null +++ b/tests/dht/testEncoding.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive + +import unittest + +import lbrynet.dht.encoding + +class BencodeTest(unittest.TestCase): + """ Basic tests case for the Bencode implementation """ + def setUp(self): + self.encoding = lbrynet.dht.encoding.Bencode() + # Thanks goes to wikipedia for the initial test cases ;-) + self.cases = ((42, 'i42e'), + ('spam', '4:spam'), + (['spam',42], 'l4:spami42ee'), + ({'foo':42, 'bar':'spam'}, 'd3:bar4:spam3:fooi42ee'), + # ...and now the "real life" tests + ([['abc', '127.0.0.1', 1919], ['def', '127.0.0.1', 1921]], 'll3:abc9:127.0.0.1i1919eel3:def9:127.0.0.1i1921eee')) + # The following test cases are "bad"; i.e. sending rubbish into the decoder to test what exceptions get thrown + self.badDecoderCases = ('abcdefghijklmnopqrstuvwxyz', + '') + + def testEncoder(self): + """ Tests the bencode encoder """ + for value, encodedValue in self.cases: + result = self.encoding.encode(value) + self.failUnlessEqual(result, encodedValue, 'Value "%s" not correctly encoded! Expected "%s", got "%s"' % (value, encodedValue, result)) + + def testDecoder(self): + """ Tests the bencode decoder """ + for value, encodedValue in self.cases: + result = self.encoding.decode(encodedValue) + self.failUnlessEqual(result, value, 'Value "%s" not correctly decoded! Expected "%s", got "%s"' % (encodedValue, value, result)) + for encodedValue in self.badDecoderCases: + self.failUnlessRaises(lbrynet.dht.encoding.DecodeError, self.encoding.decode, encodedValue) + +def suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(BencodeTest)) + return suite + +if __name__ == '__main__': + # If this module is executed from the commandline, run all its tests + unittest.TextTestRunner().run(suite()) diff --git a/tests/dht/testKBucket.py b/tests/dht/testKBucket.py new file mode 100644 index 000000000..676112be2 --- /dev/null +++ b/tests/dht/testKBucket.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. 
+# See the COPYING file included in this archive + +import unittest + +from lbrynet.dht import kbucket +import lbrynet.dht.contact as contact +from lbrynet.dht import constants + +class KBucketTest(unittest.TestCase): + """ Test case for the KBucket class """ + def setUp(self): + self.kbucket = kbucket.KBucket(0, 2**160) + + def testAddContact(self): + """ Tests if the bucket handles contact additions/updates correctly """ + # Test if contacts can be added to empty list + # Add k contacts to bucket + for i in range(constants.k): + tmpContact = contact.Contact('tempContactID%d' % i, str(i), i, i) + self.kbucket.addContact(tmpContact) + self.failUnlessEqual(self.kbucket._contacts[i], tmpContact, "Contact in position %d not the same as the newly-added contact" % i) + + # Test if contact is not added to full list + i += 1 + tmpContact = contact.Contact('tempContactID%d' % i, str(i), i, i) + self.failUnlessRaises(kbucket.BucketFull, self.kbucket.addContact, tmpContact) + + # Test if an existing contact is updated correctly if added again + existingContact = self.kbucket._contacts[0] + self.kbucket.addContact(existingContact) + self.failUnlessEqual(self.kbucket._contacts.index(existingContact), len(self.kbucket._contacts)-1, 'Contact not correctly updated; it should be at the end of the list of contacts') + + def testGetContacts(self): + # try and get 2 contacts from empty list + result = self.kbucket.getContacts(2) + self.failIf(len(result) != 0, "Returned list should be empty; returned list length: %d" % (len(result))) + + + # Add k-2 contacts + if constants.k >= 2: + for i in range(constants.k-2): + tmpContact = contact.Contact(i,i,i,i) + self.kbucket.addContact(tmpContact) + else: + # add k contacts + for i in range(constants.k): + tmpContact = contact.Contact(i,i,i,i) + self.kbucket.addContact(tmpContact) + + # try to get too many contacts + # requested count greater than bucket size; should return at most k contacts + contacts = self.kbucket.getContacts(constants.k+3) + self.failUnless(len(contacts) <= constants.k, 'Returned list should not have more than k entries!') + + # verify returned contacts in list + for i in range(constants.k-2): + self.failIf(self.kbucket._contacts[i].id != i, "Contact in position %s not same as added contact" % (str(i))) + + # try to get too many contacts + # requested count one greater than number of contacts + if constants.k >= 2: + result = self.kbucket.getContacts(constants.k-1) + self.failIf(len(result) != constants.k-2, "Too many contacts in returned list %s - should be %s" % (len(result), constants.k-2)) + else: + result = self.kbucket.getContacts(constants.k-1) + # if the count is <= 0, it should return all of it's contats + self.failIf(len(result) != constants.k, "Too many contacts in returned list %s - should be %s" % (len(result), constants.k-2)) + + # try to get contacts + # requested count less than contact number + if constants.k >= 3: + result = self.kbucket.getContacts(constants.k-3) + self.failIf(len(result) != constants.k-3, "Too many contacts in returned list %s - should be %s" % (len(result), constants.k-3)) + + def testRemoveContact(self): + # try remove contact from empty list + rmContact = contact.Contact('TestContactID1','127.0.0.1',1, 1) + self.failUnlessRaises(ValueError, self.kbucket.removeContact, rmContact) + + # Add couple contacts + for i in range(constants.k-2): + tmpContact = contact.Contact('tmpTestContactID%d' % i, str(i), i, i) + self.kbucket.addContact(tmpContact) + + # try remove contact from empty list + 
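+        # (the bucket is not empty at this point: rmContact is added first so that
+        # removing it from a populated bucket can be verified)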
self.kbucket.addContact(rmContact) + result = self.kbucket.removeContact(rmContact) + self.failIf(rmContact in self.kbucket._contacts, "Could not remove contact from bucket") + + +def suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(KBucketTest)) + return suite + +if __name__ == '__main__': + # If this module is executed from the commandline, run all its tests + unittest.TextTestRunner().run(suite()) diff --git a/tests/dht/testMessages.py b/tests/dht/testMessages.py new file mode 100644 index 000000000..69f435d6a --- /dev/null +++ b/tests/dht/testMessages.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive + +import unittest + +from lbrynet.dht.msgtypes import Message, RequestMessage, ResponseMessage, ErrorMessage +from lbrynet.dht.msgformat import MessageTranslator, DefaultFormat + +class DefaultFormatTranslatorTest(unittest.TestCase): + """ Test case for the default message translator """ + def setUp(self): + self.cases = ((RequestMessage('node1', 'rpcMethod', {'arg1': 'a string', 'arg2': 123}, 'rpc1'), + {DefaultFormat.headerType: DefaultFormat.typeRequest, + DefaultFormat.headerNodeID: 'node1', + DefaultFormat.headerMsgID: 'rpc1', + DefaultFormat.headerPayload: 'rpcMethod', + DefaultFormat.headerArgs: {'arg1': 'a string', 'arg2': 123}}), + + (ResponseMessage('rpc2', 'node2', 'response'), + {DefaultFormat.headerType: DefaultFormat.typeResponse, + DefaultFormat.headerNodeID: 'node2', + DefaultFormat.headerMsgID: 'rpc2', + DefaultFormat.headerPayload: 'response'}), + + (ErrorMessage('rpc3', 'node3', "", 'this is a test exception'), + {DefaultFormat.headerType: DefaultFormat.typeError, + DefaultFormat.headerNodeID: 'node3', + DefaultFormat.headerMsgID: 'rpc3', + DefaultFormat.headerPayload: "", + DefaultFormat.headerArgs: 'this is a test exception'}), + + (ResponseMessage('rpc4', 'node4', [('H\x89\xb0\xf4\xc9\xe6\xc5`H>\xd5\xc2\xc5\xe8Od\xf1\xca\xfa\x82', '127.0.0.1', 1919), ('\xae\x9ey\x93\xdd\xeb\xf1^\xff\xc5\x0f\xf8\xac!\x0e\x03\x9fY@{', '127.0.0.1', 1921)]), + {DefaultFormat.headerType: DefaultFormat.typeResponse, + DefaultFormat.headerNodeID: 'node4', + DefaultFormat.headerMsgID: 'rpc4', + DefaultFormat.headerPayload: [('H\x89\xb0\xf4\xc9\xe6\xc5`H>\xd5\xc2\xc5\xe8Od\xf1\xca\xfa\x82', '127.0.0.1', 1919), ('\xae\x9ey\x93\xdd\xeb\xf1^\xff\xc5\x0f\xf8\xac!\x0e\x03\x9fY@{', '127.0.0.1', 1921)]}) + ) + self.translator = DefaultFormat() + self.failUnless(isinstance(self.translator, MessageTranslator), 'Translator class must inherit from entangled.kademlia.msgformat.MessageTranslator!') + + def testToPrimitive(self): + """ Tests translation from a Message object to a primitive """ + for msg, msgPrimitive in self.cases: + translatedObj = self.translator.toPrimitive(msg) + self.failUnlessEqual(len(translatedObj), len(msgPrimitive), "Translated object does not match example object's size") + for key in msgPrimitive: + self.failUnlessEqual(translatedObj[key], msgPrimitive[key], 'Message object type %s not translated correctly into primitive on key "%s"; expected "%s", got "%s"' % (msg.__class__.__name__, key, msgPrimitive[key], translatedObj[key])) + + def testFromPrimitive(self): + """ Tests translation from a primitive to a Message object """ + for msg, msgPrimitive in self.cases: + translatedObj = self.translator.fromPrimitive(msgPrimitive) + self.failUnlessEqual(type(translatedObj), 
type(msg), 'Message type incorrectly translated; expected "%s", got "%s"' % (type(msg), type(translatedObj))) + for key in msg.__dict__: + self.failUnlessEqual(msg.__dict__[key], translatedObj.__dict__[key], 'Message instance variable "%s" not translated correctly; expected "%s", got "%s"' % (key, msg.__dict__[key], translatedObj.__dict__[key])) + + +def suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(DefaultFormatTranslatorTest)) + return suite + +if __name__ == '__main__': + # If this module is executed from the commandline, run all its tests + unittest.TextTestRunner().run(suite()) diff --git a/tests/dht/testNode.py b/tests/dht/testNode.py new file mode 100644 index 000000000..e196d2c95 --- /dev/null +++ b/tests/dht/testNode.py @@ -0,0 +1,462 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. +# See the COPYING file included in this archive + +import hashlib +import unittest +import struct + +import lbrynet.dht.node +import lbrynet.dht.constants +import lbrynet.dht.datastore + +class NodeIDTest(unittest.TestCase): + """ Test case for the Node class's ID """ + def setUp(self): + self.node = lbrynet.dht.node.Node() + + def testAutoCreatedID(self): + """ Tests if a new node has a valid node ID """ + self.failUnlessEqual(type(self.node.id), str, 'Node does not have a valid ID') + self.failUnlessEqual(len(self.node.id), 20, 'Node ID length is incorrect! Expected 160 bits, got %d bits.' % (len(self.node.id)*8)) + + def testUniqueness(self): + """ Tests the uniqueness of the values created by the NodeID generator + """ + generatedIDs = [] + for i in range(100): + newID = self.node._generateID() + # ugly uniqueness test + self.failIf(newID in generatedIDs, 'Generated ID #%d not unique!' % (i+1)) + generatedIDs.append(newID) + + def testKeyLength(self): + """ Tests the key Node ID key length """ + for i in range(20): + id = self.node._generateID() + # Key length: 20 bytes == 160 bits + self.failUnlessEqual(len(id), 20, 'Length of generated ID is incorrect! Expected 160 bits, got %d bits.' 
% (len(id)*8)) + + +class NodeDataTest(unittest.TestCase): + """ Test case for the Node class's data-related functions """ + def setUp(self): + import lbrynet.dht.contact + h = hashlib.sha1() + h.update('test') + self.node = lbrynet.dht.node.Node() + self.contact = lbrynet.dht.contact.Contact(h.digest(), '127.0.0.1', 12345, self.node._protocol) + self.token = self.node.make_token(self.contact.compact_ip()) + self.cases = [] + for i in xrange(5): + h.update(str(i)) + self.cases.append((h.digest(), 5000+2*i)) + self.cases.append((h.digest(), 5001+2*i)) + #(('a', 'hello there\nthis is a test'), + # ('b', unicode('jasdklfjklsdj;f2352352ljklzsdlkjkasf\ndsjklafsd')), + # ('e', 123), + # ('f', [('this', 'is', 1), {'complex': 'data entry'}]), + # ('aMuchLongerKeyThanAnyOfThePreviousOnes', 'some data')) + + def testStore(self): + + def check_val_in_result(r, peer_info): + self.failUnless + + """ Tests if the node can store (and privately retrieve) some data """ + for key, value in self.cases: + self.node.store(key, {'port': value, 'bbid': self.contact.id, 'token': self.token}, self.contact.id, _rpcNodeContact=self.contact) + for key, value in self.cases: + expected_result = self.contact.compact_ip() + str(struct.pack('>H', value)) + self.contact.id + self.failUnless(self.node._dataStore.hasPeersForBlob(key), 'Stored key not found in node\'s DataStore: "%s"' % key) + self.failUnless(expected_result in self.node._dataStore.getPeersForBlob(key), 'Stored val not found in node\'s DataStore: key:"%s" port:"%s" %s' % (key, value, self.node._dataStore.getPeersForBlob(key))) + +class NodeContactTest(unittest.TestCase): + """ Test case for the Node class's contact management-related functions """ + def setUp(self): + self.node = lbrynet.dht.node.Node() + + def testAddContact(self): + """ Tests if a contact can be added and retrieved correctly """ + import lbrynet.dht.contact + # Create the contact + h = hashlib.sha1() + h.update('node1') + contactID = h.digest() + contact = lbrynet.dht.contact.Contact(contactID, '127.0.0.1', 91824, self.node._protocol) + # Now add it... 
+ self.node.addContact(contact) + # ...and request the closest nodes to it using FIND_NODE + closestNodes = self.node._routingTable.findCloseNodes(contactID, lbrynet.dht.constants.k) + self.failUnlessEqual(len(closestNodes), 1, 'Wrong amount of contacts returned; expected 1, got %d' % len(closestNodes)) + self.failUnless(contact in closestNodes, 'Added contact not found by issueing _findCloseNodes()') + + def testAddSelfAsContact(self): + """ Tests the node's behaviour when attempting to add itself as a contact """ + import lbrynet.dht.contact + # Create a contact with the same ID as the local node's ID + contact = lbrynet.dht.contact.Contact(self.node.id, '127.0.0.1', 91824, None) + # Now try to add it + self.node.addContact(contact) + # ...and request the closest nodes to it using FIND_NODE + closestNodes = self.node._routingTable.findCloseNodes(self.node.id, lbrynet.dht.constants.k) + self.failIf(contact in closestNodes, 'Node added itself as a contact') + + +#class NodeLookupTest(unittest.TestCase): +# """ Test case for the Node class's iterative node lookup algorithm """ +# def setUp(self): +# import entangled.kademlia.contact +# self.node = entangled.kademlia.node.Node() +# self.remoteNodes = [] +# for i in range(10): +# remoteNode = entangled.kademlia.node.Node() +# remoteContact = entangled.kademlia.contact.Contact(remoteNode.id, '127.0.0.1', 91827+i, self.node._protocol) +# self.remoteNodes.append(remoteNode) +# self.node.addContact(remoteContact) + + +# def testIterativeFindNode(self): +# """ Ugly brute-force test to see if the iterative node lookup algorithm runs without failing """ +# import entangled.kademlia.protocol +# entangled.kademlia.protocol.reactor.listenUDP(91826, self.node._protocol) +# for i in range(10): +# entangled.kademlia.protocol.reactor.listenUDP(91827+i, self.remoteNodes[i]._protocol) +# df = self.node.iterativeFindNode(self.node.id) +# df.addBoth(lambda _: entangled.kademlia.protocol.reactor.stop()) +# entangled.kademlia.protocol.reactor.run() + + +""" Some scaffolding for the NodeLookupTest class. 
Allows isolated node testing by simulating remote node responses""" +from twisted.internet import protocol, defer, selectreactor +from lbrynet.dht.msgtypes import ResponseMessage +class FakeRPCProtocol(protocol.DatagramProtocol): + def __init__(self): + self.reactor = selectreactor.SelectReactor() + self.testResponse = None + self.network = None + + + def createNetwork(self, contactNetwork): + """ set up a list of contacts together with their closest contacts + @param contactNetwork: a sequence of tuples, each containing a contact together with its closest + contacts: C{(, )} + """ + self.network = contactNetwork + + """ Fake RPC protocol; allows entangled.kademlia.contact.Contact objects to "send" RPCs """ + def sendRPC(self, contact, method, args, rawResponse=False): + #print method + " " + str(args) + + if method == "findNode": + # get the specific contacts closest contacts + closestContacts = [] + #print "contact" + contact.id + for contactTuple in self.network: + #print contactTuple[0].id + if contact == contactTuple[0]: + # get the list of closest contacts for this contact + closestContactsList = contactTuple[1] + #print "contact" + contact.id + + # Pack the closest contacts into a ResponseMessage + for closeContact in closestContactsList: + #print closeContact.id + closestContacts.append((closeContact.id, closeContact.address, closeContact.port)) + message = ResponseMessage("rpcId", contact.id, closestContacts) + + df = defer.Deferred() + df.callback((message,(contact.address, contact.port))) + return df + elif method == "findValue": + for contactTuple in self.network: + if contact == contactTuple[0]: + # Get the data stored by this remote contact + dataDict = contactTuple[2] + dataKey = dataDict.keys()[0] + data = dataDict.get(dataKey) + + # Check if this contact has the requested value + if dataKey == args[0]: + # Return the data value + response = dataDict + + print "data found at contact: " + contact.id + else: + # Return the closest contact to the requested data key + print "data not found at contact: " + contact.id + closeContacts = contactTuple[1] + closestContacts = [] + for closeContact in closeContacts: + closestContacts.append((closeContact.id, closeContact.address, closeContact.port)) + response = closestContacts + + # Create the response message + message = ResponseMessage("rpcId", contact.id, response) + df = defer.Deferred() + df.callback((message,(contact.address, contact.port))) + return df + + + + + print "findValue" + + def _send(self, data, rpcID, address): + """ fake sending data """ + + + +class NodeLookupTest(unittest.TestCase): + """ Test case for the Node class's iterativeFind node lookup algorithm """ + + def setUp(self): + + # create a fake protocol to imitate communication with other nodes + self._protocol = FakeRPCProtocol() + + # Note: The reactor is never started for this test. 
All deferred calls run sequentially, + # since there is no asynchronous network communication + + # create the node to be tested in isolation + self.node = lbrynet.dht.node.Node(None, 4000, None, None, self._protocol) + + self.updPort = 81173 + + # create a dummy reactor + #self._protocol.reactor.listenUDP(self.updPort, self._protocol) + + self.contactsAmount = 80 + # set the node ID manually for testing + self.node.id = '12345678901234567800' + + # Reinitialise the routing table + self.node._routingTable = lbrynet.dht.routingtable.OptimizedTreeRoutingTable(self.node.id) + + # create 160 bit node ID's for test purposes + self.testNodeIDs = [] + #idNum = long(self.node.id.encode('hex'), 16) + idNum = int(self.node.id) + for i in range(self.contactsAmount): + # create the testNodeIDs in ascending order, away from the actual node ID, with regards to the distance metric + self.testNodeIDs.append(idNum + i + 1) + + + # generate contacts + self.contacts = [] + for i in range(self.contactsAmount): + contact = lbrynet.dht.contact.Contact(str(self.testNodeIDs[i]), "127.0.0.1", self.updPort + i + 1, self._protocol) + self.contacts.append(contact) + + # create the network of contacts in format: (contact, closest contacts) + contactNetwork = ((self.contacts[0], self.contacts[8:15]), + (self.contacts[1], self.contacts[16:23]), + (self.contacts[2], self.contacts[24:31]), + (self.contacts[3], self.contacts[32:39]), + (self.contacts[4], self.contacts[40:47]), + (self.contacts[5], self.contacts[48:55]), + (self.contacts[6], self.contacts[56:63]), + (self.contacts[7], self.contacts[64:71]), + (self.contacts[8], self.contacts[72:79]), + (self.contacts[40], self.contacts[41:48]), + (self.contacts[41], self.contacts[41:48]), + (self.contacts[42], self.contacts[41:48]), + (self.contacts[43], self.contacts[41:48]), + (self.contacts[44], self.contacts[41:48]), + (self.contacts[45], self.contacts[41:48]), + (self.contacts[46], self.contacts[41:48]), + (self.contacts[47], self.contacts[41:48]), + (self.contacts[48], self.contacts[41:48]), + (self.contacts[50], self.contacts[0:7]), + (self.contacts[51], self.contacts[8:15]), + (self.contacts[52], self.contacts[16:23])) + + contacts_with_datastores = [] + + for contact_tuple in contactNetwork: + contacts_with_datastores.append((contact_tuple[0], contact_tuple[1], lbrynet.dht.datastore.DictDataStore())) + + self._protocol.createNetwork(contacts_with_datastores) + + def testNodeBootStrap(self): + """ Test bootstrap with the closest possible contacts """ + + df = self.node._iterativeFind(self.node.id, self.contacts[0:8]) + # Set the expected result + expectedResult = [] + + for item in self.contacts[0:6]: + expectedResult.append(item.id) + #print item.id + + # Get the result from the deferred + activeContacts = df.result + + + # Check the length of the active contacts + self.failUnlessEqual(activeContacts.__len__(), expectedResult.__len__(), \ + "More active contacts should exist, there should be %d contacts" %expectedResult.__len__()) + + + # Check that the received active contacts are the same as the input contacts + self.failUnlessEqual(activeContacts, expectedResult, \ + "Active should only contain the closest possible contacts which were used as input for the boostrap") + +# def testFindingCloserNodes(self): +# """ Test discovery of closer contacts""" +# +# # Use input contacts that have knowledge of closer contacts, +# df = self.node._iterativeFind(self.node.id, self.contacts[50:53]) +# #set the expected result +# expectedResult = [] +# #print "############ 
Expected Active contacts #################" +# for item in self.contacts[0:9]: +# expectedResult.append(item.id) +# #print item.id +# #print "#######################################################" +# +# # Get the result from the deferred +# activeContacts = df.result +# +# #print "!!!!!!!!!!! Receieved Active contacts !!!!!!!!!!!!!!!" +# #for item in activeContacts: +# # print item.id +# #print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" +# +# # Check the length of the active contacts +# self.failUnlessEqual(activeContacts.__len__(), expectedResult.__len__(), \ +# "Length of received active contacts not as expected, should be %d" %expectedResult.__len__()) +# +# +# # Check that the received active contacts are now closer to this node +# self.failUnlessEqual(activeContacts, expectedResult, \ +# "Active contacts should now only contain the closest possible contacts") + + + +# def testIterativeStore(self): +# """ test storing values """ +# +# # create the network of contacts in format: (contact, closest contacts) +# contactNetwork = ((self.contacts[0], self.contacts[0:8]), +# (self.contacts[1], self.contacts[0:8]), +# (self.contacts[2], self.contacts[0:8]), +# (self.contacts[3], self.contacts[0:8]), +# (self.contacts[4], self.contacts[0:8]), +# (self.contacts[5], self.contacts[0:8]), +# (self.contacts[6], self.contacts[0:8]), +# (self.contacts[7], self.contacts[0:8]), +# (self.contacts[8], self.contacts[0:8]), +# (self.contacts[40], self.contacts[41:48]), +# (self.contacts[41], self.contacts[41:48]), +# (self.contacts[42], self.contacts[41:48]), +# (self.contacts[43], self.contacts[41:48]), +# (self.contacts[44], self.contacts[41:48]), +# (self.contacts[45], self.contacts[41:48]), +# (self.contacts[46], self.contacts[41:48]), +# (self.contacts[47], self.contacts[41:48]), +# (self.contacts[48], self.contacts[41:48])) +# contacts_with_datastores = [] +# +# for contact_tuple in contactNetwork: +# contacts_with_datastores.append((contact_tuple[0], contact_tuple[1], lbrynet.dht.datastore.DictDataStore())) +# +# self._protocol.createNetwork(contacts_with_datastores) +# +# +# #self._protocol.createNetwork(contactNetwork) +# +# +# # Test storing a value that has an hash id close to the known contacts +# # The value should only be stored at those nodes +# value = 'value' +# valueID = self.contacts[40].id +# +# # Manually populate the routing table with contacts that have ID's close to the valueID +# for contact in self.contacts[40:48]: +# self.node.addContact(contact) +# +# # Manually populate the routing table with contacts that have ID's far away from the valueID +# for contact in self.contacts[0:8]: +# self.node.addContact(contact) +# +# # Store the value +# df = self.node.announceHaveBlob(valueID, value) +# +# storageNodes = df.result +# +# storageNodeIDs = [] +# for item in storageNodes: +# storageNodeIDs.append(item.id) +# storageNodeIDs.sort() +# #print storageNodeIDs +# +# expectedIDs = [] +# for item in self.contacts[40:43]: +# expectedIDs.append(item.id) +# #print expectedIDs +# +# #print '#### storage nodes ####' +# #for node in storageNodes: +# # print node.id +# +# +# # check that the value has been stored at nodes with ID's close to the valueID +# self.failUnlessEqual(storageNodeIDs, expectedIDs, \ +# "Value not stored at nodes with ID's close to the valueID") +# +# def testFindValue(self): +# # create test values using the contact ID as the key +# testValues = ({self.contacts[0].id: "some test data"}, +# {self.contacts[1].id: "some more test data"}, +# 
{self.contacts[8].id: "and more data"} +# ) +# +# +# # create the network of contacts in format: (contact, closest contacts, values) +# contactNetwork = ((self.contacts[0], self.contacts[0:6], testValues[0]), +# (self.contacts[1], self.contacts[0:6], testValues[1]), +# (self.contacts[2], self.contacts[0:6], {'2':'2'}), +# (self.contacts[3], self.contacts[0:6], {'4':'5'}), +# (self.contacts[4], self.contacts[0:6], testValues[2]), +# (self.contacts[5], self.contacts[0:6], {'2':'2'}), +# (self.contacts[6], self.contacts[0:6], {'2':'2'})) +# +# self._protocol.createNetwork(contactNetwork) +# +# # Initialise the routing table with some contacts +# for contact in self.contacts[0:4]: +# self.node.addContact(contact) +# +# # Initialise the node with some known contacts +# #self.node._iterativeFind(self.node.id, self.contacts[0:3]) +# +# df = self.node.iterativeFindValue(testValues[1].keys()[0]) +# +# resultDict = df.result +# keys = resultDict.keys() +# +# for key in keys: +# if key == 'closestNodeNoValue': +# print "closest contact without data " + " " + resultDict.get(key).id +# else: +# print "data key :" + key + "; " + "data: " + resultDict.get(key) + + + + + + +def suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(NodeIDTest)) + suite.addTest(unittest.makeSuite(NodeDataTest)) + suite.addTest(unittest.makeSuite(NodeContactTest)) + suite.addTest(unittest.makeSuite(NodeLookupTest)) + return suite + +if __name__ == '__main__': + # If this module is executed from the commandline, run all its tests + unittest.TextTestRunner().run(suite()) diff --git a/tests/dht/testProtocol.py b/tests/dht/testProtocol.py new file mode 100644 index 000000000..91758c1cc --- /dev/null +++ b/tests/dht/testProtocol.py @@ -0,0 +1,242 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. 
+# See the COPYING file included in this archive + +import time +import unittest + +from twisted.internet import defer +from twisted.python import failure +import twisted.internet.selectreactor +from twisted.internet.protocol import DatagramProtocol + +import lbrynet.dht.protocol +import lbrynet.dht.contact +import lbrynet.dht.constants +import lbrynet.dht.msgtypes +from lbrynet.dht.node import rpcmethod + + +class FakeNode(object): + """ A fake node object implementing some RPC and non-RPC methods to + test the Kademlia protocol's behaviour + """ + def __init__(self, id): + self.id = id + self.contacts = [] + + @rpcmethod + def ping(self): + return 'pong' + + def pingNoRPC(self): + return 'pong' + + @rpcmethod + def echo(self, value): + return value + + def addContact(self, contact): + self.contacts.append(contact) + + def removeContact(self, contact): + self.contacts.remove(contact) + + def indirectPingContact(self, protocol, contact): + """ Pings the given contact (using the specified KademliaProtocol + object, not the direct Contact API), and removes the contact + on a timeout """ + df = protocol.sendRPC(contact, 'ping', {}) + def handleError(f): + if f.check(lbrynet.dht.protocol.TimeoutError): + self.removeContact(contact) + return f + else: + # This is some other error + return f + df.addErrback(handleError) + return df + +class ClientDatagramProtocol(lbrynet.dht.protocol.KademliaProtocol): + data = '' + msgID = '' + destination = ('127.0.0.1', 9182) + + def __init__(self): + lbrynet.dht.protocol.KademliaProtocol.__init__(self, None) + + def startProtocol(self): + #self.transport.connect(self.destination[0], self.destination[1]) + self.sendDatagram() + + def sendDatagram(self): + if len(self.data): + self._send(self.data, self.msgID, self.destination) + +# def datagramReceived(self, datagram, host): +# print 'Datagram received: ', repr(datagram) +# self.sendDatagram() + + + +class KademliaProtocolTest(unittest.TestCase): + """ Test case for the Protocol class """ + def setUp(self): + del lbrynet.dht.protocol.reactor + lbrynet.dht.protocol.reactor = twisted.internet.selectreactor.SelectReactor() + self.node = FakeNode('node1') + self.protocol = lbrynet.dht.protocol.KademliaProtocol(self.node) + + def testReactor(self): + """ Tests if the reactor can start/stop the protocol correctly """ + lbrynet.dht.protocol.reactor.listenUDP(0, self.protocol) + lbrynet.dht.protocol.reactor.callLater(0, lbrynet.dht.protocol.reactor.stop) + lbrynet.dht.protocol.reactor.run() + + def testRPCTimeout(self): + """ Tests if a RPC message sent to a dead remote node times out correctly """ + deadContact = lbrynet.dht.contact.Contact('node2', '127.0.0.1', 9182, self.protocol) + self.node.addContact(deadContact) + # Make sure the contact was added + self.failIf(deadContact not in self.node.contacts, 'Contact not added to fake node (error in test code)') + # Set the timeout to 0 for testing + tempTimeout = lbrynet.dht.constants.rpcTimeout + lbrynet.dht.constants.rpcTimeout = 0 + lbrynet.dht.protocol.reactor.listenUDP(0, self.protocol) + # Run the PING RPC (which should timeout) + df = self.node.indirectPingContact(self.protocol, deadContact) + # Stop the reactor if a result arrives (timeout or not) + df.addBoth(lambda _: lbrynet.dht.protocol.reactor.stop()) + lbrynet.dht.protocol.reactor.run() + # See if the contact was removed due to the timeout + self.failIf(deadContact in self.node.contacts, 'Contact was not removed after RPC timeout; check exception types.') + # Restore the global timeout + 
lbrynet.dht.constants.rpcTimeout = tempTimeout + + def testRPCRequest(self): + """ Tests if a valid RPC request is executed and responded to correctly """ + remoteContact = lbrynet.dht.contact.Contact('node2', '127.0.0.1', 9182, self.protocol) + self.node.addContact(remoteContact) + self.error = None + def handleError(f): + self.error = 'An RPC error occurred: %s' % f.getErrorMessage() + def handleResult(result): + expectedResult = 'pong' + if result != expectedResult: + self.error = 'Result from RPC is incorrect; expected "%s", got "%s"' % (expectedResult, result) + # Publish the "local" node on the network + lbrynet.dht.protocol.reactor.listenUDP(9182, self.protocol) + # Simulate the RPC + df = remoteContact.ping() + df.addCallback(handleResult) + df.addErrback(handleError) + df.addBoth(lambda _: lbrynet.dht.protocol.reactor.stop()) + lbrynet.dht.protocol.reactor.run() + self.failIf(self.error, self.error) + # The list of sent RPC messages should be empty at this stage + self.failUnlessEqual(len(self.protocol._sentMessages), 0, 'The protocol is still waiting for a RPC result, but the transaction is already done!') + + def testRPCAccess(self): + """ Tests invalid RPC requests + + Verifies that a RPC request for an existing but unpublished + method is denied, and that the associated (remote) exception gets + raised locally """ + remoteContact = lbrynet.dht.contact.Contact('node2', '127.0.0.1', 9182, self.protocol) + self.node.addContact(remoteContact) + self.error = None + def handleError(f): + try: + f.raiseException() + except AttributeError, e: + # This is the expected outcome since the remote node did not publish the method + self.error = None + except Exception, e: + self.error = 'The remote method failed, but the wrong exception was raised; expected AttributeError, got %s' % type(e) + + def handleResult(result): + self.error = 'The remote method executed successfully, returning: "%s"; this RPC should not have been allowed.' % result + # Publish the "local" node on the network + lbrynet.dht.protocol.reactor.listenUDP(9182, self.protocol) + # Simulate the RPC + df = remoteContact.pingNoRPC() + df.addCallback(handleResult) + df.addErrback(handleError) + df.addBoth(lambda _: lbrynet.dht.protocol.reactor.stop()) + lbrynet.dht.protocol.reactor.run() + self.failIf(self.error, self.error) + # The list of sent RPC messages should be empty at this stage + self.failUnlessEqual(len(self.protocol._sentMessages), 0, 'The protocol is still waiting for a RPC result, but the transaction is already done!') + + def testRPCRequestArgs(self): + """ Tests if an RPC requiring arguments is executed correctly """ + remoteContact = lbrynet.dht.contact.Contact('node2', '127.0.0.1', 9182, self.protocol) + self.node.addContact(remoteContact) + self.error = None + def handleError(f): + self.error = 'An RPC error occurred: %s' % f.getErrorMessage() + def handleResult(result): + expectedResult = 'This should be returned.' 
+ if result != 'This should be returned.': + self.error = 'Result from RPC is incorrect; expected "%s", got "%s"' % (expectedResult, result) + # Publish the "local" node on the network + lbrynet.dht.protocol.reactor.listenUDP(9182, self.protocol) + # Simulate the RPC + df = remoteContact.echo('This should be returned.') + df.addCallback(handleResult) + df.addErrback(handleError) + df.addBoth(lambda _: lbrynet.dht.protocol.reactor.stop()) + lbrynet.dht.protocol.reactor.run() + self.failIf(self.error, self.error) + # The list of sent RPC messages should be empty at this stage + self.failUnlessEqual(len(self.protocol._sentMessages), 0, 'The protocol is still waiting for a RPC result, but the transaction is already done!') + +# def testDatagramLargeMessageReconstruction(self): +# """ Tests if a large amount of data can be successfully re-constructed from multiple UDP datagrams """ +# remoteContact = lbrynet.dht.contact.Contact('node2', '127.0.0.1', 9182, self.protocol) +# self.node.addContact(remoteContact) +# self.error = None +# #responseData = 8143 * '0' # Threshold for a single packet transmission +# responseData = 300000 * '0' +# def handleError(f): +# if f.check((lbrynet.dht.protocol.TimeoutError)): +# self.error = 'RPC from the following contact timed out: %s' % f.getErrorMessage() +# else: +# self.error = 'An RPC error occurred: %s' % f.getErrorMessage() +# def handleResult(result): +# if result != responseData: +# self.error = 'Result from RPC is incorrect; expected "%s", got "%s"' % (responseData, result) +# # Publish the "local" node on the network +# lbrynet.dht.protocol.reactor.listenUDP(9182, self.protocol) +# # ...and make it think it is waiting for a result from an RPC +# msgID = 'abcdefghij1234567890' +# df = defer.Deferred() +# timeoutCall = lbrynet.dht.protocol.reactor.callLater(lbrynet.dht.constants.rpcTimeout, self.protocol._msgTimeout, msgID) +# self.protocol._sentMessages[msgID] = (remoteContact.id, df, timeoutCall) +# # Simulate the "reply" transmission +# msg = lbrynet.dht.msgtypes.ResponseMessage(msgID, 'node2', responseData) +# msgPrimitive = self.protocol._translator.toPrimitive(msg) +# encodedMsg = self.protocol._encoder.encode(msgPrimitive) +# udpClient = ClientDatagramProtocol() +# udpClient.data = encodedMsg +# udpClient.msgID = msgID +# lbrynet.dht.protocol.reactor.listenUDP(0, udpClient) +# df.addCallback(handleResult) +# df.addErrback(handleError) +# df.addBoth(lambda _: lbrynet.dht.protocol.reactor.stop()) +# lbrynet.dht.protocol.reactor.run() +# self.failIf(self.error, self.error) +# # The list of sent RPC messages should be empty at this stage +# #self.failUnlessEqual(len(self.protocol._sentMessages), 0, 'The protocol is still waiting for a RPC result, but the transaction is already done!') + + +def suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(KademliaProtocolTest)) + return suite + +if __name__ == '__main__': + # If this module is executed from the commandline, run all its tests + unittest.TextTestRunner().run(suite()) \ No newline at end of file diff --git a/tests/dht/testRoutingTable.py b/tests/dht/testRoutingTable.py new file mode 100644 index 000000000..8a1ad9c54 --- /dev/null +++ b/tests/dht/testRoutingTable.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python +# +# This library is free software, distributed under the terms of +# the GNU Lesser General Public License Version 3, or any later version. 
+# See the COPYING file included in this archive
+
+import hashlib
+import unittest
+
+import lbrynet.dht.constants
+import lbrynet.dht.routingtable
+import lbrynet.dht.contact
+
+class FakeRPCProtocol(object):
+    """ Fake RPC protocol; allows lbrynet.dht.contact.Contact objects to "send" RPCs """
+    def sendRPC(self, *args, **kwargs):
+        return FakeDeferred()
+
+
+class FakeDeferred(object):
+    """ Fake Twisted Deferred object; allows the routing table to add callbacks that do nothing """
+    def addCallback(self, *args, **kwargs):
+        return
+    def addErrback(self, *args, **kwargs):
+        return
+
+
+class TreeRoutingTableTest(unittest.TestCase):
+    """ Test case for the RoutingTable class """
+    def setUp(self):
+        h = hashlib.sha1()
+        h.update('node1')
+        self.nodeID = h.digest()
+        self.protocol = FakeRPCProtocol()
+        self.routingTable = lbrynet.dht.routingtable.TreeRoutingTable(self.nodeID)
+
+    def testDistance(self):
+        """ Test to see if the distance method returns the correct result """
+
+        # basicTestList holds a couple of 3-tuples (value1, value2, expected distance)
+        basicTestList = [('123456789', '123456789', 0L), ('12345', '98765', 34527773184L)]
+
+        for test in basicTestList:
+            result = self.routingTable.distance(test[0], test[1])
+            self.failIf(result != test[2], 'Result of distance() should be %s but %s was returned' % (test[2], result))
+
+        baseIp = '146.64.19.111'
+        ipTestList = ['146.64.29.222', '192.68.19.333']
+
+        distanceOne = self.routingTable.distance(baseIp, ipTestList[0])
+        distanceTwo = self.routingTable.distance(baseIp, ipTestList[1])
+
+        self.failIf(distanceOne > distanceTwo, '%s should be closer to the base IP %s than %s' % (ipTestList[0], baseIp, ipTestList[1]))
+
+    def testAddContact(self):
+        """ Tests if a contact can be added and retrieved correctly """
+        # Create the contact
+        h = hashlib.sha1()
+        h.update('node2')
+        contactID = h.digest()
+        contact = lbrynet.dht.contact.Contact(contactID, '127.0.0.1', 91824, self.protocol)
+        # Now add it...
+        self.routingTable.addContact(contact)
+        # ...and request the closest nodes to it (will retrieve it)
+        closestNodes = self.routingTable.findCloseNodes(contactID, lbrynet.dht.constants.k)
+        self.failUnlessEqual(len(closestNodes), 1, 'Wrong number of contacts returned; expected 1, got %d' % len(closestNodes))
+        self.failUnless(contact in closestNodes, 'Added contact not found by issuing findCloseNodes()')
+
+    def testGetContact(self):
+        """ Tests if a specific existing contact can be retrieved correctly """
+        h = hashlib.sha1()
+        h.update('node2')
+        contactID = h.digest()
+        contact = lbrynet.dht.contact.Contact(contactID, '127.0.0.1', 91824, self.protocol)
+        # Now add it...
+        self.routingTable.addContact(contact)
+        # ...and get it again
+        sameContact = self.routingTable.getContact(contactID)
+        self.failUnlessEqual(contact, sameContact, 'getContact() should return the same contact')
+
+    def testAddParentNodeAsContact(self):
+        """ Tests the routing table's behaviour when attempting to add its parent node as a contact """
+        # Create a contact with the same ID as the local node's ID
+        contact = lbrynet.dht.contact.Contact(self.nodeID, '127.0.0.1', 91824, self.protocol)
+        # Now try to add it
+        self.routingTable.addContact(contact)
+        # ...and request the closest nodes to it using FIND_NODE
+        closestNodes = self.routingTable.findCloseNodes(self.nodeID, lbrynet.dht.constants.k)
+        self.failIf(contact in closestNodes, 'Node added itself as a contact')
+
+    def testRemoveContact(self):
+        """ Tests contact removal """
+        # Create the contact
+        h = hashlib.sha1()
+        h.update('node2')
+        contactID = h.digest()
+        contact = lbrynet.dht.contact.Contact(contactID, '127.0.0.1', 91824, self.protocol)
+        # Now add it...
+        self.routingTable.addContact(contact)
+        # Verify addition
+        self.failUnlessEqual(len(self.routingTable._buckets[0]), 1, 'Contact not added properly')
+        # Now remove it
+        self.routingTable.removeContact(contact.id)
+        self.failUnlessEqual(len(self.routingTable._buckets[0]), 0, 'Contact not removed properly')
+
+    def testSplitBucket(self):
+        """ Tests that the routing table dynamically splits k-buckets correctly """
+        self.failUnlessEqual(self.routingTable._buckets[0].rangeMax, 2**160, 'Initial k-bucket range should be 0 <= range < 2**160')
+        # Add k contacts
+        for i in range(lbrynet.dht.constants.k):
+            h = hashlib.sha1()
+            h.update('remote node %d' % i)
+            nodeID = h.digest()
+            contact = lbrynet.dht.contact.Contact(nodeID, '127.0.0.1', 91824, self.protocol)
+            self.routingTable.addContact(contact)
+        self.failUnlessEqual(len(self.routingTable._buckets), 1, 'Only k nodes have been added; the first k-bucket should now be full, but should not yet be split')
+        # Now add 1 more contact
+        h = hashlib.sha1()
+        h.update('yet another remote node')
+        nodeID = h.digest()
+        contact = lbrynet.dht.contact.Contact(nodeID, '127.0.0.1', 91824, self.protocol)
+        self.routingTable.addContact(contact)
+        self.failUnlessEqual(len(self.routingTable._buckets), 2, 'k+1 nodes have been added; the first k-bucket should have been split into two new buckets')
+        self.failIfEqual(self.routingTable._buckets[0].rangeMax, 2**160, 'K-bucket was split, but its range was not properly adjusted')
+        self.failUnlessEqual(self.routingTable._buckets[1].rangeMax, 2**160, 'K-bucket was split, but the second (new) bucket\'s max range was not set properly')
+        self.failUnlessEqual(self.routingTable._buckets[0].rangeMax, self.routingTable._buckets[1].rangeMin, 'K-bucket was split, but the min/max ranges were not divided properly')
+
+    def testFullBucketNoSplit(self):
+        """ Test that a bucket is not split if it is full but does not cover the range containing the parent node's ID """
+        self.routingTable._parentNodeID = 21*'a' # more than 160 bits; this will not be in the range of _any_ k-bucket
+        # Add k contacts
+        for i in range(lbrynet.dht.constants.k):
+            h = hashlib.sha1()
+            h.update('remote node %d' % i)
+            nodeID = h.digest()
+            contact = lbrynet.dht.contact.Contact(nodeID, '127.0.0.1', 91824, self.protocol)
+            self.routingTable.addContact(contact)
+        self.failUnlessEqual(len(self.routingTable._buckets), 1, 'Only k nodes have been added; the first k-bucket should now be full, and there should not be more than 1 bucket')
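+        # (the parent node ID set above lies outside every bucket's range, so this bucket is expected to remain full and unsplit when another contact is added below)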
+ self.failUnlessEqual(len(self.routingTable._buckets[0]._contacts), lbrynet.dht.constants.k, 'Bucket should have k contacts; expected %d got %d' % (lbrynet.dht.constants.k, len(self.routingTable._buckets[0]._contacts))) + # Now add 1 more contact + h = hashlib.sha1() + h.update('yet another remote node') + nodeID = h.digest() + contact = lbrynet.dht.contact.Contact(nodeID, '127.0.0.1', 91824, self.protocol) + self.routingTable.addContact(contact) + self.failUnlessEqual(len(self.routingTable._buckets), 1, 'There should not be more than 1 bucket, since the bucket should not have been split (parent node ID not in range)') + self.failUnlessEqual(len(self.routingTable._buckets[0]._contacts), lbrynet.dht.constants.k, 'Bucket should have k contacts; expected %d got %d' % (lbrynet.dht.constants.k, len(self.routingTable._buckets[0]._contacts))) + self.failIf(contact in self.routingTable._buckets[0]._contacts, 'New contact should have been discarded (since RPC is faked in this test)') + +def suite(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(TreeRoutingTableTest)) + return suite + +if __name__ == '__main__': + # If this module is executed from the commandline, run all its tests + unittest.TextTestRunner().run(suite()) diff --git a/tests/functional_tests.py b/tests/functional_tests.py new file mode 100644 index 000000000..664cce127 --- /dev/null +++ b/tests/functional_tests.py @@ -0,0 +1,1023 @@ +import shutil +from multiprocessing import Process, Event, Queue +import logging +import sys +import random +import io +from Crypto.PublicKey import RSA +from Crypto import Random +from Crypto.Hash import MD5 +from lbrynet.conf import MIN_BLOB_DATA_PAYMENT_RATE +from lbrynet.conf import MIN_BLOB_INFO_PAYMENT_RATE +from lbrynet.lbrylive.LiveStreamCreator import FileLiveStreamCreator, add_live_stream_to_sd_identifier +from lbrynet.lbrylive.PaymentRateManager import BaseLiveStreamPaymentRateManager +from lbrynet.lbrylive.PaymentRateManager import LiveStreamPaymentRateManager +from lbrynet.lbrylive.LiveStreamMetadataManager import DBLiveStreamMetadataManager +from lbrynet.lbrylive.LiveStreamMetadataManager import TempLiveStreamMetadataManager +from lbrynet.lbryfile.LBRYFileMetadataManager import TempLBRYFileMetadataManager, DBLBRYFileMetadataManager +from lbrynet.lbryfilemanager.LBRYFileManager import LBRYFileManager +from lbrynet.core.PaymentRateManager import PaymentRateManager +from lbrynet.core.PTCWallet import PointTraderKeyQueryHandlerFactory, PointTraderKeyExchanger +from lbrynet.core.Session import LBRYSession +from lbrynet.core.client.StandaloneBlobDownloader import StandaloneBlobDownloader +from lbrynet.core.StreamDescriptor import BlobStreamDescriptorWriter +from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier +from lbrynet.core.StreamDescriptor import download_sd_blob +from lbrynet.lbryfilemanager.LBRYFileCreator import create_lbry_file +from lbrynet.lbryfile.StreamDescriptor import get_sd_info +from twisted.internet import defer, threads, task +from twisted.trial.unittest import TestCase +from twisted.python.failure import Failure +import os +from lbrynet.core.PeerManager import PeerManager +from lbrynet.core.RateLimiter import DummyRateLimiter, RateLimiter +from lbrynet.core.server.BlobAvailabilityHandler import BlobAvailabilityHandlerFactory +from lbrynet.core.server.BlobRequestHandler import BlobRequestHandlerFactory +from lbrynet.core.server.ServerProtocol import ServerProtocolFactory +from lbrynet.lbrylive.server.LiveBlobInfoQueryHandler import 
CryptBlobInfoQueryHandlerFactory + + +log_format = "%(funcName)s(): %(message)s" +logging.basicConfig(level=logging.WARNING, format=log_format) +logging.debug("test") + + +class FakeNode(object): + def __init__(self, *args, **kwargs): + pass + + def joinNetwork(self, *args): + pass + + def stop(self): + pass + + +class FakeWallet(object): + def __init__(self): + self.private_key = RSA.generate(1024) + self.encoded_public_key = self.private_key.publickey().exportKey() + + def start(self): + return defer.succeed(True) + + def stop(self): + return defer.succeed(True) + + def get_info_exchanger(self): + return PointTraderKeyExchanger(self) + + def get_wallet_info_query_handler_factory(self): + return PointTraderKeyQueryHandlerFactory(self) + + def reserve_points(self, *args): + return True + + def cancel_point_reservation(self, *args): + pass + + def send_points(self, *args): + return defer.succeed(True) + + def add_expected_payment(self, *args): + pass + + def get_balance(self): + return defer.succeed(1000) + + def set_public_key_for_peer(self, peer, public_key): + pass + + +class FakePeerFinder(object): + def __init__(self, port, peer_manager): + self.peer_manager = peer_manager + + def find_peers_for_blob(self, *args): + return defer.succeed([self.peer_manager.get_peer("127.0.0.1", 5553)]) + + def run_manage_loop(self): + pass + + def stop(self): + pass + + +class FakeTwoPeerFinder(object): + def __init__(self, port, peer_manager): + self.peer_manager = peer_manager + self.count = 0 + + def find_peers_for_blob(self, *args): + if self.count % 2 == 0: + peer_port = 5553 + else: + peer_port = 5554 + self.count += 1 + return defer.succeed([self.peer_manager.get_peer("127.0.0.1", peer_port)]) + + def run_manage_loop(self): + pass + + def stop(self): + pass + + +class FakeAnnouncer(object): + + def __init__(self, *args): + pass + + def add_supplier(self, supplier): + pass + + def immediate_announce(self, *args): + pass + + def run_manage_loop(self): + pass + + def stop(self): + pass + + +class GenFile(io.RawIOBase): + def __init__(self, size, pattern): + io.RawIOBase.__init__(self) + self.size = size + self.pattern = pattern + self.read_so_far = 0 + self.buff = b'' + self.last_offset = 0 + + def readable(self): + return True + + def writable(self): + return False + + def read(self, n=-1): + if n > -1: + bytes_to_read = min(n, self.size - self.read_so_far) + else: + bytes_to_read = self.size - self.read_so_far + output, self.buff = self.buff[:bytes_to_read], self.buff[bytes_to_read:] + bytes_to_read -= len(output) + while bytes_to_read > 0: + self.buff = self._generate_chunk() + new_output, self.buff = self.buff[:bytes_to_read], self.buff[bytes_to_read:] + bytes_to_read -= len(new_output) + output += new_output + self.read_so_far += len(output) + return output + + def readall(self): + return self.read() + + def _generate_chunk(self, n=2**10): + output = self.pattern[self.last_offset:self.last_offset + n] + n_left = n - len(output) + whole_patterns = n_left / len(self.pattern) + output += self.pattern * whole_patterns + self.last_offset = n - len(output) + output += self.pattern[:self.last_offset] + return output + + +test_create_stream_sd_file = { + 'stream_name': '746573745f66696c65', + 'blobs': [ + {'length': 2097152, 'blob_num': 0, + 'blob_hash': + 'dc4708f76a5e7af0f1cae0ee96b824e2ed9250c9346c093b441f0a20d3607c17948b6fcfb4bc62020fe5286693d08586', + 'iv': '30303030303030303030303030303031'}, + {'length': 2097152, 'blob_num': 1, + 'blob_hash': + 
'f4067522c1b49432a2a679512e3917144317caa1abba0c041e0cd2cf9f635d4cf127ce1824fa04189b63916174951f70', + 'iv': '30303030303030303030303030303032'}, + {'length': 1015056, 'blob_num': 2, + 'blob_hash': + '305486c434260484fcb2968ce0e963b72f81ba56c11b08b1af0789b55b44d78422600f9a38e3cf4f2e9569897e5646a9', + 'iv': '30303030303030303030303030303033'}, + {'length': 0, 'blob_num': 3, 'iv': '30303030303030303030303030303034'}], + 'stream_type': 'lbryfile', + 'key': '30313233343536373031323334353637', + 'suggested_file_name': '746573745f66696c65', + 'stream_hash': '6d27fbe10c86d81aacfb897c7a426d0a2214f5a299455a6d315c0f998c4b3545c2dc60906122d94653c23b1898229e3f'} + + +def start_lbry_uploader(sd_hash_queue, kill_event, dead_event): + + sys.modules = sys.modules.copy() + + del sys.modules['twisted.internet.reactor'] + + import twisted.internet + + twisted.internet.reactor = twisted.internet.epollreactor.EPollReactor() + + sys.modules['twisted.internet.reactor'] = twisted.internet.reactor + + from twisted.internet import reactor + + logging.debug("Starting the uploader") + + Random.atfork() + + r = random.Random() + r.seed("start_lbry_uploader") + + wallet = FakeWallet() + peer_manager = PeerManager() + peer_finder = FakePeerFinder(5553, peer_manager) + hash_announcer = FakeAnnouncer() + rate_limiter = DummyRateLimiter() + sd_identifier = StreamDescriptorIdentifier() + + db_dir = "server" + os.mkdir(db_dir) + + session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd", + peer_finder=peer_finder, hash_announcer=hash_announcer, peer_port=5553, + use_upnp=False, rate_limiter=rate_limiter, wallet=wallet) + + stream_info_manager = TempLBRYFileMetadataManager() + + lbry_file_manager = LBRYFileManager(session, stream_info_manager, sd_identifier) + + def start_all(): + + d = session.setup() + d.addCallback(lambda _: lbry_file_manager.setup()) + d.addCallback(lambda _: start_server()) + d.addCallback(lambda _: create_stream()) + d.addCallback(create_stream_descriptor) + d.addCallback(put_sd_hash_on_queue) + + def print_error(err): + logging.critical("Server error: %s", err.getErrorMessage()) + + d.addErrback(print_error) + return d + + def start_server(): + + server_port = None + + query_handler_factories = { + BlobAvailabilityHandlerFactory(session.blob_manager): True, + BlobRequestHandlerFactory(session.blob_manager, session.wallet, + PaymentRateManager(session.base_payment_rate_manager)): True, + session.wallet.get_wallet_info_query_handler_factory(): True, + } + + server_factory = ServerProtocolFactory(session.rate_limiter, + query_handler_factories, + session.peer_manager) + + server_port = reactor.listenTCP(5553, server_factory) + logging.debug("Started listening") + + def kill_server(): + ds = [] + ds.append(session.shut_down()) + ds.append(lbry_file_manager.stop()) + if server_port: + ds.append(server_port.stopListening()) + kill_check.stop() + dead_event.set() + dl = defer.DeferredList(ds) + dl.addCallback(lambda _: reactor.stop()) + return dl + + def check_for_kill(): + if kill_event.is_set(): + kill_server() + + kill_check = task.LoopingCall(check_for_kill) + kill_check.start(1.0) + return True + + def create_stream(): + test_file = GenFile(5209343, b''.join([chr(i) for i in xrange(0, 64, 6)])) + d = create_lbry_file(session, lbry_file_manager, "test_file", test_file) + return d + + def create_stream_descriptor(stream_hash): + descriptor_writer = BlobStreamDescriptorWriter(session.blob_manager) + d = get_sd_info(lbry_file_manager.stream_info_manager, stream_hash, True) + 
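+        # get_sd_info gathers the stream's metadata (the same fields shown in test_create_stream_sd_file above); create_descriptor then writes that metadata out as a stream descriptor blob, whose hash is what put_sd_hash_on_queue hands back to the parent test process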
d.addCallback(descriptor_writer.create_descriptor) + return d + + def put_sd_hash_on_queue(sd_hash): + sd_hash_queue.put(sd_hash) + + reactor.callLater(1, start_all) + reactor.run() + + +def start_live_server(sd_hash_queue, kill_event, dead_event): + + sys.modules = sys.modules.copy() + + del sys.modules['twisted.internet.reactor'] + + import twisted.internet + + twisted.internet.reactor = twisted.internet.epollreactor.EPollReactor() + + sys.modules['twisted.internet.reactor'] = twisted.internet.reactor + + from twisted.internet import reactor + + logging.debug("In start_server.") + + Random.atfork() + + r = random.Random() + r.seed("start_live_server") + + wallet = FakeWallet() + peer_manager = PeerManager() + peer_finder = FakePeerFinder(5553, peer_manager) + hash_announcer = FakeAnnouncer() + rate_limiter = DummyRateLimiter() + sd_identifier = StreamDescriptorIdentifier() + + db_dir = "server" + os.mkdir(db_dir) + + session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd", + peer_finder=peer_finder, hash_announcer=hash_announcer, peer_port=5553, + use_upnp=False, rate_limiter=rate_limiter, wallet=wallet) + + base_payment_rate_manager = BaseLiveStreamPaymentRateManager(MIN_BLOB_INFO_PAYMENT_RATE) + data_payment_rate_manager = PaymentRateManager(session.base_payment_rate_manager) + payment_rate_manager = LiveStreamPaymentRateManager(base_payment_rate_manager, + data_payment_rate_manager) + + stream_info_manager = DBLiveStreamMetadataManager(session.db_dir, hash_announcer) + + logging.debug("Created the session") + + server_port = [] + + def start_listening(): + logging.debug("Starting the server protocol") + query_handler_factories = { + CryptBlobInfoQueryHandlerFactory(stream_info_manager, session.wallet, + payment_rate_manager): True, + BlobAvailabilityHandlerFactory(session.blob_manager): True, + BlobRequestHandlerFactory(session.blob_manager, session.wallet, + payment_rate_manager): True, + session.wallet.get_wallet_info_query_handler_factory(): True, + } + + server_factory = ServerProtocolFactory(session.rate_limiter, + query_handler_factories, + session.peer_manager) + server_port.append(reactor.listenTCP(5553, server_factory)) + logging.debug("Server protocol has started") + + def create_stream(): + logging.debug("Making the live stream") + test_file = GenFile(5209343, b''.join([chr(i + 2) for i in xrange(0, 64, 6)])) + stream_creator_helper = FileLiveStreamCreator(session.blob_manager, stream_info_manager, + "test_file", test_file) + d = stream_creator_helper.setup() + d.addCallback(lambda _: stream_creator_helper.publish_stream_descriptor()) + d.addCallback(put_sd_hash_on_queue) + d.addCallback(lambda _: stream_creator_helper.start_streaming()) + return d + + def put_sd_hash_on_queue(sd_hash): + logging.debug("Telling the client to start running. 
Stream hash: %s", str(sd_hash)) + sd_hash_queue.put(sd_hash) + logging.debug("sd hash has been added to the queue") + + def set_dead_event(): + logging.debug("Setting the dead event") + dead_event.set() + + def print_error(err): + logging.debug("An error occurred during shutdown: %s", err.getTraceback()) + + def stop_reactor(): + logging.debug("Server is stopping its reactor") + reactor.stop() + + def shut_down(arg): + logging.debug("Shutting down") + if isinstance(arg, Failure): + logging.error("Shut down is due to an error: %s", arg.getTraceback()) + d = defer.maybeDeferred(server_port[0].stopListening) + d.addErrback(print_error) + d.addCallback(lambda _: session.shut_down()) + d.addCallback(lambda _: stream_info_manager.stop()) + d.addErrback(print_error) + d.addCallback(lambda _: set_dead_event()) + d.addErrback(print_error) + d.addCallback(lambda _: reactor.callLater(0, stop_reactor)) + d.addErrback(print_error) + return d + + def wait_for_kill_event(): + + d = defer.Deferred() + + def check_for_kill(): + if kill_event.is_set(): + logging.debug("Kill event has been found set") + kill_check.stop() + d.callback(True) + + kill_check = task.LoopingCall(check_for_kill) + kill_check.start(1.0) + + return d + + def enable_live_stream(): + return add_live_stream_to_sd_identifier(session, stream_info_manager, sd_identifier) + + def run_server(): + d = session.setup() + d.addCallback(lambda _: stream_info_manager.setup()) + d.addCallback(lambda _: enable_live_stream()) + d.addCallback(lambda _: start_listening()) + d.addCallback(lambda _: create_stream()) + d.addCallback(lambda _: wait_for_kill_event()) + d.addBoth(shut_down) + return d + + reactor.callLater(1, run_server) + reactor.run() + + +def start_blob_uploader(blob_hash_queue, kill_event, dead_event, slow): + + sys.modules = sys.modules.copy() + + del sys.modules['twisted.internet.reactor'] + + import twisted.internet + + twisted.internet.reactor = twisted.internet.epollreactor.EPollReactor() + + sys.modules['twisted.internet.reactor'] = twisted.internet.reactor + + from twisted.internet import reactor + + logging.debug("Starting the uploader") + + Random.atfork() + + wallet = FakeWallet() + peer_manager = PeerManager() + peer_finder = FakePeerFinder(5554, peer_manager) + hash_announcer = FakeAnnouncer() + rate_limiter = RateLimiter() + + if slow is True: + peer_port = 5553 + db_dir = "server1" + else: + peer_port = 5554 + db_dir = "server2" + blob_dir = os.path.join(db_dir, "blobfiles") + os.mkdir(db_dir) + os.mkdir(blob_dir) + + session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="efgh", + peer_finder=peer_finder, hash_announcer=hash_announcer, + blob_dir=blob_dir, peer_port=peer_port, + use_upnp=False, rate_limiter=rate_limiter, wallet=wallet) + + if slow is True: + session.rate_limiter.set_ul_limit(2**11) + + def start_all(): + d = session.setup() + d.addCallback(lambda _: start_server()) + d.addCallback(lambda _: create_single_blob()) + d.addCallback(put_blob_hash_on_queue) + + def print_error(err): + logging.critical("Server error: %s", err.getErrorMessage()) + + d.addErrback(print_error) + return d + + def start_server(): + + server_port = None + + query_handler_factories = { + BlobAvailabilityHandlerFactory(session.blob_manager): True, + BlobRequestHandlerFactory(session.blob_manager, session.wallet, + PaymentRateManager(session.base_payment_rate_manager)): True, + session.wallet.get_wallet_info_query_handler_factory(): True, + } + + server_factory = ServerProtocolFactory(session.rate_limiter, + 
query_handler_factories, + session.peer_manager) + + server_port = reactor.listenTCP(peer_port, server_factory) + logging.debug("Started listening") + + def kill_server(): + ds = [] + ds.append(session.shut_down()) + if server_port: + ds.append(server_port.stopListening()) + kill_check.stop() + dead_event.set() + dl = defer.DeferredList(ds) + dl.addCallback(lambda _: reactor.stop()) + return dl + + def check_for_kill(): + if kill_event.is_set(): + kill_server() + + kill_check = task.LoopingCall(check_for_kill) + kill_check.start(1.0) + return True + + def create_single_blob(): + blob_creator = session.blob_manager.get_blob_creator() + blob_creator.write("0" * 2**21) + return blob_creator.close() + + def put_blob_hash_on_queue(blob_hash): + logging.debug("Telling the client to start running. Blob hash: %s", str(blob_hash)) + blob_hash_queue.put(blob_hash) + logging.debug("blob hash has been added to the queue") + + reactor.callLater(1, start_all) + reactor.run() + + +class TestTransfer(TestCase): + def setUp(self): + self.server_processes = [] + self.session = None + self.stream_info_manager = None + self.lbry_file_manager = None + self.addCleanup(self.take_down_env) + + def take_down_env(self): + + d = defer.succeed(True) + if self.lbry_file_manager is not None: + d.addCallback(lambda _: self.lbry_file_manager.stop()) + if self.session is not None: + d.addCallback(lambda _: self.session.shut_down()) + if self.stream_info_manager is not None: + d.addCallback(lambda _: self.stream_info_manager.stop()) + + def delete_test_env(): + dirs = ['server', 'server1', 'server2', 'client'] + files = ['test_file'] + for di in dirs: + if os.path.exists(di): + shutil.rmtree(di) + for f in files: + if os.path.exists(f): + os.remove(f) + for p in self.server_processes: + p.terminate() + return True + + d.addCallback(lambda _: threads.deferToThread(delete_test_env)) + return d + + @staticmethod + def wait_for_dead_event(dead_event): + + from twisted.internet import reactor + d = defer.Deferred() + + def stop(): + dead_check.stop() + if stop_call.active(): + stop_call.cancel() + d.callback(True) + + def check_if_dead_event_set(): + if dead_event.is_set(): + logging.debug("Dead event has been found set") + stop() + + def done_waiting(): + logging.warning("Dead event has not been found set and timeout has expired") + stop() + + dead_check = task.LoopingCall(check_if_dead_event_set) + dead_check.start(.1) + stop_call = reactor.callLater(15, done_waiting) + return d + + @staticmethod + def wait_for_hash_from_queue(hash_queue): + logging.debug("Waiting for the sd_hash to come through the queue") + + d = defer.Deferred() + + def check_for_start(): + if hash_queue.empty() is False: + logging.debug("Client start event has been found set") + start_check.stop() + d.callback(hash_queue.get(False)) + else: + logging.debug("Client start event has NOT been found set") + + start_check = task.LoopingCall(check_for_start) + start_check.start(1.0) + + return d + + def test_lbry_transfer(self): + + sd_hash_queue = Queue() + kill_event = Event() + dead_event = Event() + uploader = Process(target=start_lbry_uploader, args=(sd_hash_queue, kill_event, dead_event)) + uploader.start() + self.server_processes.append(uploader) + + logging.debug("Testing transfer") + + wallet = FakeWallet() + peer_manager = PeerManager() + peer_finder = FakePeerFinder(5553, peer_manager) + hash_announcer = FakeAnnouncer() + rate_limiter = DummyRateLimiter() + sd_identifier = StreamDescriptorIdentifier() + + db_dir = "client" + blob_dir = 
os.path.join(db_dir, "blobfiles") + os.mkdir(db_dir) + os.mkdir(blob_dir) + + self.session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd", + peer_finder=peer_finder, hash_announcer=hash_announcer, + blob_dir=blob_dir, peer_port=5553, + use_upnp=False, rate_limiter=rate_limiter, wallet=wallet) + + self.stream_info_manager = TempLBRYFileMetadataManager() + + self.lbry_file_manager = LBRYFileManager(self.session, self.stream_info_manager, sd_identifier) + + def make_downloader(info_and_factories, prm): + info_validator, factories = info_and_factories + options = [o.default for o in factories[0].get_downloader_options(info_validator, prm)] + return factories[0].make_downloader(info_validator, options, prm) + + def download_file(sd_hash): + prm = PaymentRateManager(self.session.base_payment_rate_manager) + d = download_sd_blob(self.session, sd_hash, prm) + d.addCallback(sd_identifier.get_info_and_factories_for_sd_blob) + d.addCallback(make_downloader, prm) + d.addCallback(lambda downloader: downloader.start()) + return d + + def check_md5_sum(): + f = open('test_file') + hashsum = MD5.new() + hashsum.update(f.read()) + self.assertEqual(hashsum.hexdigest(), "4ca2aafb4101c1e42235aad24fbb83be") + + def start_transfer(sd_hash): + + logging.debug("Starting the transfer") + + d = self.session.setup() + d.addCallback(lambda _: self.lbry_file_manager.setup()) + d.addCallback(lambda _: download_file(sd_hash)) + d.addCallback(lambda _: check_md5_sum()) + + return d + + def stop(arg): + if isinstance(arg, Failure): + logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback()) + else: + logging.debug("Client is stopping normally.") + kill_event.set() + logging.debug("Set the kill event") + d = self.wait_for_dead_event(dead_event) + + def print_shutting_down(): + logging.info("Client is shutting down") + + d.addCallback(lambda _: print_shutting_down()) + d.addCallback(lambda _: arg) + return d + + d = self.wait_for_hash_from_queue(sd_hash_queue) + d.addCallback(start_transfer) + d.addBoth(stop) + + return d + + def test_live_transfer(self): + + sd_hash_queue = Queue() + kill_event = Event() + dead_event = Event() + server_args = (sd_hash_queue, kill_event, dead_event) + server = Process(target=start_live_server, args=server_args) + server.start() + self.server_processes.append(server) + + wallet = FakeWallet() + peer_manager = PeerManager() + peer_finder = FakePeerFinder(5553, peer_manager) + hash_announcer = FakeAnnouncer() + rate_limiter = DummyRateLimiter() + sd_identifier = StreamDescriptorIdentifier() + + db_dir = "client" + os.mkdir(db_dir) + + self.session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd", + peer_finder=peer_finder, hash_announcer=hash_announcer, blob_dir=None, + peer_port=5553, use_upnp=False, rate_limiter=rate_limiter, wallet=wallet) + + self.stream_info_manager = TempLiveStreamMetadataManager(hash_announcer) + + d = self.wait_for_hash_from_queue(sd_hash_queue) + + def create_downloader(info_and_factories, prm): + info_validator, factories = info_and_factories + options = [o.default for o in factories[0].get_downloader_options(info_validator, prm)] + return factories[0].make_downloader(info_validator, options, prm) + + def start_lbry_file(lbry_file): + lbry_file = lbry_file + logging.debug("Calling lbry_file.start()") + return lbry_file.start() + + def download_stream(sd_blob_hash): + logging.debug("Downloaded the sd blob. 
Reading it now") + prm = PaymentRateManager(self.session.base_payment_rate_manager) + d = download_sd_blob(self.session, sd_blob_hash, prm) + d.addCallback(sd_identifier.get_info_and_factories_for_sd_blob) + d.addCallback(create_downloader, prm) + d.addCallback(start_lbry_file) + return d + + def do_download(sd_blob_hash): + logging.debug("Starting the download") + d = self.session.setup() + d.addCallback(lambda _: enable_live_stream()) + d.addCallback(lambda _: download_stream(sd_blob_hash)) + return d + + def enable_live_stream(): + return add_live_stream_to_sd_identifier(self.session, self.stream_info_manager, sd_identifier) + + d.addCallback(do_download) + + def check_md5_sum(): + f = open('test_file') + hashsum = MD5.new() + hashsum.update(f.read()) + self.assertEqual(hashsum.hexdigest(), "215b177db8eed86d028b37e5cbad55c7") + + d.addCallback(lambda _: check_md5_sum()) + + def stop(arg): + if isinstance(arg, Failure): + logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback()) + else: + logging.debug("Client is stopping normally.") + kill_event.set() + logging.debug("Set the kill event") + d = self.wait_for_dead_event(dead_event) + + def print_shutting_down(): + logging.info("Client is shutting down") + + d.addCallback(lambda _: print_shutting_down()) + d.addCallback(lambda _: arg) + return d + + d.addBoth(stop) + return d + + def test_last_blob_retrieval(self): + + kill_event = Event() + dead_event_1 = Event() + blob_hash_queue_1 = Queue() + blob_hash_queue_2 = Queue() + fast_uploader = Process(target=start_blob_uploader, + args=(blob_hash_queue_1, kill_event, dead_event_1, False)) + fast_uploader.start() + self.server_processes.append(fast_uploader) + dead_event_2 = Event() + slow_uploader = Process(target=start_blob_uploader, + args=(blob_hash_queue_2, kill_event, dead_event_2, True)) + slow_uploader.start() + self.server_processes.append(slow_uploader) + + logging.debug("Testing transfer") + + wallet = FakeWallet() + peer_manager = PeerManager() + peer_finder = FakeTwoPeerFinder(5553, peer_manager) + hash_announcer = FakeAnnouncer() + rate_limiter = DummyRateLimiter() + + db_dir = "client" + blob_dir = os.path.join(db_dir, "blobfiles") + os.mkdir(db_dir) + os.mkdir(blob_dir) + + self.session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd", + peer_finder=peer_finder, hash_announcer=hash_announcer, + blob_dir=blob_dir, peer_port=5553, + use_upnp=False, rate_limiter=rate_limiter, wallet=wallet) + + d1 = self.wait_for_hash_from_queue(blob_hash_queue_1) + d2 = self.wait_for_hash_from_queue(blob_hash_queue_2) + d = defer.DeferredList([d1, d2], fireOnOneErrback=True) + + def get_blob_hash(results): + self.assertEqual(results[0][1], results[1][1]) + return results[0][1] + + d.addCallback(get_blob_hash) + + def download_blob(blob_hash): + prm = PaymentRateManager(self.session.base_payment_rate_manager) + downloader = StandaloneBlobDownloader(blob_hash, self.session.blob_manager, peer_finder, + rate_limiter, prm, wallet) + d = downloader.download() + return d + + def start_transfer(blob_hash): + + logging.debug("Starting the transfer") + + d = self.session.setup() + d.addCallback(lambda _: download_blob(blob_hash)) + + return d + + d.addCallback(start_transfer) + + def stop(arg): + if isinstance(arg, Failure): + logging.debug("Client is stopping due to an error. 
Error: %s", arg.getTraceback()) + else: + logging.debug("Client is stopping normally.") + kill_event.set() + logging.debug("Set the kill event") + d1 = self.wait_for_dead_event(dead_event_1) + d2 = self.wait_for_dead_event(dead_event_2) + dl = defer.DeferredList([d1, d2]) + + def print_shutting_down(): + logging.info("Client is shutting down") + + dl.addCallback(lambda _: print_shutting_down()) + dl.addCallback(lambda _: arg) + return dl + + d.addBoth(stop) + + return d + + +class TestStreamify(TestCase): + + def setUp(self): + self.session = None + self.stream_info_manager = None + self.lbry_file_manager = None + self.addCleanup(self.take_down_env) + + def take_down_env(self): + + d = defer.succeed(True) + if self.lbry_file_manager is not None: + d.addCallback(lambda _: self.lbry_file_manager.stop()) + if self.session is not None: + d.addCallback(lambda _: self.session.shut_down()) + if self.stream_info_manager is not None: + d.addCallback(lambda _: self.stream_info_manager.stop()) + + def delete_test_env(): + shutil.rmtree('client') + + d.addCallback(lambda _: threads.deferToThread(delete_test_env)) + return d + + def test_create_stream(self): + + wallet = FakeWallet() + peer_manager = PeerManager() + peer_finder = FakeTwoPeerFinder(5553, peer_manager) + hash_announcer = FakeAnnouncer() + rate_limiter = DummyRateLimiter() + sd_identifier = StreamDescriptorIdentifier() + + db_dir = "client" + blob_dir = os.path.join(db_dir, "blobfiles") + os.mkdir(db_dir) + os.mkdir(blob_dir) + + self.session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd", + peer_finder=peer_finder, hash_announcer=hash_announcer, + blob_dir=blob_dir, peer_port=5553, + use_upnp=False, rate_limiter=rate_limiter, wallet=wallet) + + self.stream_info_manager = TempLBRYFileMetadataManager() + + self.lbry_file_manager = LBRYFileManager(self.session, self.stream_info_manager, sd_identifier) + + d = self.session.setup() + d.addCallback(lambda _: self.stream_info_manager.setup()) + d.addCallback(lambda _: self.lbry_file_manager.setup()) + + def verify_equal(sd_info): + self.assertEqual(sd_info, test_create_stream_sd_file) + + def verify_stream_descriptor_file(stream_hash): + d = get_sd_info(self.lbry_file_manager.stream_info_manager, stream_hash, True) + d.addCallback(verify_equal) + return d + + def iv_generator(): + iv = 0 + while 1: + iv += 1 + yield "%016d" % iv + + def create_stream(): + test_file = GenFile(5209343, b''.join([chr(i + 3) for i in xrange(0, 64, 6)])) + d = create_lbry_file(self.session, self.lbry_file_manager, "test_file", test_file, + key="0123456701234567", iv_generator=iv_generator()) + return d + + d.addCallback(lambda _: create_stream()) + d.addCallback(verify_stream_descriptor_file) + return d + + def test_create_and_combine_stream(self): + + wallet = FakeWallet() + peer_manager = PeerManager() + peer_finder = FakeTwoPeerFinder(5553, peer_manager) + hash_announcer = FakeAnnouncer() + rate_limiter = DummyRateLimiter() + sd_identifier = StreamDescriptorIdentifier() + + db_dir = "client" + blob_dir = os.path.join(db_dir, "blobfiles") + os.mkdir(db_dir) + os.mkdir(blob_dir) + + self.session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd", + peer_finder=peer_finder, hash_announcer=hash_announcer, + blob_dir=blob_dir, peer_port=5553, + use_upnp=False, rate_limiter=rate_limiter, wallet=wallet) + + self.stream_info_manager = DBLBRYFileMetadataManager(self.session.db_dir) + + self.lbry_file_manager = LBRYFileManager(self.session, self.stream_info_manager, 
sd_identifier) + + def start_lbry_file(lbry_file): + logging.debug("Calling lbry_file.start()") + d = lbry_file.start() + return d + + def combine_stream(stream_hash): + + prm = PaymentRateManager(self.session.base_payment_rate_manager) + d = self.lbry_file_manager.add_lbry_file(stream_hash, prm) + d.addCallback(start_lbry_file) + + def check_md5_sum(): + f = open('test_file') + hashsum = MD5.new() + hashsum.update(f.read()) + self.assertEqual(hashsum.hexdigest(), "68959747edc73df45e45db6379dd7b3b") + + d.addCallback(lambda _: check_md5_sum()) + return d + + def create_stream(): + test_file = GenFile(53209343, b''.join([chr(i + 5) for i in xrange(0, 64, 6)])) + return create_lbry_file(self.session, self.lbry_file_manager, "test_file", test_file, + suggested_file_name="test_file") + + d = self.session.setup() + d.addCallback(lambda _: self.stream_info_manager.setup()) + d.addCallback(lambda _: self.lbry_file_manager.setup()) + d.addCallback(lambda _: create_stream()) + d.addCallback(combine_stream) + return d \ No newline at end of file
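
The MD5 digests hard-coded in TestTransfer and TestStreamify above correspond to files generated deterministically by GenFile, so they can be recomputed outside of a test run. The snippet below is a minimal sketch of how to do so; it assumes the module is importable as tests.functional_tests and that GenFile behaves exactly as defined above (the helper name expected_md5 is illustrative, not part of the code base).

from Crypto.Hash import MD5

from tests.functional_tests import GenFile  # assumed import path

def expected_md5(size, pattern):
    # Generate the same deterministic byte stream the tests feed into
    # create_lbry_file / FileLiveStreamCreator, then hash it.
    hashsum = MD5.new()
    hashsum.update(GenFile(size, pattern).read())
    return hashsum.hexdigest()

if __name__ == '__main__':
    # the stream created in TestStreamify.test_create_and_combine_stream
    print expected_md5(53209343, b''.join([chr(i + 5) for i in xrange(0, 64, 6)]))
    # the file uploaded by start_lbry_uploader and checked in test_lbry_transfer
    print expected_md5(5209343, b''.join([chr(i) for i in xrange(0, 64, 6)]))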