forked from LBRYCommunity/lbry-sdk
Merge pull request #317 from lbryio/remove-lbrynet-console
Remove lbrynet-console
This commit is contained in:
commit
f84df50d7f
22 changed files with 1 additions and 4858 deletions
|
@ -1,574 +0,0 @@
|
|||
# TODO: THERE IS A LOT OF CODE IN THIS MODULE THAT SHOULD BE REMOVED
|
||||
# AS IT IS REPEATED IN THE LBRYDaemon MODULE
|
||||
import logging
|
||||
import os.path
|
||||
import argparse
|
||||
import requests
|
||||
import locale
|
||||
import sys
|
||||
import webbrowser
|
||||
|
||||
if sys.platform == "darwin":
|
||||
from appdirs import user_data_dir
|
||||
from yapsy.PluginManager import PluginManager
|
||||
from twisted.internet import defer, threads, stdio, task, error
|
||||
from lbrynet.lbrynet_daemon.auth.client import LBRYAPIClient
|
||||
|
||||
from lbrynet import analytics
|
||||
from lbrynet.core.Session import Session
|
||||
from lbrynet.lbrynet_console.ConsoleControl import ConsoleControl
|
||||
from lbrynet.lbrynet_console.Settings import Settings
|
||||
from lbrynet.lbryfilemanager.EncryptedFileManager import EncryptedFileManager
|
||||
from lbrynet.conf import settings
|
||||
from lbrynet.core.utils import generate_id
|
||||
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier
|
||||
from lbrynet.core.PaymentRateManager import PaymentRateManager
|
||||
from lbrynet.core.server.BlobRequestHandler import BlobRequestHandlerFactory
|
||||
from lbrynet.core.server.ServerProtocol import ServerProtocolFactory
|
||||
from lbrynet.core.PTCWallet import PTCWallet
|
||||
from lbrynet.lbryfile.client.EncryptedFileOptions import add_lbry_file_to_sd_identifier
|
||||
from lbrynet.lbryfile.client.EncryptedFileDownloader import EncryptedFileOpenerFactory
|
||||
from lbrynet.lbryfile.StreamDescriptor import EncryptedFileStreamType
|
||||
from lbrynet.lbryfile.EncryptedFileMetadataManager import DBEncryptedFileMetadataManager, TempEncryptedFileMetadataManager
|
||||
from lbrynet.lbrynet_console.ControlHandlers import ApplicationStatusFactory, GetWalletBalancesFactory, ShutDownFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import ImmediateAnnounceAllBlobsFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import EncryptedFileStatusFactory, DeleteEncryptedFileChooserFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import ToggleEncryptedFileRunningChooserFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import ModifyApplicationDefaultsFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import CreateEncryptedFileFactory, PublishStreamDescriptorChooserFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import ShowPublishedSDHashesChooserFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import CreatePlainStreamDescriptorChooserFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import ShowEncryptedFileStreamHashChooserFactory, AddStreamFromHashFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import AddStreamFromSDFactory, AddStreamFromLBRYcrdNameFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import ClaimNameFactory, GetNewWalletAddressFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import ShowServerStatusFactory, ModifyServerSettingsFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import ModifyEncryptedFileOptionsChooserFactory, StatusFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import PeerStatsAndSettingsChooserFactory, PublishFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import BlockchainStatusFactory
|
||||
from lbrynet.core.Wallet import LBRYumWallet
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
alert = logging.getLogger("lbryalert." + __name__)
|
||||
|
||||
|
||||
class Console():
    """A class which can upload and download file streams to and from the network"""
    def __init__(self, peer_port, dht_node_port, known_dht_nodes, fake_wallet,
                 lbrycrd_conf, lbrycrd_dir, use_upnp, data_dir, created_data_dir,
                 lbrycrdd_path):
        """
        @param peer_port: the network port on which to listen for peers

        @param dht_node_port: the network port on which to listen for dht node requests

        @param known_dht_nodes: a list of (ip_address, dht_port) which will be used to join the DHT network
        """
        self.peer_port = peer_port
        self.dht_node_port = dht_node_port
        self.known_dht_nodes = known_dht_nodes
        self.fake_wallet = fake_wallet
        self.lbrycrd_conf = lbrycrd_conf
        self.lbrycrd_dir = lbrycrd_dir
        if not self.lbrycrd_dir:
            # Platform-specific default location for the lbrycrd wallet data.
            if sys.platform == "darwin":
                self.lbrycrd_dir = os.path.join(os.path.expanduser("~"), "Library/Application Support/lbrycrd")
            else:
                self.lbrycrd_dir = os.path.join(os.path.expanduser("~"), ".lbrycrd")
        if not self.lbrycrd_conf:
            self.lbrycrd_conf = os.path.join(self.lbrycrd_dir, "lbrycrd.conf")
        self.lbrycrdd_path = lbrycrdd_path
        self.use_upnp = use_upnp
        self.lbry_server_port = None  # set by start_server()
        self.session = None
        self.lbry_file_metadata_manager = None
        self.lbry_file_manager = None
        self.db_dir = data_dir
        self.current_db_revision = 1
        self.blobfile_dir = os.path.join(self.db_dir, "blobfiles")
        self.created_data_dir = created_data_dir
        # Plugins are searched for both in the data directory and next to
        # this module.
        self.plugin_manager = PluginManager()
        self.plugin_manager.setPluginPlaces([
            os.path.join(self.db_dir, "plugins"),
            os.path.join(os.path.dirname(__file__), "plugins"),
        ])
        self.command_handlers = []
        self.query_handlers = {}

        self.settings = Settings(self.db_dir)
        self.blob_request_payment_rate_manager = None
        self.lbryid = None
        self.sd_identifier = StreamDescriptorIdentifier()
        self.plugin_objects = []
        self.db_migration_revisions = None
|
||||
|
||||
def start(self):
    """Initialize the session and restore everything to its saved state"""
    # The callbacks below run strictly in order; later steps depend on
    # state created by earlier ones (e.g. the session must exist before
    # the file manager can be set up).
    d = self._setup_controller()
    d.addCallback(lambda _: threads.deferToThread(self._setup_data_directory))
    d.addCallback(lambda _: self._check_db_migration())
    d.addCallback(lambda _: self._get_settings())
    d.addCallback(lambda _: self._get_session())
    d.addCallback(lambda _: add_lbry_file_to_sd_identifier(self.sd_identifier))
    d.addCallback(lambda _: self._setup_lbry_file_manager())
    d.addCallback(lambda _: self._setup_lbry_file_opener())
    d.addCallback(lambda _: self._setup_control_handlers())
    d.addCallback(lambda _: self._setup_query_handlers())
    d.addCallback(lambda _: self._load_plugins())
    d.addCallback(lambda _: self._setup_server())
    d.addCallback(lambda _: self._start_controller())
    d.addErrback(self._show_start_error)
    return d
|
||||
|
||||
def _show_start_error(self, error):
|
||||
print error.getTraceback()
|
||||
log.error("An error occurred during start up: %s", error.getTraceback())
|
||||
return error
|
||||
|
||||
def _show_shutdown_error(self, error):
|
||||
print error.getErrorMessage()
|
||||
log.error("An error occurred during shutdown: %s", error.getTraceback())
|
||||
return error
|
||||
|
||||
def shut_down(self):
    """Stop the session, all currently running streams, and stop the server"""
    shutdown_d = self._shut_down()
    # The session only exists after a successful start; skip it otherwise.
    if self.session is not None:
        shutdown_d.addCallback(lambda _: self.session.shut_down())
    shutdown_d.addErrback(self._show_shutdown_error)
    return shutdown_d
|
||||
|
||||
def add_control_handlers(self, control_handlers):
    """Register command handler factories with the console.

    @param control_handlers: an iterable of handler factory objects
    """
    # list.extend replaces the manual append loop (same behavior, idiomatic).
    self.command_handlers.extend(control_handlers)
|
||||
|
||||
def add_query_handlers(self, query_handlers):
    """Register query handlers along with the enabled/disabled status that
    settings has stored for each handler's primary query identifier."""

    def _set_query_handlers(statuses):
        # future_builtins.zip is the lazy (izip) variant on Python 2.
        from future_builtins import zip
        # statuses is a DeferredList result: (success, status) pairs in the
        # same order as query_handlers; failed lookups are skipped.
        for handler, (success, status) in zip(query_handlers, statuses):
            if success is True:
                self.query_handlers[handler] = status

    ds = []
    for handler in query_handlers:
        ds.append(self.settings.get_query_handler_status(handler.get_primary_query_identifier()))
    dl = defer.DeferredList(ds)
    dl.addCallback(_set_query_handlers)
    return dl
|
||||
|
||||
def _setup_data_directory(self):
    """Create the db revision file and the blobfile directory on first run.

    Runs in a worker thread (see start()), so blocking file I/O is fine here.
    """
    alert.info("Loading databases...")
    if self.created_data_dir:
        # Fix: use a context manager so the file handle is closed even if
        # the write raises (the original left the handle open on error).
        with open(os.path.join(self.db_dir, "db_revision"), mode='w') as db_revision:
            db_revision.write(str(self.current_db_revision))
        log.debug("Created the db revision file: %s", str(os.path.join(self.db_dir, "db_revision")))
    if not os.path.exists(self.blobfile_dir):
        os.mkdir(self.blobfile_dir)
        log.debug("Created the blobfile directory: %s", str(self.blobfile_dir))
|
||||
|
||||
def _check_db_migration(self):
|
||||
old_revision = 1
|
||||
db_revision_file = os.path.join(self.db_dir, "db_revision")
|
||||
if os.path.exists(db_revision_file):
|
||||
old_revision = int(open(db_revision_file).read().strip())
|
||||
if old_revision < self.current_db_revision:
|
||||
from lbrynet.db_migrator import dbmigrator
|
||||
print "Upgrading your databases..."
|
||||
d = threads.deferToThread(dbmigrator.migrate_db, self.db_dir, old_revision, self.current_db_revision)
|
||||
|
||||
def print_success(old_dirs):
|
||||
success_string = "Finished upgrading the databases. It is now safe to delete the"
|
||||
success_string += " following directories, if you feel like it. It won't make any"
|
||||
success_string += " difference.\nAnyway here they are: "
|
||||
for i, old_dir in enumerate(old_dirs):
|
||||
success_string += old_dir
|
||||
if i + 1 < len(old_dir):
|
||||
success_string += ", "
|
||||
print success_string
|
||||
|
||||
d.addCallback(print_success)
|
||||
return d
|
||||
return defer.succeed(True)
|
||||
|
||||
def _get_settings(self):
    """Load persisted settings, then fetch and apply the stored lbry id."""
    settings_d = self.settings.start()
    settings_d.addCallback(lambda _: self.settings.get_lbryid())
    settings_d.addCallback(self.set_lbryid)
    return settings_d
|
||||
|
||||
def set_lbryid(self, lbryid):
    """Adopt the stored lbry id, or generate and persist a new one if absent."""
    if lbryid is not None:
        self.lbryid = lbryid
    else:
        return self._make_lbryid()
|
||||
|
||||
def _make_lbryid(self):
    """Generate a fresh node id and persist it through settings."""
    self.lbryid = generate_id()
    return self.settings.save_lbryid(self.lbryid)
|
||||
|
||||
def _get_session(self):
    """Build self.session from stored settings and the wallet.

    Returns a deferred that fires after the session is set up and the
    first-run free-credit check has completed.
    """
    def get_default_data_rate():
        # Fall back to the global settings.data_rate when no rate was saved.
        d = self.settings.get_default_data_payment_rate()
        d.addCallback(lambda rate: {"default_data_payment_rate": rate if rate is not None else settings.data_rate})
        return d

    def get_wallet():
        # PTCWallet is a testing-only stand-in for the blockchain wallet.
        if self.fake_wallet:
            d = defer.succeed(PTCWallet(self.db_dir))
        else:
            d = defer.succeed(LBRYumWallet(self.db_dir))
        d.addCallback(lambda wallet: {"wallet": wallet})
        return d

    d1 = get_default_data_rate()
    d2 = get_wallet()

    def combine_results(results):
        # Merge the single-key dicts produced above into one kwargs dict.
        r = {}
        for success, result in results:
            if success is True:
                r.update(result)
        return r

    def create_session(results):
        alert.info("Databases loaded.")
        self.session = Session(results['default_data_payment_rate'], db_dir=self.db_dir, lbryid=self.lbryid,
                               blob_dir=self.blobfile_dir, dht_node_port=self.dht_node_port,
                               known_dht_nodes=self.known_dht_nodes, peer_port=self.peer_port,
                               use_upnp=self.use_upnp, wallet=results['wallet'])

    # fireOnOneErrback: abort immediately if either the rate lookup or the
    # wallet construction fails.
    dl = defer.DeferredList([d1, d2], fireOnOneErrback=True)
    dl.addCallback(combine_results)
    dl.addCallback(create_session)
    dl.addCallback(lambda _: self.session.setup())
    dl.addCallback(lambda _: self.check_first_run())
    dl.addCallback(self._show_first_run_result)
    return dl
|
||||
|
||||
def check_first_run(self):
    """Request free credits on a wallet's first run; fires with 0.0 otherwise."""
    d = self.session.wallet.is_first_run()

    def maybe_request(is_first_run):
        return self._do_first_run() if is_first_run else 0.0

    d.addCallback(maybe_request)
    return d
|
||||
|
||||
def _do_first_run(self):
    """Ask the LBRY faucet for free testing credits for a new address.

    Returns a deferred that fires with the number of credits sent
    (0.0 on failure or when the faucet declines).
    """
    d = self.session.wallet.get_new_address()

    def send_request(url, data):
        r = requests.post(url, json=data)
        if r.status_code == 200:
            return r.json()['credits_sent']
        return 0.0

    def log_error(err):
        # Best effort: a failed faucet request must not abort startup.
        log.warning("unable to request free credits. %s", err.getErrorMessage())
        return 0.0

    def request_credits(address):
        url = "http://credreq.lbry.io/requestcredits"
        data = {"address": address}
        # requests is blocking, so run it off the reactor thread.
        d = threads.deferToThread(send_request, url, data)
        d.addErrback(log_error)
        return d

    d.addCallback(request_credits)
    return d
|
||||
|
||||
@staticmethod
def _show_first_run_result(credits_received):
    """Tell the user about any free credits that were granted."""
    if credits_received != 0.0:
        # Format with the user's locale (thousands grouping etc.).
        points_string = locale.format_string("%.2f LBC", (round(credits_received, 2),),
                                             grouping=True)
        alert.info("\n\nThank you for testing the alpha version of LBRY!\n\n"
                   "You have been given %s for free because we love you.\n"
                   "Please give them a few minutes to show up while you\n"
                   "catch up with our blockchain.\n", points_string)
|
||||
|
||||
def _setup_lbry_file_manager(self):
    """Set up the metadata manager, then the file manager built on top of it."""
    self.lbry_file_metadata_manager = DBEncryptedFileMetadataManager(self.db_dir)

    def make_file_manager(_):
        self.lbry_file_manager = EncryptedFileManager(self.session, self.lbry_file_metadata_manager, self.sd_identifier)
        return self.lbry_file_manager.setup()

    setup_d = self.lbry_file_metadata_manager.setup()
    setup_d.addCallback(make_file_manager)
    return setup_d
|
||||
|
||||
def _setup_lbry_file_opener(self):
    """Register the downloader factory used for opening encrypted streams."""
    stream_info_manager = TempEncryptedFileMetadataManager()
    opener_factory = EncryptedFileOpenerFactory(self.session.peer_finder, self.session.rate_limiter,
                                                self.session.blob_manager, stream_info_manager,
                                                self.session.wallet)
    self.sd_identifier.add_stream_downloader_factory(EncryptedFileStreamType, opener_factory)
    return defer.succeed(True)
|
||||
|
||||
def _setup_control_handlers(self):
    """Build the console command handler factories, adding wallet- and
    server-specific commands only when they apply."""
    handlers = [
        ApplicationStatusFactory(self.session.rate_limiter, self.session.dht_node),
        GetWalletBalancesFactory(self.session.wallet),
        ModifyApplicationDefaultsFactory(self),
        ShutDownFactory(self),
        PeerStatsAndSettingsChooserFactory(self.session.peer_manager),
        EncryptedFileStatusFactory(self.lbry_file_manager),
        AddStreamFromSDFactory(self.sd_identifier, self.session.base_payment_rate_manager,
                               self.session.wallet),
        DeleteEncryptedFileChooserFactory(self.lbry_file_metadata_manager, self.session.blob_manager,
                                          self.lbry_file_manager),
        ToggleEncryptedFileRunningChooserFactory(self.lbry_file_manager),
        CreateEncryptedFileFactory(self.session, self.lbry_file_manager),
        PublishStreamDescriptorChooserFactory(self.lbry_file_metadata_manager,
                                              self.session.blob_manager),
        ShowPublishedSDHashesChooserFactory(self.lbry_file_metadata_manager,
                                            self.lbry_file_manager),
        CreatePlainStreamDescriptorChooserFactory(self.lbry_file_manager),
        ShowEncryptedFileStreamHashChooserFactory(self.lbry_file_manager),
        ModifyEncryptedFileOptionsChooserFactory(self.lbry_file_manager),
        AddStreamFromHashFactory(self.sd_identifier, self.session, self.session.wallet),
        StatusFactory(self, self.session.rate_limiter, self.lbry_file_manager,
                      self.session.blob_manager, self.session.wallet if not self.fake_wallet else None),
        ImmediateAnnounceAllBlobsFactory(self.session.blob_manager)
    ]
    self.add_control_handlers(handlers)
    if not self.fake_wallet:
        # Blockchain-backed wallets additionally get the name claim /
        # publish commands.
        lbrycrd_handlers = [
            AddStreamFromLBRYcrdNameFactory(self.sd_identifier, self.session,
                                            self.session.wallet),
            ClaimNameFactory(self.session.wallet, self.lbry_file_manager,
                             self.session.blob_manager),
            GetNewWalletAddressFactory(self.session.wallet),
            PublishFactory(self.session, self.lbry_file_manager, self.session.wallet),
            BlockchainStatusFactory(self.session.wallet)
        ]
        self.add_control_handlers(lbrycrd_handlers)
    if self.peer_port is not None:
        # Server management commands only make sense when listening.
        server_handlers = [
            ShowServerStatusFactory(self),
            ModifyServerSettingsFactory(self),
        ]
        self.add_control_handlers(server_handlers)
|
||||
|
||||
def _setup_query_handlers(self):
    """Build the query handler factories offered to connecting peers."""
    handlers = [
        self.session.wallet.get_wallet_info_query_handler_factory(),
    ]

    def get_blob_request_handler_factory(rate):
        # The server-side payment rate manager governs what peers are
        # charged for blobs.
        self.blob_request_payment_rate_manager = PaymentRateManager(
            self.session.base_payment_rate_manager, rate
        )
        handlers.append(BlobRequestHandlerFactory(
            self.session.blob_manager, self.session.wallet,
            self.blob_request_payment_rate_manager, analytics.Track()
        ))

    d1 = self.settings.get_server_data_payment_rate()
    d1.addCallback(get_blob_request_handler_factory)

    dl = defer.DeferredList([d1])
    dl.addCallback(lambda _: self.add_query_handlers(handlers))
    return dl
|
||||
|
||||
def _load_plugins(self):
    """Collect plugins in a worker thread, then set each one up with this console."""
    collect_d = threads.deferToThread(self.plugin_manager.collectPlugins)

    def setup_collected(_):
        setup_ds = []
        for plugin in self.plugin_manager.getAllPlugins():
            self.plugin_objects.append(plugin.plugin_object)
            setup_ds.append(plugin.plugin_object.setup(self))
        return defer.DeferredList(setup_ds)

    collect_d.addCallback(setup_collected)
    return collect_d
|
||||
|
||||
def _stop_plugins(self):
    """Stop every loaded plugin, collecting the results into one deferred."""
    stop_ds = [defer.maybeDeferred(plugin_object.stop)
               for plugin_object in self.plugin_objects]
    return defer.DeferredList(stop_ds)
|
||||
|
||||
def _setup_server(self):
    """Start the peer server if it was running when the app last shut down."""
    def restore_running_status(running):
        if running is True:
            return self.start_server()
        return defer.succeed(True)

    status_d = self.settings.get_server_running_status()
    status_d.addCallback(restore_running_status)
    return status_d
|
||||
|
||||
def start_server(self):
    """Listen for peer connections on self.peer_port, if one is configured.

    Raises ValueError when the port is already in use (e.g. another
    lbrynet instance is running).
    """
    if self.peer_port is not None:
        server_factory = ServerProtocolFactory(self.session.rate_limiter,
                                               self.query_handlers,
                                               self.session.peer_manager)
        from twisted.internet import reactor
        try:
            self.lbry_server_port = reactor.listenTCP(self.peer_port, server_factory)
        except error.CannotListenError as e:
            import traceback
            log.error("Couldn't bind to port %d. %s", self.peer_port, traceback.format_exc())
            # Bug fix: ValueError was previously given two positional
            # arguments ("%s ...", str(e)), so the %s was never
            # interpolated; format the message explicitly.
            raise ValueError("%s lbrynet may already be running on your computer." % str(e))
    return defer.succeed(True)
|
||||
|
||||
def stop_server(self):
    """Stop listening for peers, if the server is currently running."""
    if self.lbry_server_port is None:
        return defer.succeed(True)
    # Clear the attribute before stopping so repeated calls are no-ops.
    listening_port, self.lbry_server_port = self.lbry_server_port, None
    return defer.maybeDeferred(listening_port.stopListening)
|
||||
|
||||
def _setup_controller(self):
    """Wire the console controller to stdio and route 'lbryalert' log
    records to it so alerts show up directly in the console."""
    self.controller = ConsoleControl()
    stdio.StandardIO(self.controller)
    logger = logging.getLogger()
    formatter = logging.Formatter("%(message)s")
    # The controller doubles as a writable stream for the alert handler.
    alert_handler = logging.StreamHandler(self.controller)
    alert_handler.setFormatter(formatter)
    alert_handler.addFilter(logging.Filter("lbryalert"))
    alert_handler.setLevel(logging.DEBUG)
    logger.addHandler(alert_handler)
    return defer.succeed(True)
|
||||
|
||||
def _start_controller(self):
    """Hand the registered command handlers to the controller and start it."""
    return self.controller.start(self.command_handlers)
|
||||
|
||||
def _shut_down(self):
    """Stop the managers, the server, and all plugins.

    Returns a DeferredList that fires when everything has stopped.
    """
    self.plugin_manager = None
    ds = []
    if self.lbry_file_metadata_manager is not None:
        # The file manager is stopped after its metadata manager.
        d = self.lbry_file_metadata_manager.stop()
        d.addCallback(lambda _: self.lbry_file_manager.stop())
        ds.append(d)
    ds.append(self.stop_server())
    ds.append(self._stop_plugins())
    dl = defer.DeferredList(ds)
    return dl
|
||||
|
||||
|
||||
def launch_lbry_console():
|
||||
from twisted.internet import reactor
|
||||
|
||||
parser = argparse.ArgumentParser(description="Launch a lbrynet console")
|
||||
parser.add_argument("--no_listen_peer",
|
||||
help="Don't listen for incoming data connections.",
|
||||
action="store_true")
|
||||
parser.add_argument("--peer_port",
|
||||
help="The port on which the console will listen for incoming data connections.",
|
||||
type=int, default=3333)
|
||||
parser.add_argument("--no_listen_dht",
|
||||
help="Don't listen for incoming DHT connections.",
|
||||
action="store_true")
|
||||
parser.add_argument("--dht_node_port",
|
||||
help="The port on which the console will listen for DHT connections.",
|
||||
type=int, default=4444)
|
||||
parser.add_argument("--fake_wallet",
|
||||
help="Testing purposes only. Use a non-blockchain wallet.",
|
||||
action="store_true")
|
||||
parser.add_argument("--no_dht_bootstrap",
|
||||
help="Don't try to connect to the DHT",
|
||||
action="store_true")
|
||||
parser.add_argument("--dht_bootstrap_host",
|
||||
help="The hostname of a known DHT node, to be used to bootstrap into the DHT. "
|
||||
"Must be used with --dht_bootstrap_port",
|
||||
type=str, default='104.236.42.182')
|
||||
parser.add_argument("--dht_bootstrap_port",
|
||||
help="The port of a known DHT node, to be used to bootstrap into the DHT. Must "
|
||||
"be used with --dht_bootstrap_host",
|
||||
type=int, default=4000)
|
||||
parser.add_argument("--disable_upnp",
|
||||
help="Don't try to use UPnP to enable incoming connections through the firewall",
|
||||
action="store_true")
|
||||
parser.add_argument("--data_dir",
|
||||
help=("The full path to the directory in which lbrynet data and metadata will be stored. "
|
||||
"Default: ~/.lbrynet on linux, ~/Library/Application Support/lbrynet on OS X"),
|
||||
type=str)
|
||||
parser.add_argument("--lbrycrdd_path",
|
||||
help="The path to lbrycrdd, which will be launched if it isn't running. If"
|
||||
"this option is chosen, lbrycrdd will be used as the interface to the"
|
||||
"blockchain. By default, a lightweight interface is used.")
|
||||
parser.add_argument("--lbrycrd_wallet_dir",
|
||||
help="The directory in which lbrycrd data will stored. Used if lbrycrdd is "
|
||||
"launched by this application.")
|
||||
parser.add_argument("--lbrycrd_wallet_conf",
|
||||
help="The configuration file for the LBRYcrd wallet. Default: ~/.lbrycrd/lbrycrd.conf",
|
||||
type=str)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.no_dht_bootstrap:
|
||||
bootstrap_nodes = []
|
||||
else:
|
||||
bootstrap_nodes = [(args.dht_bootstrap_host, args.dht_bootstrap_port)]
|
||||
|
||||
if args.no_listen_peer:
|
||||
peer_port = None
|
||||
else:
|
||||
peer_port = args.peer_port
|
||||
|
||||
if args.no_listen_dht:
|
||||
dht_node_port = None
|
||||
else:
|
||||
dht_node_port = args.dht_node_port
|
||||
|
||||
created_data_dir = False
|
||||
if not args.data_dir:
|
||||
if sys.platform == "darwin":
|
||||
data_dir = user_data_dir("LBRY")
|
||||
else:
|
||||
data_dir = os.path.join(os.path.expanduser("~"), ".lbrynet")
|
||||
else:
|
||||
data_dir = args.data_dir
|
||||
if not os.path.exists(data_dir):
|
||||
os.mkdir(data_dir)
|
||||
created_data_dir = True
|
||||
|
||||
daemon = LBRYAPIClient.config()
|
||||
try:
|
||||
daemon.is_running()
|
||||
log.info("Attempt to start lbrynet-console while lbrynet-daemon is running")
|
||||
print "lbrynet-daemon is running, you must turn it off before using lbrynet-console"
|
||||
print "If you're running the app, quit before starting lbrynet-console"
|
||||
print "If you're running lbrynet-daemon in a terminal, run 'stop-lbrynet-daemon' to turn it off"
|
||||
|
||||
webbrowser.open("http://localhost:5279")
|
||||
|
||||
except:
|
||||
log_format = "(%(asctime)s)[%(filename)s:%(lineno)s] %(funcName)s(): %(message)s"
|
||||
formatter = logging.Formatter(log_format)
|
||||
|
||||
logger = logging.getLogger()
|
||||
logger.setLevel(logging.DEBUG)
|
||||
file_handler = logging.FileHandler(os.path.join(data_dir, "console.log"))
|
||||
file_handler.setFormatter(formatter)
|
||||
file_handler.addFilter(logging.Filter("lbrynet"))
|
||||
logger.addHandler(file_handler)
|
||||
|
||||
|
||||
console = Console(peer_port, dht_node_port, bootstrap_nodes, fake_wallet=args.fake_wallet,
|
||||
lbrycrd_conf=args.lbrycrd_wallet_conf, lbrycrd_dir=args.lbrycrd_wallet_dir,
|
||||
use_upnp=not args.disable_upnp, data_dir=data_dir,
|
||||
created_data_dir=created_data_dir, lbrycrdd_path=args.lbrycrdd_path)
|
||||
|
||||
d = task.deferLater(reactor, 0, console.start)
|
||||
d.addErrback(lambda _: reactor.stop())
|
||||
reactor.addSystemEventTrigger('before', 'shutdown', console.shut_down)
|
||||
reactor.run()
|
||||
|
||||
if __name__ == "__main__":
    # Allow running this module directly as the console entry point.
    launch_lbry_console()
|
|
@ -1,176 +0,0 @@
|
|||
from twisted.protocols import basic
|
||||
from twisted.internet import defer
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ConsoleControl(basic.LineReceiver):
    # Use the platform line separator so sendLine output matches the local
    # terminal conventions.
    from os import linesep as delimiter

    def __init__(self):
        self.connected = False      # set True in connectionMade
        self.buffer = []            # output queued before the transport connects
        self.command_handlers = {}  # command name -> handler factory (set in start)
        self.current_handler = None
|
||||
|
||||
def start(self, command_handlers):
    """Index the handler factories by command name and show the welcome prompt."""
    self.command_handlers = dict((h.command, h) for h in command_handlers)
    self.current_handler = None
    self.send_initial_prompt()
    return defer.succeed(True)
|
||||
|
||||
def connectionMade(self):
    """Mark the transport as connected and flush any queued output."""
    self.connected = True
    if self.buffer:
        # Bug fix: self.buffer is a list of strings, but send() passes its
        # argument straight to transport.write, which expects a single
        # string; join the queued pieces before sending.
        self.send("".join(self.buffer))
        self.buffer = []
|
||||
|
||||
def send_initial_prompt(self):
    """Show the welcome banner and the first interactive prompt."""
    self.sendLine("")
    self.sendLine("You should have received 1000 LBC the first time you ran\n"
                  "this program. If you did not, let us know! But first give\n"
                  "them a moment to show up. The block time is currently 30\n"
                  "seconds so they should show up within a couple of minutes.\n\n"
                  "Welcome to lbrynet-console!")
    self.sendLine("")
    self.sendLine("Enter a command. Try 'get wonderfullife' or 'help' to see more options.")
    self.show_prompt()
|
||||
|
||||
def send(self, s):
    """Write raw text straight to the transport."""
    self.transport.write(s)
|
||||
|
||||
def write(self, s):
    """Queue output until the transport connects, then send it directly.

    This file-like method lets the controller act as a logging stream.
    """
    if self.connected:
        self.send(s)
    else:
        self.buffer.append(s)
|
||||
|
||||
def flush(self):
    """Send any queued output if the transport is connected."""
    if self.connected is True and self.buffer:
        # Bug fix: join the buffered strings before writing; passing the
        # list itself to transport.write is a type error.
        self.send("".join(self.buffer))
        self.buffer = []
|
||||
|
||||
def show_prompt(self):
    """Display the interactive prompt."""
    self.send("> ")
|
||||
|
||||
def show_quick_help(self):
    """List the higher-priority commands, inserting a 'help-all' hint just
    before the first handler whose priority drops below 10."""
    self.sendLine("Available commands:")
    self.sendLine("")
    showed_help_all = False
    # Sort alphabetically first, then by descending priority; the stable
    # sort keeps alphabetical order within each priority level.
    sorted_handlers = sorted(self.command_handlers.items(), key=lambda x: x[0])
    sorted_handlers = sorted(sorted_handlers, key=lambda x: x[1].priority, reverse=True)
    for command, handler in sorted_handlers:
        if handler.priority > 0:
            if showed_help_all is False and handler.priority < 10:
                self.sendLine("help-all - Show the full list of available commands")
                showed_help_all = True
            self.sendLine(command + " - " + handler.short_help)
    self.sendLine("")
    self.sendLine("For more information about any command type 'help <command>'")
|
||||
|
||||
def show_full_help(self):
    """Print every registered command with its short help line."""
    self.sendLine("Available commands:")
    self.sendLine("")
    for command_name, handler in sorted(self.command_handlers.items(), key=lambda x: x[0]):
        self.sendLine(command_name + " - " + handler.short_help)
    self.sendLine("")
    self.sendLine("For more information about any command type 'help <command>'")
|
||||
|
||||
def handler_done(self):
    """Clear the active handler and return to the prompt."""
    self.current_handler = None
    self.show_prompt()
|
||||
|
||||
def handler_failed(self, err):
    """Report a handler failure to the user and return to the prompt."""
    self.current_handler = None
    self.sendLine("An error occurred:")
    self.sendLine(err.getTraceback())
    self.show_prompt()
|
||||
|
||||
def lineReceived(self, line):
    """Dispatch one line of user input.

    If a handler is active, the line is fed to it; otherwise the first
    word selects a command (with unambiguous-prefix matching) and the
    remaining words become its arguments.
    """
    if not self.command_handlers:
        # start() has not run yet; ignore input.
        return
    if self.current_handler is None:
        words = line.split()
        if len(words) == 0:
            self.show_prompt()
            return
        command, args = words[0], words[1:]
        if command == "help":
            if len(args) == 0:
                self.show_quick_help()
                self.show_prompt()
                return
            if args[0] in self.command_handlers:
                self.sendLine(self.command_handlers[args[0]].full_help)
                self.show_prompt()
                return
            if args[0] == "help-all":
                self.sendLine("Show the full list of available commands!")
                self.show_prompt()
                return
            self.sendLine("Can't help you with '%s'. Sorry!" % args[0])
            self.show_prompt()
            return
        elif command == "help-all":
            self.show_full_help()
            self.show_prompt()
            return
        if command in self.command_handlers:
            command_handler = self.command_handlers[command]
        else:
            candidates = [k for k in self.command_handlers.keys() if k.startswith(command)]
            if len(candidates) == 0:
                self.sendLine("Unknown command. Type 'help' for a list of commands.")
                self.show_prompt()
                return
            if len(candidates) >= 2:
                # Bug fix: the ambiguous-command message was appended to
                # itself ("l += l" after stripping the trailing comma),
                # printing the candidate list twice.
                self.sendLine("Ambiguous command. Matches: " + ", ".join(candidates))
                self.show_prompt()
                return
            else:
                command_handler = self.command_handlers[candidates[0]]
        try:
            self.current_handler = command_handler.get_handler(self)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed while constructing a handler.
            self.current_handler = None
            import traceback
            self.sendLine(traceback.format_exc())
            log.error(traceback.format_exc())
            self.show_prompt()
            return
        try:
            self.current_handler.start(*args)
        except TypeError:
            # Wrong argument count for the handler's start signature.
            self.current_handler = None
            self.sendLine("Invalid arguments. Type 'help <command>' for the argument list.")
            import traceback
            log.error(traceback.format_exc())
            self.show_prompt()
            return
        except Exception:
            self.current_handler = None
            import traceback
            self.sendLine(traceback.format_exc())
            log.error(traceback.format_exc())
            self.show_prompt()
            return
        self.current_handler.finished_deferred.addCallbacks(lambda _: self.handler_done(),
                                                            self.handler_failed)
    else:
        try:
            self.current_handler.handle_line(line)
        except Exception:
            self.current_handler = None
            import traceback
            self.sendLine(traceback.format_exc())
            log.error(traceback.format_exc())
            self.show_prompt()
            return
|
File diff suppressed because it is too large
Load diff
|
@ -1,13 +0,0 @@
|
|||
from yapsy.IPlugin import IPlugin
|
||||
|
||||
|
||||
class Plugin(IPlugin):
    """Base class for lbrynet-console plugins.

    Subclasses must implement setup() and stop().
    """

    def __init__(self):
        IPlugin.__init__(self)

    def setup(self, lbry_console):
        """Hook the plugin into the running console. Must be overridden."""
        raise NotImplementedError

    def stop(self):
        """Release any resources held by the plugin. Must be overridden."""
        raise NotImplementedError
|
|
@ -1,8 +0,0 @@
|
|||
"""
|
||||
A plugin-enabled console application for interacting with the LBRY network called lbrynet-console.
|
||||
|
||||
lbrynet-console can be used to download and upload LBRY Files and includes plugins for streaming
|
||||
LBRY Files to an external application and to download unknown chunks of data for the purpose of
|
||||
re-uploading them. It gives the user some control over how much will be paid for data and
|
||||
metadata and also what types of queries from clients.
|
||||
"""
|
|
@ -1,24 +0,0 @@
|
|||
from zope.interface import Interface
|
||||
|
||||
|
||||
class IControlHandlerFactory(Interface):
    """Constructs IControlHandler objects and describes them for menus."""
    def get_prompt_description(self):
        pass

    def get_handler(self):
        pass
|
||||
|
||||
|
||||
class IControlHandler(Interface):
    """Handles successive lines of console input for one command."""
    def handle_line(self, line):
        pass
|
||||
|
||||
|
||||
class ICommandHandlerFactory(Interface):
    """Factory interface for console command handlers (zope declaration)."""
    def get_handler(self):
        # Return a new ICommandHandler instance.
        pass
|
||||
|
||||
|
||||
class ICommandHandler(Interface):
    """A command handler that consumes lines typed at the console prompt."""
    def handle_line(self, line):
        # Process one line of user input.
        pass
|
|
@ -1,15 +0,0 @@
|
|||
from zope.interface import implements
|
||||
from lbrynet.interfaces import IBlobHandler
|
||||
from twisted.internet import defer
|
||||
|
||||
|
||||
class BlindBlobHandler(object):
    """No-op blob handler: accepts every downloaded blob unconditionally.

    Used by the blind repeater, which stores blobs without ever inspecting
    their content.
    """
    implements(IBlobHandler)

    def __init__(self):
        pass

    ######### IBlobHandler #########

    def handle_blob(self, blob, blob_info):
        # Nothing to do with the blob's content; report success immediately.
        return defer.succeed(True)
|
|
@ -1,78 +0,0 @@
|
|||
from twisted.internet import defer
|
||||
from ValuableBlobInfo import ValuableBlobInfo
|
||||
import os
|
||||
import sqlite3
|
||||
from twisted.enterprise import adbapi
|
||||
from lbrynet.core.sqlite_helpers import rerun_if_locked
|
||||
|
||||
|
||||
class BlindInfoManager(object):
    """Persists ValuableBlobInfo records in a sqlite database under ``db_dir``.

    Rows are stored in 'blind_info.db'; the Peer for each row is rehydrated
    through ``peer_manager`` when infos are read back.
    """

    def __init__(self, db_dir, peer_manager):
        self.db_dir = db_dir
        self.db_conn = None  # adbapi.ConnectionPool, created in setup()
        self.peer_manager = peer_manager

    def setup(self):
        """Open the connection pool and ensure the table exists.

        Returns a deferred that fires once the schema is ready.
        """
        # check_same_thread=False is solely to quiet a spurious error that appears to be due
        # to a bug in twisted, where the connection is closed by a different thread than the
        # one that opened it. The individual connections in the pool are not used in multiple
        # threads.
        self.db_conn = adbapi.ConnectionPool('sqlite3', os.path.join(self.db_dir, "blind_info.db"),
                                             check_same_thread=False)

        def set_up_table(transaction):
            transaction.execute("create table if not exists valuable_blobs (" +
                                " blob_hash text primary key, " +
                                " blob_length integer, " +
                                " reference text, " +
                                " peer_host text, " +
                                " peer_port integer, " +
                                " peer_score text" +
                                ")")
        return self.db_conn.runInteraction(set_up_table)

    def stop(self):
        """Drop the connection pool reference. Returns a fired deferred."""
        # BUG FIX: the original assigned to ``self.db`` — an attribute nothing
        # else reads — so ``self.db_conn`` was never cleared on stop().
        self.db_conn = None
        return defer.succeed(True)

    def get_all_blob_infos(self):
        """Return a deferred that fires with a list of ValuableBlobInfo,
        one per stored row."""
        d = self._get_all_blob_infos()

        def make_blob_infos(blob_data):
            blob_infos = []
            for blob in blob_data:
                blob_hash, length, reference, peer_host, peer_port, peer_score = blob
                # Rehydrate the Peer object from its persisted host/port.
                peer = self.peer_manager.get_peer(peer_host, peer_port)
                blob_info = ValuableBlobInfo(blob_hash, length, reference, peer, peer_score)
                blob_infos.append(blob_info)
            return blob_infos
        d.addCallback(make_blob_infos)
        return d

    def save_blob_infos(self, blob_infos):
        """Persist the given ValuableBlobInfo objects.

        Rows whose blob_hash is already stored are silently skipped.
        Returns a deferred that fires when the transaction completes.
        """
        blobs = []
        for blob_info in blob_infos:
            blob_hash = blob_info.blob_hash
            length = blob_info.length
            reference = blob_info.reference
            peer_host = blob_info.peer.host
            peer_port = blob_info.peer.port
            peer_score = blob_info.peer_score
            blobs.append((blob_hash, length, reference, peer_host, peer_port, peer_score))
        return self._save_blob_infos(blobs)

    @rerun_if_locked
    def _get_all_blob_infos(self):
        return self.db_conn.runQuery("select * from valuable_blobs")

    @rerun_if_locked
    def _save_blob_infos(self, blobs):
        def save_infos(transaction):
            for blob in blobs:
                try:
                    transaction.execute("insert into valuable_blobs values (?, ?, ?, ?, ?, ?)",
                                        blob)
                except sqlite3.IntegrityError:
                    # A row for this blob_hash already exists; keep the original.
                    pass
        return self.db_conn.runInteraction(save_infos)
|
|
@ -1,321 +0,0 @@
|
|||
from zope.interface import implements
|
||||
from lbrynet.interfaces import IMetadataHandler, IRequestCreator
|
||||
from lbrynet.core.client.ClientRequest import ClientRequest, ClientPaidRequest
|
||||
from lbrynet.core.Error import InsufficientFundsError, InvalidResponseError, RequestCanceledError
|
||||
from lbrynet.core.Error import NoResponseError, ConnectionClosedBeforeResponseError
|
||||
from ValuableBlobInfo import ValuableBlobInfo
|
||||
import datetime
|
||||
import logging
|
||||
import random
|
||||
from twisted.internet import defer
|
||||
from twisted.python.failure import Failure
|
||||
from collections import defaultdict
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BlindMetadataHandler(object):
    """Discovers 'valuable' blob hashes from approved peers and fetches their
    lengths, paying peers per negotiated rates.

    Acts as both the metadata source for a blind DownloadManager
    (IMetadataHandler) and as a request creator feeding the connection
    manager (IRequestCreator).
    """
    implements(IMetadataHandler, IRequestCreator)

    def __init__(self, info_manager, peers, peer_finder, approved_peers, payment_rate_manager, wallet,
                 download_manager):
        self.info_manager = info_manager
        self.payment_rate_manager = payment_rate_manager
        self.wallet = wallet
        self.download_manager = download_manager
        self._peers = peers  # {Peer: score}
        self.peer_finder = peer_finder
        self.approved_peers = approved_peers
        self._valuable_protocol_prices = {}  # {protocol: rate for hash requests}
        self._info_protocol_prices = {}  # {protocol: rate for length requests}
        self._price_disagreements = []  # [Peer]
        self._incompatible_peers = []  # [Peer]
        self._last_blob_hashes_from_peers = {}  # {Peer: (blob_hash, expire_time)}
        self._valuable_hashes = {}  # {blob_hash: (peer score, reference, peer)}
        self._blob_infos = {}  # {blob_hash: ValuableBlobInfo}
        self._peer_search_results = defaultdict(list)  # {peer: [blob_hash]}

    ######### IMetadataHandler #########

    def get_initial_blobs(self):
        """Return a deferred list of all blob infos already persisted."""
        d = self.info_manager.get_all_blob_infos()
        return d

    def final_blob_num(self):
        # The blind stream has no fixed end; metadata keeps arriving.
        return None

    ######### IRequestCreator #########

    def send_next_request(self, peer, protocol):
        """Queue the next hash/length (and price-negotiation) requests to peer.

        Returns a deferred firing with True if any paid request was sent,
        False otherwise, or failing with InsufficientFundsError if points
        could not be reserved.
        """
        # Basic idea:
        # If the peer has been sending us blob hashes to download recently (10 minutes?),
        # send back an example of one (the most recent?) so that it can
        # keep sending us more like it. Otherwise, just ask for
        # valuable blobs
        sent_request = False
        if self._should_send_request_to(peer):
            v_r = self._get_valuable_blob_request(peer)
            if v_r is not None:
                v_p_r = self._get_valuable_price_request(peer, protocol)
                reserved_points = self._reserve_points_valuable(peer, protocol, v_r.max_pay_units)
                if reserved_points is not None:
                    d1 = protocol.add_request(v_r)
                    d1.addCallback(self._handle_valuable_blob_response, peer, v_r)
                    # BUG FIX: was paying out of self._info_protocol_prices even
                    # though the points were reserved against the valuable-hash
                    # rate (_reserve_points_valuable).
                    d1.addBoth(self._pay_or_cancel_payment, protocol, reserved_points,
                               self._valuable_protocol_prices)
                    d1.addErrback(self._request_failed, "valuable blob request", peer)
                    sent_request = True
                    if v_p_r is not None:
                        d2 = protocol.add_request(v_p_r)
                        d2.addCallback(self._handle_valuable_price_response, peer, v_p_r, protocol)
                        d2.addErrback(self._request_failed, "valuable price request", peer)
                else:
                    return defer.fail(InsufficientFundsError())
            i_r = self._get_info_request(peer)
            if i_r is not None:
                i_p_r = self._get_info_price_request(peer, protocol)
                reserved_points = self._reserve_points_info(peer, protocol, i_r.max_pay_units)
                if reserved_points is not None:
                    d3 = protocol.add_request(i_r)
                    # BUG FIX: the original passed two extra args
                    # (protocol, reserved_points) that _handle_info_response's
                    # signature does not accept, raising TypeError at runtime.
                    d3.addCallback(self._handle_info_response, peer, i_r)
                    # BUG FIX: pay from the info-rate dict that the points were
                    # reserved against (was self._valuable_protocol_prices).
                    d3.addBoth(self._pay_or_cancel_payment, protocol, reserved_points,
                               self._info_protocol_prices)
                    # BUG FIX: _request_failed takes (reason, request_type, peer);
                    # the extra reserved_points argument would also TypeError.
                    d3.addErrback(self._request_failed, "info request", peer)
                    sent_request = True
                    if i_p_r is not None:
                        d4 = protocol.add_request(i_p_r)
                        d4.addCallback(self._handle_info_price_response, peer, i_p_r, protocol)
                        d4.addErrback(self._request_failed, "info price request", peer)
                else:
                    return defer.fail(InsufficientFundsError())
        return defer.succeed(sent_request)

    def get_new_peers(self):
        """Return a deferred list of peers to connect to, or None."""
        peers = None
        if self._peer_search_results:
            peers = self._peer_search_results.keys()
        elif len(self.approved_peers) != 0:
            # No search results yet: poll the approved peers in random order.
            peers = random.sample(self.approved_peers, len(self.approved_peers))
        return defer.succeed(peers)

    ######### internal #########

    def _should_send_request_to(self, peer):
        # Skip peers that have proven incompatible or have a negative score.
        if peer in self._incompatible_peers:
            return False
        if self._peers[peer] >= 0:
            return True
        return False

    def _get_valuable_blob_request(self, peer):
        """Build a paid request for up to 20 valuable blob hashes, seeded with
        the most recent hash this peer sent us (if not yet expired)."""
        blob_hash = None
        if peer in self._last_blob_hashes_from_peers:
            h, expire_time = self._last_blob_hashes_from_peers[peer]
            if datetime.datetime.now() > expire_time:
                del self._last_blob_hashes_from_peers[peer]
            else:
                blob_hash = h
        r_dict = {'valuable_blob_hashes': {'reference': blob_hash, 'max_blob_hashes': 20}}
        response_identifier = 'valuable_blob_hashes'
        request = ClientPaidRequest(r_dict, response_identifier, 20)
        return request

    def _get_valuable_price_request(self, peer, protocol):
        """Offer a payment rate for hash requests, once per protocol."""
        request = None
        if not protocol in self._valuable_protocol_prices:
            self._valuable_protocol_prices[protocol] = self.payment_rate_manager.get_rate_valuable_blob_hash(peer)
            request_dict = {'valuable_blob_payment_rate': self._valuable_protocol_prices[protocol]}
            request = ClientRequest(request_dict, 'valuable_blob_payment_rate')
        return request

    def _get_info_request(self, peer):
        """Build a paid request for the lengths of hashes this peer was found
        to have, plus all hashes sharing the same references."""
        if peer in self._peer_search_results:
            blob_hashes = self._peer_search_results[peer]
            del self._peer_search_results[peer]
            references = []
            for blob_hash in blob_hashes:
                if blob_hash in self._valuable_hashes:
                    references.append(self._valuable_hashes[blob_hash][1])
            hashes_to_search = [h for h, (s, r, p) in self._valuable_hashes.iteritems() if r in references]
            if hashes_to_search:
                r_dict = {'blob_length': {'blob_hashes': hashes_to_search}}
                response_identifier = 'blob_length'
                request = ClientPaidRequest(r_dict, response_identifier, len(hashes_to_search))
                return request
        if not self._peer_search_results:
            # Nothing queued for any peer: kick off a new DHT search.
            self._search_for_peers()
        return None

    def _get_info_price_request(self, peer, protocol):
        """Offer a payment rate for length requests, once per protocol."""
        request = None
        if not protocol in self._info_protocol_prices:
            self._info_protocol_prices[protocol] = self.payment_rate_manager.get_rate_valuable_blob_info(peer)
            request_dict = {'blob_length_payment_rate': self._info_protocol_prices[protocol]}
            request = ClientRequest(request_dict, 'blob_length_payment_rate')
        return request

    def _update_local_score(self, peer, amount):
        self._peers[peer] += amount

    def _reserve_points_valuable(self, peer, protocol, max_units):
        return self._reserve_points(peer, protocol, max_units, self._valuable_protocol_prices)

    def _reserve_points_info(self, peer, protocol, max_units):
        return self._reserve_points(peer, protocol, max_units, self._info_protocol_prices)

    def _reserve_points(self, peer, protocol, max_units, prices):
        # Rates are expressed per 1000 units.
        assert protocol in prices
        points_to_reserve = 1.0 * max_units * prices[protocol] / 1000.0
        return self.wallet.reserve_points(peer, points_to_reserve)

    def _pay_or_cancel_payment(self, arg, protocol, reserved_points, protocol_prices):
        """Pass-through callback: pay for the units received, or cancel the
        reservation on failure / zero units."""
        if isinstance(arg, Failure) or arg == 0:
            self._cancel_points(reserved_points)
        else:
            self._pay_peer(protocol, arg, reserved_points, protocol_prices)
        return arg

    def _pay_peer(self, protocol, num_units, reserved_points, prices):
        assert num_units != 0
        assert protocol in prices
        point_amount = 1.0 * num_units * prices[protocol] / 1000.0
        self.wallet.send_points(reserved_points, point_amount)

    def _cancel_points(self, reserved_points):
        self.wallet.cancel_point_reservation(reserved_points)

    def _handle_valuable_blob_response(self, response_dict, peer, request):
        """Record newly advertised hashes; returns the number of unique new
        hashes (which is also the unit count paid for)."""
        if not request.response_identifier in response_dict:
            return InvalidResponseError("response identifier not in response")
        response = response_dict[request.response_identifier]
        if 'error' in response:
            if response['error'] == "RATE_UNSET":
                # Peer hasn't accepted a rate yet; pay nothing this round.
                return 0
            else:
                return InvalidResponseError("Got an unknown error from the peer: %s" %
                                            (response['error'],))
        if not 'valuable_blob_hashes' in response:
            return InvalidResponseError("Missing the required field 'valuable_blob_hashes'")
        hashes = response['valuable_blob_hashes']
        log.info("Handling %s valuable blob hashes from %s", str(len(hashes)), str(peer))
        expire_time = datetime.datetime.now() + datetime.timedelta(minutes=10)
        reference = None
        unique_hashes = set()
        if 'reference' in response:
            reference = response['reference']
        for blob_hash, peer_score in hashes:
            if reference is None:
                reference = blob_hash
            # Remember the latest hash so the next request can reference it.
            self._last_blob_hashes_from_peers[peer] = (blob_hash, expire_time)
            if not (blob_hash in self._valuable_hashes or blob_hash in self._blob_infos):
                self._valuable_hashes[blob_hash] = (peer_score, reference, peer)
            unique_hashes.add(blob_hash)

        if len(unique_hashes):
            self._update_local_score(peer, len(unique_hashes))
            peer.update_stats('downloaded_valuable_blob_hashes', len(unique_hashes))
            peer.update_score(len(unique_hashes))
        else:
            # Nothing new: nudge the local score down slightly.
            self._update_local_score(peer, -.0001)
        return len(unique_hashes)

    def _handle_info_response(self, response_dict, peer, request):
        """Store blob lengths, queue the blobs for download, and return a
        deferred firing with the unit count paid for."""
        if not request.response_identifier in response_dict:
            return InvalidResponseError("response identifier not in response")
        response = response_dict[request.response_identifier]
        if 'error' in response:
            if response['error'] == 'RATE_UNSET':
                return 0
            else:
                return InvalidResponseError("Got an unknown error from the peer: %s" %
                                            (response['error'],))
        if not 'blob_lengths' in response:
            return InvalidResponseError("Missing the required field 'blob_lengths'")
        raw_blob_lengths = response['blob_lengths']
        log.info("Handling %s blob lengths from %s", str(len(raw_blob_lengths)), str(peer))
        log.debug("blobs: %s", str(raw_blob_lengths))
        infos = []
        unique_hashes = set()
        for blob_hash, length in raw_blob_lengths:
            if blob_hash in self._valuable_hashes:
                peer_score, reference, peer = self._valuable_hashes[blob_hash]
                del self._valuable_hashes[blob_hash]
                infos.append(ValuableBlobInfo(blob_hash, length, reference, peer, peer_score))
                unique_hashes.add(blob_hash)
            elif blob_hash in request.request_dict['blob_length']['blob_hashes']:
                # We asked for it, so it still counts as an answered unit.
                unique_hashes.add(blob_hash)
        d = self.info_manager.save_blob_infos(infos)
        d.addCallback(lambda _: self.download_manager.add_blobs_to_download(infos))

        def pay_or_penalize_peer():
            if len(unique_hashes):
                self._update_local_score(peer, len(unique_hashes))
                peer.update_stats('downloaded_valuable_blob_infos', len(unique_hashes))
                peer.update_score(len(unique_hashes))
            else:
                self._update_local_score(peer, -.0001)
            return len(unique_hashes)

        d.addCallback(lambda _: pay_or_penalize_peer())

        return d

    def _handle_valuable_price_response(self, response_dict, peer, request, protocol):
        """Keep the offered hash rate if accepted; otherwise forget it and
        remember the disagreement."""
        if not request.response_identifier in response_dict:
            return InvalidResponseError("response identifier not in response")
        assert protocol in self._valuable_protocol_prices
        response = response_dict[request.response_identifier]
        if response == "RATE_ACCEPTED":
            return True
        else:
            del self._valuable_protocol_prices[protocol]
            self._price_disagreements.append(peer)
        return True

    def _handle_info_price_response(self, response_dict, peer, request, protocol):
        """Keep the offered length rate if accepted; otherwise forget it and
        remember the disagreement."""
        if not request.response_identifier in response_dict:
            return InvalidResponseError("response identifier not in response")
        assert protocol in self._info_protocol_prices
        response = response_dict[request.response_identifier]
        if response == "RATE_ACCEPTED":
            return True
        else:
            del self._info_protocol_prices[protocol]
            self._price_disagreements.append(peer)
        return True

    def _request_failed(self, reason, request_type, peer):
        """Errback: swallow cancellations, penalize the peer otherwise."""
        if reason.check(RequestCanceledError):
            return
        if reason.check(NoResponseError):
            self._incompatible_peers.append(peer)
        log.warning("Valuable blob info requester: a request of type %s has failed. Reason: %s",
                    str(request_type), str(reason.getErrorMessage()))
        self._update_local_score(peer, -10.0)
        peer.update_score(-5.0)
        if reason.check(ConnectionClosedBeforeResponseError):
            return
        return reason

    def _search_for_peers(self):
        """Pick one hash per unseen reference and kick off a peer search,
        preferring a reference with no known sources yet."""
        references_with_sources = set()
        for h_list in self._peer_search_results.itervalues():
            for h in h_list:
                if h in self._valuable_hashes:
                    references_with_sources.add(self._valuable_hashes[h][1])
        hash_to_search = None
        used_references = []
        for h, (s, r, p) in self._valuable_hashes.iteritems():
            if not r in used_references:
                used_references.append(r)
                hash_to_search = h
                if not r in references_with_sources:
                    break
        if hash_to_search:
            d = self.peer_finder.find_peers_for_blob(hash_to_search)
            d.addCallback(self._set_peer_search_results, hash_to_search)

    def _set_peer_search_results(self, peers, searched_hash):
        for peer in peers:
            self._peer_search_results[peer].append(searched_hash)
|
|
@ -1,93 +0,0 @@
|
|||
from zope.interface import implements
|
||||
from lbrynet.interfaces import IProgressManager
|
||||
from twisted.internet import defer
|
||||
|
||||
|
||||
class BlindProgressManager(object):
    """Periodically ranks known blobs by score and keeps the best ones within
    a byte budget: validated low-ranked blobs are deleted, unvalidated
    high-ranked blobs are queued for download.
    """
    implements(IProgressManager)

    def __init__(self, blob_manager, peers, max_space, blob_scorers, download_manager):
        self.blob_manager = blob_manager
        self.peers = peers
        self.max_space = max_space  # byte budget for stored blobs
        self.blob_scorers = blob_scorers  # objects with score_blob(blob, info) -> (add, mul)
        self.download_manager = download_manager
        self.paused = True
        self.blobs_to_download = []
        self._next_manage_downloaded_blobs = None  # pending reactor.callLater handle

    def set_max_space(self, max_space):
        self.max_space = max_space

    ######### IProgressManager #########

    def start(self):
        # Imported here to avoid installing the reactor at module import time.
        from twisted.internet import reactor

        self.paused = False
        self._next_manage_downloaded_blobs = reactor.callLater(0, self._manage_downloaded_blobs)
        return defer.succeed(True)

    def stop(self):
        self.paused = True
        # Cancel the pending management pass, if one is scheduled and live.
        if self._next_manage_downloaded_blobs is not None and self._next_manage_downloaded_blobs.active():
            self._next_manage_downloaded_blobs.cancel()
        self._next_manage_downloaded_blobs = None
        return defer.succeed(True)

    def stream_position(self):
        # The blind repeater has no notion of a position within a stream.
        return 0

    def needed_blobs(self):
        # Up to 20 not-yet-validated blobs, blobs already downloading first.
        needed_blobs = [b for b in self.blobs_to_download if not b.is_validated()]
        return sorted(needed_blobs, key=lambda x: x.is_downloading(), reverse=True)[:20]

    ######### internal #########

    def _manage_downloaded_blobs(self):
        # One management pass; reschedules itself every 30 seconds.

        self._next_manage_downloaded_blobs = None

        from twisted.internet import reactor

        # NOTE(review): ``blobs`` is iterated for ``b.blob_hash`` below and
        # then indexed by blob_hash — these usages conflict unless it is a
        # custom mapping-like container; confirm against DownloadManager.
        blobs = self.download_manager.blobs
        blob_infos = self.download_manager.blob_infos

        blob_hashes = [b.blob_hash for b in blobs]

        blobs_to_score = [(blobs[blob_hash], blob_infos[blob_hash]) for blob_hash in blob_hashes]

        scores = self._score_blobs(blobs_to_score)

        # Python 2: get the iterator version of zip.
        from future_builtins import zip

        scored_blobs = zip(blobs_to_score, scores)
        ranked_blobs = sorted(scored_blobs, key=lambda x: x[1], reverse=True)

        space_so_far = 0
        blobs_to_delete = []
        blobs_to_download = []

        # Walk best-to-worst, accumulating size; past the budget, validated
        # blobs are dropped, and only blobs within budget are fetched.
        for (blob, blob_info), score in ranked_blobs:
            space_so_far += blob.blob_length
            if blob.is_validated() and space_so_far >= self.max_space:
                blobs_to_delete.append(blob)
            elif not blob.is_validated() and space_so_far < self.max_space:
                blobs_to_download.append(blob)

        self.blob_manager.delete_blobs(blobs_to_delete)
        self.blobs_to_download = blobs_to_download

        self._next_manage_downloaded_blobs = reactor.callLater(30, self._manage_downloaded_blobs)

    def _score_blobs(self, blobs):
        # Combined score = sum of additive terms * product of multiplicative
        # terms across all scorers. (``reduce`` is the Python 2 builtin.)
        scores = []
        for blob, blob_info in blobs:
            summands = []
            multiplicands = []
            for blob_scorer in self.blob_scorers:
                s, m = blob_scorer.score_blob(blob, blob_info)
                summands.append(s)
                multiplicands.append(m)
            scores.append(1.0 * sum(summands) * reduce(lambda x, y: x * y, multiplicands, 1))
        return scores
|
|
@ -1,113 +0,0 @@
|
|||
from twisted.internet import defer
|
||||
from twisted.python.failure import Failure
|
||||
from lbrynet.core.client.BlobRequester import BlobRequester
|
||||
from lbrynet.core.client.ConnectionManager import ConnectionManager
|
||||
from lbrynet.core.client.DownloadManager import DownloadManager
|
||||
from BlindMetadataHandler import BlindMetadataHandler
|
||||
from BlindProgressManager import BlindProgressManager
|
||||
from BlindBlobHandler import BlindBlobHandler
|
||||
from collections import defaultdict
|
||||
from interfaces import IBlobScorer
|
||||
from zope.interface import implements
|
||||
|
||||
|
||||
class PeerScoreBasedScorer(object):
    """Scores a blob purely by the score of the peer that advertised it."""
    implements(IBlobScorer)

    def score_blob(self, blob, blob_info):
        # Additive term is the peer's score; multiplicative term is neutral.
        return blob_info.peer_score, 1
|
||||
|
||||
|
||||
class LengthBasedScorer(object):
    """Scores a blob by its size as a fraction of 2**21 bytes."""
    implements(IBlobScorer)

    def score_blob(self, blob, blob_info):
        # No additive term; multiplicative term grows with blob size.
        return 0, 1.0 * blob.get_length() / 2**21
|
||||
|
||||
|
||||
class BlindRepeater(object):
    """Downloads and re-hosts 'valuable' blobs without inspecting their content.

    Wires a DownloadManager together with the blind metadata, progress and
    blob handlers; only explicitly approved peers are polled for hashes.
    """

    def __init__(self, peer_finder, rate_limiter, blob_manager, info_manager, wallet, payment_rate_manager):
        self.peer_finder = peer_finder
        self.rate_limiter = rate_limiter
        self.blob_manager = blob_manager
        self.info_manager = info_manager
        self.wallet = wallet
        self.payment_rate_manager = payment_rate_manager
        self.download_manager = None  # built lazily in _start()
        self.progress_manager = None  # built lazily in _start()
        self.max_space = 0  # byte budget enforced by BlindProgressManager
        self.peers = defaultdict(int)  # {Peer: local score}
        self.approved_peers = set()  # peers we are allowed to query
        self.stopped = True

    def setup(self):
        # Nothing to initialize up front; _start() builds the pipeline.
        return defer.succeed(True)

    def start(self):
        # Refuse a double start; callers get a failed deferred instead.
        if self.stopped is True:
            return self._start()
        else:
            return defer.fail(Failure(ValueError("The repeater is already running")))

    def stop(self):
        if self.stopped is False:
            return self._stop()
        else:
            return defer.fail(Failure(ValueError("The repeater is not running")))

    def status(self):
        # Human-readable state for the console's status command.
        if self.stopped is True:
            return "stopped"
        else:
            return "running"

    def set_max_space(self, max_space):
        self.max_space = max_space
        # Propagate immediately if the repeater is already running.
        if self.progress_manager is not None:
            self.progress_manager.set_max_space(self.max_space)

    def add_approved_peer(self, peer):
        self.approved_peers.add(peer)

    def remove_approved_peer(self, peer):
        self.approved_peers.remove(peer)

    def _start(self):
        # Assemble the download pipeline: metadata finder, blob requester,
        # progress manager and blob handler, all sharing one DownloadManager.
        self.download_manager = DownloadManager(self.blob_manager, True)
        info_finder = BlindMetadataHandler(self.info_manager, self.peers, self.peer_finder,
                                           self.approved_peers, self.payment_rate_manager,
                                           self.wallet, self.download_manager)
        self.download_manager.blob_info_finder = info_finder
        blob_requester = BlobRequester(self.blob_manager, self.peer_finder, self.payment_rate_manager,
                                       self.wallet, self.download_manager)
        self.download_manager.blob_requester = blob_requester
        self.progress_manager = BlindProgressManager(self.blob_manager, self.peers, self.max_space,
                                                     [PeerScoreBasedScorer(), LengthBasedScorer()],
                                                     self.download_manager)
        self.download_manager.progress_manager = self.progress_manager
        self.download_manager.blob_handler = BlindBlobHandler()
        wallet_info_exchanger = self.wallet.get_info_exchanger()
        self.download_manager.wallet_info_exchanger = wallet_info_exchanger
        connection_manager = ConnectionManager(self, self.rate_limiter, [info_finder, blob_requester],
                                               [wallet_info_exchanger])
        self.download_manager.connection_manager = connection_manager
        d = defer.maybeDeferred(self.download_manager.start_downloading)
        d.addCallback(lambda _: self._update_status(stopped=False))
        return d

    def _stop(self):
        d = defer.maybeDeferred(self.download_manager.stop_downloading)
        d.addCallback(lambda _: self._update_status(stopped=True))
        return d

    def _update_status(self, stopped=True):
        self.stopped = stopped

    def insufficient_funds(self, err):
        # Out of funds: shut down rather than keep issuing paid requests.
        return self.stop()
|
|
@ -1,335 +0,0 @@
|
|||
from lbrynet.lbrynet_console.ControlHandlers import CommandHandler, CommandHandlerFactory
|
||||
from lbrynet.lbrynet_console.ControlHandlers import RecursiveCommandHandler, ModifyPaymentRate
|
||||
from twisted.internet import defer
|
||||
|
||||
|
||||
class StartRepeater(CommandHandler):
    """Console command: switch the blind repeater on and persist that choice."""
    prompt_description = "Start the blind repeater"

    def __init__(self, console, repeater, settings):
        CommandHandler.__init__(self, console)
        self.repeater = repeater
        self.settings = settings

    def start(self):
        """Persist running=True, start the repeater, then report success."""
        saved = self.settings.save_repeater_status(running=True)
        saved.addCallback(lambda _: self.repeater.start())
        saved.addCallback(lambda _: self.console.sendLine("Started the repeater"))
        saved.chainDeferred(self.finished_deferred)
|
||||
|
||||
|
||||
class StartRepeaterFactory(CommandHandlerFactory):
    # Factory wiring for the 'start repeater' console command.
    control_handler_class = StartRepeater
|
||||
|
||||
|
||||
class StopRepeater(CommandHandler):
    """Console command: switch the blind repeater off and persist that choice."""
    prompt_description = "Stop the blind repeater"

    def __init__(self, console, repeater, settings):
        CommandHandler.__init__(self, console)
        self.repeater = repeater
        self.settings = settings

    def start(self):
        """Persist running=False, stop the repeater, then report success."""
        saved = self.settings.save_repeater_status(running=False)
        saved.addCallback(lambda _: self.repeater.stop())
        saved.addCallback(lambda _: self.console.sendLine("Stopped the repeater"))
        saved.chainDeferred(self.finished_deferred)
|
||||
|
||||
|
||||
class StopRepeaterFactory(CommandHandlerFactory):
    # Factory wiring for the 'stop repeater' console command.
    control_handler_class = StopRepeater
|
||||
|
||||
|
||||
class UpdateMaxSpace(CommandHandler):
    """Console command: set the repeater's storage cap, in bytes."""
    prompt_description = "Set the maximum space to be used by the blind repeater"
    line_prompt = "Maximum space (in bytes):"

    def __init__(self, console, repeater, settings):
        CommandHandler.__init__(self, console)
        self.repeater = repeater
        self.settings = settings

    def start(self):
        self.console.sendLine(self.line_prompt)

    def handle_line(self, line):
        # The typed line is the requested byte count.
        self._set_max_space(line).chainDeferred(self.finished_deferred)

    def _set_max_space(self, line):
        """Persist the new cap, apply it to the repeater, and confirm."""
        max_space = int(line)
        d = self.settings.save_max_space(max_space)
        d.addCallback(lambda _: self.repeater.set_max_space(max_space))
        d.addCallback(
            lambda _: self.console.sendLine("Set the maximum space to " + str(max_space) + " bytes"))
        return d
|
||||
|
||||
|
||||
class UpdateMaxSpaceFactory(CommandHandlerFactory):
    # Factory wiring for the 'set maximum space' console command.
    control_handler_class = UpdateMaxSpace
|
||||
|
||||
|
||||
class AddApprovedPeer(CommandHandler):
    """Console command: two-step prompt (host, then port) that whitelists a peer."""
    prompt_description = "Add a peer to the approved list of peers to check for valuable blob hashes"
    host_prompt = "Peer host in dotted quad (e.g. 127.0.0.1)"
    port_prompt = "Peer port (e.g. 4444)"

    def __init__(self, console, repeater, peer_manager, settings):
        CommandHandler.__init__(self, console)
        self.repeater = repeater
        self.peer_manager = peer_manager
        self.settings = settings
        self.host_to_add = None  # set once the first (host) line arrives

    def start(self):
        self.console.sendLine(self.host_prompt)

    def handle_line(self, line):
        if self.host_to_add is None:
            # First line: remember the host, then prompt for the port.
            self.host_to_add = line
            self.console.sendLine(self.port_prompt)
        else:
            # Second line: we now have both host and port.
            host = self.host_to_add
            self.host_to_add = None
            self._add_peer(host, line).chainDeferred(self.finished_deferred)

    def _add_peer(self, host, port):
        """Persist the peer and register it with the repeater."""
        peer = self.peer_manager.get_peer(host, int(port))
        d = self.settings.save_approved_peer(host, int(port))
        d.addCallback(lambda _: self.repeater.add_approved_peer(peer))
        d.addCallback(lambda _: self.console.sendLine("Successfully added peer"))
        return d
|
||||
|
||||
|
||||
class AddApprovedPeerFactory(CommandHandlerFactory):
    # Factory wiring for the 'add approved peer' console command.
    control_handler_class = AddApprovedPeer
|
||||
|
||||
|
||||
class ApprovedPeerChooser(RecursiveCommandHandler):
    """Menu that presents one sub-handler per currently approved peer."""

    def __init__(self, console, repeater, factory_class, *args, **kwargs):
        self.repeater = repeater
        self.factory_class = factory_class
        self.args = args
        RecursiveCommandHandler.__init__(self, console, **kwargs)

    def _get_control_handler_factories(self):
        # One factory per approved peer; the peer is the factory's first arg.
        return [self.factory_class(peer, *self.args)
                for peer in self.repeater.approved_peers]
|
||||
|
||||
|
||||
class ApprovedPeerChooserFactory(CommandHandlerFactory):
    def get_prompt_description(self):
        # The first factory arg is the peer this menu entry represents;
        # show it as the label instead of a static description.
        peer = self.args[0]
        return str(peer)
|
||||
|
||||
|
||||
class DeleteApprovedPeerChooser(ApprovedPeerChooser):
    prompt_description = "Remove a peer from the approved list of peers to check for valuable blob hashes"

    def __init__(self, console, repeater, settings):
        # exit_after_one_done: close the chooser after a single deletion.
        ApprovedPeerChooser.__init__(self, console, repeater, DeleteApprovedPeerFactory, repeater,
                                     settings, exit_after_one_done=True)
|
||||
|
||||
|
||||
class DeleteApprovedPeerChooserFactory(CommandHandlerFactory):
    # Factory wiring for the 'remove approved peer' chooser menu.
    control_handler_class = DeleteApprovedPeerChooser
|
||||
|
||||
|
||||
class DeleteApprovedPeer(CommandHandler):
    """Console command: un-whitelist one specific peer (chosen by the chooser)."""
    prompt_description = "Remove a peer from the approved list of peers to check for valuable blob hashes"

    def __init__(self, console, peer, repeater, settings):
        CommandHandler.__init__(self, console)
        self.repeater = repeater
        self.settings = settings
        self.peer_to_remove = peer

    def start(self):
        # No user input needed; the peer was already selected.
        self._remove_peer().chainDeferred(self.finished_deferred)

    def _remove_peer(self):
        """Remove the peer from persisted settings and the live repeater."""
        host = self.peer_to_remove.host
        port = int(self.peer_to_remove.port)
        d = self.settings.remove_approved_peer(host, port)
        d.addCallback(lambda _: self.repeater.remove_approved_peer(self.peer_to_remove))
        d.addCallback(lambda _: self.console.sendLine("Successfully removed peer"))
        return d
|
||||
|
||||
|
||||
class DeleteApprovedPeerFactory(ApprovedPeerChooserFactory):
    # Factory wiring for deleting the peer shown by the chooser entry.
    control_handler_class = DeleteApprovedPeer
|
||||
|
||||
|
||||
class ShowApprovedPeers(CommandHandler):
    """Print the repeater's current approved-peer list to the console."""

    prompt_description = "Show the list of peers approved to be checked for valuable blob hashes"

    def __init__(self, console, repeater):
        CommandHandler.__init__(self, console)
        self.repeater = repeater

    def start(self):
        self._show_peers().chainDeferred(self.finished_deferred)

    def _show_peers(self):
        lines = ["Approved peers:"] + [str(p) for p in self.repeater.approved_peers]
        # The trailing newline matches the original concatenation-built output.
        self.console.sendLine("\n".join(lines) + "\n")
        return defer.succeed(None)
||||
class ShowApprovedPeersFactory(CommandHandlerFactory):
    # Factory wiring: builds ShowApprovedPeers handlers.
    control_handler_class = ShowApprovedPeers
||||
class RepeaterStatus(CommandHandler):
    """Report the repeater's run state and, while running, its disk usage."""

    prompt_description = "Show the repeater's status"

    def __init__(self, console, repeater):
        CommandHandler.__init__(self, console)
        self.repeater = repeater

    def start(self):
        self._show_status()
        self.finished_deferred.callback(None)

    def _show_status(self):
        parts = ["Repeater status: " + self.repeater.status() + "\n"]
        if self.repeater.stopped is False:
            # Space accounting is only meaningful while the repeater is running.
            max_space = self.repeater.progress_manager.max_space
            space_used = sum(
                blob.get_length()
                for blob in self.repeater.download_manager.blobs
                if blob.is_validated())
            parts.append("Maximum space: " + str(max_space) + " bytes\n")
            parts.append("Space used: " + str(space_used) + " bytes\n")
        self.console.sendLine("".join(parts))
||||
class RepeaterStatusFactory(CommandHandlerFactory):
    # Factory wiring: builds RepeaterStatus handlers.
    control_handler_class = RepeaterStatus
||||
class ModifyDataPaymentRate(ModifyPaymentRate):
    """Change (or unset) the repeater's minimum blob-data payment rate."""

    prompt_description = "Modify Blind Repeater data payment rate"

    def __init__(self, console, repeater, settings):
        ModifyPaymentRate.__init__(self, console)
        # Extra menu choice: fall back to the application-wide default rate.
        self._prompt_choices['unset'] = (self._unset, "Use the application default data rate")
        self.payment_rate_manager = repeater.payment_rate_manager
        self.settings = settings

    def _unset(self):
        self._set_rate(None)
        return defer.succeed("Using the application default data rate")

    def _set_rate(self, rate):
        # Persist first, then apply to the live payment rate manager.
        d = self.settings.save_data_payment_rate(rate)

        def apply_rate(_):
            self.payment_rate_manager.min_blob_data_payment_rate = rate

        d.addCallback(apply_rate)
        return d

    def _get_current_status(self):
        effective_rate = self.payment_rate_manager.get_effective_min_blob_data_payment_rate()
        if self.payment_rate_manager.min_blob_data_payment_rate is None:
            prefix = "The current data payment rate is set to use the application default, "
        else:
            prefix = "The current data payment rate is "
        return prefix + str(effective_rate)
||||
class ModifyDataPaymentRateFactory(CommandHandlerFactory):
    # Factory wiring: builds ModifyDataPaymentRate handlers.
    control_handler_class = ModifyDataPaymentRate
||||
class ModifyInfoPaymentRate(ModifyPaymentRate):
    """Change the repeater's minimum payment rate for valuable blob info."""

    prompt_description = "Modify Blind Repeater valuable info payment rate"

    def __init__(self, console, repeater, settings):
        ModifyPaymentRate.__init__(self, console)
        self.payment_rate_manager = repeater.payment_rate_manager
        self.settings = settings

    def _set_rate(self, rate):
        # Persist first, then apply to the live payment rate manager.
        d = self.settings.save_valuable_info_payment_rate(rate)

        def apply_rate(_):
            self.payment_rate_manager.min_valuable_blob_info_payment_rate = rate

        d.addCallback(apply_rate)
        return d

    def _get_current_status(self):
        current = self.payment_rate_manager.min_valuable_blob_info_payment_rate
        return "The current valuable blob info payment rate is " + str(current)
||||
class ModifyInfoPaymentRateFactory(CommandHandlerFactory):
    # Factory wiring: builds ModifyInfoPaymentRate handlers.
    control_handler_class = ModifyInfoPaymentRate
||||
class ModifyHashPaymentRate(ModifyPaymentRate):
    """Change the repeater's minimum payment rate for valuable blob hashes."""

    prompt_description = "Modify Blind Repeater valuable hash payment rate"

    def __init__(self, console, repeater, settings):
        ModifyPaymentRate.__init__(self, console)
        self.payment_rate_manager = repeater.payment_rate_manager
        self.settings = settings

    def _set_rate(self, rate):
        # Persist first, then apply to the live payment rate manager.
        d = self.settings.save_valuable_hash_payment_rate(rate)

        def apply_rate(_):
            self.payment_rate_manager.min_valuable_blob_hash_payment_rate = rate

        d.addCallback(apply_rate)
        return d

    def _get_current_status(self):
        current = self.payment_rate_manager.min_valuable_blob_hash_payment_rate
        return "The current valuable blob hash payment rate is " + str(current)
||||
class ModifyHashPaymentRateFactory(CommandHandlerFactory):
    # Factory wiring: builds ModifyHashPaymentRate handlers.
    control_handler_class = ModifyHashPaymentRate
||||
class ModifyRepeaterOptions(RecursiveCommandHandler):
    """Submenu collecting all Blind Repeater configuration handlers."""

    prompt_description = "Modify Blind Repeater options"

    def __init__(self, console, repeater, lbry_session, settings):
        self.repeater = repeater
        self.lbry_session = lbry_session
        self.settings = settings
        RecursiveCommandHandler.__init__(self, console)

    def _get_control_handler_factories(self):
        repeater, settings = self.repeater, self.settings
        return [
            ModifyDataPaymentRateFactory(repeater, settings),
            ModifyInfoPaymentRateFactory(repeater, settings),
            ModifyHashPaymentRateFactory(repeater, settings),
            UpdateMaxSpaceFactory(repeater, settings),
            AddApprovedPeerFactory(repeater, self.lbry_session.peer_manager, settings),
            DeleteApprovedPeerChooserFactory(repeater, settings),
        ]
||||
class ModifyRepeaterOptionsFactory(CommandHandlerFactory):
    # Factory wiring: builds the ModifyRepeaterOptions submenu handler.
    control_handler_class = ModifyRepeaterOptions
@ -1,106 +0,0 @@
|
|||
from twisted.internet import threads, defer
|
||||
import json
|
||||
import unqlite
|
||||
import os
|
||||
from twisted.enterprise import adbapi
|
||||
from lbrynet.core.sqlite_helpers import rerun_if_locked
|
||||
|
||||
class BlindRepeaterSettings(object):
    """Persist Blind Repeater state in two small databases under db_dir.

    Key/value settings (running flag, max space, payment rates) are kept
    JSON-encoded in an UnQLite file; the approved-peer list lives in an
    SQLite table accessed through a twisted adbapi connection pool.
    """

    def __init__(self, db_dir):
        self.db_dir = db_dir
        self.unq_db = None   # UnQLite handle, opened in setup()
        self.sql_db = None   # adbapi.ConnectionPool, opened in setup()

    def setup(self):
        """Open both databases and ensure the approved_peers table exists."""
        self.unq_db = unqlite.UnQLite(os.path.join(self.db_dir, "blind_settings.db"))
        # check_same_thread=False is solely to quiet a spurious error that appears to be due
        # to a bug in twisted, where the connection is closed by a different thread than the
        # one that opened it. The individual connections in the pool are not used in multiple
        # threads.
        self.sql_db = adbapi.ConnectionPool('sqlite3', os.path.join(self.db_dir, "blind_peers.db"),
                                            check_same_thread=False)
        return self.sql_db.runQuery("create table if not exists approved_peers (" +
                                    " ip_address text, " +
                                    " port integer" +
                                    ")")

    def stop(self):
        """Drop both database references; returns an already-fired deferred."""
        self.unq_db = None
        self.sql_db = None
        return defer.succeed(True)

    def save_repeater_status(self, running):
        def write_status():
            self.unq_db["running"] = json.dumps(running)

        return threads.deferToThread(write_status)

    def get_repeater_saved_status(self):
        def read_status():
            if "running" in self.unq_db:
                return json.loads(self.unq_db['running'])
            return False

        return threads.deferToThread(read_status)

    def save_max_space(self, max_space):
        def write_space():
            self.unq_db['max_space'] = json.dumps(max_space)

        return threads.deferToThread(write_space)

    def get_saved_max_space(self):
        def read_space():
            if 'max_space' in self.unq_db:
                return json.loads(self.unq_db['max_space'])
            return 0

        return threads.deferToThread(read_space)

    @rerun_if_locked
    def save_approved_peer(self, host, port):
        return self.sql_db.runQuery("insert into approved_peers values (?, ?)",
                                    (host, port))

    @rerun_if_locked
    def remove_approved_peer(self, host, port):
        return self.sql_db.runQuery("delete from approved_peers where ip_address = ? and port = ?",
                                    (host, port))

    @rerun_if_locked
    def get_approved_peers(self):
        return self.sql_db.runQuery("select * from approved_peers")

    def get_data_payment_rate(self):
        return threads.deferToThread(self._get_rate, "data_payment_rate")

    def save_data_payment_rate(self, rate):
        return threads.deferToThread(self._save_rate, "data_payment_rate", rate)

    def get_valuable_info_payment_rate(self):
        return threads.deferToThread(self._get_rate, "valuable_info_rate")

    def save_valuable_info_payment_rate(self, rate):
        return threads.deferToThread(self._save_rate, "valuable_info_rate", rate)

    def get_valuable_hash_payment_rate(self):
        return threads.deferToThread(self._get_rate, "valuable_hash_rate")

    def save_valuable_hash_payment_rate(self, rate):
        return threads.deferToThread(self._save_rate, "valuable_hash_rate", rate)

    def _get_rate(self, rate_type):
        # Rates are stored JSON-encoded; a missing key means "not configured".
        if rate_type in self.unq_db:
            return json.loads(self.unq_db[rate_type])
        return None

    def _save_rate(self, rate_type, rate):
        # A rate of None clears any stored value rather than storing null.
        if rate is not None:
            self.unq_db[rate_type] = json.dumps(rate)
        elif rate_type in self.unq_db:
            del self.unq_db[rate_type]
|
@ -1,20 +0,0 @@
|
|||
from lbrynet.core.PaymentRateManager import PaymentRateManager
|
||||
|
||||
|
||||
class BlindRepeaterPaymentRateManager(PaymentRateManager):
    """Payment rate manager extended with rates for valuable blob info/hashes."""

    def __init__(self, base, valuable_info_rate, valuable_hash_rate, blob_data_rate=None):
        PaymentRateManager.__init__(self, base, blob_data_rate)
        self.min_valuable_blob_info_payment_rate = valuable_info_rate
        self.min_valuable_blob_hash_payment_rate = valuable_hash_rate

    def get_rate_valuable_blob_info(self, peer):
        # Flat rate: the same minimum is offered to every peer.
        return self.min_valuable_blob_info_payment_rate

    def accept_rate_valuable_blob_info(self, peer, payment_rate):
        """Accept when the peer's offered rate meets our minimum."""
        minimum = self.min_valuable_blob_info_payment_rate
        return payment_rate >= minimum

    def get_rate_valuable_blob_hash(self, peer):
        # Flat rate: the same minimum is offered to every peer.
        return self.min_valuable_blob_hash_payment_rate

    def accept_rate_valuable_blob_hash(self, peer, payment_rate):
        """Accept when the peer's offered rate meets our minimum."""
        minimum = self.min_valuable_blob_hash_payment_rate
        return payment_rate >= minimum
|
|
@ -1,9 +0,0 @@
|
|||
from lbrynet.core.BlobInfo import BlobInfo
|
||||
|
||||
|
||||
class ValuableBlobInfo(BlobInfo):
    """Blob metadata enriched with the advertising peer and its score."""

    def __init__(self, blob_hash, length, reference, peer, peer_score):
        # blob_hash is passed twice to BlobInfo — presumably the hash also
        # serves as the blob's identifier/number there; confirm against
        # BlobInfo.__init__ before changing.
        BlobInfo.__init__(self, blob_hash, blob_hash, length)
        self.reference = reference
        self.peer = peer
        self.peer_score = peer_score
|
|
@ -1,202 +0,0 @@
|
|||
from lbrynet.interfaces import IQueryHandlerFactory, IQueryHandler
|
||||
from zope.interface import implements
|
||||
from twisted.internet import defer
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ValuableQueryHandler(object):
    """Base class for query handlers that sell 'valuable blob' information.

    Subclasses populate ``query_identifiers`` and override
    ``handle_queries``.
    """
    implements(IQueryHandler)

    def __init__(self, wallet, payment_rate_manager):
        self.wallet = wallet
        self.payment_rate_manager = payment_rate_manager
        # Set when register_with_request_handler is called.
        self.peer = None
        self.payment_rate = None
        # Subclasses fill this with the query keys they respond to.
        self.query_identifiers = []

    ######### IQueryHandler #########

    def register_with_request_handler(self, request_handler, peer):
        self.peer = peer
        request_handler.register_query_handler(self, self.query_identifiers)

    def handle_queries(self, queries):
        # Base implementation does nothing; subclasses override.
        pass
||||
class ValuableBlobHashQueryHandlerFactory(object):
    """Builds query handlers that advertise popular blob hashes for sale."""
    implements(IQueryHandlerFactory)

    def __init__(self, peer_finder, wallet, payment_rate_manager):
        self.peer_finder = peer_finder
        self.wallet = wallet
        self.payment_rate_manager = payment_rate_manager

    ######### IQueryHandlerFactory #########

    def build_query_handler(self):
        return ValuableBlobHashQueryHandler(self.wallet, self.payment_rate_manager,
                                            self.peer_finder)

    def get_primary_query_identifier(self):
        return 'valuable_blob_hashes'

    def get_description(self):
        return "Valuable Hashes - Hashes of blobs that it may be valuable to repeat"
||||
class ValuableBlobHashQueryHandler(ValuableQueryHandler):
    """Responds to 'valuable_blob_hashes' queries with popular blob hashes.

    A peer must first negotiate a payment rate via
    'valuable_blob_payment_rate' before any hashes are returned.
    """
    implements(IQueryHandler)

    def __init__(self, wallet, payment_rate_manager, peer_finder):
        ValuableQueryHandler.__init__(self, wallet, payment_rate_manager)
        self.peer_finder = peer_finder
        self.query_identifiers = ['valuable_blob_hashes', 'valuable_blob_payment_rate']
        # Rate agreed with the peer; None until a rate query is accepted.
        self.valuable_blob_hash_payment_rate = None
        self.blob_length_payment_rate = None

    ######### IQueryHandler #########

    def handle_queries(self, queries):
        """Handle rate negotiation first, then the hash request, returning
        one combined response dict through a deferred."""
        response = {}

        def set_fields(fields):
            response.update(fields)

        if self.query_identifiers[1] in queries:
            d = self._handle_valuable_blob_payment_rate(queries[self.query_identifiers[1]])
            d.addCallback(set_fields)
        else:
            d = defer.succeed(True)

        if self.query_identifiers[0] in queries:
            d.addCallback(lambda _: self._handle_valuable_blob_hashes(queries[self.query_identifiers[0]]))
            d.addCallback(set_fields)

        d.addCallback(lambda _: response)
        return d

    ######### internal #########

    def _handle_valuable_blob_payment_rate(self, requested_payment_rate):
        # BUG FIX: accept_rate_valuable_blob_hash takes (peer, payment_rate);
        # the old call passed an extra "VALUABLE_BLOB_HASH" argument, which
        # raises TypeError against BlindRepeaterPaymentRateManager.
        if not self.payment_rate_manager.accept_rate_valuable_blob_hash(self.peer,
                                                                        requested_payment_rate):
            r = "RATE_TOO_LOW"
        else:
            self.valuable_blob_hash_payment_rate = requested_payment_rate
            r = "RATE_ACCEPTED"
        return defer.succeed({'valuable_blob_payment_rate': r})

    def _handle_valuable_blob_hashes(self, request):
        # TODO: eventually, look at the request and respond appropriately given the 'reference' field
        if self.valuable_blob_hash_payment_rate is not None:
            max_hashes = 20
            if 'max_blob_hashes' in request:
                # BUG FIX: was request['max_blob_hash'], which raised KeyError
                # whenever a peer actually supplied 'max_blob_hashes'.
                max_hashes = int(request['max_blob_hashes'])
            valuable_hashes = self.peer_finder.get_most_popular_blobs(max_hashes)
            # Convert each popularity count into a score in tenths.
            hashes_and_scores = [(blob_hash, 1.0 * count / 10.0)
                                 for blob_hash, count in valuable_hashes]
            if len(hashes_and_scores) != 0:
                log.info("Responding to a valuable blob hashes request with %s blob hashes",
                         str(len(hashes_and_scores)))
                expected_payment = 1.0 * len(hashes_and_scores) * self.valuable_blob_hash_payment_rate / 1000.0
                self.wallet.add_expected_payment(self.peer, expected_payment)
                self.peer.update_stats('uploaded_valuable_blob_hashes', len(hashes_and_scores))
            return defer.succeed({'valuable_blob_hashes': {'blob_hashes': hashes_and_scores}})
        return defer.succeed({'valuable_blob_hashes': {'error': "RATE_UNSET"}})
||||
class ValuableBlobLengthQueryHandlerFactory(object):
    """Builds query handlers that sell lengths of locally-known blobs."""
    implements(IQueryHandlerFactory)

    def __init__(self, wallet, payment_rate_manager, blob_manager):
        self.blob_manager = blob_manager
        self.wallet = wallet
        self.payment_rate_manager = payment_rate_manager

    ######### IQueryHandlerFactory #########

    def build_query_handler(self):
        return ValuableBlobLengthQueryHandler(self.wallet, self.payment_rate_manager,
                                              self.blob_manager)

    def get_primary_query_identifier(self):
        return 'blob_length'

    def get_description(self):
        return "Valuable Blob Lengths - Lengths of blobs that it may be valuable to repeat"
|
||||
class ValuableBlobLengthQueryHandler(ValuableQueryHandler):
    """Responds to 'blob_length' queries with lengths of locally-known blobs.

    A peer must first negotiate a payment rate via 'blob_length_payment_rate'
    before any lengths are returned.
    """

    def __init__(self, wallet, payment_rate_manager, blob_manager):
        ValuableQueryHandler.__init__(self, wallet, payment_rate_manager)
        self.blob_manager = blob_manager
        self.query_identifiers = ['blob_length', 'blob_length_payment_rate']
        self.valuable_blob_hash_payment_rate = None
        # Rate agreed with the peer; None until a rate query is accepted.
        self.blob_length_payment_rate = None

    ######## IQueryHandler #########

    def handle_queries(self, queries):
        """Handle rate negotiation first, then the length request, returning
        one combined response dict through a deferred."""
        response = {}

        def set_fields(fields):
            response.update(fields)

        if self.query_identifiers[1] in queries:
            d = self._handle_blob_length_payment_rate(queries[self.query_identifiers[1]])
            d.addCallback(set_fields)
        else:
            d = defer.succeed(True)

        if self.query_identifiers[0] in queries:
            d.addCallback(lambda _: self._handle_blob_length(queries[self.query_identifiers[0]]))
            d.addCallback(set_fields)

        d.addCallback(lambda _: response)
        return d

    ######### internal #########

    def _handle_blob_length_payment_rate(self, requested_payment_rate):
        # BUG FIX: accept_rate_valuable_blob_info takes (peer, payment_rate);
        # the old call passed an extra "VALUABLE_BLOB_INFO" argument, which
        # raises TypeError against BlindRepeaterPaymentRateManager.
        if not self.payment_rate_manager.accept_rate_valuable_blob_info(self.peer,
                                                                        requested_payment_rate):
            r = "RATE_TOO_LOW"
        else:
            self.blob_length_payment_rate = requested_payment_rate
            r = "RATE_ACCEPTED"
        return defer.succeed({'blob_length_payment_rate': r})

    def _handle_blob_length(self, request):
        if self.blob_length_payment_rate is not None:
            assert 'blob_hashes' in request
            ds = []

            def make_response_pair(length, blob_hash):
                return blob_hash, length

            for blob_hash in request['blob_hashes']:
                d = self.blob_manager.get_blob_length(blob_hash)
                d.addCallback(make_response_pair, blob_hash)
                ds.append(d)

            dl = defer.DeferredList(ds)

            def make_response(response_pairs):
                lengths = [pair for success, pair in response_pairs if success is True]
                if len(lengths) > 0:
                    log.info("Responding with %s blob lengths", str(len(lengths)))
                    expected_payment = 1.0 * len(lengths) * self.blob_length_payment_rate / 1000.0
                    self.wallet.add_expected_payment(self.peer, expected_payment)
                    self.peer.update_stats('uploaded_valuable_blob_infos', len(lengths))
                # BUG FIX: always return the response dict; the original
                # returned None when no lengths were found, which broke the
                # caller's response.update().
                return {'blob_length': {'blob_lengths': lengths}}

            dl.addCallback(make_response)
            # BUG FIX: the original never returned dl, so handle_queries
            # chained on None and the peer received no 'blob_length' field.
            return dl
        # BUG FIX: mirror the hash handler — report RATE_UNSET instead of
        # implicitly returning None into response.update().
        return defer.succeed({'blob_length': {'error': "RATE_UNSET"}})
|
@ -1,130 +0,0 @@
|
|||
from lbrynet.lbrynet_console import Plugin
|
||||
from twisted.internet import defer
|
||||
from lbrynet.conf import settings
|
||||
from BlindRepeater import BlindRepeater
|
||||
from BlindInfoManager import BlindInfoManager
|
||||
from BlindRepeaterSettings import BlindRepeaterSettings
|
||||
from BlindRepeaterControlHandlers import StartRepeaterFactory, StopRepeaterFactory, UpdateMaxSpaceFactory
|
||||
from BlindRepeaterControlHandlers import AddApprovedPeerFactory, DeleteApprovedPeerFactory, RepeaterStatusFactory
|
||||
from BlindRepeaterControlHandlers import ShowApprovedPeersFactory, ModifyRepeaterOptionsFactory
|
||||
from ValuableBlobQueryHandler import ValuableBlobLengthQueryHandlerFactory
|
||||
from ValuableBlobQueryHandler import ValuableBlobHashQueryHandlerFactory
|
||||
|
||||
from PaymentRateManager import BlindRepeaterPaymentRateManager
|
||||
|
||||
|
||||
class BlindRepeaterPlugin(Plugin.Plugin):
    """lbrynet-console plugin wiring the Blind Repeater into a session.

    Sets up persistent settings, payment rates, the repeater itself, its
    query handlers, and the console menu entries.
    """

    def __init__(self):
        Plugin.Plugin.__init__(self)
        self.enabled = False
        self.blind_info_manager = None
        self.valuable_blob_length_query_handler = None
        self.valuable_blob_hash_query_handler = None
        self.repeater = None
        self.control_handlers = None
        self.payment_rate_manager = None
        self.settings = None

    def setup(self, lbry_console):
        """Initialize all repeater components against the console's session."""
        if not self.enabled:
            return defer.succeed(True)
        lbry_session = lbry_console.session
        d = self._setup_settings(lbry_session.db_dir)
        d.addCallback(lambda _: self._get_payment_rate_manager(lbry_session.base_payment_rate_manager))
        d.addCallback(lambda _: self._setup_blind_info_manager(lbry_session.peer_manager, lbry_session.db_dir))
        d.addCallback(lambda _: self._setup_blind_repeater(lbry_session))
        d.addCallback(lambda _: self._setup_valuable_blob_query_handler(lbry_session))
        d.addCallback(lambda _: self._create_control_handlers(lbry_session))
        d.addCallback(lambda _: self._restore_repeater_status(lbry_session))
        d.addCallback(lambda _: self._add_to_lbry_console(lbry_console))
        return d

    def stop(self):
        if self.settings is not None:
            return self.settings.stop()
        return defer.succeed(True)

    def _setup_settings(self, db_dir):
        self.settings = BlindRepeaterSettings(db_dir)
        return self.settings.setup()

    def _get_payment_rate_manager(self, default_payment_rate_manager):
        """Load saved payment rates, falling back to application defaults."""
        d1 = self.settings.get_data_payment_rate()
        d2 = self.settings.get_valuable_info_payment_rate()
        d3 = self.settings.get_valuable_hash_payment_rate()

        dl = defer.DeferredList([d1, d2, d3])

        def get_payment_rate_manager(rates):
            # Each entry is a (success, value) pair from DeferredList.
            data_rate = rates[0][1] if rates[0][0] is True else None
            info_rate = rates[1][1] if rates[1][0] is True else None
            info_rate = info_rate if info_rate is not None else settings.min_valuable_info_rate
            hash_rate = rates[2][1] if rates[2][0] is True else None
            hash_rate = hash_rate if hash_rate is not None else settings.min_valuable_hash_rate
            self.payment_rate_manager = BlindRepeaterPaymentRateManager(default_payment_rate_manager,
                                                                        info_rate, hash_rate,
                                                                        blob_data_rate=data_rate)

        dl.addCallback(get_payment_rate_manager)
        return dl

    def _setup_blind_info_manager(self, peer_manager, db_dir):
        self.blind_info_manager = BlindInfoManager(db_dir, peer_manager)
        return self.blind_info_manager.setup()

    def _setup_valuable_blob_query_handler(self, lbry_session):
        # BUG FIX: ValuableBlobLengthQueryHandlerFactory.__init__ takes
        # (wallet, payment_rate_manager, blob_manager); the old call passed
        # blob_manager first, scrambling all three arguments.
        self.valuable_blob_length_query_handler = ValuableBlobLengthQueryHandlerFactory(
            lbry_session.wallet, self.payment_rate_manager, lbry_session.blob_manager)
        self.valuable_blob_hash_query_handler = ValuableBlobHashQueryHandlerFactory(
            lbry_session.peer_finder, lbry_session.wallet, self.payment_rate_manager)

    def _setup_blind_repeater(self, lbry_session):
        self.repeater = BlindRepeater(lbry_session.peer_finder, lbry_session.rate_limiter,
                                      lbry_session.blob_manager, self.blind_info_manager,
                                      lbry_session.wallet, self.payment_rate_manager)
        return self.repeater.setup()

    def _restore_repeater_status(self, lbry_session):
        """Re-apply persisted max space, approved peers, and running state."""
        d = self.settings.get_saved_max_space()

        def set_max_space(max_space):
            self.repeater.set_max_space(max_space)

        d.addCallback(set_max_space)
        d.addCallback(lambda _: self.settings.get_approved_peers())

        def set_approved_peers(peers):
            for host, port in peers:
                peer = lbry_session.peer_manager.get_peer(host, int(port))
                self.repeater.add_approved_peer(peer)

        d.addCallback(set_approved_peers)
        d.addCallback(lambda _: self.settings.get_repeater_saved_status())

        def restore_running(running):
            if running:
                return self.repeater.start()
            return defer.succeed(True)

        d.addCallback(restore_running)
        return d

    def _create_control_handlers(self, lbry_session):
        category = "Blind Repeater"
        control_handlers = [StartRepeaterFactory(self.repeater, self.settings),
                            StopRepeaterFactory(self.repeater, self.settings),
                            RepeaterStatusFactory(self.repeater),
                            ShowApprovedPeersFactory(self.repeater),
                            ModifyRepeaterOptionsFactory(self.repeater, lbry_session, self.settings)]
        self.control_handlers = zip([category] * len(control_handlers), control_handlers)

    def _add_to_lbry_console(self, lbry_console):
        lbry_console.add_control_handlers(self.control_handlers)
        lbry_console.add_query_handlers([self.valuable_blob_length_query_handler,
                                         self.valuable_blob_hash_query_handler])
|
|
@ -1,6 +0,0 @@
|
|||
from zope.interface import Interface
|
||||
|
||||
|
||||
class IBlobScorer(Interface):
|
||||
def score_blob(self, blob, blob_info):
|
||||
pass
|
|
@ -1,8 +0,0 @@
|
|||
[Core]
|
||||
Name = BlindRepeater
|
||||
Module = BlindRepeater
|
||||
|
||||
[Documentation]
|
||||
Author = LBRY
|
||||
Version = 0.1
|
||||
Description = A client which blindly downloads data it judges valuable so that it can be re-uploaded for profit
|
|
@ -32,7 +32,7 @@ from lbrynet.lbryfile.EncryptedFileMetadataManager import DBEncryptedFileMetadat
|
|||
from lbrynet.lbryfile.EncryptedFileMetadataManager import TempEncryptedFileMetadataManager
|
||||
from lbrynet.lbryfile.StreamDescriptor import EncryptedFileStreamType
|
||||
from lbrynet.lbryfilemanager.EncryptedFileManager import EncryptedFileManager
|
||||
from lbrynet.lbrynet_console.Settings import Settings
|
||||
from lbrynet.lbrynet_daemon.Settings import Settings
|
||||
from lbrynet.lbrynet_daemon.UIManager import UIManager
|
||||
from lbrynet.lbrynet_daemon.Downloader import GetStream
|
||||
from lbrynet.lbrynet_daemon.Publisher import Publisher
|
||||
|
|
Loading…
Reference in a new issue