2016-10-20 18:23:39 +02:00
|
|
|
import io
|
2015-08-20 17:27:15 +02:00
|
|
|
import logging
|
2016-10-20 18:23:39 +02:00
|
|
|
from multiprocessing import Process, Event, Queue
|
|
|
|
import os
|
2016-09-28 01:01:47 +02:00
|
|
|
import platform
|
2016-10-20 18:23:39 +02:00
|
|
|
import shutil
|
2015-08-20 17:27:15 +02:00
|
|
|
import sys
|
|
|
|
import random
|
2016-09-10 08:44:07 +02:00
|
|
|
import unittest
|
|
|
|
|
2015-08-20 17:27:15 +02:00
|
|
|
from Crypto.PublicKey import RSA
|
|
|
|
from Crypto import Random
|
|
|
|
from Crypto.Hash import MD5
|
2016-12-21 20:55:43 +01:00
|
|
|
from lbrynet import conf
|
2015-08-27 21:41:17 +02:00
|
|
|
from lbrynet.lbrylive.LiveStreamCreator import FileLiveStreamCreator
|
2015-08-20 17:27:15 +02:00
|
|
|
from lbrynet.lbrylive.LiveStreamMetadataManager import DBLiveStreamMetadataManager
|
|
|
|
from lbrynet.lbrylive.LiveStreamMetadataManager import TempLiveStreamMetadataManager
|
2016-10-26 09:16:33 +02:00
|
|
|
from lbrynet.lbryfile.EncryptedFileMetadataManager import TempEncryptedFileMetadataManager, \
|
|
|
|
DBEncryptedFileMetadataManager
|
2016-09-30 06:26:27 +02:00
|
|
|
from lbrynet import analytics
|
2015-08-27 21:41:17 +02:00
|
|
|
from lbrynet.lbrylive.LiveStreamCreator import FileLiveStreamCreator
|
2015-08-20 17:27:15 +02:00
|
|
|
from lbrynet.lbrylive.LiveStreamMetadataManager import DBLiveStreamMetadataManager
|
|
|
|
from lbrynet.lbrylive.LiveStreamMetadataManager import TempLiveStreamMetadataManager
|
2016-10-20 18:23:39 +02:00
|
|
|
from lbrynet.lbryfile.EncryptedFileMetadataManager import TempEncryptedFileMetadataManager
|
|
|
|
from lbrynet.lbryfile.EncryptedFileMetadataManager import DBEncryptedFileMetadataManager
|
2016-09-27 21:08:32 +02:00
|
|
|
from lbrynet.lbryfilemanager.EncryptedFileManager import EncryptedFileManager
|
2015-08-20 17:27:15 +02:00
|
|
|
from lbrynet.core.PTCWallet import PointTraderKeyQueryHandlerFactory, PointTraderKeyExchanger
|
2016-09-26 03:35:37 +02:00
|
|
|
from lbrynet.core.Session import Session
|
2016-09-30 06:26:27 +02:00
|
|
|
from lbrynet.core.server.BlobAvailabilityHandler import BlobAvailabilityHandlerFactory
|
2015-08-20 17:27:15 +02:00
|
|
|
from lbrynet.core.client.StandaloneBlobDownloader import StandaloneBlobDownloader
|
|
|
|
from lbrynet.core.StreamDescriptor import BlobStreamDescriptorWriter
|
|
|
|
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier
|
|
|
|
from lbrynet.core.StreamDescriptor import download_sd_blob
|
2016-09-27 21:08:32 +02:00
|
|
|
from lbrynet.lbryfilemanager.EncryptedFileCreator import create_lbry_file
|
|
|
|
from lbrynet.lbryfile.client.EncryptedFileOptions import add_lbry_file_to_sd_identifier
|
2015-08-20 17:27:15 +02:00
|
|
|
from lbrynet.lbryfile.StreamDescriptor import get_sd_info
|
2016-10-03 22:37:27 +02:00
|
|
|
from twisted.internet import defer, threads, task
|
2015-08-20 17:27:15 +02:00
|
|
|
from twisted.trial.unittest import TestCase
|
|
|
|
from twisted.python.failure import Failure
|
2016-10-20 18:23:39 +02:00
|
|
|
|
2016-10-03 22:37:27 +02:00
|
|
|
from lbrynet.dht.node import Node
|
2015-08-20 17:27:15 +02:00
|
|
|
from lbrynet.core.PeerManager import PeerManager
|
|
|
|
from lbrynet.core.RateLimiter import DummyRateLimiter, RateLimiter
|
|
|
|
from lbrynet.core.server.BlobRequestHandler import BlobRequestHandlerFactory
|
|
|
|
from lbrynet.core.server.ServerProtocol import ServerProtocolFactory
|
2016-10-20 18:23:39 +02:00
|
|
|
|
2015-08-20 17:27:15 +02:00
|
|
|
from lbrynet.lbrylive.server.LiveBlobInfoQueryHandler import CryptBlobInfoQueryHandlerFactory
|
2015-08-27 21:41:17 +02:00
|
|
|
from lbrynet.lbrylive.client.LiveStreamOptions import add_live_stream_to_sd_identifier
|
|
|
|
from lbrynet.lbrylive.client.LiveStreamDownloader import add_full_live_stream_downloader_to_sd_identifier
|
2015-08-20 17:27:15 +02:00
|
|
|
|
2016-10-20 18:23:39 +02:00
|
|
|
from tests import mocks
|
|
|
|
|
2016-10-27 17:18:56 +02:00
|
|
|
|
2016-10-20 18:23:39 +02:00
|
|
|
# Short aliases for the shared test doubles defined in tests.mocks, so the
# rest of this module can refer to them by the names the tests historically
# used.
FakeNode = mocks.Node
FakeWallet = mocks.Wallet
FakePeerFinder = mocks.PeerFinder
FakeAnnouncer = mocks.Announcer
GenFile = mocks.GenFile
test_create_stream_sd_file = mocks.create_stream_sd_file
DummyBlobAvailabilityTracker = mocks.BlobAvailabilityTracker


# Include the calling function name in every log line: these tests run
# several processes concurrently, so this makes output attributable.
log_format = "%(funcName)s(): %(message)s"
logging.basicConfig(level=logging.WARNING, format=log_format)
|
|
|
|
|
|
|
|
|
2016-09-28 01:01:47 +02:00
|
|
|
def require_system(system):
    """Decorator factory that restricts a test to one operating system.

    Args:
        system: a ``platform.system()`` value, e.g. ``"Linux"`` or
            ``"Darwin"``.

    Returns:
        A pass-through decorator when the current OS matches *system*,
        otherwise a ``unittest.skip`` decorator that marks the test skipped.
    """
    if platform.system() != system:
        return unittest.skip("Skipping. Test can only be run on " + system)

    def wrapper(fn):
        # Matching platform: leave the test function untouched.
        return fn

    return wrapper
|
|
|
|
|
|
|
|
|
2016-10-20 18:23:39 +02:00
|
|
|
def use_epoll_on_linux():
    """On Linux, install Twisted's EPollReactor in this (child) process.

    Must be called before ``from twisted.internet import reactor``.
    ``sys.modules`` is copied first so that removing the parent's reactor
    entry does not affect the parent process's module table.
    """
    if sys.platform.startswith("linux"):
        sys.modules = sys.modules.copy()
        del sys.modules['twisted.internet.reactor']
        import twisted.internet
        # Fix: the submodule must be imported explicitly; after a bare
        # ``import twisted.internet`` the attribute access
        # ``twisted.internet.epollreactor`` raises AttributeError unless
        # some other code already imported it.
        from twisted.internet import epollreactor
        twisted.internet.reactor = epollreactor.EPollReactor()
        sys.modules['twisted.internet.reactor'] = twisted.internet.reactor
|
2015-08-20 17:27:15 +02:00
|
|
|
|
|
|
|
|
2016-10-20 18:23:39 +02:00
|
|
|
class LbryUploader(object):
    """Seeding LBRY node intended to run in a child process.

    Creates an encrypted stream from a generated file, publishes its stream
    descriptor, and pushes the sd hash onto ``sd_hash_queue`` so the client
    process can download it.  Runs until ``kill_event`` is set, then signals
    completion via ``dead_event``.
    """
    def __init__(self, sd_hash_queue, kill_event, dead_event,
                 file_size, ul_rate_limit=None, is_generous=False):
        # multiprocessing primitives shared with the test process
        self.sd_hash_queue = sd_hash_queue
        self.kill_event = kill_event
        self.dead_event = dead_event
        self.file_size = file_size
        self.ul_rate_limit = ul_rate_limit
        self.is_generous = is_generous
        # these attributes get defined in `start`
        self.reactor = None
        self.sd_identifier = None
        self.session = None
        self.lbry_file_manager = None
        self.server_port = None
        self.kill_check = None

    def start(self):
        """Entry point for the child process: build a session and run the reactor."""
        use_epoll_on_linux()
        # Import the reactor only after use_epoll_on_linux() may have
        # swapped in the epoll reactor for this process.
        from twisted.internet import reactor
        self.reactor = reactor
        logging.debug("Starting the uploader")
        # Re-seed PyCrypto's RNG after fork, as its docs require.
        Random.atfork()
        r = random.Random()
        r.seed("start_lbry_uploader")
        wallet = FakeWallet()
        peer_manager = PeerManager()
        peer_finder = FakePeerFinder(5553, peer_manager, 1)
        hash_announcer = FakeAnnouncer()
        rate_limiter = RateLimiter()
        self.sd_identifier = StreamDescriptorIdentifier()
        db_dir = "server"
        os.mkdir(db_dir)
        self.session = Session(
            conf.settings.data_rate, db_dir=db_dir, lbryid="abcd",
            peer_finder=peer_finder, hash_announcer=hash_announcer, peer_port=5553,
            use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
            blob_tracker_class=DummyBlobAvailabilityTracker,
            dht_node_class=Node, is_generous=self.is_generous)
        stream_info_manager = TempEncryptedFileMetadataManager()
        self.lbry_file_manager = EncryptedFileManager(
            self.session, stream_info_manager, self.sd_identifier)
        if self.ul_rate_limit is not None:
            self.session.rate_limiter.set_ul_limit(self.ul_rate_limit)
        # Defer setup until the reactor is running.
        reactor.callLater(1, self.start_all)
        if not reactor.running:
            reactor.run()

    def start_all(self):
        """Chain session setup, server start, stream creation and sd publishing."""
        d = self.session.setup()
        d.addCallback(lambda _: add_lbry_file_to_sd_identifier(self.sd_identifier))
        d.addCallback(lambda _: self.lbry_file_manager.setup())
        d.addCallback(lambda _: self.start_server())
        d.addCallback(lambda _: self.create_stream())
        d.addCallback(self.create_stream_descriptor)
        d.addCallback(self.put_sd_hash_on_queue)

        def print_error(err):
            logging.critical("Server error: %s", err.getErrorMessage())

        d.addErrback(print_error)
        return d

    def start_server(self):
        """Listen on TCP 5553 and start polling for the kill event."""
        session = self.session
        # Query handler ids 1-3 mirror the protocol's handler registry.
        query_handler_factories = {
            1: BlobAvailabilityHandlerFactory(session.blob_manager),
            2: BlobRequestHandlerFactory(
                session.blob_manager, session.wallet,
                session.payment_rate_manager,
                analytics.Track()),
            3: session.wallet.get_wallet_info_query_handler_factory(),
        }
        server_factory = ServerProtocolFactory(session.rate_limiter,
                                               query_handler_factories,
                                               session.peer_manager)
        self.server_port = self.reactor.listenTCP(5553, server_factory)
        logging.debug("Started listening")
        # Poll once a second so the parent test can shut us down remotely.
        self.kill_check = task.LoopingCall(self.check_for_kill)
        self.kill_check.start(1.0)
        return True

    def kill_server(self):
        """Tear down the session and stop the reactor; sets ``dead_event``."""
        session = self.session
        ds = []
        ds.append(session.shut_down())
        ds.append(self.lbry_file_manager.stop())
        if self.server_port:
            ds.append(self.server_port.stopListening())
        self.kill_check.stop()
        self.dead_event.set()
        dl = defer.DeferredList(ds)
        dl.addCallback(lambda _: self.reactor.stop())
        return dl

    def check_for_kill(self):
        # LoopingCall target: shut down once the parent sets the event.
        if self.kill_event.is_set():
            self.kill_server()

    def create_stream(self):
        """Create an lbry file from a deterministic generated test file."""
        # Deterministic pseudo-content so the client can verify an MD5 sum.
        test_file = GenFile(self.file_size, b''.join([chr(i) for i in xrange(0, 64, 6)]))
        d = create_lbry_file(self.session, self.lbry_file_manager, "test_file", test_file)
        return d

    def create_stream_descriptor(self, stream_hash):
        """Write the stream descriptor blob for *stream_hash* and return its hash."""
        descriptor_writer = BlobStreamDescriptorWriter(self.session.blob_manager)
        d = get_sd_info(self.lbry_file_manager.stream_info_manager, stream_hash, True)
        d.addCallback(descriptor_writer.create_descriptor)
        return d

    def put_sd_hash_on_queue(self, sd_hash):
        # Hand the sd hash to the client process waiting on the queue.
        self.sd_hash_queue.put(sd_hash)
|
2015-08-20 17:27:15 +02:00
|
|
|
|
2016-01-22 21:50:18 +01:00
|
|
|
|
2016-10-20 18:23:39 +02:00
|
|
|
def start_lbry_reuploader(sd_hash, kill_event, dead_event,
|
|
|
|
ready_event, n, ul_rate_limit=None, is_generous=False):
|
|
|
|
use_epoll_on_linux()
|
2016-01-22 21:50:18 +01:00
|
|
|
from twisted.internet import reactor
|
|
|
|
|
|
|
|
logging.debug("Starting the uploader")
|
|
|
|
|
|
|
|
Random.atfork()
|
|
|
|
|
|
|
|
r = random.Random()
|
2016-10-20 18:23:39 +02:00
|
|
|
r.seed("start_lbry_reuploader")
|
2016-01-22 21:50:18 +01:00
|
|
|
|
|
|
|
wallet = FakeWallet()
|
|
|
|
peer_port = 5553 + n
|
|
|
|
peer_manager = PeerManager()
|
|
|
|
peer_finder = FakePeerFinder(5553, peer_manager, 1)
|
|
|
|
hash_announcer = FakeAnnouncer()
|
|
|
|
rate_limiter = RateLimiter()
|
|
|
|
sd_identifier = StreamDescriptorIdentifier()
|
|
|
|
|
|
|
|
db_dir = "server_" + str(n)
|
|
|
|
blob_dir = os.path.join(db_dir, "blobfiles")
|
|
|
|
os.mkdir(db_dir)
|
|
|
|
os.mkdir(blob_dir)
|
|
|
|
|
2016-12-21 20:55:43 +01:00
|
|
|
session = Session(conf.settings.data_rate, db_dir=db_dir, lbryid="abcd" + str(n),
|
2016-10-26 09:16:33 +02:00
|
|
|
peer_finder=peer_finder, hash_announcer=hash_announcer,
|
|
|
|
blob_dir=None, peer_port=peer_port,
|
|
|
|
use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
|
2016-12-21 20:55:43 +01:00
|
|
|
blob_tracker_class=DummyBlobAvailabilityTracker, is_generous=conf.settings.is_generous_host)
|
2016-01-22 21:50:18 +01:00
|
|
|
|
2016-09-27 21:08:32 +02:00
|
|
|
stream_info_manager = TempEncryptedFileMetadataManager()
|
2016-01-22 21:50:18 +01:00
|
|
|
|
2016-09-27 21:08:32 +02:00
|
|
|
lbry_file_manager = EncryptedFileManager(session, stream_info_manager, sd_identifier)
|
2016-01-22 21:50:18 +01:00
|
|
|
|
|
|
|
if ul_rate_limit is not None:
|
|
|
|
session.rate_limiter.set_ul_limit(ul_rate_limit)
|
|
|
|
|
|
|
|
def make_downloader(metadata, prm):
|
|
|
|
info_validator = metadata.validator
|
|
|
|
options = metadata.options
|
|
|
|
factories = metadata.factories
|
|
|
|
chosen_options = [o.default_value for o in options.get_downloader_options(info_validator, prm)]
|
|
|
|
return factories[0].make_downloader(metadata, chosen_options, prm)
|
|
|
|
|
|
|
|
def download_file():
|
2016-10-03 08:59:40 +02:00
|
|
|
prm = session.payment_rate_manager
|
2016-01-22 21:50:18 +01:00
|
|
|
d = download_sd_blob(session, sd_hash, prm)
|
|
|
|
d.addCallback(sd_identifier.get_metadata_for_sd_blob)
|
|
|
|
d.addCallback(make_downloader, prm)
|
|
|
|
d.addCallback(lambda downloader: downloader.start())
|
|
|
|
return d
|
|
|
|
|
|
|
|
def start_transfer():
|
|
|
|
|
|
|
|
logging.debug("Starting the transfer")
|
|
|
|
|
|
|
|
d = session.setup()
|
|
|
|
d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
|
|
|
|
d.addCallback(lambda _: lbry_file_manager.setup())
|
|
|
|
d.addCallback(lambda _: download_file())
|
|
|
|
|
|
|
|
return d
|
|
|
|
|
|
|
|
def start_server():
|
|
|
|
|
|
|
|
server_port = None
|
|
|
|
|
|
|
|
query_handler_factories = {
|
2016-12-30 16:07:24 +01:00
|
|
|
1: BlobAvailabilityHandlerFactory(session.blob_manager),
|
|
|
|
2: BlobRequestHandlerFactory(
|
2016-09-30 06:26:27 +02:00
|
|
|
session.blob_manager, session.wallet,
|
|
|
|
session.payment_rate_manager,
|
2016-12-30 16:07:24 +01:00
|
|
|
analytics.Track()),
|
|
|
|
3: session.wallet.get_wallet_info_query_handler_factory(),
|
2016-01-22 21:50:18 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
server_factory = ServerProtocolFactory(session.rate_limiter,
|
|
|
|
query_handler_factories,
|
|
|
|
session.peer_manager)
|
|
|
|
|
|
|
|
server_port = reactor.listenTCP(peer_port, server_factory)
|
|
|
|
logging.debug("Started listening")
|
|
|
|
|
|
|
|
def kill_server():
|
|
|
|
ds = []
|
|
|
|
ds.append(session.shut_down())
|
|
|
|
ds.append(lbry_file_manager.stop())
|
|
|
|
if server_port:
|
|
|
|
ds.append(server_port.stopListening())
|
|
|
|
kill_check.stop()
|
|
|
|
dead_event.set()
|
|
|
|
dl = defer.DeferredList(ds)
|
|
|
|
dl.addCallback(lambda _: reactor.stop())
|
|
|
|
return dl
|
|
|
|
|
|
|
|
def check_for_kill():
|
|
|
|
if kill_event.is_set():
|
|
|
|
kill_server()
|
|
|
|
|
|
|
|
kill_check = task.LoopingCall(check_for_kill)
|
|
|
|
kill_check.start(1.0)
|
|
|
|
ready_event.set()
|
|
|
|
logging.debug("set the ready event")
|
|
|
|
|
|
|
|
d = task.deferLater(reactor, 1.0, start_transfer)
|
|
|
|
d.addCallback(lambda _: start_server())
|
2016-10-03 22:37:27 +02:00
|
|
|
if not reactor.running:
|
|
|
|
reactor.run()
|
2016-01-22 21:50:18 +01:00
|
|
|
|
|
|
|
|
2015-08-20 17:27:15 +02:00
|
|
|
def start_live_server(sd_hash_queue, kill_event, dead_event):
    """Child-process entry point: serve a live stream until killed.

    Publishes a live stream's sd hash onto ``sd_hash_queue`` for the client,
    then blocks until ``kill_event`` is set, shutting down cleanly and
    signalling via ``dead_event``.
    """
    use_epoll_on_linux()
    # Import after the reactor may have been swapped for epoll.
    from twisted.internet import reactor

    logging.debug("In start_server.")

    # Re-seed PyCrypto's RNG after fork, as its docs require.
    Random.atfork()

    r = random.Random()
    r.seed("start_live_server")

    wallet = FakeWallet()
    peer_manager = PeerManager()
    peer_finder = FakePeerFinder(5553, peer_manager, 1)
    hash_announcer = FakeAnnouncer()
    rate_limiter = DummyRateLimiter()
    sd_identifier = StreamDescriptorIdentifier()

    db_dir = "server"
    os.mkdir(db_dir)

    session = Session(conf.settings.data_rate, db_dir=db_dir, lbryid="abcd",
                      peer_finder=peer_finder, hash_announcer=hash_announcer, peer_port=5553,
                      use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
                      blob_tracker_class=DummyBlobAvailabilityTracker, is_generous=conf.settings.is_generous_host)
    stream_info_manager = DBLiveStreamMetadataManager(session.db_dir, hash_announcer)

    logging.debug("Created the session")

    # Single-element list so nested closures can store/read the listening port.
    server_port = []

    def start_listening():
        logging.debug("Starting the server protocol")
        # Query handler ids 1-3 mirror the protocol's handler registry;
        # id 1 serves live-stream blob info rather than blob availability here.
        query_handler_factories = {
            1: CryptBlobInfoQueryHandlerFactory(stream_info_manager, session.wallet,
                                                session.payment_rate_manager),
            2: BlobRequestHandlerFactory(session.blob_manager, session.wallet,
                                         session.payment_rate_manager,
                                         analytics.Track()),
            3: session.wallet.get_wallet_info_query_handler_factory()
        }

        server_factory = ServerProtocolFactory(session.rate_limiter,
                                               query_handler_factories,
                                               session.peer_manager)
        server_port.append(reactor.listenTCP(5553, server_factory))
        logging.debug("Server protocol has started")

    def create_stream():
        logging.debug("Making the live stream")
        # Deterministic pseudo-content so the client can verify an MD5 sum.
        test_file = GenFile(5209343, b''.join([chr(i + 2) for i in xrange(0, 64, 6)]))
        stream_creator_helper = FileLiveStreamCreator(session.blob_manager, stream_info_manager,
                                                      "test_file", test_file)
        d = stream_creator_helper.setup()
        d.addCallback(lambda _: stream_creator_helper.publish_stream_descriptor())
        d.addCallback(put_sd_hash_on_queue)
        d.addCallback(lambda _: stream_creator_helper.start_streaming())
        return d

    def put_sd_hash_on_queue(sd_hash):
        # Hand the sd hash to the client process waiting on the queue.
        logging.debug("Telling the client to start running. Stream hash: %s", str(sd_hash))
        sd_hash_queue.put(sd_hash)
        logging.debug("sd hash has been added to the queue")

    def set_dead_event():
        logging.debug("Setting the dead event")
        dead_event.set()

    def print_error(err):
        logging.debug("An error occurred during shutdown: %s", err.getTraceback())

    def stop_reactor():
        logging.debug("Server is stopping its reactor")
        reactor.stop()

    def shut_down(arg):
        # Runs on success or failure (addBoth); errbacks between steps keep
        # the teardown chain moving even if an individual step fails.
        logging.debug("Shutting down")
        if isinstance(arg, Failure):
            logging.error("Shut down is due to an error: %s", arg.getTraceback())
        d = defer.maybeDeferred(server_port[0].stopListening)
        d.addErrback(print_error)
        d.addCallback(lambda _: session.shut_down())
        d.addCallback(lambda _: stream_info_manager.stop())
        d.addErrback(print_error)
        d.addCallback(lambda _: set_dead_event())
        d.addErrback(print_error)
        d.addCallback(lambda _: reactor.callLater(0, stop_reactor))
        d.addErrback(print_error)
        return d

    def wait_for_kill_event():
        # Deferred that fires once the parent test sets kill_event.
        d = defer.Deferred()

        def check_for_kill():
            if kill_event.is_set():
                logging.debug("Kill event has been found set")
                kill_check.stop()
                d.callback(True)

        kill_check = task.LoopingCall(check_for_kill)
        kill_check.start(1.0)

        return d

    def enable_live_stream():
        # Register live-stream downloader support with the sd identifier.
        add_live_stream_to_sd_identifier(sd_identifier, session.base_payment_rate_manager)
        add_full_live_stream_downloader_to_sd_identifier(session, stream_info_manager, sd_identifier,
                                                         session.base_payment_rate_manager)

    def run_server():
        d = session.setup()
        d.addCallback(lambda _: stream_info_manager.setup())
        d.addCallback(lambda _: enable_live_stream())
        d.addCallback(lambda _: start_listening())
        d.addCallback(lambda _: create_stream())
        d.addCallback(lambda _: wait_for_kill_event())
        d.addBoth(shut_down)
        return d

    # Defer setup until the reactor is running.
    reactor.callLater(1, run_server)
    if not reactor.running:
        reactor.run()
|
2015-08-20 17:27:15 +02:00
|
|
|
|
|
|
|
|
2016-10-20 07:35:18 +02:00
|
|
|
def start_blob_uploader(blob_hash_queue, kill_event, dead_event, slow, is_generous=False):
    """Child-process entry point: serve one 2 MiB blob, optionally rate-limited.

    With ``slow=True`` the uploader listens on port 5553 with a ~2 KiB/s
    upload cap; otherwise it listens on 5554 unthrottled.  The created
    blob's hash is pushed onto ``blob_hash_queue``; shuts down when
    ``kill_event`` is set, signalling via ``dead_event``.
    """
    use_epoll_on_linux()
    # Import after the reactor may have been swapped for epoll.
    from twisted.internet import reactor

    logging.debug("Starting the uploader")

    # Re-seed PyCrypto's RNG after fork, as its docs require.
    Random.atfork()

    wallet = FakeWallet()
    peer_manager = PeerManager()
    peer_finder = FakePeerFinder(5553, peer_manager, 1)
    hash_announcer = FakeAnnouncer()
    rate_limiter = RateLimiter()

    # Distinct port/directory per variant so slow and fast uploaders coexist.
    if slow is True:
        peer_port = 5553
        db_dir = "server1"
    else:
        peer_port = 5554
        db_dir = "server2"
    blob_dir = os.path.join(db_dir, "blobfiles")
    os.mkdir(db_dir)
    os.mkdir(blob_dir)

    session = Session(conf.settings.data_rate, db_dir=db_dir, lbryid="efgh",
                      peer_finder=peer_finder, hash_announcer=hash_announcer,
                      blob_dir=blob_dir, peer_port=peer_port,
                      use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
                      blob_tracker_class=DummyBlobAvailabilityTracker, is_generous=conf.settings.is_generous_host)

    if slow is True:
        # Throttle uploads to 2 KiB/s to exercise slow-peer handling.
        session.rate_limiter.set_ul_limit(2 ** 11)

    def start_all():
        d = session.setup()
        d.addCallback(lambda _: start_server())
        d.addCallback(lambda _: create_single_blob())
        d.addCallback(put_blob_hash_on_queue)

        def print_error(err):
            logging.critical("Server error: %s", err.getErrorMessage())

        d.addErrback(print_error)
        return d

    def start_server():

        server_port = None

        # Query handler ids 1-3 mirror the protocol's handler registry.
        query_handler_factories = {
            1: BlobAvailabilityHandlerFactory(session.blob_manager),
            2: BlobRequestHandlerFactory(session.blob_manager, session.wallet,
                                         session.payment_rate_manager,
                                         analytics.Track()),
            3: session.wallet.get_wallet_info_query_handler_factory(),
        }

        server_factory = ServerProtocolFactory(session.rate_limiter,
                                               query_handler_factories,
                                               session.peer_manager)

        server_port = reactor.listenTCP(peer_port, server_factory)
        logging.debug("Started listening")

        def kill_server():
            # Stop everything, mark us dead, then stop the reactor.
            ds = []
            ds.append(session.shut_down())
            if server_port:
                ds.append(server_port.stopListening())
            kill_check.stop()
            dead_event.set()
            dl = defer.DeferredList(ds)
            dl.addCallback(lambda _: reactor.stop())
            return dl

        def check_for_kill():
            if kill_event.is_set():
                kill_server()

        # Poll once a second so the parent test can shut us down remotely.
        kill_check = task.LoopingCall(check_for_kill)
        kill_check.start(1.0)
        return True

    def create_single_blob():
        # One 2 MiB blob of zeros; close() finalizes and yields the blob hash.
        blob_creator = session.blob_manager.get_blob_creator()
        blob_creator.write("0" * 2 ** 21)
        return blob_creator.close()

    def put_blob_hash_on_queue(blob_hash):
        # Hand the blob hash to the client process waiting on the queue.
        logging.debug("Telling the client to start running. Blob hash: %s", str(blob_hash))
        blob_hash_queue.put(blob_hash)
        logging.debug("blob hash has been added to the queue")

    # Defer setup until the reactor is running.
    reactor.callLater(1, start_all)
    if not reactor.running:
        reactor.run()
|
2015-08-20 17:27:15 +02:00
|
|
|
|
|
|
|
|
|
|
|
class TestTransfer(TestCase):
|
|
|
|
def setUp(self):
|
|
|
|
self.server_processes = []
|
|
|
|
self.session = None
|
|
|
|
self.stream_info_manager = None
|
|
|
|
self.lbry_file_manager = None
|
2016-10-20 07:35:18 +02:00
|
|
|
self.is_generous = True
|
2015-08-20 17:27:15 +02:00
|
|
|
self.addCleanup(self.take_down_env)
|
|
|
|
|
|
|
|
    def take_down_env(self):
        """Cleanup hook: stop any live components, then wipe test artifacts.

        Stops the lbry file manager, session and stream info manager if a
        test created them, then removes the working directories/files and
        terminates any child server processes.  Returns a Deferred that
        fires when cleanup is done.
        """
        d = defer.succeed(True)
        if self.lbry_file_manager is not None:
            d.addCallback(lambda _: self.lbry_file_manager.stop())
        if self.session is not None:
            d.addCallback(lambda _: self.session.shut_down())
        if self.stream_info_manager is not None:
            d.addCallback(lambda _: self.stream_info_manager.stop())

        def delete_test_env():
            # Remove everything the server/client processes may have created.
            dirs = ['server', 'server1', 'server2', 'client']
            files = ['test_file']
            for di in dirs:
                if os.path.exists(di):
                    shutil.rmtree(di)
            for f in files:
                if os.path.exists(f):
                    os.remove(f)
            for p in self.server_processes:
                p.terminate()
            return True

        # Blocking filesystem work runs in a thread to keep the reactor free.
        d.addCallback(lambda _: threads.deferToThread(delete_test_env))
        return d
|
|
|
|
|
|
|
|
    @staticmethod
    def wait_for_event(event, timeout):
        """Return a Deferred that fires when *event* is set or *timeout* expires.

        Polls the multiprocessing event every 0.1s; fires with True in
        either case (the timeout path only logs a warning).
        """
        from twisted.internet import reactor
        d = defer.Deferred()

        def stop():
            # Common exit: halt polling, cancel the pending timeout (if any),
            # then fire the deferred exactly once.
            set_check.stop()
            if stop_call.active():
                stop_call.cancel()
            d.callback(True)

        def check_if_event_set():
            if event.is_set():
                logging.debug("Dead event has been found set")
                stop()

        def done_waiting():
            logging.warning("Event has not been found set and timeout has expired")
            stop()

        set_check = task.LoopingCall(check_if_event_set)
        set_check.start(.1)
        stop_call = reactor.callLater(timeout, done_waiting)
        return d
|
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
def wait_for_hash_from_queue(hash_queue):
|
|
|
|
logging.debug("Waiting for the sd_hash to come through the queue")
|
|
|
|
|
|
|
|
d = defer.Deferred()
|
|
|
|
|
|
|
|
def check_for_start():
|
|
|
|
if hash_queue.empty() is False:
|
|
|
|
logging.debug("Client start event has been found set")
|
|
|
|
start_check.stop()
|
|
|
|
d.callback(hash_queue.get(False))
|
|
|
|
else:
|
|
|
|
logging.debug("Client start event has NOT been found set")
|
|
|
|
|
|
|
|
start_check = task.LoopingCall(check_for_start)
|
|
|
|
start_check.start(1.0)
|
|
|
|
|
|
|
|
return d
|
|
|
|
|
|
|
|
    def test_lbry_transfer(self):
        """End-to-end transfer: a child-process uploader seeds a stream and
        this process downloads it and verifies the content's MD5 sum."""
        sd_hash_queue = Queue()
        kill_event = Event()
        dead_event = Event()
        # 5209343 bytes matches the generated file whose MD5 is asserted below.
        lbry_uploader = LbryUploader(sd_hash_queue, kill_event, dead_event, 5209343)
        uploader = Process(target=lbry_uploader.start)
        uploader.start()
        self.server_processes.append(uploader)

        logging.debug("Testing transfer")

        wallet = FakeWallet()
        peer_manager = PeerManager()
        peer_finder = FakePeerFinder(5553, peer_manager, 1)
        hash_announcer = FakeAnnouncer()
        rate_limiter = DummyRateLimiter()
        sd_identifier = StreamDescriptorIdentifier()

        db_dir = "client"
        blob_dir = os.path.join(db_dir, "blobfiles")
        os.mkdir(db_dir)
        os.mkdir(blob_dir)

        self.session = Session(
            conf.settings.data_rate, db_dir=db_dir, lbryid="abcd",
            peer_finder=peer_finder, hash_announcer=hash_announcer,
            blob_dir=blob_dir, peer_port=5553,
            use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
            blob_tracker_class=DummyBlobAvailabilityTracker,
            dht_node_class=Node, is_generous=self.is_generous)

        self.stream_info_manager = TempEncryptedFileMetadataManager()

        self.lbry_file_manager = EncryptedFileManager(
            self.session, self.stream_info_manager, sd_identifier)

        def make_downloader(metadata, prm):
            # Build a downloader from sd-blob metadata using each option's default.
            info_validator = metadata.validator
            options = metadata.options
            factories = metadata.factories
            chosen_options = [
                o.default_value for o in options.get_downloader_options(info_validator, prm)
            ]
            return factories[0].make_downloader(metadata, chosen_options, prm)

        def download_file(sd_hash):
            # Fetch the sd blob, resolve its metadata, then run the downloader.
            prm = self.session.payment_rate_manager
            d = download_sd_blob(self.session, sd_hash, prm)
            d.addCallback(sd_identifier.get_metadata_for_sd_blob)
            d.addCallback(make_downloader, prm)
            d.addCallback(lambda downloader: downloader.start())
            return d

        def check_md5_sum():
            # Known digest of the uploader's deterministic 5209343-byte file.
            # NOTE(review): the file handle is not closed explicitly.
            f = open('test_file')
            hashsum = MD5.new()
            hashsum.update(f.read())
            self.assertEqual(hashsum.hexdigest(), "4ca2aafb4101c1e42235aad24fbb83be")

        @defer.inlineCallbacks
        def start_transfer(sd_hash):
            logging.debug("Starting the transfer")
            yield self.session.setup()
            yield add_lbry_file_to_sd_identifier(sd_identifier)
            yield self.lbry_file_manager.setup()
            yield download_file(sd_hash)
            yield check_md5_sum()

        def stop(arg):
            # Runs on success or failure (addBoth): signal the uploader to
            # die, wait for it, then re-raise the original result/failure.
            if isinstance(arg, Failure):
                logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback())
            else:
                logging.debug("Client is stopping normally.")
            kill_event.set()
            logging.debug("Set the kill event")
            d = self.wait_for_event(dead_event, 15)

            def print_shutting_down():
                logging.info("Client is shutting down")

            d.addCallback(lambda _: print_shutting_down())
            # Propagate the original outcome so test failures still surface.
            d.addCallback(lambda _: arg)
            return d

        d = self.wait_for_hash_from_queue(sd_hash_queue)
        d.addCallback(start_transfer)
        d.addBoth(stop)

        return d
|
|
|
|
|
2016-10-06 05:17:45 +02:00
|
|
|
@unittest.skip("Sadly skipping failing test instead of fixing it")
def test_live_transfer(self):
    """Download a live stream published by a server subprocess and verify it.

    Spawns ``start_live_server`` in a child process, waits for the stream
    descriptor hash it publishes on ``sd_hash_queue``, downloads and starts
    the stream, checks the resulting ``test_file`` against a known MD5
    digest, and finally shuts the server down via ``kill_event`` /
    ``dead_event``.  Returns a Deferred that fires when the chain completes.
    """
    sd_hash_queue = Queue()
    kill_event = Event()
    dead_event = Event()
    server_args = (sd_hash_queue, kill_event, dead_event)
    server = Process(target=start_live_server, args=server_args)
    server.start()
    # Tracked so tearDown (outside this view) can reap the subprocess.
    self.server_processes.append(server)

    wallet = FakeWallet()
    peer_manager = PeerManager()
    peer_finder = FakePeerFinder(5553, peer_manager, 1)
    hash_announcer = FakeAnnouncer()
    rate_limiter = DummyRateLimiter()
    sd_identifier = StreamDescriptorIdentifier()

    db_dir = "client"
    os.mkdir(db_dir)

    self.session = Session(
        conf.settings.data_rate, db_dir=db_dir, lbryid="abcd",
        peer_finder=peer_finder, hash_announcer=hash_announcer, blob_dir=None,
        peer_port=5553, use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
        blob_tracker_class=DummyBlobAvailabilityTracker, dht_node_class=Node
    )

    self.stream_info_manager = TempLiveStreamMetadataManager(hash_announcer)

    # Fires with the sd blob hash once the server subprocess publishes it.
    d = self.wait_for_hash_from_queue(sd_hash_queue)

    def create_downloader(metadata, prm):
        # Build a downloader from the first available factory, taking the
        # default value of every downloader option.
        info_validator = metadata.validator
        options = metadata.options
        factories = metadata.factories
        chosen_options = [
            o.default_value for o in options.get_downloader_options(info_validator, prm)]
        return factories[0].make_downloader(metadata, chosen_options, prm)

    def start_lbry_file(lbry_file):
        # Fix: dropped the original's no-op self-assignment `lbry_file = lbry_file`.
        return lbry_file.start()

    def download_stream(sd_blob_hash):
        prm = self.session.payment_rate_manager
        d = download_sd_blob(self.session, sd_blob_hash, prm)
        d.addCallback(sd_identifier.get_metadata_for_sd_blob)
        d.addCallback(create_downloader, prm)
        d.addCallback(start_lbry_file)
        return d

    def do_download(sd_blob_hash):
        logging.debug("Starting the download")

        d = self.session.setup()
        d.addCallback(lambda _: enable_live_stream())
        d.addCallback(lambda _: download_stream(sd_blob_hash))
        return d

    def enable_live_stream():
        # Register the live-stream descriptor/downloader handlers on the
        # shared sd_identifier before attempting the download.
        add_live_stream_to_sd_identifier(sd_identifier, self.session.payment_rate_manager)
        add_full_live_stream_downloader_to_sd_identifier(self.session, self.stream_info_manager,
                                                         sd_identifier,
                                                         self.session.payment_rate_manager)

    d.addCallback(do_download)

    def check_md5_sum():
        # Fix: use a context manager so the file handle is closed
        # (the original leaked the handle from a bare open()).
        with open('test_file') as f:
            hashsum = MD5.new()
            hashsum.update(f.read())
        self.assertEqual(hashsum.hexdigest(), "215b177db8eed86d028b37e5cbad55c7")

    d.addCallback(lambda _: check_md5_sum())

    def stop(arg):
        # Runs on both success and failure (addBoth); always signals the
        # server subprocess to die and re-raises the original result.
        if isinstance(arg, Failure):
            logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback())
        else:
            logging.debug("Client is stopping normally.")
        kill_event.set()
        logging.debug("Set the kill event")
        d = self.wait_for_event(dead_event, 15)

        def print_shutting_down():
            logging.info("Client is shutting down")

        d.addCallback(lambda _: print_shutting_down())
        # Propagate the original success value or Failure to the outer chain.
        d.addCallback(lambda _: arg)
        return d

    d.addBoth(stop)
    return d
|
|
|
|
def test_last_blob_retrieval(self):
    """Download one blob served by two uploaders (one fast, one slow).

    Starts two ``start_blob_uploader`` subprocesses that publish the same
    blob hash, asserts both announce the identical hash, downloads the blob
    with a StandaloneBlobDownloader, then signals both uploaders to die and
    waits for their dead events.  Returns a Deferred for the whole chain.
    """
    kill_event = Event()
    dead_event_1 = Event()
    blob_hash_queue_1 = Queue()
    blob_hash_queue_2 = Queue()
    # Fourth arg False/True selects the fast vs. slow uploader behavior.
    fast_uploader = Process(target=start_blob_uploader,
                            args=(blob_hash_queue_1, kill_event, dead_event_1, False))
    fast_uploader.start()
    self.server_processes.append(fast_uploader)
    dead_event_2 = Event()
    slow_uploader = Process(target=start_blob_uploader,
                            args=(blob_hash_queue_2, kill_event, dead_event_2, True))
    slow_uploader.start()
    self.server_processes.append(slow_uploader)

    logging.debug("Testing transfer")

    wallet = FakeWallet()
    peer_manager = PeerManager()
    # 2 = number of peers the fake finder will report (one per uploader).
    peer_finder = FakePeerFinder(5553, peer_manager, 2)
    hash_announcer = FakeAnnouncer()
    rate_limiter = DummyRateLimiter()

    db_dir = "client"
    blob_dir = os.path.join(db_dir, "blobfiles")
    os.mkdir(db_dir)
    os.mkdir(blob_dir)

    self.session = Session(
        conf.settings.data_rate, db_dir=db_dir, lbryid="abcd",
        peer_finder=peer_finder, hash_announcer=hash_announcer,
        blob_dir=blob_dir, peer_port=5553,
        use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
        blob_tracker_class=DummyBlobAvailabilityTracker,
        is_generous=conf.settings.is_generous_host)

    # Wait for both uploaders to publish their blob hash; fail fast if
    # either queue errors out.
    d1 = self.wait_for_hash_from_queue(blob_hash_queue_1)
    d2 = self.wait_for_hash_from_queue(blob_hash_queue_2)
    d = defer.DeferredList([d1, d2], fireOnOneErrback=True)

    def get_blob_hash(results):
        # Both uploaders must announce the same blob; results are
        # (success, value) pairs from the DeferredList.
        self.assertEqual(results[0][1], results[1][1])
        return results[0][1]

    d.addCallback(get_blob_hash)

    def download_blob(blob_hash):
        prm = self.session.payment_rate_manager
        downloader = StandaloneBlobDownloader(
            blob_hash, self.session.blob_manager, peer_finder, rate_limiter, prm, wallet)
        d = downloader.download()
        return d

    def start_transfer(blob_hash):

        logging.debug("Starting the transfer")

        d = self.session.setup()
        d.addCallback(lambda _: download_blob(blob_hash))

        return d

    d.addCallback(start_transfer)

    def stop(arg):
        # Cleanup path for both success and failure (addBoth): kill both
        # uploaders, wait for their dead events, then re-surface `arg`.
        if isinstance(arg, Failure):
            logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback())
        else:
            logging.debug("Client is stopping normally.")
        kill_event.set()
        logging.debug("Set the kill event")
        d1 = self.wait_for_event(dead_event_1, 15)
        d2 = self.wait_for_event(dead_event_2, 15)
        dl = defer.DeferredList([d1, d2])

        def print_shutting_down():
            logging.info("Client is shutting down")

        dl.addCallback(lambda _: print_shutting_down())
        # Propagate the original result (value or Failure) outward.
        dl.addCallback(lambda _: arg)
        return dl

    d.addBoth(stop)
    return d
|
|
|
|
2016-01-16 07:16:37 +01:00
|
|
|
def test_double_download(self):
    """Download the same stream twice, delete one copy, and check the other.

    Spawns an ``LbryUploader`` subprocess, downloads the stream it publishes
    twice (collecting both downloaders), verifies the file's MD5, deletes
    the first lbry file (and the stream metadata if no references remain),
    and asserts the second downloader still reports a fully-complete status.
    Returns a Deferred for the whole chain.
    """
    sd_hash_queue = Queue()
    kill_event = Event()
    dead_event = Event()
    lbry_uploader = LbryUploader(sd_hash_queue, kill_event, dead_event, 5209343)
    uploader = Process(target=lbry_uploader.start)
    uploader.start()
    self.server_processes.append(uploader)

    logging.debug("Testing double download")

    wallet = FakeWallet()
    peer_manager = PeerManager()
    peer_finder = FakePeerFinder(5553, peer_manager, 1)
    hash_announcer = FakeAnnouncer()
    rate_limiter = DummyRateLimiter()
    sd_identifier = StreamDescriptorIdentifier()

    # Collects the downloader produced by each download_file() call.
    downloaders = []

    db_dir = "client"
    blob_dir = os.path.join(db_dir, "blobfiles")
    os.mkdir(db_dir)
    os.mkdir(blob_dir)

    self.session = Session(conf.settings.data_rate, db_dir=db_dir, lbryid="abcd",
                           peer_finder=peer_finder, hash_announcer=hash_announcer,
                           blob_dir=blob_dir, peer_port=5553, use_upnp=False,
                           rate_limiter=rate_limiter, wallet=wallet,
                           blob_tracker_class=DummyBlobAvailabilityTracker,
                           is_generous=conf.settings.is_generous_host)

    self.stream_info_manager = DBEncryptedFileMetadataManager(self.session.db_dir)
    self.lbry_file_manager = EncryptedFileManager(self.session, self.stream_info_manager, sd_identifier)

    @defer.inlineCallbacks
    def make_downloader(metadata, prm):
        # Build a downloader from the first available factory, taking the
        # default value of every downloader option.
        info_validator = metadata.validator
        options = metadata.options
        factories = metadata.factories
        chosen_options = [
            o.default_value for o in options.get_downloader_options(info_validator, prm)
        ]
        downloader = yield factories[0].make_downloader(metadata, chosen_options, prm)
        defer.returnValue(downloader)

    # Fix: removed the dead `append_downloader` helper — it was defined but
    # never used; download_file() appends to `downloaders` directly.

    @defer.inlineCallbacks
    def download_file(sd_hash):
        prm = self.session.payment_rate_manager
        sd_blob = yield download_sd_blob(self.session, sd_hash, prm)
        metadata = yield sd_identifier.get_metadata_for_sd_blob(sd_blob)
        downloader = yield make_downloader(metadata, prm)
        downloaders.append(downloader)
        finished_value = yield downloader.start()
        defer.returnValue(finished_value)

    def check_md5_sum():
        # Fix: use a context manager so the file handle is closed
        # (the original leaked the handle from a bare open()).
        with open('test_file') as f:
            hashsum = MD5.new()
            hashsum.update(f.read())
        self.assertEqual(hashsum.hexdigest(), "4ca2aafb4101c1e42235aad24fbb83be")

    def delete_lbry_file():
        logging.debug("deleting the file")
        d = self.lbry_file_manager.delete_lbry_file(downloaders[0])
        d.addCallback(lambda _: self.lbry_file_manager.get_count_for_stream_hash(downloaders[0].stream_hash))
        # Only drop the stream metadata once no lbry files reference it.
        d.addCallback(
            lambda c: self.stream_info_manager.delete_stream(downloaders[1].stream_hash) if c == 0 else True)
        return d

    def check_lbry_file():
        # NOTE(review): status() is queried twice back-to-back here, as in
        # the original — presumably to pick up post-delete state; confirm.
        d = downloaders[1].status()
        d.addCallback(lambda _: downloaders[1].status())

        def check_status_report(status_report):
            # The surviving download must still be complete: all 3 known
            # blobs accounted for.
            self.assertEqual(status_report.num_known, status_report.num_completed)
            self.assertEqual(status_report.num_known, 3)

        d.addCallback(check_status_report)
        return d

    @defer.inlineCallbacks
    def start_transfer(sd_hash):
        logging.debug("Starting the transfer")
        yield self.session.setup()
        yield self.stream_info_manager.setup()
        yield add_lbry_file_to_sd_identifier(sd_identifier)
        yield self.lbry_file_manager.setup()
        yield download_file(sd_hash)
        yield check_md5_sum()
        yield download_file(sd_hash)
        yield delete_lbry_file()
        yield check_lbry_file()

    def stop(arg):
        # Runs on both success and failure (addBoth); always signals the
        # uploader subprocess to die and re-raises the original result.
        if isinstance(arg, Failure):
            logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback())
        else:
            logging.debug("Client is stopping normally.")
        kill_event.set()
        logging.debug("Set the kill event")
        d = self.wait_for_event(dead_event, 15)

        def print_shutting_down():
            logging.info("Client is shutting down")

        d.addCallback(lambda _: print_shutting_down())
        d.addCallback(lambda _: arg)
        return d

    d = self.wait_for_hash_from_queue(sd_hash_queue)
    d.addCallback(start_transfer)
    d.addBoth(stop)
    return d
|
|
|
|
2016-09-10 08:39:35 +02:00
|
|
|
@unittest.skip("Sadly skipping failing test instead of fixing it")
def test_multiple_uploaders(self):
    """Download a stream that is served by several uploader subprocesses.

    Starts one ``LbryUploader`` subprocess, then — once its sd hash is
    known — starts ``num_uploaders - 1`` re-uploader subprocesses, waits
    for their ready events, downloads the file, and verifies its MD5.
    Finally signals every uploader to die and waits on all dead events.
    Returns a Deferred for the whole chain.
    """
    sd_hash_queue = Queue()
    num_uploaders = 3
    kill_event = Event()
    dead_events = [Event() for _ in range(num_uploaders)]
    # The primary uploader has no ready event, hence num_uploaders - 1.
    ready_events = [Event() for _ in range(1, num_uploaders)]
    lbry_uploader = LbryUploader(
        sd_hash_queue, kill_event, dead_events[0], 5209343, 9373419, 2**22)
    uploader = Process(target=lbry_uploader.start)
    uploader.start()
    self.server_processes.append(uploader)

    logging.debug("Testing multiple uploaders")

    wallet = FakeWallet()
    peer_manager = PeerManager()
    peer_finder = FakePeerFinder(5553, peer_manager, num_uploaders)
    hash_announcer = FakeAnnouncer()
    rate_limiter = DummyRateLimiter()
    sd_identifier = StreamDescriptorIdentifier()

    db_dir = "client"
    blob_dir = os.path.join(db_dir, "blobfiles")
    os.mkdir(db_dir)
    os.mkdir(blob_dir)

    self.session = Session(conf.settings.data_rate, db_dir=db_dir, lbryid="abcd",
                           peer_finder=peer_finder, hash_announcer=hash_announcer,
                           blob_dir=None, peer_port=5553,
                           use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
                           blob_tracker_class=DummyBlobAvailabilityTracker,
                           is_generous=conf.settings.is_generous_host)

    self.stream_info_manager = TempEncryptedFileMetadataManager()

    self.lbry_file_manager = EncryptedFileManager(
        self.session, self.stream_info_manager, sd_identifier)

    def start_additional_uploaders(sd_hash):
        # Spawn the re-uploaders only once the sd hash is known.
        for i in range(1, num_uploaders):
            uploader = Process(target=start_lbry_reuploader,
                               args=(sd_hash, kill_event, dead_events[i], ready_events[i - 1], i, 2 ** 10))
            uploader.start()
            self.server_processes.append(uploader)
        return defer.succeed(True)

    def wait_for_ready_events():
        return defer.DeferredList([self.wait_for_event(ready_event, 60) for ready_event in ready_events])

    def make_downloader(metadata, prm):
        # Build a downloader from the first available factory, taking the
        # default value of every downloader option.
        info_validator = metadata.validator
        options = metadata.options
        factories = metadata.factories
        chosen_options = [o.default_value for o in options.get_downloader_options(info_validator, prm)]
        return factories[0].make_downloader(metadata, chosen_options, prm)

    def download_file(sd_hash):
        prm = self.session.payment_rate_manager
        d = download_sd_blob(self.session, sd_hash, prm)
        d.addCallback(sd_identifier.get_metadata_for_sd_blob)
        d.addCallback(make_downloader, prm)
        d.addCallback(lambda downloader: downloader.start())
        return d

    def check_md5_sum():
        # Fix: use a context manager so the file handle is closed
        # (the original leaked the handle from a bare open()).
        with open('test_file') as f:
            hashsum = MD5.new()
            hashsum.update(f.read())
        self.assertEqual(hashsum.hexdigest(), "e5941d615f53312fd66638239c1f90d5")

    def start_transfer(sd_hash):

        logging.debug("Starting the transfer")

        d = start_additional_uploaders(sd_hash)
        d.addCallback(lambda _: wait_for_ready_events())
        d.addCallback(lambda _: self.session.setup())
        d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
        d.addCallback(lambda _: self.lbry_file_manager.setup())
        d.addCallback(lambda _: download_file(sd_hash))
        d.addCallback(lambda _: check_md5_sum())

        return d

    def stop(arg):
        # Runs on both success and failure (addBoth); kills every uploader
        # and waits for all of their dead events before re-raising `arg`.
        if isinstance(arg, Failure):
            logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback())
        else:
            logging.debug("Client is stopping normally.")
        kill_event.set()
        logging.debug("Set the kill event")
        d = defer.DeferredList([self.wait_for_event(dead_event, 15) for dead_event in dead_events])

        def print_shutting_down():
            logging.info("Client is shutting down")

        d.addCallback(lambda _: print_shutting_down())
        d.addCallback(lambda _: arg)
        return d

    d = self.wait_for_hash_from_queue(sd_hash_queue)
    d.addCallback(start_transfer)
    d.addBoth(stop)
    return d