Merge branch 'master' into greedy_search

Jack Robison 2018-08-03 13:08:31 -04:00 committed by GitHub
commit f1b19f10cc
56 changed files with 4156 additions and 2514 deletions


@@ -4,7 +4,7 @@ Thanks for reporting an issue to LBRY and helping us improve!
To make it possible for us to help you, please fill out below information carefully.
Before reporting any issues, please make sure that you're using the latest version.
- App: https://github.com/lbryio/lbry-app/releases
- App: https://github.com/lbryio/lbry-desktop/releases
- Daemon: https://github.com/lbryio/lbry/releases
We are also available on Discord at https://chat.lbry.io


@@ -7,10 +7,6 @@ branches:
except:
- gh-pages
notifications:
slack:
secure: "Am13HPtpgCMljh0MDVuoFHvQXB8yhf4Kvf/qAeSp5N0vsHGL70CSF9Ahccw8dVPE6mbuak1OGtSUb6/UaErLHkpz3ztaRLkDa9x7CmBB3Kynnh8oO2VbB7b/2ROULqkhF4VZmAnNfwrQrbC3gs8Sybp261Nyc7y4ww15xDYBrk2fyq4ds2DCaJdRxfJUJFonrZ6KXr3fVaXosO6cjuyS8eRodcmrqsT4cCtinjNTD1hGWoH107E4ObSmpVelxQO193KhNJMRiLlEcVkvYUOqIWBtwdGHbNE/6Yeuq1TXgKJ0KeJWAmW3wTfUYNngGXNAsyCnrhul5TKNevNzfIAQZHvRsczYiWPJV6LtohHT0CcUiCXJtvEPOyahEBfwK3etY/xxFqny7N9OEmpdW2sgsEPNPX2LJynJti2rQA9SuAD1ogR3ZpDy/NXoaAZf8PTdPcuNUMULV9PGG7tVrLBecO/W1qO6hdFxwlLdgqGLxAENZgGp++v/DhPk/WvtmHj7iTbRq0nxaTWyX5uKOn2ADH+k/yfutjv6BsQU9xNyPeZEEtuEpc6X6waiYn/8G9vl9PecvKC5H0MgsZ6asAxmg7mZ3VSMFG7mo8ENeOhSZ0Oz6ZTBILL3wFccZA9uJIq7NWmqC9dRiGiuKXBB62No7sINoHg3114e2xYa9qvNmGg="
cache:
directories:
- $HOME/.cache/pip


@@ -13,24 +13,54 @@ at anytime.
*
### Fixed
* daemon cli spelling fixes
*
* loggly error reporting not following `share_usage_data`
* improper error handling when data is not valid JSON
* edge cases of http mirrored download of blobs
### Deprecated
*
* automatic claim renew; this is no longer needed
*
### Changed
*
*
* api server class to use components, and for all JSONRPC API commands to be callable so long as the required components are available.
* return error messages when required conditions on components are not met for API calls
* `status` to no longer return a base58 encoded `lbry_id`; it is instead returned as the hex encoded `node_id` in a new `dht` field (see the sketch after this list)
* `startup_status` field in the response to `status` to be a dict of component names to status booleans
* renamed the `blockchain_status` field in the response to `status` to `wallet`
* moved and renamed `wallet_is_encrypted` to `is_encrypted` in the `wallet` field in the response to `status`
* moved wallet, upnp and dht startup code from `Session` to `Components`
* attempt blob downloads from http mirror sources (by default) concurrently with p2p sources
* replace miniupnpc with [txupnp](https://github.com/lbryio/txupnp). Since txupnp is still under development, it will internally fall back to miniupnpc.
* simplified test_misc.py in the functional tests
* update `cryptography` requirement to 2.3
### Added
* greedy search with exclude filtering on peer finder calls to iterative find value
*
* `skipped_components` list to the response from `status`
* component statuses (`blockchain_headers`, `dht`, `wallet`, `blob_manager`, `hash_announcer`, and `file_manager`) to the response to `status`
* `skipped_components` config setting, accepts a list of names of components to not run
* `ComponentManager` for managing the life-cycles of dependencies
* `requires` decorator to register the components required by a `jsonrpc_` command, to facilitate commands registering asynchronously
* unittests for `ComponentManager`
* script to generate docs/api.json file (https://github.com/lbryio/lbry.tech/issues/42)
* additional information to the balance error message when editing a claim (https://github.com/lbryio/lbry/pull/1309)
* `address` and `port` arguments to `peer_ping` (https://github.com/lbryio/lbry/issues/1313)
* ability to download from HTTP mirrors by setting `download_mirrors`
* ability to filter peers from an iterative find value operation (finding peers for a blob). This is used to filter peers we've already found for a blob when accumulating the list of peers.
### Removed
*
*
* `session_status` argument and response field from `status`
* most of the internal attributes from `Daemon`
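Taken together, the `status` changes above reshape the response roughly as sketched below. This sketch is assembled from the changelog entries only; the component names and all values are illustrative placeholders, not captured daemon output:

status_sketch = {
    'skipped_components': ['reflector'],                 # new field
    'startup_status': {'database': True, 'dht': True},   # component name -> running boolean
    'dht': {'node_id': 'abcd1234'},                      # hex encoded; replaces the base58 `lbry_id`
    'wallet': {'is_encrypted': False},                   # renamed from `blockchain_status`;
                                                         # `is_encrypted` was `wallet_is_encrypted`
}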
## [0.20.4] - 2018-07-18
### Fixed
* spelling errors in messages printed by `lbrynet-cli`
* high CPU usage when a stream is incomplete and the peers we're requesting from have no more blobs to send us (https://github.com/lbryio/lbry/pull/1301)
### Changed
* keep track of failures for DHT peers for up to ten minutes instead of indefinitely (https://github.com/lbryio/lbry/pull/1300)
* skip ignored peers from iterative lookups instead of blocking the peer who returned them to us too (https://github.com/lbryio/lbry/pull/1300)
* if a node becomes ignored during an iterative find cycle remove it from the shortlist so that we can't return it as a result nor try to probe it anyway (https://github.com/lbryio/lbry/pull/1303)
## [0.20.3] - 2018-07-03

docs/api.json Normal file (1280 lines)

File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
import logging
__version__ = "0.20.3"
__version__ = "0.21.0rc3"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())


@@ -27,7 +27,7 @@ class HashBlobWriter(object):
def write(self, data):
if self.write_handle is None:
log.exception("writer has already been closed")
log.warning("writer has already been closed")
raise IOError('I/O operation on closed file')
self._hashsum.update(data)


@@ -168,9 +168,11 @@ def server_port(server_and_port):
def server_list(servers):
return [server_port(server) for server in servers]
def server_list_reverse(servers):
return ["%s:%s" % (server, port) for server, port in servers]
class Env(envparse.Env):
"""An Env parser that automatically namespaces the variables with LBRY"""
@@ -266,6 +268,7 @@ ADJUSTABLE_SETTINGS = {
'dht_node_port': (int, 4444),
'download_directory': (str, default_download_dir),
'download_timeout': (int, 180),
'download_mirrors': (list, ['blobs.lbry.io']),
'is_generous_host': (bool, True),
'announce_head_blobs_only': (bool, True),
'concurrent_announcers': (int, DEFAULT_CONCURRENT_ANNOUNCERS),
@@ -288,7 +291,7 @@ ADJUSTABLE_SETTINGS = {
'reflect_uploads': (bool, True),
'auto_re_reflect_interval': (int, 86400), # set to 0 to disable
'reflector_servers': (list, [('reflector2.lbry.io', 5566)], server_list, server_list_reverse),
'run_reflector_server': (bool, False),
'run_reflector_server': (bool, False), # adds `reflector` to components_to_skip unless True
'sd_download_timeout': (int, 3),
'share_usage_data': (bool, True), # whether to share usage stats and diagnostic info with LBRY
'peer_search_timeout': (int, 60),
@@ -299,7 +302,8 @@ ADJUSTABLE_SETTINGS = {
'blockchain_name': (str, 'lbrycrd_main'),
'lbryum_servers': (list, [('lbryumx1.lbry.io', 50001), ('lbryumx2.lbry.io',
50001)], server_list, server_list_reverse),
's3_headers_depth': (int, 96 * 10) # download headers from s3 when the local height is more than 10 chunks behind
's3_headers_depth': (int, 96 * 10), # download headers from s3 when the local height is more than 10 chunks behind
'components_to_skip': (list, []) # components which will be skipped during start-up of daemon
}
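A usage sketch for the two settings added above, assuming `conf.initialize_settings()` has already run at daemon startup as usual:

from lbrynet import conf
mirrors = conf.settings['download_mirrors']    # ['blobs.lbry.io'] unless overridden
skipped = conf.settings['components_to_skip']  # e.g. ['reflector'] to never start it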


@@ -1,8 +1,7 @@
import logging
import os
from sqlite3 import IntegrityError
from twisted.internet import threads, defer, task
from lbrynet import conf
from twisted.internet import threads, defer
from lbrynet.blob.blob_file import BlobFile
from lbrynet.blob.creator import BlobFileCreator
@@ -26,22 +25,14 @@ class DiskBlobManager(object):
self.blobs = {}
self.blob_hashes_to_delete = {} # {blob_hash: being_deleted (True/False)}
self.check_should_announce_lc = None
if conf.settings['run_reflector_server']: # TODO: move this looping call to SQLiteStorage
self.check_should_announce_lc = task.LoopingCall(self.storage.verify_will_announce_all_head_and_sd_blobs)
@defer.inlineCallbacks
def setup(self):
if self.check_should_announce_lc and not self.check_should_announce_lc.running:
self.check_should_announce_lc.start(600)
if self._node_datastore is not None:
raw_blob_hashes = yield self.storage.get_all_finished_blobs()
self._node_datastore.completed_blobs.update(raw_blob_hashes)
defer.returnValue(True)
def stop(self):
if self.check_should_announce_lc and self.check_should_announce_lc.running:
self.check_should_announce_lc.stop()
return defer.succeed(True)
def get_blob(self, blob_hash, length=None):


@@ -155,13 +155,23 @@ class InvalidAuthenticationToken(Exception):
class NegotiationError(Exception):
pass
class InvalidCurrencyError(Exception):
def __init__(self, currency):
self.currency = currency
Exception.__init__(
self, 'Invalid currency: {} is not a supported currency.'.format(currency))
class NoSuchDirectoryError(Exception):
def __init__(self, directory):
self.directory = directory
Exception.__init__(self, 'No such directory {}'.format(directory))
class ComponentStartConditionNotMet(Exception):
pass
class ComponentsNotStarted(Exception):
pass


@@ -0,0 +1,100 @@
from random import choice
import logging
from twisted.internet import defer
import treq
from lbrynet.core.Error import DownloadCanceledError
log = logging.getLogger(__name__)
class HTTPBlobDownloader(object):
'''
A downloader that is able to get blobs from HTTP mirrors.
Note that when a blob gets downloaded from a mirror or from a peer, BlobManager will mark it as completed
and cause any other type of downloader to progress to the next missing blob. Also, BlobFile is naturally able
to cancel other writers when a writer finishes first. That's why there is no call to cancel/resume/stop between
different types of downloaders.
'''
def __init__(self, blob_manager, blob_hashes=None, servers=None, client=None):
self.blob_manager = blob_manager
self.servers = servers or []
self.client = client or treq
self.blob_hashes = blob_hashes or []
self.max_failures = 3
self.running = False
self.semaphore = defer.DeferredSemaphore(2)
self.deferreds = []
self.writers = []
def start(self):
if not self.running and self.blob_hashes and self.servers:
return self._start()
return defer.succeed(None)
def stop(self):
if self.running:
for d in reversed(self.deferreds):
d.cancel()
for writer in self.writers:
writer.close(DownloadCanceledError())
self.running = False
self.blob_hashes = []
@defer.inlineCallbacks
def _start(self):
self.running = True
dl = []
for blob_hash in self.blob_hashes:
blob = yield self.blob_manager.get_blob(blob_hash)
if not blob.verified:
d = self.semaphore.run(self.download_blob, blob)
d.addErrback(lambda err: err.check(defer.TimeoutError, defer.CancelledError))
dl.append(d)
self.deferreds = dl
yield defer.DeferredList(dl)
@defer.inlineCallbacks
def download_blob(self, blob):
for _ in range(self.max_failures):
writer, finished_deferred = blob.open_for_writing('mirror')
self.writers.append(writer)
try:
downloaded = yield self._write_blob(writer, blob)
if downloaded:
yield finished_deferred # yield for verification errors, so we log them
if blob.verified:
log.info('Mirror completed download for %s', blob.blob_hash)
break
except (IOError, Exception) as e:
if isinstance(e, DownloadCanceledError) or 'closed file' in str(e):
# some other downloader finished first or it was simply cancelled
log.info("Mirror download cancelled: %s", blob.blob_hash)
break
else:
log.exception('Mirror failed downloading')
finally:
finished_deferred.addBoth(lambda _: None) # suppress echoed errors
if 'mirror' in blob.writers:
writer.close()
self.writers.remove(writer)
@defer.inlineCallbacks
def _write_blob(self, writer, blob):
response = yield self.client.get(url_for(choice(self.servers), blob.blob_hash))
if response.code != 200:
log.debug('Missing a blob: %s', blob.blob_hash)
if blob.blob_hash in self.blob_hashes:
self.blob_hashes.remove(blob.blob_hash)
defer.returnValue(False)
log.debug('Download started: %s', blob.blob_hash)
blob.set_length(response.length)
yield self.client.collect(response, writer.write)
defer.returnValue(True)
def url_for(server, blob_hash=''):
return 'http://{}/{}'.format(server, blob_hash)
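A hedged usage sketch for the downloader above, assuming an already set-up blob manager and the default mirror host; `fetch_from_mirror` is an illustrative helper, not part of the diff:

from twisted.internet import defer

@defer.inlineCallbacks
def fetch_from_mirror(blob_manager, blob_hash):
    # downloads a single blob over HTTP; other downloader types may still win the race
    downloader = HTTPBlobDownloader(blob_manager, blob_hashes=[blob_hash],
                                    servers=['blobs.lbry.io'])
    yield downloader.start()
    downloader.stop()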


@@ -1,282 +0,0 @@
import logging
import miniupnpc
from twisted.internet import threads, defer
from lbrynet.core.BlobManager import DiskBlobManager
from lbrynet.dht import node, hashannouncer
from lbrynet.database.storage import SQLiteStorage
from lbrynet.core.RateLimiter import RateLimiter
from lbrynet.core.utils import generate_id
from lbrynet.core.PaymentRateManager import BasePaymentRateManager, OnlyFreePaymentsManager
log = logging.getLogger(__name__)
class Session(object):
"""This class manages all important services common to any application that uses the network.
the hash announcer, which informs other peers that this peer is
associated with some hash. Usually, this means this peer has a
blob identified by the hash in question, but it can be used for
other purposes.
the peer finder, which finds peers that are associated with some
hash.
the blob manager, which keeps track of which blobs have been
downloaded and provides access to them,
the rate limiter, which attempts to ensure download and upload
rates stay below a set maximum
upnp, which opens holes in compatible firewalls so that remote
peers can connect to this peer.
"""
def __init__(self, blob_data_payment_rate, db_dir=None, node_id=None, peer_manager=None, dht_node_port=None,
known_dht_nodes=None, peer_finder=None, hash_announcer=None, blob_dir=None, blob_manager=None,
peer_port=None, use_upnp=True, rate_limiter=None, wallet=None, dht_node_class=node.Node,
blob_tracker_class=None, payment_rate_manager_class=None, is_generous=True, external_ip=None,
storage=None):
"""@param blob_data_payment_rate: The default payment rate for blob data
@param db_dir: The directory in which levelDB files should be stored
@param node_id: The unique ID of this node
@param peer_manager: An object which keeps track of all known
peers. If None, a PeerManager will be created
@param dht_node_port: The port on which the dht node should
listen for incoming connections
@param known_dht_nodes: A list of nodes which the dht node
should use to bootstrap into the dht
@param peer_finder: An object which is used to look up peers
that are associated with some hash. If None, a
DHTPeerFinder will be used, which looks for peers in the
distributed hash table.
@param hash_announcer: An object which announces to other
peers that this peer is associated with some hash. If
None, and peer_port is not None, a DHTHashAnnouncer will
be used. If None and peer_port is None, a
DummyHashAnnouncer will be used, which will not actually
announce anything.
@param blob_dir: The directory in which blobs will be
stored. If None and blob_manager is None, blobs will be
stored in memory only.
@param blob_manager: An object which keeps track of downloaded
blobs and provides access to them. If None, and blob_dir
is not None, a DiskBlobManager will be used, with the
given blob_dir. If None and blob_dir is None, a
TempBlobManager will be used, which stores blobs in memory
only.
@param peer_port: The port on which other peers should connect
to this peer
@param use_upnp: Whether or not to try to open a hole in the
firewall so that outside peers can connect to this peer's
peer_port and dht_node_port
@param rate_limiter: An object which keeps track of the amount
of data transferred to and from this peer, and can limit
that rate if desired
@param wallet: An object which will be used to keep track of
expected payments and which will pay peers. If None, a
wallet which uses the Point Trader system will be used,
which is meant for testing only
"""
self.db_dir = db_dir
self.node_id = node_id
self.peer_manager = peer_manager
self.peer_finder = peer_finder
self.hash_announcer = hash_announcer
self.dht_node_port = dht_node_port
self.known_dht_nodes = known_dht_nodes
if self.known_dht_nodes is None:
self.known_dht_nodes = []
self.blob_dir = blob_dir
self.blob_manager = blob_manager
# self.blob_tracker = None
# self.blob_tracker_class = blob_tracker_class or BlobAvailabilityTracker
self.peer_port = peer_port
self.use_upnp = use_upnp
self.rate_limiter = rate_limiter
self.external_ip = external_ip
self.upnp_redirects = []
self.wallet = wallet
self.dht_node_class = dht_node_class
self.dht_node = None
self.base_payment_rate_manager = BasePaymentRateManager(blob_data_payment_rate)
self.payment_rate_manager = OnlyFreePaymentsManager()
# self.payment_rate_manager_class = payment_rate_manager_class or NegotiatedPaymentRateManager
# self.is_generous = is_generous
self.storage = storage or SQLiteStorage(self.db_dir)
def setup(self):
"""Create the blob directory and database if necessary, start all desired services"""
log.debug("Starting session.")
if self.node_id is None:
self.node_id = generate_id()
if self.use_upnp is True:
d = self._try_upnp()
else:
d = defer.succeed(True)
d.addCallback(lambda _: self.storage.setup())
d.addCallback(lambda _: self._setup_dht())
d.addCallback(lambda _: self._setup_other_components())
return d
def shut_down(self):
"""Stop all services"""
log.info('Stopping session.')
ds = []
if self.hash_announcer:
self.hash_announcer.stop()
# if self.blob_tracker is not None:
# ds.append(defer.maybeDeferred(self.blob_tracker.stop))
if self.dht_node is not None:
ds.append(defer.maybeDeferred(self.dht_node.stop))
if self.rate_limiter is not None:
ds.append(defer.maybeDeferred(self.rate_limiter.stop))
if self.wallet is not None:
ds.append(defer.maybeDeferred(self.wallet.stop))
if self.blob_manager is not None:
ds.append(defer.maybeDeferred(self.blob_manager.stop))
if self.use_upnp is True:
ds.append(defer.maybeDeferred(self._unset_upnp))
return defer.DeferredList(ds)
def _try_upnp(self):
log.debug("In _try_upnp")
def get_free_port(upnp, port, protocol):
# returns an existing mapping if it exists
mapping = upnp.getspecificportmapping(port, protocol)
if not mapping:
return port
if upnp.lanaddr == mapping[0]:
return mapping[1]
return get_free_port(upnp, port + 1, protocol)
def get_port_mapping(upnp, port, protocol, description):
# try to map to the requested port, if there is already a mapping use the next external
# port available
if protocol not in ['UDP', 'TCP']:
raise Exception("invalid protocol")
port = get_free_port(upnp, port, protocol)
if isinstance(port, tuple):
log.info("Found existing UPnP redirect %s:%i (%s) to %s:%i, using it",
self.external_ip, port, protocol, upnp.lanaddr, port)
return port
upnp.addportmapping(port, protocol, upnp.lanaddr, port,
description, '')
log.info("Set UPnP redirect %s:%i (%s) to %s:%i", self.external_ip, port,
protocol, upnp.lanaddr, port)
return port
def threaded_try_upnp():
if self.use_upnp is False:
log.debug("Not using upnp")
return False
u = miniupnpc.UPnP()
num_devices_found = u.discover()
if num_devices_found > 0:
u.selectigd()
external_ip = u.externalipaddress()
if external_ip != '0.0.0.0' and not self.external_ip:
# best not to rely on this external ip, the router can be behind layers of NATs
self.external_ip = external_ip
if self.peer_port:
self.peer_port = get_port_mapping(u, self.peer_port, 'TCP', 'LBRY peer port')
self.upnp_redirects.append((self.peer_port, 'TCP'))
if self.dht_node_port:
self.dht_node_port = get_port_mapping(u, self.dht_node_port, 'UDP', 'LBRY DHT port')
self.upnp_redirects.append((self.dht_node_port, 'UDP'))
return True
return False
def upnp_failed(err):
log.warning("UPnP failed. Reason: %s", err.getErrorMessage())
return False
d = threads.deferToThread(threaded_try_upnp)
d.addErrback(upnp_failed)
return d
def _setup_dht(self): # does not block startup, the dht will re-attempt if necessary
self.dht_node = self.dht_node_class(
node_id=self.node_id,
udpPort=self.dht_node_port,
externalIP=self.external_ip,
peerPort=self.peer_port,
peer_manager=self.peer_manager,
peer_finder=self.peer_finder,
)
if not self.hash_announcer:
self.hash_announcer = hashannouncer.DHTHashAnnouncer(self.dht_node, self.storage)
self.peer_manager = self.dht_node.peer_manager
self.peer_finder = self.dht_node.peer_finder
d = self.dht_node.start(self.known_dht_nodes)
d.addCallback(lambda _: log.info("Joined the dht"))
d.addCallback(lambda _: self.hash_announcer.start())
def _setup_other_components(self):
log.debug("Setting up the rest of the components")
if self.rate_limiter is None:
self.rate_limiter = RateLimiter()
if self.blob_manager is None:
if self.blob_dir is None:
raise Exception(
"TempBlobManager is no longer supported, specify BlobManager or db_dir")
else:
self.blob_manager = DiskBlobManager(self.blob_dir, self.storage, self.dht_node._dataStore)
# if self.blob_tracker is None:
# self.blob_tracker = self.blob_tracker_class(
# self.blob_manager, self.dht_node.peer_finder, self.dht_node
# )
# if self.payment_rate_manager is None:
# self.payment_rate_manager = self.payment_rate_manager_class(
# self.base_payment_rate_manager, self.blob_tracker, self.is_generous
# )
self.rate_limiter.start()
d = self.blob_manager.setup()
d.addCallback(lambda _: self.wallet.start())
# d.addCallback(lambda _: self.blob_tracker.start())
return d
def _unset_upnp(self):
log.info("Unsetting upnp for session")
def threaded_unset_upnp():
u = miniupnpc.UPnP()
num_devices_found = u.discover()
if num_devices_found > 0:
u.selectigd()
for port, protocol in self.upnp_redirects:
if u.getspecificportmapping(port, protocol) is None:
log.warning(
"UPnP redirect for %s %d was removed by something else.",
protocol, port)
else:
u.deleteportmapping(port, protocol)
log.info("Removed UPnP redirect for %s %d.", protocol, port)
self.upnp_redirects = []
d = threads.deferToThread(threaded_unset_upnp)
d.addErrback(lambda err: str(err))
return d


@@ -7,7 +7,7 @@ from twisted.internet import threads, defer
from lbrynet.core.cryptoutils import get_lbry_hash_obj
from lbrynet.core.client.StandaloneBlobDownloader import StandaloneBlobDownloader
from lbrynet.core.Error import UnknownStreamTypeError, InvalidStreamDescriptorError
from lbrynet.core.HTTPBlobDownloader import HTTPBlobDownloader
log = logging.getLogger(__name__)
@@ -425,7 +425,8 @@ class EncryptedFileStreamDescriptorValidator(object):
@defer.inlineCallbacks
def download_sd_blob(session, blob_hash, payment_rate_manager, timeout=None):
def download_sd_blob(blob_hash, blob_manager, peer_finder, rate_limiter, payment_rate_manager, wallet, timeout=None,
download_mirrors=None):
"""
Downloads a single blob from the network
@@ -439,21 +440,24 @@ def download_sd_blob(session, blob_hash, payment_rate_manager, timeout=None):
"""
downloader = StandaloneBlobDownloader(blob_hash,
session.blob_manager,
session.peer_finder,
session.rate_limiter,
blob_manager,
peer_finder,
rate_limiter,
payment_rate_manager,
session.wallet,
wallet,
timeout)
mirror = HTTPBlobDownloader(blob_manager, [blob_hash], download_mirrors or [])
mirror.start()
sd_blob = yield downloader.download()
mirror.stop()
sd_reader = BlobStreamDescriptorReader(sd_blob)
sd_info = yield sd_reader.get_info()
try:
validate_descriptor(sd_info)
except InvalidStreamDescriptorError as err:
yield session.blob_manager.delete_blobs([blob_hash])
yield blob_manager.delete_blobs([blob_hash])
raise err
raw_sd = yield sd_reader._get_raw_data()
yield session.blob_manager.storage.add_known_blob(blob_hash, len(raw_sd))
yield save_sd_info(session.blob_manager, sd_blob.blob_hash, sd_info)
yield blob_manager.storage.add_known_blob(blob_hash, len(raw_sd))
yield save_sd_info(blob_manager, sd_blob.blob_hash, sd_info)
defer.returnValue(sd_blob)
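With the `Session` object gone, callers now pass the individual services directly. A sketch of the new call; the argument names come from the signature above, while the surrounding `@defer.inlineCallbacks` context, the component variables, and the `conf` import are assumed:

sd_blob = yield download_sd_blob(
    sd_hash, blob_manager, peer_finder, rate_limiter, payment_rate_manager, wallet,
    timeout=conf.settings['sd_download_timeout'],
    download_mirrors=conf.settings['download_mirrors'])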


@@ -1,30 +1,25 @@
import os
from collections import defaultdict, deque
import datetime
import logging
from decimal import Decimal
import treq
from zope.interface import implements
from twisted.internet import threads, reactor, defer, task
from twisted.python.failure import Failure
from twisted.internet.error import ConnectionAborted
from hashlib import sha256
from lbryum import wallet as lbryum_wallet
from lbryum.network import Network
from lbryum.simple_config import SimpleConfig
from lbryum.constants import COIN
from lbryum.commands import Commands
from lbryum.errors import InvalidPassword
from lbryum.constants import HEADERS_URL, HEADER_SIZE
from lbryschema.uri import parse_lbry_uri
from lbryschema.claim import ClaimDict
from lbryschema.error import DecodeError
from lbryschema.decode import smart_decode
from lbrynet.txlbryum.factory import StratumClient
from lbrynet.interfaces import IRequestCreator, IQueryHandlerFactory, IQueryHandler, IWallet
from lbrynet.core.utils import DeferredDict
from lbrynet.core.client.ClientRequest import ClientRequest
@@ -92,107 +87,8 @@ class Wallet(object):
self._batch_count = 20
self._pending_claim_checker = task.LoopingCall(self.fetch_and_save_heights_for_pending_claims)
@defer.inlineCallbacks
def fetch_headers_from_s3(self):
local_header_size = self.local_header_file_size()
resume_header = {"Range": "bytes={}-".format(local_header_size)}
response = yield treq.get(HEADERS_URL, headers=resume_header)
got_406 = response.code == 406 # our file is bigger
final_size_after_download = response.length + local_header_size
if got_406:
log.warning("s3 is more out of date than we are")
# should have something to download and a final length divisible by the header size
elif final_size_after_download and not final_size_after_download % HEADER_SIZE:
s3_height = (final_size_after_download / HEADER_SIZE) - 1
local_height = self.local_header_file_height()
if s3_height > local_height:
if local_header_size:
log.info("Resuming download of %i bytes from s3", response.length)
with open(os.path.join(self.config.path, "blockchain_headers"), "a+b") as headers_file:
yield treq.collect(response, headers_file.write)
else:
with open(os.path.join(self.config.path, "blockchain_headers"), "wb") as headers_file:
yield treq.collect(response, headers_file.write)
log.info("fetched headers from s3 (s3 height: %i), now verifying integrity after download.", s3_height)
self._check_header_file_integrity()
else:
log.warning("s3 is more out of date than we are")
else:
log.error("invalid size for headers from s3")
def local_header_file_height(self):
return max((self.local_header_file_size() / HEADER_SIZE) - 1, 0)
def local_header_file_size(self):
headers_path = os.path.join(self.config.path, "blockchain_headers")
if os.path.isfile(headers_path):
return os.stat(headers_path).st_size
return 0
@defer.inlineCallbacks
def get_remote_height(self, server, port):
connected = defer.Deferred()
connected.addTimeout(3, reactor, lambda *_: None)
client = StratumClient(connected)
reactor.connectTCP(server, port, client)
yield connected
remote_height = yield client.blockchain_block_get_server_height()
client.client.transport.loseConnection()
defer.returnValue(remote_height)
@defer.inlineCallbacks
def should_download_headers_from_s3(self):
from lbrynet import conf
if conf.settings['blockchain_name'] != "lbrycrd_main":
defer.returnValue(False)
self._check_header_file_integrity()
s3_headers_depth = conf.settings['s3_headers_depth']
if not s3_headers_depth:
defer.returnValue(False)
local_height = self.local_header_file_height()
for server_url in self.config.get('default_servers'):
port = int(self.config.get('default_servers')[server_url]['t'])
try:
remote_height = yield self.get_remote_height(server_url, port)
log.info("%s:%i height: %i, local height: %s", server_url, port, remote_height, local_height)
if remote_height > (local_height + s3_headers_depth):
defer.returnValue(True)
except Exception as err:
log.warning("error requesting remote height from %s:%i - %s", server_url, port, err)
defer.returnValue(False)
def _check_header_file_integrity(self):
# TODO: temporary workaround for usability. move to txlbryum and check headers instead of file integrity
from lbrynet import conf
if conf.settings['blockchain_name'] != "lbrycrd_main":
return
hashsum = sha256()
checksum_height, checksum = conf.settings['HEADERS_FILE_SHA256_CHECKSUM']
checksum_length_in_bytes = checksum_height * HEADER_SIZE
if self.local_header_file_size() < checksum_length_in_bytes:
return
headers_path = os.path.join(self.config.path, "blockchain_headers")
with open(headers_path, "rb") as headers_file:
hashsum.update(headers_file.read(checksum_length_in_bytes))
current_checksum = hashsum.hexdigest()
if current_checksum != checksum:
msg = "Expected checksum {}, got {}".format(checksum, current_checksum)
log.warning("Wallet file corrupted, checksum mismatch. " + msg)
log.warning("Deleting header file so it can be downloaded again.")
os.unlink(headers_path)
elif (self.local_header_file_size() % HEADER_SIZE) != 0:
log.warning("Header file is good up to checkpoint height, but incomplete. Truncating to checkpoint.")
with open(headers_path, "rb+") as headers_file:
headers_file.truncate(checksum_length_in_bytes)
@defer.inlineCallbacks
def start(self):
should_download_headers = yield self.should_download_headers_from_s3()
if should_download_headers:
try:
yield self.fetch_headers_from_s3()
except Exception as err:
log.error("failed to fetch headers from s3: %s", err)
log.info("Starting wallet.")
yield self._start()
self.stopped = False
@@ -938,9 +834,7 @@ class LBRYumWallet(Wallet):
self._lag_counter = 0
self.blocks_behind = 0
self.catchup_progress = 0
# fired when the wallet actually unlocks (wallet_unlocked_d can be called multiple times)
self.wallet_unlock_success = defer.Deferred()
self.is_wallet_unlocked = None
def _is_first_run(self):
return (not self.printed_retrieving_headers and
@@ -953,21 +847,23 @@ class LBRYumWallet(Wallet):
return self._cmd_runner
def check_locked(self):
if not self.wallet.use_encryption:
log.info("Wallet is not encrypted")
self.wallet_unlock_success.callback(True)
elif not self._cmd_runner:
"""
Checks whether the wallet is encrypted (locked)
:return: (boolean) True if the wallet is unlocked, otherwise False
"""
if not self._cmd_runner:
raise Exception("Command runner hasn't been initialized yet")
elif self._cmd_runner.locked:
log.info("Waiting for wallet password")
self.wallet_unlocked_d.addCallback(self.unlock)
return self.wallet_unlock_success
return self.is_wallet_unlocked
def unlock(self, password):
if self._cmd_runner and self._cmd_runner.locked:
try:
self._cmd_runner.unlock_wallet(password)
self.wallet_unlock_success.callback(True)
self.is_wallet_unlocked = True
log.info("Unlocked the wallet!")
except InvalidPassword:
log.warning("Incorrect password, try again")
@@ -1054,6 +950,7 @@ class LBRYumWallet(Wallet):
wallet.create_main_account()
wallet.synchronize()
self.wallet = wallet
self.is_wallet_unlocked = not self.wallet.use_encryption
self._check_large_wallet()
return defer.succeed(True)
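With the unlock deferred gone, lock state is now a plain attribute on the wallet. A sketch of the resulting flow, assuming a started `LBRYumWallet` whose command runner exists (the password is illustrative):

if not wallet.check_locked():  # returns is_wallet_unlocked
    wallet.unlock('hunter2')   # sets is_wallet_unlocked = True on success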


@@ -5,3 +5,5 @@ This includes classes for connecting to other peers and downloading blobs from t
connections from peers and responding to their requests, managing locally stored blobs, sending
and receiving payments, and locating peers in the DHT.
"""
from lbrynet import custom_logger


@@ -354,6 +354,10 @@ class AvailabilityRequest(RequestHelper):
log.debug("Received a response to the availability request")
# save available blobs
blob_hashes = response_dict['available_blobs']
if not blob_hashes:
# should not send any more requests, since this peer doesn't have any of the blobs we need
self.update_local_score(-10.0)
return True
for blob_hash in blob_hashes:
if blob_hash in request.request_dict['requested_blobs']:
self.process_available_blob_hash(blob_hash, request)


@@ -1,8 +1,6 @@
import inspect
import json
import logging
import logging.handlers
import os
import sys
import traceback
@@ -13,25 +11,6 @@ import twisted.python.log
from lbrynet import __version__ as lbrynet_version, build_type, conf
from lbrynet.core import utils
####
# This code is copied from logging/__init__.py in the python source code
####
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): # support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif __file__[-4:].lower() in ['.pyc', '.pyo']:
_srcfile = __file__[:-4] + '.py'
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
#####
TRACE = 5
class HTTPSHandler(logging.Handler):
def __init__(self, url, fqdn=False, localname=None, facility=None, cookies=None):
@@ -110,6 +89,7 @@ def disable_third_party_loggers():
logging.getLogger('BitcoinRPC').setLevel(logging.INFO)
logging.getLogger('lbryum').setLevel(logging.WARNING)
logging.getLogger('twisted').setLevel(logging.CRITICAL)
logging.getLogger('txupnp').setLevel(logging.WARNING)
@_log_decorator
@@ -139,6 +119,8 @@ def get_loggly_url(token=None, version=None):
def configure_loggly_handler():
if build_type.BUILD == 'dev':
return
if not conf.settings['share_usage_data']:
return
level = logging.ERROR
handler = get_loggly_handler(level=level, installation_id=conf.settings.installation_id,
session_id=conf.settings.get_session_id())
@@ -185,33 +167,6 @@ class JsonFormatter(logging.Formatter):
return json.dumps(data)
####
# This code is copied from logging/__init__.py in the python source code
####
def findCaller(srcfile=None):
"""Returns the filename, line number and function name of the caller"""
srcfile = srcfile or _srcfile
f = inspect.currentframe()
# On some versions of IronPython, currentframe() returns None if
# IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# ignore any function calls that are in this file
if filename == srcfile:
f = f.f_back
continue
rv = (filename, f.f_lineno, co.co_name)
break
return rv
###
def failure(failure, log, msg, *args):
"""Log a failure message from a deferred.
@@ -316,65 +271,3 @@ def get_parent(logger_name):
return ''
names = names[:-1]
return '.'.join(names)
class Logger(logging.Logger):
"""A logger that has an extra `fail` method useful for handling twisted failures."""
def fail(self, callback=None, *args, **kwargs):
"""Returns a function to log a failure from an errback.
The returned function appends the error message and extracts
the traceback from `err`.
Example usage:
d.addErrback(log.fail(), 'This is an error message')
Although odd, making the method call is necessary to extract
out useful filename and line number information; otherwise the
reported values are from inside twisted's deferred handling
code.
Args:
callback: callable to call after making the log. The first argument
will be the `err` from the deferred
args: extra arguments to pass into `callback`
Returns: a function that takes the following arguments:
err: twisted.python.failure.Failure
msg: the message to log, using normal logging string interpolation.
msg_args: the values to substitute into `msg`
msg_kwargs: set `level` to change from the default ERROR severity. Other
keywords are treated as normal log kwargs.
"""
fn, lno, func = findCaller()
def _fail(err, msg, *msg_args, **msg_kwargs):
level = msg_kwargs.pop('level', logging.ERROR)
msg += ": %s"
msg_args += (err.getErrorMessage(),)
exc_info = (err.type, err.value, err.getTracebackObject())
record = self.makeRecord(
self.name, level, fn, lno, msg, msg_args, exc_info, func, msg_kwargs)
self.handle(record)
if callback:
try:
return callback(err, *args, **kwargs)
except Exception:
# log.fail is almost always called within an
# errback. If callback fails and we didn't catch
# the exception we would need to attach a second
# errback to deal with that, which we will almost
# never do and then we end up with an unhandled
# error that will get swallowed by twisted
self.exception('Failed to run callback')
return _fail
def trace(self, msg, *args, **kwargs):
if self.isEnabledFor(TRACE):
self._log(TRACE, msg, args, **kwargs)
logging.setLoggerClass(Logger)
logging.addLevelName(TRACE, 'TRACE')

lbrynet/custom_logger.py Normal file (106 lines)

@@ -0,0 +1,106 @@
import os
import sys
import inspect
import logging
TRACE = 5
####
# This code is copied from logging/__init__.py in the python source code
####
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): # support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif __file__[-4:].lower() in ['.pyc', '.pyo']:
_srcfile = __file__[:-4] + '.py'
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
def findCaller(srcfile=None):
"""Returns the filename, line number and function name of the caller"""
srcfile = srcfile or _srcfile
f = inspect.currentframe()
# On some versions of IronPython, currentframe() returns None if
# IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
# ignore any function calls that are in this file
if filename == srcfile:
f = f.f_back
continue
rv = (filename, f.f_lineno, co.co_name)
break
return rv
###
class Logger(logging.Logger):
"""A logger that has an extra `fail` method useful for handling twisted failures."""
def fail(self, callback=None, *args, **kwargs):
"""Returns a function to log a failure from an errback.
The returned function appends the error message and extracts
the traceback from `err`.
Example usage:
d.addErrback(log.fail(), 'This is an error message')
Although odd, making the method call is necessary to extract
out useful filename and line number information; otherwise the
reported values are from inside twisted's deferred handling
code.
Args:
callback: callable to call after making the log. The first argument
will be the `err` from the deferred
args: extra arguments to pass into `callback`
Returns: a function that takes the following arguments:
err: twisted.python.failure.Failure
msg: the message to log, using normal logging string interpolation.
msg_args: the values to substitute into `msg`
msg_kwargs: set `level` to change from the default ERROR severity. Other
keywords are treated as normal log kwargs.
"""
fn, lno, func = findCaller()
def _fail(err, msg, *msg_args, **msg_kwargs):
level = msg_kwargs.pop('level', logging.ERROR)
msg += ": %s"
msg_args += (err.getErrorMessage(),)
exc_info = (err.type, err.value, err.getTracebackObject())
record = self.makeRecord(
self.name, level, fn, lno, msg, msg_args, exc_info, func, msg_kwargs)
self.handle(record)
if callback:
try:
return callback(err, *args, **kwargs)
except Exception:
# log.fail is almost always called within an
# errback. If callback fails and we didn't catch
# the exception we would need to attach a second
# errback to deal with that, which we will almost
# never do and then we end up with an unhandled
# error that will get swallowed by twisted
self.exception('Failed to run callback')
return _fail
def trace(self, msg, *args, **kwargs):
if self.isEnabledFor(TRACE):
self._log(TRACE, msg, args, **kwargs)
logging.setLoggerClass(Logger)
logging.addLevelName(TRACE, 'TRACE')
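Because `logging.setLoggerClass(Logger)` runs at import time, any logger created after this module is imported gains the `trace` and `fail` helpers. A minimal sketch; the logger name and messages are illustrative:

import logging
from lbrynet import custom_logger  # noqa: imported for the setLoggerClass side effect

log = logging.getLogger('lbrynet.example')
log.trace('only emitted when the TRACE (5) level is enabled')
# on a twisted Deferred: d.addErrback(log.fail(), 'error while doing X')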


@@ -0,0 +1,75 @@
import logging
from twisted.internet import defer
from twisted._threads import AlreadyQuit
from ComponentManager import ComponentManager
log = logging.getLogger(__name__)
class ComponentType(type):
def __new__(mcs, name, bases, newattrs):
klass = type.__new__(mcs, name, bases, newattrs)
if name != "Component":
ComponentManager.default_component_classes[klass.component_name] = klass
return klass
class Component(object):
"""
lbrynet-daemon component helper
Inheriting classes will be automatically registered with the ComponentManager and must implement `start` and `stop` methods
"""
__metaclass__ = ComponentType
depends_on = []
component_name = None
def __init__(self, component_manager):
self.component_manager = component_manager
self._running = False
def __lt__(self, other):
return self.component_name < other.component_name
@property
def running(self):
return self._running
def get_status(self):
return
def start(self):
raise NotImplementedError()
def stop(self):
raise NotImplementedError()
@property
def component(self):
raise NotImplementedError()
@defer.inlineCallbacks
def _setup(self):
try:
result = yield defer.maybeDeferred(self.start)
self._running = True
defer.returnValue(result)
except (defer.CancelledError, AlreadyQuit):
pass
except Exception as err:
log.exception("Error setting up %s", self.component_name or self.__class__.__name__)
raise err
@defer.inlineCallbacks
def _stop(self):
try:
result = yield defer.maybeDeferred(self.stop)
self._running = False
defer.returnValue(result)
except (defer.CancelledError, AlreadyQuit):
pass
except Exception as err:
log.exception("Error stopping %s", self.__class__.__name__)
raise err
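A minimal sketch of a component built on the helper above; the `ComponentType` metaclass registers it automatically under its `component_name`. The class and its body are illustrative, not taken from the diff:

from twisted.internet import defer

class ExampleComponent(Component):
    component_name = 'example'
    depends_on = []  # names of components that must start first

    def __init__(self, component_manager):
        Component.__init__(self, component_manager)
        self._thing = None

    @property
    def component(self):
        return self._thing  # what others receive from get_component()

    def start(self):
        self._thing = object()  # acquire whatever resource this component wraps
        return defer.succeed(None)

    def stop(self):
        self._thing = None
        return defer.succeed(None)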


@@ -0,0 +1,177 @@
import logging
from twisted.internet import defer
from lbrynet.core.Error import ComponentStartConditionNotMet
log = logging.getLogger(__name__)
class RegisteredConditions(object):
conditions = {}
class RequiredConditionType(type):
def __new__(mcs, name, bases, newattrs):
klass = type.__new__(mcs, name, bases, newattrs)
if name != "RequiredCondition":
if klass.name in RegisteredConditions.conditions:
raise SyntaxError("already have a component registered for \"%s\"" % klass.name)
RegisteredConditions.conditions[klass.name] = klass
return klass
class RequiredCondition(object):
name = ""
component = ""
message = ""
@staticmethod
def evaluate(component):
raise NotImplementedError()
__metaclass__ = RequiredConditionType
class ComponentManager(object):
default_component_classes = {}
def __init__(self, reactor=None, analytics_manager=None, skip_components=None, **override_components):
self.skip_components = skip_components or []
self.reactor = reactor
self.component_classes = {}
self.components = set()
self.analytics_manager = analytics_manager
for component_name, component_class in self.default_component_classes.iteritems():
if component_name in override_components:
component_class = override_components.pop(component_name)
if component_name not in self.skip_components:
self.component_classes[component_name] = component_class
if override_components:
raise SyntaxError("unexpected components: %s" % override_components)
for component_class in self.component_classes.itervalues():
self.components.add(component_class(self))
@defer.inlineCallbacks
def evaluate_condition(self, condition_name):
if condition_name not in RegisteredConditions.conditions:
raise NameError(condition_name)
condition = RegisteredConditions.conditions[condition_name]
try:
component = self.get_component(condition.component)
result = yield defer.maybeDeferred(condition.evaluate, component)
except Exception as err:
result = False
defer.returnValue((result, "" if result else condition.message))
def sort_components(self, reverse=False):
"""
Sort components by requirements
"""
steps = []
staged = set()
components = set(self.components)
# components with no requirements
step = []
for component in set(components):
if not component.depends_on:
step.append(component)
staged.add(component.component_name)
components.remove(component)
if step:
step.sort()
steps.append(step)
while components:
step = []
to_stage = set()
for component in set(components):
reqs_met = 0
for needed in component.depends_on:
if needed in staged:
reqs_met += 1
if reqs_met == len(component.depends_on):
step.append(component)
to_stage.add(component.component_name)
components.remove(component)
if step:
step.sort()
staged.update(to_stage)
steps.append(step)
elif components:
raise ComponentStartConditionNotMet("Unresolved dependencies for: %s" % components)
if reverse:
steps.reverse()
return steps
@defer.inlineCallbacks
def setup(self, **callbacks):
"""
Start Components in sequence sorted by requirements
:return: (defer.Deferred)
"""
for component_name, cb in callbacks.iteritems():
if component_name not in self.component_classes:
raise NameError("unknown component: %s" % component_name)
if not callable(cb):
raise ValueError("%s is not callable" % cb)
def _setup(component):
if component.component_name in callbacks:
d = component._setup()
d.addCallback(callbacks[component.component_name], component)
return d
return component._setup()
stages = self.sort_components()
for stage in stages:
yield defer.DeferredList([_setup(component) for component in stage])
@defer.inlineCallbacks
def stop(self):
"""
Stop Components in reversed startup order
:return: (defer.Deferred)
"""
stages = self.sort_components(reverse=True)
for stage in stages:
yield defer.DeferredList([component._stop() for component in stage if component.running])
def all_components_running(self, *component_names):
"""
Check if components are running
:return: (bool) True if all specified components are running
"""
components = {component.component_name: component for component in self.components}
for component in component_names:
if component not in components:
raise NameError("%s is not a known Component" % component)
if not components[component].running:
return False
return True
def get_components_status(self):
"""
List status of all the components, whether they are running or not
:return: (dict) {(str) component_name: (bool) True if running else False}
"""
return {
component.component_name: component.running
for component in self.components
}
def get_component(self, component_name):
for component in self.components:
if component.component_name == component_name:
return component.component
raise NameError(component_name)
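An end-to-end sketch of the manager above, reusing the hypothetical `ExampleComponent` from the previous file and skipping every other registered component:

from twisted.internet import defer

@defer.inlineCallbacks
def run_components():
    manager = ComponentManager(
        skip_components=[name for name in ComponentManager.default_component_classes
                         if name != 'example'])
    yield manager.setup()  # starts stages in dependency order
    assert manager.all_components_running('example')
    print(manager.get_components_status())  # {'example': True}
    yield manager.stop()   # stops in reverse startup order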


@@ -0,0 +1,718 @@
import os
import logging
from hashlib import sha256
import treq
import math
import binascii
from twisted.internet import defer, threads, reactor, error
from txupnp.upnp import UPnP
from lbryum.simple_config import SimpleConfig
from lbryum.constants import HEADERS_URL, HEADER_SIZE
from lbrynet import conf
from lbrynet.core.utils import DeferredDict
from lbrynet.core.PaymentRateManager import OnlyFreePaymentsManager
from lbrynet.core.RateLimiter import RateLimiter
from lbrynet.core.BlobManager import DiskBlobManager
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier, EncryptedFileStreamType
from lbrynet.core.Wallet import LBRYumWallet
from lbrynet.core.server.BlobRequestHandler import BlobRequestHandlerFactory
from lbrynet.core.server.ServerProtocol import ServerProtocolFactory
from lbrynet.daemon.Component import Component
from lbrynet.daemon.ExchangeRateManager import ExchangeRateManager
from lbrynet.database.storage import SQLiteStorage
from lbrynet.dht import node, hashannouncer
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.lbry_file.client.EncryptedFileDownloader import EncryptedFileSaverFactory
from lbrynet.lbry_file.client.EncryptedFileOptions import add_lbry_file_to_sd_identifier
from lbrynet.reflector import ServerFactory as reflector_server_factory
from lbrynet.txlbryum.factory import StratumClient
from lbrynet.core.utils import generate_id
log = logging.getLogger(__name__)
# settings must be initialized before this file is imported
DATABASE_COMPONENT = "database"
BLOB_COMPONENT = "blob_manager"
HEADERS_COMPONENT = "blockchain_headers"
WALLET_COMPONENT = "wallet"
DHT_COMPONENT = "dht"
HASH_ANNOUNCER_COMPONENT = "hash_announcer"
STREAM_IDENTIFIER_COMPONENT = "stream_identifier"
FILE_MANAGER_COMPONENT = "file_manager"
PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
REFLECTOR_COMPONENT = "reflector"
UPNP_COMPONENT = "upnp"
EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
RATE_LIMITER_COMPONENT = "rate_limiter"
PAYMENT_RATE_COMPONENT = "payment_rate_manager"
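The constants above are the names accepted by the `components_to_skip` setting (and reported in `skipped_components` in the `status` response). A configuration sketch, assuming the `conf.settings.update` helper used elsewhere in the codebase:

conf.settings.update({
    'components_to_skip': [REFLECTOR_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT]
})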
def get_wallet_config():
wallet_type = GCS('wallet')
if wallet_type == conf.LBRYCRD_WALLET:
raise ValueError('LBRYcrd Wallet is no longer supported')
elif wallet_type != conf.LBRYUM_WALLET:
raise ValueError('Wallet Type {} is not valid'.format(wallet_type))
lbryum_servers = {address: {'t': str(port)}
for address, port in GCS('lbryum_servers')}
config = {
'auto_connect': True,
'chain': GCS('blockchain_name'),
'default_servers': lbryum_servers
}
if 'use_keyring' in conf.settings:
config['use_keyring'] = GCS('use_keyring')
if conf.settings['lbryum_wallet_dir']:
config['lbryum_path'] = GCS('lbryum_wallet_dir')
return config
class ConfigSettings(object):
@staticmethod
def get_conf_setting(setting_name):
return conf.settings[setting_name]
@staticmethod
def get_blobfiles_dir():
if conf.settings['BLOBFILES_DIR'] == "blobfiles":
return os.path.join(GCS("data_dir"), "blobfiles")
else:
log.info("Using non-default blobfiles directory: %s", conf.settings['BLOBFILES_DIR'])
return conf.settings['BLOBFILES_DIR']
@staticmethod
def get_node_id():
return conf.settings.node_id
@staticmethod
def get_external_ip():
from lbrynet.core.system_info import get_platform
platform = get_platform(get_ip=True)
return platform['ip']
# Shorthand for common ConfigSettings methods
CS = ConfigSettings
GCS = ConfigSettings.get_conf_setting
class DatabaseComponent(Component):
component_name = DATABASE_COMPONENT
def __init__(self, component_manager):
Component.__init__(self, component_manager)
self.storage = None
@property
def component(self):
return self.storage
@staticmethod
def get_current_db_revision():
return 9
@staticmethod
def get_revision_filename():
return conf.settings.get_db_revision_filename()
@staticmethod
def _write_db_revision_file(version_num):
with open(conf.settings.get_db_revision_filename(), mode='w') as db_revision:
db_revision.write(str(version_num))
@defer.inlineCallbacks
def start(self):
# check directories exist, create them if they don't
log.info("Loading databases")
if not os.path.exists(GCS('download_directory')):
os.mkdir(GCS('download_directory'))
if not os.path.exists(GCS('data_dir')):
os.mkdir(GCS('data_dir'))
self._write_db_revision_file(self.get_current_db_revision())
log.debug("Created the db revision file: %s", self.get_revision_filename())
if not os.path.exists(CS.get_blobfiles_dir()):
os.mkdir(CS.get_blobfiles_dir())
log.debug("Created the blobfile directory: %s", str(CS.get_blobfiles_dir()))
if not os.path.exists(self.get_revision_filename()):
log.warning("db_revision file not found. Creating it")
self._write_db_revision_file(self.get_current_db_revision())
# check the db migration and run any needed migrations
with open(self.get_revision_filename(), "r") as revision_read_handle:
old_revision = int(revision_read_handle.read().strip())
if old_revision > self.get_current_db_revision():
raise Exception('This version of lbrynet is not compatible with the database\n'
'Your database is revision %i, expected %i' %
(old_revision, self.get_current_db_revision()))
if old_revision < self.get_current_db_revision():
from lbrynet.database.migrator import dbmigrator
log.info("Upgrading your databases (revision %i to %i)", old_revision, self.get_current_db_revision())
yield threads.deferToThread(
dbmigrator.migrate_db, GCS('data_dir'), old_revision, self.get_current_db_revision()
)
self._write_db_revision_file(self.get_current_db_revision())
log.info("Finished upgrading the databases.")
# start SQLiteStorage
self.storage = SQLiteStorage(GCS('data_dir'))
yield self.storage.setup()
@defer.inlineCallbacks
def stop(self):
yield self.storage.stop()
self.storage = None
class HeadersComponent(Component):
component_name = HEADERS_COMPONENT
def __init__(self, component_manager):
Component.__init__(self, component_manager)
self.config = SimpleConfig(get_wallet_config())
self._downloading_headers = None
self._headers_progress_percent = None
@property
def component(self):
return self
def get_status(self):
return {} if not self._downloading_headers else {
'downloading_headers': self._downloading_headers,
'download_progress': self._headers_progress_percent
}
@defer.inlineCallbacks
def fetch_headers_from_s3(self):
def collector(data, h_file):
h_file.write(data)
local_size = float(h_file.tell())
final_size = float(final_size_after_download)
self._headers_progress_percent = math.ceil(local_size / final_size * 100)
local_header_size = self.local_header_file_size()
resume_header = {"Range": "bytes={}-".format(local_header_size)}
response = yield treq.get(HEADERS_URL, headers=resume_header)
got_406 = response.code == 406 # our file is bigger
final_size_after_download = response.length + local_header_size
if got_406:
log.warning("s3 is more out of date than we are")
# should have something to download and a final length divisible by the header size
elif final_size_after_download and not final_size_after_download % HEADER_SIZE:
s3_height = (final_size_after_download / HEADER_SIZE) - 1
local_height = self.local_header_file_height()
if s3_height > local_height:
if local_header_size:
log.info("Resuming download of %i bytes from s3", response.length)
with open(os.path.join(self.config.path, "blockchain_headers"), "a+b") as headers_file:
yield treq.collect(response, lambda d: collector(d, headers_file))
else:
with open(os.path.join(self.config.path, "blockchain_headers"), "wb") as headers_file:
yield treq.collect(response, lambda d: collector(d, headers_file))
log.info("fetched headers from s3 (s3 height: %i), now verifying integrity after download.", s3_height)
self._check_header_file_integrity()
else:
log.warning("s3 is more out of date than we are")
else:
log.error("invalid size for headers from s3")
def local_header_file_height(self):
return max((self.local_header_file_size() / HEADER_SIZE) - 1, 0)
def local_header_file_size(self):
headers_path = os.path.join(self.config.path, "blockchain_headers")
if os.path.isfile(headers_path):
return os.stat(headers_path).st_size
return 0
@defer.inlineCallbacks
def get_remote_height(self, server, port):
connected = defer.Deferred()
connected.addTimeout(3, reactor, lambda *_: None)
client = StratumClient(connected)
reactor.connectTCP(server, port, client)
yield connected
remote_height = yield client.blockchain_block_get_server_height()
client.client.transport.loseConnection()
defer.returnValue(remote_height)
@defer.inlineCallbacks
def should_download_headers_from_s3(self):
if conf.settings['blockchain_name'] != "lbrycrd_main":
defer.returnValue(False)
self._check_header_file_integrity()
s3_headers_depth = conf.settings['s3_headers_depth']
if not s3_headers_depth:
defer.returnValue(False)
local_height = self.local_header_file_height()
for server_url in self.config.get('default_servers'):
port = int(self.config.get('default_servers')[server_url]['t'])
try:
remote_height = yield self.get_remote_height(server_url, port)
log.info("%s:%i height: %i, local height: %s", server_url, port, remote_height, local_height)
if remote_height > (local_height + s3_headers_depth):
defer.returnValue(True)
except Exception as err:
log.warning("error requesting remote height from %s:%i - %s", server_url, port, err)
defer.returnValue(False)
def _check_header_file_integrity(self):
# TODO: temporary workaround for usability. move to txlbryum and check headers instead of file integrity
if conf.settings['blockchain_name'] != "lbrycrd_main":
return
hashsum = sha256()
checksum_height, checksum = conf.settings['HEADERS_FILE_SHA256_CHECKSUM']
checksum_length_in_bytes = checksum_height * HEADER_SIZE
if self.local_header_file_size() < checksum_length_in_bytes:
return
headers_path = os.path.join(self.config.path, "blockchain_headers")
with open(headers_path, "rb") as headers_file:
hashsum.update(headers_file.read(checksum_length_in_bytes))
current_checksum = hashsum.hexdigest()
if current_checksum != checksum:
msg = "Expected checksum {}, got {}".format(checksum, current_checksum)
log.warning("Wallet file corrupted, checksum mismatch. " + msg)
log.warning("Deleting header file so it can be downloaded again.")
os.unlink(headers_path)
elif (self.local_header_file_size() % HEADER_SIZE) != 0:
log.warning("Header file is good up to checkpoint height, but incomplete. Truncating to checkpoint.")
with open(headers_path, "rb+") as headers_file:
headers_file.truncate(checksum_length_in_bytes)
@defer.inlineCallbacks
def start(self):
self._downloading_headers = yield self.should_download_headers_from_s3()
if self._downloading_headers:
try:
yield self.fetch_headers_from_s3()
except Exception as err:
log.error("failed to fetch headers from s3: %s", err)
def stop(self):
return defer.succeed(None)
class WalletComponent(Component):
component_name = WALLET_COMPONENT
depends_on = [DATABASE_COMPONENT, HEADERS_COMPONENT]
def __init__(self, component_manager):
Component.__init__(self, component_manager)
self.wallet = None
@property
def component(self):
return self.wallet
@defer.inlineCallbacks
def get_status(self):
if self.wallet:
local_height = self.wallet.network.get_local_height()
remote_height = self.wallet.network.get_server_height()
best_hash = yield self.wallet.get_best_blockhash()
defer.returnValue({
'blocks': local_height,
'blocks_behind': remote_height - local_height,
'best_blockhash': best_hash,
'is_encrypted': self.wallet.wallet.use_encryption
})
@defer.inlineCallbacks
def start(self):
storage = self.component_manager.get_component(DATABASE_COMPONENT)
config = get_wallet_config()
self.wallet = LBRYumWallet(storage, config)
yield self.wallet.start()
@defer.inlineCallbacks
def stop(self):
yield self.wallet.stop()
self.wallet = None
class BlobComponent(Component):
component_name = BLOB_COMPONENT
depends_on = [DATABASE_COMPONENT, DHT_COMPONENT]
def __init__(self, component_manager):
Component.__init__(self, component_manager)
self.blob_manager = None
@property
def component(self):
return self.blob_manager
def start(self):
storage = self.component_manager.get_component(DATABASE_COMPONENT)
dht_node = self.component_manager.get_component(DHT_COMPONENT)
self.blob_manager = DiskBlobManager(CS.get_blobfiles_dir(), storage, dht_node._dataStore)
return self.blob_manager.setup()
def stop(self):
return self.blob_manager.stop()
@defer.inlineCallbacks
def get_status(self):
count = 0
if self.blob_manager:
count = yield self.blob_manager.storage.count_finished_blobs()
defer.returnValue({
'finished_blobs': count
})
class DHTComponent(Component):
component_name = DHT_COMPONENT
depends_on = [UPNP_COMPONENT]
def __init__(self, component_manager):
Component.__init__(self, component_manager)
self.dht_node = None
self.upnp_component = None
self.udp_port = None
self.peer_port = None
@property
def component(self):
return self.dht_node
def get_status(self):
return {
'node_id': binascii.hexlify(CS.get_node_id()),
'peers_in_routing_table': 0 if not self.dht_node else len(self.dht_node.contacts)
}
@defer.inlineCallbacks
def start(self):
self.upnp_component = self.component_manager.get_component(UPNP_COMPONENT)
self.peer_port, self.udp_port = self.upnp_component.get_redirects()
node_id = CS.get_node_id()
if node_id is None:
node_id = generate_id()
self.dht_node = node.Node(
node_id=node_id,
udpPort=self.udp_port,
externalIP=CS.get_external_ip(),
peerPort=self.peer_port
)
self.dht_node.start_listening()
yield self.dht_node._protocol._listening
d = self.dht_node.joinNetwork(GCS('known_dht_nodes'))
d.addCallback(lambda _: self.dht_node.start_looping_calls())
d.addCallback(lambda _: log.info("Joined the dht"))
log.info("Started the dht")
@defer.inlineCallbacks
def stop(self):
yield self.dht_node.stop()
class HashAnnouncerComponent(Component):
component_name = HASH_ANNOUNCER_COMPONENT
depends_on = [DHT_COMPONENT, DATABASE_COMPONENT]
def __init__(self, component_manager):
Component.__init__(self, component_manager)
self.hash_announcer = None
@property
def component(self):
return self.hash_announcer
@defer.inlineCallbacks
def start(self):
storage = self.component_manager.get_component(DATABASE_COMPONENT)
dht_node = self.component_manager.get_component(DHT_COMPONENT)
self.hash_announcer = hashannouncer.DHTHashAnnouncer(dht_node, storage)
yield self.hash_announcer.start()
@defer.inlineCallbacks
def stop(self):
yield self.hash_announcer.stop()
def get_status(self):
return {
'announce_queue_size': 0 if not self.hash_announcer else len(self.hash_announcer.hash_queue)
}
class RateLimiterComponent(Component):
component_name = RATE_LIMITER_COMPONENT
def __init__(self, component_manager):
Component.__init__(self, component_manager)
self.rate_limiter = RateLimiter()
@property
def component(self):
return self.rate_limiter
def start(self):
self.rate_limiter.start()
return defer.succeed(None)
def stop(self):
self.rate_limiter.stop()
return defer.succeed(None)
class StreamIdentifierComponent(Component):
component_name = STREAM_IDENTIFIER_COMPONENT
depends_on = [DHT_COMPONENT, RATE_LIMITER_COMPONENT, BLOB_COMPONENT, DATABASE_COMPONENT, WALLET_COMPONENT]
def __init__(self, component_manager):
Component.__init__(self, component_manager)
self.sd_identifier = StreamDescriptorIdentifier()
@property
def component(self):
return self.sd_identifier
@defer.inlineCallbacks
def start(self):
dht_node = self.component_manager.get_component(DHT_COMPONENT)
rate_limiter = self.component_manager.get_component(RATE_LIMITER_COMPONENT)
blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
storage = self.component_manager.get_component(DATABASE_COMPONENT)
wallet = self.component_manager.get_component(WALLET_COMPONENT)
add_lbry_file_to_sd_identifier(self.sd_identifier)
file_saver_factory = EncryptedFileSaverFactory(
dht_node.peer_finder,
rate_limiter,
blob_manager,
storage,
wallet,
GCS('download_directory')
)
yield self.sd_identifier.add_stream_downloader_factory(EncryptedFileStreamType, file_saver_factory)
def stop(self):
pass
class PaymentRateComponent(Component):
component_name = PAYMENT_RATE_COMPONENT
def __init__(self, component_manager):
Component.__init__(self, component_manager)
self.payment_rate_manager = OnlyFreePaymentsManager()
@property
def component(self):
return self.payment_rate_manager
def start(self):
return defer.succeed(None)
def stop(self):
return defer.succeed(None)
class FileManagerComponent(Component):
component_name = FILE_MANAGER_COMPONENT
depends_on = [DHT_COMPONENT, RATE_LIMITER_COMPONENT, BLOB_COMPONENT, DATABASE_COMPONENT, WALLET_COMPONENT,
STREAM_IDENTIFIER_COMPONENT, PAYMENT_RATE_COMPONENT]
def __init__(self, component_manager):
Component.__init__(self, component_manager)
self.file_manager = None
@property
def component(self):
return self.file_manager
def get_status(self):
if not self.file_manager:
return
return {
'managed_files': len(self.file_manager.lbry_files)
}
@defer.inlineCallbacks
def start(self):
dht_node = self.component_manager.get_component(DHT_COMPONENT)
rate_limiter = self.component_manager.get_component(RATE_LIMITER_COMPONENT)
blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
storage = self.component_manager.get_component(DATABASE_COMPONENT)
wallet = self.component_manager.get_component(WALLET_COMPONENT)
sd_identifier = self.component_manager.get_component(STREAM_IDENTIFIER_COMPONENT)
payment_rate_manager = self.component_manager.get_component(PAYMENT_RATE_COMPONENT)
log.info('Starting the file manager')
self.file_manager = EncryptedFileManager(dht_node.peer_finder, rate_limiter, blob_manager, wallet,
payment_rate_manager, storage, sd_identifier)
yield self.file_manager.setup()
log.info('Done setting up file manager')
@defer.inlineCallbacks
def stop(self):
yield self.file_manager.stop()
class PeerProtocolServerComponent(Component):
component_name = PEER_PROTOCOL_SERVER_COMPONENT
depends_on = [UPNP_COMPONENT, DHT_COMPONENT, RATE_LIMITER_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT,
PAYMENT_RATE_COMPONENT]
def __init__(self, component_manager):
Component.__init__(self, component_manager)
self.lbry_server_port = None
@property
def component(self):
return self.lbry_server_port
@defer.inlineCallbacks
def start(self):
wallet = self.component_manager.get_component(WALLET_COMPONENT)
peer_port = self.component_manager.get_component(UPNP_COMPONENT).upnp_redirects["TCP"]
query_handlers = {
handler.get_primary_query_identifier(): handler for handler in [
BlobRequestHandlerFactory(
self.component_manager.get_component(BLOB_COMPONENT),
wallet,
self.component_manager.get_component(PAYMENT_RATE_COMPONENT),
self.component_manager.analytics_manager
),
wallet.get_wallet_info_query_handler_factory(),
]
}
server_factory = ServerProtocolFactory(
self.component_manager.get_component(RATE_LIMITER_COMPONENT), query_handlers,
self.component_manager.get_component(DHT_COMPONENT).peer_manager
)
try:
log.info("Peer protocol listening on TCP %d", peer_port)
self.lbry_server_port = yield reactor.listenTCP(peer_port, server_factory)
except error.CannotListenError as e:
import traceback
log.error("Couldn't bind to port %d. Visit lbry.io/faq/how-to-change-port for"
" more details.", peer_port)
log.error("%s", traceback.format_exc())
raise ValueError("%s lbrynet may already be running on your computer." % str(e))
@defer.inlineCallbacks
def stop(self):
if self.lbry_server_port is not None:
self.lbry_server_port, old_port = None, self.lbry_server_port
log.info('Stop listening on port %s', old_port.port)
yield old_port.stopListening()
class ReflectorComponent(Component):
component_name = REFLECTOR_COMPONENT
depends_on = [DHT_COMPONENT, BLOB_COMPONENT, FILE_MANAGER_COMPONENT]
def __init__(self, component_manager):
Component.__init__(self, component_manager)
self.reflector_server_port = GCS('reflector_port')
self.reflector_server = None
@property
def component(self):
return self.reflector_server
@defer.inlineCallbacks
def start(self):
log.info("Starting reflector server")
dht_node = self.component_manager.get_component(DHT_COMPONENT)
blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
file_manager = self.component_manager.get_component(FILE_MANAGER_COMPONENT)
reflector_factory = reflector_server_factory(dht_node.peer_manager, blob_manager, file_manager)
try:
self.reflector_server = yield reactor.listenTCP(self.reflector_server_port, reflector_factory)
log.info('Started reflector on port %s', self.reflector_server_port)
except error.CannotListenError as e:
log.exception("Couldn't bind reflector to port %d", self.reflector_server_port)
raise ValueError("{} lbrynet may already be running on your computer.".format(e))
@defer.inlineCallbacks
def stop(self):
if self.reflector_server is not None:
log.info("Stopping reflector server")
self.reflector_server, p = None, self.reflector_server
            yield p.stopListening()
class UPnPComponent(Component):
component_name = UPNP_COMPONENT
def __init__(self, component_manager):
Component.__init__(self, component_manager)
self._default_peer_port = GCS('peer_port')
self._default_dht_node_port = GCS('dht_node_port')
self.use_upnp = GCS('use_upnp')
self.external_ip = None
self.upnp = UPnP(self.component_manager.reactor, try_miniupnpc_fallback=True)
self.upnp_redirects = {}
@property
def component(self):
return self
def get_redirects(self):
if not self.use_upnp or not self.upnp_redirects:
return self._default_peer_port, self._default_dht_node_port
return self.upnp_redirects["TCP"], self.upnp_redirects["UDP"]
@defer.inlineCallbacks
def _setup_redirects(self):
self.external_ip = yield self.upnp.get_external_ip()
upnp_redirects = yield DeferredDict({
"UDP": self.upnp.get_next_mapping(self._default_dht_node_port, "UDP", "LBRY DHT port"),
"TCP": self.upnp.get_next_mapping(self._default_peer_port, "TCP", "LBRY peer port")
})
self.upnp_redirects.update(upnp_redirects)
@defer.inlineCallbacks
def start(self):
log.debug("In _try_upnp")
found = yield self.upnp.discover()
if found and not self.upnp.miniupnpc_runner:
log.info("set up redirects using txupnp")
elif found and self.upnp.miniupnpc_runner:
log.warning("failed to set up redirect with txupnp, miniupnpc fallback was successful")
if found:
try:
yield self._setup_redirects()
except Exception as err:
if not self.upnp.miniupnpc_runner:
started_fallback = yield self.upnp.start_miniupnpc_fallback()
if started_fallback:
yield self._setup_redirects()
else:
log.warning("failed to set up upnp redirects")
def stop(self):
return defer.DeferredList(
[self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()]
)
class ExchangeRateManagerComponent(Component):
component_name = EXCHANGE_RATE_MANAGER_COMPONENT
def __init__(self, component_manager):
Component.__init__(self, component_manager)
self.exchange_rate_manager = ExchangeRateManager()
@property
def component(self):
return self.exchange_rate_manager
@defer.inlineCallbacks
def start(self):
yield self.exchange_rate_manager.start()
@defer.inlineCallbacks
def stop(self):
yield self.exchange_rate_manager.stop()
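Every component above follows the same life-cycle contract: a unique component_name, an optional depends_on list, a component property exposing the managed object, and start/stop (plus an optional get_status). A minimal sketch of a new component, with hypothetical names (HeartbeatComponent, SomeHeartbeatService) and assuming, as the classes above suggest, that subclassing Component is enough for the ComponentManager to pick it up:

class HeartbeatComponent(Component):
    component_name = "heartbeat"
    depends_on = [DATABASE_COMPONENT]  # started only after the database component

    def __init__(self, component_manager):
        Component.__init__(self, component_manager)
        self.heartbeat = None

    @property
    def component(self):
        return self.heartbeat

    @defer.inlineCallbacks
    def start(self):
        storage = self.component_manager.get_component(DATABASE_COMPONENT)
        self.heartbeat = SomeHeartbeatService(storage)  # hypothetical service
        yield self.heartbeat.start()

    def stop(self):
        return self.heartbeat.stop()

    def get_status(self):
        return {'running': self.heartbeat is not None}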

File diff suppressed because it is too large

View file

@@ -7,7 +7,7 @@ from collections import OrderedDict
from lbrynet import conf
from lbrynet.core import utils
from lbrynet.daemon.auth.client import JSONRPCException, LBRYAPIClient, AuthAPIClient
from lbrynet.daemon.Daemon import LOADING_WALLET_CODE, Daemon
from lbrynet.daemon.Daemon import Daemon
from lbrynet.core.system_info import get_platform
from jsonrpc.common import RPCError
from requests.exceptions import ConnectionError
@@ -21,17 +21,13 @@ def remove_brackets(key):
return key
def set_flag_vals(flag_names, parsed_args):
def set_kwargs(parsed_args):
kwargs = OrderedDict()
for key, arg in parsed_args.iteritems():
if arg is None:
continue
elif key.startswith("--"):
if remove_brackets(key[2:]) not in kwargs:
k = remove_brackets(key[2:])
elif key in flag_names:
if remove_brackets(flag_names[key]) not in kwargs:
k = remove_brackets(flag_names[key])
elif key.startswith("--") and remove_brackets(key[2:]) not in kwargs:
k = remove_brackets(key[2:])
elif remove_brackets(key) not in kwargs:
k = remove_brackets(key)
kwargs[k] = guess_type(arg, k)
@@ -79,26 +75,22 @@ def main():
method = new_method
fn = Daemon.callable_methods[method]
if hasattr(fn, "_flags"):
flag_names = fn._flags
else:
flag_names = {}
parsed = docopt(fn.__doc__, args)
kwargs = set_flag_vals(flag_names, parsed)
kwargs = set_kwargs(parsed)
colorama.init()
conf.initialize_settings()
try:
api = LBRYAPIClient.get_client()
status = api.status()
api.status()
except (URLError, ConnectionError) as err:
if isinstance(err, HTTPError) and err.code == UNAUTHORIZED:
api = AuthAPIClient.config()
# this can happen if the daemon is using auth with the --http-auth flag
# when the config setting is to not use it
try:
status = api.status()
api.status()
except:
print_error("Daemon requires authentication, but none was provided.",
suggest_help=False)
@@ -108,20 +100,6 @@ def main():
suggest_help=False)
return 1
status_code = status['startup_status']['code']
if status_code != "started" and method not in Daemon.allowed_during_startup:
print "Daemon is in the process of starting. Please try again in a bit."
message = status['startup_status']['message']
if message:
if (
status['startup_status']['code'] == LOADING_WALLET_CODE
and status['blockchain_status']['blocks_behind'] > 0
):
message += '. Blocks left: ' + str(status['blockchain_status']['blocks_behind'])
print " Status: " + message
return 1
# TODO: check if port is bound. Error if its not
try:

View file

@@ -10,7 +10,6 @@ from lbrynet import analytics
from lbrynet import conf
from lbrynet.core import utils
from lbrynet.core import log_support
from lbrynet.daemon.DaemonServer import DaemonServer
from lbrynet.daemon.auth.client import LBRYAPIClient
from lbrynet.daemon.Daemon import Daemon
@@ -175,18 +174,7 @@ def start_server_and_listen(use_auth, analytics_manager, quiet):
logging.getLogger("requests").setLevel(logging.CRITICAL)
analytics_manager.send_server_startup()
daemon_server = DaemonServer(analytics_manager)
try:
yield daemon_server.start(use_auth)
analytics_manager.send_server_startup_success()
if not quiet:
print "Started lbrynet-daemon!"
defer.returnValue(True)
except Exception as e:
log.exception('Failed to start lbrynet-daemon')
analytics_manager.send_server_startup_error(str(e))
daemon_server.stop()
raise
yield Daemon().start_listening()
def threaded_terminal(started_daemon, quiet):

View file

@@ -12,13 +12,12 @@ from lbrynet.core import log_support
import argparse
import logging.handlers
from twisted.internet import defer, reactor
from twisted.internet import reactor
from jsonrpc.proxy import JSONRPCProxy
from lbrynet import analytics
from lbrynet import conf
from lbrynet.core import utils, system_info
from lbrynet.daemon.DaemonServer import DaemonServer
from lbrynet.daemon.Daemon import Daemon
log = logging.getLogger(__name__)
@@ -71,6 +70,7 @@ def start():
lbrynet_log = conf.settings.get_log_filename()
log_support.configure_logging(lbrynet_log, not args.quiet, args.verbose)
log_support.configure_loggly_handler()
log.debug('Final Settings: %s', conf.settings.get_current_settings_dict())
try:
@@ -84,8 +84,8 @@ def start():
log.info("Starting lbrynet-daemon from command line")
if test_internet_connection():
analytics_manager = analytics.Manager.new_instance()
start_server_and_listen(analytics_manager)
daemon = Daemon()
daemon.start_listening()
reactor.run()
else:
log.info("Not connected to internet, unable to start")
@@ -101,24 +101,5 @@ def update_settings_from_args(args):
}, data_types=(conf.TYPE_CLI,))
@defer.inlineCallbacks
def start_server_and_listen(analytics_manager):
"""
Args:
use_auth: set to true to enable http authentication
analytics_manager: to send analytics
"""
analytics_manager.send_server_startup()
daemon_server = DaemonServer(analytics_manager)
try:
yield daemon_server.start(conf.settings['use_auth_http'])
analytics_manager.send_server_startup_success()
except Exception as e:
log.exception('Failed to start lbrynet-daemon')
analytics_manager.send_server_startup_error(str(e))
daemon_server.stop()
if __name__ == "__main__":
start()

View file

@@ -1,77 +0,0 @@
import logging
import os
from twisted.web import server, guard, resource
from twisted.internet import defer, reactor, error
from twisted.cred import portal
from lbrynet import conf
from lbrynet.daemon.Daemon import Daemon
from lbrynet.daemon.auth.auth import PasswordChecker, HttpPasswordRealm
from lbrynet.daemon.auth.util import initialize_api_key_file
log = logging.getLogger(__name__)
class IndexResource(resource.Resource):
def getChild(self, name, request):
request.setHeader('cache-control', 'no-cache, no-store, must-revalidate')
request.setHeader('expires', '0')
return self if name == '' else resource.Resource.getChild(self, name, request)
class DaemonServer(object):
def __init__(self, analytics_manager=None):
self._daemon = None
self.root = None
self.server_port = None
self.analytics_manager = analytics_manager
def _setup_server(self, use_auth):
self.root = IndexResource()
self._daemon = Daemon(self.analytics_manager)
self.root.putChild("", self._daemon)
# TODO: DEPRECATED, remove this and just serve the API at the root
self.root.putChild(conf.settings['API_ADDRESS'], self._daemon)
lbrynet_server = get_site_base(use_auth, self.root)
try:
self.server_port = reactor.listenTCP(
conf.settings['api_port'], lbrynet_server, interface=conf.settings['api_host'])
log.info("lbrynet API listening on TCP %s:%i", conf.settings['api_host'], conf.settings['api_port'])
except error.CannotListenError:
log.info('Daemon already running, exiting app')
raise
return defer.succeed(True)
@defer.inlineCallbacks
def start(self, use_auth):
yield self._setup_server(use_auth)
yield self._daemon.setup()
def stop(self):
if reactor.running:
log.info("Stopping the reactor")
reactor.fireSystemEvent("shutdown")
def get_site_base(use_auth, root):
if use_auth:
log.info("Using authenticated API")
root = create_auth_session(root)
else:
log.info("Using non-authenticated API")
return server.Site(root)
def create_auth_session(root):
pw_path = os.path.join(conf.settings['data_dir'], ".api_keys")
initialize_api_key_file(pw_path)
checker = PasswordChecker.load_file(pw_path)
realm = HttpPasswordRealm(root)
portal_to_realm = portal.Portal(realm, [checker, ])
factory = guard.BasicCredentialFactory('Login to lbrynet api')
_lbrynet_server = guard.HTTPAuthSessionWrapper(portal_to_realm, [factory, ])
return _lbrynet_server

View file

@@ -30,8 +30,8 @@ log = logging.getLogger(__name__)
class GetStream(object):
def __init__(self, sd_identifier, session, exchange_rate_manager,
max_key_fee, disable_max_key_fee, data_rate=None, timeout=None):
def __init__(self, sd_identifier, wallet, exchange_rate_manager, blob_manager, peer_finder, rate_limiter,
payment_rate_manager, storage, max_key_fee, disable_max_key_fee, data_rate=None, timeout=None):
self.timeout = timeout or conf.settings['download_timeout']
self.data_rate = data_rate or conf.settings['data_rate']
@@ -41,11 +41,14 @@ class GetStream(object):
self.timeout_counter = 0
self.code = None
self.sd_hash = None
self.session = session
self.wallet = self.session.wallet
self.blob_manager = blob_manager
self.peer_finder = peer_finder
self.rate_limiter = rate_limiter
self.wallet = wallet
self.exchange_rate_manager = exchange_rate_manager
self.payment_rate_manager = self.session.payment_rate_manager
self.payment_rate_manager = payment_rate_manager
self.sd_identifier = sd_identifier
self.storage = storage
self.downloader = None
self.checker = LoopingCall(self.check_status)
@@ -174,15 +177,17 @@ class GetStream(object):
@defer.inlineCallbacks
def _download_sd_blob(self):
sd_blob = yield download_sd_blob(self.session, self.sd_hash,
self.payment_rate_manager, self.timeout)
sd_blob = yield download_sd_blob(
self.sd_hash, self.blob_manager, self.peer_finder, self.rate_limiter, self.payment_rate_manager,
self.wallet, self.timeout, conf.settings['download_mirrors']
)
defer.returnValue(sd_blob)
@defer.inlineCallbacks
def _download(self, sd_blob, name, key_fee, txid, nout, file_name=None):
self.downloader = yield self._create_downloader(sd_blob, file_name=file_name)
yield self.pay_key_fee(key_fee, name)
yield self.session.storage.save_content_claim(self.downloader.stream_hash, "%s:%i" % (txid, nout))
yield self.storage.save_content_claim(self.downloader.stream_hash, "%s:%i" % (txid, nout))
log.info("Downloading lbry://%s (%s) --> %s", name, self.sd_hash[:6], self.download_path)
self.finished_deferred = self.downloader.start()
self.finished_deferred.addCallbacks(lambda result: self.finish(result, name), self.fail)

View file

@@ -12,7 +12,7 @@ log = logging.getLogger(__name__)
CURRENCY_PAIRS = ["USDBTC", "BTCLBC"]
BITTREX_FEE = 0.0025
COINBASE_FEE = 0.0 #add fee
COINBASE_FEE = 0.0 # add fee
class ExchangeRate(object):
@@ -37,6 +37,7 @@ class ExchangeRate(object):
class MarketFeed(object):
REQUESTS_TIMEOUT = 20
EXCHANGE_RATE_UPDATE_RATE_SEC = 300
def __init__(self, market, name, url, params, fee):
self.market = market
self.name = name
@@ -115,7 +116,7 @@ class BittrexFeed(MarketFeed):
qtys = sum([i['Quantity'] for i in trades])
if totals <= 0 or qtys <= 0:
raise InvalidExchangeRateResponse(self.market, 'quantities were not positive')
vwap = totals/qtys
vwap = totals / qtys
return defer.succeed(float(1.0 / vwap))
@@ -175,12 +176,11 @@ class CryptonatorBTCFeed(MarketFeed):
except ValueError:
raise InvalidExchangeRateResponse(self.name, "invalid rate response")
if 'ticker' not in json_response or len(json_response['ticker']) == 0 or \
'success' not in json_response or json_response['success'] is not True:
'success' not in json_response or json_response['success'] is not True:
raise InvalidExchangeRateResponse(self.name, 'result not found')
return defer.succeed(float(json_response['ticker']['price']))
class CryptonatorFeed(MarketFeed):
def __init__(self):
MarketFeed.__init__(
@@ -198,7 +198,7 @@ class CryptonatorFeed(MarketFeed):
except ValueError:
raise InvalidExchangeRateResponse(self.name, "invalid rate response")
if 'ticker' not in json_response or len(json_response['ticker']) == 0 or \
'success' not in json_response or json_response['success'] is not True:
'success' not in json_response or json_response['success'] is not True:
raise InvalidExchangeRateResponse(self.name, 'result not found')
return defer.succeed(float(json_response['ticker']['price']))
@@ -231,11 +231,11 @@ class ExchangeRateManager(object):
for market in self.market_feeds:
if (market.rate_is_initialized() and market.is_online() and
market.rate.currency_pair == (from_currency, to_currency)):
market.rate.currency_pair == (from_currency, to_currency)):
return amount * market.rate.spot
for market in self.market_feeds:
if (market.rate_is_initialized() and market.is_online() and
market.rate.currency_pair[0] == from_currency):
market.rate.currency_pair[0] == from_currency):
return self.convert_currency(
market.rate.currency_pair[1], to_currency, amount * market.rate.spot)
raise Exception(

View file

@@ -11,8 +11,10 @@ log = logging.getLogger(__name__)
class Publisher(object):
def __init__(self, session, lbry_file_manager, wallet, certificate_id):
self.session = session
def __init__(self, blob_manager, payment_rate_manager, storage, lbry_file_manager, wallet, certificate_id):
self.blob_manager = blob_manager
self.payment_rate_manager = payment_rate_manager
self.storage = storage
self.lbry_file_manager = lbry_file_manager
self.wallet = wallet
self.certificate_id = certificate_id
@@ -30,8 +32,10 @@ class Publisher(object):
file_name = os.path.basename(file_path)
with file_utils.get_read_handle(file_path) as read_handle:
self.lbry_file = yield create_lbry_file(self.session, self.lbry_file_manager, file_name,
read_handle)
self.lbry_file = yield create_lbry_file(
self.blob_manager, self.storage, self.payment_rate_manager, self.lbry_file_manager, file_name,
read_handle
)
if 'source' not in claim_dict['stream']:
claim_dict['stream']['source'] = {}
@@ -42,15 +46,16 @@ class Publisher(object):
claim_out = yield self.make_claim(name, bid, claim_dict, claim_address, change_address)
# check if we have a file already for this claim (if this is a publish update with a new stream)
old_stream_hashes = yield self.session.storage.get_old_stream_hashes_for_claim_id(claim_out['claim_id'],
self.lbry_file.stream_hash)
old_stream_hashes = yield self.storage.get_old_stream_hashes_for_claim_id(
claim_out['claim_id'], self.lbry_file.stream_hash
)
if old_stream_hashes:
for lbry_file in filter(lambda l: l.stream_hash in old_stream_hashes,
list(self.lbry_file_manager.lbry_files)):
yield self.lbry_file_manager.delete_lbry_file(lbry_file, delete_file=False)
log.info("Removed old stream for claim update: %s", lbry_file.stream_hash)
yield self.session.storage.save_content_claim(
yield self.storage.save_content_claim(
self.lbry_file.stream_hash, "%s:%i" % (claim_out['txid'], claim_out['nout'])
)
defer.returnValue(claim_out)
@@ -60,8 +65,9 @@ class Publisher(object):
"""Make a claim without creating a lbry file"""
claim_out = yield self.make_claim(name, bid, claim_dict, claim_address, change_address)
if stream_hash: # the stream_hash returned from the db will be None if this isn't a stream we have
yield self.session.storage.save_content_claim(stream_hash, "%s:%i" % (claim_out['txid'],
claim_out['nout']))
yield self.storage.save_content_claim(
stream_hash, "%s:%i" % (claim_out['txid'], claim_out['nout'])
)
self.lbry_file = [f for f in self.lbry_file_manager.lbry_files if f.stream_hash == stream_hash][0]
defer.returnValue(claim_out)

View file

@@ -1,3 +1,4 @@
from lbrynet import custom_logger
import Components # register Component classes
from lbrynet.daemon.auth.client import LBRYAPIClient
get_client = LBRYAPIClient.get_client

View file

@@ -0,0 +1,38 @@
import logging
import os
from twisted.web import server, guard, resource
from twisted.cred import portal
from lbrynet import conf
from .auth import PasswordChecker, HttpPasswordRealm
from .util import initialize_api_key_file
log = logging.getLogger(__name__)
class AuthJSONRPCResource(resource.Resource):
def __init__(self, protocol):
resource.Resource.__init__(self)
self.putChild("", protocol)
self.putChild(conf.settings['API_ADDRESS'], protocol)
def getChild(self, name, request):
request.setHeader('cache-control', 'no-cache, no-store, must-revalidate')
request.setHeader('expires', '0')
return self if name == '' else resource.Resource.getChild(self, name, request)
def getServerFactory(self):
if conf.settings['use_auth_http']:
log.info("Using authenticated API")
pw_path = os.path.join(conf.settings['data_dir'], ".api_keys")
initialize_api_key_file(pw_path)
checker = PasswordChecker.load_file(pw_path)
realm = HttpPasswordRealm(self)
portal_to_realm = portal.Portal(realm, [checker, ])
factory = guard.BasicCredentialFactory('Login to lbrynet api')
root = guard.HTTPAuthSessionWrapper(portal_to_realm, [factory, ])
else:
log.info("Using non-authenticated API")
root = self
return server.Site(root)
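For reference, this resource is what AuthJSONRPCServer.get_server_factory() (shown further down) hands to the reactor; a minimal sketch of the wiring, assuming `protocol` is the JSONRPC server instance:

    from twisted.internet import reactor
    root = AuthJSONRPCResource(protocol)
    reactor.listenTCP(conf.settings['api_port'], root.getServerFactory(),
                      interface=conf.settings['api_host'])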

View file

@@ -2,8 +2,10 @@ import logging
import urlparse
import json
import inspect
import signal
from decimal import Decimal
from functools import wraps
from zope.interface import implements
from twisted.web import server, resource
from twisted.internet import defer
@@ -12,13 +14,16 @@ from twisted.internet.error import ConnectionDone, ConnectionLost
from txjsonrpc import jsonrpclib
from traceback import format_exc
from lbrynet import conf
from lbrynet import conf, analytics
from lbrynet.core.Error import InvalidAuthenticationToken
from lbrynet.core import utils
from lbrynet.daemon.auth.util import APIKey, get_auth_message
from lbrynet.daemon.auth.client import LBRY_SECRET
from lbrynet.core.Error import ComponentsNotStarted, ComponentStartConditionNotMet
from lbrynet.core.looping_call_manager import LoopingCallManager
from lbrynet.daemon.ComponentManager import ComponentManager
from lbrynet.undecorated import undecorated
from .util import APIKey, get_auth_message
from .client import LBRY_SECRET
from .factory import AuthJSONRPCResource
log = logging.getLogger(__name__)
EMPTY_PARAMS = [{}]
@@ -91,10 +96,6 @@ class UnknownAPIMethodError(Exception):
pass
class NotAllowedDuringStartupError(Exception):
pass
def trap(err, *to_trap):
err.trap(*to_trap)
@@ -141,6 +142,29 @@ class AuthorizedBase(object):
return f
return _deprecated_wrapper
@staticmethod
def requires(*components, **conditions):
if conditions and ["conditions"] != conditions.keys():
raise SyntaxError("invalid conditions argument")
condition_names = conditions.get("conditions", [])
def _wrap(fn):
@defer.inlineCallbacks
@wraps(fn)
def _inner(*args, **kwargs):
component_manager = args[0].component_manager
for condition_name in condition_names:
condition_result, err_msg = yield component_manager.evaluate_condition(condition_name)
if not condition_result:
raise ComponentStartConditionNotMet(err_msg)
if not component_manager.all_components_running(*components):
raise ComponentsNotStarted("the following required components have not yet started: "
"%s" % json.dumps(components))
result = yield fn(*args, **kwargs)
defer.returnValue(result)
return _inner
return _wrap
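A minimal usage sketch for the decorator above, with a hypothetical command and condition name (WALLET_COMPONENT is the component constant used throughout Components.py; per the changelog, `requires` registers the components a `jsonrpc_` command needs):

class Daemon(AuthJSONRPCServer):
    @AuthJSONRPCServer.requires(WALLET_COMPONENT, conditions=["wallet_unlocked"])
    def jsonrpc_wallet_balance(self):
        # self.wallet is set via component_attributes once the wallet component starts
        return self.wallet.get_balance()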
class AuthJSONRPCServer(AuthorizedBase):
"""
@@ -149,7 +173,6 @@ class AuthJSONRPCServer(AuthorizedBase):
API methods are named with a leading "jsonrpc_"
Attributes:
allowed_during_startup (list): list of api methods that are callable before the server has finished startup
        sessions (dict): {<session id>: <lbrynet.daemon.auth.util.APIKey>}
callable_methods (dict): {<api method name>: <api method>}
@@ -170,14 +193,88 @@ class AuthJSONRPCServer(AuthorizedBase):
isLeaf = True
allowed_during_startup = []
component_attributes = {}
def __init__(self, use_authentication=None):
def __init__(self, analytics_manager=None, component_manager=None, use_authentication=None, to_skip=None,
looping_calls=None, reactor=None):
if not reactor:
from twisted.internet import reactor
self.analytics_manager = analytics_manager or analytics.Manager.new_instance()
self.component_manager = component_manager or ComponentManager(
analytics_manager=self.analytics_manager,
skip_components=to_skip or [],
reactor=reactor
)
self.looping_call_manager = LoopingCallManager({n: lc for n, (lc, t) in (looping_calls or {}).iteritems()})
self._looping_call_times = {n: t for n, (lc, t) in (looping_calls or {}).iteritems()}
self._use_authentication = use_authentication or conf.settings['use_auth_http']
self._component_setup_deferred = None
self.announced_startup = False
self.sessions = {}
@defer.inlineCallbacks
def start_listening(self):
from twisted.internet import reactor, error as tx_error
try:
reactor.listenTCP(
conf.settings['api_port'], self.get_server_factory(), interface=conf.settings['api_host']
)
log.info("lbrynet API listening on TCP %s:%i", conf.settings['api_host'], conf.settings['api_port'])
yield self.setup()
self.analytics_manager.send_server_startup_success()
except tx_error.CannotListenError:
log.error('lbrynet API failed to bind TCP %s:%i for listening', conf.settings['api_host'],
conf.settings['api_port'])
reactor.fireSystemEvent("shutdown")
except defer.CancelledError:
log.info("shutting down before finished starting")
reactor.fireSystemEvent("shutdown")
except Exception as err:
self.analytics_manager.send_server_startup_error(str(err))
log.exception('Failed to start lbrynet-daemon')
reactor.fireSystemEvent("shutdown")
def setup(self):
return NotImplementedError()
from twisted.internet import reactor
reactor.addSystemEventTrigger('before', 'shutdown', self._shutdown)
if not self.analytics_manager.is_started:
self.analytics_manager.start()
for lc_name, lc_time in self._looping_call_times.iteritems():
self.looping_call_manager.start(lc_name, lc_time)
def update_attribute(setup_result, component):
setattr(self, self.component_attributes[component.component_name], component.component)
kwargs = {component: update_attribute for component in self.component_attributes.keys()}
self._component_setup_deferred = self.component_manager.setup(**kwargs)
return self._component_setup_deferred
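The update_attribute hook above assumes each subclass declares component_attributes, a mapping of component names to the attribute the started component is exposed as; a hypothetical subset:

    component_attributes = {
        WALLET_COMPONENT: "wallet",            # self.wallet <- WalletComponent.component
        FILE_MANAGER_COMPONENT: "file_manager",
    }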
@staticmethod
def _already_shutting_down(sig_num, frame):
log.info("Already shutting down")
def _shutdown(self):
# ignore INT/TERM signals once shutdown has started
signal.signal(signal.SIGINT, self._already_shutting_down)
signal.signal(signal.SIGTERM, self._already_shutting_down)
self.looping_call_manager.shutdown()
if self.analytics_manager:
self.analytics_manager.shutdown()
try:
self._component_setup_deferred.cancel()
except (AttributeError, defer.CancelledError):
pass
if self.component_manager is not None:
d = self.component_manager.stop()
d.addErrback(log.fail(), 'Failure while shutting down')
else:
d = defer.succeed(None)
return d
def get_server_factory(self):
return AuthJSONRPCResource(self).getServerFactory()
def _set_headers(self, request, data, update_secret=False):
if conf.settings['allowed_origin']:
@@ -204,11 +301,13 @@ class AuthJSONRPCServer(AuthorizedBase):
# maybe its a twisted Failure with another type of error
error = JSONRPCError(failure.getErrorMessage() or failure.type.__name__,
traceback=failure.getTraceback())
if not failure.check(ComponentsNotStarted, ComponentStartConditionNotMet):
log.warning("error processing api request: %s\ntraceback: %s", error.message,
"\n".join(error.traceback))
else:
# last resort, just cast it as a string
error = JSONRPCError(str(failure))
log.warning("error processing api request: %s\ntraceback: %s", error.message,
"\n".join(error.traceback))
response_content = jsonrpc_dumps_pretty(error, id=id_)
self._set_headers(request, response_content)
request.setResponseCode(200)
@@ -304,14 +403,6 @@ class AuthJSONRPCServer(AuthorizedBase):
request, request_id
)
return server.NOT_DONE_YET
except NotAllowedDuringStartupError:
log.warning('Function not allowed during startup: %s', function_name)
self._render_error(
JSONRPCError("This method is unavailable until the daemon is fully started",
code=JSONRPCError.CODE_INVALID_REQUEST),
request, request_id
)
return server.NOT_DONE_YET
if args == EMPTY_PARAMS or args == []:
_args, _kwargs = (), {}
@@ -416,9 +507,6 @@ class AuthJSONRPCServer(AuthorizedBase):
def _verify_method_is_callable(self, function_path):
if function_path not in self.callable_methods:
raise UnknownAPIMethodError(function_path)
if not self.announced_startup:
if function_path not in self.allowed_during_startup:
raise NotAllowedDuringStartupError(function_path)
def _get_jsonrpc_method(self, function_path):
if function_path in self.deprecated_methods:

View file

@@ -181,10 +181,17 @@ class SQLiteStorage(object):
# when it loads each file
self.content_claim_callbacks = {} # {<stream_hash>: <callable returning a deferred>}
if 'reflector' not in conf.settings['components_to_skip']:
self.check_should_announce_lc = task.LoopingCall(self.verify_will_announce_all_head_and_sd_blobs)
@defer.inlineCallbacks
def setup(self):
def _create_tables(transaction):
transaction.executescript(self.CREATE_TABLES_QUERY)
return self.db.runInteraction(_create_tables)
yield self.db.runInteraction(_create_tables)
if self.check_should_announce_lc and not self.check_should_announce_lc.running:
self.check_should_announce_lc.start(600)
defer.returnValue(None)
@defer.inlineCallbacks
def run_and_return_one_or_none(self, query, *args):
@@ -203,6 +210,8 @@ class SQLiteStorage(object):
defer.returnValue([])
def stop(self):
if self.check_should_announce_lc and self.check_should_announce_lc.running:
self.check_should_announce_lc.stop()
self.db.close()
return defer.succeed(True)
@@ -252,6 +261,11 @@ class SQLiteStorage(object):
)
defer.returnValue([blob_hash.decode('hex') for blob_hash in blob_hashes])
def count_finished_blobs(self):
return self.run_and_return_one_or_none(
"select count(*) from blob where status='finished'"
)
def update_last_announced_blob(self, blob_hash, last_announced):
return self.db.runOperation(
"update blob set next_announce_time=?, last_announced_time=?, single_announce=0 where blob_hash=?",

View file

@@ -29,6 +29,8 @@ rpcTimeout = 5
# number of rpc attempts to make before a timeout results in the node being removed as a contact
rpcAttempts = 5
# time window to count failures (in seconds)
rpcAttemptsPruningTimeWindow = 600
# Delay between iterations of iterative node lookups (for loose parallelism) (in seconds)
iterativeLookupDelay = rpcTimeout / 2

View file

@@ -185,5 +185,12 @@ class ContactManager(object):
return contact
def is_ignored(self, origin_tuple):
failed_rpc_count = len(self._rpc_failures.get(origin_tuple, []))
failed_rpc_count = len(self._prune_failures(origin_tuple))
return failed_rpc_count > constants.rpcAttempts
def _prune_failures(self, origin_tuple):
        # prune recorded failures, keeping only those within the last time window of attempts
pruning_limit = self._get_time() - constants.rpcAttemptsPruningTimeWindow
pruned = list(filter(lambda t: t >= pruning_limit, self._rpc_failures.get(origin_tuple, [])))
self._rpc_failures[origin_tuple] = pruned
return pruned
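A self-contained sketch of the sliding-window arithmetic above, using the rpcAttempts and rpcAttemptsPruningTimeWindow constants shown earlier and hypothetical timestamps:

    import time

    rpcAttempts = 5
    rpcAttemptsPruningTimeWindow = 600  # seconds

    now = time.time()
    failures = [now - 700, now - 30, now - 10]  # one stale, two recent
    pruned = [t for t in failures if t >= now - rpcAttemptsPruningTimeWindow]
    is_ignored = len(pruned) > rpcAttempts  # False: only 2 failures in the window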

View file

@@ -123,7 +123,7 @@ class _IterativeFind(object):
if (contactTriple[1], contactTriple[2]) in ((c.address, c.port) for c in self.already_contacted):
continue
elif self.node.contact_manager.is_ignored((contactTriple[1], contactTriple[2])):
raise ValueError("contact is ignored")
continue
else:
found_contact = self.node.contact_manager.make_contact(contactTriple[0], contactTriple[1],
contactTriple[2], self.node._protocol)
@@ -173,6 +173,9 @@ class _IterativeFind(object):
already_contacted_addresses = {(c.address, c.port) for c in self.already_contacted}
to_remove = []
for contact in self.shortlist:
if self.node.contact_manager.is_ignored((contact.address, contact.port)):
to_remove.append(contact) # a contact became bad during iteration
continue
if (contact.address, contact.port) not in already_contacted_addresses:
self.already_contacted.append(contact)
to_remove.append(contact)

View file

@@ -99,7 +99,7 @@ class Node(MockKademliaHelper):
routingTableClass=None, networkProtocol=None,
externalIP=None, peerPort=3333, listenUDP=None,
callLater=None, resolve=None, clock=None, peer_finder=None,
peer_manager=None):
peer_manager=None, interface=''):
"""
@param dataStore: The data store to use. This must be class inheriting
from the C{DataStore} interface (or providing the
@@ -128,6 +128,7 @@
MockKademliaHelper.__init__(self, clock, callLater, resolve, listenUDP)
self.node_id = node_id or self._generateID()
self.port = udpPort
self._listen_interface = interface
self._change_token_lc = self.get_looping_call(self.change_token)
self._refresh_node_lc = self.get_looping_call(self._refreshNode)
self._refresh_contacts_lc = self.get_looping_call(self._refreshContacts)
@@ -171,7 +172,8 @@
def start_listening(self):
if not self._listeningPort:
try:
self._listeningPort = self.reactor_listenUDP(self.port, self._protocol)
self._listeningPort = self.reactor_listenUDP(self.port, self._protocol,
interface=self._listen_interface)
except error.CannotListenError as e:
import traceback
log.error("Couldn't bind to port %d. %s", self.port, traceback.format_exc())
@@ -279,7 +281,9 @@
yield self._protocol._listening
# TODO: Refresh all k-buckets further away than this node's closest neighbour
yield self.joinNetwork(known_node_addresses or [])
self.start_looping_calls()
def start_looping_calls(self):
self.safe_start_looping_call(self._change_token_lc, constants.tokenSecretChangeInterval)
# Start refreshing k-buckets periodically, if necessary
self.safe_start_looping_call(self._refresh_node_lc, constants.checkRefreshInterval)

View file

@@ -52,8 +52,8 @@ class DHTPeerFinder(DummyPeerFinder):
try:
peer_list = yield finished_deferred
except defer.TimeoutError:
log.warning("DHT timed out while looking peers for blob"
" %s after %s seconds.", blob_hash, timeout)
log.debug("DHT timed out while looking peers for blob %s after %s seconds",
blob_hash, timeout)
peer_list = []
peers = set(peer_list)

View file

@@ -59,7 +59,8 @@ class EncryptedFileStreamCreator(CryptStreamCreator):
# we can simply read the file from the disk without needing to
# involve reactor.
@defer.inlineCallbacks
def create_lbry_file(session, lbry_file_manager, file_name, file_handle, key=None, iv_generator=None):
def create_lbry_file(blob_manager, storage, payment_rate_manager, lbry_file_manager, file_name, file_handle,
key=None, iv_generator=None):
"""Turn a plain file into an LBRY File.
An LBRY File is a collection of encrypted blobs of data and the metadata that binds them
@@ -98,7 +99,7 @@ def create_lbry_file(session, lbry_file_manager, file_name, file_handle, key=None
file_directory = os.path.dirname(file_handle.name)
lbry_file_creator = EncryptedFileStreamCreator(
session.blob_manager, lbry_file_manager, base_file_name, key, iv_generator
blob_manager, lbry_file_manager, base_file_name, key, iv_generator
)
yield lbry_file_creator.setup()
@@ -114,18 +115,18 @@ def create_lbry_file(session, lbry_file_manager, file_name, file_handle, key=None
log.debug("making the sd blob")
sd_info = lbry_file_creator.sd_info
descriptor_writer = BlobStreamDescriptorWriter(session.blob_manager)
descriptor_writer = BlobStreamDescriptorWriter(blob_manager)
sd_hash = yield descriptor_writer.create_descriptor(sd_info)
log.debug("saving the stream")
yield session.storage.store_stream(
yield storage.store_stream(
sd_info['stream_hash'], sd_hash, sd_info['stream_name'], sd_info['key'],
sd_info['suggested_file_name'], sd_info['blobs']
)
log.debug("adding to the file manager")
lbry_file = yield lbry_file_manager.add_published_file(
sd_info['stream_hash'], sd_hash, binascii.hexlify(file_directory), session.payment_rate_manager,
session.payment_rate_manager.min_blob_data_payment_rate
sd_info['stream_hash'], sd_hash, binascii.hexlify(file_directory), payment_rate_manager,
payment_rate_manager.min_blob_data_payment_rate
)
defer.returnValue(lbry_file)

View file

@@ -6,8 +6,9 @@ import binascii
from zope.interface import implements
from twisted.internet import defer
from lbrynet import conf
from lbrynet.core.client.StreamProgressManager import FullStreamProgressManager
from lbrynet.core.HTTPBlobDownloader import HTTPBlobDownloader
from lbrynet.core.utils import short_hash
from lbrynet.lbry_file.client.EncryptedFileDownloader import EncryptedFileSaver
from lbrynet.lbry_file.client.EncryptedFileDownloader import EncryptedFileDownloader
@@ -37,7 +38,7 @@ class ManagedEncryptedFileDownloader(EncryptedFileSaver):
def __init__(self, rowid, stream_hash, peer_finder, rate_limiter, blob_manager, storage, lbry_file_manager,
payment_rate_manager, wallet, download_directory, file_name, stream_name, sd_hash, key,
suggested_file_name):
suggested_file_name, download_mirrors=None):
EncryptedFileSaver.__init__(
self, stream_hash, peer_finder, rate_limiter, blob_manager, storage, payment_rate_manager, wallet,
download_directory, key, stream_name, file_name
@@ -55,6 +56,11 @@ class ManagedEncryptedFileDownloader(EncryptedFileSaver):
self.channel_claim_id = None
self.channel_name = None
self.metadata = None
self.mirror = None
if download_mirrors:
self.mirror = HTTPBlobDownloader(
self.blob_manager, servers=download_mirrors or conf.settings['download_mirrors']
)
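        # per the changelog, the mirror downloader fetches blobs over http
        # concurrently with the p2p download; _start() feeds it the stream's
        # blob hashes before starting it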
def set_claim_info(self, claim_info):
self.claim_id = claim_info['claim_id']
@@ -94,11 +100,13 @@ class ManagedEncryptedFileDownloader(EncryptedFileSaver):
@defer.inlineCallbacks
def stop(self, err=None, change_status=True):
log.debug('Stopping download for stream %s', short_hash(self.stream_hash))
if self.mirror:
self.mirror.stop()
# EncryptedFileSaver deletes metadata when it's stopped. We don't want that here.
yield EncryptedFileDownloader.stop(self, err=err)
if change_status is True:
status = yield self._save_status()
defer.returnValue(status)
defer.returnValue(status)
@defer.inlineCallbacks
def status(self):
@@ -123,6 +131,10 @@ class ManagedEncryptedFileDownloader(EncryptedFileSaver):
yield EncryptedFileSaver._start(self)
status = yield self._save_status()
log_status(self.sd_hash, status)
if self.mirror:
blobs = yield self.storage.get_blobs_for_stream(self.stream_hash)
self.mirror.blob_hashes = [b.blob_hash for b in blobs if b.blob_hash is not None]
self.mirror.start()
defer.returnValue(status)
def _get_finished_deferred_callback_value(self):
@@ -155,23 +167,25 @@ class ManagedEncryptedFileDownloader(EncryptedFileSaver):
class ManagedEncryptedFileDownloaderFactory(object):
implements(IStreamDownloaderFactory)
def __init__(self, lbry_file_manager):
def __init__(self, lbry_file_manager, blob_manager):
self.lbry_file_manager = lbry_file_manager
self.blob_manager = blob_manager
def can_download(self, sd_validator):
# TODO: add a sd_validator for non live streams, use it
return True
@defer.inlineCallbacks
def make_downloader(self, metadata, data_rate, payment_rate_manager, download_directory, file_name=None):
stream_hash = yield save_sd_info(self.lbry_file_manager.session.blob_manager,
def make_downloader(self, metadata, data_rate, payment_rate_manager, download_directory, file_name=None,
download_mirrors=None):
stream_hash = yield save_sd_info(self.blob_manager,
metadata.source_blob_hash,
metadata.validator.raw_info)
if file_name:
file_name = binascii.hexlify(file_name)
lbry_file = yield self.lbry_file_manager.add_downloaded_file(
stream_hash, metadata.source_blob_hash, binascii.hexlify(download_directory), payment_rate_manager,
data_rate, file_name=file_name
data_rate, file_name=file_name, download_mirrors=download_mirrors
)
defer.returnValue(lbry_file)

View file

@@ -28,15 +28,17 @@ class EncryptedFileManager(object):
# when reflecting files, reflect up to this many files at a time
CONCURRENT_REFLECTS = 5
def __init__(self, session, sd_identifier):
def __init__(self, peer_finder, rate_limiter, blob_manager, wallet, payment_rate_manager, storage, sd_identifier):
self.auto_re_reflect = conf.settings['reflect_uploads'] and conf.settings['auto_re_reflect_interval'] > 0
self.auto_re_reflect_interval = conf.settings['auto_re_reflect_interval']
self.session = session
self.storage = session.storage
self.peer_finder = peer_finder
self.rate_limiter = rate_limiter
self.blob_manager = blob_manager
self.wallet = wallet
self.payment_rate_manager = payment_rate_manager
self.storage = storage
# TODO: why is sd_identifier part of the file manager?
self.sd_identifier = sd_identifier
assert sd_identifier
self.lbry_files = []
self.lbry_file_reflector = task.LoopingCall(self.reflect_lbry_files)
@@ -47,14 +49,14 @@ class EncryptedFileManager(object):
log.info("Started file manager")
def get_lbry_file_status(self, lbry_file):
return self.session.storage.get_lbry_file_status(lbry_file.rowid)
return self.storage.get_lbry_file_status(lbry_file.rowid)
def set_lbry_file_data_payment_rate(self, lbry_file, new_rate):
return self.session.storage(lbry_file.rowid, new_rate)
return self.storage(lbry_file.rowid, new_rate)
def change_lbry_file_status(self, lbry_file, status):
log.debug("Changing status of %s to %s", lbry_file.stream_hash, status)
return self.session.storage.change_file_status(lbry_file.rowid, status)
return self.storage.change_file_status(lbry_file.rowid, status)
def get_lbry_file_status_reports(self):
ds = []
@@ -71,35 +73,36 @@ class EncryptedFileManager(object):
return dl
def _add_to_sd_identifier(self):
downloader_factory = ManagedEncryptedFileDownloaderFactory(self)
downloader_factory = ManagedEncryptedFileDownloaderFactory(self, self.blob_manager)
self.sd_identifier.add_stream_downloader_factory(
EncryptedFileStreamType, downloader_factory)
def _get_lbry_file(self, rowid, stream_hash, payment_rate_manager, sd_hash, key,
stream_name, file_name, download_directory, suggested_file_name):
stream_name, file_name, download_directory, suggested_file_name, download_mirrors=None):
return ManagedEncryptedFileDownloader(
rowid,
stream_hash,
self.session.peer_finder,
self.session.rate_limiter,
self.session.blob_manager,
self.session.storage,
self.peer_finder,
self.rate_limiter,
self.blob_manager,
self.storage,
self,
payment_rate_manager,
self.session.wallet,
self.wallet,
download_directory,
file_name,
stream_name=stream_name,
sd_hash=sd_hash,
key=key,
suggested_file_name=suggested_file_name
suggested_file_name=suggested_file_name,
download_mirrors=download_mirrors
)
def _start_lbry_file(self, file_info, payment_rate_manager, claim_info):
def _start_lbry_file(self, file_info, payment_rate_manager, claim_info, download_mirrors=None):
lbry_file = self._get_lbry_file(
file_info['row_id'], file_info['stream_hash'], payment_rate_manager, file_info['sd_hash'],
file_info['key'], file_info['stream_name'], file_info['file_name'], file_info['download_directory'],
file_info['suggested_file_name']
file_info['suggested_file_name'], download_mirrors
)
if claim_info:
lbry_file.set_claim_info(claim_info)
@@ -115,9 +118,9 @@ class EncryptedFileManager(object):
@defer.inlineCallbacks
def _start_lbry_files(self):
files = yield self.session.storage.get_all_lbry_files()
claim_infos = yield self.session.storage.get_claims_from_stream_hashes([file['stream_hash'] for file in files])
prm = self.session.payment_rate_manager
files = yield self.storage.get_all_lbry_files()
claim_infos = yield self.storage.get_claims_from_stream_hashes([file['stream_hash'] for file in files])
prm = self.payment_rate_manager
log.info("Starting %i files", len(files))
for file_info in files:
@@ -153,7 +156,7 @@ class EncryptedFileManager(object):
@defer.inlineCallbacks
def add_published_file(self, stream_hash, sd_hash, download_directory, payment_rate_manager, blob_data_rate):
status = ManagedEncryptedFileDownloader.STATUS_FINISHED
stream_metadata = yield get_sd_info(self.session.storage, stream_hash, include_blobs=False)
stream_metadata = yield get_sd_info(self.storage, stream_hash, include_blobs=False)
key = stream_metadata['key']
stream_name = stream_metadata['stream_name']
file_name = stream_metadata['suggested_file_name']
@@ -162,7 +165,7 @@ class EncryptedFileManager(object):
)
lbry_file = self._get_lbry_file(
rowid, stream_hash, payment_rate_manager, sd_hash, key, stream_name, file_name, download_directory,
stream_metadata['suggested_file_name']
stream_metadata['suggested_file_name'], download_mirrors=None
)
lbry_file.restore(status)
yield lbry_file.get_claim_info()
@@ -172,11 +175,11 @@ class EncryptedFileManager(object):
@defer.inlineCallbacks
def add_downloaded_file(self, stream_hash, sd_hash, download_directory, payment_rate_manager=None,
blob_data_rate=None, status=None, file_name=None):
blob_data_rate=None, status=None, file_name=None, download_mirrors=None):
status = status or ManagedEncryptedFileDownloader.STATUS_STOPPED
payment_rate_manager = payment_rate_manager or self.session.payment_rate_manager
payment_rate_manager = payment_rate_manager or self.payment_rate_manager
blob_data_rate = blob_data_rate or payment_rate_manager.min_blob_data_payment_rate
stream_metadata = yield get_sd_info(self.session.storage, stream_hash, include_blobs=False)
stream_metadata = yield get_sd_info(self.storage, stream_hash, include_blobs=False)
key = stream_metadata['key']
stream_name = stream_metadata['stream_name']
file_name = file_name or stream_metadata['suggested_file_name']
@@ -186,10 +189,10 @@ class EncryptedFileManager(object):
rowid = yield self.storage.save_downloaded_file(
stream_hash, os.path.basename(file_name.decode('hex')).encode('hex'), download_directory, blob_data_rate
)
file_name = yield self.session.storage.get_filename_for_rowid(rowid)
file_name = yield self.storage.get_filename_for_rowid(rowid)
lbry_file = self._get_lbry_file(
rowid, stream_hash, payment_rate_manager, sd_hash, key, stream_name, file_name, download_directory,
stream_metadata['suggested_file_name']
stream_metadata['suggested_file_name'], download_mirrors
)
lbry_file.restore(status)
yield lbry_file.get_claim_info(include_supports=False)
@@ -221,7 +224,7 @@ class EncryptedFileManager(object):
del self.storage.content_claim_callbacks[lbry_file.stream_hash]
yield lbry_file.delete_data()
yield self.session.storage.delete_stream(lbry_file.stream_hash)
yield self.storage.delete_stream(lbry_file.stream_hash)
if delete_file and os.path.isfile(full_path):
os.remove(full_path)

File diff suppressed because it is too large

View file

@@ -1,33 +1,42 @@
from twisted.internet import defer, threads, error
import os
from twisted.internet import defer, error
from twisted.trial import unittest
from lbrynet import conf
from lbrynet.core.StreamDescriptor import get_sd_info
from lbrynet import reflector
from lbrynet.core import BlobManager, PeerManager
from lbrynet.core import Session
from lbrynet.core import StreamDescriptor
from lbrynet.lbry_file.client import EncryptedFileOptions
from lbrynet.file_manager import EncryptedFileCreator
from lbrynet.file_manager import EncryptedFileManager
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.core.RateLimiter import DummyRateLimiter
from lbrynet.database.storage import SQLiteStorage
from lbrynet.core.PaymentRateManager import OnlyFreePaymentsManager
from lbrynet.tests import mocks
from lbrynet.tests.util import mk_db_and_blob_dir, rm_db_and_blob_dir
class TestReflector(unittest.TestCase):
def setUp(self):
mocks.mock_conf_settings(self)
self.session = None
self.lbry_file_manager = None
self.server_blob_manager = None
self.reflector_port = None
self.port = None
self.addCleanup(self.take_down_env)
mocks.mock_conf_settings(self)
self.server_db_dir, self.server_blob_dir = mk_db_and_blob_dir()
self.client_db_dir, self.client_blob_dir = mk_db_and_blob_dir()
prm = OnlyFreePaymentsManager()
wallet = mocks.Wallet()
peer_manager = PeerManager.PeerManager()
peer_finder = mocks.PeerFinder(5553, peer_manager, 2)
sd_identifier = StreamDescriptor.StreamDescriptorIdentifier()
self.server_storage = SQLiteStorage(self.server_db_dir)
self.server_blob_manager = BlobManager.DiskBlobManager(self.server_blob_dir, self.server_storage)
self.client_storage = SQLiteStorage(self.client_db_dir)
self.client_blob_manager = BlobManager.DiskBlobManager(self.client_blob_dir, self.client_storage)
self.server_lbry_file_manager = EncryptedFileManager(
peer_finder, DummyRateLimiter(), self.server_blob_manager, wallet, prm, self.server_storage,
StreamDescriptor.StreamDescriptorIdentifier()
)
self.client_lbry_file_manager = EncryptedFileManager(
peer_finder, DummyRateLimiter(), self.client_blob_manager, wallet, prm, self.client_storage,
StreamDescriptor.StreamDescriptorIdentifier()
)
self.expected_blobs = [
(
@@ -46,60 +55,18 @@ class TestReflector(unittest.TestCase):
1015056
),
]
## Setup reflector client classes ##
self.db_dir, self.blob_dir = mk_db_and_blob_dir()
self.session = Session.Session(
conf.settings['data_rate'],
db_dir=self.db_dir,
node_id="abcd",
peer_finder=peer_finder,
blob_dir=self.blob_dir,
peer_port=5553,
dht_node_port=4444,
use_upnp=False,
wallet=wallet,
blob_tracker_class=mocks.BlobAvailabilityTracker,
external_ip="127.0.0.1",
hash_announcer=mocks.Announcer(),
)
self.lbry_file_manager = EncryptedFileManager.EncryptedFileManager(self.session,
sd_identifier)
## Setup reflector server classes ##
self.server_db_dir, self.server_blob_dir = mk_db_and_blob_dir()
self.server_session = Session.Session(
conf.settings['data_rate'],
db_dir=self.server_db_dir,
node_id="abcd",
peer_finder=peer_finder,
blob_dir=self.server_blob_dir,
peer_port=5554,
dht_node_port=4443,
use_upnp=False,
wallet=wallet,
blob_tracker_class=mocks.BlobAvailabilityTracker,
external_ip="127.0.0.1",
hash_announcer=mocks.Announcer(),
)
self.server_blob_manager = BlobManager.DiskBlobManager(self.server_blob_dir,
self.server_session.storage)
self.server_lbry_file_manager = EncryptedFileManager.EncryptedFileManager(
self.server_session, sd_identifier)
d = self.session.setup()
d.addCallback(lambda _: EncryptedFileOptions.add_lbry_file_to_sd_identifier(sd_identifier))
d.addCallback(lambda _: self.lbry_file_manager.setup())
d.addCallback(lambda _: self.server_session.setup())
d = self.server_storage.setup()
d.addCallback(lambda _: self.server_blob_manager.setup())
d.addCallback(lambda _: self.server_lbry_file_manager.setup())
d.addCallback(lambda _: self.client_storage.setup())
d.addCallback(lambda _: self.client_blob_manager.setup())
d.addCallback(lambda _: self.client_lbry_file_manager.setup())
@defer.inlineCallbacks
def verify_equal(sd_info, stream_hash):
self.assertDictEqual(mocks.create_stream_sd_file, sd_info)
sd_hash = yield self.session.storage.get_sd_blob_hash_for_stream(stream_hash)
sd_hash = yield self.client_storage.get_sd_blob_hash_for_stream(stream_hash)
defer.returnValue(sd_hash)
def save_sd_blob_hash(sd_hash):
@@ -108,7 +75,7 @@ class TestReflector(unittest.TestCase):
def verify_stream_descriptor_file(stream_hash):
self.stream_hash = stream_hash
d = get_sd_info(self.lbry_file_manager.session.storage, stream_hash, True)
d = get_sd_info(self.client_storage, stream_hash, True)
d.addCallback(verify_equal, stream_hash)
d.addCallback(save_sd_blob_hash)
return d
@ -116,8 +83,7 @@ class TestReflector(unittest.TestCase):
def create_stream():
test_file = mocks.GenFile(5209343, b''.join([chr(i + 3) for i in xrange(0, 64, 6)]))
d = EncryptedFileCreator.create_lbry_file(
self.session,
self.lbry_file_manager,
self.client_blob_manager, self.client_storage, prm, self.client_lbry_file_manager,
"test_file",
test_file,
key="0123456701234567",
@ -127,9 +93,8 @@ class TestReflector(unittest.TestCase):
return d
def start_server():
server_factory = reflector.ServerFactory(
peer_manager, self.server_blob_manager,
self.server_lbry_file_manager)
server_factory = reflector.ServerFactory(peer_manager, self.server_blob_manager,
self.server_lbry_file_manager)
from twisted.internet import reactor
port = 8943
while self.reflector_port is None:
@ -144,29 +109,31 @@ class TestReflector(unittest.TestCase):
d.addCallback(lambda _: start_server())
return d
def take_down_env(self):
d = defer.succeed(True)
## Close client classes ##
d.addCallback(lambda _: self.lbry_file_manager.stop())
d.addCallback(lambda _: self.session.shut_down())
## Close server classes ##
d.addCallback(lambda _: self.server_blob_manager.stop())
d.addCallback(lambda _: self.server_lbry_file_manager.stop())
d.addCallback(lambda _: self.server_session.shut_down())
d.addCallback(lambda _: self.reflector_port.stopListening())
def delete_test_env():
try:
rm_db_and_blob_dir(self.db_dir, self.blob_dir)
rm_db_and_blob_dir(self.server_db_dir, self.server_blob_dir)
except:
raise unittest.SkipTest("TODO: fix this for windows")
d.addCallback(lambda _: threads.deferToThread(delete_test_env))
d.addErrback(lambda err: str(err))
return d
@defer.inlineCallbacks
def tearDown(self):
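# tear down the client first, stop the reflector listener, then tear down the
# server; finally remove the temp db/blob dirs and the generated test_file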
lbry_files = self.client_lbry_file_manager.lbry_files
for lbry_file in lbry_files:
yield self.client_lbry_file_manager.delete_lbry_file(lbry_file)
yield self.client_lbry_file_manager.stop()
yield self.client_blob_manager.stop()
yield self.client_storage.stop()
self.reflector_port.stopListening()
lbry_files = self.server_lbry_file_manager.lbry_files
for lbry_file in lbry_files:
yield self.server_lbry_file_manager.delete_lbry_file(lbry_file)
yield self.server_lbry_file_manager.stop()
yield self.server_blob_manager.stop()
yield self.server_storage.stop()
try:
rm_db_and_blob_dir(self.client_db_dir, self.client_blob_dir)
except Exception:
raise unittest.SkipTest("TODO: fix this for windows")
try:
rm_db_and_blob_dir(self.server_db_dir, self.server_blob_dir)
except Exception:
raise unittest.SkipTest("TODO: fix this for windows")
if os.path.exists("test_file"):
os.remove("test_file")
def test_stream_reflector(self):
def verify_blob_on_reflector():
@ -178,16 +145,15 @@ class TestReflector(unittest.TestCase):
@defer.inlineCallbacks
def verify_stream_on_reflector():
# check stream_info_manager has all the right information
streams = yield self.server_session.storage.get_all_streams()
streams = yield self.server_storage.get_all_streams()
self.assertEqual(1, len(streams))
self.assertEqual(self.stream_hash, streams[0])
blobs = yield self.server_session.storage.get_blobs_for_stream(self.stream_hash)
blobs = yield self.server_storage.get_blobs_for_stream(self.stream_hash)
blob_hashes = [b.blob_hash for b in blobs if b.blob_hash is not None]
expected_blob_hashes = [b[0] for b in self.expected_blobs[:-1] if b[0] is not None]
self.assertEqual(expected_blob_hashes, blob_hashes)
sd_hash = yield self.server_session.storage.get_sd_blob_hash_for_stream(streams[0])
expected_sd_hash = self.expected_blobs[-1][0]
sd_hash = yield self.server_storage.get_sd_blob_hash_for_stream(streams[0])
self.assertEqual(self.sd_hash, sd_hash)
# check lbry file manager has the file
@ -195,14 +161,14 @@ class TestReflector(unittest.TestCase):
self.assertEqual(0, len(files))
streams = yield self.server_lbry_file_manager.storage.get_all_streams()
streams = yield self.server_storage.get_all_streams()
self.assertEqual(1, len(streams))
stream_info = yield self.server_lbry_file_manager.storage.get_stream_info(self.stream_hash)
stream_info = yield self.server_storage.get_stream_info(self.stream_hash)
self.assertEqual(self.sd_hash, stream_info[3])
self.assertEqual('test_file'.encode('hex'), stream_info[0])
# check should_announce blobs on blob_manager
blob_hashes = yield self.server_blob_manager.storage.get_all_should_announce_blobs()
blob_hashes = yield self.server_storage.get_all_should_announce_blobs()
self.assertSetEqual({self.sd_hash, expected_blob_hashes[0]}, set(blob_hashes))
def verify_have_blob(blob_hash, blob_size):
@ -211,7 +177,7 @@ class TestReflector(unittest.TestCase):
return d
def send_to_server():
factory = reflector.ClientFactory(self.session.blob_manager, self.stream_hash, self.sd_hash)
factory = reflector.ClientFactory(self.client_blob_manager, self.stream_hash, self.sd_hash)
from twisted.internet import reactor
reactor.connectTCP('localhost', self.port, factory)
@ -241,7 +207,7 @@ class TestReflector(unittest.TestCase):
def send_to_server(blob_hashes_to_send):
factory = reflector.BlobClientFactory(
self.session.blob_manager,
self.client_blob_manager,
blob_hashes_to_send
)
@ -261,10 +227,10 @@ class TestReflector(unittest.TestCase):
@defer.inlineCallbacks
def verify_stream_on_reflector():
# this protocol should not have any impact on stream info manager
streams = yield self.server_session.storage.get_all_streams()
streams = yield self.server_storage.get_all_streams()
self.assertEqual(0, len(streams))
# there should be no should announce blobs here
blob_hashes = yield self.server_blob_manager.storage.get_all_should_announce_blobs()
blob_hashes = yield self.server_storage.get_all_should_announce_blobs()
self.assertEqual(0, len(blob_hashes))
def verify_data_on_reflector():
@ -280,7 +246,7 @@ class TestReflector(unittest.TestCase):
def send_to_server(blob_hashes_to_send):
factory = reflector.BlobClientFactory(
self.session.blob_manager,
self.client_blob_manager,
blob_hashes_to_send
)
factory.protocol_version = 0
@ -311,20 +277,20 @@ class TestReflector(unittest.TestCase):
def verify_stream_on_reflector():
# check stream_info_manager has all the right information
streams = yield self.server_session.storage.get_all_streams()
streams = yield self.server_storage.get_all_streams()
self.assertEqual(1, len(streams))
self.assertEqual(self.stream_hash, streams[0])
blobs = yield self.server_session.storage.get_blobs_for_stream(self.stream_hash)
blobs = yield self.server_storage.get_blobs_for_stream(self.stream_hash)
blob_hashes = [b.blob_hash for b in blobs if b.blob_hash is not None]
expected_blob_hashes = [b[0] for b in self.expected_blobs[:-1] if b[0] is not None]
self.assertEqual(expected_blob_hashes, blob_hashes)
sd_hash = yield self.server_session.storage.get_sd_blob_hash_for_stream(
sd_hash = yield self.server_storage.get_sd_blob_hash_for_stream(
self.stream_hash)
self.assertEqual(self.sd_hash, sd_hash)
# check should_announce blobs on blob_manager
to_announce = yield self.server_blob_manager.storage.get_all_should_announce_blobs()
to_announce = yield self.server_storage.get_all_should_announce_blobs()
self.assertSetEqual(set(to_announce), {self.sd_hash, expected_blob_hashes[0]})
def verify_have_blob(blob_hash, blob_size):
@ -334,7 +300,7 @@ class TestReflector(unittest.TestCase):
def send_to_server_as_blobs(blob_hashes_to_send):
factory = reflector.BlobClientFactory(
self.session.blob_manager,
self.client_blob_manager,
blob_hashes_to_send
)
factory.protocol_version = 0
@ -344,7 +310,7 @@ class TestReflector(unittest.TestCase):
return factory.finished_deferred
def send_to_server_as_stream(result):
factory = reflector.ClientFactory(self.session.blob_manager, self.stream_hash, self.sd_hash)
factory = reflector.ClientFactory(self.client_blob_manager, self.stream_hash, self.sd_hash)
from twisted.internet import reactor
reactor.connectTCP('localhost', self.port, factory)


@ -1,21 +1,18 @@
import os
import shutil
import tempfile
from hashlib import md5
from twisted.trial.unittest import TestCase
from twisted.internet import defer, threads
from lbrynet import conf
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.core.Session import Session
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier
from lbrynet.file_manager.EncryptedFileCreator import create_lbry_file
from lbrynet.lbry_file.client.EncryptedFileOptions import add_lbry_file_to_sd_identifier
from lbrynet.core.BlobManager import DiskBlobManager
from lbrynet.core.StreamDescriptor import get_sd_info
from lbrynet.core.PeerManager import PeerManager
from lbrynet.core.RateLimiter import DummyRateLimiter
from lbrynet.database.storage import SQLiteStorage
from lbrynet.core.PaymentRateManager import OnlyFreePaymentsManager
from lbrynet.file_manager.EncryptedFileCreator import create_lbry_file
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.tests import mocks
@ -30,6 +27,7 @@ DummyBlobAvailabilityTracker = mocks.BlobAvailabilityTracker
class TestStreamify(TestCase):
maxDiff = 5000
def setUp(self):
mocks.mock_conf_settings(self)
self.session = None
@ -38,49 +36,41 @@ class TestStreamify(TestCase):
self.db_dir = tempfile.mkdtemp()
self.blob_dir = os.path.join(self.db_dir, "blobfiles")
os.mkdir(self.blob_dir)
self.dht_node = FakeNode()
self.wallet = FakeWallet()
self.peer_manager = PeerManager()
self.peer_finder = FakePeerFinder(5553, self.peer_manager, 2)
self.rate_limiter = DummyRateLimiter()
self.sd_identifier = StreamDescriptorIdentifier()
self.storage = SQLiteStorage(self.db_dir)
self.blob_manager = DiskBlobManager(self.blob_dir, self.storage, self.dht_node._dataStore)
self.prm = OnlyFreePaymentsManager()
self.lbry_file_manager = EncryptedFileManager(
self.peer_finder, self.rate_limiter, self.blob_manager, self.wallet, self.prm, self.storage,
self.sd_identifier
)
d = self.storage.setup()
d.addCallback(lambda _: self.lbry_file_manager.setup())
return d
@defer.inlineCallbacks
def tearDown(self):
lbry_files = self.lbry_file_manager.lbry_files
for lbry_file in lbry_files:
yield self.lbry_file_manager.delete_lbry_file(lbry_file)
if self.lbry_file_manager is not None:
yield self.lbry_file_manager.stop()
if self.session is not None:
yield self.session.shut_down()
yield self.session.storage.stop()
yield self.lbry_file_manager.stop()
yield self.storage.stop()
yield threads.deferToThread(shutil.rmtree, self.db_dir)
if os.path.exists("test_file"):
os.remove("test_file")
def test_create_stream(self):
wallet = FakeWallet()
peer_manager = PeerManager()
peer_finder = FakePeerFinder(5553, peer_manager, 2)
hash_announcer = FakeAnnouncer()
rate_limiter = DummyRateLimiter()
sd_identifier = StreamDescriptorIdentifier()
self.session = Session(
conf.ADJUSTABLE_SETTINGS['data_rate'][1], db_dir=self.db_dir, node_id="abcd",
peer_finder=peer_finder, hash_announcer=hash_announcer,
blob_dir=self.blob_dir, peer_port=5553,
use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
blob_tracker_class=DummyBlobAvailabilityTracker,
is_generous=self.is_generous, external_ip="127.0.0.1", dht_node_class=mocks.Node
)
self.lbry_file_manager = EncryptedFileManager(self.session, sd_identifier)
d = self.session.setup()
d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
d.addCallback(lambda _: self.lbry_file_manager.setup())
def verify_equal(sd_info):
self.assertEqual(sd_info, test_create_stream_sd_file)
def verify_stream_descriptor_file(stream_hash):
d = get_sd_info(self.session.storage, stream_hash, True)
d = get_sd_info(self.storage, stream_hash, True)
d.addCallback(verify_equal)
return d
@ -92,47 +82,26 @@ class TestStreamify(TestCase):
def create_stream():
test_file = GenFile(5209343, b''.join([chr(i + 3) for i in xrange(0, 64, 6)]))
d = create_lbry_file(self.session, self.lbry_file_manager, "test_file", test_file,
key="0123456701234567", iv_generator=iv_generator())
d = create_lbry_file(
self.blob_manager, self.storage, self.prm, self.lbry_file_manager, "test_file", test_file,
key="0123456701234567", iv_generator=iv_generator()
)
d.addCallback(lambda lbry_file: lbry_file.stream_hash)
return d
d.addCallback(lambda _: create_stream())
d = create_stream()
d.addCallback(verify_stream_descriptor_file)
return d
@defer.inlineCallbacks
def test_create_and_combine_stream(self):
wallet = FakeWallet()
peer_manager = PeerManager()
peer_finder = FakePeerFinder(5553, peer_manager, 2)
hash_announcer = FakeAnnouncer()
rate_limiter = DummyRateLimiter()
sd_identifier = StreamDescriptorIdentifier()
self.session = Session(
conf.ADJUSTABLE_SETTINGS['data_rate'][1], db_dir=self.db_dir, node_id="abcd",
peer_finder=peer_finder, hash_announcer=hash_announcer,
blob_dir=self.blob_dir, peer_port=5553, dht_node_class=mocks.Node,
use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
blob_tracker_class=DummyBlobAvailabilityTracker, external_ip="127.0.0.1"
)
self.lbry_file_manager = EncryptedFileManager(self.session, sd_identifier)
@defer.inlineCallbacks
def create_stream():
test_file = GenFile(53209343, b''.join([chr(i + 5) for i in xrange(0, 64, 6)]))
lbry_file = yield create_lbry_file(self.session, self.lbry_file_manager, "test_file", test_file)
sd_hash = yield self.session.storage.get_sd_blob_hash_for_stream(lbry_file.stream_hash)
self.assertTrue(lbry_file.sd_hash, sd_hash)
yield lbry_file.start()
f = open('test_file')
hashsum = md5()
hashsum.update(f.read())
self.assertEqual(hashsum.hexdigest(), "68959747edc73df45e45db6379dd7b3b")
d = self.session.setup()
d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
d.addCallback(lambda _: self.lbry_file_manager.setup())
d.addCallback(lambda _: create_stream())
return d
test_file = GenFile(53209343, b''.join([chr(i + 5) for i in xrange(0, 64, 6)]))
lbry_file = yield create_lbry_file(self.blob_manager, self.storage, self.prm, self.lbry_file_manager,
"test_file", test_file)
sd_hash = yield self.storage.get_sd_blob_hash_for_stream(lbry_file.stream_hash)
self.assertEqual(lbry_file.sd_hash, sd_hash)
yield lbry_file.start()
f = open('test_file')
hashsum = md5()
hashsum.update(f.read())
f.close()
self.assertEqual(hashsum.hexdigest(), "68959747edc73df45e45db6379dd7b3b")


@ -1,5 +1,6 @@
import base64
import io
import mock
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
@ -10,6 +11,7 @@ from twisted.python.failure import Failure
from lbrynet.core.client.ClientRequest import ClientRequest
from lbrynet.core.Error import RequestCanceledError
from lbrynet.core import BlobAvailability
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.dht.node import Node as RealNode
from lbrynet.daemon import ExchangeRateManager as ERM
from lbrynet import conf
@ -63,6 +65,7 @@ class BTCLBCFeed(ERM.MarketFeed):
0.0
)
class USDBTCFeed(ERM.MarketFeed):
def __init__(self):
ERM.MarketFeed.__init__(
@ -74,6 +77,7 @@ class USDBTCFeed(ERM.MarketFeed):
0.0
)
class ExchangeRateManager(ERM.ExchangeRateManager):
def __init__(self, market_feeds, rates):
self.market_feeds = market_feeds
@ -360,6 +364,101 @@ class BlobAvailabilityTracker(BlobAvailability.BlobAvailabilityTracker):
pass
# The components below (FakeComponent and the FakeDelayed*/FakeFileManager
# subclasses) exist only for testing the ComponentManager's startup and stop
class FakeComponent(object):
depends_on = []
component_name = None
def __init__(self, component_manager):
self.component_manager = component_manager
self._running = False
@property
def running(self):
return self._running
def start(self):
raise NotImplementedError # Override
def stop(self):
return defer.succeed(None)
@property
def component(self):
return self
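# _setup and _stop wrap start()/stop() in maybeDeferred and only flip the
# running flag once the deferred fires, so tests can observe components that
# are mid-startup or mid-shutdown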
@defer.inlineCallbacks
def _setup(self):
result = yield defer.maybeDeferred(self.start)
self._running = True
defer.returnValue(result)
@defer.inlineCallbacks
def _stop(self):
result = yield defer.maybeDeferred(self.stop)
self._running = False
defer.returnValue(result)
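# each FakeDelayed* component below takes one reactor tick (driven by the
# test Clock) to start and/or stop, so dependency-ordered startup and
# shutdown can be stepped through with reactor.advance(1)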
class FakeDelayedWallet(FakeComponent):
component_name = "wallet"
depends_on = []
def start(self):
return defer.succeed(True)
def stop(self):
d = defer.Deferred()
self.component_manager.reactor.callLater(1, d.callback, True)
return d
class FakeDelayedBlobManager(FakeComponent):
component_name = "blob_manager"
depends_on = [FakeDelayedWallet.component_name]
def start(self):
d = defer.Deferred()
self.component_manager.reactor.callLater(1, d.callback, True)
return d
def stop(self):
d = defer.Deferred()
self.component_manager.reactor.callLater(1, d.callback, True)
return d
class FakeDelayedFileManager(FakeComponent):
component_name = "file_manager"
depends_on = [FakeDelayedBlobManager.component_name]
def start(self):
d = defer.Deferred()
self.component_manager.reactor.callLater(1, d.callback, True)
return d
def stop(self):
return defer.succeed(True)
class FakeFileManager(FakeComponent):
component_name = "file_manager"
depends_on = []
@property
def component(self):
return mock.Mock(spec=EncryptedFileManager)
def start(self):
return defer.succeed(True)
def stop(self):
pass
def get_status(self):
return {}
create_stream_sd_file = {
'stream_name': '746573745f66696c65',


@ -0,0 +1,150 @@
from twisted.internet.task import Clock
from twisted.trial import unittest
from lbrynet.daemon.ComponentManager import ComponentManager
from lbrynet.daemon.Components import DATABASE_COMPONENT, DHT_COMPONENT, STREAM_IDENTIFIER_COMPONENT
from lbrynet.daemon.Components import HASH_ANNOUNCER_COMPONENT, REFLECTOR_COMPONENT, UPNP_COMPONENT
from lbrynet.daemon.Components import PEER_PROTOCOL_SERVER_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT
from lbrynet.daemon.Components import RATE_LIMITER_COMPONENT, HEADERS_COMPONENT, PAYMENT_RATE_COMPONENT
from lbrynet.daemon import Components
from lbrynet.tests import mocks
class TestComponentManager(unittest.TestCase):
def setUp(self):
mocks.mock_conf_settings(self)
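# expected startup stages: every component in a stage depends only on
# components from earlier stages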
self.default_components_sort = [
[
Components.HeadersComponent,
Components.DatabaseComponent,
Components.ExchangeRateManagerComponent,
Components.PaymentRateComponent,
Components.RateLimiterComponent,
Components.UPnPComponent
],
[
Components.DHTComponent,
Components.WalletComponent
],
[
Components.BlobComponent,
Components.HashAnnouncerComponent
],
[
Components.PeerProtocolServerComponent,
Components.StreamIdentifierComponent
],
[
Components.FileManagerComponent
],
[
Components.ReflectorComponent
]
]
self.component_manager = ComponentManager()
def tearDown(self):
pass
def test_sort_components(self):
stages = self.component_manager.sort_components()
for stage_list, sorted_stage_list in zip(stages, self.default_components_sort):
self.assertEqual([type(stage) for stage in stage_list], sorted_stage_list)
def test_sort_components_reverse(self):
rev_stages = self.component_manager.sort_components(reverse=True)
reverse_default_components_sort = reversed(self.default_components_sort)
for stage_list, sorted_stage_list in zip(rev_stages, reverse_default_components_sort):
self.assertEqual([type(stage) for stage in stage_list], sorted_stage_list)
def test_get_component_not_exists(self):
with self.assertRaises(NameError):
self.component_manager.get_component("random_component")
class TestComponentManagerOverrides(unittest.TestCase):
def setUp(self):
mocks.mock_conf_settings(self)
def test_init_with_overrides(self):
class FakeWallet(object):
component_name = "wallet"
depends_on = []
def __init__(self, component_manager):
self.component_manager = component_manager
@property
def component(self):
return self
new_component_manager = ComponentManager(wallet=FakeWallet)
fake_wallet = new_component_manager.get_component("wallet")
# wallet should be an instance of FakeWallet and not WalletComponent from Components.py
self.assertIsInstance(fake_wallet, FakeWallet)
self.assertNotIsInstance(fake_wallet, Components.WalletComponent)
def test_init_with_wrong_overrides(self):
class FakeRandomComponent(object):
component_name = "someComponent"
depends_on = []
with self.assertRaises(SyntaxError):
ComponentManager(randomComponent=FakeRandomComponent)
class TestComponentManagerProperStart(unittest.TestCase):
def setUp(self):
self.reactor = Clock()
mocks.mock_conf_settings(self)
self.component_manager = ComponentManager(
skip_components=[DATABASE_COMPONENT, DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT, STREAM_IDENTIFIER_COMPONENT,
PEER_PROTOCOL_SERVER_COMPONENT, REFLECTOR_COMPONENT, UPNP_COMPONENT,
HEADERS_COMPONENT, PAYMENT_RATE_COMPONENT, RATE_LIMITER_COMPONENT,
EXCHANGE_RATE_MANAGER_COMPONENT],
reactor=self.reactor,
wallet=mocks.FakeDelayedWallet,
file_manager=mocks.FakeDelayedFileManager,
blob_manager=mocks.FakeDelayedBlobManager
)
def tearDown(self):
pass
def test_proper_starting_of_components(self):
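# the wallet starts immediately; blob_manager (which depends on the wallet)
# and file_manager (which depends on blob_manager) each take one Clock tick,
# so one more component is running per advance(1)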
self.component_manager.setup()
self.assertTrue(self.component_manager.get_component('wallet').running)
self.assertFalse(self.component_manager.get_component('blob_manager').running)
self.assertFalse(self.component_manager.get_component('file_manager').running)
self.reactor.advance(1)
self.assertTrue(self.component_manager.get_component('wallet').running)
self.assertTrue(self.component_manager.get_component('blob_manager').running)
self.assertFalse(self.component_manager.get_component('file_manager').running)
self.reactor.advance(1)
self.assertTrue(self.component_manager.get_component('wallet').running)
self.assertTrue(self.component_manager.get_component('blob_manager').running)
self.assertTrue(self.component_manager.get_component('file_manager').running)
def test_proper_stopping_of_components(self):
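# stopping runs in reverse dependency order: file_manager stops immediately,
# then blob_manager and finally the wallet each take one tick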
self.component_manager.setup()
self.reactor.advance(1)
self.reactor.advance(1)
self.component_manager.stop()
self.assertFalse(self.component_manager.get_component('file_manager').running)
self.assertTrue(self.component_manager.get_component('blob_manager').running)
self.assertTrue(self.component_manager.get_component('wallet').running)
self.reactor.advance(1)
self.assertFalse(self.component_manager.get_component('file_manager').running)
self.assertFalse(self.component_manager.get_component('blob_manager').running)
self.assertTrue(self.component_manager.get_component('wallet').running)
self.reactor.advance(1)
self.assertFalse(self.component_manager.get_component('file_manager').running)
self.assertFalse(self.component_manager.get_component('blob_manager').running)
self.assertFalse(self.component_manager.get_component('wallet').running)


@ -0,0 +1,95 @@
from mock import MagicMock
from twisted.trial import unittest
from twisted.internet import defer
from lbrynet.blob import BlobFile
from lbrynet.core.HTTPBlobDownloader import HTTPBlobDownloader
from lbrynet.tests.util import mk_db_and_blob_dir, rm_db_and_blob_dir
class HTTPBlobDownloaderTest(unittest.TestCase):
def setUp(self):
self.db_dir, self.blob_dir = mk_db_and_blob_dir()
self.blob_manager = MagicMock()
self.client = MagicMock()
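# the blob manager and HTTP client are MagicMocks; each test swaps in a
# different client.collect side effect to simulate the outcome of the transfer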
self.blob_hash = ('d17272b17a1ad61c4316ac13a651c2b0952063214a81333e'
'838364b01b2f07edbd165bb7ec60d2fb2f337a2c02923852')
self.blob = BlobFile(self.blob_dir, self.blob_hash)
self.blob_manager.get_blob.side_effect = lambda _: defer.succeed(self.blob)
self.response = MagicMock(code=200, length=400)
self.client.get.side_effect = lambda uri: defer.succeed(self.response)
self.downloader = HTTPBlobDownloader(self.blob_manager, [self.blob_hash], ['server1'], self.client)
self.downloader.interval = 0
def tearDown(self):
rm_db_and_blob_dir(self.db_dir, self.blob_dir)
@defer.inlineCallbacks
def test_download_successful(self):
self.client.collect.side_effect = collect
yield self.downloader.start()
self.blob_manager.get_blob.assert_called_with(self.blob_hash)
self.client.get.assert_called_with('http://{}/{}'.format('server1', self.blob_hash))
self.client.collect.assert_called()
self.assertEqual(self.blob.get_length(), self.response.length)
self.assertEqual(self.blob.get_is_verified(), True)
self.assertEqual(self.blob.writers, {})
@defer.inlineCallbacks
def test_download_invalid_content(self):
self.client.collect.side_effect = bad_collect
yield self.downloader.start()
self.assertEqual(self.blob.get_length(), self.response.length)
self.assertEqual(self.blob.get_is_verified(), False)
self.assertEqual(self.blob.writers, {})
@defer.inlineCallbacks
def test_peer_finished_first_causing_a_write_on_closed_handle(self):
self.client.collect.side_effect = lambda response, write: defer.fail(IOError('I/O operation on closed file'))
yield self.downloader.start()
self.blob_manager.get_blob.assert_called_with(self.blob_hash)
self.client.get.assert_called_with('http://{}/{}'.format('server1', self.blob_hash))
self.client.collect.assert_called()
self.assertEqual(self.blob.get_length(), self.response.length)
self.assertEqual(self.blob.writers, {})
@defer.inlineCallbacks
def test_download_transfer_failed(self):
self.client.collect.side_effect = lambda response, write: defer.fail(Exception())
yield self.downloader.start()
self.assertEqual(len(self.client.collect.mock_calls), self.downloader.max_failures)
self.blob_manager.get_blob.assert_called_with(self.blob_hash)
self.assertEqual(self.blob.get_length(), self.response.length)
self.assertEqual(self.blob.get_is_verified(), False)
self.assertEqual(self.blob.writers, {})
@defer.inlineCallbacks
def test_blob_not_found(self):
self.response.code = 404
yield self.downloader.start()
self.blob_manager.get_blob.assert_called_with(self.blob_hash)
self.client.get.assert_called_with('http://{}/{}'.format('server1', self.blob_hash))
self.client.collect.assert_not_called()
self.assertEqual(self.blob.get_is_verified(), False)
self.assertEqual(self.blob.writers, {})
@defer.inlineCallbacks
def test_stop(self):
self.client.collect.side_effect = lambda response, write: defer.Deferred()
self.downloader.start()  # deliberately not yielded: collect never fires, simulating a write still in progress when stop() is called
yield self.downloader.stop()
self.blob_manager.get_blob.assert_called_with(self.blob_hash)
self.client.get.assert_called_with('http://{}/{}'.format('server1', self.blob_hash))
self.client.collect.assert_called()
self.assertEqual(self.blob.get_length(), self.response.length)
self.assertEqual(self.blob.get_is_verified(), False)
self.assertEqual(self.blob.writers, {})
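# collect simulates a successful transfer by writing bytes that hash to the
# expected blob hash; bad_collect writes different bytes of the same length
# so the blob fails verification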
def collect(response, write):
write('f' * response.length)
def bad_collect(response, write):
write('0' * response.length)


@ -7,9 +7,7 @@ from twisted.internet import defer
from twisted.trial import unittest
from lbrynet import conf
from lbrynet.database.storage import SQLiteStorage, open_file_for_writing
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloader
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.tests.util import random_lbry_hash
log = logging.getLogger()
@ -67,7 +65,6 @@ fake_claim_info = {
}
class FakeAnnouncer(object):
def __init__(self):
self._queue_size = 0
@ -245,12 +242,8 @@ class FileStorageTests(StorageTest):
@defer.inlineCallbacks
def test_store_file(self):
session = MocSession(self.storage)
session.db_dir = self.db_dir
sd_identifier = StreamDescriptorIdentifier()
download_directory = self.db_dir
manager = EncryptedFileManager(session, sd_identifier)
out = yield manager.session.storage.get_all_lbry_files()
out = yield self.storage.get_all_lbry_files()
self.assertEqual(len(out), 0)
stream_hash = random_lbry_hash()
@ -268,33 +261,29 @@ class FileStorageTests(StorageTest):
blob_data_rate = 0
file_name = "test file"
out = yield manager.session.storage.save_published_file(
out = yield self.storage.save_published_file(
stream_hash, file_name, download_directory, blob_data_rate
)
rowid = yield manager.session.storage.get_rowid_for_stream_hash(stream_hash)
rowid = yield self.storage.get_rowid_for_stream_hash(stream_hash)
self.assertEqual(out, rowid)
files = yield manager.session.storage.get_all_lbry_files()
files = yield self.storage.get_all_lbry_files()
self.assertEqual(1, len(files))
status = yield manager.session.storage.get_lbry_file_status(rowid)
status = yield self.storage.get_lbry_file_status(rowid)
self.assertEqual(status, ManagedEncryptedFileDownloader.STATUS_STOPPED)
running = ManagedEncryptedFileDownloader.STATUS_RUNNING
yield manager.session.storage.change_file_status(rowid, running)
status = yield manager.session.storage.get_lbry_file_status(rowid)
yield self.storage.change_file_status(rowid, running)
status = yield self.storage.get_lbry_file_status(rowid)
self.assertEqual(status, ManagedEncryptedFileDownloader.STATUS_RUNNING)
class ContentClaimStorageTests(StorageTest):
@defer.inlineCallbacks
def test_store_content_claim(self):
session = MocSession(self.storage)
session.db_dir = self.db_dir
sd_identifier = StreamDescriptorIdentifier()
download_directory = self.db_dir
manager = EncryptedFileManager(session, sd_identifier)
out = yield manager.session.storage.get_all_lbry_files()
out = yield self.storage.get_all_lbry_files()
self.assertEqual(len(out), 0)
stream_hash = random_lbry_hash()
@ -307,7 +296,7 @@ class ContentClaimStorageTests(StorageTest):
yield self.make_and_store_fake_stream(blob_count=2, stream_hash=stream_hash, sd_hash=sd_hash)
blob_data_rate = 0
file_name = "test file"
yield manager.session.storage.save_published_file(
yield self.storage.save_published_file(
stream_hash, file_name, download_directory, blob_data_rate
)
yield self.storage.save_claims([fake_claim_info])


@ -1,18 +1,29 @@
# -*- coding: utf-8 -*-
from cryptography.hazmat.primitives.ciphers.algorithms import AES
import mock
from twisted.trial import unittest
from twisted.internet import defer
from lbrynet.database.storage import SQLiteStorage
from lbrynet.core.StreamDescriptor import get_sd_info, BlobStreamDescriptorReader
from lbrynet.core import BlobManager
from lbrynet.core import Session
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier
from lbrynet.core.BlobManager import DiskBlobManager
from lbrynet.core.PeerManager import PeerManager
from lbrynet.core.RateLimiter import DummyRateLimiter
from lbrynet.core.PaymentRateManager import OnlyFreePaymentsManager
from lbrynet.database.storage import SQLiteStorage
from lbrynet.file_manager import EncryptedFileCreator
from lbrynet.file_manager import EncryptedFileManager
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.tests import mocks
from lbrynet.tests.util import mk_db_and_blob_dir, rm_db_and_blob_dir
FakeNode = mocks.Node
FakeWallet = mocks.Wallet
FakePeerFinder = mocks.PeerFinder
FakeAnnouncer = mocks.Announcer
GenFile = mocks.GenFile
test_create_stream_sd_file = mocks.create_stream_sd_file
DummyBlobAvailabilityTracker = mocks.BlobAvailabilityTracker
MB = 2**20
@ -24,32 +35,37 @@ def iv_generator():
class CreateEncryptedFileTest(unittest.TestCase):
timeout = 5
@defer.inlineCallbacks
def setUp(self):
mocks.mock_conf_settings(self)
self.tmp_db_dir, self.tmp_blob_dir = mk_db_and_blob_dir()
self.session = mock.Mock(spec=Session.Session)(None, None)
self.session.payment_rate_manager.min_blob_data_payment_rate = 0
self.blob_manager = BlobManager.DiskBlobManager(self.tmp_blob_dir, SQLiteStorage(self.tmp_db_dir))
self.session.blob_manager = self.blob_manager
self.session.storage = self.session.blob_manager.storage
self.file_manager = EncryptedFileManager.EncryptedFileManager(self.session, object())
yield self.session.blob_manager.storage.setup()
yield self.session.blob_manager.setup()
self.wallet = FakeWallet()
self.peer_manager = PeerManager()
self.peer_finder = FakePeerFinder(5553, self.peer_manager, 2)
self.rate_limiter = DummyRateLimiter()
self.sd_identifier = StreamDescriptorIdentifier()
self.storage = SQLiteStorage(self.tmp_db_dir)
self.blob_manager = DiskBlobManager(self.tmp_blob_dir, self.storage)
self.prm = OnlyFreePaymentsManager()
self.lbry_file_manager = EncryptedFileManager(self.peer_finder, self.rate_limiter, self.blob_manager,
self.wallet, self.prm, self.storage, self.sd_identifier)
d = self.storage.setup()
d.addCallback(lambda _: self.lbry_file_manager.setup())
return d
@defer.inlineCallbacks
def tearDown(self):
yield self.lbry_file_manager.stop()
yield self.blob_manager.stop()
yield self.session.storage.stop()
yield self.storage.stop()
rm_db_and_blob_dir(self.tmp_db_dir, self.tmp_blob_dir)
@defer.inlineCallbacks
def create_file(self, filename):
handle = mocks.GenFile(3*MB, '1')
key = '2' * (AES.block_size / 8)
out = yield EncryptedFileCreator.create_lbry_file(self.session, self.file_manager, filename, handle,
key, iv_generator())
out = yield EncryptedFileCreator.create_lbry_file(
self.blob_manager, self.storage, self.prm, self.lbry_file_manager, filename, handle, key, iv_generator()
)
defer.returnValue(out)
@defer.inlineCallbacks
@ -60,7 +76,7 @@ class CreateEncryptedFileTest(unittest.TestCase):
"c8728fe0534dd06fbcacae92b0891787ad9b68ffc8d20c1"
filename = 'test.file'
lbry_file = yield self.create_file(filename)
sd_hash = yield self.session.storage.get_sd_blob_hash_for_stream(lbry_file.stream_hash)
sd_hash = yield self.storage.get_sd_blob_hash_for_stream(lbry_file.stream_hash)
# read the sd blob file
sd_blob = self.blob_manager.blobs[sd_hash]
@ -68,7 +84,7 @@ class CreateEncryptedFileTest(unittest.TestCase):
sd_file_info = yield sd_reader.get_info()
# this comes from the database, the blobs returned are sorted
sd_info = yield get_sd_info(self.session.storage, lbry_file.stream_hash, include_blobs=True)
sd_info = yield get_sd_info(self.storage, lbry_file.stream_hash, include_blobs=True)
self.assertDictEqual(sd_info, sd_file_info)
self.assertListEqual(sd_info['blobs'], sd_file_info['blobs'])
self.assertEqual(sd_info['stream_hash'], expected_stream_hash)


@ -11,7 +11,7 @@ class AuthJSONRPCServerTest(unittest.TestCase):
# onto it.
def setUp(self):
conf.initialize_settings(False)
self.server = server.AuthJSONRPCServer(use_authentication=False)
self.server = server.AuthJSONRPCServer(True, use_authentication=False)
def test_get_server_port(self):
self.assertSequenceEqual(


@ -1,26 +1,28 @@
import mock
import json
import unittest
import random
from os import path
from twisted.internet import defer
from twisted import trial
from twisted.trial import unittest
from faker import Faker
from lbryschema.decode import smart_decode
from lbryum.wallet import NewWallet
from lbrynet import conf
from lbrynet.core import Session, PaymentRateManager, Wallet
from lbrynet.core import Wallet
from lbrynet.database.storage import SQLiteStorage
from lbrynet.daemon.ComponentManager import ComponentManager
from lbrynet.daemon.Components import DATABASE_COMPONENT, DHT_COMPONENT, WALLET_COMPONENT, STREAM_IDENTIFIER_COMPONENT
from lbrynet.daemon.Components import HASH_ANNOUNCER_COMPONENT, REFLECTOR_COMPONENT, UPNP_COMPONENT, BLOB_COMPONENT
from lbrynet.daemon.Components import PEER_PROTOCOL_SERVER_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT
from lbrynet.daemon.Components import RATE_LIMITER_COMPONENT, HEADERS_COMPONENT, FILE_MANAGER_COMPONENT
from lbrynet.daemon.Daemon import Daemon as LBRYDaemon
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloader
from lbrynet.core.PaymentRateManager import OnlyFreePaymentsManager
from lbrynet.tests import util
from lbrynet.tests.mocks import mock_conf_settings, FakeNetwork
from lbrynet.tests.mocks import BlobAvailabilityTracker as DummyBlobAvailabilityTracker
from lbrynet.tests.mocks import mock_conf_settings, FakeNetwork, FakeFileManager
from lbrynet.tests.mocks import ExchangeRateManager as DummyExchangeRateManager
from lbrynet.tests.mocks import BTCLBCFeed, USDBTCFeed
from lbrynet.tests.util import is_android
@ -38,19 +40,23 @@ def get_test_daemon(data_rate=None, generous=True, with_fee=False):
'BTCLBC': {'spot': 3.0, 'ts': util.DEFAULT_ISO_TIME + 1},
'USDBTC': {'spot': 2.0, 'ts': util.DEFAULT_ISO_TIME + 2}
}
daemon = LBRYDaemon(None)
daemon.session = mock.Mock(spec=Session.Session)
daemon.session.wallet = mock.Mock(spec=Wallet.LBRYumWallet)
daemon.session.wallet.wallet = mock.Mock(spec=NewWallet)
daemon.session.wallet.wallet.use_encryption = False
daemon.session.wallet.network = FakeNetwork()
daemon.session.storage = mock.Mock(spec=SQLiteStorage)
component_manager = ComponentManager(
skip_components=[DATABASE_COMPONENT, DHT_COMPONENT, WALLET_COMPONENT, UPNP_COMPONENT,
PEER_PROTOCOL_SERVER_COMPONENT, REFLECTOR_COMPONENT, HASH_ANNOUNCER_COMPONENT,
STREAM_IDENTIFIER_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, BLOB_COMPONENT,
HEADERS_COMPONENT, RATE_LIMITER_COMPONENT],
file_manager=FakeFileManager
)
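# only the file manager component is actually run (as a FakeFileManager);
# every other component is skipped and mocked directly on the daemon below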
daemon = LBRYDaemon(component_manager=component_manager)
daemon.payment_rate_manager = OnlyFreePaymentsManager()
daemon.wallet = mock.Mock(spec=Wallet.LBRYumWallet)
daemon.wallet.wallet = mock.Mock(spec=NewWallet)
daemon.wallet.wallet.use_encryption = False
daemon.wallet.network = FakeNetwork()
daemon.storage = mock.Mock(spec=SQLiteStorage)
market_feeds = [BTCLBCFeed(), USDBTCFeed()]
daemon.exchange_rate_manager = DummyExchangeRateManager(market_feeds, rates)
base_prm = PaymentRateManager.BasePaymentRateManager(rate=data_rate)
prm = PaymentRateManager.NegotiatedPaymentRateManager(base_prm, DummyBlobAvailabilityTracker(),
generous=generous)
daemon.session.payment_rate_manager = prm
daemon.file_manager = component_manager.get_component(FILE_MANAGER_COMPONENT)
metadata = {
"author": "fake author",
@ -73,12 +79,12 @@ def get_test_daemon(data_rate=None, generous=True, with_fee=False):
{"fee": {"USD": {"address": "bQ6BGboPV2SpTMEP7wLNiAcnsZiH8ye6eA", "amount": 0.75}}})
daemon._resolve_name = lambda _: defer.succeed(metadata)
migrated = smart_decode(json.dumps(metadata))
daemon.session.wallet.resolve = lambda *_: defer.succeed(
daemon.wallet.resolve = lambda *_: defer.succeed(
{"test": {'claim': {'value': migrated.claim_dict}}})
return daemon
class TestCostEst(trial.unittest.TestCase):
class TestCostEst(unittest.TestCase):
def setUp(self):
mock_conf_settings(self)
util.resetTime(self)
@ -89,29 +95,30 @@ class TestCostEst(trial.unittest.TestCase):
daemon = get_test_daemon(generous=True, with_fee=True)
self.assertEquals(daemon.get_est_cost("test", size).result, correct_result)
def test_fee_and_ungenerous_data(self):
size = 10000000
fake_fee_amount = 4.5
data_rate = conf.ADJUSTABLE_SETTINGS['data_rate'][1]
correct_result = size / 10 ** 6 * data_rate + fake_fee_amount
daemon = get_test_daemon(generous=False, with_fee=True)
self.assertEquals(daemon.get_est_cost("test", size).result, correct_result)
# def test_fee_and_ungenerous_data(self):
# size = 10000000
# fake_fee_amount = 4.5
# data_rate = conf.ADJUSTABLE_SETTINGS['data_rate'][1]
# correct_result = size / 10 ** 6 * data_rate + fake_fee_amount
# daemon = get_test_daemon(generous=False, with_fee=True)
# self.assertEquals(daemon.get_est_cost("test", size).result, correct_result)
def test_generous_data_and_no_fee(self):
size = 10000000
correct_result = 0.0
daemon = get_test_daemon(generous=True)
self.assertEquals(daemon.get_est_cost("test", size).result, correct_result)
def test_ungenerous_data_and_no_fee(self):
size = 10000000
data_rate = conf.ADJUSTABLE_SETTINGS['data_rate'][1]
correct_result = size / 10 ** 6 * data_rate
daemon = get_test_daemon(generous=False)
self.assertEquals(daemon.get_est_cost("test", size).result, correct_result)
#
# def test_ungenerous_data_and_no_fee(self):
# size = 10000000
# data_rate = conf.ADJUSTABLE_SETTINGS['data_rate'][1]
# correct_result = size / 10 ** 6 * data_rate
# daemon = get_test_daemon(generous=False)
# self.assertEquals(daemon.get_est_cost("test", size).result, correct_result)
class TestJsonRpc(trial.unittest.TestCase):
class TestJsonRpc(unittest.TestCase):
def setUp(self):
def noop():
return None
@ -119,31 +126,30 @@ class TestJsonRpc(trial.unittest.TestCase):
mock_conf_settings(self)
util.resetTime(self)
self.test_daemon = get_test_daemon()
self.test_daemon.session.wallet.is_first_run = False
self.test_daemon.session.wallet.get_best_blockhash = noop
self.test_daemon.wallet.is_first_run = False
self.test_daemon.wallet.get_best_blockhash = noop
def test_status(self):
d = defer.maybeDeferred(self.test_daemon.jsonrpc_status)
d.addCallback(lambda status: self.assertDictContainsSubset({'is_running': False}, status))
@unittest.skipIf(is_android(),
'Test cannot pass on Android because PYTHONOPTIMIZE removes the docstrings.')
def test_help(self):
d = defer.maybeDeferred(self.test_daemon.jsonrpc_help, command='status')
d.addCallback(lambda result: self.assertSubstring('daemon status', result['help']))
if is_android():
test_help.skip = "Test cannot pass on Android because PYTHONOPTIMIZE removes the docstrings."
class TestFileListSorting(trial.unittest.TestCase):
class TestFileListSorting(unittest.TestCase):
def setUp(self):
mock_conf_settings(self)
util.resetTime(self)
self.faker = Faker('en_US')
self.faker.seed(66410)
self.test_daemon = get_test_daemon()
self.test_daemon.lbry_file_manager = mock.Mock(spec=EncryptedFileManager)
self.test_daemon.lbry_file_manager.lbry_files = self._get_fake_lbry_files()
self.test_daemon.file_manager.lbry_files = self._get_fake_lbry_files()
# Pre-sorted lists of prices and file names in ascending order produced by
# faker with seed 66410. This seed was chosen because it produces 3 results
# 'points_paid' at 6.0 and 2 results at 4.5 to test multiple sort criteria.
@ -154,6 +160,7 @@ class TestFileListSorting(trial.unittest.TestCase):
self.test_authors = ['angela41', 'edward70', 'fhart', 'johnrosales',
'lucasfowler', 'peggytorres', 'qmitchell',
'trevoranderson', 'xmitchell', 'zhangsusan']
return self.test_daemon.component_manager.setup()
def test_sort_by_points_paid_no_direction_specified(self):
sort_options = ['points_paid']


@ -3,16 +3,18 @@ import mock
from twisted.trial import unittest
from twisted.internet import defer, task
from lbrynet.core import Session, PaymentRateManager, Wallet
from lbrynet.core import PaymentRateManager, Wallet
from lbrynet.core.Error import DownloadDataTimeout, DownloadSDTimeout
from lbrynet.daemon import Downloader
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier
from lbrynet.database.storage import SQLiteStorage
from lbrynet.core.BlobManager import DiskBlobManager
from lbrynet.dht.peerfinder import DummyPeerFinder
from lbrynet.core.RateLimiter import DummyRateLimiter
from lbrynet.file_manager.EncryptedFileStatusReport import EncryptedFileStatusReport
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloader
from lbrynet.daemon.ExchangeRateManager import ExchangeRateManager
from lbrynet.tests.mocks import ExchangeRateManager as DummyExchangeRateManager
from lbrynet.tests.mocks import mock_conf_settings
@ -61,25 +63,22 @@ def moc_pay_key_fee(self, key_fee, name):
class GetStreamTests(unittest.TestCase):
def init_getstream_with_mocs(self):
mock_conf_settings(self)
sd_identifier = mock.Mock(spec=StreamDescriptorIdentifier)
session = mock.Mock(spec=Session.Session)
session.wallet = mock.Mock(spec=Wallet.LBRYumWallet)
wallet = mock.Mock(spec=Wallet.LBRYumWallet)
prm = mock.Mock(spec=PaymentRateManager.NegotiatedPaymentRateManager)
session.payment_rate_manager = prm
market_feeds = []
rates = {}
exchange_rate_manager = DummyExchangeRateManager(market_feeds, rates)
exchange_rate_manager = mock.Mock(spec=ExchangeRateManager)
max_key_fee = {'currency':"LBC", 'amount':10, 'address':''}
storage = mock.Mock(spec=SQLiteStorage)
peer_finder = DummyPeerFinder()
blob_manager = mock.Mock(spec=DiskBlobManager)
max_key_fee = {'currency': "LBC", 'amount': 10, 'address': ''}
disable_max_key_fee = False
data_rate = {'currency':"LBC", 'amount':0, 'address':''}
getstream = Downloader.GetStream(sd_identifier, session,
exchange_rate_manager, max_key_fee, disable_max_key_fee, timeout=3, data_rate=data_rate)
data_rate = {'currency': "LBC", 'amount': 0, 'address': ''}
getstream = Downloader.GetStream(
sd_identifier, wallet, exchange_rate_manager, blob_manager, peer_finder, DummyRateLimiter(), prm,
storage, max_key_fee, disable_max_key_fee, timeout=3, data_rate=data_rate
)
getstream.pay_key_fee_called = False
self.clock = task.Clock()
@ -100,7 +99,6 @@ class GetStreamTests(unittest.TestCase):
with self.assertRaises(AttributeError):
yield getstream.start(stream_info, name, "deadbeef" * 12, 0)
@defer.inlineCallbacks
def test_sd_blob_download_timeout(self):
"""


@ -6,7 +6,7 @@ import unittest
from twisted.internet import defer
from twisted import trial
from lbrynet.core import log_support
from lbrynet import custom_logger
from lbrynet.tests.util import is_android
@ -22,7 +22,7 @@ class TestLogger(trial.unittest.TestCase):
return d
def setUp(self):
self.log = log_support.Logger('test')
self.log = custom_logger.Logger('test')
self.stream = StringIO.StringIO()
handler = logging.StreamHandler(self.stream)
handler.setFormatter(logging.Formatter("%(filename)s:%(lineno)d - %(message)s"))
@ -36,7 +36,7 @@ class TestLogger(trial.unittest.TestCase):
return self.stream.getvalue().split('\n')
# the line number could change if this file gets refactored
expected_first_line = 'test_log_support.py:20 - My message: terrible things happened'
expected_first_line = 'test_customLogger.py:20 - My message: terrible things happened'
# testing the entirety of the message is futile as the
# traceback will depend on the system the test is being run on


@ -1,6 +1,6 @@
certifi==2018.4.16
Twisted==16.6.0
cryptography==2.2.2
cryptography==2.3
appdirs==1.4.3
argparse==1.2.1
docopt==0.6.2


@ -0,0 +1,66 @@
import os
import re
import json
import inspect
from textwrap import dedent
from lbrynet.daemon.Daemon import Daemon
SECTIONS = re.compile("(.*?)Usage:(.*?)Options:(.*?)Returns:(.*)", re.DOTALL)
REQUIRED_OPTIONS = re.compile("\(<(.*?)>.*?\)")
ARGUMENT_NAME = re.compile("--([^=]+)")
ARGUMENT_TYPE = re.compile("\s*\((.*?)\)(.*)")
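# a jsonrpc_* docstring is split into description/usage/options/returns
# sections; each "--arg (type) description" option line is parsed, and
# arguments that appear as "(<arg> ...)" in the usage section are required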
def get_api(obj):
docstr = inspect.getdoc(obj).strip()
try:
description, usage, options, returns = SECTIONS.search(docstr).groups()
except:
raise ValueError("Doc string format error for {}.".format(obj.__name__))
required = re.findall(REQUIRED_OPTIONS, usage)
arguments = []
for line in options.splitlines():
line = line.strip()
if not line:
continue
if line.startswith('--'):
arg, desc = line.split(':', 1)
arg_name = ARGUMENT_NAME.search(arg).group(1)
arg_type, arg_desc = ARGUMENT_TYPE.search(desc).groups()
arguments.append({
'name': arg_name.strip(),
'type': arg_type.strip(),
'description': [arg_desc.strip()],
'is_required': arg_name in required
})
elif line == 'None':
continue
else:
arguments[-1]['description'].append(line.strip())
for arg in arguments:
arg['description'] = ' '.join(arg['description'])
return {
'name': obj.__name__[len('jsonrpc_'):],
'description': description.strip(),
'arguments': arguments,
'returns': returns.strip()
}
def write_api(f):
apis = []
for method_name in sorted(Daemon.callable_methods.keys()):
apis.append(get_api(Daemon.callable_methods[method_name]))
json.dump(apis, f, indent=4)
if __name__ == '__main__':
html_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'docs', 'api.json')
with open(html_file, 'w+') as f:
write_api(f)


@ -24,13 +24,14 @@ requires = [
'lbryschema==0.0.16',
'lbryum==3.2.3',
'miniupnpc',
'txupnp==0.0.1a6',
'pyyaml',
'requests',
'txJSON-RPC',
'zope.interface',
'treq',
'docopt',
'six'
'six',
]
console_scripts = [