Merge branch 'database-refactor'

Jack Robison 2018-02-13 16:01:41 -05:00
commit e3d90f2240
No known key found for this signature in database
GPG key ID: DF25C68FE0239BB2
53 changed files with 2323 additions and 2322 deletions

View file

@ -290,7 +290,7 @@ spelling-store-unknown-words=no
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=100
max-line-length=120
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$

View file

@ -42,6 +42,6 @@ script:
- pip install mock pylint unqlite
- pylint lbrynet
- PYTHONPATH=. trial lbrynet.tests
- python -m unittest discover lbrynet/tests/integration
- python -m unittest discover lbrynet/tests/integration -v
- rvm install ruby-2.3.1
- rvm use 2.3.1 && gem install danger --version '~> 4.0' && danger

View file

@ -24,6 +24,7 @@ at anytime.
* fetching the external ip
* `blob_list` failing with --uri parameter (https://github.com/lbryio/lbry/issues/895)
* `get` failing with a non-useful error message when given a uri for a channel claim
* exception checking in several wallet unit tests
### Deprecated
* `channel_list_mine`, replaced with `channel_list`
@ -42,8 +43,9 @@ at anytime.
* `abandon_info` dictionary (containing `claim_name`, `claim_id`, `address`, `amount`, `balance_delta` and `nout`) for claims, supports, and updates returned by `transaction_list`
* `permanent_url` string to `channel_list_mine`, `claim_list`, `claim_show`, `resolve` and `resolve_name` (see lbryio/lbryum#203)
* `is_mine` boolean to `channel_list` results
* `status`, `blobs_completed`, and `blobs_in_stream` fields to file objects returned by `file_list` and `get`
* sqlite table to store the outpoint of the claim a stream is downloaded from
* `txid`, `nout`, `channel_claim_id`, `channel_claim_name`, `status`, `blobs_completed`, and `blobs_in_stream` fields to file objects returned by `file_list` and `get`
* `txid`, `nout`, `channel_claim_id`, and `channel_claim_name` filters for `file` commands (`file_list`, `file_set_status`, `file_reflect`, and `file_delete`)
* unit tests for `SQLiteStorage` and updated old tests for relevant changes (https://github.com/lbryio/lbry/issues/1088)
### Changed
* default download folder on linux from `~/Downloads` to `XDG_DOWNLOAD_DIR`
@ -55,7 +57,9 @@ at anytime.
* `publish` to verify the claim schema before trying to make the claim and to return better error messages
* `channel_list_mine` to be instead named `channel_list`
* `channel_list` to include channels where the certificate info has been imported but the claim is not in the wallet
* file objects returned by `file_list` and `get` to no longer contain `name`, `claim_id`, `message`, or `metadata`
* file objects returned by `file_list` and `get` to contain `claim_name` field instead of `name`
* `name` filter parameter for `file_list`, `file_set_status`, `file_reflect`, and `file_delete` to be named `claim_name`
* `metadata` field in file objects returned by `file_list` and `get` to be a [Metadata object](https://github.com/lbryio/lbryschema/blob/master/lbryschema/proto/metadata.proto#L5)
* assumption for the time it takes to announce a single hash from 1 second to 5 seconds
* HTTP error codes for failed api requests to conform to http://www.jsonrpc.org/specification#error_object (previously http errors were set for jsonrpc errors)
* api requests resulting in errors to return less verbose tracebacks
@ -64,16 +68,20 @@ at anytime.
* lbrynet to not manually save the wallet file and to let lbryum handle it
* internals to use reworked lbryum `payto` command
* dht `Node` class to re-attempt joining the network every 60 secs if no peers are known
* lbrynet database and file manager to separate the creation of lbry files (from downloading or publishing) from the handling of a stream. All files have a stream, but not all streams may have a file. (https://github.com/lbryio/lbry/issues/1020)
* manager classes to use new `SQLiteStorage` for database interaction. This class uses a single `lbrynet.sqlite` database file.
### Removed
* `seccure` and `gmpy` dependencies
* support for positional arguments in cli `settings_set`. Now only accepts settings changes in the form `--setting_key=value`
* `auto_re_reflect` setting from the conf file, use the `reflect_uploads` setting instead
* `name` argument for `claim_show` command
* claim related filter arguments `name`, `claim_id`, and `outpoint` from `file_list`, `file_delete`, `file_set_status`, and `file_reflect` commands
* `message` response field in file objects returned by `file_list` and `get`
* `include_tip_info` argument from `transaction_list`, which will now always include tip information
* old and unused UI related code
* unnecessary `TempBlobManager` class
* old storage classes used by the file manager, wallet, and blob manager
* old `.db` database files from the data directory
## [0.18.0] - 2017-11-08
### Fixed
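
An illustrative sketch of the `file_list` changes described in this changelog (not part of the commit): the `name` filter is now `claim_name`, and `txid`/`nout` are newly accepted. The daemon address and the JSON-RPC payload shape are assumptions about the API conventions of this era, not something the diff specifies.

```python
# Hypothetical client-side call exercising the renamed and added filters.
import requests  # assumption: a plain HTTP JSON-RPC client

def file_list(claim_name=None, txid=None, nout=None):
    params = {}
    if claim_name is not None:
        params['claim_name'] = claim_name  # formerly the 'name' filter
    if txid is not None:
        params['txid'] = txid              # newly accepted filter
    if nout is not None:
        params['nout'] = nout              # newly accepted filter
    response = requests.post('http://localhost:5279',  # assumed daemon address
                             json={'method': 'file_list', 'params': params})
    return response.json()['result']

# returned file objects now carry 'claim_name', 'channel_claim_id', 'status',
# 'blobs_completed', and 'blobs_in_stream' instead of 'name' and 'message'
```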

View file

@ -12,6 +12,7 @@ log = logging.getLogger(__name__)
MAX_BLOB_SIZE = 2 * 2 ** 20
class BlobFile(object):
"""
A chunk of data available on the network which is specified by a hashsum

View file

@ -16,3 +16,4 @@ class BlobInfo(object):
self.blob_hash = blob_hash
self.blob_num = blob_num
self.length = length

View file

@ -1,21 +1,17 @@
import logging
import os
import time
import sqlite3
from sqlite3 import IntegrityError
from twisted.internet import threads, defer, reactor
from twisted.enterprise import adbapi
from lbrynet import conf
from lbrynet.blob.blob_file import BlobFile
from lbrynet.blob.creator import BlobFileCreator
from lbrynet.core.server.DHTHashAnnouncer import DHTHashSupplier
from lbrynet.core.sqlite_helpers import rerun_if_locked
log = logging.getLogger(__name__)
class DiskBlobManager(DHTHashSupplier):
def __init__(self, hash_announcer, blob_dir, db_dir):
def __init__(self, hash_announcer, blob_dir, storage):
"""
This class stores blobs on the hard disk,
@ -24,27 +20,19 @@ class DiskBlobManager(DHTHashSupplier):
"""
DHTHashSupplier.__init__(self, hash_announcer)
self.storage = storage
self.announce_head_blobs_only = conf.settings['announce_head_blobs_only']
self.blob_dir = blob_dir
self.db_file = os.path.join(db_dir, "blobs.db")
self.db_conn = adbapi.ConnectionPool('sqlite3', self.db_file, check_same_thread=False)
self.blob_creator_type = BlobFileCreator
# TODO: consider using an LRU for blobs as there could potentially
# be thousands of blobs loaded up, many stale
self.blobs = {}
self.blob_hashes_to_delete = {} # {blob_hash: being_deleted (True/False)}
@defer.inlineCallbacks
def setup(self):
log.info("Starting disk blob manager. blob_dir: %s, db_file: %s", str(self.blob_dir),
str(self.db_file))
yield self._open_db()
return defer.succeed(True)
def stop(self):
log.info("Stopping disk blob manager.")
self.db_conn.close()
return defer.succeed(True)
def get_blob(self, blob_hash, length=None):
@ -75,8 +63,9 @@ class DiskBlobManager(DHTHashSupplier):
def blob_completed(self, blob, next_announce_time=None, should_announce=True):
if next_announce_time is None:
next_announce_time = self.get_next_announce_time()
yield self._add_completed_blob(blob.blob_hash, blob.length,
next_announce_time, should_announce)
yield self.storage.add_completed_blob(
blob.blob_hash, blob.length, next_announce_time, should_announce
)
# we announce all blobs immediately if announce_head_blobs_only is False
# otherwise, announce only if marked as should_announce
if not self.announce_head_blobs_only or should_announce:
@ -86,22 +75,22 @@ class DiskBlobManager(DHTHashSupplier):
return self._completed_blobs(blobhashes_to_check)
def hashes_to_announce(self):
return self._get_blobs_to_announce()
return self.storage.get_blobs_to_announce(self.hash_announcer)
def count_should_announce_blobs(self):
return self._count_should_announce_blobs()
return self.storage.count_should_announce_blobs()
def set_should_announce(self, blob_hash, should_announce):
if blob_hash in self.blobs:
blob = self.blobs[blob_hash]
if blob.get_is_verified():
return self._set_should_announce(blob_hash,
self.get_next_announce_time(),
should_announce)
return self.storage.set_should_announce(
blob_hash, self.get_next_announce_time(), should_announce
)
return defer.succeed(False)
def get_should_announce(self, blob_hash):
return self._should_announce(blob_hash)
return self.storage.should_announce(blob_hash)
def creator_finished(self, blob_creator, should_announce):
log.debug("blob_creator.blob_hash: %s", blob_creator.blob_hash)
@ -114,8 +103,7 @@ class DiskBlobManager(DHTHashSupplier):
new_blob = BlobFile(self.blob_dir, blob_creator.blob_hash, blob_creator.length)
self.blobs[blob_creator.blob_hash] = new_blob
next_announce_time = self.get_next_announce_time()
d = self.blob_completed(new_blob, next_announce_time, should_announce)
return d
return self.blob_completed(new_blob, next_announce_time, should_announce)
def immediate_announce_all_blobs(self):
d = self._get_all_verified_blob_hashes()
@ -127,24 +115,6 @@ class DiskBlobManager(DHTHashSupplier):
d.addCallback(self.completed_blobs)
return d
def add_blob_to_download_history(self, blob_hash, host, rate):
d = self._add_blob_to_download_history(blob_hash, host, rate)
return d
@defer.inlineCallbacks
def get_host_downloaded_from(self, blob_hash):
query_str = "SELECT host FROM download WHERE blob=? ORDER BY ts DESC LIMIT 1"
host = yield self.db_conn.runQuery(query_str, (blob_hash,))
if host:
result = host[0][0]
else:
result = None
defer.returnValue(result)
def add_blob_to_upload_history(self, blob_hash, host, rate):
d = self._add_blob_to_upload_history(blob_hash, host, rate)
return d
@defer.inlineCallbacks
def delete_blobs(self, blob_hashes):
bh_to_delete_from_db = []
@ -156,74 +126,11 @@ class DiskBlobManager(DHTHashSupplier):
del self.blobs[blob_hash]
except Exception as e:
log.warning("Failed to delete blob file. Reason: %s", e)
yield self._delete_blobs_from_db(bh_to_delete_from_db)
######### database calls #########
def _open_db(self):
# check_same_thread=False is solely to quiet a spurious error that appears to be due
# to a bug in twisted, where the connection is closed by a different thread than the
# one that opened it. The individual connections in the pool are not used in multiple
# threads.
def create_tables(transaction):
transaction.execute('PRAGMA journal_mode=WAL')
transaction.execute("create table if not exists blobs (" +
" blob_hash text primary key, " +
" blob_length integer, " +
" last_verified_time real, " +
" next_announce_time real, " +
" should_announce integer)")
transaction.execute("create table if not exists download (" +
" id integer primary key autoincrement, " +
" blob text, " +
" host text, " +
" rate float, " +
" ts integer)")
transaction.execute("create table if not exists upload (" +
" id integer primary key autoincrement, " +
" blob text, " +
" host text, " +
" rate float, " +
" ts integer)")
return self.db_conn.runInteraction(create_tables)
@rerun_if_locked
def _add_completed_blob(self, blob_hash, length, next_announce_time, should_announce):
log.debug("Adding a completed blob. blob_hash=%s, length=%s", blob_hash, str(length))
should_announce = 1 if should_announce else 0
d = self.db_conn.runQuery("insert into blobs (blob_hash, blob_length, next_announce_time, "
"should_announce) values (?, ?, ?, ?)", (blob_hash, length,
next_announce_time,
should_announce))
# TODO: why is this here?
d.addErrback(lambda err: err.trap(sqlite3.IntegrityError))
return d
@rerun_if_locked
@defer.inlineCallbacks
def _set_should_announce(self, blob_hash, next_announce_time, should_announce):
yield self.db_conn.runOperation("update blobs set next_announce_time=?, should_announce=? "
"where blob_hash=?", (next_announce_time, should_announce,
blob_hash))
defer.returnValue(True)
@rerun_if_locked
@defer.inlineCallbacks
def _should_announce(self, blob_hash):
result = yield self.db_conn.runQuery("select should_announce from blobs where blob_hash=?",
(blob_hash,))
defer.returnValue(result[0][0])
@rerun_if_locked
@defer.inlineCallbacks
def _count_should_announce_blobs(self):
result = yield self.db_conn.runQuery("select count(*) from blobs where should_announce=1")
defer.returnValue(result[0][0])
try:
yield self.storage.delete_blobs_from_db(bh_to_delete_from_db)
except IntegrityError as err:
if err.message != "FOREIGN KEY constraint failed":
raise err
@defer.inlineCallbacks
def _completed_blobs(self, blobhashes_to_check):
@ -232,65 +139,12 @@ class DiskBlobManager(DHTHashSupplier):
blob_hashes = [b.blob_hash for success, b in blobs if success and b.verified]
defer.returnValue(blob_hashes)
@rerun_if_locked
def _update_blob_verified_timestamp(self, blob, timestamp):
return self.db_conn.runQuery("update blobs set last_verified_time = ? where blob_hash = ?",
(blob, timestamp))
@rerun_if_locked
def _get_blobs_to_announce(self):
def get_and_update(transaction):
timestamp = time.time()
if self.announce_head_blobs_only is True:
r = transaction.execute("select blob_hash from blobs " +
"where next_announce_time < ? and blob_hash is not null "+
"and should_announce = 1",
(timestamp,))
else:
r = transaction.execute("select blob_hash from blobs " +
"where next_announce_time < ? and blob_hash is not null",
(timestamp,))
blobs = [b for b, in r.fetchall()]
next_announce_time = self.get_next_announce_time(len(blobs))
transaction.execute(
"update blobs set next_announce_time = ? where next_announce_time < ?",
(next_announce_time, timestamp))
log.debug("Got %s blobs to announce, next announce time is in %s seconds",
len(blobs), next_announce_time-time.time())
return blobs
return self.db_conn.runInteraction(get_and_update)
@rerun_if_locked
def _delete_blobs_from_db(self, blob_hashes):
def delete_blobs(transaction):
for b in blob_hashes:
transaction.execute("delete from blobs where blob_hash = ?", (b,))
return self.db_conn.runInteraction(delete_blobs)
@rerun_if_locked
def _get_all_blob_hashes(self):
d = self.db_conn.runQuery("select blob_hash from blobs")
return d
@rerun_if_locked
@defer.inlineCallbacks
def _get_all_should_announce_blob_hashes(self):
# return a list of blob hashes where should_announce is True
blob_hashes = yield self.db_conn.runQuery(
"select blob_hash from blobs where should_announce = 1")
defer.returnValue([d[0] for d in blob_hashes])
@rerun_if_locked
def _get_all_verified_blob_hashes(self):
d = self._get_all_blob_hashes()
d = self.storage.get_all_blob_hashes()
def get_verified_blobs(blobs):
verified_blobs = []
for blob_hash, in blobs:
for blob_hash in blobs:
file_path = os.path.join(self.blob_dir, blob_hash)
if os.path.isfile(file_path):
verified_blobs.append(blob_hash)
@ -298,19 +152,3 @@ class DiskBlobManager(DHTHashSupplier):
d.addCallback(lambda blobs: threads.deferToThread(get_verified_blobs, blobs))
return d
@rerun_if_locked
def _add_blob_to_download_history(self, blob_hash, host, rate):
ts = int(time.time())
d = self.db_conn.runQuery(
"insert into download values (null, ?, ?, ?, ?) ",
(blob_hash, str(host), float(rate), ts))
return d
@rerun_if_locked
def _add_blob_to_upload_history(self, blob_hash, host, rate):
ts = int(time.time())
d = self.db_conn.runQuery(
"insert into upload values (null, ?, ?, ?, ?) ",
(blob_hash, str(host), float(rate), ts))
return d
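
The storage methods called above live in `lbrynet/database/storage.py`, which is outside this diff. As a rough sketch of the surface the refactored `DiskBlobManager` depends on, assuming a single `lbrynet.sqlite` file behind an adbapi pool (the table layout and method bodies here are illustrative; only the method names and call signatures come from the code above):

```python
import os
from twisted.enterprise import adbapi
from twisted.internet import defer

class SQLiteStorage(object):
    def __init__(self, db_dir):
        # one lbrynet.sqlite file replaces the old per-component .db files
        self.db = adbapi.ConnectionPool(
            'sqlite3', os.path.join(db_dir, 'lbrynet.sqlite'),
            check_same_thread=False
        )

    def add_completed_blob(self, blob_hash, length, next_announce_time, should_announce):
        return self.db.runOperation(
            "update blob set blob_length=?, next_announce_time=?, should_announce=? "
            "where blob_hash=?",
            (length, next_announce_time, int(bool(should_announce)), blob_hash)
        )

    @defer.inlineCallbacks
    def should_announce(self, blob_hash):
        result = yield self.db.runQuery(
            "select should_announce from blob where blob_hash=?", (blob_hash,)
        )
        defer.returnValue(bool(result[0][0]) if result else False)

    def count_should_announce_blobs(self):
        d = self.db.runQuery("select count(*) from blob where should_announce=1")
        d.addCallback(lambda result: result[0][0])
        return d
```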

View file

@ -2,6 +2,7 @@ import logging
import miniupnpc
from lbrynet.core.BlobManager import DiskBlobManager
from lbrynet.dht import node
from lbrynet.database.storage import SQLiteStorage
from lbrynet.core.PeerManager import PeerManager
from lbrynet.core.RateLimiter import RateLimiter
from lbrynet.core.client.DHTPeerFinder import DHTPeerFinder
@ -43,7 +44,7 @@ class Session(object):
blob_manager=None, peer_port=None, use_upnp=True,
rate_limiter=None, wallet=None,
dht_node_class=node.Node, blob_tracker_class=None,
payment_rate_manager_class=None, is_generous=True, external_ip=None):
payment_rate_manager_class=None, is_generous=True, external_ip=None, storage=None):
"""@param blob_data_payment_rate: The default payment rate for blob data
@param db_dir: The directory in which levelDB files should be stored
@ -136,6 +137,7 @@ class Session(object):
self.payment_rate_manager = None
self.payment_rate_manager_class = payment_rate_manager_class or NegotiatedPaymentRateManager
self.is_generous = is_generous
self.storage = storage or SQLiteStorage(self.db_dir)
def setup(self):
"""Create the blob directory and database if necessary, start all desired services"""
@ -231,11 +233,13 @@ class Session(object):
# best not to rely on this external ip, the router can be behind layers of NATs
self.external_ip = external_ip
if self.peer_port:
self.upnp_redirects.append(get_port_mapping(u, self.peer_port, 'TCP',
'LBRY peer port'))
self.upnp_redirects.append(
get_port_mapping(u, self.peer_port, 'TCP', 'LBRY peer port')
)
if self.dht_node_port:
self.upnp_redirects.append(get_port_mapping(u, self.dht_node_port, 'UDP',
'LBRY DHT port'))
self.upnp_redirects.append(
get_port_mapping(u, self.dht_node_port, 'UDP', 'LBRY DHT port')
)
return True
return False
@ -313,27 +317,24 @@ class Session(object):
raise Exception(
"TempBlobManager is no longer supported, specify BlobManager or db_dir")
else:
self.blob_manager = DiskBlobManager(self.hash_announcer,
self.blob_dir,
self.db_dir)
self.blob_manager = DiskBlobManager(
self.hash_announcer, self.blob_dir, self.storage
)
if self.blob_tracker is None:
self.blob_tracker = self.blob_tracker_class(self.blob_manager,
self.peer_finder,
self.dht_node)
self.blob_tracker = self.blob_tracker_class(
self.blob_manager, self.peer_finder, self.dht_node
)
if self.payment_rate_manager is None:
self.payment_rate_manager = self.payment_rate_manager_class(
self.base_payment_rate_manager,
self.blob_tracker,
self.is_generous)
self.base_payment_rate_manager, self.blob_tracker, self.is_generous
)
self.rate_limiter.start()
d1 = self.blob_manager.setup()
d2 = self.wallet.start()
dl = defer.DeferredList([d1, d2], fireOnOneErrback=True, consumeErrors=True)
dl.addCallback(lambda _: self.blob_tracker.start())
return dl
d = self.storage.setup()
d.addCallback(lambda _: self.wallet.start())
d.addCallback(lambda _: self.blob_tracker.start())
return d
def _unset_upnp(self):
log.info("Unsetting upnp for session")

View file

@ -1,7 +1,11 @@
import os
import binascii
from collections import defaultdict
import json
import logging
from twisted.internet import threads, defer
from lbrynet.core.cryptoutils import get_lbry_hash_obj
from lbrynet.core.client.StandaloneBlobDownloader import StandaloneBlobDownloader
from lbrynet.core.Error import UnknownStreamTypeError, InvalidStreamDescriptorError
@ -87,7 +91,7 @@ class PlainStreamDescriptorWriter(StreamDescriptorWriter):
def _write_stream_descriptor(self, raw_data):
def write_file():
log.debug("Writing the sd file to disk")
log.info("Writing the sd file to disk")
with open(self.sd_file_name, 'w') as sd_file:
sd_file.write(raw_data)
return self.sd_file_name
@ -98,7 +102,6 @@ class PlainStreamDescriptorWriter(StreamDescriptorWriter):
class BlobStreamDescriptorWriter(StreamDescriptorWriter):
def __init__(self, blob_manager):
StreamDescriptorWriter.__init__(self)
self.blob_manager = blob_manager
@defer.inlineCallbacks
@ -239,6 +242,208 @@ class StreamDescriptorIdentifier(object):
return d
EncryptedFileStreamType = "lbryfile"
@defer.inlineCallbacks
def save_sd_info(blob_manager, sd_hash, sd_info):
if not blob_manager.blobs.get(sd_hash) or not blob_manager.blobs[sd_hash].get_is_verified():
descriptor_writer = BlobStreamDescriptorWriter(blob_manager)
calculated_sd_hash = yield descriptor_writer.create_descriptor(sd_info)
if calculated_sd_hash != sd_hash:
raise InvalidStreamDescriptorError("%s does not match calculated %s" %
(sd_hash, calculated_sd_hash))
stream_hash = yield blob_manager.storage.get_stream_hash_for_sd_hash(sd_hash)
if not stream_hash:
log.debug("Saving info for %s", sd_info['stream_name'].decode('hex'))
stream_name = sd_info['stream_name']
key = sd_info['key']
stream_hash = sd_info['stream_hash']
stream_blobs = sd_info['blobs']
suggested_file_name = sd_info['suggested_file_name']
yield blob_manager.storage.add_known_blobs(stream_blobs)
yield blob_manager.storage.store_stream(
stream_hash, sd_hash, stream_name, key, suggested_file_name, stream_blobs
)
defer.returnValue(stream_hash)
def format_blobs(crypt_blob_infos):
formatted_blobs = []
for blob_info in crypt_blob_infos:
blob = {}
if blob_info.length != 0:
blob['blob_hash'] = str(blob_info.blob_hash)
blob['blob_num'] = blob_info.blob_num
blob['iv'] = str(blob_info.iv)
blob['length'] = blob_info.length
formatted_blobs.append(blob)
return formatted_blobs
def format_sd_info(stream_type, stream_name, key, suggested_file_name, stream_hash, blobs):
return {
"stream_type": stream_type,
"stream_name": stream_name,
"key": key,
"suggested_file_name": suggested_file_name,
"stream_hash": stream_hash,
"blobs": blobs
}
@defer.inlineCallbacks
def get_sd_info(storage, stream_hash, include_blobs):
"""
Get an sd info dictionary from storage
:param storage: (SQLiteStorage) storage instance
:param stream_hash: (str) stream hash
:param include_blobs: (bool) include stream blob infos
:return: {
"stream_type": "lbryfile",
"stream_name": <hex encoded stream name>,
"key": <stream key>,
"suggested_file_name": <hex encoded suggested file name>,
"stream_hash": <stream hash>,
"blobs": [
{
"blob_hash": <head blob_hash>,
"blob_num": 0,
"iv": <iv>,
"length": <head blob length>
}, ...
{
"blob_num": <stream length>,
"iv": <iv>,
"length": 0
}
]
}
"""
stream_info = yield storage.get_stream_info(stream_hash)
blobs = []
if include_blobs:
blobs = yield storage.get_blobs_for_stream(stream_hash)
defer.returnValue(
format_sd_info(
EncryptedFileStreamType, stream_info[0], stream_info[1],
stream_info[2], stream_hash, format_blobs(blobs)
)
)
@defer.inlineCallbacks
def create_plain_sd(storage, stream_hash, file_name, overwrite_existing=False):
def _get_file_name():
actual_file_name = file_name
if os.path.exists(actual_file_name):
ext_num = 1
while os.path.exists(actual_file_name + "_" + str(ext_num)):
ext_num += 1
actual_file_name = actual_file_name + "_" + str(ext_num)
return actual_file_name
if overwrite_existing is False:
file_name = yield threads.deferToThread(_get_file_name)
descriptor_writer = PlainStreamDescriptorWriter(file_name)
sd_info = yield get_sd_info(storage, stream_hash, True)
sd_hash = yield descriptor_writer.create_descriptor(sd_info)
defer.returnValue(sd_hash)
def get_blob_hashsum(b):
length = b['length']
if length != 0:
blob_hash = b['blob_hash']
else:
blob_hash = None
blob_num = b['blob_num']
iv = b['iv']
blob_hashsum = get_lbry_hash_obj()
if length != 0:
blob_hashsum.update(blob_hash)
blob_hashsum.update(str(blob_num))
blob_hashsum.update(iv)
blob_hashsum.update(str(length))
return blob_hashsum.digest()
def get_stream_hash(hex_stream_name, key, hex_suggested_file_name, blob_infos):
h = get_lbry_hash_obj()
h.update(hex_stream_name)
h.update(key)
h.update(hex_suggested_file_name)
blobs_hashsum = get_lbry_hash_obj()
sorted_blob_infos = sorted(blob_infos, key=lambda x: x['blob_num'])
for blob in sorted_blob_infos:
blobs_hashsum.update(get_blob_hashsum(blob))
if sorted_blob_infos[-1]['length'] != 0:
raise InvalidStreamDescriptorError("Does not end with a zero-length blob.")
if 'blob_hash' in sorted_blob_infos[-1]:
raise InvalidStreamDescriptorError("Stream terminator blob should not have a hash")
h.update(blobs_hashsum.digest())
return h.hexdigest()
def verify_hex(text, field_name):
for c in text:
if c not in '0123456789abcdef':
raise InvalidStreamDescriptorError("%s is not a hex-encoded string" % field_name)
def validate_descriptor(stream_info):
try:
hex_stream_name = stream_info['stream_name']
key = stream_info['key']
hex_suggested_file_name = stream_info['suggested_file_name']
stream_hash = stream_info['stream_hash']
blobs = stream_info['blobs']
except KeyError as e:
raise InvalidStreamDescriptorError("Missing '%s'" % (e.args[0]))
verify_hex(key, "key")
verify_hex(hex_suggested_file_name, "suggested file name")
verify_hex(stream_hash, "stream_hash")
calculated_stream_hash = get_stream_hash(
hex_stream_name, key, hex_suggested_file_name, blobs
)
if calculated_stream_hash != stream_hash:
raise InvalidStreamDescriptorError("Stream hash does not match stream metadata")
return True
class EncryptedFileStreamDescriptorValidator(object):
def __init__(self, raw_info):
self.raw_info = raw_info
def validate(self):
return defer.succeed(validate_descriptor(self.raw_info))
def info_to_show(self):
info = []
info.append(("stream_name", binascii.unhexlify(self.raw_info.get("stream_name"))))
size_so_far = 0
for blob_info in self.raw_info.get("blobs", []):
size_so_far += int(blob_info['length'])
info.append(("stream_size", str(self.get_length_of_stream())))
suggested_file_name = self.raw_info.get("suggested_file_name", None)
if suggested_file_name is not None:
suggested_file_name = binascii.unhexlify(suggested_file_name)
info.append(("suggested_file_name", suggested_file_name))
return info
def get_length_of_stream(self):
size_so_far = 0
for blob_info in self.raw_info.get("blobs", []):
size_so_far += int(blob_info['length'])
return size_so_far
@defer.inlineCallbacks
def download_sd_blob(session, blob_hash, payment_rate_manager, timeout=None):
"""
Downloads a single blob from the network
@ -251,6 +456,7 @@ def download_sd_blob(session, blob_hash, payment_rate_manager, timeout=None):
@return: An object of type HashBlob
"""
downloader = StandaloneBlobDownloader(blob_hash,
session.blob_manager,
session.peer_finder,
@ -258,4 +464,10 @@ def download_sd_blob(session, blob_hash, payment_rate_manager, timeout=None):
payment_rate_manager,
session.wallet,
timeout)
return downloader.download()
sd_blob = yield downloader.download()
sd_reader = BlobStreamDescriptorReader(sd_blob)
sd_info = yield sd_reader.get_info()
raw_sd = yield sd_reader._get_raw_data()
yield session.blob_manager.storage.add_known_blob(blob_hash, len(raw_sd))
yield save_sd_info(session.blob_manager, sd_blob.blob_hash, sd_info)
defer.returnValue(sd_blob)
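
A worked example (made-up values) exercising the helper functions added above: build a minimal two-blob descriptor and validate it the same way `save_sd_info` does. The `'hex'` codec usage matches the Python 2 style of the surrounding code.

```python
blobs = [
    {'blob_hash': 'aa' * 48, 'blob_num': 0, 'iv': '00' * 16, 'length': 2097152},
    {'blob_num': 1, 'iv': '01' * 16, 'length': 0},  # zero-length terminator, no hash
]
hex_name = 'example.txt'.encode('hex')  # stream and file names are hex encoded
key = 'deadbeef' * 4
stream_hash = get_stream_hash(hex_name, key, hex_name, blobs)
sd_info = format_sd_info(EncryptedFileStreamType, hex_name, key, hex_name,
                         stream_hash, blobs)
assert validate_descriptor(sd_info)  # raises InvalidStreamDescriptorError on mismatch
```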

View file

@ -1,15 +1,10 @@
import os
from future_builtins import zip
from collections import defaultdict, deque
import datetime
import logging
import json
import time
from decimal import Decimal
from zope.interface import implements
from twisted.internet import threads, reactor, defer, task
from twisted.python.failure import Failure
from twisted.enterprise import adbapi
from lbryum import wallet as lbryum_wallet
from lbryum.network import Network
@ -23,8 +18,6 @@ from lbryschema.claim import ClaimDict
from lbryschema.error import DecodeError
from lbryschema.decode import smart_decode
from lbrynet import conf
from lbrynet.core.sqlite_helpers import rerun_if_locked
from lbrynet.interfaces import IRequestCreator, IQueryHandlerFactory, IQueryHandler, IWallet
from lbrynet.core.client.ClientRequest import ClientRequest
from lbrynet.core.Error import InsufficientFundsError, UnknownNameError
@ -67,370 +60,12 @@ class ClaimOutpoint(dict):
return not self.__eq__(compare)
class CachedClaim(object):
def __init__(self, claim_id, claim, claim_sequence, address, height, amount, supports,
channal_name, signature_is_valid, cache_timestamp, name, txid, nout):
self.claim_id = claim_id
self.claim = claim
self.claim_sequence = claim_sequence
self.address = address
self.height = height
self.amount = amount
self.supports = [] if not supports else json.loads(supports)
self.effective_amount = self.amount + sum([x['amount'] for x in self.supports])
self.channel_name = channal_name
self.signature_is_valid = signature_is_valid
self.cache_timestamp = cache_timestamp
self.name = name
self.txid = txid
self.nout = nout
def response_dict(self, check_expires=True):
if check_expires:
if (time.time() - int(self.cache_timestamp)) > conf.settings['cache_time']:
return
claim = {
"height": self.height,
"address": self.address,
"claim_id": self.claim_id,
"claim_sequence": self.claim_sequence,
"effective_amount": self.effective_amount,
"has_signature": self.claim.has_signature,
"name": self.name,
"hex": self.claim.serialized.encode('hex'),
"value": self.claim.claim_dict,
"txid": self.txid,
"amount": self.amount,
"decoded_claim": True,
"supports": self.supports,
"nout": self.nout
}
if self.channel_name is not None:
claim['channel_name'] = self.channel_name
if self.signature_is_valid is not None:
claim['signature_is_valid'] = bool(self.signature_is_valid)
return claim
class MetaDataStorage(object):
def load(self):
return defer.succeed(True)
def save_name_metadata(self, name, claim_outpoint, sd_hash):
return defer.succeed(True)
def get_claim_metadata_for_sd_hash(self, sd_hash):
return defer.succeed(True)
def update_claimid(self, claim_id, name, claim_outpoint):
return defer.succeed(True)
def get_claimid_for_tx(self, claim_outpoint):
return defer.succeed(True)
@defer.inlineCallbacks
def get_cached_claim(self, claim_id, check_expire=True):
cache_info = yield self._get_cached_claim(claim_id)
response = None
if cache_info:
cached_claim = CachedClaim(claim_id, *cache_info)
response = cached_claim.response_dict(check_expires=check_expire)
defer.returnValue(response)
def _get_cached_claim(self, claim_id):
return defer.succeed(None)
def save_claim_to_cache(self, claim_id, claim_sequence, claim, claim_address, height, amount,
supports, channel_name, signature_is_valid):
return defer.succeed(True)
def save_claim_to_uri_cache(self, uri, claim_id, certificate_id=None):
return defer.succeed(None)
def get_cached_claim_for_uri(self, uri, check_expire=True):
return defer.succeed(None)
class InMemoryStorage(MetaDataStorage):
def __init__(self):
self.metadata = {}
self.claimids = {}
self.claim_dicts = {}
self.uri_cache = {}
MetaDataStorage.__init__(self)
def save_name_metadata(self, name, claim_outpoint, sd_hash):
self.metadata[sd_hash] = (name, claim_outpoint)
return defer.succeed(True)
def get_claim_metadata_for_sd_hash(self, sd_hash):
try:
name, claim_outpoint = self.metadata[sd_hash]
return defer.succeed((name, claim_outpoint['txid'], claim_outpoint['nout']))
except KeyError:
return defer.succeed(None)
def update_claimid(self, claim_id, name, claim_outpoint):
self.claimids[(name, claim_outpoint['txid'], claim_outpoint['nout'])] = claim_id
return defer.succeed(True)
def get_claimid_for_tx(self, claim_outpoint):
result = None
for k, claim_id in self.claimids.iteritems():
if k[1] == claim_outpoint['txid'] and k[2] == claim_outpoint['nout']:
result = claim_id
break
return defer.succeed(result)
def _get_cached_claim(self, claim_id):
claim_cache = self.claim_dicts.get(claim_id, None)
claim_tx_cache = None
for k, v in self.claimids.iteritems():
if v == claim_id:
claim_tx_cache = k
break
if claim_cache and claim_tx_cache:
cached_claim_args = tuple(claim_cache) + tuple(claim_tx_cache)
return defer.succeed(cached_claim_args)
return defer.succeed(None)
def save_claim_to_cache(self, claim_id, claim_sequence, claim, claim_address, height, amount,
supports, channel_name, signature_is_valid):
self.claim_dicts[claim_id] = (claim, claim_sequence, claim_address, height, amount,
supports, channel_name, signature_is_valid, int(time.time()))
return defer.succeed(True)
def save_claim_to_uri_cache(self, uri, claim_id, certificate_id=None):
self.uri_cache[uri] = (claim_id, certificate_id)
return defer.succeed(None)
@defer.inlineCallbacks
def get_cached_claim_for_uri(self, uri, check_expire=True):
result = self.uri_cache.get(uri, None)
response = None
if result:
claim_id, certificate_id = result
response = yield self.get_cached_claim(claim_id, check_expire)
if response and certificate_id:
certificate = yield self.get_cached_claim(certificate_id, check_expire)
response['certificate'] = certificate['claim']
defer.returnValue(response)
class SqliteStorage(MetaDataStorage):
def __init__(self, db_dir):
self.db_dir = db_dir
self.db = adbapi.ConnectionPool('sqlite3', os.path.join(self.db_dir, "blockchainname.db"),
check_same_thread=False)
MetaDataStorage.__init__(self)
def load(self):
def create_tables(transaction):
transaction.execute("CREATE TABLE IF NOT EXISTS name_metadata (" +
" name TEXT UNIQUE NOT NULL, " +
" txid TEXT NOT NULL, " +
" n INTEGER NOT NULL, " +
" sd_hash TEXT NOT NULL)")
transaction.execute("create table if not exists claim_ids (" +
" claimId text, " +
" name text, " +
" txid text, " +
" n integer)")
transaction.execute("CREATE TABLE IF NOT EXISTS claim_cache (" +
" row_id INTEGER PRIMARY KEY AUTOINCREMENT, " +
" claim_id TEXT UNIQUE NOT NULL, " +
" claim_sequence INTEGER, " +
" claim_address TEXT NOT NULL, " +
" height INTEGER NOT NULL, " +
" amount INTEGER NOT NULL, " +
" supports TEXT, " +
" claim_pb TEXT, " +
" channel_name TEXT, " +
" signature_is_valid BOOL, " +
" last_modified TEXT)")
transaction.execute("CREATE TABLE IF NOT EXISTS uri_cache (" +
" row_id INTEGER PRIMARY KEY AUTOINCREMENT, " +
" uri TEXT UNIQUE NOT NULL, " +
" cache_row INTEGER, " +
" certificate_row INTEGER, " +
" last_modified TEXT)")
return self.db.runInteraction(create_tables)
@rerun_if_locked
@defer.inlineCallbacks
def save_name_metadata(self, name, claim_outpoint, sd_hash):
# TODO: refactor the 'claim_ids' table to not be terrible
txid, nout = claim_outpoint['txid'], claim_outpoint['nout']
yield self.db.runOperation("INSERT OR REPLACE INTO name_metadata VALUES (?, ?, ?, ?)",
(name, txid, nout, sd_hash))
defer.returnValue(None)
@rerun_if_locked
@defer.inlineCallbacks
def get_claim_metadata_for_sd_hash(self, sd_hash):
result = yield self.db.runQuery("SELECT name, txid, n FROM name_metadata WHERE sd_hash=?",
(sd_hash, ))
response = None
if result:
response = result[0]
defer.returnValue(response)
@rerun_if_locked
@defer.inlineCallbacks
def update_claimid(self, claim_id, name, claim_outpoint):
txid, nout = claim_outpoint['txid'], claim_outpoint['nout']
yield self.db.runOperation("INSERT OR IGNORE INTO claim_ids VALUES (?, ?, ?, ?)",
(claim_id, name, txid, nout))
defer.returnValue(claim_id)
@rerun_if_locked
@defer.inlineCallbacks
def get_claimid_for_tx(self, claim_outpoint):
result = yield self.db.runQuery("SELECT claimId FROM claim_ids "
"WHERE txid=? AND n=?",
(claim_outpoint['txid'], claim_outpoint['nout']))
response = None
if result:
response = result[0][0]
defer.returnValue(response)
@rerun_if_locked
@defer.inlineCallbacks
def _fix_malformed_supports_amount(self, row_id, supports, amount):
"""
this fixes malformed supports and amounts that were entering the cache
support list of [txid, nout, amount in deweys] instead of list of
{'txid':,'nout':,'amount':}, with amount specified in dewey
and also supports could be "[]" (brackets enclosed by double quotes)
This code can eventually be removed, as new versions should not have this problem
"""
fixed_supports = None
fixed_amount = None
supports = [] if not supports else json.loads(supports)
if isinstance(supports, (str, unicode)) and supports == '[]':
fixed_supports = []
elif len(supports) > 0 and not isinstance(supports[0], dict):
fixed_supports = []
fixed_amount = amount / 100000000.0
for support in supports:
fixed_supports.append(
{'txid':support[0], 'nout':support[1], 'amount':support[2]/100000000.0})
if fixed_supports is not None:
log.warn("Malformed support found, fixing it")
r = yield self.db.runOperation('UPDATE claim_cache SET supports=? WHERE row_id=?',
(json.dumps(fixed_supports), row_id))
supports = fixed_supports
if fixed_amount is not None:
log.warn("Malformed amount found, fixing it")
r = yield self.db.runOperation('UPDATE claim_cache SET amount=? WHERE row_id=?',
(fixed_amount, row_id))
amount = fixed_amount
defer.returnValue((json.dumps(supports), amount))
@rerun_if_locked
@defer.inlineCallbacks
def _get_cached_claim(self, claim_id, check_expire=True):
r = yield self.db.runQuery("SELECT * FROM claim_cache WHERE claim_id=?", (claim_id, ))
claim_tx_info = yield self.db.runQuery("SELECT name, txid, n FROM claim_ids "
"WHERE claimId=?", (claim_id, ))
response = None
if r and claim_tx_info and r[0]:
rid, _, seq, claim_address, height, amount, supports, raw, chan_name, valid, ts = r[0]
supports, amount = yield self._fix_malformed_supports_amount(rid, supports, amount)
last_modified = int(ts)
name, txid, nout = claim_tx_info[0]
claim = ClaimDict.deserialize(raw.decode('hex'))
response = (claim, seq, claim_address, height, amount, supports,
chan_name, valid, last_modified, name, txid, nout)
defer.returnValue(response)
@rerun_if_locked
@defer.inlineCallbacks
def save_claim_to_cache(self, claim_id, claim_sequence, claim, claim_address, height, amount,
supports, channel_name, signature_is_valid):
serialized = claim.serialized.encode("hex")
supports = json.dumps([] or supports)
now = str(int(time.time()))
yield self.db.runOperation("INSERT OR REPLACE INTO claim_cache(claim_sequence, "
" claim_id, claim_address, height, "
" amount, supports, claim_pb, "
" channel_name, signature_is_valid, "
" last_modified)"
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(claim_sequence, claim_id, claim_address, height, amount,
supports, serialized, channel_name, signature_is_valid, now))
defer.returnValue(None)
@rerun_if_locked
@defer.inlineCallbacks
def save_claim_to_uri_cache(self, uri, claim_id, certificate_id=None):
result = yield self.db.runQuery("SELECT row_id, last_modified FROM claim_cache "
"WHERE claim_id=?", (claim_id, ))
certificate_result = None
certificate_row = None
if certificate_id:
certificate_result = yield self.db.runQuery("SELECT row_id FROM claim_cache "
"WHERE claim_id=?", (certificate_id, ))
if certificate_id is not None and certificate_result is None:
log.warning("Certificate is not in cache")
elif certificate_result:
certificate_row = certificate_result[0][0]
if result:
cache_row, ts = result[0]
yield self.db.runOperation("INSERT OR REPLACE INTO uri_cache(uri, cache_row, "
" certificate_row, last_modified) "
"VALUES (?, ?, ?, ?)",
(uri, cache_row, certificate_row,
str(int(time.time()))))
else:
log.warning("Claim is not in cache")
defer.returnValue(None)
@rerun_if_locked
@defer.inlineCallbacks
def get_cached_claim_for_uri(self, uri, check_expire=True):
result = yield self.db.runQuery("SELECT "
"claim.claim_id, cert.claim_id, uri_cache.last_modified "
"FROM uri_cache "
"INNER JOIN claim_cache as claim "
"ON uri_cache.cache_row=claim.row_id "
"LEFT OUTER JOIN claim_cache as cert "
"ON uri_cache.certificate_row=cert.row_id "
"WHERE uri_cache.uri=?", (uri, ))
response = None
if result:
claim_id, certificate_id, last_modified = result[0]
last_modified = int(last_modified)
if check_expire and time.time() - last_modified > conf.settings['cache_time']:
defer.returnValue(None)
claim = yield self.get_cached_claim(claim_id)
if claim:
response = {
"claim": claim
}
if response and certificate_id is not None:
certificate = yield self.get_cached_claim(certificate_id)
response['certificate'] = certificate
defer.returnValue(response)
class Wallet(object):
"""This class implements the Wallet interface for the LBRYcrd payment system"""
implements(IWallet)
def __init__(self, storage):
if not isinstance(storage, MetaDataStorage):
raise ValueError('storage must be an instance of MetaDataStorage')
self._storage = storage
self.storage = storage
self.next_manage_call = None
self.wallet_balance = Decimal(0.0)
self.total_reserved_points = Decimal(0.0)
@ -456,20 +91,10 @@ class Wallet(object):
self.manage()
return True
d = self._storage.load()
d.addCallback(lambda _: self._start())
d = self._start()
d.addCallback(lambda _: start_manage())
return d
def _save_name_metadata(self, name, claim_outpoint, sd_hash):
return self._storage.save_name_metadata(name, claim_outpoint, sd_hash)
def _get_claim_metadata_for_sd_hash(self, sd_hash):
return self._storage.get_claim_metadata_for_sd_hash(sd_hash)
def _update_claimid(self, claim_id, name, claim_outpoint):
return self._storage.update_claimid(claim_id, name, claim_outpoint)
@staticmethod
def log_stop_error(err):
log.error("An error occurred stopping the wallet: %s", err.getTraceback())
@ -690,32 +315,15 @@ class Wallet(object):
######
@defer.inlineCallbacks
def get_cached_claim(self, claim_id, check_expire=True):
results = yield self._storage.get_cached_claim(claim_id, check_expire)
defer.returnValue(results)
@defer.inlineCallbacks
def get_claim_by_claim_id(self, claim_id, check_expire=True):
cached_claim = yield self.get_cached_claim(claim_id, check_expire)
if cached_claim:
result = cached_claim
else:
log.debug("Refreshing cached claim: %s", claim_id)
claim = yield self._get_claim_by_claimid(claim_id)
try:
result = yield self._handle_claim_result(claim)
except (UnknownNameError, UnknownClaimID, UnknownURI) as err:
result = {'error': err.message}
claim = yield self._get_claim_by_claimid(claim_id)
try:
result = self._handle_claim_result(claim)
except (UnknownNameError, UnknownClaimID, UnknownURI) as err:
result = {'error': err.message}
defer.returnValue(result)
@defer.inlineCallbacks
def get_claimid(self, txid, nout):
claim_outpoint = ClaimOutpoint(txid, nout)
claim_id = yield self._storage.get_claimid_for_tx(claim_outpoint)
defer.returnValue(claim_id)
@defer.inlineCallbacks
def get_my_claim(self, name):
my_claims = yield self.get_name_claims()
@ -727,8 +335,7 @@ class Wallet(object):
break
defer.returnValue(my_claim)
@defer.inlineCallbacks
def _decode_and_cache_claim_result(self, claim, update_caches):
def _decode_claim_result(self, claim):
if 'has_signature' in claim and claim['has_signature']:
if not claim['signature_is_valid']:
log.warning("lbry://%s#%s has an invalid signature",
@ -736,30 +343,15 @@ class Wallet(object):
try:
decoded = smart_decode(claim['value'])
claim_dict = decoded.claim_dict
outpoint = ClaimOutpoint(claim['txid'], claim['nout'])
name = claim['name']
claim['value'] = claim_dict
claim['hex'] = decoded.serialized.encode('hex')
if update_caches:
if decoded.is_stream:
yield self._save_name_metadata(name, outpoint, decoded.source_hash)
yield self._update_claimid(claim['claim_id'], name, outpoint)
yield self._storage.save_claim_to_cache(claim['claim_id'],
claim['claim_sequence'],
decoded, claim['address'],
claim['height'],
claim['amount'], claim['supports'],
claim.get('channel_name', None),
claim.get('signature_is_valid', None))
except DecodeError:
claim['hex'] = claim['value']
claim['value'] = None
claim['error'] = "Failed to decode value"
return claim
defer.returnValue(claim)
@defer.inlineCallbacks
def _handle_claim_result(self, results, update_caches=True):
def _handle_claim_result(self, results):
if not results:
#TODO: cannot determine what name we searched for here
# we should fix lbryum commands that return None
@ -779,49 +371,41 @@ class Wallet(object):
# case where return value is {'certificate':{'txid', 'value',...},...}
if 'certificate' in results:
results['certificate'] = yield self._decode_and_cache_claim_result(
results['certificate'],
update_caches)
results['certificate'] = self._decode_claim_result(results['certificate'])
# case where return value is {'claim':{'txid','value',...},...}
if 'claim' in results:
results['claim'] = yield self._decode_and_cache_claim_result(
results['claim'],
update_caches)
results['claim'] = self._decode_claim_result(results['claim'])
# case where return value is {'txid','value',...}
# returned by queries that are not name resolve related
# (getclaimbyoutpoint, getclaimbyid, getclaimsfromtx)
# we do not update caches here because it should be missing
# some values such as claim_sequence, and supports
elif 'value' in results:
results = yield self._decode_and_cache_claim_result(results, update_caches=False)
results = self._decode_claim_result(results)
# case where there is no 'certificate', 'value', or 'claim' key
elif 'certificate' not in results:
msg = 'result in unexpected format:{}'.format(results)
assert False, msg
defer.returnValue(results)
return results
@defer.inlineCallbacks
def save_claim(self, claim_info):
if 'value' in claim_info:
yield self.storage.save_claim(claim_info)
else:
if 'certificate' in claim_info:
yield self.storage.save_claim(claim_info['certificate'])
if 'claim' in claim_info:
yield self.storage.save_claim(claim_info['claim'])
@defer.inlineCallbacks
def resolve(self, *uris, **kwargs):
check_cache = kwargs.get('check_cache', True)
page = kwargs.get('page', 0)
page_size = kwargs.get('page_size', 10)
result = {}
needed = []
for uri in uris:
cached_claim = None
if check_cache:
cached_claim = yield self._storage.get_cached_claim_for_uri(uri, check_cache)
if cached_claim:
log.debug("Using cached results for %s", uri)
result[uri] = yield self._handle_claim_result(cached_claim, update_caches=False)
else:
log.info("Resolving %s", uri)
needed.append(uri)
batch_results = yield self._get_values_for_uris(page, page_size, *uris)
@ -833,36 +417,37 @@ class Wallet(object):
if resolve_results and 'certificate' in resolve_results:
certificate_id = resolve_results['certificate']['claim_id']
try:
result[uri] = yield self._handle_claim_result(resolve_results, update_caches=True)
if claim_id:
yield self._storage.save_claim_to_uri_cache(uri, claim_id, certificate_id)
result[uri] = self._handle_claim_result(resolve_results)
yield self.save_claim(result[uri])
except (UnknownNameError, UnknownClaimID, UnknownURI) as err:
result[uri] = {'error': err.message}
defer.returnValue(result)
@defer.inlineCallbacks
def get_claims_by_ids(self, *claim_ids):
claims = yield self._get_claims_by_claimids(*claim_ids)
for claim in claims.itervalues():
yield self.save_claim(claim)
defer.returnValue(claims)
@defer.inlineCallbacks
def get_claim_by_outpoint(self, claim_outpoint, check_expire=True):
claim_id = yield self._storage.get_claimid_for_tx(claim_outpoint)
txid, nout = claim_outpoint['txid'], claim_outpoint['nout']
if claim_id:
cached_claim = yield self._storage.get_cached_claim(claim_id, check_expire)
else:
cached_claim = None
if not cached_claim:
claim = yield self._get_claim_by_outpoint(txid, nout)
try:
result = yield self._handle_claim_result(claim)
except (UnknownOutpoint) as err:
result = {'error': err.message}
else:
result = cached_claim
txid, nout = claim_outpoint.split(":")
nout = int(nout)
claim = yield self._get_claim_by_outpoint(txid, nout)
try:
result = self._handle_claim_result(claim)
yield self.save_claim(result)
except (UnknownOutpoint) as err:
result = {'error': err.message}
defer.returnValue(result)
@defer.inlineCallbacks
def get_claim_by_name(self, name):
get_name_result = yield self._get_value_for_name(name)
result = yield self._handle_claim_result(get_name_result)
result = self._handle_claim_result(get_name_result)
yield self.save_claim(result)
defer.returnValue(result)
@defer.inlineCallbacks
@ -875,6 +460,7 @@ class Wallet(object):
decoded = smart_decode(claim['value'])
claim['value'] = decoded.claim_dict
claim['hex'] = decoded.serialized.encode('hex')
yield self.save_claim(claim)
claims_for_return.append(claim)
except DecodeError:
claim['hex'] = claim['value']
@ -892,6 +478,7 @@ class Wallet(object):
claim_out['fee'] = float(claim_out['fee'])
return claim_out
@defer.inlineCallbacks
def claim_new_channel(self, channel_name, amount):
parsed_channel_name = parse_lbry_uri(channel_name)
if not parsed_channel_name.is_channel:
@ -900,17 +487,33 @@ class Wallet(object):
parsed_channel_name.bid_position or parsed_channel_name.claim_sequence):
raise Exception("New channel claim should have no fields other than name")
log.info("Preparing to make certificate claim for %s", channel_name)
return self._claim_certificate(parsed_channel_name.name, amount)
channel_claim = yield self._claim_certificate(parsed_channel_name.name, amount)
yield self.save_claim(self._get_temp_claim_info(channel_claim, channel_name, amount))
defer.returnValue(channel_claim)
@defer.inlineCallbacks
def channel_list(self):
certificates = yield self.get_certificates_for_signing()
results = []
for claim in certificates:
formatted = yield self._handle_claim_result(claim)
formatted = self._handle_claim_result(claim)
results.append(formatted)
defer.returnValue(results)
def _get_temp_claim_info(self, claim_result, name, bid):
# save the claim information with a height and sequence of -1; these will be reset upon the next resolve
return {
"claim_id": claim_result['claim_id'],
"name": name,
"amount": bid,
"address": claim_result['claim_address'],
"txid": claim_result['txid'],
"nout": claim_result['nout'],
"value": claim_result['value'],
"height": -1,
"claim_sequence": -1,
}
@defer.inlineCallbacks
def claim_name(self, name, bid, metadata, certificate_id=None, claim_address=None,
change_address=None):
@ -944,12 +547,8 @@ class Wallet(object):
log.error(claim)
msg = 'Claim to name {} failed: {}'.format(name, claim['reason'])
raise Exception(msg)
claim = self._process_claim_out(claim)
claim_outpoint = ClaimOutpoint(claim['txid'], claim['nout'])
log.info("Saving metadata for claim %s %d", claim['txid'], claim['nout'])
yield self._update_claimid(claim['claim_id'], name, claim_outpoint)
yield self._save_name_metadata(name, claim_outpoint, decoded.source_hash)
yield self.storage.save_claim(self._get_temp_claim_info(claim, name, bid), smart_decode(claim['value']))
defer.returnValue(claim)
@defer.inlineCallbacks
@ -1004,9 +603,6 @@ class Wallet(object):
d = self._get_transaction(txid)
return d
def get_claim_metadata_for_sd_hash(self, sd_hash):
return self._get_claim_metadata_for_sd_hash(sd_hash)
def get_balance(self):
return self.wallet_balance - self.total_reserved_points - sum(self.queued_payments.values())
@ -1135,6 +731,9 @@ class Wallet(object):
def _get_claim_by_claimid(self, claim_id):
return defer.fail(NotImplementedError())
def _get_claims_by_claimids(self, *claim_ids):
return defer.fail(NotImplementedError())
def _get_values_for_uris(self, page, page_size, *uris):
return defer.fail(NotImplementedError())
@ -1169,7 +768,7 @@ class Wallet(object):
return defer.fail(NotImplementedError())
def _start(self):
pass
return defer.fail(NotImplementedError())
def _stop(self):
pass
@ -1513,6 +1112,9 @@ class LBRYumWallet(Wallet):
def _get_claim_by_claimid(self, claim_id):
return self._run_cmd_as_defer_to_thread('getclaimbyid', claim_id)
def _get_claims_by_claimids(self, *claim_ids):
return self._run_cmd_as_defer_to_thread('getclaimsbyids', claim_ids)
def _get_balance_for_address(self, address):
return defer.succeed(Decimal(self.wallet.get_addr_received(address)) / COIN)
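
With the cache classes gone, resolution is a straight fetch-then-persist path. A small usage sketch (the logging scaffold is assumed; `resolve` and `save_claim` are as defined above):

```python
import logging
from twisted.internet import defer

log = logging.getLogger(__name__)

@defer.inlineCallbacks
def resolve_and_report(wallet, uri):
    results = yield wallet.resolve(uri, page=0, page_size=10)
    claim = results[uri]
    if 'error' in claim:
        log.warning("%s failed to resolve: %s", uri, claim['error'])
    else:
        # the resolved claim (and any certificate) was just persisted
        # through wallet.save_claim() rather than the removed caches
        log.info("resolved %s", uri)
    defer.returnValue(claim)
```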

View file

@ -566,8 +566,6 @@ class DownloadRequest(RequestHelper):
self.peer.update_score(5.0)
should_announce = blob.blob_hash == self.head_blob_hash
d = self.requestor.blob_manager.blob_completed(blob, should_announce=should_announce)
d.addCallback(lambda _: self.requestor.blob_manager.add_blob_to_download_history(
blob.blob_hash, self.peer.host, self.protocol_prices[self.protocol]))
d.addCallback(lambda _: arg)
return d

View file

@ -152,7 +152,6 @@ class BlobRequestHandler(object):
response_fields['blob_hash'] = blob.blob_hash
response_fields['length'] = blob.length
response['incoming_blob'] = response_fields
d.addCallback(lambda _: self.record_transaction(blob))
d.addCallback(lambda _: response)
return d
log.debug("We can not send %s", str(blob))
@ -160,11 +159,6 @@ class BlobRequestHandler(object):
d.addCallback(lambda _: response)
return d
def record_transaction(self, blob):
d = self.blob_manager.add_blob_to_upload_history(
blob.blob_hash, self.peer.host, self.blob_data_payment_rate)
return d
def _reply_to_send_request(self, response, incoming):
response_fields = {}
response['incoming_blob'] = response_fields

View file

@ -1,23 +0,0 @@
import sqlite3
from twisted.internet import task, reactor
import logging
log = logging.getLogger(__name__)
def rerun_if_locked(f):
def rerun(err, *args, **kwargs):
if err.check(sqlite3.OperationalError) and err.value.message == "database is locked":
log.warning("database was locked. rerunning %s with args %s, kwargs %s",
str(f), str(args), str(kwargs))
return task.deferLater(reactor, 0, wrapper, *args, **kwargs)
return err
def wrapper(*args, **kwargs):
d = f(*args, **kwargs)
d.addErrback(rerun, *args, **kwargs)
return d
return wrapper

View file

@ -19,6 +19,16 @@ class CryptBlobInfo(BlobInfo):
BlobInfo.__init__(self, blob_hash, blob_num, length)
self.iv = iv
def get_dict(self):
info = {
"blob_num": self.blob_num,
"length": self.length,
"iv": self.iv
}
if self.blob_hash:
info['blob_hash'] = self.blob_hash
return info
class StreamBlobDecryptor(object):
def __init__(self, blob, key, iv, length):
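
A quick illustration of the `get_dict()` helper added above. The constructor argument order `(blob_hash, blob_num, length, iv)` is inferred from the `BlobInfo.__init__` call in the hunk; the values are made up.

```python
info = CryptBlobInfo('aa' * 48, 0, 2097152, '00' * 16)
assert info.get_dict() == {'blob_num': 0, 'length': 2097152,
                           'iv': '00' * 16, 'blob_hash': 'aa' * 48}

terminator = CryptBlobInfo(None, 1, 0, '01' * 16)
assert 'blob_hash' not in terminator.get_dict()  # falsy hash key is omitted
```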

View file

@ -128,7 +128,6 @@ class CryptStreamCreator(object):
d.addCallback(self._blob_finished)
self.finished_deferreds.append(d)
def _write(self, data):
while len(data) > 0:
if self.current_blob is None:

View file

@ -23,6 +23,7 @@ from lbryschema.decode import smart_decode
# TODO: importing this when internet is disabled raises a socket.gaierror
from lbrynet.core.system_info import get_lbrynet_version
from lbrynet.database.storage import SQLiteStorage
from lbrynet import conf
from lbrynet.conf import LBRYCRD_WALLET, LBRYUM_WALLET, PTC_WALLET
from lbrynet.reflector import reupload
@ -30,8 +31,6 @@ from lbrynet.reflector import ServerFactory as reflector_server_factory
from lbrynet.core.log_support import configure_loggly_handler
from lbrynet.lbry_file.client.EncryptedFileDownloader import EncryptedFileSaverFactory
from lbrynet.lbry_file.client.EncryptedFileOptions import add_lbry_file_to_sd_identifier
from lbrynet.lbry_file.EncryptedFileMetadataManager import DBEncryptedFileMetadataManager
from lbrynet.lbry_file.StreamDescriptor import EncryptedFileStreamType
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.daemon.Downloader import GetStream
from lbrynet.daemon.Publisher import Publisher
@ -40,8 +39,9 @@ from lbrynet.daemon.auth.server import AuthJSONRPCServer
from lbrynet.core.PaymentRateManager import OnlyFreePaymentsManager
from lbrynet.core import utils, system_info
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier, download_sd_blob
from lbrynet.core.StreamDescriptor import EncryptedFileStreamType
from lbrynet.core.Session import Session
from lbrynet.core.Wallet import LBRYumWallet, SqliteStorage, ClaimOutpoint
from lbrynet.core.Wallet import LBRYumWallet, ClaimOutpoint
from lbrynet.core.looping_call_manager import LoopingCallManager
from lbrynet.core.server.BlobRequestHandler import BlobRequestHandlerFactory
from lbrynet.core.server.ServerProtocol import ServerProtocolFactory
@ -120,6 +120,13 @@ class _FileID(IterableContainer):
FILE_NAME = 'file_name'
STREAM_HASH = 'stream_hash'
ROWID = "rowid"
CLAIM_ID = "claim_id"
OUTPOINT = "outpoint"
TXID = "txid"
NOUT = "nout"
CHANNEL_CLAIM_ID = "channel_claim_id"
CLAIM_NAME = "claim_name"
CHANNEL_NAME = "channel_name"
FileID = _FileID()
@ -175,6 +182,7 @@ class Daemon(AuthJSONRPCServer):
def __init__(self, analytics_manager):
AuthJSONRPCServer.__init__(self, conf.settings['use_auth_http'])
self.db_dir = conf.settings['data_dir']
self.storage = SQLiteStorage(self.db_dir)
self.download_directory = conf.settings['download_directory']
if conf.settings['BLOBFILES_DIR'] == "blobfiles":
self.blobfile_dir = os.path.join(self.db_dir, "blobfiles")
@ -198,7 +206,7 @@ class Daemon(AuthJSONRPCServer):
self.connected_to_internet = True
self.connection_status_code = None
self.platform = None
self.current_db_revision = 5
self.current_db_revision = 6
self.db_revision_file = conf.settings.get_db_revision_filename()
self.session = None
self._session_id = conf.settings.get_session_id()
@ -221,7 +229,6 @@ class Daemon(AuthJSONRPCServer):
}
self.looping_call_manager = LoopingCallManager(calls)
self.sd_identifier = StreamDescriptorIdentifier()
self.stream_info_manager = None
self.lbry_file_manager = None
@defer.inlineCallbacks
@ -230,16 +237,6 @@ class Daemon(AuthJSONRPCServer):
configure_loggly_handler()
@defer.inlineCallbacks
def _announce_startup():
def _announce():
self.announced_startup = True
self.startup_status = STARTUP_STAGES[5]
log.info("Started lbrynet-daemon")
log.info("%i blobs in manager", len(self.session.blob_manager.blobs))
yield _announce()
log.info("Starting lbrynet-daemon")
self.looping_call_manager.start(Checker.INTERNET_CONNECTION, 3600)
@ -248,7 +245,8 @@ class Daemon(AuthJSONRPCServer):
yield self._initial_setup()
yield threads.deferToThread(self._setup_data_directory)
yield self._check_db_migration()
migrated = yield self._check_db_migration()
yield self.storage.setup()
yield self._get_session()
yield self._check_wallet_locked()
yield self._start_analytics()
@ -258,7 +256,20 @@ class Daemon(AuthJSONRPCServer):
yield self._setup_query_handlers()
yield self._setup_server()
log.info("Starting balance: " + str(self.session.wallet.get_balance()))
yield _announce_startup()
self.announced_startup = True
self.startup_status = STARTUP_STAGES[5]
log.info("Started lbrynet-daemon")
###
# this should be removed with the next db revision
if migrated:
missing_channel_claim_ids = yield self.storage.get_unknown_certificate_ids()
while missing_channel_claim_ids: # in case there is a crazy amount, batch to be safe
batch = missing_channel_claim_ids[:100]
_ = yield self.session.wallet.get_claims_by_ids(*batch)
missing_channel_claim_ids = missing_channel_claim_ids[100:]
###
self._auto_renew()
def _get_platform(self):
@ -327,7 +338,6 @@ class Daemon(AuthJSONRPCServer):
reflector_factory = reflector_server_factory(
self.session.peer_manager,
self.session.blob_manager,
self.stream_info_manager,
self.lbry_file_manager
)
try:
@ -485,42 +495,36 @@ class Daemon(AuthJSONRPCServer):
log.debug("Created the blobfile directory: %s", str(self.blobfile_dir))
if not os.path.exists(self.db_revision_file):
log.warning("db_revision file not found. Creating it")
self._write_db_revision_file(old_revision)
self._write_db_revision_file(self.current_db_revision)
@defer.inlineCallbacks
def _check_db_migration(self):
old_revision = 1
migrated = False
if os.path.exists(self.db_revision_file):
old_revision = int(open(self.db_revision_file).read().strip())
with open(self.db_revision_file, "r") as revision_read_handle:
old_revision = int(revision_read_handle.read().strip())
if old_revision > self.current_db_revision:
raise Exception('This version of lbrynet is not compatible with the database\n'
'Your database is revision %i, expected %i' %
(old_revision, self.current_db_revision))
def update_version_file_and_print_success():
if old_revision < self.current_db_revision:
from lbrynet.database.migrator import dbmigrator
log.info("Upgrading your databases (revision %i to %i)", old_revision, self.current_db_revision)
yield threads.deferToThread(
dbmigrator.migrate_db, self.db_dir, old_revision, self.current_db_revision
)
self._write_db_revision_file(self.current_db_revision)
log.info("Finished upgrading the databases.")
if old_revision < self.current_db_revision:
from lbrynet.db_migrator import dbmigrator
log.info("Upgrading your databases")
d = threads.deferToThread(
dbmigrator.migrate_db, self.db_dir, old_revision, self.current_db_revision)
d.addCallback(lambda _: update_version_file_and_print_success())
return d
return defer.succeed(True)
migrated = True
defer.returnValue(migrated)
@defer.inlineCallbacks
def _setup_lbry_file_manager(self):
log.info('Starting the file manager')
self.startup_status = STARTUP_STAGES[3]
self.stream_info_manager = DBEncryptedFileMetadataManager(self.db_dir)
self.lbry_file_manager = EncryptedFileManager(
self.session,
self.stream_info_manager,
self.sd_identifier,
download_directory=self.download_directory
)
self.lbry_file_manager = EncryptedFileManager(self.session, self.sd_identifier)
yield self.lbry_file_manager.setup()
log.info('Done setting up file manager')
@ -549,8 +553,7 @@ class Daemon(AuthJSONRPCServer):
config['use_keyring'] = conf.settings['use_keyring']
if conf.settings['lbryum_wallet_dir']:
config['lbryum_path'] = conf.settings['lbryum_wallet_dir']
storage = SqliteStorage(self.db_dir)
wallet = LBRYumWallet(storage, config)
wallet = LBRYumWallet(self.storage, config)
return defer.succeed(wallet)
elif self.wallet_type == PTC_WALLET:
log.info("Using PTC wallet")
@ -573,7 +576,8 @@ class Daemon(AuthJSONRPCServer):
use_upnp=self.use_upnp,
wallet=wallet,
is_generous=conf.settings['is_generous_host'],
external_ip=self.platform['ip']
external_ip=self.platform['ip'],
storage=self.storage
)
self.startup_status = STARTUP_STAGES[2]
@ -594,7 +598,7 @@ class Daemon(AuthJSONRPCServer):
self.session.peer_finder,
self.session.rate_limiter,
self.session.blob_manager,
self.stream_info_manager,
self.session.storage,
self.session.wallet,
self.download_directory
)
@ -623,7 +627,7 @@ class Daemon(AuthJSONRPCServer):
def _get_stream_analytics_report(self, claim_dict):
sd_hash = claim_dict.source_hash
try:
stream_hash = yield self.stream_info_manager.get_stream_hash_for_sd_hash(sd_hash)
stream_hash = yield self.session.storage.get_stream_hash_for_sd_hash(sd_hash)
except Exception:
stream_hash = None
report = {
@ -637,7 +641,7 @@ class Daemon(AuthJSONRPCServer):
sd_host = None
report["sd_blob"] = sd_host
if stream_hash:
blob_infos = yield self.stream_info_manager.get_blobs_for_stream(stream_hash)
blob_infos = yield self.session.storage.get_blobs_for_stream(stream_hash)
report["known_blobs"] = len(blob_infos)
else:
blob_infos = []
@ -683,12 +687,13 @@ class Daemon(AuthJSONRPCServer):
self.disable_max_key_fee,
conf.settings['data_rate'], timeout)
try:
lbry_file, finished_deferred = yield self.streams[sd_hash].start(claim_dict, name)
yield self.stream_info_manager.save_outpoint_to_file(lbry_file.rowid, txid, nout)
finished_deferred.addCallbacks(lambda _: _download_finished(download_id, name,
claim_dict),
lambda e: _download_failed(e, download_id, name,
claim_dict))
lbry_file, finished_deferred = yield self.streams[sd_hash].start(
claim_dict, name, txid, nout, file_name
)
finished_deferred.addCallbacks(
lambda _: _download_finished(download_id, name, claim_dict),
lambda e: _download_failed(e, download_id, name, claim_dict)
)
result = yield self._get_lbry_file_dict(lbry_file, full_status=True)
except Exception as err:
yield _download_failed(err, download_id, name, claim_dict)
@ -713,7 +718,8 @@ class Daemon(AuthJSONRPCServer):
if bid <= 0.0:
raise Exception("Invalid bid")
if not file_path:
claim_out = yield publisher.publish_stream(name, bid, claim_dict, claim_address,
stream_hash = yield self.storage.get_stream_hash_for_sd_hash(claim_dict['stream']['source']['source'])
claim_out = yield publisher.publish_stream(name, bid, claim_dict, stream_hash, claim_address,
change_address)
else:
claim_out = yield publisher.create_and_publish_stream(name, bid, claim_dict, file_path,
@ -722,9 +728,6 @@ class Daemon(AuthJSONRPCServer):
d = reupload.reflect_stream(publisher.lbry_file)
d.addCallbacks(lambda _: log.info("Reflected new publication to lbry://%s", name),
log.exception)
yield self.stream_info_manager.save_outpoint_to_file(publisher.lbry_file.rowid,
claim_out['txid'],
int(claim_out['nout']))
self.analytics_manager.send_claim_action('publish')
log.info("Success! Published to lbry://%s txid: %s nout: %d", name, claim_out['txid'],
claim_out['nout'])
@ -880,7 +883,7 @@ class Daemon(AuthJSONRPCServer):
else:
written_bytes = 0
size = outpoint = num_completed = num_known = status = None
size = num_completed = num_known = status = None
if full_status:
size = yield lbry_file.get_total_bytes()
@ -888,7 +891,6 @@ class Daemon(AuthJSONRPCServer):
num_completed = file_status.num_completed
num_known = file_status.num_known
status = file_status.running_status
outpoint = yield self.stream_info_manager.get_file_outpoint(lbry_file.rowid)
result = {
'completed': lbry_file.completed,
@ -908,7 +910,14 @@ class Daemon(AuthJSONRPCServer):
'blobs_completed': num_completed,
'blobs_in_stream': num_known,
'status': status,
'outpoint': outpoint
'claim_id': lbry_file.claim_id,
'txid': lbry_file.txid,
'nout': lbry_file.nout,
'outpoint': lbry_file.outpoint,
'metadata': lbry_file.metadata,
'channel_claim_id': lbry_file.channel_claim_id,
'channel_name': lbry_file.channel_name,
'claim_name': lbry_file.claim_name
}
defer.returnValue(result)
@ -953,12 +962,12 @@ class Daemon(AuthJSONRPCServer):
dl.addCallback(lambda blobs: [blob[1] for blob in blobs if blob[0]])
return dl
d = self.stream_info_manager.get_blobs_for_stream(stream_hash)
d = self.session.storage.get_blobs_for_stream(stream_hash)
d.addCallback(_get_blobs)
return d
def get_blobs_for_sd_hash(self, sd_hash):
d = self.stream_info_manager.get_stream_hash_for_sd_hash(sd_hash)
d = self.session.storage.get_stream_hash_for_sd_hash(sd_hash)
d.addCallback(self.get_blobs_for_stream_hash)
return d
@ -1379,16 +1388,24 @@ class Daemon(AuthJSONRPCServer):
Usage:
file_list [--sd_hash=<sd_hash>] [--file_name=<file_name>] [--stream_hash=<stream_hash>]
[--rowid=<rowid>]
[-f]
[--rowid=<rowid>] [--claim_id=<claim_id>] [--outpoint=<outpoint>] [--txid=<txid>] [--nout=<nout>]
[--channel_claim_id=<channel_claim_id>] [--channel_name=<channel_name>]
[--claim_name=<claim_name>] [-f]
Options:
--sd_hash=<sd_hash> : get file with matching sd hash
--file_name=<file_name> : get file with matching file name in the
downloads folder
--stream_hash=<stream_hash> : get file with matching stream hash
--rowid=<rowid> : get file with matching row id
-f : full status, populate the 'message' and 'size' fields
--sd_hash=<sd_hash> : get file with matching sd hash
--file_name=<file_name> : get file with matching file name in the
downloads folder
--stream_hash=<stream_hash> : get file with matching stream hash
--rowid=<rowid> : get file with matching row id
--claim_id=<claim_id> : get file with matching claim id
--outpoint=<outpoint> : get file with matching claim outpoint
--txid=<txid> : get file with matching claim txid
--nout=<nout> : get file with matching claim nout
--channel_claim_id=<channel_claim_id> : get file with matching channel claim id
--channel_name=<channel_name> : get file with matching channel name
--claim_name=<claim_name> : get file with matching claim name
-f : full status, populate the 'message' and 'size' fields
Returns:
(list) List of files
@ -1412,7 +1429,14 @@ class Daemon(AuthJSONRPCServer):
'blobs_completed': (int) num_completed, None if full_status is false,
'blobs_in_stream': (int) None if full_status is false,
'status': (str) downloader status, None if full_status is false,
'outpoint': (str), None if full_status is false or if claim is not found
'claim_id': (str) None if full_status is false or if claim is not found,
'outpoint': (str) None if full_status is false or if claim is not found,
'txid': (str) None if full_status is false or if claim is not found,
'nout': (int) None if full_status is false or if claim is not found,
'metadata': (dict) None if full_status is false or if claim is not found,
'channel_claim_id': (str) None if full_status is false or if claim is not found or is not signed,
'channel_name': (str) None if full_status is false or if claim is not found or is not signed,
'claim_name': (str) None if full_status is false or if claim is not found
},
]
"""
@ -1599,24 +1623,31 @@ class Daemon(AuthJSONRPCServer):
Returns:
(dict) Dictionary containing information about the stream
{
'completed': (bool) true if download is completed,
'file_name': (str) name of file,
'download_directory': (str) download directory,
'points_paid': (float) credit paid to download file,
'stopped': (bool) true if download is stopped,
'stream_hash': (str) stream hash of file,
'stream_name': (str) stream name,
'suggested_file_name': (str) suggested file name,
'sd_hash': (str) sd hash of file,
'download_path': (str) download path of file,
'mime_type': (str) mime type of file,
'key': (str) key attached to file,
'total_bytes': (int) file size in bytes, None if full_status is false,
'written_bytes': (int) written size in bytes,
'blobs_completed': (int) num_completed, None if full_status is false,
'blobs_in_stream': (int) None if full_status is false,
'status': (str) downloader status, None if full_status is false,
'outpoint': (str), None if full_status is false or if claim is not found
'completed': (bool) true if download is completed,
'file_name': (str) name of file,
'download_directory': (str) download directory,
'points_paid': (float) credit paid to download file,
'stopped': (bool) true if download is stopped,
'stream_hash': (str) stream hash of file,
'stream_name': (str) stream name,
'suggested_file_name': (str) suggested file name,
'sd_hash': (str) sd hash of file,
'download_path': (str) download path of file,
'mime_type': (str) mime type of file,
'key': (str) key attached to file,
'total_bytes': (int) file size in bytes, None if full_status is false,
'written_bytes': (int) written size in bytes,
'blobs_completed': (int) num_completed, None if full_status is false,
'blobs_in_stream': (int) None if full_status is false,
'status': (str) downloader status, None if full_status is false,
'claim_id': (str) claim id,
'outpoint': (str) claim outpoint string,
'txid': (str) claim txid,
'nout': (int) claim nout,
'metadata': (dict) claim metadata,
'channel_claim_id': (str) None if claim is not signed,
'channel_name': (str) None if claim is not signed,
'claim_name': (str) claim name
}
"""
@ -1710,18 +1741,26 @@ class Daemon(AuthJSONRPCServer):
Usage:
file_delete [-f] [--delete_all] [--sd_hash=<sd_hash>] [--file_name=<file_name>]
[--stream_hash=<stream_hash>] [--rowid=<rowid>]
[--stream_hash=<stream_hash>] [--rowid=<rowid>] [--claim_id=<claim_id>] [--txid=<txid>]
[--nout=<nout>] [--claim_name=<claim_name>] [--channel_claim_id=<channel_claim_id>]
[--channel_name=<channel_name>]
Options:
-f, --delete_from_download_dir : delete file from download directory,
instead of just deleting blobs
--delete_all : if there are multiple matching files,
allow the deletion of multiple files.
Otherwise do not delete anything.
--sd_hash=<sd_hash> : delete by file sd hash
--file_name=<file_name> : delete by file name in downloads folder
--stream_hash=<stream_hash> : delete by file stream hash
--rowid=<rowid> : delete by file row id
-f, --delete_from_download_dir : delete file from download directory,
instead of just deleting blobs
--delete_all : if there are multiple matching files,
allow the deletion of multiple files.
Otherwise do not delete anything.
--sd_hash=<sd_hash> : delete by file sd hash
--file_name=<file_name> : delete by file name in downloads folder
--stream_hash=<stream_hash> : delete by file stream hash
--rowid=<rowid> : delete by file row id
--claim_id=<claim_id> : delete by file claim id
--txid=<txid> : delete by file claim txid
--nout=<nout> : delete by file claim nout
--claim_name=<claim_name> : delete by file claim name
--channel_claim_id=<channel_claim_id> : delete by file channel claim id
--channel_name=<channel_name> : delete by file channel claim name
Returns:
(bool) true if deletion was successful
@ -2730,8 +2769,8 @@ class Daemon(AuthJSONRPCServer):
response = yield self._render_response("Don't have that blob")
defer.returnValue(response)
try:
stream_hash = yield self.stream_info_manager.get_stream_hash_for_sd_hash(blob_hash)
yield self.stream_info_manager.delete_stream(stream_hash)
stream_hash = yield self.session.storage.get_stream_hash_for_sd_hash(blob_hash)
yield self.session.storage.delete_stream(stream_hash)
except Exception as err:
pass
yield self.session.blob_manager.delete_blobs([blob_hash])

View file

@ -116,14 +116,15 @@ class GetStream(object):
raise Exception('No suitable factory was found in {}'.format(factories))
@defer.inlineCallbacks
def get_downloader(self, factory, stream_metadata):
def get_downloader(self, factory, stream_metadata, file_name=None):
# TODO: we should use stream_metadata.options.get_downloader_options
# instead of hard-coding the options to be [self.data_rate]
downloader = yield factory.make_downloader(
stream_metadata,
[self.data_rate],
self.data_rate,
self.payment_rate_manager,
download_directory=self.download_directory,
self.download_directory,
file_name=file_name
)
defer.returnValue(downloader)
@ -165,10 +166,10 @@ class GetStream(object):
defer.returnValue(key_fee)
@defer.inlineCallbacks
def _create_downloader(self, sd_blob):
def _create_downloader(self, sd_blob, file_name=None):
stream_metadata = yield self.sd_identifier.get_metadata_for_sd_blob(sd_blob)
factory = self.get_downloader_factory(stream_metadata.factories)
downloader = yield self.get_downloader(factory, stream_metadata)
downloader = yield self.get_downloader(factory, stream_metadata, file_name)
defer.returnValue(downloader)
@defer.inlineCallbacks
@ -178,15 +179,17 @@ class GetStream(object):
defer.returnValue(sd_blob)
@defer.inlineCallbacks
def _download(self, sd_blob, name, key_fee):
self.downloader = yield self._create_downloader(sd_blob)
def _download(self, sd_blob, name, key_fee, txid, nout, file_name=None):
self.downloader = yield self._create_downloader(sd_blob, file_name=file_name)
yield self.pay_key_fee(key_fee, name)
yield self.session.storage.save_content_claim(self.downloader.stream_hash, "%s:%i" % (txid, nout))
yield self.downloader.get_claim_info()
log.info("Downloading lbry://%s (%s) --> %s", name, self.sd_hash[:6], self.download_path)
self.finished_deferred = self.downloader.start()
self.finished_deferred.addCallbacks(lambda result: self.finish(result, name), self.fail)
@defer.inlineCallbacks
def start(self, stream_info, name):
def start(self, stream_info, name, txid, nout, file_name=None):
"""
Start download
@ -203,7 +206,7 @@ class GetStream(object):
self.set_status(DOWNLOAD_METADATA_CODE, name)
sd_blob = yield self._download_sd_blob()
yield self._download(sd_blob, name, key_fee)
yield self._download(sd_blob, name, key_fee, txid, nout, file_name)
self.set_status(DOWNLOAD_RUNNING_CODE, name)
try:

View file

@ -6,9 +6,6 @@ from twisted.internet import defer
from lbrynet.core import file_utils
from lbrynet.file_manager.EncryptedFileCreator import create_lbry_file
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloader
from lbrynet.lbry_file.StreamDescriptor import publish_sd_blob
log = logging.getLogger(__name__)
@ -33,29 +30,27 @@ class Publisher(object):
file_name = os.path.basename(file_path)
with file_utils.get_read_handle(file_path) as read_handle:
stream_hash = yield create_lbry_file(self.session, self.lbry_file_manager, file_name,
read_handle)
sd_hash = yield publish_sd_blob(self.lbry_file_manager.stream_info_manager,
self.session.blob_manager, stream_hash)
status = ManagedEncryptedFileDownloader.STATUS_FINISHED
self.lbry_file = yield self.lbry_file_manager.add_lbry_file(stream_hash, sd_hash,
status=status)
self.lbry_file = yield create_lbry_file(self.session, self.lbry_file_manager, file_name,
read_handle)
if 'source' not in claim_dict['stream']:
claim_dict['stream']['source'] = {}
claim_dict['stream']['source']['source'] = sd_hash
claim_dict['stream']['source']['source'] = self.lbry_file.sd_hash
claim_dict['stream']['source']['sourceType'] = 'lbry_sd_hash'
claim_dict['stream']['source']['contentType'] = get_content_type(file_path)
claim_dict['stream']['source']['version'] = "_0_0_1" # need current version here
claim_out = yield self.make_claim(name, bid, claim_dict, claim_address, change_address)
self.lbry_file.completed = True
yield self.lbry_file.save_status()
yield self.session.storage.save_content_claim(
self.lbry_file.stream_hash, "%s:%i" % (claim_out['txid'], claim_out['nout'])
)
yield self.lbry_file.get_claim_info()
defer.returnValue(claim_out)
@defer.inlineCallbacks
def publish_stream(self, name, bid, claim_dict, claim_address=None, change_address=None):
def publish_stream(self, name, bid, claim_dict, stream_hash, claim_address=None, change_address=None):
"""Make a claim without creating a lbry file"""
claim_out = yield self.make_claim(name, bid, claim_dict, claim_address, change_address)
yield self.session.storage.save_content_claim(stream_hash, "%s:%i" % (claim_out['txid'], claim_out['nout']))
defer.returnValue(claim_out)
@defer.inlineCallbacks

View file

@ -5,20 +5,23 @@ def migrate_db(db_dir, start, end):
current = start
while current < end:
if current == 1:
from lbrynet.db_migrator.migrate1to2 import do_migration
from lbrynet.database.migrator.migrate1to2 import do_migration
do_migration(db_dir)
elif current == 2:
from lbrynet.db_migrator.migrate2to3 import do_migration
from lbrynet.database.migrator.migrate2to3 import do_migration
do_migration(db_dir)
elif current == 3:
from lbrynet.db_migrator.migrate3to4 import do_migration
from lbrynet.database.migrator.migrate3to4 import do_migration
do_migration(db_dir)
elif current == 4:
from lbrynet.db_migrator.migrate4to5 import do_migration
from lbrynet.database.migrator.migrate4to5 import do_migration
do_migration(db_dir)
elif current == 5:
from lbrynet.database.migrator.migrate5to6 import do_migration
do_migration(db_dir)
else:
raise Exception(
"DB migration of version {} to {} is not available".format(current, current+1))
raise Exception("DB migration of version {} to {} is not available".format(current,
current+1))
current += 1
return None
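# e.g. a data directory at revision 4 being upgraded to revision 6 runs
# migrate4to5 followed by migrate5to6:
#   migrate_db(db_dir, 4, 6)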

View file

@ -0,0 +1,256 @@
import sqlite3
import os
import json
import logging
from lbryschema.decode import smart_decode
from lbrynet import conf
from lbrynet.database.storage import SQLiteStorage
log = logging.getLogger(__name__)
default_download_directory = conf.default_download_dir
def run_operation(db):
def _decorate(fn):
def _wrapper(*args):
cursor = db.cursor()
try:
result = fn(cursor, *args)
db.commit()
return result
except sqlite3.IntegrityError:
db.rollback()
raise
return _wrapper
return _decorate
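# note: the decorated helpers below are invoked without the cursor argument;
# _wrapper injects a cursor from `db`, which is why the call sites carry
# pylint no-value-for-parameter disables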
def verify_sd_blob(sd_hash, blob_dir):
with open(os.path.join(blob_dir, sd_hash), "r") as sd_file:
data = sd_file.read()
sd_length = len(data)
decoded = json.loads(data)
assert set(decoded.keys()) == {
'stream_name', 'blobs', 'stream_type', 'key', 'suggested_file_name', 'stream_hash'
}, "invalid sd blob"
for blob in sorted(decoded['blobs'], key=lambda x: int(x['blob_num']), reverse=True):
if blob['blob_num'] == len(decoded['blobs']) - 1:
assert {'length', 'blob_num', 'iv'} == set(blob.keys()), 'invalid stream terminator'
assert blob['length'] == 0, 'non zero length stream terminator'
else:
assert {'blob_hash', 'length', 'blob_num', 'iv'} == set(blob.keys()), 'invalid stream blob'
assert blob['length'] > 0, 'zero length stream blob'
return decoded, sd_length
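# for reference, a valid sd blob decodes to json of this shape (hashes, ivs and
# lengths are illustrative):
#   {"stream_name": "...", "stream_type": "lbryfile", "key": "...",
#    "suggested_file_name": "...", "stream_hash": "...",
#    "blobs": [{"blob_hash": "...", "blob_num": 0, "iv": "...", "length": 2097152},
#              {"blob_num": 1, "iv": "...", "length": 0}]}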
def do_migration(db_dir):
new_db_path = os.path.join(db_dir, "lbrynet.sqlite")
connection = sqlite3.connect(new_db_path)
metadata_db = sqlite3.connect(os.path.join(db_dir, "blockchainname.db"))
lbryfile_db = sqlite3.connect(os.path.join(db_dir, 'lbryfile_info.db'))
blobs_db = sqlite3.connect(os.path.join(db_dir, 'blobs.db'))
name_metadata_cursor = metadata_db.cursor()
lbryfile_cursor = lbryfile_db.cursor()
blobs_db_cursor = blobs_db.cursor()
old_rowid_to_outpoint = {
rowid: (txid, nout) for (rowid, txid, nout) in
lbryfile_cursor.execute("select * from lbry_file_metadata").fetchall()
}
old_sd_hash_to_outpoint = {
sd_hash: (txid, nout) for (txid, nout, sd_hash) in
name_metadata_cursor.execute("select txid, n, sd_hash from name_metadata").fetchall()
}
sd_hash_to_stream_hash = {
sd_hash: stream_hash for (sd_hash, stream_hash) in
lbryfile_cursor.execute("select sd_blob_hash, stream_hash from lbry_file_descriptors").fetchall()
}
stream_hash_to_stream_blobs = {}
for (blob_hash, stream_hash, position, iv, length) in lbryfile_db.execute(
"select * from lbry_file_blobs").fetchall():
stream_blobs = stream_hash_to_stream_blobs.get(stream_hash, [])
stream_blobs.append((blob_hash, length, position, iv))
stream_hash_to_stream_blobs[stream_hash] = stream_blobs
claim_outpoint_queries = {}
for claim_query in metadata_db.execute(
"select distinct c.txid, c.n, c.claimId, c.name, claim_cache.claim_sequence, claim_cache.claim_address, "
"claim_cache.height, claim_cache.amount, claim_cache.claim_pb "
"from claim_cache inner join claim_ids c on claim_cache.claim_id=c.claimId"):
txid, nout = claim_query[0], claim_query[1]
if (txid, nout) in claim_outpoint_queries:
continue
claim_outpoint_queries[(txid, nout)] = claim_query
@run_operation(connection)
def _populate_blobs(transaction, blob_infos):
transaction.executemany(
"insert into blob values (?, ?, ?, ?, ?)",
[(blob_hash, blob_length, int(next_announce_time), should_announce, "finished")
for (blob_hash, blob_length, _, next_announce_time, should_announce) in blob_infos]
)
@run_operation(connection)
def _import_file(transaction, sd_hash, stream_hash, key, stream_name, suggested_file_name, data_rate,
status, stream_blobs):
try:
transaction.execute(
"insert or ignore into stream values (?, ?, ?, ?, ?)",
(stream_hash, sd_hash, key, stream_name, suggested_file_name)
)
except sqlite3.IntegrityError:
# failed because the sd isn't a known blob, we'll try to read the blob file and recover it
return sd_hash
# insert any stream blobs that were missing from the blobs table
transaction.executemany(
"insert or ignore into blob values (?, ?, ?, ?, ?)",
[
(blob_hash, length, 0, 0, "pending")
for (blob_hash, length, position, iv) in stream_blobs
]
)
# insert the stream blobs
for blob_hash, length, position, iv in stream_blobs:
transaction.execute(
"insert or ignore into stream_blob values (?, ?, ?, ?)",
(stream_hash, blob_hash, position, iv)
)
# insert the file
transaction.execute(
"insert or ignore into file values (?, ?, ?, ?, ?)",
(stream_hash, stream_name, default_download_directory.encode('hex'),
data_rate, status)
)
@run_operation(connection)
def _add_recovered_blobs(transaction, blob_infos, sd_hash, sd_length):
transaction.execute(
"insert or replace into blob values (?, ?, ?, ?, ?)", (sd_hash, sd_length, 0, 1, "finished")
)
for blob in sorted(blob_infos, key=lambda x: x['blob_num'], reverse=True):
if blob['blob_num'] < len(blob_infos) - 1:
transaction.execute(
"insert or ignore into blob values (?, ?, ?, ?, ?)",
(blob['blob_hash'], blob['length'], 0, 0, "pending")
)
@run_operation(connection)
def _make_db(new_db):
# create the new tables
new_db.executescript(SQLiteStorage.CREATE_TABLES_QUERY)
# first migrate the blobs
blobs = blobs_db_cursor.execute("select * from blobs").fetchall()
_populate_blobs(blobs) # pylint: disable=no-value-for-parameter
log.info("migrated %i blobs", new_db.execute("select count(*) from blob").fetchone()[0])
# used to store the query arguments if we need to try re-importing the lbry file later
file_args = {} # <sd_hash>: args tuple
file_outpoints = {} # <outpoint tuple>: sd_hash
# get the file and stream queries ready
for (rowid, sd_hash, stream_hash, key, stream_name, suggested_file_name, data_rate, status) in \
lbryfile_db.execute(
"select distinct lbry_files.rowid, d.sd_blob_hash, lbry_files.*, o.blob_data_rate, o.status "
"from lbry_files "
"inner join lbry_file_descriptors d on lbry_files.stream_hash=d.stream_hash "
"inner join lbry_file_options o on lbry_files.stream_hash=o.stream_hash"):
# this is used to link the file to a content claim after we've imported all the files
if rowid in old_rowid_to_outpoint:
file_outpoints[old_rowid_to_outpoint[rowid]] = sd_hash
elif sd_hash in old_sd_hash_to_outpoint:
file_outpoints[old_sd_hash_to_outpoint[sd_hash]] = sd_hash
sd_hash_to_stream_hash[sd_hash] = stream_hash
if stream_hash in stream_hash_to_stream_blobs:
file_args[sd_hash] = (
sd_hash, stream_hash, key, stream_name,
suggested_file_name, data_rate or 0.0,
status, stream_hash_to_stream_blobs.pop(stream_hash)
)
# used to store the query arguments if we need to try re-importing the claim
claim_queries = {} # <sd_hash>: claim query tuple
# get the claim queries ready, only keep those with associated files
for outpoint, sd_hash in file_outpoints.iteritems():
if outpoint in claim_outpoint_queries:
claim_queries[sd_hash] = claim_outpoint_queries[outpoint]
# insert the claims
new_db.executemany(
"insert or ignore into claim values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
[
(
"%s:%i" % (claim_arg_tup[0], claim_arg_tup[1]), claim_arg_tup[2], claim_arg_tup[3],
claim_arg_tup[7], claim_arg_tup[6], claim_arg_tup[8],
smart_decode(claim_arg_tup[8]).certificate_id, claim_arg_tup[5], claim_arg_tup[4]
)
for sd_hash, claim_arg_tup in claim_queries.iteritems() if claim_arg_tup
] # sd_hash, (txid, nout, claim_id, name, sequence, address, height, amount, serialized)
)
log.info("migrated %i claims", new_db.execute("select count(*) from claim").fetchone()[0])
damaged_stream_sds = []
# import the files and collect the sd hashes of streams that need to be recovered
for sd_hash, file_query in file_args.iteritems():
failed_sd = _import_file(*file_query)
if failed_sd:
damaged_stream_sds.append(failed_sd)
# recover damaged streams
if damaged_stream_sds:
blob_dir = os.path.join(db_dir, "blobfiles")
damaged_sds_on_disk = [] if not os.path.isdir(blob_dir) else list({p for p in os.listdir(blob_dir)
if p in damaged_stream_sds})
for damaged_sd in damaged_sds_on_disk:
try:
decoded, sd_length = verify_sd_blob(damaged_sd, blob_dir)
blobs = decoded['blobs']
_add_recovered_blobs(blobs, damaged_sd, sd_length) # pylint: disable=no-value-for-parameter
_import_file(*file_args[damaged_sd])
damaged_stream_sds.remove(damaged_sd)
except (OSError, ValueError, TypeError, IOError, AssertionError, sqlite3.IntegrityError):
continue
log.info("migrated %i files", new_db.execute("select count(*) from file").fetchone()[0])
# associate the content claims with their respective files
for claim_arg_tup in claim_queries.values():
if claim_arg_tup and (claim_arg_tup[0], claim_arg_tup[1]) in file_outpoints \
and file_outpoints[(claim_arg_tup[0], claim_arg_tup[1])] in sd_hash_to_stream_hash:
try:
new_db.execute(
"insert or ignore into content_claim values (?, ?)",
(
sd_hash_to_stream_hash.get(file_outpoints.get((claim_arg_tup[0], claim_arg_tup[1]))),
"%s:%i" % (claim_arg_tup[0], claim_arg_tup[1])
)
)
except sqlite3.IntegrityError:
continue
log.info("migrated %i content claims", new_db.execute("select count(*) from content_claim").fetchone()[0])
_make_db() # pylint: disable=no-value-for-parameter
connection.close()
blobs_db.close()
lbryfile_db.close()
metadata_db.close()
os.remove(os.path.join(db_dir, "blockchainname.db"))
os.remove(os.path.join(db_dir, 'lbryfile_info.db'))
os.remove(os.path.join(db_dir, 'blobs.db'))

637 lbrynet/database/storage.py Normal file
View file

@ -0,0 +1,637 @@
import logging
import os
import time
import sqlite3
import traceback
from decimal import Decimal
from twisted.internet import defer, task, reactor, threads
from twisted.enterprise import adbapi
from lbryschema.claim import ClaimDict
from lbryschema.decode import smart_decode
from lbrynet import conf
from lbrynet.cryptstream.CryptBlob import CryptBlobInfo
from lbryum.constants import COIN
log = logging.getLogger(__name__)
def _get_next_available_file_name(download_directory, file_name):
base_name, ext = os.path.splitext(file_name)
i = 0
while os.path.isfile(os.path.join(download_directory, file_name)):
i += 1
file_name = "%s_%i%s" % (base_name, i, ext)
return os.path.join(download_directory, file_name)
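# e.g. if "video.mp4" and "video_1.mp4" already exist in download_directory,
# this returns the path for "video_2.mp4"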
def _open_file_for_writing(download_directory, suggested_file_name):
file_path = _get_next_available_file_name(download_directory, suggested_file_name)
try:
file_handle = open(file_path, 'wb')
file_handle.close()
except IOError:
log.error(traceback.format_exc())
raise ValueError(
"Failed to open %s. Make sure you have permission to save files to that location." % file_path
)
return os.path.basename(file_path)
def open_file_for_writing(download_directory, suggested_file_name):
"""
Used to touch the path of a file to be downloaded
:param download_directory: (str)
:param suggested_file_name: (str)
:return: (str) basename
"""
return threads.deferToThread(_open_file_for_writing, download_directory, suggested_file_name)
def get_next_announce_time(hash_announcer, num_hashes_to_announce=1, min_reannounce_time=60*60,
single_announce_duration=5):
"""
Hash reannounce time is set to current time + MIN_HASH_REANNOUNCE_TIME,
unless we are announcing a lot of hashes at once, which could cause the
announce queue to pile up. To prevent pile-up, reannounce
only after a conservative estimate of how long it will take
to announce all the hashes.
Args:
num_hashes_to_announce: number of hashes that will be added to the queue
Returns:
timestamp for next announce time
"""
queue_size = hash_announcer.hash_queue_size() + num_hashes_to_announce
reannounce = max(min_reannounce_time,
queue_size * single_announce_duration)
return time.time() + reannounce
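# worked example with the defaults above: announcing 1 hash while 800 are already
# queued gives queue_size = 801, so reannounce = max(3600, 801 * 5) = 4005 seconds,
# i.e. the next announce lands ~67 minutes out instead of at the one hour floor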
def rerun_if_locked(f):
max_attempts = 3
def rerun(err, rerun_count, *args, **kwargs):
log.debug("Failed to execute (%s): %s", err, args)
if err.check(sqlite3.OperationalError) and err.value.message == "database is locked":
log.warning("database was locked. rerunning %s with args %s, kwargs %s",
str(f), str(args), str(kwargs))
if rerun_count < max_attempts:
return task.deferLater(reactor, 0, inner_wrapper, rerun_count + 1, *args, **kwargs)
raise err
def inner_wrapper(rerun_count, *args, **kwargs):
d = f(*args, **kwargs)
d.addErrback(rerun, rerun_count, *args, **kwargs)
return d
def wrapper(*args, **kwargs):
return inner_wrapper(0, *args, **kwargs)
return wrapper
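# with max_attempts = 3, a call that keeps failing with "database is locked" is
# rerun up to three times after the initial attempt before the OperationalError
# is finally re-raised to the caller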
class SqliteConnection(adbapi.ConnectionPool):
def __init__(self, db_path):
adbapi.ConnectionPool.__init__(self, 'sqlite3', db_path, check_same_thread=False)
@rerun_if_locked
def runInteraction(self, interaction, *args, **kw):
return adbapi.ConnectionPool.runInteraction(self, interaction, *args, **kw)
class SQLiteStorage(object):
CREATE_TABLES_QUERY = """
pragma foreign_keys=on;
pragma journal_mode=WAL;
create table if not exists blob (
blob_hash char(96) primary key not null,
blob_length integer not null,
next_announce_time integer not null,
should_announce integer not null default 0,
status text not null
);
create table if not exists stream (
stream_hash char(96) not null primary key,
sd_hash char(96) not null references blob,
stream_key text not null,
stream_name text not null,
suggested_filename text not null
);
create table if not exists stream_blob (
stream_hash char(96) not null references stream,
blob_hash char(96) references blob,
position integer not null,
iv char(32) not null,
primary key (stream_hash, blob_hash)
);
create table if not exists claim (
claim_outpoint text not null primary key,
claim_id char(40) not null,
claim_name text not null,
amount integer not null,
height integer not null,
serialized_metadata blob not null,
channel_claim_id text,
address text not null,
claim_sequence integer not null
);
create table if not exists file (
stream_hash text primary key not null references stream,
file_name text not null,
download_directory text not null,
blob_data_rate real not null,
status text not null
);
create table if not exists content_claim (
stream_hash text unique not null references file,
claim_outpoint text not null references claim,
primary key (stream_hash, claim_outpoint)
);
create table if not exists support (
support_outpoint text not null primary key,
claim_id text not null,
amount integer not null,
address text not null
);
"""
def __init__(self, db_dir):
self.db_dir = db_dir
self._db_path = os.path.join(db_dir, "lbrynet.sqlite")
log.info("connecting to database: %s", self._db_path)
self.db = SqliteConnection(self._db_path)
def setup(self):
def _create_tables(transaction):
transaction.executescript(self.CREATE_TABLES_QUERY)
return self.db.runInteraction(_create_tables)
@defer.inlineCallbacks
def run_and_return_one_or_none(self, query, *args):
result = yield self.db.runQuery(query, args)
if result:
defer.returnValue(result[0][0])
else:
defer.returnValue(None)
@defer.inlineCallbacks
def run_and_return_list(self, query, *args):
result = yield self.db.runQuery(query, args)
if result:
defer.returnValue([i[0] for i in result])
else:
defer.returnValue([])
def stop(self):
self.db.close()
return defer.succeed(True)
# # # # # # # # # blob functions # # # # # # # # #
@defer.inlineCallbacks
def add_completed_blob(self, blob_hash, length, next_announce_time, should_announce):
log.debug("Adding a completed blob. blob_hash=%s, length=%i", blob_hash, length)
yield self.add_known_blob(blob_hash, length)
yield self.set_blob_status(blob_hash, "finished")
yield self.set_should_announce(blob_hash, next_announce_time, should_announce)
yield self.db.runOperation(
"update blob set blob_length=? where blob_hash=?", (length, blob_hash)
)
def set_should_announce(self, blob_hash, next_announce_time, should_announce):
should_announce = 1 if should_announce else 0
return self.db.runOperation(
"update blob set next_announce_time=?, should_announce=? where blob_hash=?",
(next_announce_time, should_announce, blob_hash)
)
def set_blob_status(self, blob_hash, status):
return self.db.runOperation(
"update blob set status=? where blob_hash=?", (status, blob_hash)
)
def get_blob_status(self, blob_hash):
return self.run_and_return_one_or_none(
"select status from blob where blob_hash=?", blob_hash
)
@defer.inlineCallbacks
def add_known_blob(self, blob_hash, length):
status = yield self.get_blob_status(blob_hash)
if status is None:
status = "pending"
yield self.db.runOperation("insert into blob values (?, ?, ?, ?, ?)",
(blob_hash, length, 0, 0, status))
defer.returnValue(status)
def should_announce(self, blob_hash):
return self.run_and_return_one_or_none(
"select should_announce from blob where blob_hash=?", blob_hash
)
def count_should_announce_blobs(self):
return self.run_and_return_one_or_none(
"select count(*) from blob where should_announce=1 and status=?", "finished"
)
def get_all_should_announce_blobs(self):
return self.run_and_return_list(
"select blob_hash from blob where should_announce=1 and status=?", "finished"
)
def get_blobs_to_announce(self, hash_announcer):
def get_and_update(transaction):
timestamp = time.time()
if conf.settings['announce_head_blobs_only']:
r = transaction.execute(
"select blob_hash from blob "
"where blob_hash is not null and should_announce=1 and next_announce_time<?",
(timestamp,)
)
else:
r = transaction.execute(
"select blob_hash from blob where blob_hash is not null and next_announce_time<?", (timestamp,)
)
blobs = [b for b, in r.fetchall()]
next_announce_time = get_next_announce_time(hash_announcer, len(blobs))
transaction.execute(
"update blob set next_announce_time=? where next_announce_time<?", (next_announce_time, timestamp)
)
log.debug("Got %s blobs to announce, next announce time is in %s seconds", len(blobs),
next_announce_time-time.time())
return blobs
return self.db.runInteraction(get_and_update)
def delete_blobs_from_db(self, blob_hashes):
def delete_blobs(transaction):
for blob_hash in blob_hashes:
transaction.execute("delete from blob where blob_hash=?;", (blob_hash,))
return self.db.runInteraction(delete_blobs)
def get_all_blob_hashes(self):
return self.run_and_return_list("select blob_hash from blob")
# # # # # # # # # stream blob functions # # # # # # # # #
def add_blobs_to_stream(self, stream_hash, blob_infos):
def _add_stream_blobs(transaction):
for blob_info in blob_infos:
transaction.execute("insert into stream_blob values (?, ?, ?, ?)",
(stream_hash, blob_info.get('blob_hash', None),
blob_info['blob_num'], blob_info['iv']))
return self.db.runInteraction(_add_stream_blobs)
@defer.inlineCallbacks
def add_known_blobs(self, blob_infos):
for blob_info in blob_infos:
if blob_info.get('blob_hash') and blob_info['length']:
yield self.add_known_blob(blob_info['blob_hash'], blob_info['length'])
# # # # # # # # # stream functions # # # # # # # # #
def store_stream(self, stream_hash, sd_hash, stream_name, stream_key, suggested_file_name,
stream_blob_infos):
"""
Add a stream to the stream table
:param stream_hash: hash of the assembled stream
:param sd_hash: hash of the sd blob
:param stream_name: the name of the file the stream was generated from
:param stream_key: blob decryption key
:param suggested_file_name: (str) suggested file name for stream
:param stream_blob_infos: (list) of blob info dictionaries
:return: (defer.Deferred)
"""
def _store_stream(transaction):
transaction.execute("insert into stream values (?, ?, ?, ?, ?);",
(stream_hash, sd_hash, stream_key, stream_name,
suggested_file_name))
for blob_info in stream_blob_infos:
transaction.execute("insert into stream_blob values (?, ?, ?, ?)",
(stream_hash, blob_info.get('blob_hash', None),
blob_info['blob_num'], blob_info['iv']))
return self.db.runInteraction(_store_stream)
@defer.inlineCallbacks
def delete_stream(self, stream_hash):
sd_hash = yield self.get_sd_blob_hash_for_stream(stream_hash)
stream_blobs = yield self.get_blobs_for_stream(stream_hash)
blob_hashes = [b.blob_hash for b in stream_blobs]
def _delete_stream(transaction):
transaction.execute("delete from content_claim where stream_hash=? ", (stream_hash,))
transaction.execute("delete from file where stream_hash=? ", (stream_hash, ))
transaction.execute("delete from stream_blob where stream_hash=?", (stream_hash, ))
transaction.execute("delete from stream where stream_hash=? ", (stream_hash, ))
transaction.execute("delete from blob where blob_hash=?", (sd_hash, ))
for blob_hash in blob_hashes:
transaction.execute("delete from blob where blob_hash=?;", (blob_hash, ))
yield self.db.runInteraction(_delete_stream)
def get_all_streams(self):
return self.run_and_return_list("select stream_hash from stream")
def get_stream_info(self, stream_hash):
d = self.db.runQuery("select stream_name, stream_key, suggested_filename, sd_hash from stream "
"where stream_hash=?", (stream_hash, ))
d.addCallback(lambda r: None if not r else r[0])
return d
def check_if_stream_exists(self, stream_hash):
d = self.db.runQuery("select stream_hash from stream where stream_hash=?", (stream_hash, ))
d.addCallback(lambda r: True if len(r) else False)
return d
def get_blob_num_by_hash(self, stream_hash, blob_hash):
return self.run_and_return_one_or_none(
"select position from stream_blob where stream_hash=? and blob_hash=?",
stream_hash, blob_hash
)
def get_stream_blob_by_position(self, stream_hash, blob_num):
return self.run_and_return_one_or_none(
"select blob_hash from stream_blob where stream_hash=? and position=?",
stream_hash, blob_num
)
def get_blobs_for_stream(self, stream_hash):
def _get_blobs_for_stream(transaction):
crypt_blob_infos = []
stream_blobs = transaction.execute("select blob_hash, position, iv from stream_blob "
"where stream_hash=?", (stream_hash, )).fetchall()
if stream_blobs:
for blob_hash, position, iv in stream_blobs:
if blob_hash is not None:
blob_length = transaction.execute("select blob_length from blob "
"where blob_hash=?",
(blob_hash,)).fetchone()
blob_length = 0 if not blob_length else blob_length[0]
crypt_blob_infos.append(CryptBlobInfo(blob_hash, position, blob_length, iv))
else:
crypt_blob_infos.append(CryptBlobInfo(None, position, 0, iv))
crypt_blob_infos = sorted(crypt_blob_infos, key=lambda info: info.blob_num)
return crypt_blob_infos
return self.db.runInteraction(_get_blobs_for_stream)
def get_stream_of_blob(self, blob_hash):
return self.run_and_return_one_or_none(
"select stream_hash from stream_blob where blob_hash=?", blob_hash
)
def get_sd_blob_hash_for_stream(self, stream_hash):
return self.run_and_return_one_or_none(
"select sd_hash from stream where stream_hash=?", stream_hash
)
def get_stream_hash_for_sd_hash(self, sd_blob_hash):
return self.run_and_return_one_or_none(
"select stream_hash from stream where sd_hash = ?", sd_blob_hash
)
# # # # # # # # # file stuff # # # # # # # # #
@defer.inlineCallbacks
def save_downloaded_file(self, stream_hash, file_name, download_directory, data_payment_rate):
# touch the next available variant of the file name (e.g. file_1.ext) to reserve it
file_name = yield open_file_for_writing(download_directory.decode('hex'), file_name.decode('hex'))
result = yield self.save_published_file(
stream_hash, file_name.encode('hex'), download_directory, data_payment_rate
)
defer.returnValue(result)
def save_published_file(self, stream_hash, file_name, download_directory, data_payment_rate, status="stopped"):
def do_save(db_transaction):
db_transaction.execute(
"insert into file values (?, ?, ?, ?, ?)",
(stream_hash, file_name, download_directory, data_payment_rate, status)
)
file_rowid = db_transaction.lastrowid
return file_rowid
return self.db.runInteraction(do_save)
def get_filename_for_rowid(self, rowid):
return self.run_and_return_one_or_none("select file_name from file where rowid=?", rowid)
def get_all_lbry_files(self):
def _lbry_file_dict(rowid, stream_hash, file_name, download_dir, data_rate, status, _, sd_hash, stream_key,
stream_name, suggested_file_name):
return {
"row_id": rowid,
"stream_hash": stream_hash,
"file_name": file_name,
"download_directory": download_dir,
"blob_data_rate": data_rate,
"status": status,
"sd_hash": sd_hash,
"key": stream_key,
"stream_name": stream_name,
"suggested_file_name": suggested_file_name
}
def _get_all_files(transaction):
return [
_lbry_file_dict(*file_info) for file_info in transaction.execute(
"select file.rowid, file.*, stream.* "
"from file inner join stream on file.stream_hash=stream.stream_hash"
).fetchall()
]
d = self.db.runInteraction(_get_all_files)
return d
def change_file_status(self, rowid, new_status):
d = self.db.runQuery("update file set status=? where rowid=?", (new_status, rowid))
d.addCallback(lambda _: new_status)
return d
def get_lbry_file_status(self, rowid):
return self.run_and_return_one_or_none(
"select status from file where rowid = ?", rowid
)
def get_rowid_for_stream_hash(self, stream_hash):
return self.run_and_return_one_or_none(
"select rowid from file where stream_hash=?", stream_hash
)
# # # # # # # # # support functions # # # # # # # # #
def save_supports(self, claim_id, supports):
# TODO: add 'address' to support items returned for a claim from lbrycrdd and lbryum-server
def _save_support(transaction):
transaction.execute("delete from support where claim_id=?", (claim_id, ))
for support in supports:
transaction.execute(
"insert into support values (?, ?, ?, ?)",
("%s:%i" % (support['txid'], support['nout']), claim_id, int(support['amount'] * COIN),
support.get('address', ""))
)
return self.db.runInteraction(_save_support)
def get_supports(self, claim_id):
def _format_support(outpoint, supported_id, amount, address):
return {
"txid": outpoint.split(":")[0],
"nout": int(outpoint.split(":")[1]),
"claim_id": supported_id,
"amount": float(Decimal(amount) / Decimal(COIN)),
"address": address,
}
def _get_supports(transaction):
return [
_format_support(*support_info)
for support_info in transaction.execute(
"select * from support where claim_id=?", (claim_id, )
).fetchall()
]
return self.db.runInteraction(_get_supports)
# # # # # # # # # claim functions # # # # # # # # #
@defer.inlineCallbacks
def save_claim(self, claim_info, claim_dict=None):
outpoint = "%s:%i" % (claim_info['txid'], claim_info['nout'])
claim_id = claim_info['claim_id']
name = claim_info['name']
amount = int(COIN * claim_info['amount'])
height = claim_info['height']
address = claim_info['address']
sequence = claim_info['claim_sequence']
claim_dict = claim_dict or smart_decode(claim_info['value'])
serialized = claim_dict.serialized.encode('hex')
def _save_claim(transaction):
transaction.execute(
"insert or replace into claim values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
(outpoint, claim_id, name, amount, height, serialized, claim_dict.certificate_id, address, sequence)
)
yield self.db.runInteraction(_save_claim)
# if this response doesn't have support info, don't overwrite the existing support info
if 'supports' in claim_info:
yield self.save_supports(claim_id, claim_info['supports'])
def save_content_claim(self, stream_hash, claim_outpoint):
def _save_content_claim(transaction):
# get the claim id and serialized metadata
claim_info = transaction.execute(
"select claim_id, serialized_metadata from claim where claim_outpoint=?", (claim_outpoint, )
).fetchone()
if not claim_info:
raise Exception("claim not found")
new_claim_id, claim = claim_info[0], ClaimDict.deserialize(claim_info[1].decode('hex'))
# certificate claims should not be in the content_claim table
if not claim.is_stream:
raise Exception("claim does not contain a stream")
# get the known sd hash for this stream
known_sd_hash = transaction.execute(
"select sd_hash from stream where stream_hash=?", (stream_hash, )
).fetchone()
if not known_sd_hash:
raise Exception("stream not found")
# check the claim contains the same sd hash
if known_sd_hash[0] != claim.source_hash:
raise Exception("stream mismatch")
# if there is a current claim associated to the file, check that the new claim is an update to it
current_associated_content = transaction.execute(
"select claim_outpoint from content_claim where stream_hash=?", (stream_hash, )
).fetchone()
if current_associated_content:
current_associated_claim_id = transaction.execute(
"select claim_id from claim where claim_outpoint=?", current_associated_content
).fetchone()[0]
if current_associated_claim_id != new_claim_id:
raise Exception("invalid stream update")
# update the claim associated to the file
transaction.execute("insert or replace into content_claim values (?, ?)", (stream_hash, claim_outpoint))
return self.db.runInteraction(_save_content_claim)
@defer.inlineCallbacks
def get_content_claim(self, stream_hash, include_supports=True):
def _get_content_claim(transaction):
claim_id = transaction.execute(
"select claim.claim_id from content_claim "
"inner join claim on claim.claim_outpoint=content_claim.claim_outpoint and content_claim.stream_hash=? "
"order by claim.rowid desc", (stream_hash, )
).fetchone()
if not claim_id:
return None
return claim_id[0]
content_claim_id = yield self.db.runInteraction(_get_content_claim)
result = None
if content_claim_id:
result = yield self.get_claim(content_claim_id, include_supports)
defer.returnValue(result)
@defer.inlineCallbacks
def get_claim(self, claim_id, include_supports=True):
def _claim_response(outpoint, claim_id, name, amount, height, serialized, channel_id, address, claim_sequence):
r = {
"name": name,
"claim_id": claim_id,
"address": address,
"claim_sequence": claim_sequence,
"value": ClaimDict.deserialize(serialized.decode('hex')).claim_dict,
"height": height,
"amount": float(Decimal(amount) / Decimal(COIN)),
"nout": int(outpoint.split(":")[1]),
"txid": outpoint.split(":")[0],
"channel_claim_id": channel_id,
"channel_name": None
}
return r
def _get_claim(transaction):
claim_info = transaction.execute(
"select * from claim where claim_id=? order by height, rowid desc", (claim_id, )
).fetchone()
result = _claim_response(*claim_info)
if result['channel_claim_id']:
channel_name_result = transaction.execute(
"select claim_name from claim where claim_id=?", (result['channel_claim_id'], )
).fetchone()
if channel_name_result:
result['channel_name'] = channel_name_result[0]
return result
result = yield self.db.runInteraction(_get_claim)
if include_supports:
supports = yield self.get_supports(result['claim_id'])
result['supports'] = supports
result['effective_amount'] = float(
sum([support['amount'] for support in supports]) + result['amount']
)
defer.returnValue(result)
def get_unknown_certificate_ids(self):
def _get_unknown_certificate_claim_ids(transaction):
return [
claim_id for (claim_id,) in transaction.execute(
"select distinct c1.channel_claim_id from claim as c1 "
"where c1.channel_claim_id!='' "
"and c1.channel_claim_id not in "
"(select c2.claim_id from claim as c2)"
).fetchall()
]
return self.db.runInteraction(_get_unknown_certificate_claim_ids)
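# the daemon uses this at startup after a migration to backfill certificate claims
# for signed content (see the batched get_claims_by_ids loop in the daemon startup)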

View file

@ -5,13 +5,13 @@ Utilities for turning plain files into LBRY Files.
import binascii
import logging
import os
from lbrynet.core.StreamDescriptor import PlainStreamDescriptorWriter
from lbrynet.cryptstream.CryptStreamCreator import CryptStreamCreator
from lbrynet import conf
from lbrynet.lbry_file.StreamDescriptor import get_sd_info
from lbrynet.core.cryptoutils import get_lbry_hash_obj
from twisted.internet import defer
from twisted.protocols.basic import FileSender
from lbrynet.core.StreamDescriptor import BlobStreamDescriptorWriter, EncryptedFileStreamType
from lbrynet.core.StreamDescriptor import format_sd_info, get_stream_hash
from lbrynet.cryptstream.CryptStreamCreator import CryptStreamCreator
log = logging.getLogger(__name__)
@ -20,58 +20,32 @@ class EncryptedFileStreamCreator(CryptStreamCreator):
"""
A CryptStreamCreator which adds itself and its additional metadata to an EncryptedFileManager
"""
def __init__(self, blob_manager, lbry_file_manager, name=None,
key=None, iv_generator=None, suggested_file_name=None):
CryptStreamCreator.__init__(self, blob_manager, name, key, iv_generator)
def __init__(self, blob_manager, lbry_file_manager, stream_name=None,
key=None, iv_generator=None):
CryptStreamCreator.__init__(self, blob_manager, stream_name, key, iv_generator)
self.lbry_file_manager = lbry_file_manager
self.suggested_file_name = suggested_file_name or name
self.stream_hash = None
self.blob_infos = []
self.sd_info = None
def _blob_finished(self, blob_info):
log.debug("length: %s", blob_info.length)
self.blob_infos.append(blob_info)
self.blob_infos.append(blob_info.get_dict())
return blob_info
def _save_stream_info(self):
stream_info_manager = self.lbry_file_manager.stream_info_manager
d = stream_info_manager.save_stream(self.stream_hash, hexlify(self.name),
hexlify(self.key),
hexlify(self.suggested_file_name),
self.blob_infos)
return d
def _get_blobs_hashsum(self):
blobs_hashsum = get_lbry_hash_obj()
for blob_info in sorted(self.blob_infos, key=lambda b_i: b_i.blob_num):
length = blob_info.length
if length != 0:
blob_hash = blob_info.blob_hash
else:
blob_hash = None
blob_num = blob_info.blob_num
iv = blob_info.iv
blob_hashsum = get_lbry_hash_obj()
if length != 0:
blob_hashsum.update(blob_hash)
blob_hashsum.update(str(blob_num))
blob_hashsum.update(iv)
blob_hashsum.update(str(length))
blobs_hashsum.update(blob_hashsum.digest())
return blobs_hashsum.digest()
def _make_stream_hash(self):
hashsum = get_lbry_hash_obj()
hashsum.update(hexlify(self.name))
hashsum.update(hexlify(self.key))
hashsum.update(hexlify(self.suggested_file_name))
hashsum.update(self._get_blobs_hashsum())
self.stream_hash = hashsum.hexdigest()
def _finished(self):
self._make_stream_hash()
d = self._save_stream_info()
return d
# calculate the stream hash
self.stream_hash = get_stream_hash(
hexlify(self.name), hexlify(self.key), hexlify(self.name),
self.blob_infos
)
# generate the sd info
self.sd_info = format_sd_info(
EncryptedFileStreamType, hexlify(self.name), hexlify(self.key),
hexlify(self.name), self.stream_hash, self.blob_infos
)
return defer.succeed(self.stream_hash)
# TODO: this should be run its own thread. Encrypting a large file can
@ -80,8 +54,8 @@ class EncryptedFileStreamCreator(CryptStreamCreator):
# great when sending over the network, but this is all local so
# we can simply read the file from the disk without needing to
# involve reactor.
def create_lbry_file(session, lbry_file_manager, file_name, file_handle, key=None,
iv_generator=None, suggested_file_name=None):
@defer.inlineCallbacks
def create_lbry_file(session, lbry_file_manager, file_name, file_handle, key=None, iv_generator=None):
"""Turn a plain file into an LBRY File.
An LBRY File is a collection of encrypted blobs of data and the metadata that binds them
@ -104,10 +78,6 @@ def create_lbry_file(session, lbry_file_manager, file_name, file_handle, key=Non
@param file_handle: The file-like object to read
@type file_handle: any file-like object which can be read by twisted.protocols.basic.FileSender
@param secret_pass_phrase: A string that will be used to generate the public key. If None, a
random string will be used.
@type secret_pass_phrase: string
@param key: the raw AES key which will be used to encrypt the blobs. If None, a random key will
be generated.
@type key: string
@ -116,53 +86,44 @@ def create_lbry_file(session, lbry_file_manager, file_name, file_handle, key=Non
vectors for the blobs. Will be called once for each blob.
@type iv_generator: a generator function which yields strings
@param suggested_file_name: what the file should be called when the LBRY File is saved to disk.
@type suggested_file_name: string
@return: a Deferred which fires with the stream_hash of the LBRY File
@rtype: Deferred which fires with hex-encoded string
"""
def stop_file(creator):
log.debug("the file sender has triggered its deferred. stopping the stream writer")
return creator.stop()
def make_stream_desc_file(stream_hash):
log.debug("creating the stream descriptor file")
descriptor_file_path = os.path.join(
session.db_dir, file_name + conf.settings['CRYPTSD_FILE_EXTENSION'])
descriptor_writer = PlainStreamDescriptorWriter(descriptor_file_path)
d = get_sd_info(lbry_file_manager.stream_info_manager, stream_hash, True)
d.addCallback(descriptor_writer.create_descriptor)
return d
base_file_name = os.path.basename(file_name)
file_directory = os.path.dirname(file_handle.name)
lbry_file_creator = EncryptedFileStreamCreator(
session.blob_manager,
lbry_file_manager,
base_file_name, key,
iv_generator,
suggested_file_name)
session.blob_manager, lbry_file_manager, base_file_name, key, iv_generator
)
def start_stream():
# TODO: Using FileSender isn't necessary, we can just read
# straight from the disk. The stream creation process
# should be in its own thread anyway so we don't need to
# worry about interacting with the twisted reactor
file_sender = FileSender()
d = file_sender.beginFileTransfer(file_handle, lbry_file_creator)
d.addCallback(lambda _: stop_file(lbry_file_creator))
d.addCallback(lambda _: make_stream_desc_file(lbry_file_creator.stream_hash))
d.addCallback(lambda _: lbry_file_creator.stream_hash)
return d
yield lbry_file_creator.setup()
# TODO: Using FileSender isn't necessary, we can just read
# straight from the disk. The stream creation process
# should be in its own thread anyway so we don't need to
# worry about interacting with the twisted reactor
file_sender = FileSender()
yield file_sender.beginFileTransfer(file_handle, lbry_file_creator)
d = lbry_file_creator.setup()
d.addCallback(lambda _: start_stream())
return d
log.debug("the file sender has triggered its deferred. stopping the stream writer")
yield lbry_file_creator.stop()
log.debug("making the sd blob")
sd_info = lbry_file_creator.sd_info
descriptor_writer = BlobStreamDescriptorWriter(session.blob_manager)
sd_hash = yield descriptor_writer.create_descriptor(sd_info)
log.debug("saving the stream")
yield session.storage.store_stream(
sd_info['stream_hash'], sd_hash, sd_info['stream_name'], sd_info['key'],
sd_info['suggested_file_name'], sd_info['blobs']
)
log.debug("adding to the file manager")
lbry_file = yield lbry_file_manager.add_published_file(
sd_info['stream_hash'], sd_hash, binascii.hexlify(file_directory), session.payment_rate_manager,
session.payment_rate_manager.min_blob_data_payment_rate
)
defer.returnValue(lbry_file)
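For orientation, a minimal sketch of driving the refactored coroutine above; the configured session and lbry_file_manager objects are assumed to already exist:

    @defer.inlineCallbacks
    def publish_example(session, lbry_file_manager):
        # create_lbry_file encrypts the file into blobs, writes the sd blob,
        # stores the stream in session.storage and returns the new lbry_file
        with open("example.bin", "rb") as file_handle:
            lbry_file = yield create_lbry_file(session, lbry_file_manager,
                                               "example.bin", file_handle)
        defer.returnValue(lbry_file.sd_hash)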
def hexlify(str_or_unicode):

View file

@ -2,18 +2,18 @@
Download LBRY Files from LBRYnet and save them to disk.
"""
import logging
import binascii
from zope.interface import implements
from twisted.internet import defer
from lbrynet.core.client.StreamProgressManager import FullStreamProgressManager
from lbrynet.core.utils import short_hash
from lbrynet.core.StreamDescriptor import StreamMetadata
from lbrynet.lbry_file.client.EncryptedFileDownloader import EncryptedFileSaver
from lbrynet.lbry_file.client.EncryptedFileDownloader import EncryptedFileDownloader
from lbrynet.file_manager.EncryptedFileStatusReport import EncryptedFileStatusReport
from lbrynet.interfaces import IStreamDownloaderFactory
from lbrynet.lbry_file.StreamDescriptor import save_sd_info
from lbrynet.core.StreamDescriptor import save_sd_info
log = logging.getLogger(__name__)
@ -35,19 +35,41 @@ class ManagedEncryptedFileDownloader(EncryptedFileSaver):
STATUS_STOPPED = "stopped"
STATUS_FINISHED = "finished"
def __init__(self, rowid, stream_hash, peer_finder, rate_limiter, blob_manager,
stream_info_manager, lbry_file_manager, payment_rate_manager, wallet,
download_directory, sd_hash=None, key=None, stream_name=None,
suggested_file_name=None):
EncryptedFileSaver.__init__(self, stream_hash, peer_finder,
rate_limiter, blob_manager,
stream_info_manager,
payment_rate_manager, wallet,
download_directory, key, stream_name, suggested_file_name)
def __init__(self, rowid, stream_hash, peer_finder, rate_limiter, blob_manager, storage, lbry_file_manager,
payment_rate_manager, wallet, download_directory, file_name, stream_name, sd_hash, key,
suggested_file_name):
EncryptedFileSaver.__init__(
self, stream_hash, peer_finder, rate_limiter, blob_manager, storage, payment_rate_manager, wallet,
download_directory, key, stream_name, file_name
)
self.sd_hash = sd_hash
self.rowid = rowid
self.suggested_file_name = binascii.unhexlify(suggested_file_name)
self.lbry_file_manager = lbry_file_manager
self._saving_status = False
self.claim_id = None
self.outpoint = None
self.claim_name = None
self.txid = None
self.nout = None
self.channel_claim_id = None
self.channel_name = None
self.metadata = None
@defer.inlineCallbacks
def get_claim_info(self, include_supports=True):
claim_info = yield self.storage.get_content_claim(self.stream_hash, include_supports)
if claim_info:
self.claim_id = claim_info['claim_id']
self.txid = claim_info['txid']
self.nout = claim_info['nout']
self.channel_claim_id = claim_info['channel_claim_id']
self.outpoint = "%s:%i" % (self.txid, self.nout)
self.claim_name = claim_info['name']
self.channel_name = claim_info['channel_name']
self.metadata = claim_info['value']['stream']['metadata']
defer.returnValue(claim_info)
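Judging from the fields read above, the dict returned by storage.get_content_claim has roughly this shape (placeholder values, not a definitive schema):

    claim_info = {
        'claim_id': '<claim id>',
        'name': '<claim name>',
        'txid': '<transaction id>',
        'nout': 0,
        'channel_claim_id': '<certificate claim id, or None>',
        'channel_name': '<channel name, or None>',
        'value': {'stream': {'metadata': {...}}},  # Metadata object, see lbryschema
    }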
@property
def saving_status(self):
@ -77,8 +99,8 @@ class ManagedEncryptedFileDownloader(EncryptedFileSaver):
@defer.inlineCallbacks
def status(self):
blobs = yield self.stream_info_manager.get_blobs_for_stream(self.stream_hash)
blob_hashes = [b[0] for b in blobs if b[0] is not None]
blobs = yield self.storage.get_blobs_for_stream(self.stream_hash)
blob_hashes = [b.blob_hash for b in blobs if b.blob_hash is not None]
completed_blobs = yield self.blob_manager.completed_blobs(blob_hashes)
num_blobs_completed = len(completed_blobs)
num_blobs_known = len(blob_hashes)
@ -89,8 +111,9 @@ class ManagedEncryptedFileDownloader(EncryptedFileSaver):
status = "stopped"
else:
status = "running"
defer.returnValue(EncryptedFileStatusReport(self.file_name, num_blobs_completed,
num_blobs_known, status))
defer.returnValue(EncryptedFileStatusReport(
self.file_name, num_blobs_completed, num_blobs_known, status
))
@defer.inlineCallbacks
def _start(self):
@ -137,19 +160,16 @@ class ManagedEncryptedFileDownloaderFactory(object):
return True
@defer.inlineCallbacks
def make_downloader(self, metadata, options, payment_rate_manager, download_directory=None):
assert len(options) == 1
data_rate = options[0]
stream_hash = yield save_sd_info(self.lbry_file_manager.stream_info_manager,
def make_downloader(self, metadata, data_rate, payment_rate_manager, download_directory, file_name=None):
stream_hash = yield save_sd_info(self.lbry_file_manager.session.blob_manager,
metadata.source_blob_hash,
metadata.validator.raw_info)
if metadata.metadata_source == StreamMetadata.FROM_BLOB:
yield self.lbry_file_manager.save_sd_blob_hash_to_stream(stream_hash,
metadata.source_blob_hash)
lbry_file = yield self.lbry_file_manager.add_lbry_file(stream_hash,
metadata.source_blob_hash,
payment_rate_manager,
data_rate,
download_directory)
if file_name:
file_name = binascii.hexlify(file_name)
lbry_file = yield self.lbry_file_manager.add_downloaded_file(
stream_hash, metadata.source_blob_hash, binascii.hexlify(download_directory), payment_rate_manager,
data_rate, file_name=file_name
)
defer.returnValue(lbry_file)
@staticmethod

View file

@ -1,9 +1,8 @@
"""
Keep track of which LBRY Files are downloading and store their LBRY File specific metadata
"""
import logging
import os
import logging
from twisted.internet import defer, task, reactor
from twisted.python.failure import Failure
@ -12,7 +11,7 @@ from lbrynet.reflector.reupload import reflect_stream
from lbrynet.core.PaymentRateManager import NegotiatedPaymentRateManager
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloader
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloaderFactory
from lbrynet.lbry_file.StreamDescriptor import EncryptedFileStreamType, get_sd_info
from lbrynet.core.StreamDescriptor import EncryptedFileStreamType, get_sd_info
from lbrynet.cryptstream.client.CryptStreamDownloader import AlreadyStoppedError
from lbrynet.cryptstream.client.CryptStreamDownloader import CurrentlyStoppingError
from lbrynet.core.utils import safe_start_looping_call, safe_stop_looping_call
@ -30,38 +29,33 @@ class EncryptedFileManager(object):
# when reflecting files, reflect up to this many files at a time
CONCURRENT_REFLECTS = 5
def __init__(self, session, stream_info_manager, sd_identifier, download_directory=None):
def __init__(self, session, sd_identifier):
self.auto_re_reflect = conf.settings['reflect_uploads']
self.auto_re_reflect_interval = conf.settings['auto_re_reflect_interval']
self.session = session
self.stream_info_manager = stream_info_manager
self.storage = session.storage
# TODO: why is sd_identifier part of the file manager?
self.sd_identifier = sd_identifier
assert sd_identifier
self.lbry_files = []
if download_directory:
self.download_directory = download_directory
else:
self.download_directory = os.getcwd()
self.lbry_file_reflector = task.LoopingCall(self.reflect_lbry_files)
log.debug("Download directory for EncryptedFileManager: %s", str(self.download_directory))
@defer.inlineCallbacks
def setup(self):
yield self.stream_info_manager.setup()
yield self._add_to_sd_identifier()
yield self._start_lbry_files()
log.info("Started file manager")
def get_lbry_file_status(self, lbry_file):
return self._get_lbry_file_status(lbry_file.rowid)
return self.session.storage.get_lbry_file_status(lbry_file.rowid)
def set_lbry_file_data_payment_rate(self, lbry_file, new_rate):
return self._set_lbry_file_payment_rate(lbry_file.rowid, new_rate)
# the original line called self.session.storage itself, which cannot work;
# the intended storage setter name is assumed here
return self.session.storage.set_lbry_file_payment_rate(lbry_file.rowid, new_rate)
def change_lbry_file_status(self, lbry_file, status):
log.debug("Changing status of %s to %s", lbry_file.stream_hash, status)
return self._change_file_status(lbry_file.rowid, status)
return self.session.storage.change_file_status(lbry_file.rowid, status)
def get_lbry_file_status_reports(self):
ds = []
@ -77,59 +71,55 @@ class EncryptedFileManager(object):
dl.addCallback(filter_failures)
return dl
def save_sd_blob_hash_to_stream(self, stream_hash, sd_hash):
return self.stream_info_manager.save_sd_blob_hash_to_stream(stream_hash, sd_hash)
def _add_to_sd_identifier(self):
downloader_factory = ManagedEncryptedFileDownloaderFactory(self)
self.sd_identifier.add_stream_downloader_factory(
EncryptedFileStreamType, downloader_factory)
def _get_lbry_file(self, rowid, stream_hash, payment_rate_manager, sd_hash, key,
stream_name, suggested_file_name, download_directory=None):
download_directory = download_directory or self.download_directory
payment_rate_manager = payment_rate_manager or self.session.payment_rate_manager
stream_name, file_name, download_directory, suggested_file_name):
return ManagedEncryptedFileDownloader(
rowid,
stream_hash,
self.session.peer_finder,
self.session.rate_limiter,
self.session.blob_manager,
self.stream_info_manager,
self.session.storage,
self,
payment_rate_manager,
self.session.wallet,
download_directory,
file_name,
stream_name=stream_name,
sd_hash=sd_hash,
key=key,
stream_name=stream_name,
suggested_file_name=suggested_file_name
)
@defer.inlineCallbacks
def _start_lbry_files(self):
files_and_options = yield self._get_all_lbry_files()
stream_infos = yield self.stream_info_manager._get_all_stream_infos()
files = yield self.session.storage.get_all_lbry_files()
b_prm = self.session.base_payment_rate_manager
payment_rate_manager = NegotiatedPaymentRateManager(b_prm, self.session.blob_tracker)
log.info("Trying to start %i files", len(stream_infos))
for i, (rowid, stream_hash, blob_data_rate, status) in enumerate(files_and_options):
if len(files_and_options) > 500 and i % 500 == 0:
log.info("Started %i/%i files", i, len(stream_infos))
if stream_hash in stream_infos:
lbry_file = self._get_lbry_file(rowid, stream_hash, payment_rate_manager,
stream_infos[stream_hash]['sd_hash'],
stream_infos[stream_hash]['key'],
stream_infos[stream_hash]['stream_name'],
stream_infos[stream_hash]['suggested_file_name'])
log.info("initialized file %s", lbry_file.stream_name)
try:
# restore will raise an Exception if status is unknown
lbry_file.restore(status)
self.lbry_files.append(lbry_file)
except Exception:
log.warning("Failed to start %i", rowid)
continue
log.info("Trying to start %i files", len(files))
for i, file_info in enumerate(files):
if len(files) > 500 and i % 500 == 0:
log.info("Started %i/%i files", i, len(files))
lbry_file = self._get_lbry_file(
file_info['row_id'], file_info['stream_hash'], payment_rate_manager, file_info['sd_hash'],
file_info['key'], file_info['stream_name'], file_info['file_name'], file_info['download_directory'],
file_info['suggested_file_name']
)
yield lbry_file.get_claim_info()
try:
# restore will raise an Exception if status is unknown
lbry_file.restore(file_info['status'])
self.lbry_files.append(lbry_file)
except Exception:
log.warning("Failed to start %i", file_info['rowid'])
continue
log.info("Started %i lbry files", len(self.lbry_files))
if self.auto_re_reflect is True:
safe_start_looping_call(self.lbry_file_reflector, self.auto_re_reflect_interval)
@ -157,17 +147,46 @@ class EncryptedFileManager(object):
yield self._stop_lbry_file(lbry_file)
@defer.inlineCallbacks
def add_lbry_file(self, stream_hash, sd_hash, payment_rate_manager=None, blob_data_rate=None,
download_directory=None, status=None):
rowid = yield self._save_lbry_file(stream_hash, blob_data_rate)
stream_metadata = yield get_sd_info(self.stream_info_manager,
stream_hash, False)
def add_published_file(self, stream_hash, sd_hash, download_directory, payment_rate_manager, blob_data_rate):
status = ManagedEncryptedFileDownloader.STATUS_FINISHED
stream_metadata = yield get_sd_info(self.session.storage, stream_hash, include_blobs=False)
key = stream_metadata['key']
stream_name = stream_metadata['stream_name']
suggested_file_name = stream_metadata['suggested_file_name']
lbry_file = self._get_lbry_file(rowid, stream_hash, payment_rate_manager, sd_hash, key,
stream_name, suggested_file_name, download_directory)
lbry_file.restore(status or ManagedEncryptedFileDownloader.STATUS_STOPPED)
file_name = stream_metadata['suggested_file_name']
rowid = yield self.storage.save_published_file(
stream_hash, file_name, download_directory, blob_data_rate, status
)
lbry_file = self._get_lbry_file(
rowid, stream_hash, payment_rate_manager, sd_hash, key, stream_name, file_name, download_directory,
stream_metadata['suggested_file_name']
)
lbry_file.restore(status)
self.lbry_files.append(lbry_file)
defer.returnValue(lbry_file)
@defer.inlineCallbacks
def add_downloaded_file(self, stream_hash, sd_hash, download_directory, payment_rate_manager=None,
blob_data_rate=None, status=None, file_name=None):
status = status or ManagedEncryptedFileDownloader.STATUS_STOPPED
payment_rate_manager = payment_rate_manager or self.session.payment_rate_manager
blob_data_rate = blob_data_rate or payment_rate_manager.min_blob_data_payment_rate
stream_metadata = yield get_sd_info(self.session.storage, stream_hash, include_blobs=False)
key = stream_metadata['key']
stream_name = stream_metadata['stream_name']
file_name = file_name or stream_metadata['suggested_file_name']
# when we save the file we atomically create ("touch") the closest available
# variant of the suggested file name that doesn't yet exist in the download directory
rowid = yield self.storage.save_downloaded_file(
stream_hash, os.path.basename(file_name.decode('hex')).encode('hex'), download_directory, blob_data_rate
)
file_name = yield self.session.storage.get_filename_for_rowid(rowid)
lbry_file = self._get_lbry_file(
rowid, stream_hash, payment_rate_manager, sd_hash, key, stream_name, file_name, download_directory,
stream_metadata['suggested_file_name']
)
yield lbry_file.get_claim_info(include_supports=False)
lbry_file.restore(status)
self.lbry_files.append(lbry_file)
defer.returnValue(lbry_file)
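Note the Python 2 hex codec round-trip above, which strips any directory component from the hex-encoded file name; a hypothetical example:

    >>> file_name = '/tmp/my file'.encode('hex')        # '2f746d702f6d792066696c65'
    >>> os.path.basename(file_name.decode('hex')).encode('hex')
    '6d792066696c65'                                     # hex of 'my file'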
@ -191,22 +210,8 @@ class EncryptedFileManager(object):
self.lbry_files.remove(lbry_file)
yield self._delete_lbry_file_options(lbry_file.rowid)
yield lbry_file.delete_data()
# TODO: delete this
# get count for stream hash returns the count of the lbry files with the stream hash
# in the lbry_file_options table, which will soon be removed.
stream_count = yield self.get_count_for_stream_hash(lbry_file.stream_hash)
if stream_count == 0:
yield self.stream_info_manager.delete_stream(lbry_file.stream_hash)
else:
msg = ("Can't delete stream info for %s, count is %i\n"
"The call that resulted in this warning will\n"
"be removed in the database refactor")
log.warning(msg, lbry_file.stream_hash, stream_count)
yield self.session.storage.delete_stream(lbry_file.stream_hash)
if delete_file and os.path.isfile(full_path):
os.remove(full_path)
@ -234,30 +239,3 @@ class EncryptedFileManager(object):
yield defer.DeferredList(list(self._stop_lbry_files()))
log.info("Stopped encrypted file manager")
defer.returnValue(True)
def get_count_for_stream_hash(self, stream_hash):
return self._get_count_for_stream_hash(stream_hash)
def _get_count_for_stream_hash(self, stream_hash):
return self.stream_info_manager._get_count_for_stream_hash(stream_hash)
def _delete_lbry_file_options(self, rowid):
return self.stream_info_manager._delete_lbry_file_options(rowid)
def _save_lbry_file(self, stream_hash, data_payment_rate):
return self.stream_info_manager._save_lbry_file(stream_hash, data_payment_rate)
def _get_all_lbry_files(self):
return self.stream_info_manager._get_all_lbry_files()
def _get_rowid_for_stream_hash(self, stream_hash):
return self.stream_info_manager._get_rowid_for_stream_hash(stream_hash)
def _change_file_status(self, rowid, status):
return self.stream_info_manager._change_file_status(rowid, status)
def _set_lbry_file_payment_rate(self, rowid, new_rate):
return self.stream_info_manager._set_lbry_file_payment_rate(rowid, new_rate)
def _get_lbry_file_status(self, rowid):
return self.stream_info_manager._get_lbry_file_status(rowid)

View file

@ -1,378 +0,0 @@
import os
import logging
import sqlite3
from twisted.internet import defer
from twisted.python.failure import Failure
from twisted.enterprise import adbapi
from lbrynet.core.Error import DuplicateStreamHashError, NoSuchStreamHash, NoSuchSDHash
from lbrynet.core.sqlite_helpers import rerun_if_locked
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloader
log = logging.getLogger(__name__)
class DBEncryptedFileMetadataManager(object):
"""Store and provide access to LBRY file metadata using sqlite"""
def __init__(self, db_dir, file_name=None):
self.db_dir = db_dir
self._db_file_name = file_name or "lbryfile_info.db"
self.db_conn = adbapi.ConnectionPool("sqlite3", os.path.join(self.db_dir,
self._db_file_name),
check_same_thread=False)
def setup(self):
return self._open_db()
def stop(self):
self.db_conn.close()
return defer.succeed(True)
def get_all_streams(self):
return self._get_all_streams()
def save_stream(self, stream_hash, file_name, key, suggested_file_name, blobs):
d = self._store_stream(stream_hash, file_name, key, suggested_file_name)
d.addCallback(lambda _: self.add_blobs_to_stream(stream_hash, blobs))
return d
def get_stream_info(self, stream_hash):
return self._get_stream_info(stream_hash)
def check_if_stream_exists(self, stream_hash):
return self._check_if_stream_exists(stream_hash)
def delete_stream(self, stream_hash):
return self._delete_stream(stream_hash)
def add_blobs_to_stream(self, stream_hash, blobs):
return self._add_blobs_to_stream(stream_hash, blobs, ignore_duplicate_error=True)
def get_blobs_for_stream(self, stream_hash, start_blob=None,
end_blob=None, count=None, reverse=False):
log.debug("Getting blobs for stream %s. Count is %s", stream_hash, count)
def get_positions_of_start_and_end():
if start_blob is not None:
d1 = self._get_blob_num_by_hash(stream_hash, start_blob)
else:
d1 = defer.succeed(None)
if end_blob is not None:
d2 = self._get_blob_num_by_hash(stream_hash, end_blob)
else:
d2 = defer.succeed(None)
dl = defer.DeferredList([d1, d2])
def get_positions(results):
start_num = None
end_num = None
if results[0][0] is True:
start_num = results[0][1]
if results[1][0] is True:
end_num = results[1][1]
return start_num, end_num
dl.addCallback(get_positions)
return dl
def get_blob_infos(nums):
start_num, end_num = nums
return self._get_further_blob_infos(stream_hash, start_num, end_num,
count, reverse)
d = get_positions_of_start_and_end()
d.addCallback(get_blob_infos)
return d
def get_stream_of_blob(self, blob_hash):
return self._get_stream_of_blobhash(blob_hash)
def save_sd_blob_hash_to_stream(self, stream_hash, sd_blob_hash):
return self._save_sd_blob_hash_to_stream(stream_hash, sd_blob_hash)
def get_sd_blob_hashes_for_stream(self, stream_hash):
return self._get_sd_blob_hashes_for_stream(stream_hash)
def get_stream_hash_for_sd_hash(self, sd_hash):
return self._get_stream_hash_for_sd_blob_hash(sd_hash)
@staticmethod
def _create_tables(transaction):
transaction.execute("create table if not exists lbry_files (" +
" stream_hash text primary key, " +
" key text, " +
" stream_name text, " +
" suggested_file_name text" +
")")
transaction.execute("create table if not exists lbry_file_blobs (" +
" blob_hash text, " +
" stream_hash text, " +
" position integer, " +
" iv text, " +
" length integer, " +
" foreign key(stream_hash) references lbry_files(stream_hash)" +
")")
transaction.execute("create table if not exists lbry_file_descriptors (" +
" sd_blob_hash TEXT PRIMARY KEY, " +
" stream_hash TEXT, " +
" foreign key(stream_hash) references lbry_files(stream_hash)" +
")")
transaction.execute("create table if not exists lbry_file_options (" +
" blob_data_rate real, " +
" status text," +
" stream_hash text,"
" foreign key(stream_hash) references lbry_files(stream_hash)" +
")")
transaction.execute("create table if not exists lbry_file_metadata (" +
" lbry_file integer primary key, " +
" txid text, " +
" n integer, " +
" foreign key(lbry_file) references lbry_files(rowid)"
")")
def _open_db(self):
# check_same_thread=False is solely to quiet a spurious error that appears to be due
# to a bug in twisted, where the connection is closed by a different thread than the
# one that opened it. The individual connections in the pool are not used in multiple
# threads.
return self.db_conn.runInteraction(self._create_tables)
@rerun_if_locked
@defer.inlineCallbacks
def get_file_outpoint(self, rowid):
result = yield self.db_conn.runQuery("select txid, n from lbry_file_metadata "
"where lbry_file=?", (rowid, ))
response = None
if result:
txid, nout = result[0]
if txid is not None and nout is not None:
response = "%s:%i" % (txid, nout)
defer.returnValue(response)
@rerun_if_locked
@defer.inlineCallbacks
def save_outpoint_to_file(self, rowid, txid, nout):
existing_outpoint = yield self.get_file_outpoint(rowid)
if not existing_outpoint:
yield self.db_conn.runOperation("insert into lbry_file_metadata values "
"(?, ?, ?)", (rowid, txid, nout))
@rerun_if_locked
def _delete_stream(self, stream_hash):
d = self.db_conn.runQuery(
"select rowid, stream_hash from lbry_files where stream_hash = ?", (stream_hash,))
d.addCallback(
lambda result: result[0] if result else Failure(NoSuchStreamHash(stream_hash)))
def do_delete(transaction, row_id, s_h):
transaction.execute("delete from lbry_files where stream_hash = ?", (s_h,))
transaction.execute("delete from lbry_file_blobs where stream_hash = ?", (s_h,))
transaction.execute("delete from lbry_file_descriptors where stream_hash = ?", (s_h,))
transaction.execute("delete from lbry_file_metadata where lbry_file = ?", (row_id,))
d.addCallback(lambda (row_id, s_h): self.db_conn.runInteraction(do_delete, row_id, s_h))
return d
@rerun_if_locked
def _store_stream(self, stream_hash, name, key, suggested_file_name):
d = self.db_conn.runQuery("insert into lbry_files values (?, ?, ?, ?)",
(stream_hash, key, name, suggested_file_name))
def check_duplicate(err):
if err.check(sqlite3.IntegrityError):
raise DuplicateStreamHashError(stream_hash)
return err
d.addErrback(check_duplicate)
return d
@rerun_if_locked
def _get_all_streams(self):
d = self.db_conn.runQuery("select stream_hash from lbry_files")
d.addCallback(lambda results: [r[0] for r in results])
return d
@rerun_if_locked
def _get_stream_info(self, stream_hash):
def get_result(res):
if res:
return res[0]
else:
raise NoSuchStreamHash(stream_hash)
d = self.db_conn.runQuery(
"select key, stream_name, suggested_file_name from lbry_files where stream_hash = ?",
(stream_hash,))
d.addCallback(get_result)
return d
@rerun_if_locked
@defer.inlineCallbacks
def _get_all_stream_infos(self):
file_results = yield self.db_conn.runQuery("select rowid, * from lbry_files")
descriptor_results = yield self.db_conn.runQuery("select stream_hash, sd_blob_hash "
"from lbry_file_descriptors")
response = {}
for (stream_hash, sd_hash) in descriptor_results:
if stream_hash in response:
log.warning("Duplicate stream %s (sd: %s)", stream_hash, sd_hash[:16])
continue
response[stream_hash] = {
'sd_hash': sd_hash
}
for (rowid, stream_hash, key, stream_name, suggested_file_name) in file_results:
if stream_hash not in response:
log.warning("Missing sd hash for %s", stream_hash)
continue
response[stream_hash]['rowid'] = rowid
response[stream_hash]['key'] = key
response[stream_hash]['stream_name'] = stream_name
response[stream_hash]['suggested_file_name'] = suggested_file_name
defer.returnValue(response)
@rerun_if_locked
def _check_if_stream_exists(self, stream_hash):
d = self.db_conn.runQuery(
"select stream_hash from lbry_files where stream_hash = ?", (stream_hash,))
d.addCallback(lambda r: True if len(r) else False)
return d
@rerun_if_locked
def _get_blob_num_by_hash(self, stream_hash, blob_hash):
d = self.db_conn.runQuery(
"select position from lbry_file_blobs where stream_hash = ? and blob_hash = ?",
(stream_hash, blob_hash))
d.addCallback(lambda r: r[0][0] if len(r) else None)
return d
@rerun_if_locked
def _get_further_blob_infos(self, stream_hash, start_num, end_num, count=None, reverse=False):
params = []
q_string = "select * from ("
q_string += " select blob_hash, position, iv, length from lbry_file_blobs "
q_string += " where stream_hash = ? "
params.append(stream_hash)
if start_num is not None:
q_string += " and position > ? "
params.append(start_num)
if end_num is not None:
q_string += " and position < ? "
params.append(end_num)
q_string += " order by position "
if reverse is True:
q_string += " DESC "
if count is not None:
q_string += " limit ? "
params.append(count)
q_string += ") order by position"
# "order by position" appears twice so that rows are always returned from lowest to
# highest position, while the inner LIMIT clause can still select either the 'count'
# highest or the 'count' lowest rows
return self.db_conn.runQuery(q_string, tuple(params))
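As an illustration with hypothetical values, for start_num=5, count=3 and reverse=True the method builds the following q_string (reformatted for readability), which selects the three highest positions above 5 while still returning them in ascending order:

    # params = (stream_hash, 5, 3)
    # select * from (
    #     select blob_hash, position, iv, length from lbry_file_blobs
    #     where stream_hash = ? and position > ?
    #     order by position DESC limit ?
    # ) order by position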
@rerun_if_locked
def _add_blobs_to_stream(self, stream_hash, blob_infos, ignore_duplicate_error=False):
def add_blobs(transaction):
for blob_info in blob_infos:
try:
transaction.execute("insert into lbry_file_blobs values (?, ?, ?, ?, ?)",
(blob_info.blob_hash, stream_hash, blob_info.blob_num,
blob_info.iv, blob_info.length))
except sqlite3.IntegrityError:
if ignore_duplicate_error is False:
raise
return self.db_conn.runInteraction(add_blobs)
@rerun_if_locked
def _get_stream_of_blobhash(self, blob_hash):
d = self.db_conn.runQuery("select stream_hash from lbry_file_blobs where blob_hash = ?",
(blob_hash,))
d.addCallback(lambda r: r[0][0] if len(r) else None)
return d
@rerun_if_locked
def _save_sd_blob_hash_to_stream(self, stream_hash, sd_blob_hash):
d = self.db_conn.runOperation("insert or ignore into lbry_file_descriptors values (?, ?)",
(sd_blob_hash, stream_hash))
d.addCallback(lambda _: log.info("Saved sd blob hash %s to stream hash %s",
str(sd_blob_hash), str(stream_hash)))
return d
@rerun_if_locked
def _get_sd_blob_hashes_for_stream(self, stream_hash):
log.debug("Looking up sd blob hashes for stream hash %s", str(stream_hash))
d = self.db_conn.runQuery(
"select sd_blob_hash from lbry_file_descriptors where stream_hash = ?",
(stream_hash,))
d.addCallback(lambda results: [r[0] for r in results])
return d
@rerun_if_locked
def _get_stream_hash_for_sd_blob_hash(self, sd_blob_hash):
def _handle_result(result):
if not result:
raise NoSuchSDHash(sd_blob_hash)
return result[0][0]
log.debug("Looking up sd blob hashes for sd blob hash %s", str(sd_blob_hash))
d = self.db_conn.runQuery(
"select stream_hash from lbry_file_descriptors where sd_blob_hash = ?",
(sd_blob_hash,))
d.addCallback(_handle_result)
return d
# used by lbry file manager
@rerun_if_locked
def _save_lbry_file(self, stream_hash, data_payment_rate):
def do_save(db_transaction):
row = (data_payment_rate, ManagedEncryptedFileDownloader.STATUS_STOPPED, stream_hash)
db_transaction.execute("insert into lbry_file_options values (?, ?, ?)", row)
return db_transaction.lastrowid
return self.db_conn.runInteraction(do_save)
@rerun_if_locked
def _delete_lbry_file_options(self, rowid):
return self.db_conn.runQuery("delete from lbry_file_options where rowid = ?",
(rowid,))
@rerun_if_locked
def _set_lbry_file_payment_rate(self, rowid, new_rate):
return self.db_conn.runQuery(
"update lbry_file_options set blob_data_rate = ? where rowid = ?",
(new_rate, rowid))
@rerun_if_locked
def _get_all_lbry_files(self):
d = self.db_conn.runQuery("select rowid, stream_hash, blob_data_rate, status "
"from lbry_file_options")
return d
@rerun_if_locked
def _change_file_status(self, rowid, new_status):
d = self.db_conn.runQuery("update lbry_file_options set status = ? where rowid = ?",
(new_status, rowid))
d.addCallback(lambda _: new_status)
return d
@rerun_if_locked
def _get_lbry_file_status(self, rowid):
d = self.db_conn.runQuery("select status from lbry_file_options where rowid = ?",
(rowid,))
d.addCallback(lambda r: (r[0][0] if len(r) else None))
return d
@rerun_if_locked
def _get_count_for_stream_hash(self, stream_hash):
d = self.db_conn.runQuery("select count(*) from lbry_file_options where stream_hash = ?",
(stream_hash,))
d.addCallback(lambda r: (r[0][0] if r else 0))
return d
@rerun_if_locked
def _get_rowid_for_stream_hash(self, stream_hash):
d = self.db_conn.runQuery("select rowid from lbry_file_options where stream_hash = ?",
(stream_hash,))
d.addCallback(lambda r: (r[0][0] if len(r) else None))
return d

View file

@ -1,185 +0,0 @@
import binascii
import logging
from lbrynet.core.cryptoutils import get_lbry_hash_obj
from lbrynet.cryptstream.CryptBlob import CryptBlobInfo
from twisted.internet import defer, threads
from lbrynet.core.Error import DuplicateStreamHashError, InvalidStreamDescriptorError
from lbrynet.core.StreamDescriptor import PlainStreamDescriptorWriter, BlobStreamDescriptorWriter
import os
log = logging.getLogger(__name__)
EncryptedFileStreamType = "lbryfile"
def save_sd_info(stream_info_manager, sd_info, ignore_duplicate=False):
log.debug("Saving info for %s", str(sd_info['stream_name']))
hex_stream_name = sd_info['stream_name']
key = sd_info['key']
stream_hash = sd_info['stream_hash']
raw_blobs = sd_info['blobs']
suggested_file_name = sd_info['suggested_file_name']
crypt_blobs = []
for blob in raw_blobs:
length = blob['length']
if length != 0:
blob_hash = blob['blob_hash']
else:
blob_hash = None
blob_num = blob['blob_num']
iv = blob['iv']
crypt_blobs.append(CryptBlobInfo(blob_hash, blob_num, length, iv))
log.debug("Trying to save stream info for %s", str(hex_stream_name))
d = stream_info_manager.save_stream(stream_hash, hex_stream_name, key,
suggested_file_name, crypt_blobs)
def check_if_duplicate(err):
if ignore_duplicate is True:
err.trap(DuplicateStreamHashError)
d.addErrback(check_if_duplicate)
d.addCallback(lambda _: stream_hash)
return d
def get_sd_info(stream_info_manager, stream_hash, include_blobs):
d = stream_info_manager.get_stream_info(stream_hash)
def format_info(stream_info):
fields = {}
fields['stream_type'] = EncryptedFileStreamType
fields['stream_name'] = stream_info[1]
fields['key'] = stream_info[0]
fields['suggested_file_name'] = stream_info[2]
fields['stream_hash'] = stream_hash
def format_blobs(blobs):
formatted_blobs = []
for blob_hash, blob_num, iv, length in blobs:
blob = {}
if length != 0:
blob['blob_hash'] = blob_hash
blob['blob_num'] = blob_num
blob['iv'] = iv
blob['length'] = length
formatted_blobs.append(blob)
fields['blobs'] = formatted_blobs
return fields
if include_blobs is True:
d = stream_info_manager.get_blobs_for_stream(stream_hash)
else:
d = defer.succeed([])
d.addCallback(format_blobs)
return d
d.addCallback(format_info)
return d
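The sd_info dict passed between save_sd_info and get_sd_info has the following shape; the field names come from the accessors above, the values are illustrative placeholders:

    sd_info = {
        'stream_type': 'lbryfile',
        'stream_name': '<hex-encoded name>',
        'key': '<hex-encoded AES key>',
        'suggested_file_name': '<hex-encoded name>',
        'stream_hash': '<hex digest>',
        'blobs': [
            {'blob_hash': '<hash>', 'blob_num': 0, 'iv': '<hex iv>', 'length': 2097152},
            {'blob_num': 1, 'iv': '<hex iv>', 'length': 0},  # terminator omits blob_hash
        ],
    }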
@defer.inlineCallbacks
def publish_sd_blob(stream_info_manager, blob_manager, stream_hash):
descriptor_writer = BlobStreamDescriptorWriter(blob_manager)
sd_info = yield get_sd_info(stream_info_manager, stream_hash, True)
sd_blob_hash = yield descriptor_writer.create_descriptor(sd_info)
yield stream_info_manager.save_sd_blob_hash_to_stream(stream_hash, sd_blob_hash)
defer.returnValue(sd_blob_hash)
def create_plain_sd(stream_info_manager, stream_hash, file_name, overwrite_existing=False):
def _get_file_name():
actual_file_name = file_name
if os.path.exists(actual_file_name):
ext_num = 1
while os.path.exists(actual_file_name + "_" + str(ext_num)):
ext_num += 1
actual_file_name = actual_file_name + "_" + str(ext_num)
return actual_file_name
if overwrite_existing is False:
d = threads.deferToThread(_get_file_name())
else:
d = defer.succeed(file_name)
def do_create(file_name):
descriptor_writer = PlainStreamDescriptorWriter(file_name)
d = get_sd_info(stream_info_manager, stream_hash, True)
d.addCallback(descriptor_writer.create_descriptor)
return d
d.addCallback(do_create)
return d
class EncryptedFileStreamDescriptorValidator(object):
def __init__(self, raw_info):
self.raw_info = raw_info
def validate(self):
log.debug("Trying to validate stream descriptor for %s", str(self.raw_info['stream_name']))
try:
hex_stream_name = self.raw_info['stream_name']
key = self.raw_info['key']
hex_suggested_file_name = self.raw_info['suggested_file_name']
stream_hash = self.raw_info['stream_hash']
blobs = self.raw_info['blobs']
except KeyError as e:
raise InvalidStreamDescriptorError("Missing '%s'" % (e.args[0]))
for c in hex_suggested_file_name:
if c not in '0123456789abcdef':
raise InvalidStreamDescriptorError(
"Suggested file name is not a hex-encoded string")
h = get_lbry_hash_obj()
h.update(hex_stream_name)
h.update(key)
h.update(hex_suggested_file_name)
def get_blob_hashsum(b):
length = b['length']
if length != 0:
blob_hash = b['blob_hash']
else:
blob_hash = None
blob_num = b['blob_num']
iv = b['iv']
blob_hashsum = get_lbry_hash_obj()
if length != 0:
blob_hashsum.update(blob_hash)
blob_hashsum.update(str(blob_num))
blob_hashsum.update(iv)
blob_hashsum.update(str(length))
return blob_hashsum.digest()
blobs_hashsum = get_lbry_hash_obj()
for blob in blobs:
blobs_hashsum.update(get_blob_hashsum(blob))
if blobs[-1]['length'] != 0:
raise InvalidStreamDescriptorError("Does not end with a zero-length blob.")
h.update(blobs_hashsum.digest())
if h.hexdigest() != stream_hash:
raise InvalidStreamDescriptorError("Stream hash does not match stream metadata")
log.debug("It is validated")
return defer.succeed(True)
def info_to_show(self):
info = []
info.append(("stream_name", binascii.unhexlify(self.raw_info.get("stream_name"))))
size_so_far = 0
for blob_info in self.raw_info.get("blobs", []):
size_so_far += int(blob_info['length'])
info.append(("stream_size", str(self.get_length_of_stream())))
suggested_file_name = self.raw_info.get("suggested_file_name", None)
if suggested_file_name is not None:
suggested_file_name = binascii.unhexlify(suggested_file_name)
info.append(("suggested_file_name", suggested_file_name))
return info
def get_length_of_stream(self):
size_so_far = 0
for blob_info in self.raw_info.get("blobs", []):
size_so_far += int(blob_info['length'])
return size_so_far
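A condensed sketch of the stream hash computation that validate() above checks; get_lbry_hash_obj is the project's hash constructor and every input is the hex-encoded descriptor field:

    def compute_stream_hash(hex_stream_name, key, hex_suggested_file_name, blobs):
        h = get_lbry_hash_obj()
        h.update(hex_stream_name)
        h.update(key)
        h.update(hex_suggested_file_name)
        blobs_hashsum = get_lbry_hash_obj()
        for b in blobs:
            blob_hashsum = get_lbry_hash_obj()
            if b['length'] != 0:
                blob_hashsum.update(b['blob_hash'])
            blob_hashsum.update(str(b['blob_num']))
            blob_hashsum.update(b['iv'])
            blob_hashsum.update(str(b['length']))
            blobs_hashsum.update(blob_hashsum.digest())
        h.update(blobs_hashsum.digest())
        return h.hexdigest()  # must equal the descriptor's stream_hash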

View file

@ -1,2 +0,0 @@
from lbrynet.lbry_file.StreamDescriptor import get_sd_info
from lbrynet.lbry_file.StreamDescriptor import publish_sd_blob

View file

@ -2,10 +2,9 @@ import binascii
from zope.interface import implements
from lbrynet.lbry_file.StreamDescriptor import save_sd_info
from lbrynet.core.StreamDescriptor import save_sd_info
from lbrynet.cryptstream.client.CryptStreamDownloader import CryptStreamDownloader
from lbrynet.core.client.StreamProgressManager import FullStreamProgressManager
from lbrynet.core.StreamDescriptor import StreamMetadata
from lbrynet.interfaces import IStreamDownloaderFactory
from lbrynet.lbry_file.client.EncryptedFileMetadataHandler import EncryptedFileMetadataHandler
import os
@ -21,39 +20,21 @@ class EncryptedFileDownloader(CryptStreamDownloader):
"""Classes which inherit from this class download LBRY files"""
def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager,
stream_info_manager, payment_rate_manager, wallet, key, stream_name,
suggested_file_name=None):
storage, payment_rate_manager, wallet, key, stream_name, file_name):
CryptStreamDownloader.__init__(self, peer_finder, rate_limiter, blob_manager,
payment_rate_manager, wallet, key, stream_name)
self.stream_hash = stream_hash
self.stream_info_manager = stream_info_manager
self.suggested_file_name = binascii.unhexlify(suggested_file_name)
self.storage = storage
self.file_name = binascii.unhexlify(os.path.basename(file_name))
self._calculated_total_bytes = None
@defer.inlineCallbacks
def delete_data(self):
d1 = self.stream_info_manager.get_blobs_for_stream(self.stream_hash)
def get_blob_hashes(blob_infos):
return [b[0] for b in blob_infos if b[0] is not None]
d1.addCallback(get_blob_hashes)
d2 = self.stream_info_manager.get_sd_blob_hashes_for_stream(self.stream_hash)
def combine_blob_hashes(results):
blob_hashes = []
for success, result in results:
if success is True:
blob_hashes.extend(result)
return blob_hashes
def delete_blobs(blob_hashes):
self.blob_manager.delete_blobs(blob_hashes)
return True
dl = defer.DeferredList([d1, d2], fireOnOneErrback=True)
dl.addCallback(combine_blob_hashes)
dl.addCallback(delete_blobs)
return dl
crypt_infos = yield self.storage.get_blobs_for_stream(self.stream_hash)
blob_hashes = [b.blob_hash for b in crypt_infos if b.blob_hash]
sd_hash = yield self.storage.get_sd_blob_hash_for_stream(self.stream_hash)
blob_hashes.append(sd_hash)
yield self.blob_manager.delete_blobs(blob_hashes)
def stop(self, err=None):
d = self._close_output()
@ -76,10 +57,10 @@ class EncryptedFileDownloader(CryptStreamDownloader):
pass
def get_total_bytes(self):
d = self.stream_info_manager.get_blobs_for_stream(self.stream_hash)
d = self.storage.get_blobs_for_stream(self.stream_hash)
def calculate_size(blobs):
return sum([b[3] for b in blobs])
return sum([b.length for b in blobs])
d.addCallback(calculate_size)
return d
@ -106,18 +87,17 @@ class EncryptedFileDownloader(CryptStreamDownloader):
def _get_metadata_handler(self, download_manager):
return EncryptedFileMetadataHandler(self.stream_hash,
self.stream_info_manager, download_manager)
self.storage, download_manager)
class EncryptedFileDownloaderFactory(object):
implements(IStreamDownloaderFactory)
def __init__(self, peer_finder, rate_limiter, blob_manager, stream_info_manager,
wallet):
def __init__(self, peer_finder, rate_limiter, blob_manager, storage, wallet):
self.peer_finder = peer_finder
self.rate_limiter = rate_limiter
self.blob_manager = blob_manager
self.stream_info_manager = stream_info_manager
self.storage = storage
self.wallet = wallet
def can_download(self, sd_validator):
@ -129,22 +109,14 @@ class EncryptedFileDownloaderFactory(object):
payment_rate_manager.min_blob_data_payment_rate = data_rate
def save_source_if_blob(stream_hash):
if metadata.metadata_source == StreamMetadata.FROM_BLOB:
d = self.stream_info_manager.save_sd_blob_hash_to_stream(
stream_hash, metadata.source_blob_hash)
else:
d = defer.succeed(True)
d.addCallback(lambda _: stream_hash)
return d
return defer.succeed(metadata.source_blob_hash)
def create_downloader(stream_hash):
downloader = self._make_downloader(stream_hash, payment_rate_manager,
metadata.validator.raw_info)
d = downloader.set_stream_info()
d.addCallback(lambda _: downloader)
return d
return defer.succeed(downloader)
d = save_sd_info(self.stream_info_manager, metadata.validator.raw_info)
d = save_sd_info(self.blob_manager, metadata.source_blob_hash, metadata.validator.raw_info)
d.addCallback(save_source_if_blob)
d.addCallback(create_downloader)
return d
@ -154,26 +126,20 @@ class EncryptedFileDownloaderFactory(object):
class EncryptedFileSaver(EncryptedFileDownloader):
def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, stream_info_manager,
payment_rate_manager, wallet, download_directory, key, stream_name,
suggested_file_name):
def __init__(self, stream_hash, peer_finder, rate_limiter, blob_manager, storage, payment_rate_manager, wallet,
download_directory, key, stream_name, file_name):
EncryptedFileDownloader.__init__(self, stream_hash, peer_finder, rate_limiter,
blob_manager, stream_info_manager, payment_rate_manager,
wallet, key, stream_name, suggested_file_name)
self.download_directory = download_directory
self.file_name = os.path.basename(self.suggested_file_name)
self.file_written_to = None
blob_manager, storage, payment_rate_manager,
wallet, key, stream_name, file_name)
self.download_directory = binascii.unhexlify(download_directory)
self.file_written_to = os.path.join(self.download_directory, binascii.unhexlify(file_name))
self.file_handle = None
def __str__(self):
if self.file_written_to is not None:
return str(self.file_written_to)
else:
return str(self.file_name)
return str(self.file_written_to)
def stop(self, err=None):
d = EncryptedFileDownloader.stop(self, err=err)
d.addCallback(lambda _: self._delete_from_info_manager())
return d
def _get_progress_manager(self, download_manager):
@ -184,34 +150,16 @@ class EncryptedFileSaver(EncryptedFileDownloader):
def _setup_output(self):
def open_file():
if self.file_handle is None:
file_name = self.file_name
if not file_name:
file_name = "_"
if os.path.exists(os.path.join(self.download_directory, file_name)):
ext_num = 1
def _get_file_name(ext):
if len(file_name.split(".")):
fn = ''.join(file_name.split(".")[:-1])
file_ext = ''.join(file_name.split(".")[-1])
return fn + "-" + str(ext) + "." + file_ext
else:
return file_name + "_" + str(ext)
while os.path.exists(os.path.join(self.download_directory,
_get_file_name(ext_num))):
ext_num += 1
file_name = _get_file_name(ext_num)
file_written_to = os.path.join(self.download_directory, self.file_name)
try:
self.file_handle = open(os.path.join(self.download_directory, file_name), 'wb')
self.file_written_to = os.path.join(self.download_directory, file_name)
self.file_handle = open(file_written_to, 'wb')
self.file_written_to = file_written_to
except IOError:
log.error(traceback.format_exc())
raise ValueError(
"Failed to open %s. Make sure you have permission to save files to that"
" location." %
os.path.join(self.download_directory, file_name))
" location." % file_written_to
)
return threads.deferToThread(open_file)
def _close_output(self):
@ -232,26 +180,20 @@ class EncryptedFileSaver(EncryptedFileDownloader):
self.file_handle.write(data)
return write_func
def _delete_from_info_manager(self):
return self.stream_info_manager.delete_stream(self.stream_hash)
class EncryptedFileSaverFactory(EncryptedFileDownloaderFactory):
def __init__(self, peer_finder, rate_limiter, blob_manager, stream_info_manager,
wallet, download_directory):
EncryptedFileDownloaderFactory.__init__(self, peer_finder, rate_limiter, blob_manager,
stream_info_manager, wallet)
self.download_directory = download_directory
def __init__(self, peer_finder, rate_limiter, blob_manager, storage, wallet, download_directory):
EncryptedFileDownloaderFactory.__init__(self, peer_finder, rate_limiter, blob_manager, storage, wallet)
self.download_directory = binascii.hexlify(download_directory)
def _make_downloader(self, stream_hash, payment_rate_manager, stream_info):
stream_name = stream_info.raw_info['stream_name']
key = stream_info.raw_info['key']
suggested_file_name = stream_info.raw_info['suggested_file_name']
return EncryptedFileSaver(stream_hash, self.peer_finder, self.rate_limiter,
self.blob_manager, self.stream_info_manager,
payment_rate_manager, self.wallet, self.download_directory,
key=key, stream_name=stream_name,
suggested_file_name=suggested_file_name)
return EncryptedFileSaver(
stream_hash, self.peer_finder, self.rate_limiter, self.blob_manager, self.storage, payment_rate_manager,
self.wallet, self.download_directory, key=key, stream_name=stream_name, file_name=suggested_file_name
)
@staticmethod
def get_description():

View file

@ -1,7 +1,6 @@
import logging
from zope.interface import implements
from twisted.internet import defer
from lbrynet.cryptstream.CryptBlob import CryptBlobInfo
from lbrynet.interfaces import IMetadataHandler
@ -11,9 +10,9 @@ log = logging.getLogger(__name__)
class EncryptedFileMetadataHandler(object):
implements(IMetadataHandler)
def __init__(self, stream_hash, stream_info_manager, download_manager):
def __init__(self, stream_hash, storage, download_manager):
self.stream_hash = stream_hash
self.stream_info_manager = stream_info_manager
self.storage = storage
self.download_manager = download_manager
self._final_blob_num = None
@ -21,7 +20,7 @@ class EncryptedFileMetadataHandler(object):
@defer.inlineCallbacks
def get_initial_blobs(self):
blob_infos = yield self.stream_info_manager.get_blobs_for_stream(self.stream_hash)
blob_infos = yield self.storage.get_blobs_for_stream(self.stream_hash)
formatted_infos = self._format_initial_blobs_for_download_manager(blob_infos)
defer.returnValue(formatted_infos)
@ -32,12 +31,13 @@ class EncryptedFileMetadataHandler(object):
def _format_initial_blobs_for_download_manager(self, blob_infos):
infos = []
for i, (blob_hash, blob_num, iv, length) in enumerate(blob_infos):
if blob_hash is not None and length:
infos.append(CryptBlobInfo(blob_hash, blob_num, length, iv))
for i, crypt_blob in enumerate(blob_infos):
if crypt_blob.blob_hash is not None and crypt_blob.length:
infos.append(crypt_blob)
else:
if i != len(blob_infos) - 1:
raise Exception("Invalid stream terminator")
log.debug("Setting _final_blob_num to %s", str(blob_num - 1))
self._final_blob_num = blob_num - 1
raise Exception("Invalid stream terminator: %i of %i" %
(i, len(blob_infos) - 1))
log.debug("Setting _final_blob_num to %s", str(crypt_blob.blob_num - 1))
self._final_blob_num = crypt_blob.blob_num - 1
return infos
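For illustration, a well-formed blob_infos list ends with a zero-length terminator, whose blob_num minus one becomes _final_blob_num (hypothetical values; CryptBlobInfo takes blob_hash, blob_num, length, iv):

    blob_infos = [
        CryptBlobInfo('<hash 0>', 0, 2097152, '<iv 0>'),
        CryptBlobInfo('<hash 1>', 1, 1048576, '<iv 1>'),
        CryptBlobInfo(None, 2, 0, '<iv 2>'),  # terminator -> _final_blob_num = 1
    ]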

View file

@ -1,11 +1,11 @@
from lbrynet.lbry_file.StreamDescriptor import EncryptedFileStreamType
from lbrynet.lbry_file.StreamDescriptor import EncryptedFileStreamDescriptorValidator
from lbrynet.core.StreamDescriptor import EncryptedFileStreamType
from lbrynet.core.StreamDescriptor import EncryptedFileStreamDescriptorValidator
from lbrynet.core.DownloadOption import DownloadOption, DownloadOptionChoice
def add_lbry_file_to_sd_identifier(sd_identifier):
sd_identifier.add_stream_type(
EncryptedFileStreamType, EncryptedFileStreamDescriptorValidator, EncryptedFileOptions())
sd_identifier.add_stream_type(EncryptedFileStreamType, EncryptedFileStreamDescriptorValidator,
EncryptedFileOptions())
class EncryptedFileOptions(object):

View file

@ -1,4 +1,4 @@
"""
__doc__ = """
Reflector is a protocol to re-host lbry blobs and streams
Client queries and server responses follow, all dicts are encoded as json

View file

@ -50,10 +50,6 @@ class EncryptedFileReflectorClient(Protocol):
def protocol_version(self):
return self.factory.protocol_version
@property
def stream_info_manager(self):
return self.factory.stream_info_manager
@property
def stream_hash(self):
return self.factory.stream_hash
@ -113,9 +109,9 @@ class EncryptedFileReflectorClient(Protocol):
def get_validated_blobs(self, blobs_in_stream):
def get_blobs(blobs):
for (blob, _, _, blob_len) in blobs:
if blob and blob_len:
yield self.blob_manager.get_blob(blob, blob_len)
for crypt_blob in blobs:
if crypt_blob.blob_hash and crypt_blob.length:
yield self.blob_manager.get_blob(crypt_blob.blob_hash, crypt_blob.length)
dl = defer.DeferredList(list(get_blobs(blobs_in_stream)), consumeErrors=True)
dl.addCallback(lambda blobs: [blob for r, blob in blobs if r and blob.get_is_verified()])
@ -135,7 +131,7 @@ class EncryptedFileReflectorClient(Protocol):
len(filtered))
return filtered
d = self.factory.stream_info_manager.get_blobs_for_stream(self.factory.stream_hash)
d = self.factory.blob_manager.storage.get_blobs_for_stream(self.factory.stream_hash)
d.addCallback(self.get_validated_blobs)
if not self.descriptor_needed:
d.addCallback(lambda filtered:
@ -155,8 +151,8 @@ class EncryptedFileReflectorClient(Protocol):
def _save_descriptor_blob(sd_blob):
self.stream_descriptor = sd_blob
d = self.factory.stream_info_manager.get_sd_blob_hashes_for_stream(self.factory.stream_hash)
d.addCallback(lambda sd: self.factory.blob_manager.get_blob(sd[0]))
d = self.factory.blob_manager.storage.get_sd_blob_hash_for_stream(self.factory.stream_hash)
d.addCallback(self.factory.blob_manager.get_blob)
d.addCallback(_save_descriptor_blob)
return d
@ -326,10 +322,6 @@ class EncryptedFileReflectorClientFactory(ClientFactory):
def blob_manager(self):
return self._lbry_file.blob_manager
@property
def stream_info_manager(self):
return self._lbry_file.stream_info_manager
@property
def stream_hash(self):
return self._lbry_file.stream_hash

View file

@ -6,7 +6,7 @@ from twisted.internet.protocol import Protocol, ServerFactory
from lbrynet.core.utils import is_valid_blobhash
from lbrynet.core.Error import DownloadCanceledError, InvalidBlobHashError, NoSuchSDHash
from lbrynet.core.StreamDescriptor import BlobStreamDescriptorReader
from lbrynet.lbry_file.StreamDescriptor import save_sd_info
from lbrynet.core.StreamDescriptor import save_sd_info
from lbrynet.reflector.common import REFLECTOR_V1, REFLECTOR_V2
from lbrynet.reflector.common import ReflectorRequestError, ReflectorClientVersionError
@ -32,7 +32,7 @@ class ReflectorServer(Protocol):
log.debug('Connection made to %s', peer_info)
self.peer = self.factory.peer_manager.get_peer(peer_info.host, peer_info.port)
self.blob_manager = self.factory.blob_manager
self.stream_info_manager = self.factory.stream_info_manager
self.storage = self.factory.blob_manager.storage
self.lbry_file_manager = self.factory.lbry_file_manager
self.protocol_version = self.factory.protocol_version
self.received_handshake = False
@ -67,16 +67,15 @@ class ReflectorServer(Protocol):
@defer.inlineCallbacks
def check_head_blob_announce(self, stream_hash):
blob_infos = yield self.stream_info_manager.get_blobs_for_stream(stream_hash)
blob_hash, blob_num, blob_iv, blob_length = blob_infos[0]
if blob_hash in self.blob_manager.blobs:
head_blob = self.blob_manager.blobs[blob_hash]
head_blob_hash = yield self.storage.get_stream_blob_by_position(stream_hash, 0)
if head_blob_hash in self.blob_manager.blobs:
head_blob = self.blob_manager.blobs[head_blob_hash]
if head_blob.get_is_verified():
should_announce = yield self.blob_manager.get_should_announce(blob_hash)
should_announce = yield self.blob_manager.get_should_announce(head_blob_hash)
if should_announce == 0:
yield self.blob_manager.set_should_announce(blob_hash, 1)
yield self.blob_manager.set_should_announce(head_blob_hash, 1)
log.info("Discovered previously completed head blob (%s), "
"setting it to be announced", blob_hash[:8])
"setting it to be announced", head_blob_hash[:8])
defer.returnValue(None)
@defer.inlineCallbacks
@ -89,27 +88,21 @@ class ReflectorServer(Protocol):
yield self.blob_manager.set_should_announce(sd_hash, 1)
log.info("Discovered previously completed sd blob (%s), "
"setting it to be announced", sd_hash[:8])
try:
yield self.stream_info_manager.get_stream_hash_for_sd_hash(sd_hash)
except NoSuchSDHash:
stream_hash = yield self.storage.get_stream_hash_for_sd_hash(sd_hash)
if not stream_hash:
log.info("Adding blobs to stream")
sd_info = yield BlobStreamDescriptorReader(sd_blob).get_info()
yield save_sd_info(self.stream_info_manager, sd_info)
yield self.stream_info_manager.save_sd_blob_hash_to_stream(
sd_info['stream_hash'],
sd_hash)
yield save_sd_info(self.blob_manager, sd_hash, sd_info)
defer.returnValue(None)
@defer.inlineCallbacks
def _on_completed_blob(self, blob, response_key):
should_announce = False
yield self.blob_manager.blob_completed(blob, should_announce=False)
if response_key == RECEIVED_SD_BLOB:
sd_info = yield BlobStreamDescriptorReader(blob).get_info()
yield save_sd_info(self.stream_info_manager, sd_info)
yield self.stream_info_manager.save_sd_blob_hash_to_stream(sd_info['stream_hash'],
blob.blob_hash)
yield self.lbry_file_manager.add_lbry_file(sd_info['stream_hash'], blob.blob_hash)
should_announce = True
yield save_sd_info(self.blob_manager, blob.blob_hash, sd_info)
yield self.blob_manager.set_should_announce(blob.blob_hash, True)
# if we already have the head blob, set it to be announced now that we know it's
# a head blob
@ -117,21 +110,18 @@ class ReflectorServer(Protocol):
else:
d = defer.succeed(None)
stream_hash = yield self.stream_info_manager.get_stream_of_blob(blob.blob_hash)
stream_hash = yield self.storage.get_stream_of_blob(blob.blob_hash)
if stream_hash is not None:
blob_num = yield self.stream_info_manager._get_blob_num_by_hash(stream_hash,
blob.blob_hash)
blob_num = yield self.storage.get_blob_num_by_hash(stream_hash,
blob.blob_hash)
if blob_num == 0:
should_announce = True
sd_hashes = yield self.stream_info_manager.get_sd_blob_hashes_for_stream(
stream_hash)
sd_hash = yield self.storage.get_sd_blob_hash_for_stream(stream_hash)
yield self.blob_manager.set_should_announce(blob.blob_hash, True)
# if we already have the sd blob, set it to be announced now that we know it's
# a sd blob
for sd_hash in sd_hashes:
d.addCallback(lambda _: self.check_sd_blob_announce(sd_hash))
d.addCallback(lambda _: self.check_sd_blob_announce(sd_hash))
yield self.blob_manager.blob_completed(blob, should_announce=should_announce)
yield self.close_blob()
yield d
log.info("Received %s", blob)
@ -306,14 +296,12 @@ class ReflectorServer(Protocol):
# marked as such for announcement now that we know it's an sd blob that we have.
yield self.check_sd_blob_announce(sd_blob.blob_hash)
try:
stream_hash = yield self.stream_info_manager.get_stream_hash_for_sd_hash(
stream_hash = yield self.storage.get_stream_hash_for_sd_hash(
sd_blob.blob_hash)
except NoSuchSDHash:
sd_info = yield BlobStreamDescriptorReader(sd_blob).get_info()
stream_hash = sd_info['stream_hash']
yield save_sd_info(self.stream_info_manager, sd_info)
yield self.stream_info_manager.save_sd_blob_hash_to_stream(stream_hash,
sd_blob.blob_hash)
yield save_sd_info(self.blob_manager, sd_blob.blob_hash, sd_info)
yield self.check_head_blob_announce(stream_hash)
response = yield self.request_needed_blobs({SEND_SD_BLOB: False}, sd_blob)
else:
@ -401,10 +389,9 @@ class ReflectorServer(Protocol):
class ReflectorServerFactory(ServerFactory):
protocol = ReflectorServer
def __init__(self, peer_manager, blob_manager, stream_info_manager, lbry_file_manager):
def __init__(self, peer_manager, blob_manager, lbry_file_manager):
self.peer_manager = peer_manager
self.blob_manager = blob_manager
self.stream_info_manager = stream_info_manager
self.lbry_file_manager = lbry_file_manager
self.protocol_version = REFLECTOR_V2

View file

@ -10,17 +10,14 @@ import unittest
from Crypto import Random
from Crypto.Hash import MD5
from lbrynet import conf
from lbrynet.lbry_file.EncryptedFileMetadataManager import DBEncryptedFileMetadataManager
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.core.Session import Session
from lbrynet.core.server.BlobAvailabilityHandler import BlobAvailabilityHandlerFactory
from lbrynet.core.client.StandaloneBlobDownloader import StandaloneBlobDownloader
from lbrynet.core.StreamDescriptor import BlobStreamDescriptorWriter
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier
from lbrynet.core.StreamDescriptor import download_sd_blob
from lbrynet.file_manager.EncryptedFileCreator import create_lbry_file
from lbrynet.lbry_file.client.EncryptedFileOptions import add_lbry_file_to_sd_identifier
from lbrynet.lbry_file.StreamDescriptor import get_sd_info
from twisted.internet import defer, threads, task
from twisted.trial.unittest import TestCase
from twisted.python.failure import Failure
@ -119,9 +116,7 @@ class LbryUploader(object):
peer_port=5553, use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
blob_tracker_class=DummyBlobAvailabilityTracker,
dht_node_class=Node, is_generous=self.is_generous, external_ip="127.0.0.1")
stream_info_manager = DBEncryptedFileMetadataManager(self.db_dir)
self.lbry_file_manager = EncryptedFileManager(
self.session, stream_info_manager, self.sd_identifier)
self.lbry_file_manager = EncryptedFileManager(self.session, self.sd_identifier)
if self.ul_rate_limit is not None:
self.session.rate_limiter.set_ul_limit(self.ul_rate_limit)
reactor.callLater(1, self.start_all)
@ -134,7 +129,6 @@ class LbryUploader(object):
d.addCallback(lambda _: self.lbry_file_manager.setup())
d.addCallback(lambda _: self.start_server())
d.addCallback(lambda _: self.create_stream())
d.addCallback(self.create_stream_descriptor)
d.addCallback(self.put_sd_hash_on_queue)
def print_error(err):
@ -180,16 +174,11 @@ class LbryUploader(object):
if self.kill_event.is_set():
self.kill_server()
@defer.inlineCallbacks
def create_stream(self):
test_file = GenFile(self.file_size, b''.join([chr(i) for i in xrange(0, 64, 6)]))
d = create_lbry_file(self.session, self.lbry_file_manager, "test_file", test_file)
return d
def create_stream_descriptor(self, stream_hash):
descriptor_writer = BlobStreamDescriptorWriter(self.session.blob_manager)
d = get_sd_info(self.lbry_file_manager.stream_info_manager, stream_hash, True)
d.addCallback(descriptor_writer.create_descriptor)
return d
lbry_file = yield create_lbry_file(self.session, self.lbry_file_manager, "test_file", test_file)
defer.returnValue(lbry_file.sd_hash)
def put_sd_hash_on_queue(self, sd_hash):
self.sd_hash_queue.put(sd_hash)
@ -226,26 +215,20 @@ def start_lbry_reuploader(sd_hash, kill_event, dead_event,
is_generous=conf.ADJUSTABLE_SETTINGS['is_generous_host'][1],
external_ip="127.0.0.1")
stream_info_manager = DBEncryptedFileMetadataManager(db_dir)
lbry_file_manager = EncryptedFileManager(session, stream_info_manager, sd_identifier)
lbry_file_manager = EncryptedFileManager(session, sd_identifier)
if ul_rate_limit is not None:
session.rate_limiter.set_ul_limit(ul_rate_limit)
def make_downloader(metadata, prm):
info_validator = metadata.validator
options = metadata.options
def make_downloader(metadata, prm, download_directory):
factories = metadata.factories
chosen_options = [o.default_value for o in
options.get_downloader_options(info_validator, prm)]
return factories[0].make_downloader(metadata, chosen_options, prm)
return factories[0].make_downloader(metadata, prm.min_blob_data_payment_rate, prm, download_directory)
def download_file():
prm = session.payment_rate_manager
d = download_sd_blob(session, sd_hash, prm)
d.addCallback(sd_identifier.get_metadata_for_sd_blob)
d.addCallback(make_downloader, prm)
d.addCallback(make_downloader, prm, db_dir)
d.addCallback(lambda downloader: downloader.start())
return d
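The reuploader's downloader construction changes the same way throughout this commit: the validator/options negotiation is gone, and the minimum blob data payment rate plus a download directory are passed in directly. Reassembled from the lines above (session, sd_hash, sd_identifier and db_dir come from the enclosing start_lbry_reuploader):

    def make_downloader(metadata, prm, download_directory):
        factories = metadata.factories
        # no more chosen_options list; the rate is handed over as-is
        return factories[0].make_downloader(metadata, prm.min_blob_data_payment_rate,
                                            prm, download_directory)

    def download_file():
        prm = session.payment_rate_manager
        d = download_sd_blob(session, sd_hash, prm)
        d.addCallback(sd_identifier.get_metadata_for_sd_blob)
        d.addCallback(make_downloader, prm, db_dir)
        d.addCallback(lambda downloader: downloader.start())
        return d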
@@ -413,7 +396,6 @@ class TestTransfer(TestCase):
mocks.mock_conf_settings(self)
self.server_processes = []
self.session = None
self.stream_info_manager = None
self.lbry_file_manager = None
self.is_generous = True
self.addCleanup(self.take_down_env)
@@ -425,8 +407,6 @@ class TestTransfer(TestCase):
d.addCallback(lambda _: self.lbry_file_manager.stop())
if self.session is not None:
d.addCallback(lambda _: self.session.shut_down())
if self.stream_info_manager is not None:
d.addCallback(lambda _: self.stream_info_manager.stop())
def delete_test_env():
dirs = ['server', 'server1', 'server2', 'client']
@@ -519,19 +499,12 @@ class TestTransfer(TestCase):
blob_tracker_class=DummyBlobAvailabilityTracker,
dht_node_class=Node, is_generous=self.is_generous, external_ip="127.0.0.1")
self.stream_info_manager = DBEncryptedFileMetadataManager(db_dir)
self.lbry_file_manager = EncryptedFileManager(
self.session, self.stream_info_manager, sd_identifier)
self.session, sd_identifier)
def make_downloader(metadata, prm):
info_validator = metadata.validator
options = metadata.options
factories = metadata.factories
chosen_options = [
o.default_value for o in options.get_downloader_options(info_validator, prm)
]
return factories[0].make_downloader(metadata, chosen_options, prm)
return factories[0].make_downloader(metadata, prm.min_blob_data_payment_rate, prm, db_dir)
def download_file(sd_hash):
prm = self.session.payment_rate_manager
@@ -542,7 +515,7 @@ class TestTransfer(TestCase):
return d
def check_md5_sum():
f = open('test_file')
f = open(os.path.join(db_dir, 'test_file'))
hashsum = MD5.new()
hashsum.update(f.read())
self.assertEqual(hashsum.hexdigest(), "4ca2aafb4101c1e42235aad24fbb83be")
@@ -696,25 +669,14 @@ class TestTransfer(TestCase):
is_generous=conf.ADJUSTABLE_SETTINGS['is_generous_host'][1],
external_ip="127.0.0.1")
self.stream_info_manager = DBEncryptedFileMetadataManager(self.session.db_dir)
self.lbry_file_manager = EncryptedFileManager(self.session, self.stream_info_manager,
sd_identifier)
self.lbry_file_manager = EncryptedFileManager(self.session, sd_identifier)
@defer.inlineCallbacks
def make_downloader(metadata, prm):
info_validator = metadata.validator
options = metadata.options
factories = metadata.factories
chosen_options = [
o.default_value for o in options.get_downloader_options(info_validator, prm)
]
downloader = yield factories[0].make_downloader(metadata, chosen_options, prm)
downloader = yield factories[0].make_downloader(metadata, prm.min_blob_data_payment_rate, prm, db_dir)
defer.returnValue(downloader)
def append_downloader(downloader):
downloaders.append(downloader)
return downloader
@defer.inlineCallbacks
def download_file(sd_hash):
prm = self.session.payment_rate_manager
@@ -722,28 +684,21 @@ class TestTransfer(TestCase):
metadata = yield sd_identifier.get_metadata_for_sd_blob(sd_blob)
downloader = yield make_downloader(metadata, prm)
downloaders.append(downloader)
finished_value = yield downloader.start()
defer.returnValue(finished_value)
yield downloader.start()
defer.returnValue(downloader)
def check_md5_sum():
f = open('test_file')
f = open(os.path.join(db_dir, 'test_file'))
hashsum = MD5.new()
hashsum.update(f.read())
self.assertEqual(hashsum.hexdigest(), "4ca2aafb4101c1e42235aad24fbb83be")
def delete_lbry_file():
def delete_lbry_file(downloader):
logging.debug("deleting the file")
d = self.lbry_file_manager.delete_lbry_file(downloaders[0])
d.addCallback(lambda _: self.lbry_file_manager.get_count_for_stream_hash(
downloaders[0].stream_hash))
d.addCallback(
lambda c: self.stream_info_manager.delete_stream(
downloaders[1].stream_hash) if c == 0 else True)
return d
return self.lbry_file_manager.delete_lbry_file(downloader)
def check_lbry_file():
d = downloaders[1].status()
d.addCallback(lambda _: downloaders[1].status())
def check_lbry_file(downloader):
d = downloader.status()
def check_status_report(status_report):
self.assertEqual(status_report.num_known, status_report.num_completed)
@@ -754,17 +709,20 @@ class TestTransfer(TestCase):
@defer.inlineCallbacks
def start_transfer(sd_hash):
# download a file, delete it, and download it again
logging.debug("Starting the transfer")
yield self.session.setup()
yield self.stream_info_manager.setup()
yield add_lbry_file_to_sd_identifier(sd_identifier)
yield self.lbry_file_manager.setup()
yield download_file(sd_hash)
downloader = yield download_file(sd_hash)
yield check_md5_sum()
yield download_file(sd_hash)
yield check_lbry_file()
yield delete_lbry_file()
yield check_lbry_file(downloader)
yield delete_lbry_file(downloader)
downloader = yield download_file(sd_hash)
yield check_lbry_file(downloader)
yield check_md5_sum()
yield delete_lbry_file(downloader)
def stop(arg):
if isinstance(arg, Failure):
@@ -819,10 +777,8 @@ class TestTransfer(TestCase):
is_generous=conf.ADJUSTABLE_SETTINGS['is_generous_host'][1],
external_ip="127.0.0.1")
self.stream_info_manager = DBEncryptedFileMetadataManager(db_dir)
self.lbry_file_manager = EncryptedFileManager(
self.session, self.stream_info_manager, sd_identifier)
self.session, sd_identifier)
def start_additional_uploaders(sd_hash):
for i in range(1, num_uploaders):

View file

@@ -2,13 +2,12 @@ from twisted.internet import defer, threads, error
from twisted.trial import unittest
from lbrynet import conf
from lbrynet import lbry_file
from lbrynet.core.StreamDescriptor import get_sd_info
from lbrynet import reflector
from lbrynet.core import BlobManager
from lbrynet.core import PeerManager
from lbrynet.core import Session
from lbrynet.core import StreamDescriptor
from lbrynet.lbry_file import EncryptedFileMetadataManager
from lbrynet.lbry_file.client import EncryptedFileOptions
from lbrynet.file_manager import EncryptedFileCreator
from lbrynet.file_manager import EncryptedFileManager
@@ -21,7 +20,6 @@ class TestReflector(unittest.TestCase):
def setUp(self):
mocks.mock_conf_settings(self)
self.session = None
self.stream_info_manager = None
self.lbry_file_manager = None
self.server_blob_manager = None
self.reflector_port = None
@@ -66,11 +64,8 @@ class TestReflector(unittest.TestCase):
external_ip="127.0.0.1"
)
self.stream_info_manager = EncryptedFileMetadataManager.DBEncryptedFileMetadataManager(
self.db_dir)
self.lbry_file_manager = EncryptedFileManager.EncryptedFileManager(
self.session, self.stream_info_manager, sd_identifier)
self.lbry_file_manager = EncryptedFileManager.EncryptedFileManager(self.session,
sd_identifier)
## Setup reflector server classes ##
self.server_db_dir, self.server_blob_dir = mk_db_and_blob_dir()
@@ -88,26 +83,25 @@ class TestReflector(unittest.TestCase):
external_ip="127.0.0.1"
)
self.server_blob_manager = BlobManager.DiskBlobManager(
hash_announcer, self.server_blob_dir, self.server_db_dir)
self.server_stream_info_manager = \
EncryptedFileMetadataManager.DBEncryptedFileMetadataManager(self.server_db_dir)
self.server_blob_manager = BlobManager.DiskBlobManager(hash_announcer,
self.server_blob_dir,
self.server_session.storage)
self.server_lbry_file_manager = EncryptedFileManager.EncryptedFileManager(
self.server_session, self.server_stream_info_manager,
sd_identifier)
self.server_session, sd_identifier)
d = self.session.setup()
d.addCallback(lambda _: self.stream_info_manager.setup())
d.addCallback(lambda _: EncryptedFileOptions.add_lbry_file_to_sd_identifier(sd_identifier))
d.addCallback(lambda _: self.lbry_file_manager.setup())
d.addCallback(lambda _: self.server_session.setup())
d.addCallback(lambda _: self.server_blob_manager.setup())
d.addCallback(lambda _: self.server_stream_info_manager.setup())
d.addCallback(lambda _: self.server_lbry_file_manager.setup())
def verify_equal(sd_info):
self.assertEqual(mocks.create_stream_sd_file, sd_info)
@defer.inlineCallbacks
def verify_equal(sd_info, stream_hash):
self.assertDictEqual(mocks.create_stream_sd_file, sd_info)
sd_hash = yield self.session.storage.get_sd_blob_hash_for_stream(stream_hash)
defer.returnValue(sd_hash)
def save_sd_blob_hash(sd_hash):
self.sd_hash = sd_hash
@@ -115,14 +109,8 @@ class TestReflector(unittest.TestCase):
def verify_stream_descriptor_file(stream_hash):
self.stream_hash = stream_hash
d = lbry_file.get_sd_info(self.lbry_file_manager.stream_info_manager, stream_hash, True)
d.addCallback(verify_equal)
d.addCallback(
lambda _: lbry_file.publish_sd_blob(
self.lbry_file_manager.stream_info_manager,
self.session.blob_manager, stream_hash
)
)
d = get_sd_info(self.lbry_file_manager.session.storage, stream_hash, True)
d.addCallback(verify_equal, stream_hash)
d.addCallback(save_sd_blob_hash)
return d
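The reflector test no longer publishes the sd blob by hand; file creation records the sd blob hash in storage, so verification simply reads it back. The new helper, reassembled as best the interleaved diff allows:

    def verify_stream_descriptor_file(stream_hash):
        self.stream_hash = stream_hash
        # sd info comes from SQLiteStorage; verify_equal also looks up and
        # returns the stored sd blob hash, which save_sd_blob_hash keeps
        d = get_sd_info(self.lbry_file_manager.session.storage, stream_hash, True)
        d.addCallback(verify_equal, stream_hash)
        d.addCallback(save_sd_blob_hash)
        return d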
@@ -136,11 +124,12 @@ class TestReflector(unittest.TestCase):
key="0123456701234567",
iv_generator=iv_generator()
)
d.addCallback(lambda lbry_file: lbry_file.stream_hash)
return d
def start_server():
server_factory = reflector.ServerFactory(
peer_manager, self.server_blob_manager, self.server_stream_info_manager,
peer_manager, self.server_blob_manager,
self.server_lbry_file_manager)
from twisted.internet import reactor
port = 8943
@@ -161,13 +150,11 @@ class TestReflector(unittest.TestCase):
## Close client classes ##
d.addCallback(lambda _: self.lbry_file_manager.stop())
d.addCallback(lambda _: self.session.shut_down())
d.addCallback(lambda _: self.stream_info_manager.stop())
## Close server classes ##
d.addCallback(lambda _: self.server_blob_manager.stop())
d.addCallback(lambda _: self.server_lbry_file_manager.stop())
d.addCallback(lambda _: self.server_session.shut_down())
d.addCallback(lambda _: self.server_stream_info_manager.stop())
d.addCallback(lambda _: self.reflector_port.stopListening())
@@ -192,37 +179,32 @@ class TestReflector(unittest.TestCase):
@defer.inlineCallbacks
def verify_stream_on_reflector():
            # check that the reflector server's storage has all the right information
streams = yield self.server_stream_info_manager.get_all_streams()
streams = yield self.server_session.storage.get_all_streams()
self.assertEqual(1, len(streams))
self.assertEqual(self.stream_hash, streams[0])
blobs = yield self.server_stream_info_manager.get_blobs_for_stream(self.stream_hash)
blob_hashes = [b[0] for b in blobs if b[0] is not None]
blobs = yield self.server_session.storage.get_blobs_for_stream(self.stream_hash)
blob_hashes = [b.blob_hash for b in blobs if b.blob_hash is not None]
expected_blob_hashes = [b[0] for b in self.expected_blobs[:-1] if b[0] is not None]
self.assertEqual(expected_blob_hashes, blob_hashes)
sd_hashes = yield self.server_stream_info_manager.get_sd_blob_hashes_for_stream(
self.stream_hash)
self.assertEqual(1, len(sd_hashes))
sd_hash = yield self.server_session.storage.get_sd_blob_hash_for_stream(streams[0])
expected_sd_hash = self.expected_blobs[-1][0]
self.assertEqual(self.sd_hash, sd_hashes[0])
self.assertEqual(self.sd_hash, sd_hash)
# check lbry file manager has the file
files = yield self.server_lbry_file_manager.lbry_files
self.assertEqual(1, len(files))
self.assertEqual(self.sd_hash, files[0].sd_hash)
self.assertEqual('test_file', files[0].file_name)
status = yield files[0].status()
self.assertEqual('stopped', status.running_status)
num_blobs = len(self.expected_blobs) -1 # subtract sd hash
self.assertEqual(num_blobs, status.num_completed)
self.assertEqual(num_blobs, status.num_known)
self.assertEqual(0, len(files))
streams = yield self.server_lbry_file_manager.storage.get_all_streams()
self.assertEqual(1, len(streams))
stream_info = yield self.server_lbry_file_manager.storage.get_stream_info(self.stream_hash)
self.assertEqual(self.sd_hash, stream_info[3])
self.assertEqual('test_file'.encode('hex'), stream_info[0])
# check should_announce blobs on blob_manager
blob_hashes = yield self.server_blob_manager._get_all_should_announce_blob_hashes()
self.assertEqual(2, len(blob_hashes))
self.assertTrue(self.sd_hash in blob_hashes)
self.assertTrue(expected_blob_hashes[0] in blob_hashes)
blob_hashes = yield self.server_blob_manager.storage.get_all_should_announce_blobs()
self.assertSetEqual({self.sd_hash, expected_blob_hashes[0]}, set(blob_hashes))
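Taken together, this hunk shows the storage-side query surface the reflector tests now verify against. A condensed sketch built only from calls appearing in this diff (blob rows come back as objects, hence .blob_hash instead of tuple indexing):

    @defer.inlineCallbacks
    def verify_stream_on_reflector():
        storage = self.server_session.storage
        streams = yield storage.get_all_streams()
        self.assertEqual([self.stream_hash], streams)
        blobs = yield storage.get_blobs_for_stream(self.stream_hash)
        blob_hashes = [b.blob_hash for b in blobs if b.blob_hash is not None]
        sd_hash = yield storage.get_sd_blob_hash_for_stream(streams[0])
        self.assertEqual(self.sd_hash, sd_hash)
        announce = yield self.server_blob_manager.storage.get_all_should_announce_blobs()
        self.assertSetEqual({self.sd_hash, blob_hashes[0]}, set(announce))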
def verify_have_blob(blob_hash, blob_size):
d = self.server_blob_manager.get_blob(blob_hash)
@@ -231,7 +213,7 @@ class TestReflector(unittest.TestCase):
def send_to_server():
fake_lbry_file = mocks.FakeLBRYFile(self.session.blob_manager,
self.stream_info_manager,
self.server_session.storage,
self.stream_hash)
factory = reflector.ClientFactory(fake_lbry_file)
@@ -283,10 +265,10 @@ class TestReflector(unittest.TestCase):
@defer.inlineCallbacks
def verify_stream_on_reflector():
            # this protocol should not have any impact on the stored stream data
streams = yield self.server_stream_info_manager.get_all_streams()
streams = yield self.server_session.storage.get_all_streams()
self.assertEqual(0, len(streams))
# there should be no should announce blobs here
blob_hashes = yield self.server_blob_manager._get_all_should_announce_blob_hashes()
blob_hashes = yield self.server_blob_manager.storage.get_all_should_announce_blobs()
self.assertEqual(0, len(blob_hashes))
def verify_data_on_reflector():
@@ -333,25 +315,21 @@ class TestReflector(unittest.TestCase):
def verify_stream_on_reflector():
            # check that the reflector server's storage has all the right information
streams = yield self.server_stream_info_manager.get_all_streams()
streams = yield self.server_session.storage.get_all_streams()
self.assertEqual(1, len(streams))
self.assertEqual(self.stream_hash, streams[0])
blobs = yield self.server_stream_info_manager.get_blobs_for_stream(self.stream_hash)
blob_hashes = [b[0] for b in blobs if b[0] is not None]
blobs = yield self.server_session.storage.get_blobs_for_stream(self.stream_hash)
blob_hashes = [b.blob_hash for b in blobs if b.blob_hash is not None]
expected_blob_hashes = [b[0] for b in self.expected_blobs[:-1] if b[0] is not None]
self.assertEqual(expected_blob_hashes, blob_hashes)
sd_hashes = yield self.server_stream_info_manager.get_sd_blob_hashes_for_stream(
sd_hash = yield self.server_session.storage.get_sd_blob_hash_for_stream(
self.stream_hash)
self.assertEqual(1, len(sd_hashes))
expected_sd_hash = self.expected_blobs[-1][0]
self.assertEqual(self.sd_hash, sd_hashes[0])
self.assertEqual(self.sd_hash, sd_hash)
# check should_announce blobs on blob_manager
blob_hashes = yield self.server_blob_manager._get_all_should_announce_blob_hashes()
self.assertEqual(2, len(blob_hashes))
self.assertTrue(self.sd_hash in blob_hashes)
self.assertTrue(expected_blob_hashes[0] in blob_hashes)
to_announce = yield self.server_blob_manager.storage.get_all_should_announce_blobs()
self.assertSetEqual(set(to_announce), {self.sd_hash, expected_blob_hashes[0]})
def verify_have_blob(blob_hash, blob_size):
d = self.server_blob_manager.get_blob(blob_hash)
@@ -371,7 +349,7 @@ class TestReflector(unittest.TestCase):
def send_to_server_as_stream(result):
fake_lbry_file = mocks.FakeLBRYFile(self.session.blob_manager,
self.stream_info_manager,
self.server_session.storage,
self.stream_hash)
factory = reflector.ClientFactory(fake_lbry_file)
@@ -379,7 +357,6 @@ class TestReflector(unittest.TestCase):
reactor.connectTCP('localhost', self.port, factory)
return factory.finished_deferred
def verify_blob_completed(blob, blob_size):
self.assertTrue(blob.get_is_verified())
self.assertEqual(blob_size, blob.length)

View file

@@ -1,20 +1,18 @@
import logging
import os
import shutil
import tempfile
from Crypto.Hash import MD5
from twisted.trial.unittest import TestCase
from twisted.internet import defer, threads
from lbrynet import conf
from lbrynet.lbry_file.EncryptedFileMetadataManager import DBEncryptedFileMetadataManager
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.core.Session import Session
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier
from lbrynet.lbry_file import publish_sd_blob
from lbrynet.file_manager.EncryptedFileCreator import create_lbry_file
from lbrynet.lbry_file.client.EncryptedFileOptions import add_lbry_file_to_sd_identifier
from lbrynet.lbry_file.StreamDescriptor import get_sd_info
from lbrynet.core.StreamDescriptor import get_sd_info
from lbrynet.core.PeerManager import PeerManager
from lbrynet.core.RateLimiter import DummyRateLimiter
@@ -31,30 +29,29 @@ DummyBlobAvailabilityTracker = mocks.BlobAvailabilityTracker
class TestStreamify(TestCase):
maxDiff = 5000
def setUp(self):
mocks.mock_conf_settings(self)
self.session = None
self.stream_info_manager = None
self.lbry_file_manager = None
self.addCleanup(self.take_down_env)
self.is_generous = True
self.db_dir = tempfile.mkdtemp()
self.blob_dir = os.path.join(self.db_dir, "blobfiles")
os.mkdir(self.blob_dir)
def take_down_env(self):
d = defer.succeed(True)
@defer.inlineCallbacks
def tearDown(self):
lbry_files = self.lbry_file_manager.lbry_files
for lbry_file in lbry_files:
yield self.lbry_file_manager.delete_lbry_file(lbry_file)
if self.lbry_file_manager is not None:
d.addCallback(lambda _: self.lbry_file_manager.stop())
yield self.lbry_file_manager.stop()
if self.session is not None:
d.addCallback(lambda _: self.session.shut_down())
if self.stream_info_manager is not None:
d.addCallback(lambda _: self.stream_info_manager.stop())
def delete_test_env():
shutil.rmtree('client')
if os.path.exists("test_file"):
os.remove("test_file")
d.addCallback(lambda _: threads.deferToThread(delete_test_env))
return d
yield self.session.shut_down()
yield self.session.storage.stop()
yield threads.deferToThread(shutil.rmtree, self.db_dir)
if os.path.exists("test_file"):
os.remove("test_file")
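setUp now makes the temp dirs itself and tearDown becomes an inlineCallbacks coroutine. Reassembled from the interleaved lines (the exact placement of the storage.stop() call relative to the None guards is an assumption of this reconstruction):

    @defer.inlineCallbacks
    def tearDown(self):
        for lbry_file in self.lbry_file_manager.lbry_files:
            yield self.lbry_file_manager.delete_lbry_file(lbry_file)
        if self.lbry_file_manager is not None:
            yield self.lbry_file_manager.stop()
        if self.session is not None:
            yield self.session.shut_down()
        yield self.session.storage.stop()
        yield threads.deferToThread(shutil.rmtree, self.db_dir)
        if os.path.exists("test_file"):
            os.remove("test_file")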
def test_create_stream(self):
wallet = FakeWallet()
@@ -64,28 +61,18 @@ class TestStreamify(TestCase):
rate_limiter = DummyRateLimiter()
sd_identifier = StreamDescriptorIdentifier()
db_dir = "client"
blob_dir = os.path.join(db_dir, "blobfiles")
os.mkdir(db_dir)
os.mkdir(blob_dir)
self.session = Session(
conf.ADJUSTABLE_SETTINGS['data_rate'][1], db_dir=db_dir, node_id="abcd",
conf.ADJUSTABLE_SETTINGS['data_rate'][1], db_dir=self.db_dir, node_id="abcd",
peer_finder=peer_finder, hash_announcer=hash_announcer,
blob_dir=blob_dir, peer_port=5553,
blob_dir=self.blob_dir, peer_port=5553,
use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
blob_tracker_class=DummyBlobAvailabilityTracker,
is_generous=self.is_generous, external_ip="127.0.0.1"
)
self.stream_info_manager = DBEncryptedFileMetadataManager(db_dir)
self.lbry_file_manager = EncryptedFileManager(
self.session, self.stream_info_manager, sd_identifier)
self.lbry_file_manager = EncryptedFileManager(self.session, sd_identifier)
d = self.session.setup()
d.addCallback(lambda _: self.stream_info_manager.setup())
d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
d.addCallback(lambda _: self.lbry_file_manager.setup())
@@ -93,7 +80,7 @@ class TestStreamify(TestCase):
self.assertEqual(sd_info, test_create_stream_sd_file)
def verify_stream_descriptor_file(stream_hash):
d = get_sd_info(self.lbry_file_manager.stream_info_manager, stream_hash, True)
d = get_sd_info(self.session.storage, stream_hash, True)
d.addCallback(verify_equal)
return d
@@ -107,6 +94,7 @@ class TestStreamify(TestCase):
test_file = GenFile(5209343, b''.join([chr(i + 3) for i in xrange(0, 64, 6)]))
d = create_lbry_file(self.session, self.lbry_file_manager, "test_file", test_file,
key="0123456701234567", iv_generator=iv_generator())
d.addCallback(lambda lbry_file: lbry_file.stream_hash)
return d
d.addCallback(lambda _: create_stream())
@@ -121,57 +109,30 @@ class TestStreamify(TestCase):
rate_limiter = DummyRateLimiter()
sd_identifier = StreamDescriptorIdentifier()
db_dir = "client"
blob_dir = os.path.join(db_dir, "blobfiles")
os.mkdir(db_dir)
os.mkdir(blob_dir)
self.session = Session(
conf.ADJUSTABLE_SETTINGS['data_rate'][1], db_dir=db_dir, node_id="abcd",
conf.ADJUSTABLE_SETTINGS['data_rate'][1], db_dir=self.db_dir, node_id="abcd",
peer_finder=peer_finder, hash_announcer=hash_announcer,
blob_dir=blob_dir, peer_port=5553,
blob_dir=self.blob_dir, peer_port=5553,
use_upnp=False, rate_limiter=rate_limiter, wallet=wallet,
blob_tracker_class=DummyBlobAvailabilityTracker, external_ip="127.0.0.1"
)
self.stream_info_manager = DBEncryptedFileMetadataManager(self.session.db_dir)
self.lbry_file_manager = EncryptedFileManager(
self.session, self.stream_info_manager, sd_identifier)
def start_lbry_file(lbry_file):
logging.debug("Calling lbry_file.start()")
d = lbry_file.start()
return d
def combine_stream(info):
stream_hash, sd_hash = info
prm = self.session.payment_rate_manager
d = self.lbry_file_manager.add_lbry_file(stream_hash, sd_hash, prm)
d.addCallback(start_lbry_file)
def check_md5_sum():
f = open('test_file')
hashsum = MD5.new()
hashsum.update(f.read())
self.assertEqual(hashsum.hexdigest(), "68959747edc73df45e45db6379dd7b3b")
d.addCallback(lambda _: check_md5_sum())
return d
self.lbry_file_manager = EncryptedFileManager(self.session, sd_identifier)
@defer.inlineCallbacks
def create_stream():
test_file = GenFile(53209343, b''.join([chr(i + 5) for i in xrange(0, 64, 6)]))
stream_hash = yield create_lbry_file(self.session, self.lbry_file_manager, "test_file",
test_file, suggested_file_name="test_file")
sd_hash = yield publish_sd_blob(self.stream_info_manager, self.session.blob_manager,
stream_hash)
defer.returnValue((stream_hash, sd_hash))
lbry_file = yield create_lbry_file(self.session, self.lbry_file_manager, "test_file", test_file)
sd_hash = yield self.session.storage.get_sd_blob_hash_for_stream(lbry_file.stream_hash)
            self.assertEqual(lbry_file.sd_hash, sd_hash)
yield lbry_file.start()
f = open('test_file')
hashsum = MD5.new()
hashsum.update(f.read())
self.assertEqual(hashsum.hexdigest(), "68959747edc73df45e45db6379dd7b3b")
d = self.session.setup()
d.addCallback(lambda _: self.stream_info_manager.setup())
d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
d.addCallback(lambda _: self.lbry_file_manager.setup())
d.addCallback(lambda _: create_stream())
d.addCallback(combine_stream)
return d
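With publish_sd_blob gone, the second streamify test can check the sd hash straight from storage after creating the file. The new inner coroutine, reassembled from the lines above:

    @defer.inlineCallbacks
    def create_stream():
        test_file = GenFile(53209343, b''.join([chr(i + 5) for i in xrange(0, 64, 6)]))
        lbry_file = yield create_lbry_file(self.session, self.lbry_file_manager,
                                           "test_file", test_file)
        sd_hash = yield self.session.storage.get_sd_blob_hash_for_stream(
            lbry_file.stream_hash)
        self.assertEqual(lbry_file.sd_hash, sd_hash)
        yield lbry_file.start()
        f = open('test_file')
        hashsum = MD5.new()
        hashsum.update(f.read())
        self.assertEqual(hashsum.hexdigest(), "68959747edc73df45e45db6379dd7b3b")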

View file

@@ -173,6 +173,7 @@ class GenFile(io.RawIOBase):
self.read_so_far = 0
self.buff = b''
self.last_offset = 0
self.name = "."
def readable(self):
return True

View file

@@ -3,33 +3,38 @@ import shutil
import os
import random
import string
from twisted.trial import unittest
from twisted.internet import defer, threads
from lbrynet.tests.util import random_lbry_hash
from lbrynet.core.BlobManager import DiskBlobManager
from lbrynet.core.HashAnnouncer import DummyHashAnnouncer
from lbrynet.database.storage import SQLiteStorage
from lbrynet.core.Peer import Peer
from lbrynet import conf
from lbrynet.core.cryptoutils import get_lbry_hash_obj
from twisted.trial import unittest
from twisted.internet import defer
class BlobManagerTest(unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
conf.initialize_settings()
self.blob_dir = tempfile.mkdtemp()
self.db_dir = tempfile.mkdtemp()
hash_announcer = DummyHashAnnouncer()
self.bm = DiskBlobManager(hash_announcer, self.blob_dir, self.db_dir)
self.bm = DiskBlobManager(hash_announcer, self.blob_dir, SQLiteStorage(self.db_dir))
self.peer = Peer('somehost', 22)
yield self.bm.storage.setup()
@defer.inlineCallbacks
def tearDown(self):
self.bm.stop()
yield self.bm.stop()
yield self.bm.storage.stop()
# BlobFile will try to delete itself in _close_writer
# thus when calling rmtree we may get a FileNotFoundError
# for the blob file
shutil.rmtree(self.blob_dir, ignore_errors=True)
shutil.rmtree(self.db_dir)
yield threads.deferToThread(shutil.rmtree, self.blob_dir)
yield threads.deferToThread(shutil.rmtree, self.db_dir)
@defer.inlineCallbacks
def _create_and_add_blob(self, should_announce=False):
@@ -43,13 +48,13 @@ class BlobManagerTest(unittest.TestCase):
blob_hash = out
# create new blob
yield self.bm.storage.setup()
yield self.bm.setup()
blob = yield self.bm.get_blob(blob_hash, len(data))
writer, finished_d = yield blob.open_for_writing(self.peer)
yield writer.write(data)
yield self.bm.blob_completed(blob, should_announce)
yield self.bm.add_blob_to_upload_history(blob_hash, 'test', len(data))
# check to see if blob is there
self.assertTrue(os.path.isfile(os.path.join(self.blob_dir, blob_hash)))
@@ -81,7 +86,7 @@ class BlobManagerTest(unittest.TestCase):
self.assertFalse(os.path.isfile(os.path.join(self.blob_dir, blob_hash)))
blobs = yield self.bm.get_all_verified_blobs()
self.assertEqual(len(blobs), 0)
blobs = yield self.bm._get_all_blob_hashes()
blobs = yield self.bm.storage.get_all_blob_hashes()
self.assertEqual(len(blobs), 0)
self.assertFalse(blob_hash in self.bm.blobs)
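This test file doubles as a compact reference for the new blob manager lifecycle: DiskBlobManager takes a SQLiteStorage rather than a db directory, and the storage has its own setup/stop. A minimal sketch using only calls that appear in this diff:

    import tempfile
    from lbrynet import conf
    from lbrynet.core.BlobManager import DiskBlobManager
    from lbrynet.core.HashAnnouncer import DummyHashAnnouncer
    from lbrynet.database.storage import SQLiteStorage

    conf.initialize_settings()
    blob_dir, db_dir = tempfile.mkdtemp(), tempfile.mkdtemp()
    bm = DiskBlobManager(DummyHashAnnouncer(), blob_dir, SQLiteStorage(db_dir))
    d = bm.storage.setup()                        # open the db before using the manager
    d.addCallback(lambda _: bm.setup())
    d.addCallback(lambda _: bm.stop())            # tear down in the same order the
    d.addCallback(lambda _: bm.storage.stop())    # test's tearDown does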

View file

@@ -7,39 +7,62 @@ from decimal import Decimal
from collections import defaultdict
from twisted.trial import unittest
from twisted.internet import threads, defer
from lbrynet.database.storage import SQLiteStorage
from lbrynet.tests.mocks import FakeNetwork
from lbrynet.core.Error import InsufficientFundsError
from lbrynet.core.Wallet import Wallet, LBRYumWallet, ReservedPoints, InMemoryStorage
from lbrynet.core.Wallet import LBRYumWallet, ReservedPoints
from lbryum.commands import Commands
from lbryum.simple_config import SimpleConfig
from lbryschema.claim import ClaimDict
test_metadata = {
'license': 'NASA',
'version': '_0_1_0',
'description': 'test',
'language': 'en',
'author': 'test',
'title': 'test',
'nsfw': False,
'thumbnail': 'test'
'license': 'NASA',
'version': '_0_1_0',
'description': 'test',
'language': 'en',
'author': 'test',
'title': 'test',
'nsfw': False,
'thumbnail': 'test'
}
test_claim_dict = {
'version':'_0_0_1',
'claimType':'streamType',
'stream':{'metadata':test_metadata, 'version':'_0_0_1', 'source':
'version': '_0_0_1',
'claimType': 'streamType',
'stream': {'metadata': test_metadata, 'version': '_0_0_1', 'source':
{'source': '8655f713819344980a9a0d67b198344e2c462c90f813e86f'
'0c63789ab0868031f25c54d0bb31af6658e997e2041806eb',
'sourceType': 'lbry_sd_hash', 'contentType': 'video/mp4', 'version': '_0_0_1'},
}}
}}
class MocLbryumWallet(Wallet):
def __init__(self):
class MocLbryumWallet(LBRYumWallet):
def __init__(self, db_dir):
LBRYumWallet.__init__(self, SQLiteStorage(db_dir), SimpleConfig(
{"lbryum_path": db_dir, "wallet_path": os.path.join(db_dir, "testwallet")}
))
self.db_dir = db_dir
self.wallet_balance = Decimal(10.0)
self.total_reserved_points = Decimal(0.0)
self.queued_payments = defaultdict(Decimal)
self._storage = InMemoryStorage()
self.network = FakeNetwork()
assert self.config.get_wallet_path() == os.path.join(self.db_dir, "testwallet")
@defer.inlineCallbacks
def setup(self, password=None, seed=None):
yield self.storage.setup()
seed = seed or "travel nowhere air position hill peace suffer parent beautiful rise " \
"blood power home crumble teach"
storage = lbryum.wallet.WalletStorage(self.config.get_wallet_path())
self.wallet = lbryum.wallet.NewWallet(storage)
self.wallet.add_seed(seed, password)
self.wallet.create_master_keys(password)
self.wallet.create_main_account()
@defer.inlineCallbacks
def stop(self):
yield self.storage.stop()
yield threads.deferToThread(shutil.rmtree, self.db_dir)
def get_least_used_address(self, account=None, for_change=False, max_count=100):
return defer.succeed(None)
@@ -51,82 +74,61 @@ class MocLbryumWallet(Wallet):
return defer.succeed(True)
class MocEncryptedWallet(LBRYumWallet):
def __init__(self):
LBRYumWallet.__init__(self, InMemoryStorage())
self.wallet_balance = Decimal(10.0)
self.total_reserved_points = Decimal(0.0)
self.queued_payments = defaultdict(Decimal)
class WalletTest(unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
wallet = MocEncryptedWallet()
seed_text = "travel nowhere air position hill peace suffer parent beautiful rise " \
"blood power home crumble teach"
password = "secret"
user_dir = tempfile.mkdtemp()
path = os.path.join(user_dir, "somewallet")
storage = lbryum.wallet.WalletStorage(path)
wallet.wallet = lbryum.wallet.NewWallet(storage)
wallet.wallet.add_seed(seed_text, password)
wallet.wallet.create_master_keys(password)
wallet.wallet.create_main_account()
self.wallet_path = path
self.enc_wallet = wallet
self.enc_wallet_password = password
self.wallet = MocLbryumWallet(user_dir)
yield self.wallet.setup()
self.assertEqual(self.wallet.get_balance(), Decimal(10))
def tearDown(self):
shutil.rmtree(os.path.dirname(self.wallet_path))
return self.wallet.stop()
def test_failed_send_name_claim(self):
def not_enough_funds_send_name_claim(self, name, val, amount):
claim_out = {'success':False, 'reason':'Not enough funds'}
claim_out = {'success': False, 'reason': 'Not enough funds'}
return claim_out
MocLbryumWallet._send_name_claim = not_enough_funds_send_name_claim
wallet = MocLbryumWallet()
d = wallet.claim_name('test', 1, test_claim_dict)
self.wallet._send_name_claim = not_enough_funds_send_name_claim
d = self.wallet.claim_name('test', 1, test_claim_dict)
self.assertFailure(d, Exception)
return d
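One behavioral detail in these rewritten tests: stubs are now attached to the wallet instance (self.wallet._send_name_claim = ...) rather than to the MocLbryumWallet class, so a stub cannot leak into later tests. A generic, self-contained illustration of the difference (names here are hypothetical, not lbrynet APIs):

    class Stubbed(object):
        def _send(self, amount):
            raise NotImplementedError

    a, b = Stubbed(), Stubbed()
    a._send = lambda amount: {'success': True}   # only `a` is patched
    assert a._send(1) == {'success': True}
    try:
        b._send(1)                               # `b` keeps the real method
    except NotImplementedError:
        pass
    # Stubbed._send = ... would have patched every instance, past and future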
@defer.inlineCallbacks
def test_successful_send_name_claim(self):
expected_claim_out = {
"claim_id": "f43dc06256a69988bdbea09a58c80493ba15dcfa",
"fee": "0.00012",
"nout": 0,
"success": True,
"txid": "6f8180002ef4d21f5b09ca7d9648a54d213c666daf8639dc283e2fd47450269e"
}
def check_out(claim_out):
self.assertTrue('success' not in claim_out)
self.assertEqual(expected_claim_out['claim_id'], claim_out['claim_id'])
self.assertEqual(expected_claim_out['fee'], claim_out['fee'])
self.assertEqual(expected_claim_out['nout'], claim_out['nout'])
self.assertEqual(expected_claim_out['txid'], claim_out['txid'])
"txid": "6f8180002ef4d21f5b09ca7d9648a54d213c666daf8639dc283e2fd47450269e",
"value": ClaimDict.load_dict(test_claim_dict).serialized.encode('hex'),
"claim_address": "",
"channel_claim_id": "",
"channel_name": ""
}
def success_send_name_claim(self, name, val, amount, certificate_id=None,
claim_address=None, change_address=None):
return expected_claim_out
return defer.succeed(expected_claim_out)
MocLbryumWallet._send_name_claim = success_send_name_claim
wallet = MocLbryumWallet()
d = wallet.claim_name('test', 1, test_claim_dict)
d.addCallback(lambda claim_out: check_out(claim_out))
return d
self.wallet._send_name_claim = success_send_name_claim
claim_out = yield self.wallet.claim_name('test', 1, test_claim_dict)
self.assertTrue('success' not in claim_out)
self.assertEqual(expected_claim_out['claim_id'], claim_out['claim_id'])
self.assertEqual(expected_claim_out['fee'], claim_out['fee'])
self.assertEqual(expected_claim_out['nout'], claim_out['nout'])
self.assertEqual(expected_claim_out['txid'], claim_out['txid'])
self.assertEqual(expected_claim_out['value'], claim_out['value'])
@defer.inlineCallbacks
def test_failed_support(self):
def failed_support_claim(self, name, claim_id, amount):
claim_out = {'success':False, 'reason':'Not enough funds'}
return threads.deferToThread(lambda: claim_out)
MocLbryumWallet._support_claim = failed_support_claim
wallet = MocLbryumWallet()
d = wallet.support_claim('test', "f43dc06256a69988bdbea09a58c80493ba15dcfa", 1)
self.assertFailure(d, Exception)
return d
# wallet.support_claim will check the balance before calling _support_claim
        try:
            yield self.wallet.support_claim('test', "f43dc06256a69988bdbea09a58c80493ba15dcfa", 1000)
            raise Exception("InsufficientFundsError was not raised")
        except InsufficientFundsError:
            pass
    def test_successful_support(self):
expected_support_out = {
@@ -136,30 +138,32 @@ class WalletTest(unittest.TestCase):
"txid": "11030a76521e5f552ca87ad70765d0cc52e6ea4c0dc0063335e6cf2a9a85085f"
}
def check_out(claim_out):
self.assertTrue('success' not in claim_out)
self.assertEqual(expected_support_out['fee'], claim_out['fee'])
self.assertEqual(expected_support_out['nout'], claim_out['nout'])
self.assertEqual(expected_support_out['txid'], claim_out['txid'])
expected_result = {
"fee": 0.000129,
"nout": 0,
"txid": "11030a76521e5f552ca87ad70765d0cc52e6ea4c0dc0063335e6cf2a9a85085f"
}
def success_support_claim(self, name, val, amount):
return threads.deferToThread(lambda: expected_support_out)
MocLbryumWallet._support_claim = success_support_claim
wallet = MocLbryumWallet()
d = wallet.support_claim('test', "f43dc06256a69988bdbea09a58c80493ba15dcfa", 1)
def check_out(claim_out):
self.assertDictEqual(expected_result, claim_out)
def success_support_claim(name, val, amount):
return defer.succeed(expected_support_out)
self.wallet._support_claim = success_support_claim
d = self.wallet.support_claim('test', "f43dc06256a69988bdbea09a58c80493ba15dcfa", 1)
d.addCallback(lambda claim_out: check_out(claim_out))
return d
@defer.inlineCallbacks
def test_failed_abandon(self):
def failed_abandon_claim(self, claim_outpoint):
claim_out = {'success':False, 'reason':'Not enough funds'}
return threads.deferToThread(lambda: claim_out)
MocLbryumWallet._abandon_claim = failed_abandon_claim
wallet = MocLbryumWallet()
d = wallet.abandon_claim("f43dc06256a69988bdbea09a58c80493ba15dcfa", None, None)
self.assertFailure(d, Exception)
return d
try:
yield self.wallet.abandon_claim("f43dc06256a69988bdbea09a58c80493ba15dcfa", None, None)
raise Exception("test failed")
except Exception as err:
self.assertSubstring("claim not found", err.message)
@defer.inlineCallbacks
def test_successful_abandon(self):
expected_abandon_out = {
"fee": "0.000096",
@@ -167,56 +171,57 @@ class WalletTest(unittest.TestCase):
"txid": "0578c161ad8d36a7580c557d7444f967ea7f988e194c20d0e3c42c3cabf110dd"
}
def check_out(claim_out):
self.assertTrue('success' not in claim_out)
self.assertEqual(expected_abandon_out['fee'], claim_out['fee'])
self.assertEqual(expected_abandon_out['txid'], claim_out['txid'])
expected_abandon_result = {
"fee": 0.000096,
"txid": "0578c161ad8d36a7580c557d7444f967ea7f988e194c20d0e3c42c3cabf110dd"
}
def success_abandon_claim(self, claim_outpoint, txid, nout):
return threads.deferToThread(lambda: expected_abandon_out)
def success_abandon_claim(claim_outpoint, txid, nout):
return defer.succeed(expected_abandon_out)
MocLbryumWallet._abandon_claim = success_abandon_claim
wallet = MocLbryumWallet()
d = wallet.abandon_claim("f43dc06256a69988bdbea09a58c80493ba15dcfa", None, None)
d.addCallback(lambda claim_out: check_out(claim_out))
return d
self.wallet._abandon_claim = success_abandon_claim
claim_out = yield self.wallet.abandon_claim("f43dc06256a69988bdbea09a58c80493ba15dcfa", None, None)
self.assertDictEqual(expected_abandon_result, claim_out)
@defer.inlineCallbacks
def test_point_reservation_and_balance(self):
# check that point reservations and cancellation changes the balance
# properly
def update_balance():
return defer.succeed(5)
wallet = MocLbryumWallet()
wallet._update_balance = update_balance
d = wallet.update_balance()
self.wallet._update_balance = update_balance
yield self.wallet.update_balance()
self.assertEqual(5, self.wallet.get_balance())
# test point reservation
d.addCallback(lambda _: self.assertEqual(5, wallet.get_balance()))
d.addCallback(lambda _: wallet.reserve_points('testid', 2))
d.addCallback(lambda _: self.assertEqual(3, wallet.get_balance()))
d.addCallback(lambda _: self.assertEqual(2, wallet.total_reserved_points))
yield self.wallet.reserve_points('testid', 2)
self.assertEqual(3, self.wallet.get_balance())
self.assertEqual(2, self.wallet.total_reserved_points)
# test reserved points cancellation
d.addCallback(lambda _: wallet.cancel_point_reservation(ReservedPoints('testid', 2)))
d.addCallback(lambda _: self.assertEqual(5, wallet.get_balance()))
d.addCallback(lambda _: self.assertEqual(0, wallet.total_reserved_points))
yield self.wallet.cancel_point_reservation(ReservedPoints('testid', 2))
self.assertEqual(5, self.wallet.get_balance())
self.assertEqual(0, self.wallet.total_reserved_points)
# test point sending
d.addCallback(lambda _: wallet.reserve_points('testid', 2))
d.addCallback(lambda reserve_points: wallet.send_points_to_address(reserve_points, 1))
d.addCallback(lambda _: self.assertEqual(3, wallet.get_balance()))
reserve_points = yield self.wallet.reserve_points('testid', 2)
yield self.wallet.send_points_to_address(reserve_points, 1)
self.assertEqual(3, self.wallet.get_balance())
# test failed point reservation
d.addCallback(lambda _: wallet.reserve_points('testid', 4))
d.addCallback(lambda out: self.assertEqual(None, out))
return d
out = yield self.wallet.reserve_points('testid', 4)
self.assertEqual(None, out)
def test_point_reservation_and_claim(self):
# check that claims take into consideration point reservations
def update_balance():
return defer.succeed(5)
wallet = MocLbryumWallet()
wallet._update_balance = update_balance
d = wallet.update_balance()
d.addCallback(lambda _: self.assertEqual(5, wallet.get_balance()))
d.addCallback(lambda _: wallet.reserve_points('testid', 2))
d.addCallback(lambda _: wallet.claim_name('test', 4, test_claim_dict))
self.wallet._update_balance = update_balance
d = self.wallet.update_balance()
d.addCallback(lambda _: self.assertEqual(5, self.wallet.get_balance()))
d.addCallback(lambda _: self.wallet.reserve_points('testid', 2))
d.addCallback(lambda _: self.wallet.claim_name('test', 4, test_claim_dict))
self.assertFailure(d, InsufficientFundsError)
return d
@@ -224,38 +229,45 @@ class WalletTest(unittest.TestCase):
# check that supports take into consideration point reservations
def update_balance():
return defer.succeed(5)
wallet = MocLbryumWallet()
wallet._update_balance = update_balance
d = wallet.update_balance()
d.addCallback(lambda _: self.assertEqual(5, wallet.get_balance()))
d.addCallback(lambda _: wallet.reserve_points('testid', 2))
d.addCallback(lambda _: wallet.support_claim(
self.wallet._update_balance = update_balance
d = self.wallet.update_balance()
d.addCallback(lambda _: self.assertEqual(5, self.wallet.get_balance()))
d.addCallback(lambda _: self.wallet.reserve_points('testid', 2))
d.addCallback(lambda _: self.wallet.support_claim(
'test', "f43dc06256a69988bdbea09a58c80493ba15dcfa", 4))
self.assertFailure(d, InsufficientFundsError)
return d
class WalletEncryptionTests(unittest.TestCase):
def setUp(self):
user_dir = tempfile.mkdtemp()
self.wallet = MocLbryumWallet(user_dir)
return self.wallet.setup(password="password")
def tearDown(self):
return self.wallet.stop()
def test_unlock_wallet(self):
wallet = self.enc_wallet
wallet._cmd_runner = Commands(
wallet.config, wallet.wallet, wallet.network, None, self.enc_wallet_password)
cmd_runner = wallet.get_cmd_runner()
cmd_runner.unlock_wallet(self.enc_wallet_password)
self.wallet._cmd_runner = Commands(
self.wallet.config, self.wallet.wallet, self.wallet.network, None, "password")
cmd_runner = self.wallet.get_cmd_runner()
cmd_runner.unlock_wallet("password")
self.assertIsNone(cmd_runner.new_password)
self.assertEqual(cmd_runner._password, self.enc_wallet_password)
self.assertEqual(cmd_runner._password, "password")
def test_encrypt_decrypt_wallet(self):
wallet = self.enc_wallet
wallet._cmd_runner = Commands(
wallet.config, wallet.wallet, wallet.network, None, self.enc_wallet_password)
wallet.encrypt_wallet("secret2", False)
wallet.decrypt_wallet()
self.wallet._cmd_runner = Commands(
self.wallet.config, self.wallet.wallet, self.wallet.network, None, "password")
self.wallet.encrypt_wallet("secret2", False)
self.wallet.decrypt_wallet()
def test_update_password_keyring_off(self):
wallet = self.enc_wallet
wallet.config.use_keyring = False
wallet._cmd_runner = Commands(
wallet.config, wallet.wallet, wallet.network, None, self.enc_wallet_password)
self.wallet.config.use_keyring = False
self.wallet._cmd_runner = Commands(
self.wallet.config, self.wallet.wallet, self.wallet.network, None, "password")
# no keyring available, so ValueError is expected
with self.assertRaises(ValueError):
wallet.encrypt_wallet("secret2", True)
self.wallet.encrypt_wallet("secret2", True)

View file

@@ -0,0 +1,332 @@
import os
import shutil
import tempfile
import logging
from copy import deepcopy
from twisted.internet import defer
from twisted.trial import unittest
from lbrynet import conf
from lbrynet.database.storage import SQLiteStorage, open_file_for_writing
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloader
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.tests.util import random_lbry_hash
log = logging.getLogger()
def blob_info_dict(blob_info):
info = {
"length": blob_info.length,
"blob_num": blob_info.blob_num,
"iv": blob_info.iv
}
if blob_info.length:
info['blob_hash'] = blob_info.blob_hash
return info
fake_claim_info = {
'name': "test",
'claim_id': 'deadbeef' * 5,
'address': "bT6wc54qiUUYt34HQF9wnW8b2o2yQTXf2S",
'claim_sequence': 1,
'value': {
"version": "_0_0_1",
"claimType": "streamType",
"stream": {
"source": {
"source": 'deadbeef' * 12,
"version": "_0_0_1",
"contentType": "video/mp4",
"sourceType": "lbry_sd_hash"
},
"version": "_0_0_1",
"metadata": {
"license": "LBRY inc",
"description": "What is LBRY? An introduction with Alex Tabarrok",
"language": "en",
"title": "What is LBRY?",
"author": "Samuel Bryan",
"version": "_0_1_0",
"nsfw": False,
"licenseUrl": "",
"preview": "",
"thumbnail": "https://s3.amazonaws.com/files.lbry.io/logo.png"
}
}
},
'height': 10000,
'amount': 1.0,
'effective_amount': 1.0,
'nout': 0,
'txid': "deadbeef" * 8,
'supports': [],
'channel_claim_id': None,
'channel_name': None
}
class FakeAnnouncer(object):
def __init__(self):
self._queue_size = 0
def hash_queue_size(self):
return self._queue_size
class MocSession(object):
def __init__(self, storage):
self.storage = storage
class StorageTest(unittest.TestCase):
maxDiff = 5000
@defer.inlineCallbacks
def setUp(self):
conf.initialize_settings()
self.db_dir = tempfile.mkdtemp()
self.storage = SQLiteStorage(self.db_dir)
yield self.storage.setup()
@defer.inlineCallbacks
def tearDown(self):
yield self.storage.stop()
shutil.rmtree(self.db_dir)
@defer.inlineCallbacks
def store_fake_blob(self, blob_hash, blob_length=100, next_announce=0, should_announce=0):
yield self.storage.add_completed_blob(blob_hash, blob_length, next_announce,
should_announce)
yield self.storage.set_blob_status(blob_hash, "finished")
@defer.inlineCallbacks
def store_fake_stream_blob(self, stream_hash, blob_hash, blob_num, length=100, iv="DEADBEEF"):
blob_info = {
'blob_hash': blob_hash, 'blob_num': blob_num, 'iv': iv
}
if length:
blob_info['length'] = length
yield self.storage.add_blobs_to_stream(stream_hash, [blob_info])
@defer.inlineCallbacks
    def store_fake_stream(self, stream_hash, sd_hash, file_name="fake_file", key="DEADBEEF",
                          blobs=None):
        # a mutable default argument would be shared across calls; default to None instead
        yield self.storage.store_stream(stream_hash, sd_hash, file_name, key,
                                        file_name, blobs or [])
@defer.inlineCallbacks
def make_and_store_fake_stream(self, blob_count=2, stream_hash=None, sd_hash=None):
stream_hash = stream_hash or random_lbry_hash()
sd_hash = sd_hash or random_lbry_hash()
blobs = {
i + 1: random_lbry_hash() for i in range(blob_count)
}
yield self.store_fake_blob(sd_hash)
for blob in blobs.itervalues():
yield self.store_fake_blob(blob)
yield self.store_fake_stream(stream_hash, sd_hash)
for pos, blob in sorted(blobs.iteritems(), key=lambda x: x[0]):
yield self.store_fake_stream_blob(stream_hash, blob, pos)
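These helpers compose: make_and_store_fake_stream writes an sd blob, blob_count data blobs, the stream row, and one stream-blob row per blob. A short usage sketch in the inlineCallbacks style this file uses (the test name is hypothetical):

    @defer.inlineCallbacks
    def test_fake_stream_roundtrip(self):
        stream_hash = random_lbry_hash()
        yield self.make_and_store_fake_stream(blob_count=3, stream_hash=stream_hash)
        # blob rows come back as objects carrying a .blob_hash attribute
        blobs = yield self.storage.get_blobs_for_stream(stream_hash)
        self.assertEqual(3, len([b for b in blobs if b.blob_hash is not None]))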
class TestSetup(StorageTest):
@defer.inlineCallbacks
def test_setup(self):
files = yield self.storage.get_all_lbry_files()
self.assertEqual(len(files), 0)
blobs = yield self.storage.get_all_blob_hashes()
self.assertEqual(len(blobs), 0)
class BlobStorageTests(StorageTest):
@defer.inlineCallbacks
def test_store_blob(self):
blob_hash = random_lbry_hash()
yield self.store_fake_blob(blob_hash)
blob_hashes = yield self.storage.get_all_blob_hashes()
self.assertEqual(blob_hashes, [blob_hash])
@defer.inlineCallbacks
def test_delete_blob(self):
blob_hash = random_lbry_hash()
yield self.store_fake_blob(blob_hash)
blob_hashes = yield self.storage.get_all_blob_hashes()
self.assertEqual(blob_hashes, [blob_hash])
yield self.storage.delete_blobs_from_db(blob_hashes)
blob_hashes = yield self.storage.get_all_blob_hashes()
self.assertEqual(blob_hashes, [])
class StreamStorageTests(StorageTest):
@defer.inlineCallbacks
def test_store_stream(self, stream_hash=None):
stream_hash = stream_hash or random_lbry_hash()
sd_hash = random_lbry_hash()
blob1 = random_lbry_hash()
blob2 = random_lbry_hash()
yield self.store_fake_blob(sd_hash)
yield self.store_fake_blob(blob1)
yield self.store_fake_blob(blob2)
yield self.store_fake_stream(stream_hash, sd_hash)
yield self.store_fake_stream_blob(stream_hash, blob1, 1)
yield self.store_fake_stream_blob(stream_hash, blob2, 2)
stream_blobs = yield self.storage.get_blobs_for_stream(stream_hash)
stream_blob_hashes = [b.blob_hash for b in stream_blobs]
self.assertListEqual(stream_blob_hashes, [blob1, blob2])
blob_hashes = yield self.storage.get_all_blob_hashes()
self.assertSetEqual(set(blob_hashes), {sd_hash, blob1, blob2})
stream_blobs = yield self.storage.get_blobs_for_stream(stream_hash)
stream_blob_hashes = [b.blob_hash for b in stream_blobs]
self.assertListEqual(stream_blob_hashes, [blob1, blob2])
yield self.storage.set_should_announce(sd_hash, 1, 1)
yield self.storage.set_should_announce(blob1, 1, 1)
should_announce_count = yield self.storage.count_should_announce_blobs()
self.assertEqual(should_announce_count, 2)
should_announce_hashes = yield self.storage.get_blobs_to_announce(FakeAnnouncer())
self.assertSetEqual(set(should_announce_hashes), {sd_hash, blob1})
stream_hashes = yield self.storage.get_all_streams()
self.assertListEqual(stream_hashes, [stream_hash])
@defer.inlineCallbacks
def test_delete_stream(self):
stream_hash = random_lbry_hash()
yield self.test_store_stream(stream_hash)
yield self.storage.delete_stream(stream_hash)
stream_hashes = yield self.storage.get_all_streams()
self.assertListEqual(stream_hashes, [])
stream_blobs = yield self.storage.get_blobs_for_stream(stream_hash)
self.assertListEqual(stream_blobs, [])
blob_hashes = yield self.storage.get_all_blob_hashes()
self.assertListEqual(blob_hashes, [])
class FileStorageTests(StorageTest):
@defer.inlineCallbacks
def test_setup_output(self):
file_name = 'encrypted_file_saver_test.tmp'
self.assertFalse(os.path.isfile(file_name))
written_to = yield open_file_for_writing(self.db_dir, file_name)
        self.assertEqual(written_to, file_name)
self.assertTrue(os.path.isfile(os.path.join(self.db_dir, file_name)))
@defer.inlineCallbacks
def test_store_file(self):
session = MocSession(self.storage)
session.db_dir = self.db_dir
sd_identifier = StreamDescriptorIdentifier()
download_directory = self.db_dir
manager = EncryptedFileManager(session, sd_identifier)
out = yield manager.session.storage.get_all_lbry_files()
self.assertEqual(len(out), 0)
stream_hash = random_lbry_hash()
sd_hash = random_lbry_hash()
blob1 = random_lbry_hash()
blob2 = random_lbry_hash()
yield self.store_fake_blob(sd_hash)
yield self.store_fake_blob(blob1)
yield self.store_fake_blob(blob2)
yield self.store_fake_stream(stream_hash, sd_hash)
yield self.store_fake_stream_blob(stream_hash, blob1, 1)
yield self.store_fake_stream_blob(stream_hash, blob2, 2)
blob_data_rate = 0
file_name = "test file"
out = yield manager.session.storage.save_published_file(
stream_hash, file_name, download_directory, blob_data_rate
)
rowid = yield manager.session.storage.get_rowid_for_stream_hash(stream_hash)
self.assertEqual(out, rowid)
files = yield manager.session.storage.get_all_lbry_files()
self.assertEqual(1, len(files))
status = yield manager.session.storage.get_lbry_file_status(rowid)
self.assertEqual(status, ManagedEncryptedFileDownloader.STATUS_STOPPED)
running = ManagedEncryptedFileDownloader.STATUS_RUNNING
yield manager.session.storage.change_file_status(rowid, running)
status = yield manager.session.storage.get_lbry_file_status(rowid)
self.assertEqual(status, ManagedEncryptedFileDownloader.STATUS_RUNNING)
class ContentClaimStorageTests(StorageTest):
@defer.inlineCallbacks
def test_store_content_claim(self):
session = MocSession(self.storage)
session.db_dir = self.db_dir
sd_identifier = StreamDescriptorIdentifier()
download_directory = self.db_dir
manager = EncryptedFileManager(session, sd_identifier)
out = yield manager.session.storage.get_all_lbry_files()
self.assertEqual(len(out), 0)
stream_hash = random_lbry_hash()
sd_hash = fake_claim_info['value']['stream']['source']['source']
# test that we can associate a content claim to a file
# use the generated sd hash in the fake claim
fake_outpoint = "%s:%i" % (fake_claim_info['txid'], fake_claim_info['nout'])
yield self.make_and_store_fake_stream(blob_count=2, stream_hash=stream_hash, sd_hash=sd_hash)
blob_data_rate = 0
file_name = "test file"
yield manager.session.storage.save_published_file(
stream_hash, file_name, download_directory, blob_data_rate
)
yield self.storage.save_claim(fake_claim_info)
yield self.storage.save_content_claim(stream_hash, fake_outpoint)
stored_content_claim = yield self.storage.get_content_claim(stream_hash)
self.assertDictEqual(stored_content_claim, fake_claim_info)
        # test that we can't associate a claim update that points at a different stream with the file
second_stream_hash, second_sd_hash = random_lbry_hash(), random_lbry_hash()
yield self.make_and_store_fake_stream(blob_count=2, stream_hash=second_stream_hash, sd_hash=second_sd_hash)
try:
yield self.storage.save_content_claim(second_stream_hash, fake_outpoint)
raise Exception("test failed")
except Exception as err:
self.assertTrue(err.message == "stream mismatch")
        # test that we can associate a new claim update containing the same stream with the file
update_info = deepcopy(fake_claim_info)
update_info['txid'] = "beef0000" * 12
update_info['nout'] = 0
second_outpoint = "%s:%i" % (update_info['txid'], update_info['nout'])
yield self.storage.save_claim(update_info)
yield self.storage.save_content_claim(stream_hash, second_outpoint)
update_info_result = yield self.storage.get_content_claim(stream_hash)
self.assertDictEqual(update_info_result, update_info)
# test that we can't associate an update with a mismatching claim id
invalid_update_info = deepcopy(fake_claim_info)
invalid_update_info['txid'] = "beef0001" * 12
invalid_update_info['nout'] = 0
invalid_update_info['claim_id'] = "beef0002" * 5
invalid_update_outpoint = "%s:%i" % (invalid_update_info['txid'], invalid_update_info['nout'])
yield self.storage.save_claim(invalid_update_info)
try:
yield self.storage.save_content_claim(stream_hash, invalid_update_outpoint)
raise Exception("test failed")
except Exception as err:
self.assertTrue(err.message == "invalid stream update")
current_claim_info = yield self.storage.get_content_claim(stream_hash)
# this should still be the previous update
self.assertDictEqual(current_claim_info, update_info)
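Condensed, the storage contract this test pins down looks as follows; every call and both error messages are taken from the test body above, and outpoints are formatted as txid:nout:

    yield storage.save_claim(claim_info)                     # insert the claim row
    yield storage.save_content_claim(stream_hash, outpoint)  # bind claim to stream
    claim = yield storage.get_content_claim(stream_hash)     # read the binding back
    # an update whose claim points at a different stream fails with "stream mismatch";
    # an update with a different claim_id fails with "invalid stream update", and the
    # previously stored binding is left untouched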

View file

@@ -1,34 +0,0 @@
import os.path
from twisted.trial import unittest
from twisted.internet import defer
from lbrynet.lbry_file.client.EncryptedFileDownloader import EncryptedFileSaver
class TestEncryptedFileSaver(unittest.TestCase):
@defer.inlineCallbacks
def test_setup_output(self):
file_name = 'encrypted_file_saver_test.tmp'
file_name_hex = file_name.encode('hex')
self.assertFalse(os.path.isfile(file_name))
# create file in the temporary trial folder
stream_hash = ''
peer_finder = None
rate_limiter = None
blob_manager = None
stream_info_manager = None
payment_rate_manager = None
wallet = None
download_directory = '.'
key = ''
saver = EncryptedFileSaver(stream_hash, peer_finder, rate_limiter, blob_manager,
stream_info_manager, payment_rate_manager, wallet,
download_directory, key,
file_name_hex, file_name_hex)
yield saver._setup_output()
self.assertTrue(os.path.isfile(file_name))
saver._close_output()

View file

@@ -1,78 +0,0 @@
import tempfile
import shutil
from twisted.trial import unittest
from twisted.internet import defer
from lbrynet.lbry_file.EncryptedFileMetadataManager import DBEncryptedFileMetadataManager
from lbrynet.cryptstream.CryptBlob import CryptBlobInfo
from lbrynet.core.Error import NoSuchStreamHash
from lbrynet.tests.util import random_lbry_hash
class DBEncryptedFileMetadataManagerTest(unittest.TestCase):
def setUp(self):
self.db_dir = tempfile.mkdtemp()
self.manager = DBEncryptedFileMetadataManager(self.db_dir)
def tearDown(self):
self.manager.stop()
shutil.rmtree(self.db_dir)
@defer.inlineCallbacks
def test_basic(self):
yield self.manager.setup()
out = yield self.manager.get_all_streams()
self.assertEqual(len(out), 0)
stream_hash = random_lbry_hash()
file_name = 'file_name'
key = 'key'
suggested_file_name = 'sug_file_name'
blob1 = CryptBlobInfo(random_lbry_hash(), 0, 10, 1)
blob2 = CryptBlobInfo(random_lbry_hash(), 0, 10, 1)
blobs = [blob1, blob2]
# save stream
yield self.manager.save_stream(stream_hash, file_name, key, suggested_file_name, blobs)
out = yield self.manager.get_stream_info(stream_hash)
self.assertEqual(key, out[0])
self.assertEqual(file_name, out[1])
self.assertEqual(suggested_file_name, out[2])
out = yield self.manager.check_if_stream_exists(stream_hash)
self.assertTrue(out)
out = yield self.manager.get_blobs_for_stream(stream_hash)
self.assertEqual(2, len(out))
out = yield self.manager.get_all_streams()
self.assertEqual(1, len(out))
# add a blob to stream
blob3 = CryptBlobInfo(random_lbry_hash(), 0, 10, 1)
blobs = [blob3]
out = yield self.manager.add_blobs_to_stream(stream_hash, blobs)
out = yield self.manager.get_blobs_for_stream(stream_hash)
self.assertEqual(3, len(out))
out = yield self.manager.get_stream_of_blob(blob3.blob_hash)
self.assertEqual(stream_hash, out)
# check non existing stream
with self.assertRaises(NoSuchStreamHash):
out = yield self.manager.get_stream_info(random_lbry_hash())
# check save of sd blob hash
sd_blob_hash = random_lbry_hash()
yield self.manager.save_sd_blob_hash_to_stream(stream_hash, sd_blob_hash)
out = yield self.manager.get_sd_blob_hashes_for_stream(stream_hash)
self.assertEqual(1, len(out))
self.assertEqual(sd_blob_hash, out[0])
out = yield self.manager.get_stream_hash_for_sd_hash(sd_blob_hash)
self.assertEqual(stream_hash, out)
# delete stream
yield self.manager.delete_stream(stream_hash)
out = yield self.manager.check_if_stream_exists(stream_hash)
self.assertFalse(out)

View file

@@ -4,6 +4,7 @@ import mock
from twisted.trial import unittest
from twisted.internet import defer
from lbrynet.database.storage import SQLiteStorage
from lbrynet.core import BlobManager
from lbrynet.core import Session
from lbrynet.core.server import DHTHashAnnouncer
@@ -21,48 +22,60 @@ def iv_generator():
class CreateEncryptedFileTest(unittest.TestCase):
timeout = 5
@defer.inlineCallbacks
def setUp(self):
mocks.mock_conf_settings(self)
self.tmp_db_dir, self.tmp_blob_dir = mk_db_and_blob_dir()
self.session = mock.Mock(spec=Session.Session)(None, None)
self.session.payment_rate_manager.min_blob_data_payment_rate = 0
hash_announcer = DHTHashAnnouncer.DHTHashAnnouncer(None, None)
self.blob_manager = BlobManager.DiskBlobManager(
hash_announcer, self.tmp_blob_dir, SQLiteStorage(self.tmp_db_dir))
self.session.blob_manager = self.blob_manager
self.session.storage = self.session.blob_manager.storage
self.file_manager = EncryptedFileManager.EncryptedFileManager(self.session, object())
yield self.session.blob_manager.storage.setup()
yield self.session.blob_manager.setup()
@defer.inlineCallbacks
def tearDown(self):
yield self.blob_manager.stop()
yield self.session.storage.stop()
rm_db_and_blob_dir(self.tmp_db_dir, self.tmp_blob_dir)
@defer.inlineCallbacks
def create_file(self, filename):
session = mock.Mock(spec=Session.Session)(None, None)
hash_announcer = DHTHashAnnouncer.DHTHashAnnouncer(None, None)
self.blob_manager = BlobManager.DiskBlobManager(
hash_announcer, self.tmp_blob_dir, self.tmp_db_dir)
session.blob_manager = self.blob_manager
yield session.blob_manager.setup()
session.db_dir = self.tmp_db_dir
manager = mock.Mock(spec=EncryptedFileManager.EncryptedFileManager)()
handle = mocks.GenFile(3*MB, '1')
key = '2'*AES.block_size
out = yield EncryptedFileCreator.create_lbry_file(
session, manager, filename, handle, key, iv_generator())
out = yield EncryptedFileCreator.create_lbry_file(self.session, self.file_manager, filename, handle,
key, iv_generator())
defer.returnValue(out)
    @defer.inlineCallbacks
    def test_can_create_file(self):
        expected_stream_hash = ('41e6b247d923d191b154fb6f1b8529d6ddd6a73d65c357b1acb7'
                                '42dd83151fb66393a7709e9f346260a4f4db6de10c25')
        expected_stream_hash = "41e6b247d923d191b154fb6f1b8529d6ddd6a73d65c35" \
                               "7b1acb742dd83151fb66393a7709e9f346260a4f4db6de10c25"
        expected_sd_hash = "bc435ae0c4659635e6514e05bb1fcd0d365b234f6f0e78002" \
                           "d2576ff84a0b8710a9847757a9aa8cbeda5a8e1aeafa48b"
        filename = 'test.file'
        stream_hash = yield self.create_file(filename)
        self.assertEqual(expected_stream_hash, stream_hash)
        lbry_file = yield self.create_file(filename)
        sd_hash = yield self.session.storage.get_sd_blob_hash_for_stream(lbry_file.stream_hash)
        self.assertEqual(expected_stream_hash, lbry_file.stream_hash)
        self.assertEqual(sd_hash, lbry_file.sd_hash)
        self.assertEqual(sd_hash, expected_sd_hash)
        blobs = yield self.blob_manager.get_all_verified_blobs()
        self.assertEqual(2, len(blobs))
        self.assertEqual(3, len(blobs))
        num_should_announce_blobs = yield self.blob_manager.count_should_announce_blobs()
        self.assertEqual(1, num_should_announce_blobs)
        self.assertEqual(2, num_should_announce_blobs)

    @defer.inlineCallbacks
    def test_can_create_file_with_unicode_filename(self):
        expected_stream_hash = ('d1da4258f3ce12edb91d7e8e160d091d3ab1432c2e55a6352dce0'
                                '2fd5adb86fe144e93e110075b5865fff8617776c6c0')
        filename = u'☃.file'
        stream_hash = yield self.create_file(filename)
        self.assertEqual(expected_stream_hash, stream_hash)
        lbry_file = yield self.create_file(filename)
        self.assertEqual(expected_stream_hash, lbry_file.stream_hash)
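
The bumped expectations in test_can_create_file are consistent with the sd blob now being created and verified through the blob manager; a rough sanity check, assuming the default 2 MB maximum blob size:

    # back-of-the-envelope check of the new numbers (2 MB max blob size assumed)
    MB = 2 ** 20
    data_blobs = 2                # the 3 MB GenFile stream splits into 2 MB + 1 MB data blobs
    verified = data_blobs + 1     # plus the sd blob -> assertEqual(3, len(blobs))
    should_announce = 2           # head blob + sd blob -> assertEqual(2, num_should_announce_blobs)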

View file

@@ -1,42 +0,0 @@
from twisted.internet import defer
from twisted.trial import unittest

from lbrynet import conf
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloader
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.lbry_file.EncryptedFileMetadataManager import DBEncryptedFileMetadataManager
from lbrynet.tests.util import random_lbry_hash


class TestEncryptedFileManager(unittest.TestCase):

    def setUp(self):
        conf.initialize_settings()

    @defer.inlineCallbacks
    def test_database_operations(self):
        # test database read/write functions in EncryptedFileManager
        class MocSession(object):
            pass

        session = MocSession()
        session.db_dir = '.'
        stream_info_manager = DBEncryptedFileMetadataManager('.')
        sd_identifier = None
        download_directory = '.'
        manager = EncryptedFileManager(
            session, stream_info_manager, sd_identifier, download_directory)
        yield manager.stream_info_manager.setup()
        out = yield manager._get_all_lbry_files()
        self.assertEqual(len(out), 0)

        stream_hash = random_lbry_hash()
        blob_data_rate = 0
        out = yield manager._save_lbry_file(stream_hash, blob_data_rate)
        rowid = yield manager._get_rowid_for_stream_hash(stream_hash)
        self.assertEqual(out, rowid)
        files = yield manager._get_all_lbry_files()
        self.assertEqual(1, len(files))

        yield manager._change_file_status(rowid, ManagedEncryptedFileDownloader.STATUS_RUNNING)
        out = yield manager._get_lbry_file_status(rowid)
        self.assertEqual(out, ManagedEncryptedFileDownloader.STATUS_RUNNING)

View file

@@ -6,8 +6,10 @@ from twisted.internet import defer
from twisted import trial
from lbryschema.decode import smart_decode
from lbryum.wallet import NewWallet
from lbrynet import conf
from lbrynet.core import Session, PaymentRateManager, Wallet
from lbrynet.database.storage import SQLiteStorage
from lbrynet.daemon.Daemon import Daemon as LBRYDaemon
from lbrynet.tests import util
@@ -33,6 +35,10 @@ def get_test_daemon(data_rate=None, generous=True, with_fee=False):
    daemon = LBRYDaemon(None)
    daemon.session = mock.Mock(spec=Session.Session)
    daemon.session.wallet = mock.Mock(spec=Wallet.LBRYumWallet)
    daemon.session.wallet.wallet = mock.Mock(spec=NewWallet)
    daemon.session.wallet.wallet.use_encryption = False
    daemon.session.wallet.network = FakeNetwork()
    daemon.session.storage = mock.Mock(spec=SQLiteStorage)
    market_feeds = [BTCLBCFeed(), USDBTCFeed()]
    daemon.exchange_rate_manager = DummyExchangeRateManager(market_feeds, rates)
    base_prm = PaymentRateManager.BasePaymentRateManager(rate=data_rate)
@@ -107,8 +113,7 @@ class TestJsonRpc(trial.unittest.TestCase):
        mock_conf_settings(self)
        util.resetTime(self)
        self.test_daemon = get_test_daemon()
        self.test_daemon.session.wallet = Wallet.LBRYumWallet(storage=Wallet.InMemoryStorage())
        self.test_daemon.session.wallet.network = FakeNetwork()
        self.test_daemon.session.wallet.is_first_run = False
        self.test_daemon.session.wallet.get_best_blockhash = noop

    def test_status(self):
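
The setUp change drops the real LBRYumWallet in favour of the fully mocked wallet already built in get_test_daemon; the pattern in isolation (that the daemon's status reporting reads use_encryption is an assumption):

    # the wallet-mocking pattern used above, in isolation
    import mock
    from lbryum.wallet import NewWallet
    from lbrynet.core import Wallet

    wallet = mock.Mock(spec=Wallet.LBRYumWallet)
    wallet.wallet = mock.Mock(spec=NewWallet)   # inner lbryum wallet
    wallet.wallet.use_encryption = False        # assumed to be read when reporting wallet status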

View file

@@ -39,22 +39,27 @@ class MocDownloader(object):
        self.stop_called = True
        self.finish_deferred.callback(True)

def moc_initialize(self, stream_info):
    self.sd_hash = "d5169241150022f996fa7cd6a9a1c421937276a3275eb912" \
                   "790bd07ba7aec1fac5fd45431d226b8fb402691e79aeb24b"
    return None

def moc_download_sd_blob(self):
    return None

def moc_download(self, sd_blob, name, key_fee):
def moc_download(self, sd_blob, name, txid, nout, key_fee, file_name):
    self.pay_key_fee(key_fee, name)
    self.downloader = MocDownloader()
    self.downloader.start()

def moc_pay_key_fee(self, key_fee, name):
    self.pay_key_fee_called = True

class GetStreamTests(unittest.TestCase):

    def init_getstream_with_mocs(self):

@@ -93,7 +98,7 @@ class GetStreamTests(unittest.TestCase):
        stream_info = None
        with self.assertRaises(AttributeError):
            yield getstream.start(stream_info, name)
            yield getstream.start(stream_info, name, "deadbeef" * 12, 0)

    @defer.inlineCallbacks

@@ -113,7 +118,7 @@ class GetStreamTests(unittest.TestCase):
        name = 'test'
        stream_info = None
        with self.assertRaises(DownloadSDTimeout):
            yield getstream.start(stream_info, name)
            yield getstream.start(stream_info, name, "deadbeef" * 12, 0)
        self.assertFalse(getstream.pay_key_fee_called)

    @defer.inlineCallbacks

@@ -129,7 +134,7 @@ class GetStreamTests(unittest.TestCase):
        getstream.pay_key_fee = types.MethodType(moc_pay_key_fee, getstream)
        name = 'test'
        stream_info = None
        start = getstream.start(stream_info, name)
        start = getstream.start(stream_info, name, "deadbeef" * 12, 0)
        self.clock.advance(1)
        self.clock.advance(1)
        self.clock.advance(1)

@@ -151,8 +156,7 @@ class GetStreamTests(unittest.TestCase):
        getstream.pay_key_fee = types.MethodType(moc_pay_key_fee, getstream)
        name = 'test'
        stream_info = None
        start = getstream.start(stream_info, name)
        start = getstream.start(stream_info, name, "deadbeef" * 12, 0)
        getstream.downloader.num_completed = 1
        self.clock.advance(1)
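
The two extra positional arguments threaded through every start() call above match the txid/nout fields added to file objects in this release; the txid here is just a dummy string, not a real transaction id:

    # call shape used by these tests (placeholder values, mirroring the diff above)
    fake_txid = "deadbeef" * 12   # dummy hex filler, only its presence matters to the mocks
    fake_nout = 0                 # claim output index
    d = getstream.start(stream_info, name, fake_txid, fake_nout)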

View file

@@ -11,7 +11,7 @@ from lbrynet.tests.mocks import BTCLBCFeed, USDBTCFeed
class FeeFormatTest(unittest.TestCase):
    def test_fee_created_with_correct_inputs(self):
        fee_dict = {
            'currency':'USD',
            'currency': 'USD',
            'amount': 10.0,
            'address': "bRcHraa8bYJZL7vkh5sNmGwPDERFUjGPP9"
        }

@@ -21,7 +21,7 @@ class FeeFormatTest(unittest.TestCase):
    def test_fee_zero(self):
        fee_dict = {
            'currency':'LBC',
            'currency': 'LBC',
            'amount': 0.0,
            'address': "bRcHraa8bYJZL7vkh5sNmGwPDERFUjGPP9"
        }

@@ -47,7 +47,7 @@ class FeeTest(unittest.TestCase):
    def test_fee_converts_to_lbc(self):
        fee = Fee({
            'currency':'USD',
            'currency': 'USD',
            'amount': 10.0,
            'address': "bRcHraa8bYJZL7vkh5sNmGwPDERFUjGPP9"
        })