fully asyncio daemon api
parent 6c52450858
commit 86fb99a37c
12 changed files with 400 additions and 594 deletions
@@ -57,7 +57,7 @@ def start_daemon(settings: typing.Optional[typing.Dict] = None,
 
     if check_connection():
         daemon = Daemon()
-        asyncio.get_event_loop().run_until_complete(daemon.start_listening())
+        asyncio.get_event_loop().create_task(daemon.start_listening())
        reactor.run()
    else:
        log.info("Not connected to internet, unable to start")
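Scheduling start_listening() with create_task() and then calling reactor.run() only works when Twisted is driving the same asyncio event loop, that is, when the asyncio-based reactor is installed before twisted.internet.reactor is imported. A minimal sketch of that arrangement (the start_listening coroutine here is a stand-in, not the real Daemon method):

    import asyncio
    from twisted.internet import asyncioreactor

    asyncioreactor.install(asyncio.get_event_loop())  # must run before the reactor is imported anywhere else

    from twisted.internet import reactor

    async def start_listening():
        print("daemon listening")  # placeholder for Daemon.start_listening()

    asyncio.get_event_loop().create_task(start_listening())
    reactor.run()  # runs the shared asyncio loop, so the scheduled task executes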
@@ -36,37 +36,35 @@ class Component(metaclass=ComponentType):
     def running(self):
         return self._running
 
-    def get_status(self):
+    async def get_status(self):
         return
 
-    def start(self):
+    async def start(self):
         raise NotImplementedError()
 
-    def stop(self):
+    async def stop(self):
         raise NotImplementedError()
 
     @property
     def component(self):
         raise NotImplementedError()
 
-    @defer.inlineCallbacks
-    def _setup(self):
+    async def _setup(self):
         try:
-            result = yield defer.maybeDeferred(self.start)
+            result = await self.start()
             self._running = True
-            defer.returnValue(result)
+            return result
         except (defer.CancelledError, AlreadyQuit):
             pass
         except Exception as err:
             log.exception("Error setting up %s", self.component_name or self.__class__.__name__)
             raise err
 
-    @defer.inlineCallbacks
-    def _stop(self):
+    async def _stop(self):
         try:
-            result = yield defer.maybeDeferred(self.stop)
+            result = await self.stop()
             self._running = False
-            defer.returnValue(result)
+            return result
         except (defer.CancelledError, AlreadyQuit):
             pass
         except Exception as err:
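With start(), stop() and get_status() declared as coroutines, a concrete component is now written as plain async methods and _setup()/_stop() simply await them. An illustrative subclass (the component name, constructor signature and body are made up for the example):

    class ExampleComponent(Component):
        component_name = "example"

        def __init__(self, component_manager):
            super().__init__(component_manager)
            self._resource = None

        @property
        def component(self):
            return self._resource

        async def start(self):
            self._resource = object()  # any awaitable setup work would go here

        async def stop(self):
            self._resource = None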
@@ -1,5 +1,5 @@
+import asyncio
 import logging
-from twisted.internet import defer
 from lbrynet.p2p.Error import ComponentStartConditionNotMet
 from lbrynet.extras.daemon.PeerManager import PeerManager
 from lbrynet.extras.daemon.PeerFinder import DHTPeerFinder
@@ -110,13 +110,8 @@ class ComponentManager:
         steps.reverse()
         return steps
 
-    @defer.inlineCallbacks
-    def setup(self, **callbacks):
-        """
-        Start Components in sequence sorted by requirements
-
-        :return: (defer.Deferred)
-        """
+    async def setup(self, **callbacks):
+        """ Start Components in sequence sorted by requirements """
         for component_name, cb in callbacks.items():
             if component_name not in self.component_classes:
                 if component_name not in self.skip_components:
@@ -124,19 +119,22 @@ class ComponentManager:
             if not callable(cb):
                 raise ValueError("%s is not callable" % cb)
 
-        def _setup(component):
+        async def _setup(component):
+            await component._setup()
             if component.component_name in callbacks:
-                d = component._setup()
-                d.addCallback(callbacks[component.component_name], component)
-                return d
-            return component._setup()
+                maybe_coro = callbacks[component.component_name](component)
+                if asyncio.iscoroutine(maybe_coro):
+                    asyncio.create_task(maybe_coro)
 
         stages = self.sort_components()
         for stage in stages:
-            yield defer.DeferredList([_setup(component) for component in stage if not component.running])
+            needing_start = [
+                _setup(component) for component in stage if not component.running
+            ]
+            if needing_start:
+                await asyncio.wait(needing_start)
 
-    @defer.inlineCallbacks
-    def stop(self):
+    async def stop(self):
         """
         Stop Components in reversed startup order
 
@@ -144,7 +142,11 @@ class ComponentManager:
         """
         stages = self.sort_components(reverse=True)
         for stage in stages:
-            yield defer.DeferredList([component._stop() for component in stage if component.running])
+            needing_stop = [
+                component._stop() for component in stage if component.running
+            ]
+            if needing_stop:
+                await asyncio.wait(needing_stop)
 
     def all_components_running(self, *component_names):
         """
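Since setup() and stop() are now coroutines, the whole component lifecycle can be driven from an asyncio entry point instead of a Deferred chain. A usage sketch (ComponentManager construction arguments are elided):

    import asyncio

    async def run_components(component_manager):
        await component_manager.setup()        # starts each dependency stage via asyncio.wait()
        try:
            await asyncio.sleep(3600)          # stand-in for the daemon's real work
        finally:
            await component_manager.stop()     # stops the stages in reverse order

    # asyncio.get_event_loop().run_until_complete(run_components(manager))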
@@ -1,13 +1,13 @@
 import os
 import asyncio
+import aiohttp
 import logging
 import treq
-import json
 import math
 import binascii
 from hashlib import sha256
 from types import SimpleNamespace
-from twisted.internet import defer, threads, reactor, error, task
+from twisted.internet import defer, reactor, error, task
 
 from aioupnp import __version__ as aioupnp_version
 from aioupnp.upnp import UPnP
@@ -16,6 +16,7 @@ from aioupnp.fault import UPnPError
 import lbrynet.schema
 from lbrynet import conf
 
+from lbrynet.extras.compat import d2f
 from lbrynet.blob.EncryptedFileManager import EncryptedFileManager
 from lbrynet.blob.client.EncryptedFileDownloader import EncryptedFileSaverFactory
 from lbrynet.blob.client.EncryptedFileOptions import add_lbry_file_to_sd_identifier
@@ -27,7 +28,7 @@ from lbrynet.extras.daemon.HashAnnouncer import DHTHashAnnouncer
 from lbrynet.extras.reflector.server.server import ReflectorServerFactory
 from lbrynet.extras.wallet import LbryWalletManager
 from lbrynet.extras.wallet import Network
-from lbrynet.utils import DeferredDict, generate_id
+from lbrynet.utils import generate_id
 from lbrynet.p2p.PaymentRateManager import OnlyFreePaymentsManager
 from lbrynet.p2p.RateLimiter import RateLimiter
 from lbrynet.p2p.BlobManager import DiskBlobManager
@@ -55,22 +56,23 @@ RATE_LIMITER_COMPONENT = "rate_limiter"
 PAYMENT_RATE_COMPONENT = "payment_rate_manager"
 
 
-def from_future(coroutine: asyncio.coroutine) -> defer.Deferred:
-    return defer.Deferred.fromFuture(asyncio.ensure_future(coroutine))
+async def gather_dict(tasks: dict):
+    async def wait_value(key, value):
+        return key, await value
+    return dict(await asyncio.gather(*(
+        wait_value(*kv) for kv in tasks.items()
+    )))
 
 
-@defer.inlineCallbacks
-def get_external_ip():  # used if upnp is disabled or non-functioning
+async def get_external_ip():  # used if upnp is disabled or non-functioning
     try:
-        buf = []
-        response = yield treq.get("https://api.lbry.io/ip")
-        yield treq.collect(response, buf.append)
-        parsed = json.loads(b"".join(buf).decode())
-        if parsed['success']:
-            return parsed['data']['ip']
-        return
-    except Exception as err:
-        return
+        async with aiohttp.ClientSession() as session:
+            async with session.get("https://api.lbry.io/ip") as resp:
+                response = await resp.json()
+                if response['success']:
+                    return response['data']['ip']
+    except Exception as e:
+        pass
 
 
 class DatabaseComponent(Component):
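gather_dict() awaits every value of a dict of awaitables and returns a plain dict of results keyed the same way, which is the role DeferredDict used to play for Deferreds. A small usage sketch:

    import asyncio

    async def demo_gather_dict():
        async def double(x):
            await asyncio.sleep(0)
            return x * 2
        results = await gather_dict({"a": double(1), "b": double(2)})
        assert results == {"a": 2, "b": 4}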
@@ -97,8 +99,7 @@ class DatabaseComponent(Component):
         with open(conf.settings.get_db_revision_filename(), mode='w') as db_revision:
             db_revision.write(str(version_num))
 
-    @defer.inlineCallbacks
-    def start(self):
+    async def start(self):
         # check directories exist, create them if they don't
         log.info("Loading databases")
 
@@ -117,19 +118,19 @@ class DatabaseComponent(Component):
         if old_revision < self.get_current_db_revision():
             from lbrynet.extras.daemon.migrator import dbmigrator
             log.info("Upgrading your databases (revision %i to %i)", old_revision, self.get_current_db_revision())
-            yield threads.deferToThread(
-                dbmigrator.migrate_db, conf.settings.data_dir, old_revision, self.get_current_db_revision()
+            await asyncio.get_event_loop().run_in_executor(
+                None, dbmigrator.migrate_db, conf.settings.data_dir, old_revision, self.get_current_db_revision()
             )
             self._write_db_revision_file(self.get_current_db_revision())
             log.info("Finished upgrading the databases.")
 
-        # start SQLiteStorage
-        self.storage = SQLiteStorage(conf.settings.data_dir)
-        yield self.storage.setup()
+        self.storage = SQLiteStorage(
+            os.path.join(conf.settings.data_dir, "lbrynet.sqlite")
+        )
+        await self.storage.open()
 
-    @defer.inlineCallbacks
-    def stop(self):
-        yield self.storage.stop()
+    async def stop(self):
+        await self.storage.close()
         self.storage = None
 
 
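run_in_executor() is the asyncio counterpart of threads.deferToThread: the blocking migration runs on the default thread pool while the event loop stays responsive. The general pattern, sketched with a throwaway blocking function:

    import asyncio

    def blocking_migration(path, old_rev, new_rev):
        # stands in for dbmigrator.migrate_db, which does blocking file and SQL work
        return f"migrated {path} from revision {old_rev} to {new_rev}"

    async def migrate(path):
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(None, blocking_migration, path, 1, 2)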
@@ -250,33 +251,24 @@ class HeadersComponent(Component):
         with open(self.headers_file, "rb+") as headers_file:
             headers_file.truncate(checksum_length_in_bytes)
 
-    @defer.inlineCallbacks
-    def start(self):
+    async def start(self):
         conf.settings.ensure_wallet_dir()
         if not os.path.exists(self.headers_dir):
             os.mkdir(self.headers_dir)
         if os.path.exists(self.old_file):
             log.warning("Moving old headers from %s to %s.", self.old_file, self.headers_file)
             os.rename(self.old_file, self.headers_file)
-        self._downloading_headers = yield f2d(self.should_download_headers_from_s3())
+        self._downloading_headers = await self.should_download_headers_from_s3()
         if self._downloading_headers:
             try:
-                yield self.fetch_headers_from_s3()
+                await d2f(self.fetch_headers_from_s3())
             except Exception as err:
                 log.error("failed to fetch headers from s3: %s", err)
             finally:
                 self._downloading_headers = False
 
-    def stop(self):
-        return defer.succeed(None)
-
-
-def d2f(deferred):
-    return deferred.asFuture(asyncio.get_event_loop())
-
-
-def f2d(future):
-    return defer.Deferred.fromFuture(asyncio.ensure_future(future))
+    async def stop(self):
+        pass
 
 
 class WalletComponent(Component):
@@ -304,19 +296,17 @@ class WalletComponent(Component):
             'is_locked': not self.wallet_manager.is_wallet_unlocked,
         }
 
-    @defer.inlineCallbacks
-    def start(self):
+    async def start(self):
         conf.settings.ensure_wallet_dir()
         log.info("Starting torba wallet")
         storage = self.component_manager.get_component(DATABASE_COMPONENT)
         lbrynet.schema.BLOCKCHAIN_NAME = conf.settings['blockchain_name']
-        self.wallet_manager = yield f2d(LbryWalletManager.from_lbrynet_config(conf.settings, storage))
+        self.wallet_manager = await LbryWalletManager.from_lbrynet_config(conf.settings, storage)
         self.wallet_manager.old_db = storage
-        yield f2d(self.wallet_manager.start())
+        await self.wallet_manager.start()
 
-    @defer.inlineCallbacks
-    def stop(self):
-        yield f2d(self.wallet_manager.stop())
+    async def stop(self):
+        await self.wallet_manager.stop()
         self.wallet_manager = None
 
 
@@ -345,14 +335,11 @@ class BlobComponent(Component):
     def stop(self):
         return self.blob_manager.stop()
 
-    @defer.inlineCallbacks
-    def get_status(self):
+    async def get_status(self):
         count = 0
         if self.blob_manager:
-            count = yield self.blob_manager.storage.count_finished_blobs()
-        defer.returnValue({
-            'finished_blobs': count
-        })
+            count = await self.blob_manager.storage.count_finished_blobs()
+        return {'finished_blobs': count}
 
 
 class DHTComponent(Component):
@@ -376,8 +363,7 @@ class DHTComponent(Component):
             'peers_in_routing_table': 0 if not self.dht_node else len(self.dht_node.contacts)
         }
 
-    @defer.inlineCallbacks
-    def start(self):
+    async def start(self):
         self.upnp_component = self.component_manager.get_component(UPNP_COMPONENT)
         self.external_peer_port = self.upnp_component.upnp_redirects.get("TCP", conf.settings["peer_port"])
         self.external_udp_port = self.upnp_component.upnp_redirects.get("UDP", conf.settings["dht_node_port"])
@@ -387,7 +373,7 @@ class DHTComponent(Component):
         external_ip = self.upnp_component.external_ip
         if not external_ip:
             log.warning("UPnP component failed to get external ip")
-            external_ip = yield get_external_ip()
+            external_ip = await get_external_ip()
             if not external_ip:
                 log.warning("failed to get external ip")
 
@@ -399,12 +385,11 @@ class DHTComponent(Component):
             peerPort=self.external_peer_port
         )
 
-        yield self.dht_node.start(conf.settings['known_dht_nodes'], block_on_join=False)
+        await d2f(self.dht_node.start(conf.settings['known_dht_nodes'], block_on_join=False))
         log.info("Started the dht")
 
-    @defer.inlineCallbacks
     def stop(self):
-        yield self.dht_node.stop()
+        return d2f(self.dht_node.stop())
 
 
 class HashAnnouncerComponent(Component):
@@ -419,16 +404,14 @@ class HashAnnouncerComponent(Component):
     def component(self):
         return self.hash_announcer
 
-    @defer.inlineCallbacks
     def start(self):
         storage = self.component_manager.get_component(DATABASE_COMPONENT)
         dht_node = self.component_manager.get_component(DHT_COMPONENT)
         self.hash_announcer = DHTHashAnnouncer(dht_node, storage)
-        yield self.hash_announcer.start()
+        self.hash_announcer.start()
 
-    @defer.inlineCallbacks
     def stop(self):
-        yield self.hash_announcer.stop()
+        self.hash_announcer.stop()
 
     def get_status(self):
         return {
@@ -447,13 +430,11 @@ class RateLimiterComponent(Component):
     def component(self):
         return self.rate_limiter
 
-    def start(self):
+    async def start(self):
         self.rate_limiter.start()
-        return defer.succeed(None)
 
-    def stop(self):
+    async def stop(self):
         self.rate_limiter.stop()
-        return defer.succeed(None)
 
 
 class PaymentRateComponent(Component):
@@ -467,11 +448,11 @@ class PaymentRateComponent(Component):
     def component(self):
         return self.payment_rate_manager
 
-    def start(self):
-        return defer.succeed(None)
+    async def start(self):
+        pass
 
-    def stop(self):
-        return defer.succeed(None)
+    async def stop(self):
+        pass
 
 
 class FileManagerComponent(Component):
@@ -494,7 +475,6 @@ class FileManagerComponent(Component):
             'managed_files': len(self.file_manager.lbry_files)
         }
 
-    @defer.inlineCallbacks
     def start(self):
         rate_limiter = self.component_manager.get_component(RATE_LIMITER_COMPONENT)
         blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
@@ -511,18 +491,16 @@ class FileManagerComponent(Component):
             wallet,
             conf.settings.download_dir
         )
-        yield sd_identifier.add_stream_downloader_factory(EncryptedFileStreamType, file_saver_factory)
+        sd_identifier.add_stream_downloader_factory(EncryptedFileStreamType, file_saver_factory)
 
         payment_rate_manager = self.component_manager.get_component(PAYMENT_RATE_COMPONENT)
         log.info('Starting the file manager')
         self.file_manager = EncryptedFileManager(self.component_manager.peer_finder, rate_limiter, blob_manager, wallet,
                                                  payment_rate_manager, storage, sd_identifier)
-        yield self.file_manager.setup()
-        log.info('Done setting up file manager')
+        return self.file_manager.setup()
 
-    @defer.inlineCallbacks
     def stop(self):
-        yield self.file_manager.stop()
+        return d2f(self.file_manager.stop())
 
 
 class PeerProtocolServerComponent(Component):
@@ -538,8 +516,7 @@ class PeerProtocolServerComponent(Component):
     def component(self):
         return self.lbry_server_port
 
-    @defer.inlineCallbacks
-    def start(self):
+    async def start(self):
         wallet = self.component_manager.get_component(WALLET_COMPONENT)
         upnp = self.component_manager.get_component(UPNP_COMPONENT)
         peer_port = conf.settings['peer_port']
@@ -562,7 +539,7 @@ class PeerProtocolServerComponent(Component):
         try:
             log.info("Peer protocol listening on TCP %i (ext port %i)", peer_port,
                      upnp.upnp_redirects.get("TCP", peer_port))
-            self.lbry_server_port = yield reactor.listenTCP(peer_port, server_factory)
+            self.lbry_server_port = await d2f(reactor.listenTCP(peer_port, server_factory))
         except error.CannotListenError as e:
             import traceback
             log.error("Couldn't bind to port %d. Visit lbry.io/faq/how-to-change-port for"
@@ -570,12 +547,11 @@ class PeerProtocolServerComponent(Component):
             log.error("%s", traceback.format_exc())
             raise ValueError("%s lbrynet may already be running on your computer." % str(e))
 
-    @defer.inlineCallbacks
-    def stop(self):
+    async def stop(self):
         if self.lbry_server_port is not None:
             self.lbry_server_port, old_port = None, self.lbry_server_port
             log.info('Stop listening on port %s', old_port.port)
-            yield old_port.stopListening()
+            await d2f(old_port.stopListening())
 
 
 class ReflectorComponent(Component):
@@ -591,25 +567,23 @@ class ReflectorComponent(Component):
     def component(self):
         return self.reflector_server
 
-    @defer.inlineCallbacks
-    def start(self):
+    async def start(self):
         log.info("Starting reflector server")
         blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
         file_manager = self.component_manager.get_component(FILE_MANAGER_COMPONENT)
         reflector_factory = ReflectorServerFactory(self.component_manager.peer_manager, blob_manager, file_manager)
         try:
-            self.reflector_server = yield reactor.listenTCP(self.reflector_server_port, reflector_factory)
+            self.reflector_server = await d2f(reactor.listenTCP(self.reflector_server_port, reflector_factory))
             log.info('Started reflector on port %s', self.reflector_server_port)
         except error.CannotListenError as e:
             log.exception("Couldn't bind reflector to port %d", self.reflector_server_port)
             raise ValueError(f"{e} lbrynet may already be running on your computer.")
 
-    @defer.inlineCallbacks
-    def stop(self):
+    async def stop(self):
         if self.reflector_server is not None:
             log.info("Stopping reflector server")
             self.reflector_server, p = None, self.reflector_server
-            yield p.stopListening
+            await d2f(p.stopListening())
 
 
 class UPnPComponent(Component):
@@ -630,22 +604,11 @@ class UPnPComponent(Component):
     def component(self):
         return self
 
-    @defer.inlineCallbacks
-    def _setup_redirects(self):
-        d = {}
-        if PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components:
-            d["TCP"] = from_future(self.upnp.get_next_mapping(self._int_peer_port, "TCP", "LBRY peer port"))
-        if DHT_COMPONENT not in self.component_manager.skip_components:
-            d["UDP"] = from_future(self.upnp.get_next_mapping(self._int_dht_node_port, "UDP", "LBRY DHT port"))
-        upnp_redirects = yield DeferredDict(d)
-        self.upnp_redirects.update(upnp_redirects)
-
-    @defer.inlineCallbacks
-    def _maintain_redirects(self):
+    async def _maintain_redirects(self):
         # setup the gateway if necessary
         if not self.upnp:
             try:
-                self.upnp = yield from_future(UPnP.discover())
+                self.upnp = await UPnP.discover()
                 log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
             except Exception as err:
                 log.warning("upnp discovery failed: %s", err)
@@ -655,7 +618,7 @@ class UPnPComponent(Component):
         external_ip = None
         if self.upnp:
             try:
-                external_ip = yield from_future(self.upnp.get_external_ip())
+                external_ip = await self.upnp.get_external_ip()
                 if external_ip != "0.0.0.0" and not self.external_ip:
                     log.info("got external ip from UPnP: %s", external_ip)
             except (asyncio.TimeoutError, UPnPError):
@@ -663,7 +626,7 @@ class UPnPComponent(Component):
 
         if external_ip == "0.0.0.0" or not external_ip:
             log.warning("unable to get external ip from UPnP, checking lbry.io fallback")
-            external_ip = yield get_external_ip()
+            external_ip = await get_external_ip()
         if self.external_ip and self.external_ip != external_ip:
             log.info("external ip changed from %s to %s", self.external_ip, external_ip)
         self.external_ip = external_ip
@@ -674,10 +637,10 @@ class UPnPComponent(Component):
                 log.info("add UPnP port mappings")
                 d = {}
                 if PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components:
-                    d["TCP"] = from_future(self.upnp.get_next_mapping(self._int_peer_port, "TCP", "LBRY peer port"))
+                    d["TCP"] = self.upnp.get_next_mapping(self._int_peer_port, "TCP", "LBRY peer port")
                 if DHT_COMPONENT not in self.component_manager.skip_components:
-                    d["UDP"] = from_future(self.upnp.get_next_mapping(self._int_dht_node_port, "UDP", "LBRY DHT port"))
-                upnp_redirects = yield DeferredDict(d)
+                    d["UDP"] = self.upnp.get_next_mapping(self._int_dht_node_port, "UDP", "LBRY DHT port")
+                upnp_redirects = await gather_dict(d)
                 log.info("set up redirects: %s", upnp_redirects)
                 self.upnp_redirects.update(upnp_redirects)
             except (asyncio.TimeoutError, UPnPError):
@@ -685,7 +648,7 @@ class UPnPComponent(Component):
                 return self._maintain_redirects()
         elif self.upnp:  # check existing redirects are still active
             found = set()
-            mappings = yield from_future(self.upnp.get_redirects())
+            mappings = await self.upnp.get_redirects()
             for mapping in mappings:
                 proto = mapping['NewProtocol']
                 if proto in self.upnp_redirects and mapping['NewExternalPort'] == self.upnp_redirects[proto]:
@@ -693,18 +656,14 @@ class UPnPComponent(Component):
                     found.add(proto)
             if 'UDP' not in found and DHT_COMPONENT not in self.component_manager.skip_components:
                 try:
-                    udp_port = yield from_future(
-                        self.upnp.get_next_mapping(self._int_dht_node_port, "UDP", "LBRY DHT port")
-                    )
+                    udp_port = await self.upnp.get_next_mapping(self._int_dht_node_port, "UDP", "LBRY DHT port")
                     self.upnp_redirects['UDP'] = udp_port
                     log.info("refreshed upnp redirect for dht port: %i", udp_port)
                 except (asyncio.TimeoutError, UPnPError):
                     del self.upnp_redirects['UDP']
             if 'TCP' not in found and PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components:
                 try:
-                    tcp_port = yield from_future(
-                        self.upnp.get_next_mapping(self._int_peer_port, "TCP", "LBRY peer port")
-                    )
+                    tcp_port = await self.upnp.get_next_mapping(self._int_peer_port, "TCP", "LBRY peer port")
                     self.upnp_redirects['TCP'] = tcp_port
                     log.info("refreshed upnp redirect for peer port: %i", tcp_port)
                 except (asyncio.TimeoutError, UPnPError):
@@ -715,14 +674,13 @@ class UPnPComponent(Component):
             if self.upnp_redirects:
                 log.debug("upnp redirects are still active")
 
-    @defer.inlineCallbacks
-    def start(self):
+    async def start(self):
         log.info("detecting external ip")
         if not self.use_upnp:
-            self.external_ip = yield get_external_ip()
+            self.external_ip = await get_external_ip()
             return
         success = False
-        yield self._maintain_redirects()
+        await self._maintain_redirects()
         if self.upnp:
             if not self.upnp_redirects and not all([x in self.component_manager.skip_components for x in
                                                     (DHT_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT)]):
@@ -736,13 +694,11 @@ class UPnPComponent(Component):
         self.component_manager.analytics_manager.send_upnp_setup_success_fail(success, self.get_status())
         self._maintain_redirects_lc.start(360, now=False)
 
-    def stop(self):
-        if self._maintain_redirects_lc.running:
-            self._maintain_redirects_lc.stop()
-        return defer.DeferredList(
-            [from_future(self.upnp.delete_port_mapping(port, protocol))
-             for protocol, port in self.upnp_redirects.items()]
-        )
+    async def stop(self):
+        if self.upnp_redirects:
+            await asyncio.wait([
+                self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
+            ])
 
     def get_status(self):
         return {
@@ -766,10 +722,8 @@ class ExchangeRateManagerComponent(Component):
     def component(self):
         return self.exchange_rate_manager
 
-    @defer.inlineCallbacks
-    def start(self):
-        yield self.exchange_rate_manager.start()
+    async def start(self):
+        self.exchange_rate_manager.start()
 
-    @defer.inlineCallbacks
-    def stop(self):
-        yield self.exchange_rate_manager.stop()
+    async def stop(self):
+        self.exchange_rate_manager.stop()
@@ -7,7 +7,6 @@ from typing import Callable, Optional, List
 from operator import itemgetter
 from binascii import hexlify, unhexlify
 from copy import deepcopy
-from twisted.internet import reactor
 from twisted.internet.task import LoopingCall
 
 from torba.client.baseaccount import SingleKey, HierarchicalDeterministic
@@ -17,7 +16,7 @@ from lbrynet.dht.error import TimeoutError
 from lbrynet.blob.blob_file import is_valid_blobhash
 from lbrynet.extras import system_info
 from lbrynet.extras.reflector import reupload
-from lbrynet.extras.daemon.Components import d2f, f2d
+from lbrynet.extras.daemon.Components import d2f
 from lbrynet.extras.daemon.Components import WALLET_COMPONENT, DATABASE_COMPONENT, DHT_COMPONENT, BLOB_COMPONENT
 from lbrynet.extras.daemon.Components import FILE_MANAGER_COMPONENT, RATE_LIMITER_COMPONENT
 from lbrynet.extras.daemon.Components import EXCHANGE_RATE_MANAGER_COMPONENT, PAYMENT_RATE_COMPONENT, UPNP_COMPONENT
@@ -48,22 +47,15 @@ from lbrynet.extras.daemon.json_response_encoder import JSONResponseEncoder
 
 import asyncio
 import logging
-from urllib import parse as urlparse
 import json
 import inspect
 import signal
 from functools import wraps
 from twisted.internet import defer
-from twisted.internet.defer import Deferred
 from twisted.python.failure import Failure
-from twisted.internet.error import ConnectionDone, ConnectionLost
-from txjsonrpc import jsonrpclib
-from traceback import format_exc
 
 from lbrynet import utils
-from lbrynet.p2p.Error import InvalidAuthenticationToken
 from lbrynet.extras.daemon.undecorated import undecorated
-from twisted.web import server
 from lbrynet import conf
 
 from aiohttp import web
@@ -376,14 +368,13 @@ class Daemon(metaclass=JSONRPCServerType):
         self.component_manager = component_manager or ComponentManager(
             analytics_manager=self.analytics_manager,
             skip_components=to_skip or [],
-            reactor=reactor
         )
         self.looping_call_manager = LoopingCallManager({n: lc for n, (lc, t) in (looping_calls or {}).items()})
         self._looping_call_times = {n: t for n, (lc, t) in (looping_calls or {}).items()}
         self._use_authentication = use_authentication or conf.settings['use_auth_http']
         self._use_https = use_https or conf.settings['use_https']
         self.listening_port = None
-        self._component_setup_deferred = None
+        self._component_setup_task = None
         self.announced_startup = False
         self.sessions = {}
         self.is_first_run = is_first_run()
@@ -409,6 +400,8 @@ class Daemon(metaclass=JSONRPCServerType):
 
         self.app = web.Application()
         self.app.router.add_get('/lbryapi', self.handle_old_jsonrpc)
+        self.app.router.add_post('/lbryapi', self.handle_old_jsonrpc)
+        self.app.router.add_post('/', self.handle_old_jsonrpc)
         self.handler = self.app.make_handler()
         self.server = None
 
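With POST now registered for /lbryapi and /, clients can issue ordinary JSON-RPC calls over HTTP POST. A hedged client sketch (host, port and the "status" method name are illustrative defaults, not taken from this diff):

    import asyncio
    import aiohttp

    async def call_daemon(method, **params):
        payload = {"jsonrpc": "2.0", "method": method, "params": params, "id": 1}
        async with aiohttp.ClientSession() as session:
            async with session.post("http://localhost:5279/lbryapi", json=payload) as resp:
                return await resp.json()

    # asyncio.get_event_loop().run_until_complete(call_daemon("status"))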
@@ -423,32 +416,28 @@ class Daemon(metaclass=JSONRPCServerType):
         except OSError:
             log.error('lbrynet API failed to bind TCP %s:%i for listening. Daemon is already running or this port is '
                       'already in use by another application.', conf.settings['api_host'], conf.settings['api_port'])
-            reactor.fireSystemEvent("shutdown")
         except defer.CancelledError:
             log.info("shutting down before finished starting")
-            reactor.fireSystemEvent("shutdown")
         except Exception as err:
             self.analytics_manager.send_server_startup_error(str(err))
             log.exception('Failed to start lbrynet-daemon')
-            reactor.fireSystemEvent("shutdown")
 
     async def setup(self):
         log.info("Starting lbrynet-daemon")
         log.info("Platform: %s", json.dumps(system_info.get_platform()))
 
-        reactor.addSystemEventTrigger('before', 'shutdown', self.shutdown)
         if not self.analytics_manager.is_started:
             self.analytics_manager.start()
         self.analytics_manager.send_server_startup()
         for lc_name, lc_time in self._looping_call_times.items():
             self.looping_call_manager.start(lc_name, lc_time)
 
-        def update_attribute(setup_result, component):
+        def update_attribute(component):
             setattr(self, self.component_attributes[component.component_name], component.component)
 
         kwargs = {component: update_attribute for component in self.component_attributes.keys()}
-        self._component_setup_deferred = self.component_manager.setup(**kwargs)
-        await self._component_setup_deferred.asFuture(asyncio.get_event_loop())
+        self._component_setup_task = self.component_manager.setup(**kwargs)
+        await self._component_setup_task
 
         log.info("Started lbrynet-daemon")
 
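Note that component_manager.setup(**kwargs) now returns a coroutine object, and shutdown() later calls self._component_setup_task.cancel(); a bare coroutine has no cancel() method, so that call would raise AttributeError (which shutdown() swallows). If cancelling a partially finished startup is actually required, the coroutine would typically be wrapped in a Task first, along these lines:

    # illustrative alternative; the hunk above awaits the coroutine directly
    self._component_setup_task = asyncio.ensure_future(self.component_manager.setup(**kwargs))
    await self._component_setup_task   # a Task can later be cancelled with .cancel()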
@@ -463,7 +452,6 @@ class Daemon(metaclass=JSONRPCServerType):
         signal.signal(signal.SIGTERM, self._already_shutting_down)
         if self.listening_port:
             self.listening_port.stopListening()
-        self.looping_call_manager.shutdown()
         if self.server is not None:
             self.server.close()
             await self.server.wait_closed()
@@ -473,15 +461,11 @@ class Daemon(metaclass=JSONRPCServerType):
         if self.analytics_manager:
             self.analytics_manager.shutdown()
         try:
-            self._component_setup_deferred.cancel()
-        except (AttributeError, defer.CancelledError):
+            self._component_setup_task.cancel()
+        except (AttributeError, asyncio.CancelledError):
             pass
         if self.component_manager is not None:
-            d = self.component_manager.stop()
-            d.addErrback(log.fail(), 'Failure while shutting down')
-        else:
-            d = defer.succeed(None)
-        return d
+            await self.component_manager.stop()
 
     async def handle_old_jsonrpc(self, request):
         data = await request.json()
|
@ -518,7 +502,9 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
log.warning(params_error_message)
|
log.warning(params_error_message)
|
||||||
raise web.HTTPBadRequest(text=params_error_message)
|
raise web.HTTPBadRequest(text=params_error_message)
|
||||||
|
|
||||||
result = await fn(self, *_args, **_kwargs)
|
result = fn(self, *_args, **_kwargs)
|
||||||
|
if asyncio.iscoroutine(result):
|
||||||
|
result = await result
|
||||||
|
|
||||||
return web.Response(
|
return web.Response(
|
||||||
text=jsonrpc_dumps_pretty(result, ledger=self.ledger),
|
text=jsonrpc_dumps_pretty(result, ledger=self.ledger),
|
||||||
|
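Calling the handler first and awaiting only when it returns a coroutine lets the old synchronous jsonrpc_* methods and the newly converted async ones coexist behind a single dispatcher. The same pattern in isolation:

    import asyncio

    def sync_handler():
        return "sync result"

    async def async_handler():
        await asyncio.sleep(0)
        return "async result"

    async def dispatch(fn):
        result = fn()
        if asyncio.iscoroutine(result):
            result = await result
        return result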
@@ -596,7 +582,7 @@ class Daemon(metaclass=JSONRPCServerType):
         for sd_hash, stream in self.streams.items():
             stream.cancel(reason="daemon shutdown")
 
-    def _download_blob(self, blob_hash, rate_manager=None, timeout=None):
+    async def _download_blob(self, blob_hash, rate_manager=None, timeout=None):
         """
         Download a blob
 
@@ -615,13 +601,12 @@ class Daemon(metaclass=JSONRPCServerType):
             blob_hash, self.blob_manager, self.component_manager.peer_finder, self.rate_limiter,
             rate_manager, self.wallet_manager, timeout
         )
-        return downloader.download()
+        return await d2f(downloader.download())
 
-    @defer.inlineCallbacks
-    def _get_stream_analytics_report(self, claim_dict):
+    async def _get_stream_analytics_report(self, claim_dict):
         sd_hash = claim_dict.source_hash.decode()
         try:
-            stream_hash = yield self.storage.get_stream_hash_for_sd_hash(sd_hash)
+            stream_hash = await self.storage.get_stream_hash_for_sd_hash(sd_hash)
         except Exception:
             stream_hash = None
         report = {
@@ -630,12 +615,12 @@ class Daemon(metaclass=JSONRPCServerType):
         }
         blobs = {}
         try:
-            sd_host = yield self.blob_manager.get_host_downloaded_from(sd_hash)
+            sd_host = await d2f(self.blob_manager.get_host_downloaded_from(sd_hash))
         except Exception:
             sd_host = None
         report["sd_blob"] = sd_host
         if stream_hash:
-            blob_infos = yield self.storage.get_blobs_for_stream(stream_hash)
+            blob_infos = await self.storage.get_blobs_for_stream(stream_hash)
             report["known_blobs"] = len(blob_infos)
         else:
             blob_infos = []
@@ -648,32 +633,28 @@ class Daemon(metaclass=JSONRPCServerType):
         # if host:
         #     blobs[blob_num] = host
         # report["blobs"] = json.dumps(blobs)
-        defer.returnValue(report)
+        return report
 
-    @defer.inlineCallbacks
-    def _download_name(self, name, claim_dict, sd_hash, txid, nout, timeout=None, file_name=None):
+    async def _download_name(self, name, claim_dict, sd_hash, txid, nout, timeout=None, file_name=None):
         """
         Add a lbry file to the file manager, start the download, and return the new lbry file.
         If it already exists in the file manager, return the existing lbry file
         """
 
-        @defer.inlineCallbacks
-        def _download_finished(download_id, name, claim_dict):
-            report = yield self._get_stream_analytics_report(claim_dict)
+        async def _download_finished(download_id, name, claim_dict):
+            report = await self._get_stream_analytics_report(claim_dict)
             self.analytics_manager.send_download_finished(download_id, name, report, claim_dict)
             self.analytics_manager.send_new_download_success(download_id, name, claim_dict)
 
-        @defer.inlineCallbacks
-        def _download_failed(error, download_id, name, claim_dict):
-            report = yield self._get_stream_analytics_report(claim_dict)
+        async def _download_failed(error, download_id, name, claim_dict):
+            report = await self._get_stream_analytics_report(claim_dict)
             self.analytics_manager.send_download_errored(error, download_id, name, claim_dict,
                                                          report)
             self.analytics_manager.send_new_download_fail(download_id, name, claim_dict, error)
 
         if sd_hash in self.streams:
             downloader = self.streams[sd_hash]
-            result = yield downloader.finished_deferred
-            defer.returnValue(result)
+            return await d2f(downloader.finished_deferred)
         else:
             download_id = utils.random_string()
             self.analytics_manager.send_download_started(download_id, name, claim_dict)
@@ -685,26 +666,26 @@ class Daemon(metaclass=JSONRPCServerType):
                 timeout
             )
             try:
-                lbry_file, finished_deferred = yield self.streams[sd_hash].start(
+                lbry_file, finished_deferred = await d2f(self.streams[sd_hash].start(
                     claim_dict, name, txid, nout, file_name
-                )
+                ))
                 finished_deferred.addCallbacks(
                     lambda _: _download_finished(download_id, name, claim_dict),
                     lambda e: _download_failed(e, download_id, name, claim_dict)
                 )
-                result = yield self._get_lbry_file_dict(lbry_file)
+                result = await self._get_lbry_file_dict(lbry_file)
             except Exception as err:
-                yield _download_failed(err, download_id, name, claim_dict)
+                await _download_failed(err, download_id, name, claim_dict)
                 if isinstance(err, (DownloadDataTimeout, DownloadSDTimeout)):
                     log.warning('Failed to get %s (%s)', name, err)
                 else:
                     log.error('Failed to get %s (%s)', name, err)
                 if self.streams[sd_hash].downloader and self.streams[sd_hash].code != 'running':
-                    yield self.streams[sd_hash].downloader.stop(err)
+                    await d2f(self.streams[sd_hash].downloader.stop(err))
                 result = {'error': str(err)}
             finally:
                 del self.streams[sd_hash]
-            defer.returnValue(result)
+            return result
 
     async def _publish_stream(self, account, name, bid, claim_dict, file_path=None, certificate=None,
                               claim_address=None, change_address=None):
@@ -714,8 +695,8 @@ class Daemon(metaclass=JSONRPCServerType):
         )
         parse_lbry_uri(name)
         if not file_path:
-            stream_hash = await d2f(self.storage.get_stream_hash_for_sd_hash(
-                claim_dict['stream']['source']['source']))
+            stream_hash = await self.storage.get_stream_hash_for_sd_hash(
+                claim_dict['stream']['source']['source'])
             tx = await publisher.publish_stream(name, bid, claim_dict, stream_hash, claim_address)
         else:
             tx = await publisher.create_and_publish_stream(name, bid, claim_dict, file_path, claim_address)
@@ -844,8 +825,7 @@ class Daemon(metaclass=JSONRPCServerType):
             return self.get_est_cost_using_known_size(uri, size)
         return self.get_est_cost_from_uri(uri)
 
-    @defer.inlineCallbacks
-    def _get_lbry_file_dict(self, lbry_file):
+    async def _get_lbry_file_dict(self, lbry_file):
         key = hexlify(lbry_file.key) if lbry_file.key else None
         full_path = os.path.join(lbry_file.download_directory, lbry_file.file_name)
         mime_type = guess_mime_type(lbry_file.file_name)
@@ -856,13 +836,13 @@ class Daemon(metaclass=JSONRPCServerType):
         else:
             written_bytes = 0
 
-        size = yield lbry_file.get_total_bytes()
-        file_status = yield lbry_file.status()
+        size = await d2f(lbry_file.get_total_bytes())
+        file_status = await d2f(lbry_file.status())
         num_completed = file_status.num_completed
         num_known = file_status.num_known
         status = file_status.running_status
 
-        result = {
+        return {
             'completed': lbry_file.completed,
             'file_name': lbry_file.file_name,
             'download_directory': lbry_file.download_directory,
@@ -889,10 +869,8 @@ class Daemon(metaclass=JSONRPCServerType):
             'channel_name': lbry_file.channel_name,
             'claim_name': lbry_file.claim_name
         }
-        defer.returnValue(result)
 
-    @defer.inlineCallbacks
-    def _get_lbry_file(self, search_by, val, return_json=False):
+    async def _get_lbry_file(self, search_by, val, return_json=False):
         lbry_file = None
         if search_by in FileID:
             for l_f in self.file_manager.lbry_files:
@ -902,11 +880,10 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
else:
|
else:
|
||||||
raise NoValidSearch(f'{search_by} is not a valid search operation')
|
raise NoValidSearch(f'{search_by} is not a valid search operation')
|
||||||
if return_json and lbry_file:
|
if return_json and lbry_file:
|
||||||
lbry_file = yield self._get_lbry_file_dict(lbry_file)
|
lbry_file = await self._get_lbry_file_dict(lbry_file)
|
||||||
defer.returnValue(lbry_file)
|
return lbry_file
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
async def _get_lbry_files(self, return_json=False, **kwargs):
|
||||||
def _get_lbry_files(self, return_json=False, **kwargs):
|
|
||||||
lbry_files = list(self.file_manager.lbry_files)
|
lbry_files = list(self.file_manager.lbry_files)
|
||||||
if kwargs:
|
if kwargs:
|
||||||
for search_type, value in iter_lbry_file_search_values(kwargs):
|
for search_type, value in iter_lbry_file_search_values(kwargs):
|
||||||
|
@ -914,11 +891,11 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
if return_json:
|
if return_json:
|
||||||
file_dicts = []
|
file_dicts = []
|
||||||
for lbry_file in lbry_files:
|
for lbry_file in lbry_files:
|
||||||
lbry_file_dict = yield self._get_lbry_file_dict(lbry_file)
|
lbry_file_dict = await self._get_lbry_file_dict(lbry_file)
|
||||||
file_dicts.append(lbry_file_dict)
|
file_dicts.append(lbry_file_dict)
|
||||||
lbry_files = file_dicts
|
lbry_files = file_dicts
|
||||||
log.debug("Collected %i lbry files", len(lbry_files))
|
log.debug("Collected %i lbry files", len(lbry_files))
|
||||||
defer.returnValue(lbry_files)
|
return lbry_files
|
||||||
|
|
||||||
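The pattern in these hunks is mechanical: @defer.inlineCallbacks generators become async def coroutines, yield becomes await, and defer.returnValue() becomes a plain return. A toy before/after pair for reference (names are made up, not from the daemon):

import asyncio
from twisted.internet import defer


@defer.inlineCallbacks
def old_style(get_size):
    # legacy shape: generator driven by the Deferred machinery
    size = yield get_size()
    defer.returnValue({'size': size})


async def new_style(get_size):
    # converted shape: native coroutine, same control flow
    size = await get_size()
    return {'size': size}


async def _demo():
    async def fake_size():
        return 1024
    print(await new_style(fake_size))


asyncio.get_event_loop().run_until_complete(_demo())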
    def _sort_lbry_files(self, lbry_files, sort_by):
        for field, direction in sort_by:

@@ -946,18 +923,15 @@ class Daemon(metaclass=JSONRPCServerType):
        downloader.setup(self.wallet_manager)
        return downloader

-    @defer.inlineCallbacks
-    def _blob_availability(self, blob_hash, search_timeout, blob_timeout, downloader=None):
+    async def _blob_availability(self, blob_hash, search_timeout, blob_timeout, downloader=None):
        if not downloader:
            downloader = self._get_single_peer_downloader()
-        result = {}
        search_timeout = search_timeout or conf.settings['peer_search_timeout']
        blob_timeout = blob_timeout or conf.settings['sd_download_timeout']
-        is_available = False
        reachable_peers = []
        unreachable_peers = []
        try:
-            peers = yield self.jsonrpc_peer_list(blob_hash, search_timeout)
+            peers = await d2f(self.jsonrpc_peer_list(blob_hash, search_timeout))
            peer_infos = [{"peer": Peer(x['host'], x['port']),
                           "blob_hash": blob_hash,
                           "timeout": blob_timeout} for x in peers]

@@ -968,8 +942,7 @@ class Daemon(metaclass=JSONRPCServerType):
                d = downloader.download_temp_blob_from_peer(**peer_info)
                dl.append(d)
                dl_peers.append("%s:%i" % (peer_info['peer'].host, peer_info['peer'].port))
-            for dl_peer, (success, download_result) in zip(dl_peers,
-                                                           (yield defer.DeferredList(dl))):
+            for dl_peer, (success, download_result) in zip(dl_peers, (await d2f(defer.DeferredList(dl)))):
                if success:
                    if download_result:
                        reachable_peers.append(dl_peer)

@@ -978,14 +951,13 @@ class Daemon(metaclass=JSONRPCServerType):
                    dl_results.append(download_result)
            is_available = any(dl_results)
        except Exception as err:
-            result['error'] = "Failed to get peers for blob: %s" % err
+            return {'error': "Failed to get peers for blob: %s" % err}

-        response = {
+        return {
            'is_available': is_available,
            'reachable_peers': reachable_peers,
            'unreachable_peers': unreachable_peers,
        }
-        defer.returnValue(response)

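_blob_availability still fans out its per-peer downloads through defer.DeferredList behind d2f(). Purely as a point of comparison (not what this commit does), the asyncio-native equivalent of the same fan-out would use asyncio.gather; the downloader callable below is a stand-in:

import asyncio
from collections import namedtuple


async def check_peers(peer_infos, download_temp_blob_from_peer):
    # one download attempt per peer, failures collected instead of raised
    tasks = [download_temp_blob_from_peer(**info) for info in peer_infos]
    results = await asyncio.gather(*tasks, return_exceptions=True)
    reachable, unreachable = [], []
    for info, result in zip(peer_infos, results):
        peer = "%s:%i" % (info['peer'].host, info['peer'].port)
        (reachable if result is True else unreachable).append(peer)
    return reachable, unreachable


if __name__ == '__main__':
    Peer = namedtuple('Peer', 'host port')

    async def fake_download(peer, blob_hash, timeout):
        return peer.port % 2 == 0  # pretend even ports have the blob

    infos = [{'peer': Peer('127.0.0.1', p), 'blob_hash': 'xx', 'timeout': 5} for p in (3333, 4444)]
    print(asyncio.get_event_loop().run_until_complete(check_peers(infos, fake_download)))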
    ############################################################################
    #                                                                          #

@@ -1011,11 +983,9 @@ class Daemon(metaclass=JSONRPCServerType):
            (string) Shutdown message
        """
        log.info("Shutting down lbrynet daemon")
-        reactor.callLater(0.1, reactor.fireSystemEvent, "shutdown")
        return "Shutting down"

-    @defer.inlineCallbacks
-    def jsonrpc_status(self):
+    async def jsonrpc_status(self):
        """
        Get daemon status

@@ -1101,10 +1071,10 @@ class Daemon(metaclass=JSONRPCServerType):
            },
        }
        for component in self.component_manager.components:
-            status = yield defer.maybeDeferred(component.get_status)
+            status = await d2f(defer.maybeDeferred(component.get_status))
            if status:
                response[component.component_name] = status
-        defer.returnValue(response)
+        return response
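get_status() is awaited through d2f(defer.maybeDeferred(...)) so components that still return plain values or Deferreds keep working during the transition. Once every component implements get_status as a coroutine, the same tolerance could be expressed without Twisted; a sketch with a hypothetical helper:

import inspect


async def component_status(component):
    # accept both plain-value and coroutine implementations of get_status()
    status = component.get_status()
    if inspect.isawaitable(status):
        status = await status
    return status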
|
|
||||||
def jsonrpc_version(self):
|
def jsonrpc_version(self):
|
||||||
"""
|
"""
|
||||||
|
@ -1131,10 +1101,9 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
'python_version': (str) python version,
|
'python_version': (str) python version,
|
||||||
}
|
}
|
||||||
"""
|
"""
|
||||||
|
|
||||||
platform_info = system_info.get_platform()
|
platform_info = system_info.get_platform()
|
||||||
log.info("Get version info: " + json.dumps(platform_info))
|
log.info("Get version info: " + json.dumps(platform_info))
|
||||||
return self._render_response(platform_info)
|
return platform_info
|
||||||
|
|
||||||
def jsonrpc_report_bug(self, message=None):
|
def jsonrpc_report_bug(self, message=None):
|
||||||
"""
|
"""
|
||||||
|
@ -1157,7 +1126,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
platform_name,
|
platform_name,
|
||||||
__version__
|
__version__
|
||||||
)
|
)
|
||||||
return self._render_response(True)
|
return True
|
||||||
|
|
||||||
def jsonrpc_settings_get(self):
|
def jsonrpc_settings_get(self):
|
||||||
"""
|
"""
|
||||||
|
@ -1173,7 +1142,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
(dict) Dictionary of daemon settings
|
(dict) Dictionary of daemon settings
|
||||||
See ADJUSTABLE_SETTINGS in lbrynet/conf.py for full list of settings
|
See ADJUSTABLE_SETTINGS in lbrynet/conf.py for full list of settings
|
||||||
"""
|
"""
|
||||||
return self._render_response(conf.settings.get_adjustable_settings_dict())
|
return conf.settings.get_adjustable_settings_dict()
|
||||||
|
|
||||||
def jsonrpc_settings_set(self, **kwargs):
|
def jsonrpc_settings_set(self, **kwargs):
|
||||||
"""
|
"""
|
||||||
|
@ -1260,7 +1229,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
conf.settings.update({key: converted},
|
conf.settings.update({key: converted},
|
||||||
data_types=(conf.TYPE_RUNTIME, conf.TYPE_PERSISTED))
|
data_types=(conf.TYPE_RUNTIME, conf.TYPE_PERSISTED))
|
||||||
conf.settings.save_conf_file_settings()
|
conf.settings.save_conf_file_settings()
|
||||||
return self._render_response(conf.settings.get_adjustable_settings_dict())
|
return conf.settings.get_adjustable_settings_dict()
|
||||||
|
|
||||||
def jsonrpc_help(self, command=None):
|
def jsonrpc_help(self, command=None):
|
||||||
"""
|
"""
|
||||||
|
@ -1277,13 +1246,13 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if command is None:
|
if command is None:
|
||||||
return self._render_response({
|
return {
|
||||||
'about': 'This is the LBRY JSON-RPC API',
|
'about': 'This is the LBRY JSON-RPC API',
|
||||||
'command_help': 'Pass a `command` parameter to this method to see ' +
|
'command_help': 'Pass a `command` parameter to this method to see ' +
|
||||||
'help for that command (e.g. `help command=resolve_name`)',
|
'help for that command (e.g. `help command=resolve_name`)',
|
||||||
'command_list': 'Get a full list of commands using the `commands` method',
|
'command_list': 'Get a full list of commands using the `commands` method',
|
||||||
'more_info': 'Visit https://lbry.io/api for more info',
|
'more_info': 'Visit https://lbry.io/api for more info',
|
||||||
})
|
}
|
||||||
|
|
||||||
fn = self.callable_methods.get(command)
|
fn = self.callable_methods.get(command)
|
||||||
if fn is None:
|
if fn is None:
|
||||||
|
@ -1291,9 +1260,9 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
f"No help available for '{command}'. It is not a valid command."
|
f"No help available for '{command}'. It is not a valid command."
|
||||||
)
|
)
|
||||||
|
|
||||||
return self._render_response({
|
return {
|
||||||
'help': textwrap.dedent(fn.__doc__ or '')
|
'help': textwrap.dedent(fn.__doc__ or '')
|
||||||
})
|
}
|
||||||
|
|
||||||
def jsonrpc_commands(self):
|
def jsonrpc_commands(self):
|
||||||
"""
|
"""
|
||||||
|
@ -1308,7 +1277,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
Returns:
|
Returns:
|
||||||
(list) list of available commands
|
(list) list of available commands
|
||||||
"""
|
"""
|
||||||
return self._render_response(sorted([command for command in self.callable_methods.keys()]))
|
return sorted([command for command in self.callable_methods.keys()])
|
||||||
|
|
||||||
@deprecated("account_balance")
|
@deprecated("account_balance")
|
||||||
def jsonrpc_wallet_balance(self, address=None):
|
def jsonrpc_wallet_balance(self, address=None):
|
||||||
|
@@ -1882,8 +1851,7 @@ class Daemon(metaclass=JSONRPCServerType):
        return self.get_account_or_default(account_id).receiving.get_or_create_usable_address()

    @requires(FILE_MANAGER_COMPONENT)
-    @defer.inlineCallbacks
-    def jsonrpc_file_list(self, sort=None, **kwargs):
+    async def jsonrpc_file_list(self, sort=None, **kwargs):
        """
        List files limited by optional filters

@@ -1943,13 +1911,11 @@ class Daemon(metaclass=JSONRPCServerType):
            },
        ]
        """
-        result = yield self._get_lbry_files(return_json=True, **kwargs)
+        result = await self._get_lbry_files(return_json=True, **kwargs)
        if sort:
            sort_by = [self._parse_lbry_files_sort(s) for s in sort]
            result = self._sort_lbry_files(result, sort_by)
-        response = yield self._render_response(result)
-        defer.returnValue(response)
+        return result

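With the private helpers converted, jsonrpc_file_list becomes a straight await chain that returns its result directly instead of going through _render_response. A minimal sketch of the calling shape, using a hypothetical stand-in daemon with canned data:

import asyncio


class FakeDaemon:
    async def _get_lbry_files(self, return_json=False, **kwargs):
        # stand-in for the real helper
        return [{'file_name': 'example.mp4', 'completed': True}] if return_json else []

    async def jsonrpc_file_list(self, sort=None, **kwargs):
        return await self._get_lbry_files(return_json=True, **kwargs)


print(asyncio.get_event_loop().run_until_complete(FakeDaemon().jsonrpc_file_list()))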
@requires(WALLET_COMPONENT)
|
@requires(WALLET_COMPONENT)
|
||||||
async def jsonrpc_resolve_name(self, name, force=False):
|
async def jsonrpc_resolve_name(self, name, force=False):
|
||||||
|
@ -2179,7 +2145,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
log.info("Already waiting on lbry://%s to start downloading", name)
|
log.info("Already waiting on lbry://%s to start downloading", name)
|
||||||
await d2f(self.streams[sd_hash].data_downloading_deferred)
|
await d2f(self.streams[sd_hash].data_downloading_deferred)
|
||||||
|
|
||||||
lbry_file = await d2f(self._get_lbry_file(FileID.SD_HASH, sd_hash, return_json=False))
|
lbry_file = await self._get_lbry_file(FileID.SD_HASH, sd_hash, return_json=False)
|
||||||
|
|
||||||
if lbry_file:
|
if lbry_file:
|
||||||
if not os.path.isfile(os.path.join(lbry_file.download_directory, lbry_file.file_name)):
|
if not os.path.isfile(os.path.join(lbry_file.download_directory, lbry_file.file_name)):
|
||||||
|
@ -2188,15 +2154,14 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
await d2f(lbry_file.start())
|
await d2f(lbry_file.start())
|
||||||
else:
|
else:
|
||||||
log.info('Already have a file for %s', name)
|
log.info('Already have a file for %s', name)
|
||||||
result = await d2f(self._get_lbry_file_dict(lbry_file))
|
result = await self._get_lbry_file_dict(lbry_file)
|
||||||
else:
|
else:
|
||||||
result = await d2f(self._download_name(name, claim_dict, sd_hash, txid, nout,
|
result = await self._download_name(name, claim_dict, sd_hash, txid, nout,
|
||||||
timeout=timeout, file_name=file_name))
|
timeout=timeout, file_name=file_name)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
@requires(FILE_MANAGER_COMPONENT)
|
@requires(FILE_MANAGER_COMPONENT)
|
||||||
@defer.inlineCallbacks
|
async def jsonrpc_file_set_status(self, status, **kwargs):
|
||||||
def jsonrpc_file_set_status(self, status, **kwargs):
|
|
||||||
"""
|
"""
|
||||||
Start or stop downloading a file
|
Start or stop downloading a file
|
||||||
|
|
||||||
|
@ -2220,24 +2185,22 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
raise Exception('Status must be "start" or "stop".')
|
raise Exception('Status must be "start" or "stop".')
|
||||||
|
|
||||||
search_type, value = get_lbry_file_search_value(kwargs)
|
search_type, value = get_lbry_file_search_value(kwargs)
|
||||||
lbry_file = yield self._get_lbry_file(search_type, value, return_json=False)
|
lbry_file = await self._get_lbry_file(search_type, value, return_json=False)
|
||||||
if not lbry_file:
|
if not lbry_file:
|
||||||
raise Exception(f'Unable to find a file for {search_type}:{value}')
|
raise Exception(f'Unable to find a file for {search_type}:{value}')
|
||||||
|
|
||||||
if status == 'start' and lbry_file.stopped or status == 'stop' and not lbry_file.stopped:
|
if status == 'start' and lbry_file.stopped or status == 'stop' and not lbry_file.stopped:
|
||||||
yield self.file_manager.toggle_lbry_file_running(lbry_file)
|
await d2f(self.file_manager.toggle_lbry_file_running(lbry_file))
|
||||||
msg = "Started downloading file" if status == 'start' else "Stopped downloading file"
|
msg = "Started downloading file" if status == 'start' else "Stopped downloading file"
|
||||||
else:
|
else:
|
||||||
msg = (
|
msg = (
|
||||||
"File was already being downloaded" if status == 'start'
|
"File was already being downloaded" if status == 'start'
|
||||||
else "File was already stopped"
|
else "File was already stopped"
|
||||||
)
|
)
|
||||||
response = yield self._render_response(msg)
|
return msg
|
||||||
defer.returnValue(response)
|
|
||||||
|
|
||||||
@requires(FILE_MANAGER_COMPONENT)
|
@requires(FILE_MANAGER_COMPONENT)
|
||||||
@defer.inlineCallbacks
|
async def jsonrpc_file_delete(self, delete_from_download_dir=False, delete_all=False, **kwargs):
|
||||||
def jsonrpc_file_delete(self, delete_from_download_dir=False, delete_all=False, **kwargs):
|
|
||||||
"""
|
"""
|
||||||
Delete a LBRY file
|
Delete a LBRY file
|
||||||
|
|
||||||
|
@ -2268,33 +2231,28 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
(bool) true if deletion was successful
|
(bool) true if deletion was successful
|
||||||
"""
|
"""
|
||||||
|
|
||||||
lbry_files = yield self._get_lbry_files(return_json=False, **kwargs)
|
lbry_files = await self._get_lbry_files(return_json=False, **kwargs)
|
||||||
|
|
||||||
if len(lbry_files) > 1:
|
if len(lbry_files) > 1:
|
||||||
if not delete_all:
|
if not delete_all:
|
||||||
log.warning("There are %i files to delete, use narrower filters to select one",
|
log.warning("There are %i files to delete, use narrower filters to select one",
|
||||||
len(lbry_files))
|
len(lbry_files))
|
||||||
response = yield self._render_response(False)
|
return False
|
||||||
defer.returnValue(response)
|
|
||||||
else:
|
else:
|
||||||
log.warning("Deleting %i files",
|
log.warning("Deleting %i files",
|
||||||
len(lbry_files))
|
len(lbry_files))
|
||||||
|
|
||||||
if not lbry_files:
|
if not lbry_files:
|
||||||
log.warning("There is no file to delete")
|
log.warning("There is no file to delete")
|
||||||
result = False
|
return False
|
||||||
else:
|
else:
|
||||||
for lbry_file in lbry_files:
|
for lbry_file in lbry_files:
|
||||||
file_name, stream_hash = lbry_file.file_name, lbry_file.stream_hash
|
file_name, stream_hash = lbry_file.file_name, lbry_file.stream_hash
|
||||||
if lbry_file.sd_hash in self.streams:
|
if lbry_file.sd_hash in self.streams:
|
||||||
del self.streams[lbry_file.sd_hash]
|
del self.streams[lbry_file.sd_hash]
|
||||||
yield self.file_manager.delete_lbry_file(lbry_file,
|
await d2f(self.file_manager.delete_lbry_file(lbry_file, delete_file=delete_from_download_dir))
|
||||||
delete_file=delete_from_download_dir)
|
|
||||||
log.info("Deleted file: %s", file_name)
|
log.info("Deleted file: %s", file_name)
|
||||||
result = True
|
return True
|
||||||
|
|
||||||
response = yield self._render_response(result)
|
|
||||||
defer.returnValue(response)
|
|
||||||
|
|
||||||
@requires(WALLET_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, BLOB_COMPONENT,
|
@requires(WALLET_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, BLOB_COMPONENT,
|
||||||
DHT_COMPONENT, RATE_LIMITER_COMPONENT, PAYMENT_RATE_COMPONENT, DATABASE_COMPONENT,
|
DHT_COMPONENT, RATE_LIMITER_COMPONENT, PAYMENT_RATE_COMPONENT, DATABASE_COMPONENT,
|
||||||
|
@ -2397,8 +2355,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
)
|
)
|
||||||
|
|
||||||
@requires(WALLET_COMPONENT)
|
@requires(WALLET_COMPONENT)
|
||||||
@defer.inlineCallbacks
|
async def jsonrpc_channel_export(self, claim_id):
|
||||||
def jsonrpc_channel_export(self, claim_id):
|
|
||||||
"""
|
"""
|
||||||
Export serialized channel signing information for a given certificate claim id
|
Export serialized channel signing information for a given certificate claim id
|
||||||
|
|
||||||
|
@ -2412,12 +2369,10 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
(str) Serialized certificate information
|
(str) Serialized certificate information
|
||||||
"""
|
"""
|
||||||
|
|
||||||
result = yield self.wallet_manager.export_certificate_info(claim_id)
|
return await self.wallet_manager.export_certificate_info(claim_id)
|
||||||
defer.returnValue(result)
|
|
||||||
|
|
||||||
@requires(WALLET_COMPONENT)
|
@requires(WALLET_COMPONENT)
|
||||||
@defer.inlineCallbacks
|
async def jsonrpc_channel_import(self, serialized_certificate_info):
|
||||||
def jsonrpc_channel_import(self, serialized_certificate_info):
|
|
||||||
"""
|
"""
|
||||||
Import serialized channel signing information (to allow signing new claims to the channel)
|
Import serialized channel signing information (to allow signing new claims to the channel)
|
||||||
|
|
||||||
|
@ -2431,8 +2386,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
(dict) Result dictionary
|
(dict) Result dictionary
|
||||||
"""
|
"""
|
||||||
|
|
||||||
result = yield self.wallet_manager.import_certificate_info(serialized_certificate_info)
|
return await self.wallet_manager.import_certificate_info(serialized_certificate_info)
|
||||||
defer.returnValue(result)
|
|
||||||
|
|
||||||
@requires(WALLET_COMPONENT, FILE_MANAGER_COMPONENT, BLOB_COMPONENT, PAYMENT_RATE_COMPONENT, DATABASE_COMPONENT,
|
@requires(WALLET_COMPONENT, FILE_MANAGER_COMPONENT, BLOB_COMPONENT, PAYMENT_RATE_COMPONENT, DATABASE_COMPONENT,
|
||||||
conditions=[WALLET_IS_UNLOCKED])
|
conditions=[WALLET_IS_UNLOCKED])
|
||||||
|
@ -3120,8 +3074,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
|
|
||||||
@requires(WALLET_COMPONENT, DHT_COMPONENT, BLOB_COMPONENT, RATE_LIMITER_COMPONENT, PAYMENT_RATE_COMPONENT,
|
@requires(WALLET_COMPONENT, DHT_COMPONENT, BLOB_COMPONENT, RATE_LIMITER_COMPONENT, PAYMENT_RATE_COMPONENT,
|
||||||
conditions=[WALLET_IS_UNLOCKED])
|
conditions=[WALLET_IS_UNLOCKED])
|
||||||
@defer.inlineCallbacks
|
async def jsonrpc_blob_get(self, blob_hash, timeout=None, encoding=None, payment_rate_manager=None):
|
||||||
def jsonrpc_blob_get(self, blob_hash, timeout=None, encoding=None, payment_rate_manager=None):
|
|
||||||
"""
|
"""
|
||||||
Download and return a blob
|
Download and return a blob
|
||||||
|
|
||||||
|
@ -3150,7 +3103,9 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
}
|
}
|
||||||
|
|
||||||
timeout = timeout or 30
|
timeout = timeout or 30
|
||||||
blob = yield self._download_blob(blob_hash, rate_manager=self.payment_rate_manager, timeout=timeout)
|
blob = await self._download_blob(
|
||||||
|
blob_hash, rate_manager=self.payment_rate_manager, timeout=timeout
|
||||||
|
)
|
||||||
if encoding and encoding in decoders:
|
if encoding and encoding in decoders:
|
||||||
blob_file = blob.open_for_reading()
|
blob_file = blob.open_for_reading()
|
||||||
result = decoders[encoding](blob_file.read())
|
result = decoders[encoding](blob_file.read())
|
||||||
|
@ -3161,8 +3116,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
return result
|
return result
|
||||||
|
|
||||||
    @requires(BLOB_COMPONENT, DATABASE_COMPONENT)
-    @defer.inlineCallbacks
-    def jsonrpc_blob_delete(self, blob_hash):
+    async def jsonrpc_blob_delete(self, blob_hash):
        """
        Delete a blob

@@ -3179,16 +3133,15 @@ class Daemon(metaclass=JSONRPCServerType):
        if blob_hash not in self.blob_manager.blobs:
            return "Don't have that blob"
        try:
-            stream_hash = yield self.storage.get_stream_hash_for_sd_hash(blob_hash)
-            yield self.storage.delete_stream(stream_hash)
+            stream_hash = await self.storage.get_stream_hash_for_sd_hash(blob_hash)
+            await self.storage.delete_stream(stream_hash)
        except Exception as err:
            pass
-        yield self.blob_manager.delete_blobs([blob_hash])
+        await d2f(self.blob_manager.delete_blobs([blob_hash]))
        return "Deleted %s" % blob_hash

@requires(DHT_COMPONENT)
|
@requires(DHT_COMPONENT)
|
||||||
@defer.inlineCallbacks
|
async def jsonrpc_peer_list(self, blob_hash, timeout=None):
|
||||||
def jsonrpc_peer_list(self, blob_hash, timeout=None):
|
|
||||||
"""
|
"""
|
||||||
Get peers for blob hash
|
Get peers for blob hash
|
||||||
|
|
||||||
|
@ -3214,7 +3167,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
|
|
||||||
finished_deferred.addTimeout(timeout or conf.settings['peer_search_timeout'], self.dht_node.clock)
|
finished_deferred.addTimeout(timeout or conf.settings['peer_search_timeout'], self.dht_node.clock)
|
||||||
finished_deferred.addErrback(trap_timeout)
|
finished_deferred.addErrback(trap_timeout)
|
||||||
peers = yield finished_deferred
|
peers = await d2f(finished_deferred)
|
||||||
results = [
|
results = [
|
||||||
{
|
{
|
||||||
"node_id": hexlify(node_id).decode(),
|
"node_id": hexlify(node_id).decode(),
|
||||||
|
@ -3226,8 +3179,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
return results
|
return results
|
||||||
|
|
||||||
@requires(DATABASE_COMPONENT)
|
@requires(DATABASE_COMPONENT)
|
||||||
@defer.inlineCallbacks
|
async def jsonrpc_blob_announce(self, blob_hash=None, stream_hash=None, sd_hash=None):
|
||||||
def jsonrpc_blob_announce(self, blob_hash=None, stream_hash=None, sd_hash=None):
|
|
||||||
"""
|
"""
|
||||||
Announce blobs to the DHT
|
Announce blobs to the DHT
|
||||||
|
|
||||||
|
@ -3245,7 +3197,6 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
Returns:
|
Returns:
|
||||||
(bool) true if successful
|
(bool) true if successful
|
||||||
"""
|
"""
|
||||||
|
|
||||||
blob_hashes = []
|
blob_hashes = []
|
||||||
if blob_hash:
|
if blob_hash:
|
||||||
blob_hashes.append(blob_hash)
|
blob_hashes.append(blob_hash)
|
||||||
|
@ -3253,17 +3204,16 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
if sd_hash and stream_hash:
|
if sd_hash and stream_hash:
|
||||||
raise Exception("either the sd hash or the stream hash should be provided, not both")
|
raise Exception("either the sd hash or the stream hash should be provided, not both")
|
||||||
if sd_hash:
|
if sd_hash:
|
||||||
stream_hash = yield self.storage.get_stream_hash_for_sd_hash(sd_hash)
|
stream_hash = await self.storage.get_stream_hash_for_sd_hash(sd_hash)
|
||||||
blobs = yield self.storage.get_blobs_for_stream(stream_hash, only_completed=True)
|
blobs = await self.storage.get_blobs_for_stream(stream_hash, only_completed=True)
|
||||||
blob_hashes.extend(blob.blob_hash for blob in blobs if blob.blob_hash is not None)
|
blob_hashes.extend(blob.blob_hash for blob in blobs if blob.blob_hash is not None)
|
||||||
else:
|
else:
|
||||||
raise Exception('single argument must be specified')
|
raise Exception('single argument must be specified')
|
||||||
yield self.storage.should_single_announce_blobs(blob_hashes, immediate=True)
|
await self.storage.should_single_announce_blobs(blob_hashes, immediate=True)
|
||||||
return True
|
return True
|
||||||
|
|
||||||
    @requires(FILE_MANAGER_COMPONENT)
-    @defer.inlineCallbacks
-    def jsonrpc_file_reflect(self, **kwargs):
+    async def jsonrpc_file_reflect(self, **kwargs):
        """
        Reflect all the blobs in a file matching the filter criteria

@@ -3284,22 +3234,17 @@ class Daemon(metaclass=JSONRPCServerType):
        Returns:
            (list) list of blobs reflected
        """
-        reflector_server = kwargs.get('reflector', None)
-        lbry_files = yield self._get_lbry_files(**kwargs)
+        lbry_files = await self._get_lbry_files(**kwargs)

        if len(lbry_files) > 1:
            raise Exception('Too many (%i) files found, need one' % len(lbry_files))
        elif not lbry_files:
            raise Exception('No file found')
-        lbry_file = lbry_files[0]
-
-        results = yield reupload.reflect_file(lbry_file, reflector_server=reflector_server)
-        return results
+        return await d2f(reupload.reflect_file(
+            lbry_files[0], reflector_server=kwargs.get('reflector', None)
+        ))

@requires(BLOB_COMPONENT, WALLET_COMPONENT)
|
@requires(BLOB_COMPONENT, WALLET_COMPONENT)
|
||||||
@defer.inlineCallbacks
|
async def jsonrpc_blob_list(self, uri=None, stream_hash=None, sd_hash=None, needed=None,
|
||||||
def jsonrpc_blob_list(self, uri=None, stream_hash=None, sd_hash=None, needed=None,
|
|
||||||
finished=None, page_size=None, page=None):
|
finished=None, page_size=None, page=None):
|
||||||
"""
|
"""
|
||||||
Returns blob hashes. If not given filters, returns all blobs known by the blob manager
|
Returns blob hashes. If not given filters, returns all blobs known by the blob manager
|
||||||
|
@ -3325,20 +3270,20 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
"""
|
"""
|
||||||
if uri or stream_hash or sd_hash:
|
if uri or stream_hash or sd_hash:
|
||||||
if uri:
|
if uri:
|
||||||
metadata = (yield f2d(self.wallet_manager.resolve(uri)))[uri]
|
metadata = (await self.wallet_manager.resolve(uri))[uri]
|
||||||
sd_hash = utils.get_sd_hash(metadata)
|
sd_hash = utils.get_sd_hash(metadata)
|
||||||
stream_hash = yield self.storage.get_stream_hash_for_sd_hash(sd_hash)
|
stream_hash = await self.storage.get_stream_hash_for_sd_hash(sd_hash)
|
||||||
elif stream_hash:
|
elif stream_hash:
|
||||||
sd_hash = yield self.storage.get_sd_blob_hash_for_stream(stream_hash)
|
sd_hash = await self.storage.get_sd_blob_hash_for_stream(stream_hash)
|
||||||
elif sd_hash:
|
elif sd_hash:
|
||||||
stream_hash = yield self.storage.get_stream_hash_for_sd_hash(sd_hash)
|
stream_hash = await self.storage.get_stream_hash_for_sd_hash(sd_hash)
|
||||||
sd_hash = yield self.storage.get_sd_blob_hash_for_stream(stream_hash)
|
sd_hash = await self.storage.get_sd_blob_hash_for_stream(stream_hash)
|
||||||
if stream_hash:
|
if stream_hash:
|
||||||
crypt_blobs = yield self.storage.get_blobs_for_stream(stream_hash)
|
crypt_blobs = await self.storage.get_blobs_for_stream(stream_hash)
|
||||||
blobs = yield defer.gatherResults([
|
blobs = await d2f(defer.gatherResults([
|
||||||
self.blob_manager.get_blob(crypt_blob.blob_hash, crypt_blob.length)
|
self.blob_manager.get_blob(crypt_blob.blob_hash, crypt_blob.length)
|
||||||
for crypt_blob in crypt_blobs if crypt_blob.blob_hash is not None
|
for crypt_blob in crypt_blobs if crypt_blob.blob_hash is not None
|
||||||
])
|
]))
|
||||||
else:
|
else:
|
||||||
blobs = []
|
blobs = []
|
||||||
# get_blobs_for_stream does not include the sd blob, so we'll add it manually
|
# get_blobs_for_stream does not include the sd blob, so we'll add it manually
|
||||||
|
@ -3373,13 +3318,10 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
Returns:
|
Returns:
|
||||||
(list) reflected blob hashes
|
(list) reflected blob hashes
|
||||||
"""
|
"""
|
||||||
|
return d2f(reupload.reflect_blob_hashes(blob_hashes, self.blob_manager, reflector_server))
|
||||||
d = reupload.reflect_blob_hashes(blob_hashes, self.blob_manager, reflector_server)
|
|
||||||
d.addCallback(lambda r: self._render_response(r))
|
|
||||||
return d
|
|
||||||
|
|
||||||
@requires(BLOB_COMPONENT)
|
@requires(BLOB_COMPONENT)
|
||||||
def jsonrpc_blob_reflect_all(self):
|
async def jsonrpc_blob_reflect_all(self):
|
||||||
"""
|
"""
|
||||||
Reflects all saved blobs
|
Reflects all saved blobs
|
||||||
|
|
||||||
|
@ -3392,15 +3334,11 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
Returns:
|
Returns:
|
||||||
(bool) true if successful
|
(bool) true if successful
|
||||||
"""
|
"""
|
||||||
|
blob_hashes = await d2f(self.blob_manager.get_all_verified_blobs())
|
||||||
d = self.blob_manager.get_all_verified_blobs()
|
return await d2f(reupload.reflect_blob_hashes(blob_hashes, self.blob_manager))
|
||||||
d.addCallback(reupload.reflect_blob_hashes, self.blob_manager)
|
|
||||||
d.addCallback(lambda r: self._render_response(r))
|
|
||||||
return d
|
|
||||||
|
|
||||||
@requires(DHT_COMPONENT)
|
@requires(DHT_COMPONENT)
|
||||||
@defer.inlineCallbacks
|
async def jsonrpc_peer_ping(self, node_id, address=None, port=None):
|
||||||
def jsonrpc_peer_ping(self, node_id, address=None, port=None):
|
|
||||||
"""
|
"""
|
||||||
Send a kademlia ping to the specified peer. If address and port are provided the peer is directly pinged,
|
Send a kademlia ping to the specified peer. If address and port are provided the peer is directly pinged,
|
||||||
if not provided the peer is located first.
|
if not provided the peer is located first.
|
||||||
|
@ -3416,7 +3354,6 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
Returns:
|
Returns:
|
||||||
(str) pong, or {'error': <error message>} if an error is encountered
|
(str) pong, or {'error': <error message>} if an error is encountered
|
||||||
"""
|
"""
|
||||||
|
|
||||||
contact = None
|
contact = None
|
||||||
if node_id and address and port:
|
if node_id and address and port:
|
||||||
contact = self.dht_node.contact_manager.get_contact(unhexlify(node_id), address, int(port))
|
contact = self.dht_node.contact_manager.get_contact(unhexlify(node_id), address, int(port))
|
||||||
|
@ -3426,16 +3363,15 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
)
|
)
|
||||||
if not contact:
|
if not contact:
|
||||||
try:
|
try:
|
||||||
contact = yield self.dht_node.findContact(unhexlify(node_id))
|
contact = await d2f(self.dht_node.findContact(unhexlify(node_id)))
|
||||||
except TimeoutError:
|
except TimeoutError:
|
||||||
return {'error': 'timeout finding peer'}
|
return {'error': 'timeout finding peer'}
|
||||||
if not contact:
|
if not contact:
|
||||||
return {'error': 'peer not found'}
|
return {'error': 'peer not found'}
|
||||||
try:
|
try:
|
||||||
result = (yield contact.ping()).decode()
|
return (await d2f(contact.ping())).decode()
|
||||||
except TimeoutError:
|
except TimeoutError:
|
||||||
result = {'error': 'ping timeout'}
|
return {'error': 'ping timeout'}
|
||||||
return result
|
|
||||||
|
|
||||||
@requires(DHT_COMPONENT)
|
@requires(DHT_COMPONENT)
|
||||||
def jsonrpc_routing_table_get(self):
|
def jsonrpc_routing_table_get(self):
|
||||||
|
@ -3495,7 +3431,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
result['contacts'] = list(contact_set)
|
result['contacts'] = list(contact_set)
|
||||||
result['blob_hashes'] = list(blob_hashes)
|
result['blob_hashes'] = list(blob_hashes)
|
||||||
result['node_id'] = hexlify(self.dht_node.node_id).decode()
|
result['node_id'] = hexlify(self.dht_node.node_id).decode()
|
||||||
return self._render_response(result)
|
return result
|
||||||
|
|
||||||
# the single peer downloader needs wallet access
|
# the single peer downloader needs wallet access
|
||||||
@requires(DHT_COMPONENT, WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
|
@requires(DHT_COMPONENT, WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
|
||||||
|
@ -3520,12 +3456,10 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
"unreachable_peers": ["<ip>:<port>"]
|
"unreachable_peers": ["<ip>:<port>"]
|
||||||
}
|
}
|
||||||
"""
|
"""
|
||||||
|
|
||||||
return self._blob_availability(blob_hash, search_timeout, blob_timeout)
|
return self._blob_availability(blob_hash, search_timeout, blob_timeout)
|
||||||
|
|
||||||
@requires(UPNP_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
|
@requires(UPNP_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
|
||||||
@defer.inlineCallbacks
|
async def jsonrpc_stream_availability(self, uri, search_timeout=None, blob_timeout=None):
|
||||||
def jsonrpc_stream_availability(self, uri, search_timeout=None, blob_timeout=None):
|
|
||||||
"""
|
"""
|
||||||
Get stream availability for lbry uri
|
Get stream availability for lbry uri
|
||||||
|
|
||||||
|
@ -3576,26 +3510,26 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
}
|
}
|
||||||
|
|
||||||
try:
|
try:
|
||||||
resolved_result = (yield self.wallet_manager.resolve(uri))[uri]
|
resolved_result = (await self.wallet_manager.resolve(uri))[uri]
|
||||||
response['did_resolve'] = True
|
response['did_resolve'] = True
|
||||||
except UnknownNameError:
|
except UnknownNameError:
|
||||||
response['error'] = "Failed to resolve name"
|
response['error'] = "Failed to resolve name"
|
||||||
defer.returnValue(response)
|
return response
|
||||||
except URIParseError:
|
except URIParseError:
|
||||||
response['error'] = "Invalid URI"
|
response['error'] = "Invalid URI"
|
||||||
defer.returnValue(response)
|
return response
|
||||||
|
|
||||||
try:
|
try:
|
||||||
claim_obj = smart_decode(resolved_result[uri]['claim']['hex'])
|
claim_obj = smart_decode(resolved_result[uri]['claim']['hex'])
|
||||||
response['did_decode'] = True
|
response['did_decode'] = True
|
||||||
except DecodeError:
|
except DecodeError:
|
||||||
response['error'] = "Failed to decode claim value"
|
response['error'] = "Failed to decode claim value"
|
||||||
defer.returnValue(response)
|
return response
|
||||||
|
|
||||||
response['is_stream'] = claim_obj.is_stream
|
response['is_stream'] = claim_obj.is_stream
|
||||||
if not claim_obj.is_stream:
|
if not claim_obj.is_stream:
|
||||||
response['error'] = "Claim for \"%s\" does not contain a stream" % uri
|
response['error'] = "Claim for \"%s\" does not contain a stream" % uri
|
||||||
defer.returnValue(response)
|
return response
|
||||||
|
|
||||||
sd_hash = claim_obj.source_hash
|
sd_hash = claim_obj.source_hash
|
||||||
response['sd_hash'] = sd_hash
|
response['sd_hash'] = sd_hash
|
||||||
|
@ -3603,28 +3537,23 @@ class Daemon(metaclass=JSONRPCServerType):
|
||||||
downloader = self._get_single_peer_downloader()
|
downloader = self._get_single_peer_downloader()
|
||||||
have_sd_blob = sd_hash in self.blob_manager.blobs
|
have_sd_blob = sd_hash in self.blob_manager.blobs
|
||||||
try:
|
try:
|
||||||
sd_blob = yield self.jsonrpc_blob_get(sd_hash, timeout=blob_timeout,
|
sd_blob = await self.jsonrpc_blob_get(sd_hash, timeout=blob_timeout, encoding="json")
|
||||||
encoding="json")
|
|
||||||
if not have_sd_blob:
|
if not have_sd_blob:
|
||||||
yield self.jsonrpc_blob_delete(sd_hash)
|
await self.jsonrpc_blob_delete(sd_hash)
|
||||||
if sd_blob and 'blobs' in sd_blob:
|
if sd_blob and 'blobs' in sd_blob:
|
||||||
response['num_blobs_in_stream'] = len(sd_blob['blobs']) - 1
|
response['num_blobs_in_stream'] = len(sd_blob['blobs']) - 1
|
||||||
head_blob_hash = sd_blob['blobs'][0]['blob_hash']
|
head_blob_hash = sd_blob['blobs'][0]['blob_hash']
|
||||||
head_blob_availability = yield self._blob_availability(head_blob_hash,
|
head_blob_availability = await self._blob_availability(
|
||||||
search_timeout,
|
head_blob_hash, search_timeout, blob_timeout, downloader)
|
||||||
blob_timeout,
|
|
||||||
downloader)
|
|
||||||
response['head_blob_availability'] = head_blob_availability
|
response['head_blob_availability'] = head_blob_availability
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
response['error'] = err
|
response['error'] = err
|
||||||
response['head_blob_hash'] = head_blob_hash
|
response['head_blob_hash'] = head_blob_hash
|
||||||
response['sd_blob_availability'] = yield self._blob_availability(sd_hash,
|
response['sd_blob_availability'] = await self._blob_availability(
|
||||||
search_timeout,
|
sd_hash, search_timeout, blob_timeout, downloader)
|
||||||
blob_timeout,
|
|
||||||
downloader)
|
|
||||||
response['is_available'] = response['sd_blob_availability'].get('is_available') and \
|
response['is_available'] = response['sd_blob_availability'].get('is_available') and \
|
||||||
response['head_blob_availability'].get('is_available')
|
response['head_blob_availability'].get('is_available')
|
||||||
defer.returnValue(response)
|
return response
|
||||||
|
|
||||||
async def get_channel_or_error(
|
async def get_channel_or_error(
|
||||||
self, accounts: List[LBCAccount], channel_id: str = None, channel_name: str = None):
|
self, accounts: List[LBCAccount], channel_id: str = None, channel_name: str = None):
|
||||||
|
|
|
@ -14,7 +14,6 @@ import aiohttp
|
||||||
import logging
|
import logging
|
||||||
from urllib.parse import urlparse
|
from urllib.parse import urlparse
|
||||||
|
|
||||||
from lbrynet import conf
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
USER_AGENT = "AuthServiceProxy/0.1"
|
USER_AGENT = "AuthServiceProxy/0.1"
|
||||||
|
@ -110,7 +109,7 @@ class AuthAPIClient:
|
||||||
@classmethod
|
@classmethod
|
||||||
async def get_client(cls, key_name=None):
|
async def get_client(cls, key_name=None):
|
||||||
api_key_name = key_name or "api"
|
api_key_name = key_name or "api"
|
||||||
keyring = Keyring.load_from_disk()
|
keyring = Keyring.load_from_disk() # pylint: disable=E0602
|
||||||
|
|
||||||
api_key = keyring.api_key
|
api_key = keyring.api_key
|
||||||
login_url = conf.settings.get_api_connection_string(api_key_name, api_key.secret)
|
login_url = conf.settings.get_api_connection_string(api_key_name, api_key.secret)
|
||||||
|
@ -127,7 +126,7 @@ class AuthAPIClient:
|
||||||
async with session.post(login_url, headers=headers) as r:
|
async with session.post(login_url, headers=headers) as r:
|
||||||
cookies = r.cookies
|
cookies = r.cookies
|
||||||
uid = cookies.get(TWISTED_SECURE_SESSION if conf.settings['use_https'] else TWISTED_SESSION).value
|
uid = cookies.get(TWISTED_SECURE_SESSION if conf.settings['use_https'] else TWISTED_SESSION).value
|
||||||
api_key = APIKey.create(seed=uid.encode())
|
api_key = APIKey.create(seed=uid.encode()) # pylint: disable=E0602
|
||||||
return cls(api_key, session, cookies, url, login_url)
|
return cls(api_key, session, cookies, url, login_url)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -11,7 +11,7 @@ from lbrynet.p2p.StreamDescriptor import download_sd_blob
|
||||||
from lbrynet.blob.EncryptedFileDownloader import ManagedEncryptedFileDownloaderFactory
|
from lbrynet.blob.EncryptedFileDownloader import ManagedEncryptedFileDownloaderFactory
|
||||||
from torba.client.constants import COIN
|
from torba.client.constants import COIN
|
||||||
from lbrynet.extras.wallet.dewies import dewies_to_lbc
|
from lbrynet.extras.wallet.dewies import dewies_to_lbc
|
||||||
from lbrynet.extras.daemon.Components import f2d
|
from lbrynet.extras.compat import f2d
|
||||||
|
|
||||||
INITIALIZING_CODE = 'initializing'
|
INITIALIZING_CODE = 'initializing'
|
||||||
DOWNLOAD_METADATA_CODE = 'downloading_metadata'
|
DOWNLOAD_METADATA_CODE = 'downloading_metadata'
|
||||||
|
|
|
@ -50,18 +50,18 @@ class Publisher:
|
||||||
)
|
)
|
||||||
|
|
||||||
# check if we have a file already for this claim (if this is a publish update with a new stream)
|
# check if we have a file already for this claim (if this is a publish update with a new stream)
|
||||||
old_stream_hashes = await d2f(self.storage.get_old_stream_hashes_for_claim_id(
|
old_stream_hashes = await self.storage.get_old_stream_hashes_for_claim_id(
|
||||||
tx.outputs[0].claim_id, self.lbry_file.stream_hash
|
tx.outputs[0].claim_id, self.lbry_file.stream_hash
|
||||||
))
|
)
|
||||||
if old_stream_hashes:
|
if old_stream_hashes:
|
||||||
for lbry_file in filter(lambda l: l.stream_hash in old_stream_hashes,
|
for lbry_file in filter(lambda l: l.stream_hash in old_stream_hashes,
|
||||||
list(self.lbry_file_manager.lbry_files)):
|
list(self.lbry_file_manager.lbry_files)):
|
||||||
await d2f(self.lbry_file_manager.delete_lbry_file(lbry_file, delete_file=False))
|
await d2f(self.lbry_file_manager.delete_lbry_file(lbry_file, delete_file=False))
|
||||||
log.info("Removed old stream for claim update: %s", lbry_file.stream_hash)
|
log.info("Removed old stream for claim update: %s", lbry_file.stream_hash)
|
||||||
|
|
||||||
await d2f(self.storage.save_content_claim(
|
await self.storage.save_content_claim(
|
||||||
self.lbry_file.stream_hash, tx.outputs[0].id
|
self.lbry_file.stream_hash, tx.outputs[0].id
|
||||||
))
|
)
|
||||||
return tx
|
return tx
|
||||||
|
|
||||||
async def publish_stream(self, name, bid, claim_dict, stream_hash, holding_address=None):
|
async def publish_stream(self, name, bid, claim_dict, stream_hash, holding_address=None):
|
||||||
|
|
|
@@ -1,17 +1,16 @@
+import asyncio
 import logging
 import os
-import sqlite3
 import traceback
 import typing
 from binascii import hexlify, unhexlify
-from twisted.internet import defer, task, threads
-from twisted.enterprise import adbapi
 from lbrynet.extras.wallet.dewies import dewies_to_lbc, lbc_to_dewies
 from lbrynet import conf
 from lbrynet.schema.claim import ClaimDict
 from lbrynet.schema.decode import smart_decode
 from lbrynet.blob.CryptBlob import CryptBlobInfo
 from lbrynet.dht.constants import dataExpireTimeout
+from torba.client.basedatabase import SQLiteMixin

 log = logging.getLogger(__name__)

@@ -44,63 +43,23 @@ def _open_file_for_writing(download_directory, suggested_file_name):
     return os.path.basename(file_path)


-def open_file_for_writing(download_directory, suggested_file_name):
-    """
-    Used to touch the path of a file to be downloaded
-
-    :param download_directory: (str)
-    :param suggested_file_name: (str)
-    :return: (str) basename
-    """
-    return threads.deferToThread(_open_file_for_writing, download_directory, suggested_file_name)
+async def open_file_for_writing(download_directory: str, suggested_file_name: str) -> str:
+    """ Used to touch the path of a file to be downloaded. """
+    return await asyncio.get_event_loop().run_in_executor(
+        None, _open_file_for_writing, download_directory, suggested_file_name
+    )


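Usage sketch for the converted helper (directory and file name here are made up): the blocking filesystem work stays in _open_file_for_writing and is pushed onto the default executor, so the event loop itself is never blocked.

import asyncio


async def reserve_download_path():
    # open_file_for_writing is the coroutine defined just above (this snippet
    # assumes it is imported from or run inside this module)
    file_name = await open_file_for_writing('/tmp/downloads', 'example.mp4')
    print('will write to', file_name)


# asyncio.get_event_loop().run_until_complete(reserve_download_path())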
-def rerun_if_locked(f):
-    max_attempts = 5
-
-    def rerun(err, rerun_count, *args, **kwargs):
-        connection = args[0]
-        reactor = connection.reactor
-        log.debug("Failed to execute (%s): %s", err, args)
-        if err.check(sqlite3.OperationalError) and "database is locked" in str(err.value):
-            log.warning("database was locked. rerunning %s with args %s, kwargs %s",
-                        str(f), str(args), str(kwargs))
-            if rerun_count < max_attempts:
-                delay = 2**rerun_count
-                return task.deferLater(reactor, delay, inner_wrapper, rerun_count + 1, *args, **kwargs)
-        raise err
-
-    def check_needed_rerun(result, rerun_count):
-        if rerun_count:
-            log.info("successfully reran database query")
-        return result
-
-    def inner_wrapper(rerun_count, *args, **kwargs):
-        d = f(*args, **kwargs)
-        d.addCallback(check_needed_rerun, rerun_count)
-        d.addErrback(rerun, rerun_count, *args, **kwargs)
-        return d
-
-    def wrapper(*args, **kwargs):
-        return inner_wrapper(0, *args, **kwargs)
-
-    return wrapper
-
-
-class SqliteConnection(adbapi.ConnectionPool):
-    def __init__(self, db_path):
-        super().__init__('sqlite3', db_path, check_same_thread=False)
-
-    @rerun_if_locked
-    def runInteraction(self, interaction, *args, **kw):
-        return super().runInteraction(interaction, *args, **kw)
-
-    @classmethod
-    def set_reactor(cls, reactor):
-        cls.reactor = reactor
-
-
-class SQLiteStorage:
+async def looping_call(interval, fun):
+    while True:
+        try:
+            await fun()
+        except Exception as e:
+            log.exception('Looping call experienced exception:', exc_info=e)
+        await asyncio.sleep(interval)
+
+
+class SQLiteStorage(SQLiteMixin):

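looping_call replaces task.LoopingCall with a plain coroutine, so the caller decides how it is scheduled and stopped. One way to drive it (illustrative only; the interval and callback below are made up) is to wrap it in a task and cancel that task on shutdown:

import asyncio


async def demo():
    async def tick():
        print('periodic maintenance')

    task = asyncio.ensure_future(looping_call(0.5, tick))  # looping_call from above
    await asyncio.sleep(2)
    task.cancel()


# asyncio.get_event_loop().run_until_complete(demo())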
CREATE_TABLES_QUERY = """
|
CREATE_TABLES_QUERY = """
|
||||||
pragma foreign_keys=on;
|
pragma foreign_keys=on;
|
||||||
|
@ -173,70 +132,45 @@ class SQLiteStorage:
|
||||||
);
|
);
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, db_dir, reactor=None):
|
def __init__(self, path):
|
||||||
if not reactor:
|
super().__init__(path)
|
||||||
from twisted.internet import reactor
|
from twisted.internet import reactor
|
||||||
self.db_dir = db_dir
|
|
||||||
self._db_path = os.path.join(db_dir, "lbrynet.sqlite")
|
|
||||||
log.info("connecting to database: %s", self._db_path)
|
|
||||||
self.db = SqliteConnection(self._db_path)
|
|
||||||
self.db.set_reactor(reactor)
|
|
||||||
self.clock = reactor
|
self.clock = reactor
|
||||||
|
self.content_claim_callbacks = {}
|
||||||
# used to refresh the claim attributes on a ManagedEncryptedFileDownloader when a
|
|
||||||
# change to the associated content claim occurs. these are added by the file manager
|
|
||||||
# when it loads each file
|
|
||||||
self.content_claim_callbacks = {} # {<stream_hash>: <callable returning a deferred>}
|
|
||||||
self.check_should_announce_lc = None
|
self.check_should_announce_lc = None
|
||||||
|
|
||||||
|
async def open(self):
|
||||||
|
await super().open()
|
||||||
if 'reflector' not in conf.settings['components_to_skip']:
|
if 'reflector' not in conf.settings['components_to_skip']:
|
||||||
self.check_should_announce_lc = task.LoopingCall(self.verify_will_announce_all_head_and_sd_blobs)
|
self.check_should_announce_lc = looping_call(
|
||||||
|
600, self.verify_will_announce_all_head_and_sd_blobs
|
||||||
|
)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
async def close(self):
|
||||||
def setup(self):
|
if self.check_should_announce_lc is not None:
|
||||||
def _create_tables(transaction):
|
self.check_should_announce_lc.close()
|
||||||
transaction.executescript(self.CREATE_TABLES_QUERY)
|
await super().close()
|
||||||
yield self.db.runInteraction(_create_tables)
|
|
||||||
if self.check_should_announce_lc and not self.check_should_announce_lc.running:
|
|
||||||
self.check_should_announce_lc.start(600)
|
|
||||||
defer.returnValue(None)
|
|
||||||
|
|
||||||
-    @defer.inlineCallbacks
-    def run_and_return_one_or_none(self, query, *args):
-        result = yield self.db.runQuery(query, args)
-        if result:
-            defer.returnValue(result[0][0])
-        else:
-            defer.returnValue(None)
+    async def run_and_return_one_or_none(self, query, *args):
+        for row in await self.db.execute_fetchall(query, args):
+            return row

-    @defer.inlineCallbacks
-    def run_and_return_list(self, query, *args):
-        result = yield self.db.runQuery(query, args)
-        if result:
-            defer.returnValue([i[0] for i in result])
-        else:
-            defer.returnValue([])
+    async def run_and_return_list(self, query, *args):
+        rows = list(await self.db.execute_fetchall(query, args))
+        return [col[0] for col in rows] if rows else []

-    def run_and_return_id(self, query, *args):
-        def do_save(t):
-            t.execute(query, args)
-            return t.lastrowid
-        return self.db.runInteraction(do_save)
-
-    def stop(self):
-        if self.check_should_announce_lc and self.check_should_announce_lc.running:
-            self.check_should_announce_lc.stop()
-        self.db.close()
-        return defer.succeed(True)
+    async def run_and_return_id(self, query, *args):
+        return (await self.db.execute(query, args)).lastrowid

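These helpers now sit on top of the SQLiteMixin connection (self.db.execute, execute_fetchall and run, as used throughout this file). Illustrative calls against the blob table; the second query mirrors one that already exists below, the first uses hypothetical SQL. Note that run_and_return_one_or_none now yields the first row as a tuple (or None), not a single scalar:

async def count_finished(storage):
    # first matching row as a tuple, or None when the query returns nothing
    return await storage.run_and_return_one_or_none(
        "select count(*) from blob where status='finished'"
    )


async def finished_blob_hashes(storage):
    # first column of every row, or [] when nothing matches
    return await storage.run_and_return_list(
        "select blob_hash from blob where status='finished'"
    )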
# # # # # # # # # blob functions # # # # # # # # #
|
# # # # # # # # # blob functions # # # # # # # # #
|
||||||
|
|
||||||
def add_completed_blob(self, blob_hash, length, next_announce_time, should_announce, status="finished"):
|
def add_completed_blob(self, blob_hash, length, next_announce_time, should_announce, status="finished"):
|
||||||
log.debug("Adding a completed blob. blob_hash=%s, length=%i", blob_hash, length)
|
log.debug("Adding a completed blob. blob_hash=%s, length=%i", blob_hash, length)
|
||||||
values = (blob_hash, length, next_announce_time or 0, int(bool(should_announce)), status, 0, 0)
|
values = (blob_hash, length, next_announce_time or 0, int(bool(should_announce)), status, 0, 0)
|
||||||
return self.db.runOperation("insert or replace into blob values (?, ?, ?, ?, ?, ?, ?)", values)
|
return self.db.execute("insert or replace into blob values (?, ?, ?, ?, ?, ?, ?)", values)
|
||||||
|
|
||||||
def set_should_announce(self, blob_hash, next_announce_time, should_announce):
|
def set_should_announce(self, blob_hash, next_announce_time, should_announce):
|
||||||
return self.db.runOperation(
|
return self.db.execute(
|
||||||
"update blob set next_announce_time=?, should_announce=? where blob_hash=?",
|
"update blob set next_announce_time=?, should_announce=? where blob_hash=?",
|
||||||
(next_announce_time or 0, int(bool(should_announce)), blob_hash)
|
(next_announce_time or 0, int(bool(should_announce)), blob_hash)
|
||||||
)
|
)
|
||||||
|
@ -246,9 +180,8 @@ class SQLiteStorage:
|
||||||
"select status from blob where blob_hash=?", blob_hash
|
"select status from blob where blob_hash=?", blob_hash
|
||||||
)
|
)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def add_known_blob(self, blob_hash, length):
|
def add_known_blob(self, blob_hash, length):
|
||||||
yield self.db.runOperation(
|
return self.db.execute(
|
||||||
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)", (blob_hash, length, 0, 0, "pending", 0, 0)
|
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)", (blob_hash, length, 0, 0, "pending", 0, 0)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -267,12 +200,11 @@ class SQLiteStorage:
|
||||||
"select blob_hash from blob where should_announce=1 and status='finished'"
|
"select blob_hash from blob where should_announce=1 and status='finished'"
|
||||||
)
|
)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
async def get_all_finished_blobs(self):
|
||||||
def get_all_finished_blobs(self):
|
blob_hashes = await self.run_and_return_list(
|
||||||
blob_hashes = yield self.run_and_return_list(
|
|
||||||
"select blob_hash from blob where status='finished'"
|
"select blob_hash from blob where status='finished'"
|
||||||
)
|
)
|
||||||
defer.returnValue([unhexlify(blob_hash) for blob_hash in blob_hashes])
|
return [unhexlify(blob_hash) for blob_hash in blob_hashes]
|
||||||
|
|
||||||
def count_finished_blobs(self):
|
def count_finished_blobs(self):
|
||||||
return self.run_and_return_one_or_none(
|
return self.run_and_return_one_or_none(
|
||||||
|
@ -280,10 +212,10 @@ class SQLiteStorage:
|
||||||
)
|
)
|
||||||
|
|
||||||
def update_last_announced_blob(self, blob_hash, last_announced):
|
def update_last_announced_blob(self, blob_hash, last_announced):
|
||||||
return self.db.runOperation(
|
return self.db.execute(
|
||||||
"update blob set next_announce_time=?, last_announced_time=?, single_announce=0 where blob_hash=?",
|
"update blob set next_announce_time=?, last_announced_time=?, single_announce=0 where blob_hash=?",
|
||||||
(int(last_announced + (dataExpireTimeout / 2)), int(last_announced), blob_hash)
|
(int(last_announced + (dataExpireTimeout / 2)), int(last_announced), blob_hash)
|
||||||
)
|
)
|
||||||
|
|
||||||
def should_single_announce_blobs(self, blob_hashes, immediate=False):
|
def should_single_announce_blobs(self, blob_hashes, immediate=False):
|
||||||
def set_single_announce(transaction):
|
def set_single_announce(transaction):
|
||||||
|
@ -298,7 +230,7 @@ class SQLiteStorage:
|
||||||
transaction.execute(
|
transaction.execute(
|
||||||
"update blob set single_announce=1 where blob_hash=? and status='finished'", (blob_hash, )
|
"update blob set single_announce=1 where blob_hash=? and status='finished'", (blob_hash, )
|
||||||
)
|
)
|
||||||
return self.db.runInteraction(set_single_announce)
|
return self.db.run(set_single_announce)
|
||||||
|
|
||||||
def get_blobs_to_announce(self):
|
def get_blobs_to_announce(self):
|
||||||
def get_and_update(transaction):
|
def get_and_update(transaction):
|
||||||
|
@ -317,13 +249,13 @@ class SQLiteStorage:
|
||||||
)
|
)
|
||||||
blobs = [b[0] for b in r.fetchall()]
|
blobs = [b[0] for b in r.fetchall()]
|
||||||
return blobs
|
return blobs
|
||||||
return self.db.runInteraction(get_and_update)
|
return self.db.run(get_and_update)
|
||||||
|
|
||||||
def delete_blobs_from_db(self, blob_hashes):
|
def delete_blobs_from_db(self, blob_hashes):
|
||||||
def delete_blobs(transaction):
|
def delete_blobs(transaction):
|
||||||
for blob_hash in blob_hashes:
|
for blob_hash in blob_hashes:
|
||||||
transaction.execute("delete from blob where blob_hash=?;", (blob_hash,))
|
transaction.execute("delete from blob where blob_hash=?;", (blob_hash,))
|
||||||
return self.db.runInteraction(delete_blobs)
|
return self.db.run(delete_blobs)
|
||||||
|
|
||||||
def get_all_blob_hashes(self):
|
def get_all_blob_hashes(self):
|
||||||
return self.run_and_return_list("select blob_hash from blob")
|
return self.run_and_return_list("select blob_hash from blob")
|
||||||
|
@ -336,17 +268,16 @@ class SQLiteStorage:
                 transaction.execute("insert into stream_blob values (?, ?, ?, ?)",
                                     (stream_hash, blob_info.get('blob_hash', None),
                                      blob_info['blob_num'], blob_info['iv']))
-        return self.db.runInteraction(_add_stream_blobs)
+        return self.db.run(_add_stream_blobs)
 
-    @defer.inlineCallbacks
-    def add_known_blobs(self, blob_infos):
+    async def add_known_blobs(self, blob_infos):
         for blob_info in blob_infos:
             if blob_info.get('blob_hash') and blob_info['length']:
-                yield self.add_known_blob(blob_info['blob_hash'], blob_info['length'])
+                await self.add_known_blob(blob_info['blob_hash'], blob_info['length'])
 
     def verify_will_announce_head_and_sd_blobs(self, stream_hash):
         # fix should_announce for imported head and sd blobs
-        return self.db.runOperation(
+        return self.db.execute(
             "update blob set should_announce=1 "
             "where should_announce=0 and "
             "blob.blob_hash in "
@ -358,7 +289,7 @@ class SQLiteStorage:
         )
 
     def verify_will_announce_all_head_and_sd_blobs(self):
-        return self.db.runOperation(
+        return self.db.execute(
             "update blob set should_announce=1 "
             "where should_announce=0 and "
             "blob.blob_hash in "
@ -383,23 +314,24 @@ class SQLiteStorage:
         :param stream_blob_infos: (list) of blob info dictionaries
         :return: (defer.Deferred)
         """
 
         def _store_stream(transaction):
-            transaction.execute("insert into stream values (?, ?, ?, ?, ?);",
-                                (stream_hash, sd_hash, stream_key, stream_name,
-                                 suggested_file_name))
+            transaction.execute(
+                "insert into stream values (?, ?, ?, ?, ?);", (
+                    stream_hash, sd_hash, stream_key, stream_name, suggested_file_name
+                )
+            )
             for blob_info in stream_blob_infos:
-                transaction.execute("insert into stream_blob values (?, ?, ?, ?)",
-                                    (stream_hash, blob_info.get('blob_hash', None),
-                                     blob_info['blob_num'], blob_info['iv']))
-
-        return self.db.runInteraction(_store_stream)
-
-    @defer.inlineCallbacks
-    def delete_stream(self, stream_hash):
-        sd_hash = yield self.get_sd_blob_hash_for_stream(stream_hash)
-        stream_blobs = yield self.get_blobs_for_stream(stream_hash)
+                transaction.execute(
+                    "insert into stream_blob values (?, ?, ?, ?)", (
+                        stream_hash, blob_info.get('blob_hash', None),
+                        blob_info['blob_num'], blob_info['iv']
+                    )
+                )
+        return self.db.run(_store_stream)
+
+    async def delete_stream(self, stream_hash):
+        sd_hash = await self.get_sd_blob_hash_for_stream(stream_hash)
+        stream_blobs = await self.get_blobs_for_stream(stream_hash)
         blob_hashes = [b.blob_hash for b in stream_blobs if b.blob_hash is not None]
 
         def _delete_stream(transaction):
@ -407,24 +339,28 @@ class SQLiteStorage:
             transaction.execute("delete from file where stream_hash=? ", (stream_hash, ))
             transaction.execute("delete from stream_blob where stream_hash=?", (stream_hash, ))
             transaction.execute("delete from stream where stream_hash=? ", (stream_hash, ))
-            transaction.execute("delete from blob where blob_hash=?", (sd_hash, ))
+            transaction.execute("delete from blob where blob_hash=?", sd_hash)
             for blob_hash in blob_hashes:
                 transaction.execute("delete from blob where blob_hash=?;", (blob_hash, ))
-        yield self.db.runInteraction(_delete_stream)
+
+        await self.db.run(_delete_stream)
 
     def get_all_streams(self):
         return self.run_and_return_list("select stream_hash from stream")
 
     def get_stream_info(self, stream_hash):
-        d = self.db.runQuery("select stream_name, stream_key, suggested_filename, sd_hash from stream "
-                             "where stream_hash=?", (stream_hash, ))
-        d.addCallback(lambda r: None if not r else r[0])
-        return d
+        return self.run_and_return_one_or_none(
+            "select stream_name, stream_key, suggested_filename, sd_hash from stream "
+            "where stream_hash=?", stream_hash
+        )
 
-    def check_if_stream_exists(self, stream_hash):
-        d = self.db.runQuery("select stream_hash from stream where stream_hash=?", (stream_hash, ))
-        d.addCallback(lambda r: bool(len(r)))
-        return d
+    async def check_if_stream_exists(self, stream_hash):
+        row = await self.run_and_return_one_or_none(
+            "select stream_hash from stream where stream_hash=?", stream_hash
+        )
+        if row is not None:
+            return bool(len(row))
+        return False
 
     def get_blob_num_by_hash(self, stream_hash, blob_hash):
         return self.run_and_return_one_or_none(
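With `delete_stream`, `check_if_stream_exists`, and `get_stream_info` now returning coroutines, callers simply await them instead of chaining `addCallback` onto Deferreds. A hedged caller sketch (not part of the diff; `storage` is assumed to be a SQLiteStorage instance):

    import logging

    log = logging.getLogger(__name__)

    async def remove_stream_if_present(storage, stream_hash):
        # drive the converted storage API end to end
        if await storage.check_if_stream_exists(stream_hash):
            info = await storage.get_stream_info(stream_hash)
            log.info("deleting stream %s (%s)", stream_hash, info)
            await storage.delete_stream(stream_hash)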
@ -466,7 +402,7 @@ class SQLiteStorage:
                     crypt_blob_infos.append(CryptBlobInfo(blob_hash, position, blob_length, iv))
             crypt_blob_infos = sorted(crypt_blob_infos, key=lambda info: info.blob_num)
             return crypt_blob_infos
-        return self.db.runInteraction(_get_blobs_for_stream)
+        return self.db.run(_get_blobs_for_stream)
 
     def get_pending_blobs_for_stream(self, stream_hash):
         return self.run_and_return_list(
@ -493,14 +429,12 @@ class SQLiteStorage:
 
     # # # # # # # # # file stuff # # # # # # # # #
 
-    @defer.inlineCallbacks
-    def save_downloaded_file(self, stream_hash, file_name, download_directory, data_payment_rate):
+    async def save_downloaded_file(self, stream_hash, file_name, download_directory, data_payment_rate):
         # touch the closest available file to the file name
-        file_name = yield open_file_for_writing(unhexlify(download_directory).decode(), unhexlify(file_name).decode())
-        result = yield self.save_published_file(
+        file_name = await open_file_for_writing(unhexlify(download_directory).decode(), unhexlify(file_name).decode())
+        return await self.save_published_file(
             stream_hash, hexlify(file_name.encode()), download_directory, data_payment_rate
         )
-        defer.returnValue(result)
 
     def save_published_file(self, stream_hash, file_name, download_directory, data_payment_rate, status="stopped"):
         return self.run_and_return_id(
@ -509,7 +443,9 @@ class SQLiteStorage:
         )
 
     def get_filename_for_rowid(self, rowid):
-        return self.run_and_return_one_or_none("select file_name from file where rowid=?", rowid)
+        return self.run_and_return_one_or_none(
+            "select file_name from file where rowid=?", rowid
+        )
 
     def get_all_lbry_files(self):
         def _lbry_file_dict(rowid, stream_hash, file_name, download_dir, data_rate, status, _, sd_hash, stream_key,
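The converted code also changes how parameters reach `run_and_return_one_or_none` and `run_and_return_list`: they are passed as bare trailing arguments (`"... where rowid=?", rowid`) rather than pre-packed tuples. Those helpers are not defined in this diff; a sketch of how they could sit on top of the `run` primitive from the earlier wrapper sketch, with the exact return shapes being an assumption:

    async def run_and_return_one_or_none(db, query, *args):
        # db is the hypothetical AIOSQLite wrapper sketched above
        rows = await db.run(lambda t: t.execute(query, args).fetchall())
        if rows:
            # single-column queries yield the bare value, wider rows come back whole
            return rows[0][0] if len(rows[0]) == 1 else rows[0]
        return None

    async def run_and_return_list(db, query, *args):
        rows = await db.run(lambda t: t.execute(query, args).fetchall())
        return [col[0] for col in rows] if rows else []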
@ -535,13 +471,11 @@ class SQLiteStorage:
                 ).fetchall()
             ]
 
-        d = self.db.runInteraction(_get_all_files)
-        return d
+        return self.db.run(_get_all_files)
 
-    def change_file_status(self, rowid, new_status):
-        d = self.db.runQuery("update file set status=? where rowid=?", (new_status, rowid))
-        d.addCallback(lambda _: new_status)
-        return d
+    async def change_file_status(self, rowid, new_status):
+        await self.db.execute("update file set status=? where rowid=?", (new_status, rowid))
+        return new_status
 
     def get_lbry_file_status(self, rowid):
         return self.run_and_return_one_or_none(
@ -565,7 +499,7 @@ class SQLiteStorage:
                     ("%s:%i" % (support['txid'], support['nout']), claim_id, lbc_to_dewies(support['amount']),
                      support.get('address', ""))
                 )
-        return self.db.runInteraction(_save_support)
+        return self.db.run(_save_support)
 
     def get_supports(self, *claim_ids):
         def _format_support(outpoint, supported_id, amount, address):
@ -587,15 +521,16 @@ class SQLiteStorage:
                 )
             ]
 
-        return self.db.runInteraction(_get_supports)
+        return self.db.run(_get_supports)
 
     # # # # # # # # # claim functions # # # # # # # # #
 
-    @defer.inlineCallbacks
-    def save_claims(self, claim_infos):
+    async def save_claims(self, claim_infos):
+        support_callbacks = []
+        update_file_callbacks = []
 
         def _save_claims(transaction):
             content_claims_to_update = []
-            support_callbacks = []
             for claim_info in claim_infos:
                 outpoint = "%s:%i" % (claim_info['txid'], claim_info['nout'])
                 claim_id = claim_info['claim_id']
@ -622,7 +557,7 @@ class SQLiteStorage:
                 )
                 if 'supports' in claim_info:  # if this response doesn't have support info don't overwrite the existing
                     # support info
-                    support_callbacks.append(self.save_supports(claim_id, claim_info['supports']))
+                    support_callbacks.append((claim_id, claim_info['supports']))
                 if not source_hash:
                     continue
                 stream_hash = transaction.execute(
@ -644,18 +579,18 @@ class SQLiteStorage:
                     content_claims_to_update.append((stream_hash, outpoint))
                 elif known_outpoint != outpoint:
                     content_claims_to_update.append((stream_hash, outpoint))
-            update_file_callbacks = []
             for stream_hash, outpoint in content_claims_to_update:
                 self._save_content_claim(transaction, outpoint, stream_hash)
                 if stream_hash in self.content_claim_callbacks:
                     update_file_callbacks.append(self.content_claim_callbacks[stream_hash]())
-            return update_file_callbacks, support_callbacks
 
-        content_dl, support_dl = yield self.db.runInteraction(_save_claims)
-        if content_dl:
-            yield defer.DeferredList(content_dl)
-        if support_dl:
-            yield defer.DeferredList(support_dl)
+        await self.db.run(_save_claims)
+        if update_file_callbacks:
+            await asyncio.wait(update_file_callbacks)
+        if support_callbacks:
+            await asyncio.wait([
+                self.save_supports(*args) for args in support_callbacks
+            ])
 
     def save_claims_for_resolve(self, claim_infos):
         to_save = []
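The `save_claims` hunk replaces `defer.DeferredList(...)` with `asyncio.wait(...)`, and because only plain `(claim_id, supports)` tuples are collected inside the transaction, the actual `save_supports` coroutines are created afterwards. A small equivalence sketch; `run_all` is an illustrative name, not something the codebase defines:

    import asyncio

    async def run_all(coros):
        # roughly what defer.DeferredList(dl) provided: wait for every item to finish
        if coros:
            await asyncio.wait([asyncio.ensure_future(c) for c in coros])

    # asyncio.gather(*coros) is a common alternative: it returns results in call
    # order and raises the first exception instead of collecting failures silently.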
@ -718,16 +653,13 @@ class SQLiteStorage:
         # update the claim associated to the file
         transaction.execute("insert or replace into content_claim values (?, ?)", (stream_hash, claim_outpoint))
 
-    @defer.inlineCallbacks
-    def save_content_claim(self, stream_hash, claim_outpoint):
-        yield self.db.runInteraction(self._save_content_claim, claim_outpoint, stream_hash)
+    async def save_content_claim(self, stream_hash, claim_outpoint):
+        await self.db.run(self._save_content_claim, claim_outpoint, stream_hash)
         # update corresponding ManagedEncryptedFileDownloader object
         if stream_hash in self.content_claim_callbacks:
-            file_callback = self.content_claim_callbacks[stream_hash]
-            yield file_callback()
+            await self.content_claim_callbacks[stream_hash]()
 
-    @defer.inlineCallbacks
-    def get_content_claim(self, stream_hash, include_supports=True):
+    async def get_content_claim(self, stream_hash, include_supports=True):
         def _get_claim_from_stream_hash(transaction):
             claim_info = transaction.execute(
                 "select c.*, "
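`save_content_claim` now awaits the registered callback directly, so whatever is stored in `content_claim_callbacks` must be an async callable taking no arguments. A hedged sketch of registering one (the helper below is hypothetical; only the callback dict and `get_content_claim` appear in the diff):

    def register_claim_callback(storage, downloader, stream_hash):
        async def _refresh():
            # refresh the downloader's view of the claim after the DB row changes
            downloader.claim_info = await storage.get_content_claim(stream_hash)
        storage.content_claim_callbacks[stream_hash] = _refresh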
@ -745,15 +677,13 @@ class SQLiteStorage:
                 result['channel_name'] = channel_name
             return result
 
-        result = yield self.db.runInteraction(_get_claim_from_stream_hash)
+        result = await self.db.run(_get_claim_from_stream_hash)
         if result and include_supports:
-            supports = yield self.get_supports(result['claim_id'])
-            result['supports'] = supports
-            result['effective_amount'] = calculate_effective_amount(result['amount'], supports)
-        defer.returnValue(result)
+            result['supports'] = await self.get_supports(result['claim_id'])
+            result['effective_amount'] = calculate_effective_amount(result['amount'], result['supports'])
+        return result
 
-    @defer.inlineCallbacks
-    def get_claims_from_stream_hashes(self, stream_hashes, include_supports=True):
+    async def get_claims_from_stream_hashes(self, stream_hashes, include_supports=True):
         def _batch_get_claim(transaction):
             results = {}
             claim_infos = _batched_select(
@ -781,10 +711,10 @@ class SQLiteStorage:
                     results[stream_hash]['channel_name'] = channel_name
             return results
 
-        claims = yield self.db.runInteraction(_batch_get_claim)
+        claims = await self.db.run(_batch_get_claim)
         if include_supports:
             all_supports = {}
-            for support in (yield self.get_supports(*[claim['claim_id'] for claim in claims.values()])):
+            for support in await self.get_supports(*[claim['claim_id'] for claim in claims.values()]):
                 all_supports.setdefault(support['claim_id'], []).append(support)
             for stream_hash in claims.keys():
                 claim = claims[stream_hash]
@ -792,28 +722,28 @@ class SQLiteStorage:
                 claim['supports'] = supports
                 claim['effective_amount'] = calculate_effective_amount(claim['amount'], supports)
                 claims[stream_hash] = claim
-        defer.returnValue(claims)
+        return claims
 
-    @defer.inlineCallbacks
-    def get_claim(self, claim_outpoint, include_supports=True):
+    async def get_claim(self, claim_outpoint, include_supports=True):
         def _get_claim(transaction):
-            claim_info = transaction.execute("select c.*, "
-                                             "case when c.channel_claim_id is not null then "
-                                             "(select claim_name from claim where claim_id==c.channel_claim_id) "
-                                             "else null end as channel_name from claim c where claim_outpoint = ?",
-                                             (claim_outpoint,)).fetchone()
+            claim_info = transaction.execute(
+                "select c.*, "
+                "case when c.channel_claim_id is not null then "
+                "(select claim_name from claim where claim_id==c.channel_claim_id) "
+                "else null end as channel_name from claim c where claim_outpoint = ?",
+                (claim_outpoint,)
+            ).fetchone()
             channel_name = claim_info[-1]
             result = _format_claim_response(*claim_info[:-1])
             if channel_name:
                 result['channel_name'] = channel_name
             return result
 
-        result = yield self.db.runInteraction(_get_claim)
+        result = await self.db.run(_get_claim)
         if include_supports:
-            supports = yield self.get_supports(result['claim_id'])
-            result['supports'] = supports
-            result['effective_amount'] = calculate_effective_amount(result['amount'], supports)
-        defer.returnValue(result)
+            result['supports'] = await self.get_supports(result['claim_id'])
+            result['effective_amount'] = calculate_effective_amount(result['amount'], result['supports'])
+        return result
 
     def get_unknown_certificate_ids(self):
         def _get_unknown_certificate_claim_ids(transaction):
@ -825,11 +755,10 @@ class SQLiteStorage:
                     "(select c2.claim_id from claim as c2)"
                 ).fetchall()
             ]
-        return self.db.runInteraction(_get_unknown_certificate_claim_ids)
+        return self.db.run(_get_unknown_certificate_claim_ids)
 
-    @defer.inlineCallbacks
-    def get_pending_claim_outpoints(self):
-        claim_outpoints = yield self.run_and_return_list("select claim_outpoint from claim where height=-1")
+    async def get_pending_claim_outpoints(self):
+        claim_outpoints = await self.run_and_return_list("select claim_outpoint from claim where height=-1")
         results = {}  # {txid: [nout, ...]}
         for outpoint_str in claim_outpoints:
             txid, nout = outpoint_str.split(":")
@ -838,7 +767,7 @@ class SQLiteStorage:
             results[txid] = outputs
         if results:
             log.debug("missing transaction heights for %i claims", len(results))
-        defer.returnValue(results)
+        return results
 
     def save_claim_tx_heights(self, claim_tx_heights):
         def _save_claim_heights(transaction):
@ -847,17 +776,17 @@ class SQLiteStorage:
                     "update claim set height=? where claim_outpoint=? and height=-1",
                     (height, outpoint)
                 )
-        return self.db.runInteraction(_save_claim_heights)
+        return self.db.run(_save_claim_heights)
 
     # # # # # # # # # reflector functions # # # # # # # # #
 
     def update_reflected_stream(self, sd_hash, reflector_address, success=True):
         if success:
-            return self.db.runOperation(
+            return self.db.execute(
                 "insert or replace into reflected_stream values (?, ?, ?)",
                 (sd_hash, reflector_address, self.clock.seconds())
             )
-        return self.db.runOperation(
+        return self.db.execute(
             "delete from reflected_stream where sd_hash=? and reflector_address=?",
             (sd_hash, reflector_address)
         )
@ -277,7 +277,7 @@ class LbryWalletManager(BaseWalletManager):
         if 'error' not in results:
             await self.old_db.save_claims_for_resolve([
                 value for value in results.values() if 'error' not in value
-            ]).asFuture(asyncio.get_event_loop())
+            ])
         return results
 
     async def get_claims_for_name(self, name: str):
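In the wallet manager, the `.asFuture(asyncio.get_event_loop())` calls existed only to bridge the Twisted Deferreds returned by the old storage API into awaitable asyncio Futures. With `save_claims` and `save_claims_for_resolve` now native coroutines, the bridge is dropped and the results are awaited directly. For reference, a self-contained sketch of the old bridging pattern (Twisted's `Deferred.asFuture` is a real API; the helper name is illustrative):

    import asyncio
    from twisted.internet.defer import Deferred

    async def await_deferred(d: Deferred):
        # old pattern: convert a Deferred into an asyncio Future before awaiting it
        return await d.asFuture(asyncio.get_event_loop())

    # after the conversion the call sites reduce to a plain await, e.g.:
    #     await self.old_db.save_claims_for_resolve(claims)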
@ -432,7 +432,7 @@ class LbryWalletManager(BaseWalletManager):
         await account.ledger.broadcast(tx)
         await self.old_db.save_claims([self._old_get_temp_claim_info(
             tx, tx.outputs[0], claim_address, claim_dict, name, dewies_to_lbc(amount)
-        )]).asFuture(asyncio.get_event_loop())
+        )])
         # TODO: release reserved tx outputs in case anything fails by this point
         return tx
 
@ -446,7 +446,7 @@ class LbryWalletManager(BaseWalletManager):
             'address': holding_address,
             'claim_id': claim_id,
             'amount': dewies_to_lbc(amount)
-        }]).asFuture(asyncio.get_event_loop())
+        }])
         return tx
 
     async def tip_claim(self, amount, claim_id, account):
@ -461,7 +461,7 @@ class LbryWalletManager(BaseWalletManager):
             'address': claim_to_tip['address'],
             'claim_id': claim_id,
             'amount': dewies_to_lbc(amount)
-        }]).asFuture(asyncio.get_event_loop())
+        }])
         return tx
 
     async def abandon_claim(self, claim_id, txid, nout, account):
@ -484,7 +484,7 @@ class LbryWalletManager(BaseWalletManager):
 
         await self.old_db.save_claims([self._old_get_temp_claim_info(
             tx, tx.outputs[0], address, cert, channel_name, dewies_to_lbc(amount)
-        )]).asFuture(asyncio.get_event_loop())
+        )])
         return tx
 
     def _old_get_temp_claim_info(self, tx, txo, address, claim_dict, name, bid):
@ -131,9 +131,6 @@ class LBRYBlockProcessor(BlockProcessor):
         super().backup_blocks(raw_blocks=raw_blocks)
         self.db.batched_flush_claims()
 
-    def shutdown(self):
-        self.db.shutdown()
-
     async def flush(self, flush_utxos):
         self.db.batched_flush_claims()
         return await super().flush(flush_utxos)
@ -21,7 +21,7 @@ class LBRYDB(DB):
         self.pending_abandons = {}
         super().__init__(*args, **kwargs)
 
-    def shutdown(self):
+    def close(self):
         self.batched_flush_claims()
         self.claims_db.close()
         self.names_db.close()
@ -29,9 +29,7 @@
         self.outpoint_to_claim_id_db.close()
         self.claim_undo_db.close()
         self.utxo_db.close()
-        # electrumx ones
-        self.utxo_db.close()
-        self.history.close_db()
+        super().close()
 
     async def _open_dbs(self, for_sync, compacting):
         await super()._open_dbs(for_sync=for_sync, compacting=compacting)
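Read together, the last two hunks replace the wallet server DB's custom `shutdown()` with an override of the base class's `close()`: the claim-specific LevelDB handles are closed explicitly and the databases electrumx itself manages are left to `super().close()`. A condensed sketch of the resulting method, reconstructed from these hunks (the elided lines are other claim-table handles not shown here):

    class LBRYDB(DB):
        def close(self):
            self.batched_flush_claims()
            self.claims_db.close()
            self.names_db.close()
            # ... remaining claim-table handles closed here ...
            self.outpoint_to_claim_id_db.close()
            self.claim_undo_db.close()
            self.utxo_db.close()
            # electrumx closes its own databases (history, etc.)
            super().close()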