deleted lbry.extras

Lex Berezhny 2020-06-05 19:18:00 -04:00
parent 82b69109bd
commit e66445b46e
30 changed files with 0 additions and 9116 deletions


@@ -1,341 +0,0 @@
import os
import sys
import shutil
import signal
import pathlib
import json
import asyncio
import argparse
import logging
import logging.handlers
import aiohttp
from aiohttp.web import GracefulExit
from docopt import docopt
from lbry import __version__ as lbrynet_version
from lbry.extras.daemon.loggly_handler import get_loggly_handler
from lbry.extras.daemon.daemon import Daemon
from lbry.conf import Config, CLIConfig
log = logging.getLogger('lbry')
def display(data):
print(json.dumps(data, indent=2))
async def execute_command(conf, method, params, callback=display):
async with aiohttp.ClientSession() as session:
try:
message = {'method': method, 'params': params}
async with session.get(conf.api_connection_url, json=message) as resp:
try:
data = await resp.json()
if 'result' in data:
return callback(data['result'])
elif 'error' in data:
return callback(data['error'])
except Exception as e:
log.exception('Could not process response from server:', exc_info=e)
except aiohttp.ClientConnectionError:
print("Could not connect to daemon. Are you sure it's running?")
def normalize_value(x, key=None):
if not isinstance(x, str):
return x
if key in ('uri', 'channel_name', 'name', 'file_name', 'claim_name', 'download_directory'):
return x
if x.lower() == 'true':
return True
if x.lower() == 'false':
return False
if x.isdigit():
return int(x)
return x
def remove_brackets(key):
if key.startswith("<") and key.endswith(">"):
return str(key[1:-1])
return key
def set_kwargs(parsed_args):
kwargs = {}
for key, arg in parsed_args.items():
if arg is None:
continue
k = None
if key.startswith("--") and remove_brackets(key[2:]) not in kwargs:
k = remove_brackets(key[2:])
elif remove_brackets(key) not in kwargs:
k = remove_brackets(key)
kwargs[k] = normalize_value(arg, k)
return kwargs
def split_subparser_argument(parent, original, name, condition):
new_sub_parser = argparse._SubParsersAction(
original.option_strings,
original._prog_prefix,
original._parser_class,
metavar=original.metavar
)
new_sub_parser._name_parser_map = original._name_parser_map
new_sub_parser._choices_actions = [
a for a in original._choices_actions if condition(original._name_parser_map[a.dest])
]
group = argparse._ArgumentGroup(parent, name)
group._group_actions = [new_sub_parser]
return group
class ArgumentParser(argparse.ArgumentParser):
def __init__(self, *args, group_name=None, **kwargs):
super().__init__(*args, formatter_class=HelpFormatter, add_help=False, **kwargs)
self.add_argument(
'--help', dest='help', action='store_true', default=False,
help='Show this help message and exit.'
)
self._optionals.title = 'Options'
if group_name is None:
self.epilog = (
f"Run 'lbrynet COMMAND --help' for more information on a command or group."
)
else:
self.epilog = (
f"Run 'lbrynet {group_name} COMMAND --help' for more information on a command."
)
self.set_defaults(group=group_name, group_parser=self)
def format_help(self):
formatter = self._get_formatter()
formatter.add_usage(
self.usage, self._actions, self._mutually_exclusive_groups
)
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._granular_action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
formatter.add_text(self.epilog)
return formatter.format_help()
@property
def _granular_action_groups(self):
if self.prog != 'lbrynet':
yield from self._action_groups
return
yield self._optionals
action: argparse._SubParsersAction = self._positionals._group_actions[0]
yield split_subparser_argument(
self, action, "Grouped Commands", lambda parser: 'group' in parser._defaults
)
yield split_subparser_argument(
self, action, "Commands", lambda parser: 'group' not in parser._defaults
)
def error(self, message):
self.print_help(argparse._sys.stderr)
self.exit(2, f"\n{message}\n")
class HelpFormatter(argparse.HelpFormatter):
def add_usage(self, usage, actions, groups, prefix='Usage: '):
super().add_usage(
usage, [a for a in actions if a.option_strings != ['--help']], groups, prefix
)
def add_command_parser(parent, command):
subcommand = parent.add_parser(
command['name'],
help=command['doc'].strip().splitlines()[0]
)
subcommand.set_defaults(
api_method_name=command['api_method_name'],
command=command['name'],
doc=command['doc'],
replaced_by=command.get('replaced_by', None)
)
def get_argument_parser():
root = ArgumentParser(
'lbrynet', description='An interface to the LBRY Network.', allow_abbrev=False,
)
root.add_argument(
'-v', '--version', dest='cli_version', action="store_true",
help='Show lbrynet CLI version and exit.'
)
root.set_defaults(group=None, command=None)
CLIConfig.contribute_to_argparse(root)
sub = root.add_subparsers(metavar='COMMAND')
start = sub.add_parser(
'start',
usage='lbrynet start [--config FILE] [--data-dir DIR] [--wallet-dir DIR] [--download-dir DIR] ...',
help='Start LBRY Network interface.'
)
start.add_argument(
'--quiet', dest='quiet', action="store_true",
help='Disable all console output.'
)
start.add_argument(
'--no-logging', dest='no_logging', action="store_true",
help='Disable all logging of any kind.'
)
start.add_argument(
'--verbose', nargs="*",
help=('Enable debug output for lbry logger and event loop. Optionally specify loggers for which debug output '
'should selectively be applied.')
)
start.add_argument(
'--initial-headers', dest='initial_headers',
help='Specify path to initial blockchain headers, faster than downloading them on first run.'
)
Config.contribute_to_argparse(start)
start.set_defaults(command='start', start_parser=start, doc=start.format_help())
api = Daemon.get_api_definitions()
groups = {}
for group_name in sorted(api['groups']):
group_parser = sub.add_parser(group_name, group_name=group_name, help=api['groups'][group_name])
groups[group_name] = group_parser.add_subparsers(metavar='COMMAND')
nicer_order = ['stop', 'get', 'publish', 'resolve']
for command_name in sorted(api['commands']):
if command_name not in nicer_order:
nicer_order.append(command_name)
for command_name in nicer_order:
command = api['commands'][command_name]
if command['group'] is None:
add_command_parser(sub, command)
else:
add_command_parser(groups[command['group']], command)
return root
def ensure_directory_exists(path: str):
if not os.path.isdir(path):
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
LOG_MODULES = 'lbry', 'aioupnp'
def setup_logging(logger: logging.Logger, args: argparse.Namespace, conf: Config):
default_formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(name)s:%(lineno)d: %(message)s")
file_handler = logging.handlers.RotatingFileHandler(conf.log_file_path, maxBytes=2097152, backupCount=5)
file_handler.setFormatter(default_formatter)
for module_name in LOG_MODULES:
logger.getChild(module_name).addHandler(file_handler)
if not args.quiet:
handler = logging.StreamHandler()
handler.setFormatter(default_formatter)
for module_name in LOG_MODULES:
logger.getChild(module_name).addHandler(handler)
logger.getChild('lbry').setLevel(logging.INFO)
logger.getChild('aioupnp').setLevel(logging.WARNING)
logger.getChild('aiohttp').setLevel(logging.CRITICAL)
if args.verbose is not None:
if len(args.verbose) > 0:
for module in args.verbose:
logger.getChild(module).setLevel(logging.DEBUG)
else:
logger.getChild('lbry').setLevel(logging.DEBUG)
loggly_handler = get_loggly_handler(conf)
loggly_handler.setLevel(logging.ERROR)
logger.getChild('lbry').addHandler(loggly_handler)
def run_daemon(args: argparse.Namespace, conf: Config):
loop = asyncio.get_event_loop()
if args.verbose is not None:
loop.set_debug(True)
if not args.no_logging:
setup_logging(logging.getLogger(), args, conf)
daemon = Daemon(conf)
def __exit():
raise GracefulExit()
try:
loop.add_signal_handler(signal.SIGINT, __exit)
loop.add_signal_handler(signal.SIGTERM, __exit)
except NotImplementedError:
pass # Not implemented on Windows
try:
loop.run_until_complete(daemon.start())
loop.run_forever()
except (GracefulExit, KeyboardInterrupt, asyncio.CancelledError):
pass
finally:
loop.run_until_complete(daemon.stop())
logging.shutdown()
if hasattr(loop, 'shutdown_asyncgens'):
loop.run_until_complete(loop.shutdown_asyncgens())
def main(argv=None):
argv = argv or sys.argv[1:]
parser = get_argument_parser()
args, command_args = parser.parse_known_args(argv)
conf = Config.create_from_arguments(args)
for directory in (conf.data_dir, conf.download_dir, conf.wallet_dir):
ensure_directory_exists(directory)
if args.cli_version:
print(f"lbrynet {lbrynet_version}")
elif args.command == 'start':
if args.help:
args.start_parser.print_help()
else:
if args.initial_headers:
ledger_path = os.path.join(conf.wallet_dir, 'lbc_mainnet')
ensure_directory_exists(ledger_path)
current_size = 0
headers_path = os.path.join(ledger_path, 'headers')
if os.path.exists(headers_path):
current_size = os.stat(headers_path).st_size
if os.stat(args.initial_headers).st_size > current_size:
log.info('Copying header from %s to %s', args.initial_headers, headers_path)
shutil.copy(args.initial_headers, headers_path)
run_daemon(args, conf)
elif args.command is not None:
doc = args.doc
api_method_name = args.api_method_name
if args.replaced_by:
print(f"{args.api_method_name} is deprecated, using {args.replaced_by['api_method_name']}.")
doc = args.replaced_by['doc']
api_method_name = args.replaced_by['api_method_name']
if args.help:
print(doc)
else:
parsed = docopt(doc, command_args)
params = set_kwargs(parsed)
asyncio.get_event_loop().run_until_complete(execute_command(conf, api_method_name, params))
elif args.group is not None:
args.group_parser.print_help()
else:
parser.print_help()
return 0
if __name__ == "__main__":
sys.exit(main())
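
For orientation, execute_command() above is a thin JSON-RPC-style client: it sends the method name and keyword arguments to the daemon's HTTP API and hands back whichever of 'result' or 'error' the daemon returns. A minimal standalone sketch of that call, written outside the deleted module (the localhost URL and the 'status' method are illustrative assumptions, and a running daemon is required):

import asyncio
import aiohttp

async def call_daemon(method, params=None, url='http://localhost:5279'):
    # Mirrors execute_command() above: a GET carrying a JSON body of
    # {'method': ..., 'params': ...}, returning 'result' or 'error' from the reply.
    message = {'method': method, 'params': params or {}}
    async with aiohttp.ClientSession() as session:
        async with session.get(url, json=message) as resp:
            data = await resp.json()
            return data.get('result', data.get('error'))

# e.g. print(asyncio.run(call_daemon('status')))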


@@ -1,233 +0,0 @@
import asyncio
import collections
import logging
import typing
import aiohttp
from lbry import utils
from lbry.conf import Config
from lbry.extras import system_info
ANALYTICS_ENDPOINT = 'https://api.segment.io/v1'
ANALYTICS_TOKEN = 'Ax5LZzR1o3q3Z3WjATASDwR5rKyHH0qOIRIbLmMXn2H='
# Things We Track
SERVER_STARTUP = 'Server Startup'
SERVER_STARTUP_SUCCESS = 'Server Startup Success'
SERVER_STARTUP_ERROR = 'Server Startup Error'
DOWNLOAD_STARTED = 'Download Started'
DOWNLOAD_ERRORED = 'Download Errored'
DOWNLOAD_FINISHED = 'Download Finished'
HEARTBEAT = 'Heartbeat'
CLAIM_ACTION = 'Claim Action' # publish/create/update/abandon
NEW_CHANNEL = 'New Channel'
CREDITS_SENT = 'Credits Sent'
UPNP_SETUP = "UPnP Setup"
BLOB_BYTES_UPLOADED = 'Blob Bytes Uploaded'
TIME_TO_FIRST_BYTES = "Time To First Bytes"
log = logging.getLogger(__name__)
def _event_properties(installation_id: str, session_id: str,
event_properties: typing.Optional[typing.Dict]) -> typing.Dict:
properties = {
'lbry_id': installation_id,
'session_id': session_id,
}
properties.update(event_properties or {})
return properties
def _download_properties(conf: Config, external_ip: str, resolve_duration: float,
total_duration: typing.Optional[float], download_id: str, name: str,
outpoint: str, active_peer_count: typing.Optional[int],
tried_peers_count: typing.Optional[int], connection_failures_count: typing.Optional[int],
added_fixed_peers: bool, fixed_peer_delay: float, sd_hash: str,
sd_download_duration: typing.Optional[float] = None,
head_blob_hash: typing.Optional[str] = None,
head_blob_length: typing.Optional[int] = None,
head_blob_download_duration: typing.Optional[float] = None,
error: typing.Optional[str] = None, error_msg: typing.Optional[str] = None,
wallet_server: typing.Optional[str] = None) -> typing.Dict:
return {
"external_ip": external_ip,
"download_id": download_id,
"total_duration": round(total_duration, 4),
"resolve_duration": None if not resolve_duration else round(resolve_duration, 4),
"error": error,
"error_message": error_msg,
'name': name,
"outpoint": outpoint,
"node_rpc_timeout": conf.node_rpc_timeout,
"peer_connect_timeout": conf.peer_connect_timeout,
"blob_download_timeout": conf.blob_download_timeout,
"use_fixed_peers": len(conf.reflector_servers) > 0,
"fixed_peer_delay": fixed_peer_delay,
"added_fixed_peers": added_fixed_peers,
"active_peer_count": active_peer_count,
"tried_peers_count": tried_peers_count,
"sd_blob_hash": sd_hash,
"sd_blob_duration": None if not sd_download_duration else round(sd_download_duration, 4),
"head_blob_hash": head_blob_hash,
"head_blob_length": head_blob_length,
"head_blob_duration": None if not head_blob_download_duration else round(head_blob_download_duration, 4),
"connection_failures_count": connection_failures_count,
"wallet_server": wallet_server
}
def _make_context(platform):
# see https://segment.com/docs/spec/common/#context
# they say they'll ignore fields outside the spec, but evidently they don't
context = {
'app': {
'version': platform['lbrynet_version'],
'build': platform['build'],
},
# TODO: expand os info to give linux/osx specific info
'os': {
'name': platform['os_system'],
'version': platform['os_release']
},
}
if 'desktop' in platform and 'distro' in platform:
context['os']['desktop'] = platform['desktop']
context['os']['distro'] = platform['distro']
return context
class AnalyticsManager:
def __init__(self, conf: Config, installation_id: str, session_id: str):
self.conf = conf
self.cookies = {}
self.url = ANALYTICS_ENDPOINT
self._write_key = utils.deobfuscate(ANALYTICS_TOKEN)
self._tracked_data = collections.defaultdict(list)
self.context = _make_context(system_info.get_platform())
self.installation_id = installation_id
self.session_id = session_id
self.task: typing.Optional[asyncio.Task] = None
self.external_ip: typing.Optional[str] = None
@property
def enabled(self):
return self.conf.share_usage_data
@property
def is_started(self):
return self.task is not None
async def start(self):
if self.task is None:
self.task = asyncio.create_task(self.run())
async def run(self):
while True:
if self.enabled:
self.external_ip = await utils.get_external_ip()
await self._send_heartbeat()
await asyncio.sleep(1800)
def stop(self):
if self.task is not None and not self.task.done():
self.task.cancel()
async def _post(self, data: typing.Dict):
request_kwargs = {
'method': 'POST',
'url': self.url + '/track',
'headers': {'Connection': 'Close'},
'auth': aiohttp.BasicAuth(self._write_key, ''),
'json': data,
'cookies': self.cookies
}
try:
async with utils.aiohttp_request(**request_kwargs) as response:
self.cookies.update(response.cookies)
except Exception as e:
log.debug('Encountered an exception while POSTing to %s: ', self.url + '/track', exc_info=e)
async def track(self, event: typing.Dict):
"""Send a single tracking event"""
if self.enabled:
log.debug('Sending track event: %s', event)
await self._post(event)
async def send_upnp_setup_success_fail(self, success, status):
await self.track(
self._event(UPNP_SETUP, {
'success': success,
'status': status,
})
)
async def send_server_startup(self):
await self.track(self._event(SERVER_STARTUP))
async def send_server_startup_success(self):
await self.track(self._event(SERVER_STARTUP_SUCCESS))
async def send_server_startup_error(self, message):
await self.track(self._event(SERVER_STARTUP_ERROR, {'message': message}))
async def send_time_to_first_bytes(self, resolve_duration: typing.Optional[float],
total_duration: typing.Optional[float], download_id: str,
name: str, outpoint: typing.Optional[str],
found_peers_count: typing.Optional[int],
tried_peers_count: typing.Optional[int],
connection_failures_count: typing.Optional[int],
added_fixed_peers: bool,
fixed_peers_delay: float, sd_hash: str,
sd_download_duration: typing.Optional[float] = None,
head_blob_hash: typing.Optional[str] = None,
head_blob_length: typing.Optional[int] = None,
head_blob_duration: typing.Optional[int] = None,
error: typing.Optional[str] = None,
error_msg: typing.Optional[str] = None,
wallet_server: typing.Optional[str] = None):
await self.track(self._event(TIME_TO_FIRST_BYTES, _download_properties(
self.conf, self.external_ip, resolve_duration, total_duration, download_id, name, outpoint,
found_peers_count, tried_peers_count, connection_failures_count, added_fixed_peers, fixed_peers_delay,
sd_hash, sd_download_duration, head_blob_hash, head_blob_length, head_blob_duration, error, error_msg,
wallet_server
)))
async def send_download_finished(self, download_id, name, sd_hash):
await self.track(
self._event(
DOWNLOAD_FINISHED, {
'download_id': download_id,
'name': name,
'stream_info': sd_hash
}
)
)
async def send_claim_action(self, action):
await self.track(self._event(CLAIM_ACTION, {'action': action}))
async def send_new_channel(self):
await self.track(self._event(NEW_CHANNEL))
async def send_credits_sent(self):
await self.track(self._event(CREDITS_SENT))
async def _send_heartbeat(self):
await self.track(self._event(HEARTBEAT))
def _event(self, event, properties: typing.Optional[typing.Dict] = None):
return {
'userId': 'lbry',
'event': event,
'properties': _event_properties(self.installation_id, self.session_id, properties),
'context': self.context,
'timestamp': utils.isonow()
}
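
AnalyticsManager._event() above assembles a Segment-style 'track' payload, which _post() then sends to the endpoint with HTTP basic auth. A standalone sketch of just the payload shape (the helper name and the placeholder values are illustrative, not part of the deleted module):

from datetime import datetime, timezone

def build_track_event(event, installation_id, session_id, context, properties=None):
    # Same shape as AnalyticsManager._event() / _event_properties() above.
    props = {'lbry_id': installation_id, 'session_id': session_id}
    props.update(properties or {})
    return {
        'userId': 'lbry',
        'event': event,
        'properties': props,
        'context': context,
        'timestamp': datetime.now(timezone.utc).isoformat(),  # stand-in for utils.isonow()
    }

# e.g. build_track_event('Heartbeat', 'install-id', 'session-id', context={'app': {'version': '0.0.0'}})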


@@ -1,6 +0,0 @@
from lbry.conf import Config
from lbry.extras.cli import execute_command


def daemon_rpc(conf: Config, method: str, **kwargs):
    return execute_command(conf, method, kwargs, callback=lambda data: data)


@@ -1,66 +0,0 @@
import logging
import time
import hashlib
import binascii
import ecdsa
from lbry import utils
from lbry.crypto.hash import sha256
from lbry.wallet.transaction import Output

log = logging.getLogger(__name__)


def get_encoded_signature(signature):
    signature = signature.encode() if isinstance(signature, str) else signature
    r = int(signature[:int(len(signature) / 2)], 16)
    s = int(signature[int(len(signature) / 2):], 16)
    return ecdsa.util.sigencode_der(r, s, len(signature) * 4)


def cid2hash(claim_id: str) -> bytes:
    return binascii.unhexlify(claim_id.encode())[::-1]


def is_comment_signed_by_channel(comment: dict, channel: Output, abandon=False):
    if isinstance(channel, Output):
        try:
            signing_field = comment['comment_id'] if abandon else comment['comment']
            pieces = [
                comment['signing_ts'].encode(),
                cid2hash(comment['channel_id']),
                signing_field.encode()
            ]
            return Output.is_signature_valid(
                get_encoded_signature(comment['signature']),
                sha256(b''.join(pieces)),
                channel.claim.channel.public_key_bytes
            )
        except KeyError:
            pass
    return False


def sign_comment(comment: dict, channel: Output, abandon=False):
    timestamp = str(int(time.time()))
    signing_field = comment['comment_id'] if abandon else comment['comment']
    pieces = [timestamp.encode(), channel.claim_hash, signing_field.encode()]
    digest = sha256(b''.join(pieces))
    signature = channel.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
    comment.update({
        'signature': binascii.hexlify(signature).decode(),
        'signing_ts': timestamp
    })


async def jsonrpc_post(url: str, method: str, params: dict = None, **kwargs) -> any:
    params = params or {}
    params.update(kwargs)
    json_body = {'jsonrpc': '2.0', 'id': None, 'method': method, 'params': params}
    async with utils.aiohttp_request('POST', url, json=json_body) as response:
        try:
            result = await response.json()
            return result['result'] if 'result' in result else result
        except Exception as cte:
            log.exception('Unable to decode response from server: %s', cte)
            return await response.text()
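
The signing scheme above hashes three pieces in a fixed order: the signing timestamp, the channel claim id reversed into raw bytes (cid2hash), and the comment body (or the comment_id when abandoning). A standalone sketch of that digest using only the standard library (the function name is illustrative; producing or verifying the actual ECDSA signature still requires the channel keys, as in sign_comment above):

import time
import hashlib
import binascii

def comment_signature_digest(comment_body: str, channel_claim_id: str, signing_ts: str = None) -> bytes:
    # sha256(signing_ts || reversed claim-id bytes || comment body), as used by
    # sign_comment() and is_comment_signed_by_channel() above.
    signing_ts = signing_ts or str(int(time.time()))
    claim_hash = binascii.unhexlify(channel_claim_id.encode())[::-1]  # cid2hash()
    return hashlib.sha256(signing_ts.encode() + claim_hash + comment_body.encode()).digest()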


@@ -1,75 +0,0 @@
import asyncio
import logging
from lbry.conf import Config
from lbry.extras.daemon.componentmanager import ComponentManager

log = logging.getLogger(__name__)


class ComponentType(type):
    def __new__(mcs, name, bases, newattrs):
        klass = type.__new__(mcs, name, bases, newattrs)
        if name != "Component" and newattrs['__module__'] != 'lbry.testcase':
            ComponentManager.default_component_classes[klass.component_name] = klass
        return klass


class Component(metaclass=ComponentType):
    """
    lbry-daemon component helper

    Inheriting classes will be automatically registered with the ComponentManager and must implement setup and stop
    methods
    """

    depends_on = []
    component_name = None

    def __init__(self, component_manager):
        self.conf: Config = component_manager.conf
        self.component_manager = component_manager
        self._running = False

    def __lt__(self, other):
        return self.component_name < other.component_name

    @property
    def running(self):
        return self._running

    async def get_status(self):
        return

    async def start(self):
        raise NotImplementedError()

    async def stop(self):
        raise NotImplementedError()

    @property
    def component(self):
        raise NotImplementedError()

    async def _setup(self):
        try:
            result = await self.start()
            self._running = True
            return result
        except asyncio.CancelledError:
            log.info("Cancelled setup of %s component", self.__class__.__name__)
            raise
        except Exception as err:
            log.exception("Error setting up %s", self.component_name or self.__class__.__name__)
            raise err

    async def _stop(self):
        try:
            result = await self.stop()
            self._running = False
            return result
        except asyncio.CancelledError:
            log.info("Cancelled stop of %s component", self.__class__.__name__)
            raise
        except Exception as err:
            log.exception("Error stopping %s", self.__class__.__name__)
            raise err
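
ComponentType above is what makes subclassing Component sufficient for registration: the metaclass records every concrete subclass in ComponentManager.default_component_classes, keyed by its component_name. A self-contained toy of the same metaclass-registration pattern (all names here are illustrative, not part of lbry):

class PluginType(type):
    registry = {}

    def __new__(mcs, name, bases, attrs):
        klass = super().__new__(mcs, name, bases, attrs)
        if name != 'Plugin':  # skip the abstract base, as ComponentType skips Component
            PluginType.registry[attrs.get('plugin_name', name)] = klass
        return klass


class Plugin(metaclass=PluginType):
    plugin_name = None


class DatabasePlugin(Plugin):
    plugin_name = 'database'


assert PluginType.registry == {'database': DatabasePlugin}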


@@ -1,171 +0,0 @@
import logging
import asyncio
from lbry.conf import Config
from lbry.error import ComponentStartConditionNotMetError
from lbry.dht.peer import PeerManager

log = logging.getLogger(__name__)


class RegisteredConditions:
    conditions = {}


class RequiredConditionType(type):
    def __new__(mcs, name, bases, newattrs):
        klass = type.__new__(mcs, name, bases, newattrs)
        if name != "RequiredCondition":
            if klass.name in RegisteredConditions.conditions:
                raise SyntaxError("already have a component registered for \"%s\"" % klass.name)
            RegisteredConditions.conditions[klass.name] = klass
        return klass


class RequiredCondition(metaclass=RequiredConditionType):
    name = ""
    component = ""
    message = ""

    @staticmethod
    def evaluate(component):
        raise NotImplementedError()


class ComponentManager:
    default_component_classes = {}

    def __init__(self, conf: Config, analytics_manager=None, skip_components=None,
                 peer_manager=None, **override_components):
        self.conf = conf
        self.skip_components = skip_components or []
        self.loop = asyncio.get_event_loop()
        self.analytics_manager = analytics_manager
        self.component_classes = {}
        self.components = set()
        self.started = asyncio.Event(loop=self.loop)
        self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop())

        for component_name, component_class in self.default_component_classes.items():
            if component_name in override_components:
                component_class = override_components.pop(component_name)
            if component_name not in self.skip_components:
                self.component_classes[component_name] = component_class

        if override_components:
            raise SyntaxError("unexpected components: %s" % override_components)

        for component_class in self.component_classes.values():
            self.components.add(component_class(self))

    def evaluate_condition(self, condition_name):
        if condition_name not in RegisteredConditions.conditions:
            raise NameError(condition_name)
        condition = RegisteredConditions.conditions[condition_name]
        try:
            component = self.get_component(condition.component)
            result = condition.evaluate(component)
        except Exception:
            log.exception('failed to evaluate condition:')
            result = False
        return result, "" if result else condition.message

    def sort_components(self, reverse=False):
        """
        Sort components by requirements
        """
        steps = []
        staged = set()
        components = set(self.components)

        # components with no requirements
        step = []
        for component in set(components):
            if not component.depends_on:
                step.append(component)
                staged.add(component.component_name)
                components.remove(component)
        if step:
            step.sort()
            steps.append(step)

        while components:
            step = []
            to_stage = set()
            for component in set(components):
                reqs_met = 0
                for needed in component.depends_on:
                    if needed in staged:
                        reqs_met += 1
                if reqs_met == len(component.depends_on):
                    step.append(component)
                    to_stage.add(component.component_name)
                    components.remove(component)
            if step:
                step.sort()
                staged.update(to_stage)
                steps.append(step)
            elif components:
                raise ComponentStartConditionNotMetError(components)
        if reverse:
            steps.reverse()
        return steps

    async def start(self):
        """ Start Components in sequence sorted by requirements """
        for stage in self.sort_components():
            needing_start = [
                component._setup() for component in stage if not component.running
            ]
            if needing_start:
                await asyncio.wait(needing_start)
        self.started.set()

    async def stop(self):
        """
        Stop Components in reversed startup order
        """
        stages = self.sort_components(reverse=True)
        for stage in stages:
            needing_stop = [
                component._stop() for component in stage if component.running
            ]
            if needing_stop:
                await asyncio.wait(needing_stop)

    def all_components_running(self, *component_names):
        """
        Check if components are running

        :return: (bool) True if all specified components are running
        """
        components = {component.component_name: component for component in self.components}
        for component in component_names:
            if component not in components:
                raise NameError("%s is not a known Component" % component)
            if not components[component].running:
                return False
        return True

    def get_components_status(self):
        """
        List status of all the components, whether they are running or not

        :return: (dict) {(str) component_name: (bool) True is running else False}
        """
        return {
            component.component_name: component.running
            for component in self.components
        }

    def get_actual_component(self, component_name):
        for component in self.components:
            if component.component_name == component_name:
                return component
        raise NameError(component_name)

    def get_component(self, component_name):
        return self.get_actual_component(component_name).component

    def has_component(self, component_name):
        return any(component for component in self.components if component_name == component.component_name)
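
sort_components() above produces stages: each stage contains only components whose depends_on entries were started in earlier stages, and an unsatisfiable graph raises ComponentStartConditionNotMetError. A self-contained sketch of the same staging idea over a plain name-to-dependencies mapping (the function name and the toy mapping are illustrative):

def stage_by_dependencies(depends_on: dict) -> list:
    # depends_on maps name -> set of names it requires; returns a list of stages.
    remaining = dict(depends_on)
    staged, stages = set(), []
    while remaining:
        ready = sorted(name for name, deps in remaining.items() if deps <= staged)
        if not ready:
            raise RuntimeError(f'unsatisfiable dependencies: {remaining}')
        stages.append(ready)
        staged.update(ready)
        for name in ready:
            del remaining[name]
    return stages

# e.g. stage_by_dependencies({'database': set(), 'wallet': {'database'},
#                             'stream_manager': {'database', 'wallet'}})
# -> [['database'], ['wallet'], ['stream_manager']]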


@@ -1,553 +0,0 @@
import math
import os
import asyncio
import logging
import binascii
import typing
import base58
from aioupnp import __version__ as aioupnp_version
from aioupnp.upnp import UPnP
from aioupnp.fault import UPnPError
from lbry import utils
from lbry.dht.node import Node
from lbry.dht.peer import is_valid_public_ipv4
from lbry.dht.blob_announcer import BlobAnnouncer
from lbry.blob.blob_manager import BlobManager
from lbry.blob_exchange.server import BlobServer
from lbry.stream.stream_manager import StreamManager
from lbry.extras.daemon.component import Component
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
from lbry.extras.daemon.storage import SQLiteStorage
from lbry.wallet import WalletManager
from lbry.wallet.usage_payment import WalletServerPayer
log = logging.getLogger(__name__)
# settings must be initialized before this file is imported
DATABASE_COMPONENT = "database"
BLOB_COMPONENT = "blob_manager"
WALLET_COMPONENT = "wallet"
WALLET_SERVER_PAYMENTS_COMPONENT = "wallet_server_payments"
DHT_COMPONENT = "dht"
HASH_ANNOUNCER_COMPONENT = "hash_announcer"
STREAM_MANAGER_COMPONENT = "stream_manager"
PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
UPNP_COMPONENT = "upnp"
EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
class DatabaseComponent(Component):
component_name = DATABASE_COMPONENT
def __init__(self, component_manager):
super().__init__(component_manager)
self.storage = None
@property
def component(self):
return self.storage
@staticmethod
def get_current_db_revision():
return 14
@property
def revision_filename(self):
return os.path.join(self.conf.data_dir, 'db_revision')
def _write_db_revision_file(self, version_num):
with open(self.revision_filename, mode='w') as db_revision:
db_revision.write(str(version_num))
async def start(self):
# check directories exist, create them if they don't
log.info("Loading databases")
if not os.path.exists(self.revision_filename):
log.info("db_revision file not found. Creating it")
self._write_db_revision_file(self.get_current_db_revision())
# check the db migration and run any needed migrations
with open(self.revision_filename, "r") as revision_read_handle:
old_revision = int(revision_read_handle.read().strip())
if old_revision > self.get_current_db_revision():
raise Exception('This version of lbrynet is not compatible with the database\n'
'Your database is revision %i, expected %i' %
(old_revision, self.get_current_db_revision()))
if old_revision < self.get_current_db_revision():
from lbry.extras.daemon.migrator import dbmigrator # pylint: disable=import-outside-toplevel
log.info("Upgrading your databases (revision %i to %i)", old_revision, self.get_current_db_revision())
await asyncio.get_event_loop().run_in_executor(
None, dbmigrator.migrate_db, self.conf, old_revision, self.get_current_db_revision()
)
self._write_db_revision_file(self.get_current_db_revision())
log.info("Finished upgrading the databases.")
self.storage = SQLiteStorage(
self.conf, os.path.join(self.conf.data_dir, "lbrynet.sqlite")
)
await self.storage.open()
async def stop(self):
await self.storage.close()
self.storage = None
class WalletComponent(Component):
component_name = WALLET_COMPONENT
depends_on = [DATABASE_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.wallet_manager = None
@property
def component(self):
return self.wallet_manager
async def get_status(self):
if self.wallet_manager is None:
return
session_pool = self.wallet_manager.ledger.network.session_pool
sessions = session_pool.sessions
connected = None
if self.wallet_manager.ledger.network.client:
addr_and_port = self.wallet_manager.ledger.network.client.server_address_and_port
if addr_and_port:
connected = f"{addr_and_port[0]}:{addr_and_port[1]}"
result = {
'connected': connected,
'connected_features': self.wallet_manager.ledger.network.server_features,
'servers': [
{
'host': session.server[0],
'port': session.server[1],
'latency': session.connection_latency,
'availability': session.available,
} for session in sessions
],
'known_servers': len(sessions),
'available_servers': len(list(session_pool.available_sessions))
}
if self.wallet_manager.ledger.network.remote_height:
local_height = self.wallet_manager.ledger.local_height_including_downloaded_height
disk_height = len(self.wallet_manager.ledger.headers)
remote_height = self.wallet_manager.ledger.network.remote_height
download_height, target_height = local_height - disk_height, remote_height - disk_height
if target_height > 0:
progress = min(max(math.ceil(float(download_height) / float(target_height) * 100), 0), 100)
else:
progress = 100
best_hash = await self.wallet_manager.get_best_blockhash()
result.update({
'headers_synchronization_progress': progress,
'blocks': max(local_height, 0),
'blocks_behind': max(remote_height - local_height, 0),
'best_blockhash': best_hash,
})
return result
async def start(self):
log.info("Starting wallet")
self.wallet_manager = await WalletManager.from_lbrynet_config(self.conf)
await self.wallet_manager.start()
async def stop(self):
await self.wallet_manager.stop()
self.wallet_manager = None
class WalletServerPaymentsComponent(Component):
component_name = WALLET_SERVER_PAYMENTS_COMPONENT
depends_on = [WALLET_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.usage_payment_service = WalletServerPayer(
max_fee=self.conf.max_wallet_server_fee, analytics_manager=self.component_manager.analytics_manager,
)
@property
def component(self) -> typing.Optional[WalletServerPayer]:
return self.usage_payment_service
async def start(self):
wallet_manager = self.component_manager.get_component(WALLET_COMPONENT)
await self.usage_payment_service.start(wallet_manager.ledger, wallet_manager.default_wallet)
async def stop(self):
await self.usage_payment_service.stop()
async def get_status(self):
return {
'max_fee': self.usage_payment_service.max_fee,
'running': self.usage_payment_service.running
}
class BlobComponent(Component):
component_name = BLOB_COMPONENT
depends_on = [DATABASE_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.blob_manager: typing.Optional[BlobManager] = None
@property
def component(self) -> typing.Optional[BlobManager]:
return self.blob_manager
async def start(self):
storage = self.component_manager.get_component(DATABASE_COMPONENT)
data_store = None
if DHT_COMPONENT not in self.component_manager.skip_components:
dht_node: Node = self.component_manager.get_component(DHT_COMPONENT)
if dht_node:
data_store = dht_node.protocol.data_store
blob_dir = os.path.join(self.conf.data_dir, 'blobfiles')
if not os.path.isdir(blob_dir):
os.mkdir(blob_dir)
self.blob_manager = BlobManager(self.component_manager.loop, blob_dir, storage, self.conf, data_store)
return await self.blob_manager.setup()
async def stop(self):
self.blob_manager.stop()
async def get_status(self):
count = 0
if self.blob_manager:
count = len(self.blob_manager.completed_blob_hashes)
return {
'finished_blobs': count,
'connections': {} if not self.blob_manager else self.blob_manager.connection_manager.status
}
class DHTComponent(Component):
component_name = DHT_COMPONENT
depends_on = [UPNP_COMPONENT, DATABASE_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.dht_node: typing.Optional[Node] = None
self.external_udp_port = None
self.external_peer_port = None
@property
def component(self) -> typing.Optional[Node]:
return self.dht_node
async def get_status(self):
return {
'node_id': None if not self.dht_node else binascii.hexlify(self.dht_node.protocol.node_id),
'peers_in_routing_table': 0 if not self.dht_node else len(self.dht_node.protocol.routing_table.get_peers())
}
def get_node_id(self):
node_id_filename = os.path.join(self.conf.data_dir, "node_id")
if os.path.isfile(node_id_filename):
with open(node_id_filename, "r") as node_id_file:
return base58.b58decode(str(node_id_file.read()).strip())
node_id = utils.generate_id()
with open(node_id_filename, "w") as node_id_file:
node_id_file.write(base58.b58encode(node_id).decode())
return node_id
async def start(self):
log.info("start the dht")
upnp_component = self.component_manager.get_component(UPNP_COMPONENT)
self.external_peer_port = upnp_component.upnp_redirects.get("TCP", self.conf.tcp_port)
self.external_udp_port = upnp_component.upnp_redirects.get("UDP", self.conf.udp_port)
external_ip = upnp_component.external_ip
storage = self.component_manager.get_component(DATABASE_COMPONENT)
if not external_ip:
external_ip = await utils.get_external_ip()
if not external_ip:
log.warning("failed to get external ip")
self.dht_node = Node(
self.component_manager.loop,
self.component_manager.peer_manager,
node_id=self.get_node_id(),
internal_udp_port=self.conf.udp_port,
udp_port=self.external_udp_port,
external_ip=external_ip,
peer_port=self.external_peer_port,
rpc_timeout=self.conf.node_rpc_timeout,
split_buckets_under_index=self.conf.split_buckets_under_index,
storage=storage
)
self.dht_node.start(self.conf.network_interface, self.conf.known_dht_nodes)
log.info("Started the dht")
async def stop(self):
self.dht_node.stop()
class HashAnnouncerComponent(Component):
component_name = HASH_ANNOUNCER_COMPONENT
depends_on = [DHT_COMPONENT, DATABASE_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.hash_announcer: typing.Optional[BlobAnnouncer] = None
@property
def component(self) -> typing.Optional[BlobAnnouncer]:
return self.hash_announcer
async def start(self):
storage = self.component_manager.get_component(DATABASE_COMPONENT)
dht_node = self.component_manager.get_component(DHT_COMPONENT)
self.hash_announcer = BlobAnnouncer(self.component_manager.loop, dht_node, storage)
self.hash_announcer.start(self.conf.concurrent_blob_announcers)
log.info("Started blob announcer")
async def stop(self):
self.hash_announcer.stop()
log.info("Stopped blob announcer")
async def get_status(self):
return {
'announce_queue_size': 0 if not self.hash_announcer else len(self.hash_announcer.announce_queue)
}
class StreamManagerComponent(Component):
component_name = STREAM_MANAGER_COMPONENT
depends_on = [BLOB_COMPONENT, DATABASE_COMPONENT, WALLET_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.stream_manager: typing.Optional[StreamManager] = None
@property
def component(self) -> typing.Optional[StreamManager]:
return self.stream_manager
async def get_status(self):
if not self.stream_manager:
return
return {
'managed_files': len(self.stream_manager.streams),
}
async def start(self):
blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
storage = self.component_manager.get_component(DATABASE_COMPONENT)
wallet = self.component_manager.get_component(WALLET_COMPONENT)
node = self.component_manager.get_component(DHT_COMPONENT) \
if self.component_manager.has_component(DHT_COMPONENT) else None
log.info('Starting the file manager')
loop = asyncio.get_event_loop()
self.stream_manager = StreamManager(
loop, self.conf, blob_manager, wallet, storage, node, self.component_manager.analytics_manager
)
await self.stream_manager.start()
log.info('Done setting up file manager')
async def stop(self):
self.stream_manager.stop()
class PeerProtocolServerComponent(Component):
component_name = PEER_PROTOCOL_SERVER_COMPONENT
depends_on = [UPNP_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.blob_server: typing.Optional[BlobServer] = None
@property
def component(self) -> typing.Optional[BlobServer]:
return self.blob_server
async def start(self):
log.info("start blob server")
blob_manager: BlobManager = self.component_manager.get_component(BLOB_COMPONENT)
wallet: WalletManager = self.component_manager.get_component(WALLET_COMPONENT)
peer_port = self.conf.tcp_port
address = await wallet.get_unused_address()
self.blob_server = BlobServer(asyncio.get_event_loop(), blob_manager, address)
self.blob_server.start_server(peer_port, interface=self.conf.network_interface)
await self.blob_server.started_listening.wait()
async def stop(self):
if self.blob_server:
self.blob_server.stop_server()
class UPnPComponent(Component):
component_name = UPNP_COMPONENT
def __init__(self, component_manager):
super().__init__(component_manager)
self._int_peer_port = self.conf.tcp_port
self._int_dht_node_port = self.conf.udp_port
self.use_upnp = self.conf.use_upnp
self.upnp: typing.Optional[UPnP] = None
self.upnp_redirects = {}
self.external_ip: typing.Optional[str] = None
self._maintain_redirects_task = None
@property
def component(self) -> 'UPnPComponent':
return self
async def _repeatedly_maintain_redirects(self, now=True):
while True:
if now:
await self._maintain_redirects()
await asyncio.sleep(360, loop=self.component_manager.loop)
async def _maintain_redirects(self):
# setup the gateway if necessary
if not self.upnp:
try:
self.upnp = await UPnP.discover(loop=self.component_manager.loop)
log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.warning("upnp discovery failed: %s", err)
self.upnp = None
# update the external ip
external_ip = None
if self.upnp:
try:
external_ip = await self.upnp.get_external_ip()
if external_ip != "0.0.0.0" and not self.external_ip:
log.info("got external ip from UPnP: %s", external_ip)
except (asyncio.TimeoutError, UPnPError, NotImplementedError):
pass
if external_ip and not is_valid_public_ipv4(external_ip):
log.warning("UPnP returned a private/reserved ip - %s, checking lbry.com fallback", external_ip)
external_ip = await utils.get_external_ip()
if self.external_ip and self.external_ip != external_ip:
log.info("external ip changed from %s to %s", self.external_ip, external_ip)
if external_ip:
self.external_ip = external_ip
# assert self.external_ip is not None # TODO: handle going/starting offline
if not self.upnp_redirects and self.upnp: # setup missing redirects
log.info("add UPnP port mappings")
upnp_redirects = {}
if PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components:
try:
upnp_redirects["TCP"] = await self.upnp.get_next_mapping(
self._int_peer_port, "TCP", "LBRY peer port", self._int_peer_port
)
except (UPnPError, asyncio.TimeoutError, NotImplementedError):
pass
if DHT_COMPONENT not in self.component_manager.skip_components:
try:
upnp_redirects["UDP"] = await self.upnp.get_next_mapping(
self._int_dht_node_port, "UDP", "LBRY DHT port", self._int_dht_node_port
)
except (UPnPError, asyncio.TimeoutError, NotImplementedError):
pass
if upnp_redirects:
log.info("set up redirects: %s", upnp_redirects)
self.upnp_redirects.update(upnp_redirects)
elif self.upnp: # check existing redirects are still active
found = set()
mappings = await self.upnp.get_redirects()
for mapping in mappings:
proto = mapping.protocol
if proto in self.upnp_redirects and mapping.external_port == self.upnp_redirects[proto]:
if mapping.lan_address == self.upnp.lan_address:
found.add(proto)
if 'UDP' not in found and DHT_COMPONENT not in self.component_manager.skip_components:
try:
udp_port = await self.upnp.get_next_mapping(self._int_dht_node_port, "UDP", "LBRY DHT port")
self.upnp_redirects['UDP'] = udp_port
log.info("refreshed upnp redirect for dht port: %i", udp_port)
except (asyncio.TimeoutError, UPnPError, NotImplementedError):
del self.upnp_redirects['UDP']
if 'TCP' not in found and PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components:
try:
tcp_port = await self.upnp.get_next_mapping(self._int_peer_port, "TCP", "LBRY peer port")
self.upnp_redirects['TCP'] = tcp_port
log.info("refreshed upnp redirect for peer port: %i", tcp_port)
except (asyncio.TimeoutError, UPnPError, NotImplementedError):
del self.upnp_redirects['TCP']
if ('TCP' in self.upnp_redirects and
PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components) and \
('UDP' in self.upnp_redirects and DHT_COMPONENT not in self.component_manager.skip_components):
if self.upnp_redirects:
log.debug("upnp redirects are still active")
async def start(self):
log.info("detecting external ip")
if not self.use_upnp:
self.external_ip = await utils.get_external_ip()
return
success = False
await self._maintain_redirects()
if self.upnp:
if not self.upnp_redirects and not all([x in self.component_manager.skip_components for x in
(DHT_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT)]):
log.error("failed to setup upnp")
else:
success = True
if self.upnp_redirects:
log.debug("set up upnp port redirects for gateway: %s", self.upnp.gateway.manufacturer_string)
else:
log.error("failed to setup upnp")
if not self.external_ip:
self.external_ip = await utils.get_external_ip()
if self.external_ip:
log.info("detected external ip using lbry.com fallback")
if self.component_manager.analytics_manager:
self.component_manager.loop.create_task(
self.component_manager.analytics_manager.send_upnp_setup_success_fail(
success, await self.get_status()
)
)
self._maintain_redirects_task = self.component_manager.loop.create_task(
self._repeatedly_maintain_redirects(now=False)
)
async def stop(self):
if self.upnp_redirects:
log.info("Removing upnp redirects: %s", self.upnp_redirects)
await asyncio.wait([
self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
], loop=self.component_manager.loop)
if self._maintain_redirects_task and not self._maintain_redirects_task.done():
self._maintain_redirects_task.cancel()
async def get_status(self):
return {
'aioupnp_version': aioupnp_version,
'redirects': self.upnp_redirects,
'gateway': 'No gateway found' if not self.upnp else self.upnp.gateway.manufacturer_string,
'dht_redirect_set': 'UDP' in self.upnp_redirects,
'peer_redirect_set': 'TCP' in self.upnp_redirects,
'external_ip': self.external_ip
}
class ExchangeRateManagerComponent(Component):
component_name = EXCHANGE_RATE_MANAGER_COMPONENT
def __init__(self, component_manager):
super().__init__(component_manager)
self.exchange_rate_manager = ExchangeRateManager()
@property
def component(self) -> ExchangeRateManager:
return self.exchange_rate_manager
async def start(self):
self.exchange_rate_manager.start()
async def stop(self):
self.exchange_rate_manager.stop()
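
DatabaseComponent.start() above gates migrations on a small db_revision marker file: a newer-than-expected revision aborts startup, an older one triggers dbmigrator.migrate_db, and a missing file is written at the current revision. A standalone sketch of that check (the function name is illustrative, not part of the deleted module):

import os

def needs_migration(data_dir: str, current_revision: int) -> bool:
    # Same decision DatabaseComponent.start() makes from its db_revision file.
    revision_file = os.path.join(data_dir, 'db_revision')
    if not os.path.exists(revision_file):
        return False  # fresh installs are stamped with the current revision
    with open(revision_file, 'r') as handle:
        old_revision = int(handle.read().strip())
    if old_revision > current_revision:
        raise Exception('database revision is newer than this version of lbrynet')
    return old_revision < current_revision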

File diff suppressed because it is too large.


@@ -1,323 +0,0 @@
import logging
from decimal import Decimal
from binascii import hexlify, unhexlify
from datetime import datetime, date
from json import JSONEncoder
from google.protobuf.message import DecodeError
from lbry.schema.claim import Claim
from lbry.wallet.wallet import Wallet, Account
from lbry.blockchain.ledger import Ledger
from lbry.blockchain.transaction import Transaction, Output
from lbry.crypto.bip32 import PubKey
from lbry.blockchain.dewies import dewies_to_lbc
from lbry.stream.managed_stream import ManagedStream
log = logging.getLogger(__name__)
def encode_txo_doc():
return {
'txid': "hash of transaction in hex",
'nout': "position in the transaction",
'height': "block where transaction was recorded",
'amount': "value of the txo as a decimal",
'address': "address of who can spend the txo",
'confirmations': "number of confirmed blocks",
'is_change': "payment to change address, only available when it can be determined",
'is_received': "true if txo was sent from external account to this account",
'is_spent': "true if txo is spent",
'is_mine': "payment to one of your accounts, only available when it can be determined",
'type': "one of 'claim', 'support' or 'purchase'",
'name': "when type is 'claim' or 'support', this is the claim name",
'claim_id': "when type is 'claim', 'support' or 'purchase', this is the claim id",
'claim_op': "when type is 'claim', this determines if it is 'create' or 'update'",
'value': "when type is 'claim' or 'support' with payload, this is the decoded protobuf payload",
'value_type': "determines the type of the 'value' field: 'channel', 'stream', etc",
'protobuf': "hex encoded raw protobuf version of 'value' field",
'permanent_url': "when type is 'claim' or 'support', this is the long permanent claim URL",
'claim': "for purchase outputs only, metadata of purchased claim",
'reposted_claim': "for repost claims only, metadata of claim being reposted",
'signing_channel': "for signed claims only, metadata of signing channel",
'is_channel_signature_valid': "for signed claims only, whether signature is valid",
'purchase_receipt': "metadata for the purchase transaction associated with this claim"
}
def encode_tx_doc():
return {
'txid': "hash of transaction in hex",
'height': "block where transaction was recorded",
'inputs': [encode_txo_doc()],
'outputs': [encode_txo_doc()],
'total_input': "sum of inputs as a decimal",
'total_output': "sum of outputs, sans fee, as a decimal",
'total_fee': "fee amount",
'hex': "entire transaction encoded in hex",
}
def encode_account_doc():
return {
'id': 'account_id',
'is_default': 'this account is used by default',
'ledger': 'name of crypto currency and network',
'name': 'optional account name',
'seed': 'human friendly words from which account can be recreated',
'encrypted': 'if account is encrypted',
'private_key': 'extended private key',
'public_key': 'extended public key',
'address_generator': 'settings for generating addresses',
'modified_on': 'date of last modification to account settings'
}
def encode_wallet_doc():
return {
'id': 'wallet_id',
'name': 'optional wallet name',
}
def encode_file_doc():
return {
'streaming_url': '(str) url to stream the file using range requests',
'completed': '(bool) true if download is completed',
'file_name': '(str) name of file',
'download_directory': '(str) download directory',
'points_paid': '(float) credit paid to download file',
'stopped': '(bool) true if download is stopped',
'stream_hash': '(str) stream hash of file',
'stream_name': '(str) stream name',
'suggested_file_name': '(str) suggested file name',
'sd_hash': '(str) sd hash of file',
'download_path': '(str) download path of file',
'mime_type': '(str) mime type of file',
'key': '(str) key attached to file',
'total_bytes_lower_bound': '(int) lower bound file size in bytes',
'total_bytes': '(int) file upper bound size in bytes',
'written_bytes': '(int) written size in bytes',
'blobs_completed': '(int) number of fully downloaded blobs',
'blobs_in_stream': '(int) total blobs on stream',
'blobs_remaining': '(int) total blobs remaining to download',
'status': '(str) downloader status',
'claim_id': '(str) None if claim is not found else the claim id',
'txid': '(str) None if claim is not found else the transaction id',
'nout': '(int) None if claim is not found else the transaction output index',
'outpoint': '(str) None if claim is not found else the tx and output',
'metadata': '(dict) None if claim is not found else the claim metadata',
'channel_claim_id': '(str) None if claim is not found or not signed',
'channel_name': '(str) None if claim is not found or not signed',
'claim_name': '(str) None if claim is not found else the claim name'
}
class JSONResponseEncoder(JSONEncoder):
def __init__(self, *args, service, include_protobuf=False, **kwargs):
super().__init__(*args, **kwargs)
self.service = service
self.include_protobuf = include_protobuf
def default(self, obj): # pylint: disable=method-hidden,arguments-differ,too-many-return-statements
if isinstance(obj, Account):
return self.encode_account(obj)
if isinstance(obj, Wallet):
return self.encode_wallet(obj)
if isinstance(obj, ManagedStream):
return self.encode_file(obj)
if isinstance(obj, Transaction):
return self.encode_transaction(obj)
if isinstance(obj, Output):
return self.encode_output(obj)
if isinstance(obj, Claim):
return self.encode_claim(obj)
if isinstance(obj, PubKey):
return obj.extended_key_string()
if isinstance(obj, date):
return obj.isoformat()
if isinstance(obj, datetime):
return obj.strftime("%Y%m%dT%H:%M:%S")
if isinstance(obj, Decimal):
return float(obj)
if isinstance(obj, bytes):
return obj.decode()
return super().default(obj)
def encode_transaction(self, tx):
return {
'txid': tx.id,
'height': tx.height,
'inputs': [self.encode_input(txo) for txo in tx.inputs],
'outputs': [self.encode_output(txo) for txo in tx.outputs],
'total_input': dewies_to_lbc(tx.input_sum),
'total_output': dewies_to_lbc(tx.input_sum - tx.fee),
'total_fee': dewies_to_lbc(tx.fee),
'hex': hexlify(tx.raw).decode(),
}
def encode_output(self, txo, check_signature=True):
if not txo:
return
tx_height = txo.tx_ref.height
best_height = 0#self.ledger.headers.height
output = {
'txid': txo.tx_ref.id,
'nout': txo.position,
'height': tx_height,
'amount': dewies_to_lbc(txo.amount),
'address': txo.get_address(self.service.ledger) if txo.has_address else None,
'confirmations': (best_height+1) - tx_height if tx_height > 0 else tx_height,
'timestamp': 0 #self.ledger.headers.estimated_timestamp(tx_height)
}
if txo.is_spent is not None:
output['is_spent'] = txo.is_spent
if txo.is_my_output is not None:
output['is_my_output'] = txo.is_my_output
if txo.is_my_input is not None:
output['is_my_input'] = txo.is_my_input
if txo.sent_supports is not None:
output['sent_supports'] = dewies_to_lbc(txo.sent_supports)
if txo.sent_tips is not None:
output['sent_tips'] = dewies_to_lbc(txo.sent_tips)
if txo.received_tips is not None:
output['received_tips'] = dewies_to_lbc(txo.received_tips)
if txo.is_internal_transfer is not None:
output['is_internal_transfer'] = txo.is_internal_transfer
if txo.script.is_claim_name:
output['type'] = 'claim'
output['claim_op'] = 'create'
elif txo.script.is_update_claim:
output['type'] = 'claim'
output['claim_op'] = 'update'
elif txo.script.is_support_claim:
output['type'] = 'support'
elif txo.script.is_return_data:
output['type'] = 'data'
elif txo.purchase is not None:
output['type'] = 'purchase'
output['claim_id'] = txo.purchased_claim_id
if txo.purchased_claim is not None:
output['claim'] = self.encode_output(txo.purchased_claim)
else:
output['type'] = 'payment'
if txo.script.is_claim_involved:
output.update({
'name': txo.claim_name,
'normalized_name': txo.normalized_name,
'claim_id': txo.claim_id,
'permanent_url': txo.permanent_url,
'meta': self.encode_claim_meta(txo.meta.copy())
})
if 'short_url' in output['meta']:
output['short_url'] = output['meta'].pop('short_url')
if 'canonical_url' in output['meta']:
output['canonical_url'] = output['meta'].pop('canonical_url')
if txo.claims is not None:
output['claims'] = [self.encode_output(o) for o in txo.claims]
if txo.reposted_claim is not None:
output['reposted_claim'] = self.encode_output(txo.reposted_claim)
if txo.script.is_claim_name or txo.script.is_update_claim:
try:
output['value'] = txo.claim
output['value_type'] = txo.claim.claim_type
if self.include_protobuf:
output['protobuf'] = hexlify(txo.claim.to_bytes())
if txo.purchase_receipt is not None:
output['purchase_receipt'] = self.encode_output(txo.purchase_receipt)
if txo.claim.is_channel:
output['has_signing_key'] = txo.has_private_key
if check_signature and txo.claim.is_signed:
if txo.channel is not None:
output['signing_channel'] = self.encode_output(txo.channel)
output['is_channel_signature_valid'] = txo.is_signed_by(txo.channel, self.service.ledger)
else:
output['signing_channel'] = {'channel_id': txo.claim.signing_channel_id}
output['is_channel_signature_valid'] = False
except DecodeError:
pass
return output
def encode_claim_meta(self, meta):
for key, value in meta.items():
if key.endswith('_amount'):
if isinstance(value, int):
meta[key] = dewies_to_lbc(value)
if 0 < meta.get('creation_height', 0) <= 0: #self.ledger.headers.height:
meta['creation_timestamp'] = self.ledger.headers.estimated_timestamp(meta['creation_height'])
return meta
def encode_input(self, txi):
return self.encode_output(txi.txo_ref.txo, False) if txi.txo_ref.txo is not None else {
'txid': txi.txo_ref.tx_ref.id,
'nout': txi.txo_ref.position
}
def encode_account(self, account):
result = account.to_dict()
result['id'] = account.id
result.pop('certificates', None)
#result['is_default'] = self.ledger.accounts[0] == account
return result
@staticmethod
def encode_wallet(wallet):
return {
'id': wallet.id,
'name': wallet.name
}
def encode_file(self, managed_stream):
output_exists = managed_stream.output_file_exists
tx_height = managed_stream.stream_claim_info.height
best_height = 0 #self.ledger.headers.height
return {
'streaming_url': managed_stream.stream_url,
'completed': managed_stream.completed,
'file_name': managed_stream.file_name if output_exists else None,
'download_directory': managed_stream.download_directory if output_exists else None,
'download_path': managed_stream.full_path if output_exists else None,
'points_paid': 0.0,
'stopped': not managed_stream.running,
'stream_hash': managed_stream.stream_hash,
'stream_name': managed_stream.descriptor.stream_name,
'suggested_file_name': managed_stream.descriptor.suggested_file_name,
'sd_hash': managed_stream.descriptor.sd_hash,
'mime_type': managed_stream.mime_type,
'key': managed_stream.descriptor.key,
'total_bytes_lower_bound': managed_stream.descriptor.lower_bound_decrypted_length(),
'total_bytes': managed_stream.descriptor.upper_bound_decrypted_length(),
'written_bytes': managed_stream.written_bytes,
'blobs_completed': managed_stream.blobs_completed,
'blobs_in_stream': managed_stream.blobs_in_stream,
'blobs_remaining': managed_stream.blobs_remaining,
'status': managed_stream.status,
'claim_id': managed_stream.claim_id,
'txid': managed_stream.txid,
'nout': managed_stream.nout,
'outpoint': managed_stream.outpoint,
'metadata': managed_stream.metadata,
'protobuf': managed_stream.metadata_protobuf,
'channel_claim_id': managed_stream.channel_claim_id,
'channel_name': managed_stream.channel_name,
'claim_name': managed_stream.claim_name,
'content_fee': managed_stream.content_fee,
'purchase_receipt': self.encode_output(managed_stream.purchase_receipt),
'added_on': managed_stream.added_on,
'height': tx_height,
'confirmations': (best_height + 1) - tx_height if tx_height > 0 else tx_height,
'timestamp': 0, #self.ledger.headers.estimated_timestamp(tx_height),
'is_fully_reflected': managed_stream.is_fully_reflected
}
def encode_claim(self, claim):
encoded = getattr(claim, claim.claim_type).to_dict()
if 'public_key' in encoded:
encoded['public_key_id'] = self.service.ledger.public_key_to_address(
unhexlify(encoded['public_key'])
)
return encoded
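
JSONResponseEncoder above is plugged into json.dumps via cls=, and json.dumps forwards unrecognized keyword arguments (here service= and include_protobuf=) to the encoder's constructor. A self-contained toy showing that mechanism (DateEncoder and date_format are illustrative names):

import json
from datetime import date

class DateEncoder(json.JSONEncoder):
    def __init__(self, *args, date_format='%Y%m%d', **kwargs):
        super().__init__(*args, **kwargs)
        self.date_format = date_format

    def default(self, obj):  # called only for types json cannot serialize natively
        if isinstance(obj, date):
            return obj.strftime(self.date_format)
        return super().default(obj)

print(json.dumps({'released': date(2020, 6, 5)}, cls=DateEncoder, date_format='%Y-%m-%d'))
# -> {"released": "2020-06-05"}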


@@ -1,95 +0,0 @@
import asyncio
import json
import logging.handlers
import traceback
import typing
from aiohttp.client_exceptions import ClientError
import aiohttp
from lbry import utils, __version__
if typing.TYPE_CHECKING:
from lbry.conf import Config
LOGGLY_TOKEN = 'BQEzZmMzLJHgAGxkBF00LGD0YGuyATVgAmqxAQEuAQZ2BQH4'
class JsonFormatter(logging.Formatter):
"""Format log records using json serialization"""
def __init__(self, **kwargs):
super().__init__()
self.attributes = kwargs
def format(self, record):
data = {
'loggerName': record.name,
'asciTime': self.formatTime(record),
'fileName': record.filename,
'functionName': record.funcName,
'levelNo': record.levelno,
'lineNo': record.lineno,
'levelName': record.levelname,
'message': record.getMessage(),
}
data.update(self.attributes)
if record.exc_info:
data['exc_info'] = self.formatException(record.exc_info)
return json.dumps(data)
class HTTPSLogglyHandler(logging.Handler):
def __init__(self, loggly_token: str, config: 'Config'):
super().__init__()
self.cookies = {}
self.url = "https://logs-01.loggly.com/inputs/{token}/tag/{tag}".format(
token=utils.deobfuscate(loggly_token), tag='lbrynet-' + __version__
)
self._loop = asyncio.get_event_loop()
self._session = aiohttp.ClientSession()
self._config = config
@property
def enabled(self):
return self._config.share_usage_data
@staticmethod
def get_full_message(record):
if record.exc_info:
return '\n'.join(traceback.format_exception(*record.exc_info))
else:
return record.getMessage()
async def _emit(self, record, retry=True):
data = self.format(record).encode()
try:
async with self._session.post(self.url, data=data,
cookies=self.cookies) as response:
self.cookies.update(response.cookies)
except ClientError:
if self._loop.is_running() and retry and self.enabled:
await self._session.close()
self._session = aiohttp.ClientSession()
return await self._emit(record, retry=False)
def emit(self, record):
if not self.enabled:
return
try:
asyncio.ensure_future(self._emit(record), loop=self._loop)
except RuntimeError: # TODO: use a second loop
print(f"\nfailed to send traceback to loggly, please file an issue with the following traceback:\n"
f"{self.format(record)}")
def close(self):
super().close()
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(self._session.close())
except RuntimeError:
pass
def get_loggly_handler(config):
handler = HTTPSLogglyHandler(LOGGLY_TOKEN, config=config)
handler.setFormatter(JsonFormatter())
return handler
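The handler above formats records as JSON and posts them to Loggly without blocking, and it only sends anything when share_usage_data is enabled. A minimal wiring sketch, assuming a default-constructed Config and a running asyncio event loop; the ERROR level is an assumption, not something this module enforces:

import logging
from lbry.conf import Config
from lbry.extras.daemon.loggly_handler import get_loggly_handler

config = Config()  # assumption: defaults are fine for illustration
loggly = get_loggly_handler(config)
loggly.setLevel(logging.ERROR)  # assumption: only ship errors/tracebacks
logging.getLogger('lbry').addHandler(loggly)
# records are dispatched with asyncio.ensure_future, so an event loop must be running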

View file

@@ -1,70 +0,0 @@
# pylint: skip-file
import os
import sys
import logging
log = logging.getLogger(__name__)
def migrate_db(conf, start, end):
current = start
while current < end:
if current == 1:
from .migrate1to2 import do_migration
elif current == 2:
from .migrate2to3 import do_migration
elif current == 3:
from .migrate3to4 import do_migration
elif current == 4:
from .migrate4to5 import do_migration
elif current == 5:
from .migrate5to6 import do_migration
elif current == 6:
from .migrate6to7 import do_migration
elif current == 7:
from .migrate7to8 import do_migration
elif current == 8:
from .migrate8to9 import do_migration
elif current == 9:
from .migrate9to10 import do_migration
elif current == 10:
from .migrate10to11 import do_migration
elif current == 11:
from .migrate11to12 import do_migration
elif current == 12:
from .migrate12to13 import do_migration
elif current == 13:
from .migrate13to14 import do_migration
else:
raise Exception(f"DB migration of version {current} to {current+1} is not available")
try:
do_migration(conf)
except Exception:
log.exception("failed to migrate database")
if os.path.exists(os.path.join(conf.data_dir, "lbrynet.sqlite")):
backup_name = f"rev_{current}_unmigrated_database"
count = 0
while os.path.exists(os.path.join(conf.data_dir, backup_name + ".sqlite")):
count += 1
backup_name = f"rev_{current}_unmigrated_database_{count}"
backup_path = os.path.join(conf.data_dir, backup_name + ".sqlite")
os.rename(os.path.join(conf.data_dir, "lbrynet.sqlite"), backup_path)
log.info("made a backup of the unmigrated database: %s", backup_path)
if os.path.isfile(os.path.join(conf.data_dir, "db_revision")):
os.remove(os.path.join(conf.data_dir, "db_revision"))
return None
current += 1
log.info("successfully migrated the database from revision %i to %i", current - 1, current)
return None
def run_migration_script():
log_format = "(%(asctime)s)[%(filename)s:%(lineno)s] %(funcName)s(): %(message)s"
logging.basicConfig(level=logging.DEBUG, format=log_format, filename="migrator.log")
sys.stdout = open("migrator.out.log", 'w')
sys.stderr = open("migrator.err.log", 'w')
migrate_db(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))
if __name__ == "__main__":
run_migration_script()
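migrate_db above walks the revision chain one step at a time and, if a step fails, backs up the unmigrated lbrynet.sqlite before continuing. A minimal sketch of driving it directly with a stand-in conf object; the paths are placeholders, and only data_dir (plus download_dir for the 5-to-6 migration) is read by the individual migrations:

from types import SimpleNamespace

conf = SimpleNamespace(data_dir="/path/to/lbrynet/data", download_dir="/path/to/downloads")
migrate_db(conf, 1, 14)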

View file

@@ -1,54 +0,0 @@
import sqlite3
import os
import binascii
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
current_columns = []
for col_info in cursor.execute("pragma table_info('file');").fetchall():
current_columns.append(col_info[1])
if 'content_fee' in current_columns or 'saved_file' in current_columns:
connection.close()
print("already migrated")
return
cursor.execute(
"pragma foreign_keys=off;"
)
cursor.execute("""
create table if not exists new_file (
stream_hash text primary key not null references stream,
file_name text,
download_directory text,
blob_data_rate real not null,
status text not null,
saved_file integer not null,
content_fee text
);
""")
for (stream_hash, file_name, download_dir, data_rate, status) in cursor.execute("select * from file").fetchall():
saved_file = 0
if download_dir != '{stream}' and file_name != '{stream}':
try:
if os.path.isfile(os.path.join(binascii.unhexlify(download_dir).decode(),
binascii.unhexlify(file_name).decode())):
saved_file = 1
else:
download_dir, file_name = None, None
except Exception:
download_dir, file_name = None, None
else:
download_dir, file_name = None, None
cursor.execute(
"insert into new_file values (?, ?, ?, ?, ?, ?, NULL)",
(stream_hash, file_name, download_dir, data_rate, status, saved_file)
)
cursor.execute("drop table file")
cursor.execute("alter table new_file rename to file")
connection.commit()
connection.close()

View file

@@ -1,69 +0,0 @@
import sqlite3
import os
import time
def do_migration(conf):
db_path = os.path.join(conf.data_dir, 'lbrynet.sqlite')
connection = sqlite3.connect(db_path)
connection.row_factory = sqlite3.Row
cursor = connection.cursor()
current_columns = []
for col_info in cursor.execute("pragma table_info('file');").fetchall():
current_columns.append(col_info[1])
if 'added_on' in current_columns:
connection.close()
print('already migrated')
return
# follow 12 step schema change procedure
cursor.execute("pragma foreign_keys=off")
# we don't have any indexes, views or triggers, so step 3 is skipped.
cursor.execute("drop table if exists new_file")
cursor.execute("""
create table if not exists new_file (
stream_hash text not null primary key references stream,
file_name text,
download_directory text,
blob_data_rate text not null,
status text not null,
saved_file integer not null,
content_fee text,
added_on integer not null
);
""")
# step 5: transfer content from old to new
select = "select * from file"
for (stream_hash, file_name, download_dir, blob_rate, status, saved_file, fee) \
in cursor.execute(select).fetchall():
added_on = int(time.time())
cursor.execute(
"insert into new_file values (?, ?, ?, ?, ?, ?, ?, ?)",
(stream_hash, file_name, download_dir, blob_rate, status, saved_file, fee, added_on)
)
# step 6: drop old table
cursor.execute("drop table file")
# step 7: rename new table to old table
cursor.execute("alter table new_file rename to file")
# step 8: we aren't using indexes, views or triggers so skip
# step 9: no views so skip
# step 10: foreign key check
cursor.execute("pragma foreign_key_check;")
# step 11: commit transaction
connection.commit()
# step 12: re-enable foreign keys
connection.execute("pragma foreign_keys=on;")
# done :)
connection.close()
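The comments in this migration follow SQLite's 12-step schema change procedure: build a replacement table, copy the rows across, drop the original, rename the replacement into place, then re-check foreign keys. A stripped-down sketch of the same pattern on a toy table; the table and column names are illustrative, not taken from this migration:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    create table items (id integer primary key, name text not null);
    insert into items values (1, 'a'), (2, 'b');
""")
conn.execute("pragma foreign_keys=off")
# create the replacement table with the new column
conn.execute("create table new_items (id integer primary key, name text not null, added_on integer not null)")
# copy rows, supplying a value for the new column
conn.execute("insert into new_items select id, name, 0 from items")
# drop the old table and rename the new one into place
conn.execute("drop table items")
conn.execute("alter table new_items rename to items")
conn.execute("pragma foreign_key_check")
conn.commit()
conn.execute("pragma foreign_keys=on")
conn.close()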

View file

@@ -1,80 +0,0 @@
import os
import sqlite3
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
current_columns = []
for col_info in cursor.execute("pragma table_info('file');").fetchall():
current_columns.append(col_info[1])
if 'bt_infohash' in current_columns:
connection.close()
print("already migrated")
return
cursor.executescript("""
pragma foreign_keys=off;
create table if not exists torrent (
bt_infohash char(20) not null primary key,
tracker text,
length integer not null,
name text not null
);
create table if not exists torrent_node ( -- BEP-0005
bt_infohash char(20) not null references torrent,
host text not null,
port integer not null
);
create table if not exists torrent_tracker ( -- BEP-0012
bt_infohash char(20) not null references torrent,
tracker text not null
);
create table if not exists torrent_http_seed ( -- BEP-0017
bt_infohash char(20) not null references torrent,
http_seed text not null
);
create table if not exists new_file (
stream_hash char(96) references stream,
bt_infohash char(20) references torrent,
file_name text,
download_directory text,
blob_data_rate real not null,
status text not null,
saved_file integer not null,
content_fee text,
added_on integer not null
);
create table if not exists new_content_claim (
stream_hash char(96) references stream,
bt_infohash char(20) references torrent,
claim_outpoint text unique not null references claim
);
insert into new_file (stream_hash, bt_infohash, file_name, download_directory, blob_data_rate, status,
saved_file, content_fee, added_on) select
stream_hash, NULL, file_name, download_directory, blob_data_rate, status, saved_file, content_fee,
added_on
from file;
insert or ignore into new_content_claim (stream_hash, bt_infohash, claim_outpoint)
select stream_hash, NULL, claim_outpoint from content_claim;
drop table file;
drop table content_claim;
alter table new_file rename to file;
alter table new_content_claim rename to content_claim;
pragma foreign_keys=on;
""")
connection.commit()
connection.close()

View file

@@ -1,21 +0,0 @@
import os
import sqlite3
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
cursor.executescript("""
create table if not exists peer (
node_id char(96) not null primary key,
address text not null,
udp_port integer not null,
tcp_port integer,
unique (address, udp_port)
);
""")
connection.commit()
connection.close()

View file

@@ -1,77 +0,0 @@
import sqlite3
import os
import logging
log = logging.getLogger(__name__)
UNSET_NOUT = -1
def do_migration(conf):
log.info("Doing the migration")
migrate_blockchainname_db(conf.data_dir)
log.info("Migration succeeded")
def migrate_blockchainname_db(db_dir):
blockchainname_db = os.path.join(db_dir, "blockchainname.db")
# skip migration on fresh installs
if not os.path.isfile(blockchainname_db):
return
temp_db = sqlite3.connect(":memory:")
db_file = sqlite3.connect(blockchainname_db)
file_cursor = db_file.cursor()
mem_cursor = temp_db.cursor()
mem_cursor.execute("create table if not exists name_metadata ("
" name text, "
" txid text, "
" n integer, "
" sd_hash text)")
mem_cursor.execute("create table if not exists claim_ids ("
" claimId text, "
" name text, "
" txid text, "
" n integer)")
temp_db.commit()
name_metadata = file_cursor.execute("select * from name_metadata").fetchall()
claim_metadata = file_cursor.execute("select * from claim_ids").fetchall()
# fill n as UNSET_NOUT; Wallet.py will be responsible for filling in the correct n
for name, txid, sd_hash in name_metadata:
mem_cursor.execute(
"insert into name_metadata values (?, ?, ?, ?) ",
(name, txid, UNSET_NOUT, sd_hash))
for claim_id, name, txid in claim_metadata:
mem_cursor.execute(
"insert into claim_ids values (?, ?, ?, ?)",
(claim_id, name, txid, UNSET_NOUT))
temp_db.commit()
new_name_metadata = mem_cursor.execute("select * from name_metadata").fetchall()
new_claim_metadata = mem_cursor.execute("select * from claim_ids").fetchall()
file_cursor.execute("drop table name_metadata")
file_cursor.execute("create table name_metadata ("
" name text, "
" txid text, "
" n integer, "
" sd_hash text)")
for name, txid, n, sd_hash in new_name_metadata:
file_cursor.execute(
"insert into name_metadata values (?, ?, ?, ?) ", (name, txid, n, sd_hash))
file_cursor.execute("drop table claim_ids")
file_cursor.execute("create table claim_ids ("
" claimId text, "
" name text, "
" txid text, "
" n integer)")
for claim_id, name, txid, n in new_claim_metadata:
file_cursor.execute("insert into claim_ids values (?, ?, ?, ?)", (claim_id, name, txid, n))
db_file.commit()
db_file.close()
temp_db.close()

View file

@@ -1,42 +0,0 @@
import sqlite3
import os
import logging
log = logging.getLogger(__name__)
def do_migration(conf):
log.info("Doing the migration")
migrate_blockchainname_db(conf.data_dir)
log.info("Migration succeeded")
def migrate_blockchainname_db(db_dir):
blockchainname_db = os.path.join(db_dir, "blockchainname.db")
# skip migration on fresh installs
if not os.path.isfile(blockchainname_db):
return
db_file = sqlite3.connect(blockchainname_db)
file_cursor = db_file.cursor()
tables = file_cursor.execute("SELECT tbl_name FROM sqlite_master "
"WHERE type='table'").fetchall()
if 'tmp_name_metadata_table' in tables and 'name_metadata' not in tables:
file_cursor.execute("ALTER TABLE tmp_name_metadata_table RENAME TO name_metadata")
else:
file_cursor.executescript(
"CREATE TABLE IF NOT EXISTS tmp_name_metadata_table "
" (name TEXT UNIQUE NOT NULL, "
" txid TEXT NOT NULL, "
" n INTEGER NOT NULL, "
" sd_hash TEXT NOT NULL); "
"INSERT OR IGNORE INTO tmp_name_metadata_table "
" (name, txid, n, sd_hash) "
" SELECT name, txid, n, sd_hash FROM name_metadata; "
"DROP TABLE name_metadata; "
"ALTER TABLE tmp_name_metadata_table RENAME TO name_metadata;"
)
db_file.commit()
db_file.close()

View file

@@ -1,85 +0,0 @@
import sqlite3
import os
import logging
log = logging.getLogger(__name__)
def do_migration(conf):
log.info("Doing the migration")
migrate_blobs_db(conf.data_dir)
log.info("Migration succeeded")
def migrate_blobs_db(db_dir):
"""
We migrate the blobs.db used in BlobManager to have a "should_announce" column,
and set it to True for blobs that are sd blobs or head blobs (the first blob in a stream)
"""
blobs_db = os.path.join(db_dir, "blobs.db")
lbryfile_info_db = os.path.join(db_dir, 'lbryfile_info.db')
# skip migration on fresh installs
if not os.path.isfile(blobs_db) and not os.path.isfile(lbryfile_info_db):
return
# if blobs.db doesn't exist, skip migration
if not os.path.isfile(blobs_db):
log.info("blobs.db was not found but lbryfile_info.db was found, skipping migration")
return
blobs_db_file = sqlite3.connect(blobs_db)
blobs_db_cursor = blobs_db_file.cursor()
# check if new columns exist (it shouldn't) and create it
try:
blobs_db_cursor.execute("SELECT should_announce FROM blobs")
except sqlite3.OperationalError:
blobs_db_cursor.execute(
"ALTER TABLE blobs ADD COLUMN should_announce integer NOT NULL DEFAULT 0")
else:
log.warning("should_announce already exists somehow, proceeding anyways")
# if lbryfile_info.db doesn't exist, skip marking blobs as should_announce = True
if not os.path.isfile(lbryfile_info_db):
log.error("lbryfile_info.db was not found, skipping check for should_announce")
return
lbryfile_info_file = sqlite3.connect(lbryfile_info_db)
lbryfile_info_cursor = lbryfile_info_file.cursor()
# find blobs that are stream descriptors
lbryfile_info_cursor.execute('SELECT * FROM lbry_file_descriptors')
descriptors = lbryfile_info_cursor.fetchall()
should_announce_blob_hashes = []
for d in descriptors:
sd_blob_hash = (d[0],)
should_announce_blob_hashes.append(sd_blob_hash)
# find blobs that are the first blob in a stream
lbryfile_info_cursor.execute('SELECT * FROM lbry_file_blobs WHERE position = 0')
blobs = lbryfile_info_cursor.fetchall()
head_blob_hashes = []
for b in blobs:
blob_hash = (b[0],)
should_announce_blob_hashes.append(blob_hash)
# now mark them as should_announce = True
blobs_db_cursor.executemany('UPDATE blobs SET should_announce=1 WHERE blob_hash=?',
should_announce_blob_hashes)
# Now run some final checks here to make sure migration succeeded
try:
blobs_db_cursor.execute("SELECT should_announce FROM blobs")
except sqlite3.OperationalError:
raise Exception('Migration failed, cannot find should_announce')
blobs_db_cursor.execute("SELECT * FROM blobs WHERE should_announce=1")
blobs = blobs_db_cursor.fetchall()
if len(blobs) != len(should_announce_blob_hashes):
log.error("Some how not all blobs were marked as announceable")
blobs_db_file.commit()
blobs_db_file.close()
lbryfile_info_file.close()
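After this migration, every SD blob and head blob should carry should_announce=1. A quick sanity check against the migrated blobs.db; the path is a placeholder:

import sqlite3

conn = sqlite3.connect("/path/to/data_dir/blobs.db")  # placeholder path
marked = conn.execute("select count(*) from blobs where should_announce=1").fetchone()[0]
print(f"{marked} blobs marked announceable")
conn.close()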

View file

@@ -1,62 +0,0 @@
import sqlite3
import os
import logging
log = logging.getLogger(__name__)
def do_migration(conf):
log.info("Doing the migration")
add_lbry_file_metadata(conf.data_dir)
log.info("Migration succeeded")
def add_lbry_file_metadata(db_dir):
"""
Add a lbry_file_metadata table to lbryfile_info.db and populate it with the claim outpoint
(txid, n) for each lbry file, looked up by sd_hash in blockchainname.db's name_metadata table
"""
name_metadata = os.path.join(db_dir, "blockchainname.db")
lbryfile_info_db = os.path.join(db_dir, 'lbryfile_info.db')
if not os.path.isfile(name_metadata) and not os.path.isfile(lbryfile_info_db):
return
if not os.path.isfile(lbryfile_info_db):
log.info("blockchainname.db was not found but lbryfile_info.db was found, skipping migration")
return
name_metadata_db = sqlite3.connect(name_metadata)
lbryfile_db = sqlite3.connect(lbryfile_info_db)
name_metadata_cursor = name_metadata_db.cursor()
lbryfile_cursor = lbryfile_db.cursor()
lbryfile_db.executescript(
"create table if not exists lbry_file_metadata (" +
" lbry_file integer primary key, " +
" txid text, " +
" n integer, " +
" foreign key(lbry_file) references lbry_files(rowid)"
")")
_files = lbryfile_cursor.execute("select rowid, stream_hash from lbry_files").fetchall()
lbry_files = {x[1]: x[0] for x in _files}
for (sd_hash, stream_hash) in lbryfile_cursor.execute("select * "
"from lbry_file_descriptors").fetchall():
lbry_file_id = lbry_files[stream_hash]
outpoint = name_metadata_cursor.execute("select txid, n from name_metadata "
"where sd_hash=?",
(sd_hash,)).fetchall()
if outpoint:
txid, nout = outpoint[0]
lbryfile_cursor.execute("insert into lbry_file_metadata values (?, ?, ?)",
(lbry_file_id, txid, nout))
else:
lbryfile_cursor.execute("insert into lbry_file_metadata values (?, ?, ?)",
(lbry_file_id, None, None))
lbryfile_db.commit()
lbryfile_db.close()
name_metadata_db.close()

View file

@@ -1,326 +0,0 @@
import sqlite3
import os
import json
import logging
from binascii import hexlify
from lbry.schema.claim import Claim
log = logging.getLogger(__name__)
CREATE_TABLES_QUERY = """
pragma foreign_keys=on;
pragma journal_mode=WAL;
create table if not exists blob (
blob_hash char(96) primary key not null,
blob_length integer not null,
next_announce_time integer not null,
should_announce integer not null default 0,
status text not null
);
create table if not exists stream (
stream_hash char(96) not null primary key,
sd_hash char(96) not null references blob,
stream_key text not null,
stream_name text not null,
suggested_filename text not null
);
create table if not exists stream_blob (
stream_hash char(96) not null references stream,
blob_hash char(96) references blob,
position integer not null,
iv char(32) not null,
primary key (stream_hash, blob_hash)
);
create table if not exists claim (
claim_outpoint text not null primary key,
claim_id char(40) not null,
claim_name text not null,
amount integer not null,
height integer not null,
serialized_metadata blob not null,
channel_claim_id text,
address text not null,
claim_sequence integer not null
);
create table if not exists file (
stream_hash text primary key not null references stream,
file_name text not null,
download_directory text not null,
blob_data_rate real not null,
status text not null
);
create table if not exists content_claim (
stream_hash text unique not null references file,
claim_outpoint text not null references claim,
primary key (stream_hash, claim_outpoint)
);
create table if not exists support (
support_outpoint text not null primary key,
claim_id text not null,
amount integer not null,
address text not null
);
"""
def run_operation(db):
def _decorate(fn):
def _wrapper(*args):
cursor = db.cursor()
try:
result = fn(cursor, *args)
db.commit()
return result
except sqlite3.IntegrityError:
db.rollback()
raise
return _wrapper
return _decorate
def verify_sd_blob(sd_hash, blob_dir):
with open(os.path.join(blob_dir, sd_hash), "r") as sd_file:
data = sd_file.read()
sd_length = len(data)
decoded = json.loads(data)
assert set(decoded.keys()) == {
'stream_name', 'blobs', 'stream_type', 'key', 'suggested_file_name', 'stream_hash'
}, "invalid sd blob"
for blob in sorted(decoded['blobs'], key=lambda x: int(x['blob_num']), reverse=True):
if blob['blob_num'] == len(decoded['blobs']) - 1:
assert {'length', 'blob_num', 'iv'} == set(blob.keys()), 'invalid stream terminator'
assert blob['length'] == 0, 'non-zero length stream terminator'
else:
assert {'blob_hash', 'length', 'blob_num', 'iv'} == set(blob.keys()), 'invalid stream blob'
assert blob['length'] > 0, 'zero length stream blob'
return decoded, sd_length
def do_migration(conf):
new_db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(new_db_path)
metadata_db = sqlite3.connect(os.path.join(conf.data_dir, "blockchainname.db"))
lbryfile_db = sqlite3.connect(os.path.join(conf.data_dir, 'lbryfile_info.db'))
blobs_db = sqlite3.connect(os.path.join(conf.data_dir, 'blobs.db'))
name_metadata_cursor = metadata_db.cursor()
lbryfile_cursor = lbryfile_db.cursor()
blobs_db_cursor = blobs_db.cursor()
old_rowid_to_outpoint = {
rowid: (txid, nout) for (rowid, txid, nout) in
lbryfile_cursor.execute("select * from lbry_file_metadata").fetchall()
}
old_sd_hash_to_outpoint = {
sd_hash: (txid, nout) for (txid, nout, sd_hash) in
name_metadata_cursor.execute("select txid, n, sd_hash from name_metadata").fetchall()
}
sd_hash_to_stream_hash = dict(
lbryfile_cursor.execute("select sd_blob_hash, stream_hash from lbry_file_descriptors").fetchall()
)
stream_hash_to_stream_blobs = {}
for (blob_hash, stream_hash, position, iv, length) in lbryfile_db.execute(
"select * from lbry_file_blobs").fetchall():
stream_blobs = stream_hash_to_stream_blobs.get(stream_hash, [])
stream_blobs.append((blob_hash, length, position, iv))
stream_hash_to_stream_blobs[stream_hash] = stream_blobs
claim_outpoint_queries = {}
for claim_query in metadata_db.execute(
"select distinct c.txid, c.n, c.claimId, c.name, claim_cache.claim_sequence, claim_cache.claim_address, "
"claim_cache.height, claim_cache.amount, claim_cache.claim_pb "
"from claim_cache inner join claim_ids c on claim_cache.claim_id=c.claimId"):
txid, nout = claim_query[0], claim_query[1]
if (txid, nout) in claim_outpoint_queries:
continue
claim_outpoint_queries[(txid, nout)] = claim_query
@run_operation(connection)
def _populate_blobs(transaction, blob_infos):
transaction.executemany(
"insert into blob values (?, ?, ?, ?, ?)",
[(blob_hash, blob_length, int(next_announce_time), should_announce, "finished")
for (blob_hash, blob_length, _, next_announce_time, should_announce) in blob_infos]
)
@run_operation(connection)
def _import_file(transaction, sd_hash, stream_hash, key, stream_name, suggested_file_name, data_rate,
status, stream_blobs):
try:
transaction.execute(
"insert or ignore into stream values (?, ?, ?, ?, ?)",
(stream_hash, sd_hash, key, stream_name, suggested_file_name)
)
except sqlite3.IntegrityError:
# failed because the sd isn't a known blob, we'll try to read the blob file and recover it
return sd_hash
# insert any stream blobs that were missing from the blobs table
transaction.executemany(
"insert or ignore into blob values (?, ?, ?, ?, ?)",
[
(blob_hash, length, 0, 0, "pending")
for (blob_hash, length, position, iv) in stream_blobs
]
)
# insert the stream blobs
for blob_hash, length, position, iv in stream_blobs:
transaction.execute(
"insert or ignore into stream_blob values (?, ?, ?, ?)",
(stream_hash, blob_hash, position, iv)
)
download_dir = conf.download_dir
if not isinstance(download_dir, bytes):
download_dir = download_dir.encode()
# insert the file
transaction.execute(
"insert or ignore into file values (?, ?, ?, ?, ?)",
(stream_hash, stream_name, hexlify(download_dir),
data_rate, status)
)
@run_operation(connection)
def _add_recovered_blobs(transaction, blob_infos, sd_hash, sd_length):
transaction.execute(
"insert or replace into blob values (?, ?, ?, ?, ?)", (sd_hash, sd_length, 0, 1, "finished")
)
for blob in sorted(blob_infos, key=lambda x: x['blob_num'], reverse=True):
if blob['blob_num'] < len(blob_infos) - 1:
transaction.execute(
"insert or ignore into blob values (?, ?, ?, ?, ?)",
(blob['blob_hash'], blob['length'], 0, 0, "pending")
)
@run_operation(connection)
def _make_db(new_db):
# create the new tables
new_db.executescript(CREATE_TABLES_QUERY)
# first migrate the blobs
blobs = blobs_db_cursor.execute("select * from blobs").fetchall()
_populate_blobs(blobs) # pylint: disable=no-value-for-parameter
log.info("migrated %i blobs", new_db.execute("select count(*) from blob").fetchone()[0])
# used to store the query arguments if we need to try re-importing the lbry file later
file_args = {} # <sd_hash>: args tuple
file_outpoints = {} # <outpoint tuple>: sd_hash
# get the file and stream queries ready
for (rowid, sd_hash, stream_hash, key, stream_name, suggested_file_name, data_rate, status) in \
lbryfile_db.execute(
"select distinct lbry_files.rowid, d.sd_blob_hash, lbry_files.*, o.blob_data_rate, o.status "
"from lbry_files "
"inner join lbry_file_descriptors d on lbry_files.stream_hash=d.stream_hash "
"inner join lbry_file_options o on lbry_files.stream_hash=o.stream_hash"):
# this is to try to link the file to a content claim after we've imported all the files
if rowid in old_rowid_to_outpoint:
file_outpoints[old_rowid_to_outpoint[rowid]] = sd_hash
elif sd_hash in old_sd_hash_to_outpoint:
file_outpoints[old_sd_hash_to_outpoint[sd_hash]] = sd_hash
sd_hash_to_stream_hash[sd_hash] = stream_hash
if stream_hash in stream_hash_to_stream_blobs:
file_args[sd_hash] = (
sd_hash, stream_hash, key, stream_name,
suggested_file_name, data_rate or 0.0,
status, stream_hash_to_stream_blobs.pop(stream_hash)
)
# used to store the query arguments if we need to try re-importing the claim
claim_queries = {} # <sd_hash>: claim query tuple
# get the claim queries ready, only keep those with associated files
for outpoint, sd_hash in file_outpoints.items():
if outpoint in claim_outpoint_queries:
claim_queries[sd_hash] = claim_outpoint_queries[outpoint]
# insert the claims
new_db.executemany(
"insert or ignore into claim values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
[
(
"%s:%i" % (claim_arg_tup[0], claim_arg_tup[1]), claim_arg_tup[2], claim_arg_tup[3],
claim_arg_tup[7], claim_arg_tup[6], claim_arg_tup[8],
Claim.from_bytes(claim_arg_tup[8]).signing_channel_id, claim_arg_tup[5], claim_arg_tup[4]
)
for sd_hash, claim_arg_tup in claim_queries.items() if claim_arg_tup
] # sd_hash, (txid, nout, claim_id, name, sequence, address, height, amount, serialized)
)
log.info("migrated %i claims", new_db.execute("select count(*) from claim").fetchone()[0])
damaged_stream_sds = []
# import the files and get sd hashes of streams to attempt recovering
for sd_hash, file_query in file_args.items():
failed_sd = _import_file(*file_query)
if failed_sd:
damaged_stream_sds.append(failed_sd)
# recover damaged streams
if damaged_stream_sds:
blob_dir = os.path.join(conf.data_dir, "blobfiles")
damaged_sds_on_disk = [] if not os.path.isdir(blob_dir) else list({p for p in os.listdir(blob_dir)
if p in damaged_stream_sds})
for damaged_sd in damaged_sds_on_disk:
try:
decoded, sd_length = verify_sd_blob(damaged_sd, blob_dir)
blobs = decoded['blobs']
_add_recovered_blobs(blobs, damaged_sd, sd_length) # pylint: disable=no-value-for-parameter
_import_file(*file_args[damaged_sd])
damaged_stream_sds.remove(damaged_sd)
except (OSError, ValueError, TypeError, AssertionError, sqlite3.IntegrityError):
continue
log.info("migrated %i files", new_db.execute("select count(*) from file").fetchone()[0])
# associate the content claims to their respective files
for claim_arg_tup in claim_queries.values():
if claim_arg_tup and (claim_arg_tup[0], claim_arg_tup[1]) in file_outpoints \
and file_outpoints[(claim_arg_tup[0], claim_arg_tup[1])] in sd_hash_to_stream_hash:
try:
new_db.execute(
"insert or ignore into content_claim values (?, ?)",
(
sd_hash_to_stream_hash.get(file_outpoints.get((claim_arg_tup[0], claim_arg_tup[1]))),
"%s:%i" % (claim_arg_tup[0], claim_arg_tup[1])
)
)
except sqlite3.IntegrityError:
continue
log.info("migrated %i content claims", new_db.execute("select count(*) from content_claim").fetchone()[0])
try:
_make_db() # pylint: disable=no-value-for-parameter
except sqlite3.OperationalError as err:
if str(err) == "table blob has 7 columns but 5 values were supplied":
log.warning("detected a failed previous migration to revision 6, repairing it")
connection.close()
os.remove(new_db_path)
return do_migration(conf)
raise err
connection.close()
blobs_db.close()
lbryfile_db.close()
metadata_db.close()
# os.remove(os.path.join(db_dir, "blockchainname.db"))
# os.remove(os.path.join(db_dir, 'lbryfile_info.db'))
# os.remove(os.path.join(db_dir, 'blobs.db'))
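verify_sd_blob above documents the shape a recoverable stream descriptor must have: exactly the six top-level keys, data blobs carrying blob_hash/length/blob_num/iv, and a zero-length terminator as the final blob. A minimal descriptor that would pass those asserts; the hashes, key, and IVs are placeholder hex, not real values:

minimal_sd = {
    "stream_name": "6578616d706c65",          # hex-encoded name
    "stream_type": "lbryfile",
    "key": "30" * 16,
    "suggested_file_name": "6578616d706c65",
    "stream_hash": "ab" * 48,
    "blobs": [
        {"blob_hash": "cd" * 48, "length": 2097152, "blob_num": 0, "iv": "30" * 16},
        {"length": 0, "blob_num": 1, "iv": "30" * 16},  # zero-length stream terminator
    ],
}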

View file

@@ -1,13 +0,0 @@
import sqlite3
import os
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
cursor.executescript("alter table blob add last_announced_time integer;")
cursor.executescript("alter table blob add single_announce integer;")
cursor.execute("update blob set next_announce_time=0")
connection.commit()
connection.close()

View file

@@ -1,21 +0,0 @@
import sqlite3
import os
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
cursor.executescript(
"""
create table reflected_stream (
sd_hash text not null,
reflector_address text not null,
timestamp integer,
primary key (sd_hash, reflector_address)
);
"""
)
connection.commit()
connection.close()

View file

@@ -1,47 +0,0 @@
import sqlite3
import logging
import os
from lbry.blob.blob_info import BlobInfo
from lbry.stream.descriptor import StreamDescriptor
log = logging.getLogger(__name__)
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
blob_dir = os.path.join(conf.data_dir, "blobfiles")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
query = "select stream_name, stream_key, suggested_filename, sd_hash, stream_hash from stream"
streams = cursor.execute(query).fetchall()
blobs = cursor.execute("select s.stream_hash, s.position, s.iv, b.blob_hash, b.blob_length from stream_blob s "
"left outer join blob b ON b.blob_hash=s.blob_hash order by s.position").fetchall()
blobs_by_stream = {}
for stream_hash, position, iv, blob_hash, blob_length in blobs:
blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, blob_hash))
for stream_name, stream_key, suggested_filename, sd_hash, stream_hash in streams:
sd = StreamDescriptor(None, blob_dir, stream_name, stream_key, suggested_filename,
blobs_by_stream[stream_hash], stream_hash, sd_hash)
if sd_hash != sd.calculate_sd_hash():
log.info("Stream for descriptor %s is invalid, cleaning it up", sd_hash)
blob_hashes = [blob.blob_hash for blob in blobs_by_stream[stream_hash]]
delete_stream(cursor, stream_hash, sd_hash, blob_hashes, blob_dir)
connection.commit()
connection.close()
def delete_stream(transaction, stream_hash, sd_hash, blob_hashes, blob_dir):
transaction.execute("delete from content_claim where stream_hash=? ", (stream_hash,))
transaction.execute("delete from file where stream_hash=? ", (stream_hash, ))
transaction.execute("delete from stream_blob where stream_hash=?", (stream_hash, ))
transaction.execute("delete from stream where stream_hash=? ", (stream_hash, ))
transaction.execute("delete from blob where blob_hash=?", (sd_hash, ))
for blob_hash in blob_hashes:
transaction.execute("delete from blob where blob_hash=?", (blob_hash, ))
file_path = os.path.join(blob_dir, blob_hash)
if os.path.isfile(file_path):
os.unlink(file_path)

View file

@@ -1,20 +0,0 @@
import sqlite3
import os
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
query = "select stream_hash, sd_hash from main.stream"
for stream_hash, sd_hash in cursor.execute(query).fetchall():
head_blob_hash = cursor.execute(
"select blob_hash from stream_blob where position = 0 and stream_hash = ?",
(stream_hash,)
).fetchone()
if not head_blob_hash:
continue
cursor.execute("update blob set should_announce=1 where blob_hash in (?, ?)", (sd_hash, head_blob_hash[0],))
connection.commit()
connection.close()

View file

@@ -1,835 +0,0 @@
import os
import logging
import sqlite3
import typing
import asyncio
import binascii
import time
from typing import Optional
from lbry.wallet.database import SQLiteMixin
from lbry.conf import Config
from lbry.blockchain.dewies import dewies_to_lbc, lbc_to_dewies
from lbry.blockchain.transaction import Transaction
from lbry.schema.claim import Claim
from lbry.dht.constants import DATA_EXPIRATION
from lbry.blob.blob_info import BlobInfo
if typing.TYPE_CHECKING:
from lbry.blob.blob_file import BlobFile
from lbry.stream.descriptor import StreamDescriptor
log = logging.getLogger(__name__)
def calculate_effective_amount(amount: str, supports: typing.Optional[typing.List[typing.Dict]] = None) -> str:
return dewies_to_lbc(
lbc_to_dewies(amount) + sum(lbc_to_dewies(support['amount']) for support in (supports or []))
)
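# Illustrative usage (not part of the original module): a claim of 1.0 LBC with
# supports of 0.5 and 0.25 LBC has an effective amount of 1.75 LBC; the exact
# string returned depends on dewies_to_lbc formatting.
#     calculate_effective_amount("1.0", [{"amount": "0.5"}, {"amount": "0.25"}])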
class StoredContentClaim:
def __init__(self, outpoint: Optional[str] = None, claim_id: Optional[str] = None, name: Optional[str] = None,
amount: Optional[int] = None, height: Optional[int] = None, serialized: Optional[str] = None,
channel_claim_id: Optional[str] = None, address: Optional[str] = None,
claim_sequence: Optional[int] = None, channel_name: Optional[str] = None):
self.claim_id = claim_id
self.outpoint = outpoint
self.claim_name = name
self.amount = amount
self.height = height
self.claim: typing.Optional[Claim] = None if not serialized else Claim.from_bytes(
binascii.unhexlify(serialized)
)
self.claim_address = address
self.claim_sequence = claim_sequence
self.channel_claim_id = channel_claim_id
self.channel_name = channel_name
@property
def txid(self) -> typing.Optional[str]:
return None if not self.outpoint else self.outpoint.split(":")[0]
@property
def nout(self) -> typing.Optional[int]:
return None if not self.outpoint else int(self.outpoint.split(":")[1])
def as_dict(self) -> typing.Dict:
return {
"name": self.claim_name,
"claim_id": self.claim_id,
"address": self.claim_address,
"claim_sequence": self.claim_sequence,
"value": self.claim,
"height": self.height,
"amount": dewies_to_lbc(self.amount),
"nout": self.nout,
"txid": self.txid,
"channel_claim_id": self.channel_claim_id,
"channel_name": self.channel_name
}
def _get_content_claims(transaction: sqlite3.Connection, query: str,
source_hashes: typing.List[str]) -> typing.Dict[str, StoredContentClaim]:
claims = {}
for claim_info in _batched_select(transaction, query, source_hashes):
claims[claim_info[0]] = StoredContentClaim(*claim_info[1:])
return claims
def get_claims_from_stream_hashes(transaction: sqlite3.Connection,
stream_hashes: typing.List[str]) -> typing.Dict[str, StoredContentClaim]:
query = (
"select content_claim.stream_hash, c.*, case when c.channel_claim_id is not null then "
" (select claim_name from claim where claim_id==c.channel_claim_id) "
" else null end as channel_name "
" from content_claim "
" inner join claim c on c.claim_outpoint=content_claim.claim_outpoint and content_claim.stream_hash in {}"
" order by c.rowid desc"
)
return _get_content_claims(transaction, query, stream_hashes)
def get_claims_from_torrent_info_hashes(transaction: sqlite3.Connection,
info_hashes: typing.List[str]) -> typing.Dict[str, StoredContentClaim]:
query = (
"select content_claim.bt_infohash, c.*, case when c.channel_claim_id is not null then "
" (select claim_name from claim where claim_id==c.channel_claim_id) "
" else null end as channel_name "
" from content_claim "
" inner join claim c on c.claim_outpoint=content_claim.claim_outpoint and content_claim.bt_infohash in {}"
" order by c.rowid desc"
)
return _get_content_claims(transaction, query, info_hashes)
def _batched_select(transaction, query, parameters, batch_size=900):
for start_index in range(0, len(parameters), batch_size):
current_batch = parameters[start_index:start_index+batch_size]
bind = "({})".format(','.join(['?'] * len(current_batch)))
yield from transaction.execute(query.format(bind), current_batch)
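# Illustrative usage (not part of the original module): _batched_select keeps each
# query under SQLite's bound-parameter limit by slicing `parameters` into chunks of
# 900 and formatting a matching "(?, ?, ...)" placeholder group for each chunk, e.g.:
#     rows = list(_batched_select(
#         transaction, "select * from blob where blob_hash in {}", all_blob_hashes))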
def _get_lbry_file_stream_dict(rowid, added_on, stream_hash, file_name, download_dir, data_rate, status,
sd_hash, stream_key, stream_name, suggested_file_name, claim, saved_file,
raw_content_fee, fully_reflected):
return {
"rowid": rowid,
"added_on": added_on,
"stream_hash": stream_hash,
"file_name": file_name, # hex
"download_directory": download_dir, # hex
"blob_data_rate": data_rate,
"status": status,
"sd_hash": sd_hash,
"key": stream_key,
"stream_name": stream_name, # hex
"suggested_file_name": suggested_file_name, # hex
"claim": claim,
"saved_file": bool(saved_file),
"content_fee": None if not raw_content_fee else Transaction(
binascii.unhexlify(raw_content_fee)
),
"fully_reflected": fully_reflected
}
def get_all_lbry_files(transaction: sqlite3.Connection) -> typing.List[typing.Dict]:
files = []
signed_claims = {}
for (rowid, stream_hash, _, file_name, download_dir, data_rate, status, saved_file, raw_content_fee,
added_on, _, sd_hash, stream_key, stream_name, suggested_file_name, *claim_args) in transaction.execute(
"select file.rowid, file.*, stream.*, c.*, "
" case when (SELECT 1 FROM reflected_stream r WHERE r.sd_hash=stream.sd_hash) "
" is null then 0 else 1 end as fully_reflected "
"from file inner join stream on file.stream_hash=stream.stream_hash "
"inner join content_claim cc on file.stream_hash=cc.stream_hash "
"inner join claim c on cc.claim_outpoint=c.claim_outpoint "
"order by c.rowid desc").fetchall():
claim_args, fully_reflected = tuple(claim_args[:-1]), claim_args[-1]
claim = StoredContentClaim(*claim_args)
if claim.channel_claim_id:
if claim.channel_claim_id not in signed_claims:
signed_claims[claim.channel_claim_id] = []
signed_claims[claim.channel_claim_id].append(claim)
files.append(
_get_lbry_file_stream_dict(
rowid, added_on, stream_hash, file_name, download_dir, data_rate, status,
sd_hash, stream_key, stream_name, suggested_file_name, claim, saved_file,
raw_content_fee, fully_reflected
)
)
for claim_name, claim_id in _batched_select(
transaction, "select c.claim_name, c.claim_id from claim c where c.claim_id in {}",
tuple(signed_claims.keys())):
for claim in signed_claims[claim_id]:
claim.channel_name = claim_name
return files
def store_stream(transaction: sqlite3.Connection, sd_blob: 'BlobFile', descriptor: 'StreamDescriptor'):
# add all blobs, except the last one, which is empty
transaction.executemany(
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)",
((blob.blob_hash, blob.length, 0, 0, "pending", 0, 0)
for blob in (descriptor.blobs[:-1] if len(descriptor.blobs) > 1 else descriptor.blobs) + [sd_blob])
).fetchall()
# associate the blobs to the stream
transaction.execute("insert or ignore into stream values (?, ?, ?, ?, ?)",
(descriptor.stream_hash, sd_blob.blob_hash, descriptor.key,
binascii.hexlify(descriptor.stream_name.encode()).decode(),
binascii.hexlify(descriptor.suggested_file_name.encode()).decode())).fetchall()
# add the stream
transaction.executemany(
"insert or ignore into stream_blob values (?, ?, ?, ?)",
((descriptor.stream_hash, blob.blob_hash, blob.blob_num, blob.iv)
for blob in descriptor.blobs)
).fetchall()
# ensure should_announce is set regardless if insert was ignored
transaction.execute(
"update blob set should_announce=1 where blob_hash in (?, ?)",
(sd_blob.blob_hash, descriptor.blobs[0].blob_hash,)
).fetchall()
def delete_stream(transaction: sqlite3.Connection, descriptor: 'StreamDescriptor'):
blob_hashes = [(blob.blob_hash, ) for blob in descriptor.blobs[:-1]]
blob_hashes.append((descriptor.sd_hash, ))
transaction.execute("delete from content_claim where stream_hash=? ", (descriptor.stream_hash,)).fetchall()
transaction.execute("delete from file where stream_hash=? ", (descriptor.stream_hash,)).fetchall()
transaction.execute("delete from stream_blob where stream_hash=?", (descriptor.stream_hash,)).fetchall()
transaction.execute("delete from stream where stream_hash=? ", (descriptor.stream_hash,)).fetchall()
transaction.executemany("delete from blob where blob_hash=?", blob_hashes).fetchall()
def delete_torrent(transaction: sqlite3.Connection, bt_infohash: str):
transaction.execute("delete from content_claim where bt_infohash=?", (bt_infohash, )).fetchall()
transaction.execute("delete from torrent_tracker where bt_infohash=?", (bt_infohash,)).fetchall()
transaction.execute("delete from torrent_node where bt_infohash=?", (bt_infohash,)).fetchall()
transaction.execute("delete from torrent_http_seed where bt_infohash=?", (bt_infohash,)).fetchall()
transaction.execute("delete from file where bt_infohash=?", (bt_infohash,)).fetchall()
transaction.execute("delete from torrent where bt_infohash=?", (bt_infohash,)).fetchall()
def store_file(transaction: sqlite3.Connection, stream_hash: str, file_name: typing.Optional[str],
download_directory: typing.Optional[str], data_payment_rate: float, status: str,
content_fee: typing.Optional[Transaction], added_on: typing.Optional[int] = None) -> int:
if not file_name and not download_directory:
encoded_file_name, encoded_download_dir = None, None
else:
encoded_file_name = binascii.hexlify(file_name.encode()).decode()
encoded_download_dir = binascii.hexlify(download_directory.encode()).decode()
time_added = added_on or int(time.time())
transaction.execute(
"insert or replace into file values (?, NULL, ?, ?, ?, ?, ?, ?, ?)",
(stream_hash, encoded_file_name, encoded_download_dir, data_payment_rate, status,
1 if (file_name and download_directory and os.path.isfile(os.path.join(download_directory, file_name))) else 0,
None if not content_fee else binascii.hexlify(content_fee.raw).decode(), time_added)
).fetchall()
return transaction.execute("select rowid from file where stream_hash=?", (stream_hash, )).fetchone()[0]
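# Note (illustrative, not part of the original module): file_name and download_directory
# are persisted hex-encoded, e.g. binascii.hexlify(b"video.mp4").decode() == "766964656f2e6d7034".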
class SQLiteStorage(SQLiteMixin):
CREATE_TABLES_QUERY = """
pragma foreign_keys=on;
pragma journal_mode=WAL;
create table if not exists blob (
blob_hash char(96) primary key not null,
blob_length integer not null,
next_announce_time integer not null,
should_announce integer not null default 0,
status text not null,
last_announced_time integer,
single_announce integer
);
create table if not exists stream (
stream_hash char(96) not null primary key,
sd_hash char(96) not null references blob,
stream_key text not null,
stream_name text not null,
suggested_filename text not null
);
create table if not exists stream_blob (
stream_hash char(96) not null references stream,
blob_hash char(96) references blob,
position integer not null,
iv char(32) not null,
primary key (stream_hash, blob_hash)
);
create table if not exists claim (
claim_outpoint text not null primary key,
claim_id char(40) not null,
claim_name text not null,
amount integer not null,
height integer not null,
serialized_metadata blob not null,
channel_claim_id text,
address text not null,
claim_sequence integer not null
);
create table if not exists torrent (
bt_infohash char(20) not null primary key,
tracker text,
length integer not null,
name text not null
);
create table if not exists torrent_node ( -- BEP-0005
bt_infohash char(20) not null references torrent,
host text not null,
port integer not null
);
create table if not exists torrent_tracker ( -- BEP-0012
bt_infohash char(20) not null references torrent,
tracker text not null
);
create table if not exists torrent_http_seed ( -- BEP-0017
bt_infohash char(20) not null references torrent,
http_seed text not null
);
create table if not exists file (
stream_hash char(96) references stream,
bt_infohash char(20) references torrent,
file_name text,
download_directory text,
blob_data_rate real not null,
status text not null,
saved_file integer not null,
content_fee text,
added_on integer not null
);
create table if not exists content_claim (
stream_hash char(96) references stream,
bt_infohash char(20) references torrent,
claim_outpoint text unique not null references claim
);
create table if not exists support (
support_outpoint text not null primary key,
claim_id text not null,
amount integer not null,
address text not null
);
create table if not exists reflected_stream (
sd_hash text not null,
reflector_address text not null,
timestamp integer,
primary key (sd_hash, reflector_address)
);
create table if not exists peer (
node_id char(96) not null primary key,
address text not null,
udp_port integer not null,
tcp_port integer,
unique (address, udp_port)
);
"""
def __init__(self, conf: Config, path, loop=None, time_getter: typing.Optional[typing.Callable[[], float]] = None):
super().__init__(path)
self.conf = conf
self.content_claim_callbacks = {}
self.loop = loop or asyncio.get_event_loop()
self.time_getter = time_getter or time.time
async def run_and_return_one_or_none(self, query, *args):
for row in await self.db.execute_fetchall(query, args):
if len(row) == 1:
return row[0]
return row
async def run_and_return_list(self, query, *args):
rows = list(await self.db.execute_fetchall(query, args))
return [col[0] for col in rows] if rows else []
# # # # # # # # # blob functions # # # # # # # # #
async def add_blobs(self, *blob_hashes_and_lengths: typing.Tuple[str, int], finished=False):
def _add_blobs(transaction: sqlite3.Connection):
transaction.executemany(
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)",
(
(blob_hash, length, 0, 0, "pending" if not finished else "finished", 0, 0)
for blob_hash, length in blob_hashes_and_lengths
)
).fetchall()
if finished:
transaction.executemany(
"update blob set status='finished' where blob.blob_hash=?", (
(blob_hash, ) for blob_hash, _ in blob_hashes_and_lengths
)
).fetchall()
return await self.db.run(_add_blobs)
def get_blob_status(self, blob_hash: str):
return self.run_and_return_one_or_none(
"select status from blob where blob_hash=?", blob_hash
)
def update_last_announced_blobs(self, blob_hashes: typing.List[str]):
def _update_last_announced_blobs(transaction: sqlite3.Connection):
last_announced = self.time_getter()
return transaction.executemany(
"update blob set next_announce_time=?, last_announced_time=?, single_announce=0 "
"where blob_hash=?",
((int(last_announced + (DATA_EXPIRATION / 2)), int(last_announced), blob_hash)
for blob_hash in blob_hashes)
).fetchall()
return self.db.run(_update_last_announced_blobs)
def should_single_announce_blobs(self, blob_hashes, immediate=False):
def set_single_announce(transaction):
now = int(self.time_getter())
for blob_hash in blob_hashes:
if immediate:
transaction.execute(
"update blob set single_announce=1, next_announce_time=? "
"where blob_hash=? and status='finished'", (int(now), blob_hash)
).fetchall()
else:
transaction.execute(
"update blob set single_announce=1 where blob_hash=? and status='finished'", (blob_hash,)
).fetchall()
return self.db.run(set_single_announce)
def get_blobs_to_announce(self):
def get_and_update(transaction):
timestamp = int(self.time_getter())
if self.conf.announce_head_and_sd_only:
r = transaction.execute(
"select blob_hash from blob "
"where blob_hash is not null and "
"(should_announce=1 or single_announce=1) and next_announce_time<? and status='finished' "
"order by next_announce_time asc limit ?",
(timestamp, int(self.conf.concurrent_blob_announcers * 10))
).fetchall()
else:
r = transaction.execute(
"select blob_hash from blob where blob_hash is not null "
"and next_announce_time<? and status='finished' "
"order by next_announce_time asc limit ?",
(timestamp, int(self.conf.concurrent_blob_announcers * 10))
).fetchall()
return [b[0] for b in r]
return self.db.run(get_and_update)
def delete_blobs_from_db(self, blob_hashes):
def delete_blobs(transaction):
transaction.executemany(
"delete from blob where blob_hash=?;", ((blob_hash,) for blob_hash in blob_hashes)
).fetchall()
return self.db.run_with_foreign_keys_disabled(delete_blobs)
def get_all_blob_hashes(self):
return self.run_and_return_list("select blob_hash from blob")
def sync_missing_blobs(self, blob_files: typing.Set[str]) -> typing.Awaitable[typing.Set[str]]:
def _sync_blobs(transaction: sqlite3.Connection) -> typing.Set[str]:
finished_blob_hashes = tuple(
blob_hash for (blob_hash, ) in transaction.execute(
"select blob_hash from blob where status='finished'"
).fetchall()
)
finished_blobs_set = set(finished_blob_hashes)
to_update_set = finished_blobs_set.difference(blob_files)
transaction.executemany(
"update blob set status='pending' where blob_hash=?",
((blob_hash, ) for blob_hash in to_update_set)
).fetchall()
return blob_files.intersection(finished_blobs_set)
return self.db.run(_sync_blobs)
# # # # # # # # # stream functions # # # # # # # # #
async def stream_exists(self, sd_hash: str) -> bool:
streams = await self.run_and_return_one_or_none("select stream_hash from stream where sd_hash=?", sd_hash)
return streams is not None
async def file_exists(self, sd_hash: str) -> bool:
streams = await self.run_and_return_one_or_none("select f.stream_hash from file f "
"inner join stream s on "
"s.stream_hash=f.stream_hash and s.sd_hash=?", sd_hash)
return streams is not None
def store_stream(self, sd_blob: 'BlobFile', descriptor: 'StreamDescriptor'):
return self.db.run(store_stream, sd_blob, descriptor)
def get_blobs_for_stream(self, stream_hash, only_completed=False) -> typing.Awaitable[typing.List[BlobInfo]]:
def _get_blobs_for_stream(transaction):
crypt_blob_infos = []
stream_blobs = transaction.execute(
"select blob_hash, position, iv from stream_blob where stream_hash=? "
"order by position asc", (stream_hash, )
).fetchall()
if only_completed:
lengths = transaction.execute(
"select b.blob_hash, b.blob_length from blob b "
"inner join stream_blob s ON b.blob_hash=s.blob_hash and b.status='finished' and s.stream_hash=?",
(stream_hash, )
).fetchall()
else:
lengths = transaction.execute(
"select b.blob_hash, b.blob_length from blob b "
"inner join stream_blob s ON b.blob_hash=s.blob_hash and s.stream_hash=?",
(stream_hash, )
).fetchall()
blob_length_dict = {}
for blob_hash, length in lengths:
blob_length_dict[blob_hash] = length
for blob_hash, position, iv in stream_blobs:
blob_length = blob_length_dict.get(blob_hash, 0)
crypt_blob_infos.append(BlobInfo(position, blob_length, iv, blob_hash))
if not blob_hash:
break
return crypt_blob_infos
return self.db.run(_get_blobs_for_stream)
def get_sd_blob_hash_for_stream(self, stream_hash):
return self.run_and_return_one_or_none(
"select sd_hash from stream where stream_hash=?", stream_hash
)
def get_stream_hash_for_sd_hash(self, sd_blob_hash):
return self.run_and_return_one_or_none(
"select stream_hash from stream where sd_hash = ?", sd_blob_hash
)
def delete_stream(self, descriptor: 'StreamDescriptor'):
return self.db.run_with_foreign_keys_disabled(delete_stream, descriptor)
async def delete_torrent(self, bt_infohash: str):
return await self.db.run(delete_torrent, bt_infohash)
# # # # # # # # # file stuff # # # # # # # # #
def save_downloaded_file(self, stream_hash: str, file_name: typing.Optional[str],
download_directory: typing.Optional[str], data_payment_rate: float,
content_fee: typing.Optional[Transaction] = None,
added_on: typing.Optional[int] = None) -> typing.Awaitable[int]:
return self.save_published_file(
stream_hash, file_name, download_directory, data_payment_rate, status="running",
content_fee=content_fee, added_on=added_on
)
def save_published_file(self, stream_hash: str, file_name: typing.Optional[str],
download_directory: typing.Optional[str], data_payment_rate: float,
status: str = "finished",
content_fee: typing.Optional[Transaction] = None,
added_on: typing.Optional[int] = None) -> typing.Awaitable[int]:
return self.db.run(store_file, stream_hash, file_name, download_directory, data_payment_rate, status,
content_fee, added_on)
async def update_manually_removed_files_since_last_run(self):
"""
Update files that have been removed from the downloads directory since the last run
"""
def update_manually_removed_files(transaction: sqlite3.Connection):
files = {}
query = "select stream_hash, download_directory, file_name from file where saved_file=1 " \
"and stream_hash is not null"
for (stream_hash, download_directory, file_name) in transaction.execute(query).fetchall():
if download_directory and file_name:
files[stream_hash] = download_directory, file_name
return files
def detect_removed(files):
return [
stream_hash for stream_hash, (download_directory, file_name) in files.items()
if not os.path.isfile(os.path.join(binascii.unhexlify(download_directory).decode(),
binascii.unhexlify(file_name).decode()))
]
def update_db_removed(transaction: sqlite3.Connection, removed):
query = "update file set file_name=null, download_directory=null, saved_file=0 where stream_hash in {}"
for cur in _batched_select(transaction, query, removed):
cur.fetchall()
stream_and_file = await self.db.run(update_manually_removed_files)
removed = await self.loop.run_in_executor(None, detect_removed, stream_and_file)
if removed:
await self.db.run(update_db_removed, removed)
def get_all_lbry_files(self) -> typing.Awaitable[typing.List[typing.Dict]]:
return self.db.run(get_all_lbry_files)
def change_file_status(self, stream_hash: str, new_status: str):
log.debug("update file status %s -> %s", stream_hash, new_status)
return self.db.execute_fetchall("update file set status=? where stream_hash=?", (new_status, stream_hash))
async def change_file_download_dir_and_file_name(self, stream_hash: str, download_dir: typing.Optional[str],
file_name: typing.Optional[str]):
if not file_name or not download_dir:
encoded_file_name, encoded_download_dir = None, None
else:
encoded_file_name = binascii.hexlify(file_name.encode()).decode()
encoded_download_dir = binascii.hexlify(download_dir.encode()).decode()
return await self.db.execute_fetchall("update file set download_directory=?, file_name=? where stream_hash=?", (
encoded_download_dir, encoded_file_name, stream_hash,
))
async def save_content_fee(self, stream_hash: str, content_fee: Transaction):
return await self.db.execute_fetchall("update file set content_fee=? where stream_hash=?", (
binascii.hexlify(content_fee.raw), stream_hash,
))
async def set_saved_file(self, stream_hash: str):
return await self.db.execute_fetchall("update file set saved_file=1 where stream_hash=?", (
stream_hash,
))
async def clear_saved_file(self, stream_hash: str):
return await self.db.execute_fetchall("update file set saved_file=0 where stream_hash=?", (
stream_hash,
))
async def recover_streams(self, descriptors_and_sds: typing.List[typing.Tuple['StreamDescriptor', 'BlobFile',
typing.Optional[Transaction]]],
download_directory: str):
def _recover(transaction: sqlite3.Connection):
stream_hashes = [x[0].stream_hash for x in descriptors_and_sds]
for descriptor, sd_blob, content_fee in descriptors_and_sds:
content_claim = transaction.execute(
"select * from content_claim where stream_hash=?", (descriptor.stream_hash, )
).fetchone()
delete_stream(transaction, descriptor) # this will also delete the content claim
store_stream(transaction, sd_blob, descriptor)
store_file(transaction, descriptor.stream_hash, os.path.basename(descriptor.suggested_file_name),
download_directory, 0.0, 'stopped', content_fee=content_fee)
if content_claim:
transaction.execute("insert or ignore into content_claim values (?, ?, ?)", content_claim)
transaction.executemany(
"update file set status='stopped' where stream_hash=?",
((stream_hash, ) for stream_hash in stream_hashes)
).fetchall()
download_dir = binascii.hexlify(self.conf.download_dir.encode()).decode()
transaction.executemany(
f"update file set download_directory=? where stream_hash=?",
((download_dir, stream_hash) for stream_hash in stream_hashes)
).fetchall()
await self.db.run_with_foreign_keys_disabled(_recover)
def get_all_stream_hashes(self):
return self.run_and_return_list("select stream_hash from stream")
# # # # # # # # # support functions # # # # # # # # #
def save_supports(self, claim_id_to_supports: dict):
# TODO: add 'address' to support items returned for a claim from lbrycrdd and lbryum-server
def _save_support(transaction):
bind = "({})".format(','.join(['?'] * len(claim_id_to_supports)))
transaction.execute(
f"delete from support where claim_id in {bind}", tuple(claim_id_to_supports.keys())
).fetchall()
for claim_id, supports in claim_id_to_supports.items():
for support in supports:
transaction.execute(
"insert into support values (?, ?, ?, ?)",
("%s:%i" % (support['txid'], support['nout']), claim_id, lbc_to_dewies(support['amount']),
support.get('address', ""))
).fetchall()
return self.db.run(_save_support)
def get_supports(self, *claim_ids):
def _format_support(outpoint, supported_id, amount, address):
return {
"txid": outpoint.split(":")[0],
"nout": int(outpoint.split(":")[1]),
"claim_id": supported_id,
"amount": dewies_to_lbc(amount),
"address": address,
}
def _get_supports(transaction):
return [
_format_support(*support_info)
for support_info in _batched_select(
transaction,
"select * from support where claim_id in {}",
claim_ids
)
]
return self.db.run(_get_supports)
# # # # # # # # # claim functions # # # # # # # # #
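# Upserts claim rows from resolve/search results, collects any included supports
# for save_supports(), and, when a claim's sd hash matches a locally stored
# stream, updates that file's content_claim association and fires any registered
# content claim callbacks.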
async def save_claims(self, claim_infos):
claim_id_to_supports = {}
update_file_callbacks = []
def _save_claims(transaction):
content_claims_to_update = []
for claim_info in claim_infos:
outpoint = "%s:%i" % (claim_info['txid'], claim_info['nout'])
claim_id = claim_info['claim_id']
name = claim_info['name']
amount = lbc_to_dewies(claim_info['amount'])
height = claim_info['height']
address = claim_info['address']
sequence = claim_info['claim_sequence']
certificate_id = claim_info['value'].signing_channel_id
try:
source_hash = claim_info['value'].stream.source.sd_hash
except (AttributeError, ValueError):
source_hash = None
serialized = binascii.hexlify(claim_info['value'].to_bytes())
transaction.execute(
"insert or replace into claim values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
(outpoint, claim_id, name, amount, height, serialized, certificate_id, address, sequence)
).fetchall()
# if this response doesn't have support info, don't overwrite the existing
# support info
if 'supports' in claim_info:
claim_id_to_supports[claim_id] = claim_info['supports']
if not source_hash:
continue
stream_hash = transaction.execute(
"select file.stream_hash from stream "
"inner join file on file.stream_hash=stream.stream_hash where sd_hash=?", (source_hash,)
).fetchone()
if not stream_hash:
continue
stream_hash = stream_hash[0]
known_outpoint = transaction.execute(
"select claim_outpoint from content_claim where stream_hash=?", (stream_hash,)
).fetchone()
known_claim_id = transaction.execute(
"select claim_id from claim "
"inner join content_claim c3 ON claim.claim_outpoint=c3.claim_outpoint "
"where c3.stream_hash=?", (stream_hash,)
).fetchone()
if not known_claim_id:
content_claims_to_update.append((stream_hash, outpoint))
elif known_outpoint != outpoint:
content_claims_to_update.append((stream_hash, outpoint))
for stream_hash, outpoint in content_claims_to_update:
self._save_content_claim(transaction, outpoint, stream_hash)
if stream_hash in self.content_claim_callbacks:
update_file_callbacks.append(self.content_claim_callbacks[stream_hash]())
await self.db.run(_save_claims)
if update_file_callbacks:
await asyncio.wait(update_file_callbacks)
if claim_id_to_supports:
await self.save_supports(claim_id_to_supports)
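# De-duplicates resolve results by claim id, unwrapping 'certificate'/'claim'
# entries when a bare 'value' is not present, before handing them to save_claims().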
def save_claims_for_resolve(self, claim_infos):
to_save = {}
for info in claim_infos:
if 'value' in info:
if info['value']:
to_save[info['claim_id']] = info
else:
for key in ('certificate', 'claim'):
if info.get(key, {}).get('value'):
to_save[info[key]['claim_id']] = info[key]
return self.save_claims(to_save.values())
@staticmethod
def _save_content_claim(transaction, claim_outpoint, stream_hash):
# get the claim id and serialized metadata
claim_info = transaction.execute(
"select claim_id, serialized_metadata from claim where claim_outpoint=?", (claim_outpoint,)
).fetchone()
if not claim_info:
raise Exception("claim not found")
new_claim_id, claim = claim_info[0], Claim.from_bytes(binascii.unhexlify(claim_info[1]))
# certificate claims should not be in the content_claim table
if not claim.is_stream:
raise Exception("claim does not contain a stream")
# get the known sd hash for this stream
known_sd_hash = transaction.execute(
"select sd_hash from stream where stream_hash=?", (stream_hash,)
).fetchone()
if not known_sd_hash:
raise Exception("stream not found")
# check the claim contains the same sd hash
if known_sd_hash[0] != claim.stream.source.sd_hash:
raise Exception("stream mismatch")
# if there is a current claim associated to the file, check that the new claim is an update to it
current_associated_content = transaction.execute(
"select claim_outpoint from content_claim where stream_hash=?", (stream_hash,)
).fetchone()
if current_associated_content:
current_associated_claim_id = transaction.execute(
"select claim_id from claim where claim_outpoint=?", current_associated_content
).fetchone()[0]
if current_associated_claim_id != new_claim_id:
raise Exception(
f"mismatching claim ids when updating stream {current_associated_claim_id} vs {new_claim_id}"
)
# update the claim associated to the file
transaction.execute("delete from content_claim where stream_hash=?", (stream_hash, )).fetchall()
transaction.execute(
"insert into content_claim values (?, NULL, ?)", (stream_hash, claim_outpoint)
).fetchall()
async def save_content_claim(self, stream_hash, claim_outpoint):
await self.db.run(self._save_content_claim, claim_outpoint, stream_hash)
# update corresponding ManagedEncryptedFileDownloader object
if stream_hash in self.content_claim_callbacks:
await self.content_claim_callbacks[stream_hash]()
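# Returns the claim currently associated with a stream as a dict; when
# include_supports is set, the claim's supports are attached and its
# effective_amount is computed from the claim amount and those supports.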
async def get_content_claim(self, stream_hash: str, include_supports: typing.Optional[bool] = True) -> typing.Dict:
claims = await self.db.run(get_claims_from_stream_hashes, [stream_hash])
claim = None
if claims:
claim = claims[stream_hash].as_dict()
if include_supports:
supports = await self.get_supports(claim['claim_id'])
claim['supports'] = supports
claim['effective_amount'] = calculate_effective_amount(claim['amount'], supports)
return claim
# # # # # # # # # reflector functions # # # # # # # # #
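# Reflector bookkeeping: a successful reflection stores (sd_hash, address,
# timestamp); a failure removes the row. get_streams_to_re_reflect() returns
# streams that have never been reflected or were last reflected more than
# 24 hours (86400 seconds) ago.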
def update_reflected_stream(self, sd_hash, reflector_address, success=True):
if success:
return self.db.execute_fetchall(
"insert or replace into reflected_stream values (?, ?, ?)",
(sd_hash, reflector_address, self.time_getter())
)
return self.db.execute_fetchall(
"delete from reflected_stream where sd_hash=? and reflector_address=?",
(sd_hash, reflector_address)
)
def get_streams_to_re_reflect(self):
return self.run_and_return_list(
"select s.sd_hash from stream s "
"left outer join reflected_stream r on s.sd_hash=r.sd_hash "
"where r.timestamp is null or r.timestamp < ?",
int(self.time_getter()) - 86400
)
# # # # # # # # # # dht functions # # # # # # # # # # #
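# Persisted DHT peers are stored with hex-encoded node ids; loading decodes them
# back to bytes, and saving replaces the whole peer table in one transaction.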
async def get_persisted_kademlia_peers(self) -> typing.List[typing.Tuple[bytes, str, int, int]]:
query = 'select node_id, address, udp_port, tcp_port from peer'
return [(binascii.unhexlify(n), a, u, t) for n, a, u, t in await self.db.execute_fetchall(query)]
async def save_kademlia_peers(self, peers: typing.List['KademliaPeer']):
def _save_kademlia_peers(transaction: sqlite3.Connection):
transaction.execute('delete from peer').fetchall()
transaction.executemany(
'insert into peer(node_id, address, udp_port, tcp_port) values (?, ?, ?, ?)',
tuple([(binascii.hexlify(p.node_id), p.address, p.udp_port, p.tcp_port) for p in peers])
).fetchall()
return await self.db.run(_save_kademlia_peers)

View file

@@ -1,62 +0,0 @@
# Copyright 2016-2017 Ionuț Arțăriși <ionut@artarisi.eu>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This came from https://github.com/mapleoin/undecorated
from inspect import isfunction, ismethod, isclass
__version__ = '0.3.0'
def undecorated(o):
"""Remove all decorators from a function, method or class"""
# class decorator
if isinstance(o, type):
return o
try:
# python2
closure = o.func_closure
except AttributeError:
pass
try:
# python3
closure = o.__closure__
except AttributeError:
return
if closure:
for cell in closure:
# avoid infinite recursion
if cell.cell_contents is o:
continue
# check if the contents looks like a decorator; in that case
# we need to go one level down into the dream, otherwise it
# might just be a different closed-over variable, which we
# can ignore.
# Note: this favors supporting decorators defined without
# @wraps to the detriment of function/method/class closures
if looks_like_a_decorator(cell.cell_contents):
undecd = undecorated(cell.cell_contents)
if undecd:
return undecd
return o
def looks_like_a_decorator(a):
return isfunction(a) or ismethod(a) or isclass(a)
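# Illustrative usage (not part of the original module; decorator name is made up):
#   @require_auth
#   def handler(): ...
#   original = undecorated(handler)
# undecorated() walks the closure cells looking for the wrapped callable and
# returns the innermost plain function; an object with no closure is returned
# unchanged, and an object without a __closure__ attribute yields None.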

View file

@@ -1,29 +0,0 @@
import platform
import os
import logging.handlers
from lbry import build_info, __version__ as lbrynet_version
log = logging.getLogger(__name__)
def get_platform() -> dict:
os_system = platform.system()
if os.environ and 'ANDROID_ARGUMENT' in os.environ:
os_system = 'android'
d = {
"processor": platform.processor(),
"python_version": platform.python_version(),
"platform": platform.platform(),
"os_release": platform.release(),
"os_system": os_system,
"lbrynet_version": lbrynet_version,
"version": lbrynet_version,
"build": build_info.BUILD, # CI server sets this during build step
}
if d["os_system"] == "Linux":
import distro # pylint: disable=import-outside-toplevel
d["distro"] = distro.info()
d["desktop"] = os.environ.get('XDG_CURRENT_DESKTOP', 'Unknown')
return d
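# Illustrative shape of the returned dict (values are made up, not real output):
# {"processor": "x86_64", "python_version": "3.7.7", "platform": "Linux-...",
#  "os_release": "5.4.0", "os_system": "Linux", "lbrynet_version": "0.x.y",
#  "version": "0.x.y", "build": "...", "distro": {...}, "desktop": "GNOME"}
# The "distro" and "desktop" keys are only present when os_system is "Linux".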