forked from LBRYCommunity/lbry-sdk
Compare commits: master...update_fil (1 commit)
Commit: 40ebf9ee5d
399 changed files with 23,652 additions and 59,332 deletions
@@ -2,6 +2,6 @@
.tox
__pycache__
dist
lbry.egg-info
lbrynet.egg-info
docs
tests
1  .gitattributes (vendored)
@@ -0,0 +1 @@
/CHANGELOG.md merge=union
74  .github/ISSUE_TEMPLATE.md (vendored)
@@ -0,0 +1,74 @@
<!--
Thanks for reporting an issue to LBRY and helping us improve!

To make it possible for us to help you, please fill out below information carefully.

Before reporting any issues, please make sure that you're using the latest version.
- App: https://github.com/lbryio/lbry-desktop/releases
- Daemon: https://github.com/lbryio/lbry/releases

We are also available on Discord at https://chat.lbry.io
-->


## The Issue

In order to <achieve some value>,
as a <type of user>,
I want <some functionality>.


### Steps to reproduce
1.
2.
3.

### Expected behaviour
Tell us what should happen

### Actual behaviour
Tell us what happens instead


## System Configuration

<!-- For the app, this info is in the About section at the bottom of the Help page.
You can include a screenshot instead of typing it out -->

<!-- For the daemon, run:
curl 'http://localhost:5279' --data '{"method":"version"}'
and include the full output -->

- LBRY Daemon version:
- LBRY App version:
- LBRY Installation ID:
- Operating system:


## Anything Else
<!-- Include anything else that does not fit into the above sections -->


## Screenshots
<!-- If a screenshot would help explain the bug, please include one or two here -->


## Internal Use

### Acceptance Criteria
1.
2.
3.

### Definition of Done
- [ ] Tested against acceptance criteria
- [ ] Tested against the assumptions of user story
- [ ] The project builds without errors
- [ ] Unit tests are written and passing
- [ ] Tests on devices/browsers listed in the issue have passed
- [ ] QA performed & issues resolved
- [ ] Refactoring completed
- [ ] Any configuration or build changes documented
- [ ] Documentation updated
- [ ] Peer Code Review performed
38  .github/PULL_REQUEST_TEMPLATE.md (vendored)
@@ -0,0 +1,38 @@
## PR Checklist
Please check all that apply to this PR using "x":

- [ ] I have checked that this PR is not a duplicate of an existing PR (open, closed or merged)
- [ ] I have checked that this PR does not introduce a breaking change
- [ ] This PR introduces breaking changes and I have provided a detailed explanation below


## PR Type
What kind of change does this PR introduce?

Why is this change necessary?

<!-- Please check all that apply to this PR using "x". -->

- [ ] Bugfix
- [ ] Feature
- [ ] Breaking changes (bugfix or feature that introduces breaking changes)
- [ ] Code style update (formatting)
- [ ] Refactoring (no functional changes)
- [ ] Documentation changes
- [ ] Other - Please describe:

## Fixes

Issue Number: N/A


## What is the current behavior?


## What is the new behavior?


## Other information


<!-- If this PR contains a breaking change, please describe the impact and solution strategy for existing applications below. -->
206  .github/workflows/main.yml (vendored)
@@ -1,206 +0,0 @@
name: ci
|
||||
on: ["push", "pull_request", "workflow_dispatch"]
|
||||
|
||||
jobs:
|
||||
|
||||
lint:
|
||||
name: lint
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.9'
|
||||
- name: extract pip cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/.cache/pip
|
||||
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
|
||||
restore-keys: ${{ runner.os }}-pip-
|
||||
- run: pip install --user --upgrade pip wheel
|
||||
- run: pip install -e .[lint]
|
||||
- run: make lint
|
||||
|
||||
tests-unit:
|
||||
name: "tests / unit"
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-20.04
|
||||
- macos-latest
|
||||
- windows-latest
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.9'
|
||||
- name: set pip cache dir
|
||||
shell: bash
|
||||
run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
|
||||
- name: extract pip cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ env.PIP_CACHE_DIR }}
|
||||
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
|
||||
restore-keys: ${{ runner.os }}-pip-
|
||||
- id: os-name
|
||||
uses: ASzc/change-string-case-action@v5
|
||||
with:
|
||||
string: ${{ runner.os }}
|
||||
- run: python -m pip install --user --upgrade pip wheel
|
||||
- if: startsWith(runner.os, 'linux')
|
||||
run: pip install -e .[test]
|
||||
- if: startsWith(runner.os, 'linux')
|
||||
env:
|
||||
HOME: /tmp
|
||||
run: make test-unit-coverage
|
||||
- if: startsWith(runner.os, 'linux') != true
|
||||
run: pip install -e .[test]
|
||||
- if: startsWith(runner.os, 'linux') != true
|
||||
env:
|
||||
HOME: /tmp
|
||||
run: coverage run --source=lbry -m unittest tests/unit/test_conf.py
|
||||
- name: submit coverage report
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
COVERALLS_FLAG_NAME: tests-unit-${{ steps.os-name.outputs.lowercase }}
|
||||
COVERALLS_PARALLEL: true
|
||||
run: |
|
||||
pip install coveralls
|
||||
coveralls --service=github
|
||||
|
||||
tests-integration:
|
||||
name: "tests / integration"
|
||||
runs-on: ubuntu-20.04
|
||||
strategy:
|
||||
matrix:
|
||||
test:
|
||||
- datanetwork
|
||||
- blockchain
|
||||
- claims
|
||||
- takeovers
|
||||
- transactions
|
||||
- other
|
||||
steps:
|
||||
- name: Configure sysctl limits
|
||||
run: |
|
||||
sudo swapoff -a
|
||||
sudo sysctl -w vm.swappiness=1
|
||||
sudo sysctl -w fs.file-max=262144
|
||||
sudo sysctl -w vm.max_map_count=262144
|
||||
- name: Runs Elasticsearch
|
||||
uses: elastic/elastic-github-actions/elasticsearch@master
|
||||
with:
|
||||
stack-version: 7.12.1
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.9'
|
||||
- if: matrix.test == 'other'
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y --no-install-recommends ffmpeg
|
||||
- name: extract pip cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ./.tox
|
||||
key: tox-integration-${{ matrix.test }}-${{ hashFiles('setup.py') }}
|
||||
restore-keys: txo-integration-${{ matrix.test }}-
|
||||
- run: pip install tox coverage coveralls
|
||||
- if: matrix.test == 'claims'
|
||||
run: rm -rf .tox
|
||||
- run: tox -e ${{ matrix.test }}
|
||||
- name: submit coverage report
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
COVERALLS_FLAG_NAME: tests-integration-${{ matrix.test }}
|
||||
COVERALLS_PARALLEL: true
|
||||
run: |
|
||||
coverage combine tests
|
||||
coveralls --service=github
|
||||
|
||||
|
||||
coverage:
|
||||
needs: ["tests-unit", "tests-integration"]
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: finalize coverage report submission
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
pip install coveralls
|
||||
coveralls --service=github --finish
|
||||
|
||||
build:
|
||||
needs: ["lint", "tests-unit", "tests-integration"]
|
||||
name: "build / binary"
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-20.04
|
||||
- macos-latest
|
||||
- windows-latest
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.9'
|
||||
- id: os-name
|
||||
uses: ASzc/change-string-case-action@v5
|
||||
with:
|
||||
string: ${{ runner.os }}
|
||||
- name: set pip cache dir
|
||||
shell: bash
|
||||
run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
|
||||
- name: extract pip cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ env.PIP_CACHE_DIR }}
|
||||
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
|
||||
restore-keys: ${{ runner.os }}-pip-
|
||||
- run: pip install pyinstaller==4.6
|
||||
- run: pip install -e .
|
||||
- if: startsWith(github.ref, 'refs/tags/v')
|
||||
run: python docker/set_build.py
|
||||
- if: startsWith(runner.os, 'linux') || startsWith(runner.os, 'mac')
|
||||
name: Build & Run (Unix)
|
||||
run: |
|
||||
pyinstaller --onefile --name lbrynet lbry/extras/cli.py
|
||||
dist/lbrynet --version
|
||||
- if: startsWith(runner.os, 'windows')
|
||||
name: Build & Run (Windows)
|
||||
run: |
|
||||
pip install pywin32==301
|
||||
pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py
|
||||
dist/lbrynet.exe --version
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: lbrynet-${{ steps.os-name.outputs.lowercase }}
|
||||
path: dist/
|
||||
|
||||
release:
|
||||
name: "release"
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
needs: ["build"]
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/download-artifact@v2
|
||||
- name: upload binaries
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.RELEASE_API_TOKEN }}
|
||||
run: |
|
||||
pip install githubrelease
|
||||
chmod +x lbrynet-macos/lbrynet
|
||||
chmod +x lbrynet-linux/lbrynet
|
||||
zip --junk-paths lbrynet-mac.zip lbrynet-macos/lbrynet
|
||||
zip --junk-paths lbrynet-linux.zip lbrynet-linux/lbrynet
|
||||
zip --junk-paths lbrynet-windows.zip lbrynet-windows/lbrynet.exe
|
||||
ls -lh
|
||||
githubrelease release lbryio/lbry-sdk info ${GITHUB_REF#refs/tags/}
|
||||
githubrelease asset lbryio/lbry-sdk upload ${GITHUB_REF#refs/tags/} \
|
||||
lbrynet-mac.zip lbrynet-linux.zip lbrynet-windows.zip
|
||||
githubrelease release lbryio/lbry-sdk publish ${GITHUB_REF#refs/tags/}
|
||||
|
22  .github/workflows/release.yml (vendored)
@@ -1,22 +0,0 @@
name: slack

on:
  release:
    types: [published]

jobs:
  release:
    name: "slack notification"
    runs-on: ubuntu-20.04
    steps:
      - uses: LoveToKnow/slackify-markdown-action@v1.0.0
        id: markdown
        with:
          text: "There is a new SDK release: ${{github.event.release.html_url}}\n${{ github.event.release.body }}"
      - uses: slackapi/slack-github-action@v1.14.0
        env:
          CHANGELOG: '<!channel> ${{ steps.markdown.outputs.text }}'
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_RELEASE_BOT_WEBHOOK }}
        with:
          payload: '{"type": "mrkdwn", "text": ${{ toJSON(env.CHANGELOG) }} }'
19  .gitignore (vendored)
@@ -1,22 +1,13 @@
/.idea
/.DS_Store
.DS_Store
/build
/dist
/.tox
/.coverage*
/.idea
/.coverage
/lbry-venv
/venv
/lbry/blockchain

lbry.egg-info
lbrynet.egg-info
__pycache__
_trial_temp/
trending*.log

/tests/integration/claims/files
/tests/.coverage.*

/lbry/wallet/bin

/.vscode
/.gitignore
/tests/integration/files
||||
|
|
445  .pylintrc
@@ -0,0 +1,445 @@
[MASTER]
|
||||
|
||||
# Specify a configuration file.
|
||||
#rcfile=
|
||||
|
||||
# Python code to execute, usually for sys.path manipulation such as
|
||||
# pygtk.require().
|
||||
#init-hook=
|
||||
|
||||
# Add files or directories to the blacklist. They should be base names, not
|
||||
# paths.
|
||||
ignore=CVS,schema
|
||||
|
||||
# Add files or directories matching the regex patterns to the
|
||||
# blacklist. The regex matches against base names, not paths.
|
||||
# `\.#.*` - add emacs tmp files to the blacklist
|
||||
ignore-patterns=\.#.*
|
||||
|
||||
# Pickle collected data for later comparisons.
|
||||
persistent=yes
|
||||
|
||||
# List of plugins (as comma separated values of python modules names) to load,
|
||||
# usually to register additional checkers.
|
||||
load-plugins=
|
||||
|
||||
# Use multiple processes to speed up Pylint.
|
||||
jobs=1
|
||||
|
||||
# Allow loading of arbitrary C extensions. Extensions are imported into the
|
||||
# active Python interpreter and may run arbitrary code.
|
||||
unsafe-load-any-extension=no
|
||||
|
||||
# A comma-separated list of package or module names from where C extensions may
|
||||
# be loaded. Extensions are loading into the active Python interpreter and may
|
||||
# run arbitrary code
|
||||
# extension-pkg-whitelist=
|
||||
|
||||
# Allow optimization of some AST trees. This will activate a peephole AST
|
||||
# optimizer, which will apply various small optimizations. For instance, it can
|
||||
# be used to obtain the result of joining multiple strings with the addition
|
||||
# operator. Joining a lot of strings can lead to a maximum recursion error in
|
||||
# Pylint and this flag can prevent that. It has one side effect, the resulting
|
||||
# AST will be different than the one from reality.
|
||||
optimize-ast=no
|
||||
|
||||
|
||||
[MESSAGES CONTROL]
|
||||
|
||||
# Only show warnings with the listed confidence levels. Leave empty to show
|
||||
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
|
||||
confidence=
|
||||
|
||||
# Enable the message, report, category or checker with the given id(s). You can
|
||||
# either give multiple identifier separated by comma (,) or put this option
|
||||
# multiple time (only on the command line, not in the configuration file where
|
||||
# it should appear only once). See also the "--disable" option for examples.
|
||||
#enable=
|
||||
|
||||
# Disable the message, report, category or checker with the given id(s). You
|
||||
# can either give multiple identifiers separated by comma (,) or put this
|
||||
# option multiple times (only on the command line, not in the configuration
|
||||
# file where it should appear only once).You can also use "--disable=all" to
|
||||
# disable everything first and then re-enable specific checks. For example, if
|
||||
# you want to run only the similarities checker, you can use "--disable=all
|
||||
# --enable=similarities". If you want to run only the classes checker, but have
|
||||
# no Warning level messages displayed, use"--disable=all --enable=classes
|
||||
# --disable=W"
|
||||
disable=
|
||||
anomalous-backslash-in-string,
|
||||
arguments-differ,
|
||||
attribute-defined-outside-init,
|
||||
bad-continuation,
|
||||
bare-except,
|
||||
broad-except,
|
||||
cell-var-from-loop,
|
||||
consider-iterating-dictionary,
|
||||
dangerous-default-value,
|
||||
duplicate-code,
|
||||
fixme,
|
||||
global-statement,
|
||||
inherit-non-class,
|
||||
invalid-name,
|
||||
len-as-condition,
|
||||
locally-disabled,
|
||||
logging-not-lazy,
|
||||
missing-docstring,
|
||||
no-else-return,
|
||||
no-init,
|
||||
no-member,
|
||||
no-self-use,
|
||||
protected-access,
|
||||
redefined-builtin,
|
||||
redefined-outer-name,
|
||||
redefined-variable-type,
|
||||
relative-import,
|
||||
signature-differs,
|
||||
super-init-not-called,
|
||||
too-few-public-methods,
|
||||
too-many-arguments,
|
||||
too-many-branches,
|
||||
too-many-instance-attributes,
|
||||
too-many-lines,
|
||||
too-many-locals,
|
||||
too-many-nested-blocks,
|
||||
too-many-public-methods,
|
||||
too-many-return-statements,
|
||||
too-many-statements,
|
||||
trailing-newlines,
|
||||
undefined-loop-variable,
|
||||
ungrouped-imports,
|
||||
unnecessary-lambda,
|
||||
unused-argument,
|
||||
unused-variable,
|
||||
wildcard-import,
|
||||
wrong-import-order,
|
||||
wrong-import-position,
|
||||
deprecated-lambda,
|
||||
simplifiable-if-statement,
|
||||
unidiomatic-typecheck,
|
||||
global-at-module-level,
|
||||
inconsistent-return-statements,
|
||||
keyword-arg-before-vararg,
|
||||
assignment-from-no-return,
|
||||
useless-return,
|
||||
assignment-from-none,
|
||||
stop-iteration-return
|
||||
|
||||
|
||||
[REPORTS]
|
||||
|
||||
# Set the output format. Available formats are text, parseable, colorized, msvs
|
||||
# (visual studio) and html. You can also give a reporter class, eg
|
||||
# mypackage.mymodule.MyReporterClass.
|
||||
output-format=text
|
||||
|
||||
# Put messages in a separate file for each module / package specified on the
|
||||
# command line instead of printing them on stdout. Reports (if any) will be
|
||||
# written in a file name "pylint_global.[txt|html]".
|
||||
files-output=no
|
||||
|
||||
# Tells whether to display a full report or only the messages
|
||||
reports=no
|
||||
|
||||
# Python expression which should return a note less than 10 (10 is the highest
|
||||
# note). You have access to the variables errors warning, statement which
|
||||
# respectively contain the number of errors / warnings messages and the total
|
||||
# number of statements analyzed. This is used by the global evaluation report
|
||||
# (RP0004).
|
||||
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
|
||||
|
||||
# Template used to display messages. This is a python new-style format string
|
||||
# used to format the message information. See doc for all details
|
||||
#msg-template=
|
||||
|
||||
|
||||
[VARIABLES]
|
||||
|
||||
# Tells whether we should check for unused import in __init__ files.
|
||||
init-import=no
|
||||
|
||||
# A regular expression matching the name of dummy variables (i.e. expectedly
|
||||
# not used).
|
||||
dummy-variables-rgx=_$|dummy
|
||||
|
||||
# List of additional names supposed to be defined in builtins. Remember that
|
||||
# you should avoid to define new builtins when possible.
|
||||
additional-builtins=
|
||||
|
||||
# List of strings which can identify a callback function by name. A callback
|
||||
# name must start or end with one of those strings.
|
||||
callbacks=cb_,_cb
|
||||
|
||||
|
||||
[LOGGING]
|
||||
|
||||
# Logging modules to check that the string format arguments are in logging
|
||||
# function parameter format
|
||||
logging-modules=logging
|
||||
|
||||
|
||||
[BASIC]
|
||||
|
||||
# List of builtins function names that should not be used, separated by a comma
|
||||
bad-functions=map,filter,input
|
||||
|
||||
# Good variable names which should always be accepted, separated by a comma
|
||||
# allow `d` as its used frequently for deferred callback chains
|
||||
good-names=i,j,k,ex,Run,_,d
|
||||
|
||||
# Bad variable names which should always be refused, separated by a comma
|
||||
bad-names=foo,bar,baz,toto,tutu,tata
|
||||
|
||||
# Colon-delimited sets of names that determine each other's naming style when
|
||||
# the name regexes allow several styles.
|
||||
name-group=
|
||||
|
||||
# Include a hint for the correct naming format with invalid-name
|
||||
include-naming-hint=no
|
||||
|
||||
# Regular expression matching correct function names
|
||||
function-rgx=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Naming hint for function names
|
||||
function-name-hint=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Regular expression matching correct variable names
|
||||
variable-rgx=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Naming hint for variable names
|
||||
variable-name-hint=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Regular expression matching correct constant names
|
||||
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
|
||||
|
||||
# Naming hint for constant names
|
||||
const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
|
||||
|
||||
# Regular expression matching correct attribute names
|
||||
attr-rgx=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Naming hint for attribute names
|
||||
attr-name-hint=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Regular expression matching correct argument names
|
||||
argument-rgx=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Naming hint for argument names
|
||||
argument-name-hint=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Regular expression matching correct class attribute names
|
||||
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
|
||||
|
||||
# Naming hint for class attribute names
|
||||
class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
|
||||
|
||||
# Regular expression matching correct inline iteration names
|
||||
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
|
||||
|
||||
# Naming hint for inline iteration names
|
||||
inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
|
||||
|
||||
# Regular expression matching correct class names
|
||||
class-rgx=[A-Z_][a-zA-Z0-9]+$
|
||||
|
||||
# Naming hint for class names
|
||||
class-name-hint=[A-Z_][a-zA-Z0-9]+$
|
||||
|
||||
# Regular expression matching correct module names
|
||||
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
|
||||
|
||||
# Naming hint for module names
|
||||
module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
|
||||
|
||||
# Regular expression matching correct method names
|
||||
method-rgx=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Naming hint for method names
|
||||
method-name-hint=[a-z_][a-z0-9_]{2,30}$
|
||||
|
||||
# Regular expression which should only match function or class names that do
|
||||
# not require a docstring.
|
||||
no-docstring-rgx=^_
|
||||
|
||||
# Minimum line length for functions/classes that require docstrings, shorter
|
||||
# ones are exempt.
|
||||
docstring-min-length=-1
|
||||
|
||||
|
||||
[ELIF]
|
||||
|
||||
# Maximum number of nested blocks for function / method body
|
||||
max-nested-blocks=5
|
||||
|
||||
|
||||
[SPELLING]
|
||||
|
||||
# Spelling dictionary name. Available dictionaries: none. To make it working
|
||||
# install python-enchant package.
|
||||
spelling-dict=
|
||||
|
||||
# List of comma separated words that should not be checked.
|
||||
spelling-ignore-words=
|
||||
|
||||
# A path to a file that contains private dictionary; one word per line.
|
||||
spelling-private-dict-file=
|
||||
|
||||
# Tells whether to store unknown words to indicated private dictionary in
|
||||
# --spelling-private-dict-file option instead of raising a message.
|
||||
spelling-store-unknown-words=no
|
||||
|
||||
|
||||
[FORMAT]
|
||||
|
||||
# Maximum number of characters on a single line.
|
||||
max-line-length=120
|
||||
|
||||
# Regexp for a line that is allowed to be longer than the limit.
|
||||
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
|
||||
|
||||
# Allow the body of an if to be on the same line as the test if there is no
|
||||
# else.
|
||||
single-line-if-stmt=no
|
||||
|
||||
# List of optional constructs for which whitespace checking is disabled. `dict-
|
||||
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
|
||||
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
|
||||
# `empty-line` allows space-only lines.
|
||||
no-space-check=trailing-comma,dict-separator
|
||||
|
||||
# Maximum number of lines in a module
|
||||
max-module-lines=1000
|
||||
|
||||
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
|
||||
# tab).
|
||||
indent-string=' '
|
||||
|
||||
# Number of spaces of indent required inside a hanging or continued line.
|
||||
indent-after-paren=4
|
||||
|
||||
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
|
||||
expected-line-ending-format=
|
||||
|
||||
|
||||
[MISCELLANEOUS]
|
||||
|
||||
# List of note tags to take in consideration, separated by a comma.
|
||||
notes=FIXME,XXX,TODO
|
||||
|
||||
|
||||
[SIMILARITIES]
|
||||
|
||||
# Minimum lines number of a similarity.
|
||||
min-similarity-lines=4
|
||||
|
||||
# Ignore comments when computing similarities.
|
||||
ignore-comments=yes
|
||||
|
||||
# Ignore docstrings when computing similarities.
|
||||
ignore-docstrings=yes
|
||||
|
||||
# Ignore imports when computing similarities.
|
||||
ignore-imports=no
|
||||
|
||||
|
||||
[TYPECHECK]
|
||||
|
||||
# Tells whether missing members accessed in mixin class should be ignored. A
|
||||
# mixin class is detected if its name ends with "mixin" (case insensitive).
|
||||
ignore-mixin-members=yes
|
||||
|
||||
# List of module names for which member attributes should not be checked
|
||||
# (useful for modules/projects where namespaces are manipulated during runtime
|
||||
# and thus existing member attributes cannot be deduced by static analysis. It
|
||||
# supports qualified module names, as well as Unix pattern matching.
|
||||
ignored-modules=leveldb,distutils
|
||||
# Ignoring distutils because: https://github.com/PyCQA/pylint/issues/73
|
||||
|
||||
# List of classes names for which member attributes should not be checked
|
||||
# (useful for classes with attributes dynamically set). This supports can work
|
||||
# with qualified names.
|
||||
# ignored-classes=
|
||||
|
||||
|
||||
# List of members which are set dynamically and missed by pylint inference
|
||||
# system, and so shouldn't trigger E1101 when accessed. Python regular
|
||||
# expressions are accepted.
|
||||
generated-members=lbrynet.lbrynet_daemon.LBRYDaemon.Parameters
|
||||
|
||||
|
||||
[IMPORTS]
|
||||
|
||||
# Deprecated modules which should not be used, separated by a comma
|
||||
deprecated-modules=regsub,TERMIOS,Bastion,rexec
|
||||
|
||||
# Create a graph of every (i.e. internal and external) dependencies in the
|
||||
# given file (report RP0402 must not be disabled)
|
||||
import-graph=
|
||||
|
||||
# Create a graph of external dependencies in the given file (report RP0402 must
|
||||
# not be disabled)
|
||||
ext-import-graph=
|
||||
|
||||
# Create a graph of internal dependencies in the given file (report RP0402 must
|
||||
# not be disabled)
|
||||
int-import-graph=
|
||||
|
||||
|
||||
[DESIGN]
|
||||
|
||||
# Maximum number of arguments for function / method
|
||||
max-args=10
|
||||
|
||||
# Argument names that match this expression will be ignored. Default to name
|
||||
# with leading underscore
|
||||
ignored-argument-names=_.*
|
||||
|
||||
# Maximum number of locals for function / method body
|
||||
max-locals=15
|
||||
|
||||
# Maximum number of return / yield for function / method body
|
||||
max-returns=6
|
||||
|
||||
# Maximum number of branch for function / method body
|
||||
max-branches=12
|
||||
|
||||
# Maximum number of statements in function / method body
|
||||
max-statements=50
|
||||
|
||||
# Maximum number of parents for a class (see R0901).
|
||||
max-parents=8
|
||||
|
||||
# Maximum number of attributes for a class (see R0902).
|
||||
max-attributes=7
|
||||
|
||||
# Minimum number of public methods for a class (see R0903).
|
||||
min-public-methods=2
|
||||
|
||||
# Maximum number of public methods for a class (see R0904).
|
||||
max-public-methods=20
|
||||
|
||||
# Maximum number of boolean expressions in a if statement
|
||||
max-bool-expr=5
|
||||
|
||||
|
||||
[CLASSES]
|
||||
|
||||
# List of method names used to declare (i.e. assign) instance attributes.
|
||||
defining-attr-methods=__init__,__new__,setUp
|
||||
|
||||
# List of valid names for the first argument in a class method.
|
||||
valid-classmethod-first-arg=cls
|
||||
|
||||
# List of valid names for the first argument in a metaclass class method.
|
||||
valid-metaclass-classmethod-first-arg=mcs
|
||||
|
||||
# List of member names, which should be excluded from the protected access
|
||||
# warning.
|
||||
exclude-protected=_asdict,_fields,_replace,_source,_make
|
||||
|
||||
|
||||
[EXCEPTIONS]
|
||||
|
||||
# Exceptions that will emit a warning when being caught. Defaults to
|
||||
# "Exception"
|
||||
overgeneral-exceptions=Exception
|
139  .travis.yml
@@ -0,0 +1,139 @@
sudo: required
|
||||
dist: xenial
|
||||
language: python
|
||||
python: "3.7"
|
||||
|
||||
jobs:
|
||||
include:
|
||||
|
||||
- stage: code quality
|
||||
name: "pylint lbrynet"
|
||||
install:
|
||||
- pip install astroid pylint
|
||||
- pip install git+https://github.com/lbryio/torba.git#egg=torba
|
||||
- pip install -e .
|
||||
script: pylint lbrynet
|
||||
|
||||
- stage: test
|
||||
name: "Unit Tests"
|
||||
install:
|
||||
- pip install coverage
|
||||
- pip install git+https://github.com/lbryio/torba.git#egg=torba
|
||||
- pip install -e .
|
||||
script:
|
||||
- HOME=/tmp coverage run -p --source=lbrynet -m unittest discover -vv tests.unit
|
||||
after_success:
|
||||
- coverage combine
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
|
||||
- name: "Integration Tests"
|
||||
install:
|
||||
- pip install tox-travis coverage
|
||||
- pushd .. && git clone https://github.com/lbryio/torba.git && popd
|
||||
script: tox
|
||||
after_success:
|
||||
- coverage combine tests/
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
|
||||
- name: "Run Examples"
|
||||
install:
|
||||
- pip install coverage
|
||||
- pip install git+https://github.com/lbryio/torba.git#egg=torba
|
||||
- pip install -e .
|
||||
script:
|
||||
- HOME=/tmp coverage run -p --source=lbrynet scripts/generate_json_api.py
|
||||
after_success:
|
||||
- coverage combine
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
|
||||
- stage: build
|
||||
name: "Windows"
|
||||
language: generic
|
||||
services:
|
||||
- docker
|
||||
install:
|
||||
- docker pull lbry/pyinstaller34_32bits:py371
|
||||
script:
|
||||
- python scripts/set_build.py
|
||||
- docker run -v "$(pwd):/src/lbry" lbry/pyinstaller34_32bits:py371 lbry/scripts/wine_build.sh
|
||||
- sudo zip -j dist/lbrynet-windows.zip dist/lbrynet.exe
|
||||
deploy:
|
||||
provider: releases
|
||||
api_key: $GITHUB_OAUTH_TOKEN
|
||||
file: dist/lbrynet-windows.zip
|
||||
skip_cleanup: true
|
||||
overwrite: true
|
||||
draft: true
|
||||
on:
|
||||
tags: true
|
||||
addons:
|
||||
artifacts:
|
||||
working_dir: dist
|
||||
paths:
|
||||
- lbrynet-windows.zip
|
||||
target_paths:
|
||||
- /daemon/build-${TRAVIS_BUILD_NUMBER}_commit-${TRAVIS_COMMIT:0:7}_branch-${TRAVIS_BRANCH}$([ ! -z ${TRAVIS_TAG} ] && echo _tag-${TRAVIS_TAG})
|
||||
|
||||
- &build
|
||||
name: "Linux"
|
||||
env: OS=linux
|
||||
install:
|
||||
- pip3 install pyinstaller
|
||||
- pip3 install git+https://github.com/lbryio/torba.git
|
||||
- python3 scripts/set_build.py
|
||||
- pip3 install -e .
|
||||
script:
|
||||
- pyinstaller -F -n lbrynet lbrynet/extras/cli.py
|
||||
- chmod +x dist/lbrynet
|
||||
- zip -j dist/lbrynet-${OS}.zip dist/lbrynet
|
||||
- ./dist/lbrynet --version
|
||||
deploy:
|
||||
provider: releases
|
||||
api_key: $GITHUB_OAUTH_TOKEN
|
||||
file: dist/lbrynet-${OS}.zip
|
||||
skip_cleanup: true
|
||||
overwrite: true
|
||||
draft: true
|
||||
on:
|
||||
tags: true
|
||||
addons:
|
||||
artifacts:
|
||||
working_dir: dist
|
||||
paths:
|
||||
- lbrynet-${OS}.zip
|
||||
# artifact uploader thinks lbrynet is a directory, https://github.com/travis-ci/artifacts/issues/78
|
||||
target_paths:
|
||||
- /daemon/build-${TRAVIS_BUILD_NUMBER}_commit-${TRAVIS_COMMIT:0:7}_branch-${TRAVIS_BRANCH}$([ ! -z ${TRAVIS_TAG} ] && echo _tag-${TRAVIS_TAG})
|
||||
|
||||
- <<: *build
|
||||
name: "Mac"
|
||||
os: osx
|
||||
osx_image: xcode8.3
|
||||
language: generic
|
||||
env: OS=mac
|
||||
cache: false
|
||||
before_install:
|
||||
- brew upgrade python || true
|
||||
- brew upgrade python || true
|
||||
- if: tag IS present
|
||||
stage: build
|
||||
name: "Wallet Server Docker Image - Tagged Release"
|
||||
script:
|
||||
- set -e
|
||||
- echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
|
||||
- travis_retry docker build -t lbry/wallet-server:$TRAVIS_TAG -f scripts/Dockerfile.wallet_server .
|
||||
- docker push lbry/wallet-server:$TRAVIS_TAG
|
||||
- if: tag IS blank AND branch = master AND NOT type IN (pull_request)
|
||||
stage: build
|
||||
name: "Wallet Server Docker Image - Master"
|
||||
script:
|
||||
- set -e
|
||||
- echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
|
||||
- travis_retry docker build -t lbry/wallet-server:master -f scripts/Dockerfile.wallet_server .
|
||||
- docker push lbry/wallet-server:master
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- $HOME/.cache/pip
|
||||
- $HOME/Library/Caches/pip
|
||||
- $TRAVIS_BUILD_DIR/.tox
|
1019  CHANGELOG.md
File diff suppressed because it is too large.
6  Dangerfile
@@ -0,0 +1,6 @@
# Add a CHANGELOG entry for app changes
has_app_changes = !git.modified_files.grep(/lbrynet/).empty?
if !git.modified_files.include?("CHANGELOG.md") && has_app_changes
  fail("Please include a CHANGELOG entry.")
  message "See http://keepachangelog.com/en/0.3.0/ for details on good changelog guidelines"
end
161  INSTALL.md
@@ -1,6 +1,6 @@
# Installing LBRY

If only the JSON-RPC API server is needed, the recommended way to install LBRY is to use a pre-built binary. We provide binaries for all major operating systems. See the [README](README.md)!
If only the JSON-RPC API server is needed, the recommended way to install LBRY is to use a pre-built binary. We provide binaries for all major operating systems. See the [README](README.md).

These instructions are for installing LBRY from source, which is recommended if you are interested in doing development work or LBRY is not available on your operating system (godspeed, TempleOS users).
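Editor's note: the pre-built daemon exposes the same JSON-RPC interface referenced throughout this repository. As a minimal sketch (assuming a daemon is already running locally, and reusing the port and method from the curl command in .github/ISSUE_TEMPLATE.md), you can confirm it is reachable like this:

```bash
# Ask the running daemon for its version over JSON-RPC (default port 5279).
# Mirrors the curl call shown in the issue template.
curl 'http://localhost:5279' --data '{"method":"version"}'
```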
@@ -9,47 +9,29 @@ Here's a video walkthrough of this setup, which is itself hosted by the LBRY net

## Prerequisites

Running `lbrynet` from source requires Python 3.7. Get the installer for your OS [here](https://www.python.org/downloads/release/python-370/).
Running `lbrynet` from source requires Python 3.6 or higher (3.7 is preferred). Get the installer for your OS [here](https://www.python.org/downloads/release/python-370/)

After installing Python 3.7, you'll need to install some additional libraries depending on your operating system.

Because of [issue #2769](https://github.com/lbryio/lbry-sdk/issues/2769), at the moment the `lbrynet` daemon will only work correctly with Python 3.7. If Python 3.8+ is used, the daemon will start but the RPC server may not accept messages, returning the following:
```
Could not connect to daemon. Are you sure it's running?
```
After installing python 3, you'll need to install some additional libraries depending on your operating system.

### macOS

macOS users will need to install [xcode command line tools](https://developer.xamarin.com/guides/testcloud/calabash/configuring/osx/install-xcode-command-line-tools/) and [homebrew](http://brew.sh/).

These environment variables also need to be set:
```
PYTHONUNBUFFERED=1
EVENT_NOKQUEUE=1
```

Remaining dependencies can then be installed by running:

```
brew install python protobuf
```

Assistance installing Python3: https://docs.python-guide.org/starting/install3/osx/.
Assistance installing Python3: https://docs.python-guide.org/starting/install3/osx/

### Linux

On Ubuntu (we recommend 18.04 or 20.04), install the following:
```
sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt-get update
sudo apt-get install build-essential python3.7 python3.7-dev git python3.7-venv libssl-dev python-protobuf
```
On Ubuntu (we recommend 18.04), install the following:

The [deadsnakes PPA](https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa) provides Python 3.7 for those Ubuntu distributions that no longer have it in their official repositories.
```
sudo apt-get install build-essential python3.7 python3.7-dev git python3-venv libssl-dev python-protobuf
```

On Raspbian, you will also need to install `python-pyparsing`.
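Editor's note: given the Python 3.7 requirement above, a quick sanity check before continuing can save a confusing failure later. This is an extra suggestion, not part of the original instructions:

```bash
# Confirm a Python 3.7 interpreter is on PATH before creating the virtual environment.
python3.7 --version || echo "python3.7 not found - install it first (see Prerequisites above)"
```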
@@ -57,121 +39,38 @@ If you're running another Linux distro, install the equivalent of the above packages.

## Installation

### Linux/Mac
To install:

Clone the repository:
```bash
git clone https://github.com/lbryio/lbry-sdk.git
cd lbry-sdk
```
```
git clone https://github.com/lbryio/lbry.git
cd lbry

Create a Python virtual environment for lbry-sdk:
```bash
python3.7 -m venv lbry-venv
```
Creating venv:
python -m venv lbry-venv

Activating lbry-venv on Linux/Mac:
source lbry-venv/bin/activate

Activating lbry-venv on Windows:
lbry-venv\Scripts\activate

Activate virtual environment:
```bash
source lbry-venv/bin/activate
```
python --version # Python 2 is not supported. Make sure you're on Python 3.7

Make sure you're on Python 3.7+ as default in the virtual environment:
```bash
python --version
```
pip install -e .
```

Install packages:
```bash
make install
```

If you are on Linux and using PyCharm, generate initial configs:
```bash
make idea
```

To verify your installation, `which lbrynet` should return a path inside of the `lbry-venv` folder.
```bash
(lbry-venv) $ which lbrynet
/opt/lbry-sdk/lbry-venv/bin/lbrynet
```

To exit the virtual environment simply use the command `deactivate`.

### Windows

Clone the repository:
```bash
git clone https://github.com/lbryio/lbry-sdk.git
cd lbry-sdk
```

Create a Python virtual environment for lbry-sdk:
```bash
python -m venv lbry-venv
```

Activate virtual environment:
```bash
lbry-venv\Scripts\activate
```

Install packages:
```bash
pip install -e .
```
To verify your installation, `which lbrynet` should return a path inside of the `lbry-venv` folder created by the `virtualenv` command.
## Run the tests
### Elasticsearch

For running integration tests, Elasticsearch is required to be available at localhost:9200/

The easiest way to start it is using docker with:
```bash
make elastic-docker
```

Alternative installation methods are available [at Elasticsearch website](https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html).

To run the unit and integration tests from the repo directory:
```
python -m unittest discover tests.unit
python -m unittest discover tests.integration
```
To run the unit tests from the repo directory:
```
trial --reactor=asyncio tests.unit
```
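Editor's note: before kicking off the integration suite it can help to confirm Elasticsearch is actually listening on the expected port. A small optional check, not part of the original instructions:

```bash
# Elasticsearch answers plain HTTP on port 9200 with a JSON banner when it is up.
curl -s http://localhost:9200 || echo "Elasticsearch is not reachable on localhost:9200"
```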
## Usage

To start the API server:
```
lbrynet start
```
`lbrynet start`

Whenever the code inside [lbry-sdk/lbry](./lbry) is modified we should run `make install` to recompile the `lbrynet` executable with the newest code.

## Development

When developing, remember to enter the environment, and if you wish start the server interactively.
```bash
$ source lbry-venv/bin/activate

(lbry-venv) $ python lbry/extras/cli.py start
```

Parameters can be passed in the same way.
```bash
(lbry-venv) $ python lbry/extras/cli.py wallet balance
```
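Editor's note: once `make install` has been run, the same commands can also be exercised through the installed `lbrynet` entry point instead of calling `cli.py` directly. A small illustrative sketch, assuming the virtual environment is active:

```bash
# Equivalent calls via the installed executable rather than lbry/extras/cli.py
(lbry-venv) $ lbrynet start
(lbry-venv) $ lbrynet wallet balance
```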
If a Python debugger (`pdb` or `ipdb`) is installed we can also start it in this way, set up break points, and step through the code.
```bash
(lbry-venv) $ pip install ipdb

(lbry-venv) $ ipdb lbry/extras/cli.py
```

Happy hacking!
2  LICENSE
@@ -1,6 +1,6 @@
The MIT License (MIT)

Copyright (c) 2015-2022 LBRY Inc
Copyright (c) 2015-2019 LBRY Inc

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,
@@ -1,4 +0,0 @@
include README.md
include CHANGELOG.md
include LICENSE
recursive-include lbry *.txt *.py
26  Makefile
@@ -1,26 +0,0 @@
.PHONY: install tools lint test test-unit test-unit-coverage test-integration idea

install:
	pip install -e .

lint:
	pylint --rcfile=setup.cfg lbry
	#mypy --ignore-missing-imports lbry

test: test-unit test-integration

test-unit:
	python -m unittest discover tests.unit

test-unit-coverage:
	coverage run --source=lbry -m unittest discover -vv tests.unit

test-integration:
	tox

idea:
	mkdir -p .idea
	cp -r scripts/idea/* .idea

elastic-docker:
	docker run -d -v lbryhub:/usr/share/elasticsearch/data -p 9200:9200 -p 9300:9300 -e"ES_JAVA_OPTS=-Xms512m -Xmx512m" -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.12.1
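Editor's note: as a small usage sketch (my suggestion, assuming the Makefile targets above and an activated virtual environment), a typical local loop looks like this:

```bash
# Install in editable mode, lint, then run the unit suite -- all via the Makefile targets above.
make install
make lint
make test-unit
```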
22  README.md
@@ -1,19 +1,19 @@
# <img src="https://raw.githubusercontent.com/lbryio/lbry-sdk/master/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![build](https://github.com/lbryio/lbry-sdk/actions/workflows/main.yml/badge.svg)](https://github.com/lbryio/lbry-sdk/actions/workflows/main.yml) [![coverage](https://coveralls.io/repos/github/lbryio/lbry-sdk/badge.svg)](https://coveralls.io/github/lbryio/lbry-sdk)
# <img src="https://raw.githubusercontent.com/lbryio/lbry/master/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![Build Status](https://travis-ci.org/lbryio/lbry.svg?branch=master)](https://travis-ci.org/lbryio/lbry) [![Test Coverage](https://codecov.io/gh/lbryio/lbry/branch/master/graph/badge.svg)](https://codecov.io/gh/lbryio/lbry)

LBRY is a decentralized peer-to-peer protocol for publishing and accessing digital content. It utilizes the [LBRY blockchain](https://github.com/lbryio/lbrycrd) as a global namespace and database of digital content. Blockchain entries contain searchable content metadata, identities, rights and access rules. LBRY also provides a data network that consists of peers (seeders) uploading and downloading data from other peers, possibly in exchange for payments, as well as a distributed hash table used by peers to discover other peers.

LBRY SDK for Python is currently the most fully featured implementation of the LBRY Network protocols and includes many useful components and tools for building decentralized applications. Primary features and components include:
LBRY SDK for Python is currently the most full featured implementation of the LBRY Network protocols and includes many useful components and tools for building decentralized applications. Primary features and components:

* Built on Python 3.7 and `asyncio`.
* Kademlia DHT (Distributed Hash Table) implementation for finding peers to download from and announcing to peers what we have to host ([lbry.dht](https://github.com/lbryio/lbry-sdk/tree/master/lbry/dht)).
* Blob exchange protocol for transferring encrypted blobs of content and negotiating payments ([lbry.blob_exchange](https://github.com/lbryio/lbry-sdk/tree/master/lbry/blob_exchange)).
* Protobuf schema for encoding and decoding metadata stored on the blockchain ([lbry.schema](https://github.com/lbryio/lbry-sdk/tree/master/lbry/schema)).
* Wallet implementation for the LBRY blockchain ([lbry.wallet](https://github.com/lbryio/lbry-sdk/tree/master/lbry/wallet)).
* Daemon with a JSON-RPC API to ease building end user applications in any language and for automating various tasks ([lbry.extras.daemon](https://github.com/lbryio/lbry-sdk/tree/master/lbry/extras/daemon)).
* Built on Python 3.7+ and `asyncio`.
* Kademlia DHT (Distributed Hash Table) implementation for finding peers to download from and announcing to peers what we have to host ([lbrynet.dht](https://github.com/lbryio/lbry/tree/master/lbrynet/dht)).
* Blob exchange protocol for transferring encrypted blobs of content and negotiating payments ([lbrynet.blob_exchange](https://github.com/lbryio/lbry/tree/master/lbrynet/blob_exchange)).
* Protobuf schema for encoding and decoding metadata stored on the blockchain ([lbrynet.schema](https://github.com/lbryio/lbry/tree/master/lbrynet/schema)).
* Wallet implementation for the LBRY blockchain ([lbrynet.wallet](https://github.com/lbryio/lbry/tree/master/lbrynet/wallet)).
* Daemon with a JSON-RPC API to ease building end user applications in any language and for automating various tasks ([lbrynet.extras.daemon](https://github.com/lbryio/lbry/tree/master/lbrynet/extras/daemon)).

## Installation

Our [releases page](https://github.com/lbryio/lbry-sdk/releases) contains pre-built binaries of the latest release, pre-releases, and past releases for macOS, Debian-based Linux, and Windows. [Automated travis builds](http://build.lbry.io/daemon/) are also available for testing.
Our [releases page](https://github.com/lbryio/lbry/releases) contains pre-built binaries of the latest release, pre-releases, and past releases for macOS, Debian-based Linux, and Windows. [Automated travis builds](http://build.lbry.io/daemon/) are also available for testing.

## Usage

@@ -41,7 +41,7 @@ This project is MIT licensed. For the full license, see [LICENSE](LICENSE).

## Security

We take security seriously. Please contact security@lbry.com regarding any security issues. [Our PGP key is here](https://lbry.com/faq/pgp-key) if you need it.
We take security seriously. Please contact security@lbry.com regarding any security issues. [Our GPG key is here](https://lbry.io/faq/gpg-key) if you need it.

## Contact

@@ -53,4 +53,4 @@ The documentation for the API can be found [here](https://lbry.tech/api/sdk).

Daemon defaults, ports, and other settings are documented [here](https://lbry.tech/resources/daemon-settings).

Settings can be configured using a daemon-settings.yml file. An example can be found [here](https://github.com/lbryio/lbry-sdk/blob/master/example_daemon_settings.yml).
Settings can be configured using a daemon-settings.yml file. An example can be found [here](https://github.com/lbryio/lbry/blob/master/example_daemon_settings.yml).
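Editor's note: purely as an illustrative sketch (the keys below are borrowed from the docker/webconf.yaml included later in this diff, not from example_daemon_settings.yml, and the file name is hypothetical), writing a minimal settings file might look like this:

```bash
# Hypothetical minimal daemon settings; adjust paths and ports for your own setup.
cat > daemon_settings.yml <<'EOF'
api: "0.0.0.0:5279"
streaming_server: "0.0.0.0:5280"
save_files: false
save_blobs: false
data_dir: /tmp
download_dir: /tmp
wallet_dir: /tmp
EOF
```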
@@ -1,9 +0,0 @@
# Security Policy

## Supported Versions

While we are not at v1.0 yet, only the latest release will be supported.

## Reporting a Vulnerability

See https://lbry.com/faq/security
@ -1,43 +0,0 @@
|
|||
FROM debian:10-slim
|
||||
|
||||
ARG user=lbry
|
||||
ARG projects_dir=/home/$user
|
||||
ARG db_dir=/database
|
||||
|
||||
ARG DOCKER_TAG
|
||||
ARG DOCKER_COMMIT=docker
|
||||
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get -y --no-install-recommends install \
|
||||
wget \
|
||||
automake libtool \
|
||||
tar unzip \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
libleveldb-dev \
|
||||
python3.7 \
|
||||
python3-dev \
|
||||
python3-pip \
|
||||
python3-wheel \
|
||||
python3-setuptools && \
|
||||
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
|
||||
|
||||
COPY . $projects_dir
|
||||
RUN chown -R $user:$user $projects_dir
|
||||
RUN mkdir -p $db_dir
|
||||
RUN chown -R $user:$user $db_dir
|
||||
|
||||
USER $user
|
||||
WORKDIR $projects_dir
|
||||
|
||||
RUN python3 -m pip install -U setuptools pip
|
||||
RUN make install
|
||||
RUN python3 docker/set_build.py
|
||||
RUN rm ~/.cache -rf
|
||||
VOLUME $db_dir
|
||||
ENTRYPOINT ["python3", "scripts/dht_node.py"]
|
||||
|
|
@ -1,56 +0,0 @@
|
|||
FROM debian:10-slim
|
||||
|
||||
ARG user=lbry
|
||||
ARG db_dir=/database
|
||||
ARG projects_dir=/home/$user
|
||||
|
||||
ARG DOCKER_TAG
|
||||
ARG DOCKER_COMMIT=docker
|
||||
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get -y --no-install-recommends install \
|
||||
wget \
|
||||
tar unzip \
|
||||
build-essential \
|
||||
automake libtool \
|
||||
pkg-config \
|
||||
libleveldb-dev \
|
||||
python3.7 \
|
||||
python3-dev \
|
||||
python3-pip \
|
||||
python3-wheel \
|
||||
python3-cffi \
|
||||
python3-setuptools && \
|
||||
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
|
||||
RUN mkdir -p $db_dir
|
||||
RUN chown -R $user:$user $db_dir
|
||||
|
||||
COPY . $projects_dir
|
||||
RUN chown -R $user:$user $projects_dir
|
||||
|
||||
USER $user
|
||||
WORKDIR $projects_dir
|
||||
|
||||
RUN pip install uvloop
|
||||
RUN make install
|
||||
RUN python3 docker/set_build.py
|
||||
RUN rm ~/.cache -rf
|
||||
|
||||
# entry point
|
||||
ARG host=0.0.0.0
|
||||
ARG tcp_port=50001
|
||||
ARG daemon_url=http://lbry:lbry@localhost:9245/
|
||||
VOLUME $db_dir
|
||||
ENV TCP_PORT=$tcp_port
|
||||
ENV HOST=$host
|
||||
ENV DAEMON_URL=$daemon_url
|
||||
ENV DB_DIRECTORY=$db_dir
|
||||
ENV MAX_SESSIONS=1000000000
|
||||
ENV MAX_SEND=1000000000000000000
|
||||
ENV EVENT_LOOP_POLICY=uvloop
|
||||
COPY ./docker/wallet_server_entrypoint.sh /entrypoint.sh
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
@ -1,45 +0,0 @@
|
|||
FROM debian:10-slim
|
||||
|
||||
ARG user=lbry
|
||||
ARG downloads_dir=/database
|
||||
ARG projects_dir=/home/$user
|
||||
|
||||
ARG DOCKER_TAG
|
||||
ARG DOCKER_COMMIT=docker
|
||||
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get -y --no-install-recommends install \
|
||||
wget \
|
||||
automake libtool \
|
||||
tar unzip \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
libleveldb-dev \
|
||||
python3.7 \
|
||||
python3-dev \
|
||||
python3-pip \
|
||||
python3-wheel \
|
||||
python3-setuptools && \
|
||||
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
|
||||
RUN mkdir -p $downloads_dir
|
||||
RUN chown -R $user:$user $downloads_dir
|
||||
|
||||
COPY . $projects_dir
|
||||
RUN chown -R $user:$user $projects_dir
|
||||
|
||||
USER $user
|
||||
WORKDIR $projects_dir
|
||||
|
||||
RUN pip install uvloop
|
||||
RUN make install
|
||||
RUN python3 docker/set_build.py
|
||||
RUN rm ~/.cache -rf
|
||||
|
||||
# entry point
|
||||
VOLUME $downloads_dir
|
||||
COPY ./docker/webconf.yaml /webconf.yaml
|
||||
ENTRYPOINT ["/home/lbry/.local/bin/lbrynet", "start", "--config=/webconf.yaml"]
|
|
@@ -1,9 +0,0 @@
### How to run with docker-compose
1. Edit config file and after that fix permissions with
```
sudo chown -R 999:999 webconf.yaml
```
2. Start SDK with
```
docker-compose up -d
```
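Editor's note: a small optional follow-up, not part of the original instructions, to confirm the stack came up:

```bash
# Check that the containers are running and tail their logs.
docker-compose ps
docker-compose logs -f
```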
@ -1,49 +0,0 @@
|
|||
version: "3"
|
||||
|
||||
volumes:
|
||||
wallet_server:
|
||||
es01:
|
||||
|
||||
services:
|
||||
wallet_server:
|
||||
depends_on:
|
||||
- es01
|
||||
image: lbry/wallet-server:${WALLET_SERVER_TAG:-latest-release}
|
||||
restart: always
|
||||
network_mode: host
|
||||
ports:
|
||||
- "50001:50001" # rpc port
|
||||
- "2112:2112" # uncomment to enable prometheus
|
||||
volumes:
|
||||
- "wallet_server:/database"
|
||||
environment:
|
||||
- DAEMON_URL=http://lbry:lbry@127.0.0.1:9245
|
||||
- MAX_QUERY_WORKERS=4
|
||||
- CACHE_MB=1024
|
||||
- CACHE_ALL_TX_HASHES=
|
||||
- CACHE_ALL_CLAIM_TXOS=
|
||||
- MAX_SEND=1000000000000000000
|
||||
- MAX_RECEIVE=1000000000000000000
|
||||
- MAX_SESSIONS=100000
|
||||
- HOST=0.0.0.0
|
||||
- TCP_PORT=50001
|
||||
- PROMETHEUS_PORT=2112
|
||||
- FILTERING_CHANNEL_IDS=770bd7ecba84fd2f7607fb15aedd2b172c2e153f 95e5db68a3101df19763f3a5182e4b12ba393ee8
|
||||
- BLOCKING_CHANNEL_IDS=dd687b357950f6f271999971f43c785e8067c3a9 06871aa438032244202840ec59a469b303257cad b4a2528f436eca1bf3bf3e10ff3f98c57bd6c4c6
|
||||
es01:
|
||||
image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0
|
||||
container_name: es01
|
||||
environment:
|
||||
- node.name=es01
|
||||
- discovery.type=single-node
|
||||
- indices.query.bool.max_clause_count=8192
|
||||
- bootstrap.memory_lock=true
|
||||
- "ES_JAVA_OPTS=-Xms4g -Xmx4g" # no more than 32, remember to disable swap
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1
|
||||
hard: -1
|
||||
volumes:
|
||||
- es01:/usr/share/elasticsearch/data
|
||||
ports:
|
||||
- 127.0.0.1:9200:9200
|
|
@ -1,9 +0,0 @@
|
|||
version: '3'
|
||||
services:
|
||||
websdk:
|
||||
image: vshyba/websdk
|
||||
ports:
|
||||
- '5279:5279'
|
||||
- '5280:5280'
|
||||
volumes:
|
||||
- ./webconf.yaml:/webconf.yaml
|
|
@ -1,7 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
||||
cd "$DIR/../.." ## make sure we're in the right place. Docker Hub screws this up sometimes
|
||||
echo "docker build dir: $(pwd)"
|
||||
|
||||
docker build --build-arg DOCKER_TAG=$DOCKER_TAG --build-arg DOCKER_COMMIT=$SOURCE_COMMIT -f $DOCKERFILE_PATH -t $IMAGE_NAME .
|
|
@ -1,11 +0,0 @@
|
|||
# requires powershell and .NET 4+. see https://chocolatey.org/install for more info.
|
||||
|
||||
$chocoVersion = powershell choco -v
|
||||
if(-not($chocoVersion)){
|
||||
Write-Output "Chocolatey is not installed, installing now"
|
||||
Write-Output "IF YOU KEEP GETTING THIS MESSAGE ON EVERY BUILD, TRY RESTARTING THE GITLAB RUNNER SO IT GETS CHOCO INTO IT'S ENV"
|
||||
Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
|
||||
}
|
||||
else{
|
||||
Write-Output "Chocolatey version $chocoVersion is already installed"
|
||||
}
|
|
@ -1,44 +0,0 @@
|
|||
import sys
|
||||
import os
|
||||
import re
|
||||
import logging
|
||||
import lbry.build_info as build_info_mod
|
||||
|
||||
log = logging.getLogger()
|
||||
log.addHandler(logging.StreamHandler())
|
||||
log.setLevel(logging.DEBUG)
|
||||
|
||||
|
||||
def _check_and_set(d: dict, key: str, value: str):
|
||||
try:
|
||||
d[key]
|
||||
except KeyError:
|
||||
raise Exception(f"{key} var does not exist in {build_info_mod.__file__}")
|
||||
d[key] = value
|
||||
|
||||
|
||||
def main():
|
||||
build_info = {item: build_info_mod.__dict__[item] for item in dir(build_info_mod) if not item.startswith("__")}
|
||||
|
||||
commit_hash = os.getenv('DOCKER_COMMIT', os.getenv('GITHUB_SHA'))
|
||||
if commit_hash is None:
|
||||
raise ValueError("Commit hash not found in env vars")
|
||||
_check_and_set(build_info, "COMMIT_HASH", commit_hash[:6])
|
||||
|
||||
docker_tag = os.getenv('DOCKER_TAG')
|
||||
if docker_tag:
|
||||
_check_and_set(build_info, "DOCKER_TAG", docker_tag)
|
||||
_check_and_set(build_info, "BUILD", "docker")
|
||||
else:
|
||||
if re.match(r'refs/tags/v\d+\.\d+\.\d+$', str(os.getenv('GITHUB_REF'))):
|
||||
_check_and_set(build_info, "BUILD", "release")
|
||||
else:
|
||||
_check_and_set(build_info, "BUILD", "qa")
|
||||
|
||||
log.debug("build info: %s", ", ".join([f"{k}={v}" for k, v in build_info.items()]))
|
||||
with open(build_info_mod.__file__, 'w') as f:
|
||||
f.write("\n".join([f"{k} = \"{v}\"" for k, v in build_info.items()]) + "\n")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
|
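For orientation, this is roughly what the script above leaves behind in lbry/build_info.py after a Docker build; the values are hypothetical (assumed env: DOCKER_TAG=v0.113.0, DOCKER_COMMIT=abc123f) and only the keys the script touches are shown:

# hypothetical generated lbry/build_info.py under the assumed env values above
BUILD = "docker"
COMMIT_HASH = "abc123"
DOCKER_TAG = "v0.113.0"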
@ -1,25 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# entrypoint for wallet server Docker image
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
SNAPSHOT_URL="${SNAPSHOT_URL:-}" # off by default; the latest snapshot is at https://lbry.com/snapshot/wallet
|
||||
|
||||
if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/lbry-leveldb ]]; then
|
||||
files="$(ls)"
|
||||
echo "Downloading wallet snapshot from $SNAPSHOT_URL"
|
||||
wget --no-verbose --trust-server-names --content-disposition "$SNAPSHOT_URL"
|
||||
echo "Extracting snapshot..."
|
||||
filename="$(grep -vf <(echo "$files") <(ls))" # finds the file that was not there before
|
||||
case "$filename" in
|
||||
*.tgz|*.tar.gz|*.tar.bz2 ) tar xvf "$filename" --directory /database ;;
|
||||
*.zip ) unzip "$filename" -d /database ;;
|
||||
* ) echo "Don't know how to extract ${filename}. SNAPSHOT COULD NOT BE LOADED" && exit 1 ;;
|
||||
esac
|
||||
rm "$filename"
|
||||
fi
|
||||
|
||||
/home/lbry/.local/bin/lbry-hub-elastic-sync
|
||||
echo 'starting server'
|
||||
/home/lbry/.local/bin/lbry-hub "$@"
|
|
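The grep -vf trick in the snapshot branch above simply asks which single file appeared after the download; a Python equivalent of that step (a sketch, not part of the image) would be:

import os

def newly_downloaded(files_before: set, directory: str = ".") -> str:
    """Return the one filename that was not present before the download."""
    new_files = set(os.listdir(directory)) - files_before
    if len(new_files) != 1:
        raise RuntimeError(f"expected exactly one new file, found {sorted(new_files)}")
    return new_files.pop()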
@ -1,9 +0,0 @@
|
|||
allowed_origin: "*"
|
||||
max_key_fee: "0.0 USD"
|
||||
save_files: false
|
||||
save_blobs: false
|
||||
streaming_server: "0.0.0.0:5280"
|
||||
api: "0.0.0.0:5279"
|
||||
data_dir: /tmp
|
||||
download_dir: /tmp
|
||||
wallet_dir: /tmp
|
307
docs/404.html
Normal file
|
@ -0,0 +1,307 @@
|
|||
|
||||
|
||||
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html lang="en" class="no-js">
|
||||
<head>
|
||||
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1">
|
||||
<meta http-equiv="x-ua-compatible" content="ie=edge">
|
||||
|
||||
|
||||
|
||||
|
||||
<meta name="lang:clipboard.copy" content="Copy to clipboard">
|
||||
|
||||
<meta name="lang:clipboard.copied" content="Copied to clipboard">
|
||||
|
||||
<meta name="lang:search.language" content="en">
|
||||
|
||||
<meta name="lang:search.pipeline.stopwords" content="True">
|
||||
|
||||
<meta name="lang:search.pipeline.trimmer" content="True">
|
||||
|
||||
<meta name="lang:search.result.none" content="No matching documents">
|
||||
|
||||
<meta name="lang:search.result.one" content="1 matching document">
|
||||
|
||||
<meta name="lang:search.result.other" content="# matching documents">
|
||||
|
||||
<meta name="lang:search.tokenizer" content="[\s\-]+">
|
||||
|
||||
<link rel="shortcut icon" href="/assets/images/favicon.png">
|
||||
<meta name="generator" content="mkdocs-0.17.3, mkdocs-material-2.7.0">
|
||||
|
||||
|
||||
|
||||
<title>LBRY</title>
|
||||
|
||||
|
||||
|
||||
<link rel="stylesheet" href="/assets/stylesheets/application.78aab2dc.css">
|
||||
|
||||
<link rel="stylesheet" href="/assets/stylesheets/application-palette.6079476c.css">
|
||||
|
||||
|
||||
|
||||
<script src="/assets/javascripts/modernizr.1aa3b519.js"></script>
|
||||
|
||||
|
||||
<link href="https://fonts.gstatic.com" rel="preconnect" crossorigin>
|
||||
|
||||
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,400i,700|Roboto+Mono">
|
||||
<style>body,input{font-family:"Roboto","Helvetica Neue",Helvetica,Arial,sans-serif}code,kbd,pre{font-family:"Roboto Mono","Courier New",Courier,monospace}</style>
|
||||
|
||||
<link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons">
|
||||
|
||||
|
||||
|
||||
</head>
|
||||
|
||||
|
||||
|
||||
<body dir="ltr" data-md-color-primary="teal" data-md-color-accent="green">
|
||||
|
||||
<svg class="md-svg">
|
||||
<defs>
|
||||
|
||||
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="416" height="448"
|
||||
viewBox="0 0 416 448" id="github">
|
||||
<path fill="currentColor" d="M160 304q0 10-3.125 20.5t-10.75 19-18.125
|
||||
8.5-18.125-8.5-10.75-19-3.125-20.5 3.125-20.5 10.75-19 18.125-8.5
|
||||
18.125 8.5 10.75 19 3.125 20.5zM320 304q0 10-3.125 20.5t-10.75
|
||||
19-18.125 8.5-18.125-8.5-10.75-19-3.125-20.5 3.125-20.5 10.75-19
|
||||
18.125-8.5 18.125 8.5 10.75 19 3.125 20.5zM360
|
||||
304q0-30-17.25-51t-46.75-21q-10.25 0-48.75 5.25-17.75 2.75-39.25
|
||||
2.75t-39.25-2.75q-38-5.25-48.75-5.25-29.5 0-46.75 21t-17.25 51q0 22 8
|
||||
38.375t20.25 25.75 30.5 15 35 7.375 37.25 1.75h42q20.5 0
|
||||
37.25-1.75t35-7.375 30.5-15 20.25-25.75 8-38.375zM416 260q0 51.75-15.25
|
||||
82.75-9.5 19.25-26.375 33.25t-35.25 21.5-42.5 11.875-42.875 5.5-41.75
|
||||
1.125q-19.5 0-35.5-0.75t-36.875-3.125-38.125-7.5-34.25-12.875-30.25-20.25-21.5-28.75q-15.5-30.75-15.5-82.75
|
||||
0-59.25 34-99-6.75-20.5-6.75-42.5 0-29 12.75-54.5 27 0 47.5 9.875t47.25
|
||||
30.875q36.75-8.75 77.25-8.75 37 0 70 8 26.25-20.5
|
||||
46.75-30.25t47.25-9.75q12.75 25.5 12.75 54.5 0 21.75-6.75 42 34 40 34
|
||||
99.5z" />
|
||||
</svg>
|
||||
|
||||
</defs>
|
||||
</svg>
|
||||
<input class="md-toggle" data-md-toggle="drawer" type="checkbox" id="drawer">
|
||||
<input class="md-toggle" data-md-toggle="search" type="checkbox" id="search">
|
||||
<label class="md-overlay" data-md-component="overlay" for="drawer"></label>
|
||||
|
||||
|
||||
<header class="md-header" data-md-component="header">
|
||||
<nav class="md-header-nav md-grid">
|
||||
<div class="md-flex">
|
||||
<div class="md-flex__cell md-flex__cell--shrink">
|
||||
<a href="/" title="LBRY" class="md-header-nav__button md-logo">
|
||||
|
||||
<img src="https://s3.amazonaws.com/files.lbry.io/logo-square-white-bookonly.png" alt="LBRY logo" width="24" height="24">
|
||||
|
||||
</a>
|
||||
</div>
|
||||
<div class="md-flex__cell md-flex__cell--shrink">
|
||||
<label class="md-icon md-icon--menu md-header-nav__button" for="drawer"></label>
|
||||
</div>
|
||||
<div class="md-flex__cell md-flex__cell--stretch">
|
||||
<div class="md-flex__ellipsis md-header-nav__title" data-md-component="title">
|
||||
|
||||
|
||||
<span class="md-header-nav__topic">
|
||||
LBRY
|
||||
</span>
|
||||
<span class="md-header-nav__topic">
|
||||
|
||||
</span>
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
<div class="md-flex__cell md-flex__cell--shrink">
|
||||
|
||||
|
||||
<label class="md-icon md-icon--search md-header-nav__button" for="search"></label>
|
||||
|
||||
<div class="md-search" data-md-component="search" role="dialog">
|
||||
<label class="md-search__overlay" for="search"></label>
|
||||
<div class="md-search__inner" role="search">
|
||||
<form class="md-search__form" name="search">
|
||||
<input type="text" class="md-search__input" name="query" placeholder="Search" autocapitalize="off" autocorrect="off" autocomplete="off" spellcheck="false" data-md-component="query" data-md-state="active">
|
||||
<label class="md-icon md-search__icon" for="search"></label>
|
||||
<button type="reset" class="md-icon md-search__icon" data-md-component="reset" tabindex="-1">
|
||||

|
||||
</button>
|
||||
</form>
|
||||
<div class="md-search__output">
|
||||
<div class="md-search__scrollwrap" data-md-scrollfix>
|
||||
<div class="md-search-result" data-md-component="result">
|
||||
<div class="md-search-result__meta">
|
||||
Type to start searching
|
||||
</div>
|
||||
<ol class="md-search-result__list"></ol>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
<div class="md-flex__cell md-flex__cell--shrink">
|
||||
<div class="md-header-nav__source">
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<a href="https://github.com/lbryio/lbry/" title="Go to repository" class="md-source" data-md-source="github">
|
||||
|
||||
<div class="md-source__icon">
|
||||
<svg viewBox="0 0 24 24" width="24" height="24">
|
||||
<use xlink:href="#github" width="24" height="24"></use>
|
||||
</svg>
|
||||
</div>
|
||||
|
||||
<div class="md-source__repository">
|
||||
GitHub
|
||||
</div>
|
||||
</a>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</nav>
|
||||
</header>
|
||||
|
||||
<div class="md-container">
|
||||
|
||||
|
||||
|
||||
|
||||
<main class="md-main">
|
||||
<div class="md-main__inner md-grid" data-md-component="container">
|
||||
|
||||
|
||||
<div class="md-sidebar md-sidebar--primary" data-md-component="navigation">
|
||||
<div class="md-sidebar__scrollwrap">
|
||||
<div class="md-sidebar__inner">
|
||||
<nav class="md-nav md-nav--primary" data-md-level="0">
|
||||
<label class="md-nav__title md-nav__title--site" for="drawer">
|
||||
<span class="md-nav__button md-logo">
|
||||
|
||||
<img src="https://s3.amazonaws.com/files.lbry.io/logo-square-white-bookonly.png" alt="LBRY logo" width="48" height="48">
|
||||
|
||||
</span>
|
||||
LBRY
|
||||
</label>
|
||||
|
||||
<div class="md-nav__source">
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<a href="https://github.com/lbryio/lbry/" title="Go to repository" class="md-source" data-md-source="github">
|
||||
|
||||
<div class="md-source__icon">
|
||||
<svg viewBox="0 0 24 24" width="24" height="24">
|
||||
<use xlink:href="#github" width="24" height="24"></use>
|
||||
</svg>
|
||||
</div>
|
||||
|
||||
<div class="md-source__repository">
|
||||
GitHub
|
||||
</div>
|
||||
</a>
|
||||
|
||||
</div>
|
||||
|
||||
<ul class="md-nav__list" data-md-scrollfix>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<li class="md-nav__item">
|
||||
<a href="/" title="API" class="md-nav__link">
|
||||
API
|
||||
</a>
|
||||
</li>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<li class="md-nav__item">
|
||||
<a href="/cli/" title="CLI" class="md-nav__link">
|
||||
CLI
|
||||
</a>
|
||||
</li>
|
||||
|
||||
|
||||
</ul>
|
||||
</nav>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
<div class="md-content">
|
||||
<article class="md-content__inner md-typeset">
|
||||
|
||||
<h1>404 - Not found</h1>
|
||||
|
||||
|
||||
|
||||
|
||||
</article>
|
||||
</div>
|
||||
</div>
|
||||
</main>
|
||||
|
||||
|
||||
<footer class="md-footer">
|
||||
|
||||
<div class="md-footer-meta md-typeset">
|
||||
<div class="md-footer-meta__inner md-grid">
|
||||
<div class="md-footer-copyright">
|
||||
|
||||
powered by
|
||||
<a href="http://www.mkdocs.org">MkDocs</a>
|
||||
and
|
||||
<a href="https://squidfunk.github.io/mkdocs-material/">
|
||||
Material for MkDocs</a>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</footer>
|
||||
|
||||
</div>
|
||||
|
||||
<script src="/assets/javascripts/application.8eb9be28.js"></script>
|
||||
|
||||
<script>app.initialize({version:"0.17.3",url:{base:""}})</script>
|
||||
|
||||
|
||||
|
||||
|
||||
<script>!function(e,a,t,n,o,c,i){e.GoogleAnalyticsObject=o,e.ga=e.ga||function(){(e.ga.q=e.ga.q||[]).push(arguments)},e.ga.l=1*new Date,c=a.createElement(t),i=a.getElementsByTagName(t)[0],c.async=1,c.src="https://www.google-analytics.com/analytics.js",i.parentNode.insertBefore(c,i)}(window,document,"script",0,"ga"),ga("create","UA-60403362-1","auto"),ga("set","anonymizeIp",!0),ga("send","pageview");var links=document.getElementsByTagName("a");if(Array.prototype.map.call(links,function(e){e.host!=document.location.host&&e.addEventListener("click",function(){var a=e.getAttribute("data-md-action")||"follow";ga("send","event","outbound",a,e.href)})}),document.forms.search){var query=document.forms.search.query;query.addEventListener("blur",function(){if(this.value){var e=document.location.pathname;ga("send","pageview",e+"?q="+this.value)}})}</script>
|
||||
|
||||
|
||||
</body>
|
||||
</html>
|
3096
docs/api.json
File diff suppressed because one or more lines are too long
BIN
docs/assets/images/favicon.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 521 B |
20
docs/assets/images/icons/bitbucket.4ebea66e.svg
Normal file
|
@ -0,0 +1,20 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" width="352" height="448"
|
||||
viewBox="0 0 352 448" id="bitbucket">
|
||||
<path fill="currentColor" d="M203.75 214.75q2 15.75-12.625 25.25t-27.875
|
||||
1.5q-9.75-4.25-13.375-14.5t-0.125-20.5 13-14.5q9-4.5 18.125-3t16 8.875
|
||||
6.875 16.875zM231.5 209.5q-3.5-26.75-28.25-41t-49.25-3.25q-15.75
|
||||
7-25.125 22.125t-8.625 32.375q1 22.75 19.375 38.75t41.375 14q22.75-2
|
||||
38-21t12.5-42zM291.25
|
||||
74q-5-6.75-14-11.125t-14.5-5.5-17.75-3.125q-72.75-11.75-141.5 0.5-10.75
|
||||
1.75-16.5 3t-13.75 5.5-12.5 10.75q7.5 7 19 11.375t18.375 5.5 21.875
|
||||
2.875q57 7.25 112 0.25 15.75-2 22.375-3t18.125-5.375 18.75-11.625zM305.5
|
||||
332.75q-2 6.5-3.875 19.125t-3.5 21-7.125 17.5-14.5 14.125q-21.5
|
||||
12-47.375 17.875t-50.5 5.5-50.375-4.625q-11.5-2-20.375-4.5t-19.125-6.75-18.25-10.875-13-15.375q-6.25-24-14.25-73l1.5-4
|
||||
4.5-2.25q55.75 37 126.625 37t126.875-37q5.25 1.5 6 5.75t-1.25 11.25-2
|
||||
9.25zM350.75 92.5q-6.5 41.75-27.75 163.75-1.25 7.5-6.75 14t-10.875
|
||||
10-13.625 7.75q-63 31.5-152.5
|
||||
22-62-6.75-98.5-34.75-3.75-3-6.375-6.625t-4.25-8.75-2.25-8.5-1.5-9.875-1.375-8.75q-2.25-12.5-6.625-37.5t-7-40.375-5.875-36.875-5.5-39.5q0.75-6.5
|
||||
4.375-12.125t7.875-9.375 11.25-7.5 11.5-5.625 12-4.625q31.25-11.5
|
||||
78.25-16 94.75-9.25 169 12.5 38.75 11.5 53.75 30.5 4 5 4.125
|
||||
12.75t-1.375 13.5z" />
|
||||
</svg>
|
After Width: | Height: | Size: 1.4 KiB |
18
docs/assets/images/icons/github.a4034fb1.svg
Normal file
|
@ -0,0 +1,18 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" width="416" height="448"
|
||||
viewBox="0 0 416 448" id="github">
|
||||
<path fill="currentColor" d="M160 304q0 10-3.125 20.5t-10.75 19-18.125
|
||||
8.5-18.125-8.5-10.75-19-3.125-20.5 3.125-20.5 10.75-19 18.125-8.5
|
||||
18.125 8.5 10.75 19 3.125 20.5zM320 304q0 10-3.125 20.5t-10.75
|
||||
19-18.125 8.5-18.125-8.5-10.75-19-3.125-20.5 3.125-20.5 10.75-19
|
||||
18.125-8.5 18.125 8.5 10.75 19 3.125 20.5zM360
|
||||
304q0-30-17.25-51t-46.75-21q-10.25 0-48.75 5.25-17.75 2.75-39.25
|
||||
2.75t-39.25-2.75q-38-5.25-48.75-5.25-29.5 0-46.75 21t-17.25 51q0 22 8
|
||||
38.375t20.25 25.75 30.5 15 35 7.375 37.25 1.75h42q20.5 0
|
||||
37.25-1.75t35-7.375 30.5-15 20.25-25.75 8-38.375zM416 260q0 51.75-15.25
|
||||
82.75-9.5 19.25-26.375 33.25t-35.25 21.5-42.5 11.875-42.875 5.5-41.75
|
||||
1.125q-19.5 0-35.5-0.75t-36.875-3.125-38.125-7.5-34.25-12.875-30.25-20.25-21.5-28.75q-15.5-30.75-15.5-82.75
|
||||
0-59.25 34-99-6.75-20.5-6.75-42.5 0-29 12.75-54.5 27 0 47.5 9.875t47.25
|
||||
30.875q36.75-8.75 77.25-8.75 37 0 70 8 26.25-20.5
|
||||
46.75-30.25t47.25-9.75q12.75 25.5 12.75 54.5 0 21.75-6.75 42 34 40 34
|
||||
99.5z" />
|
||||
</svg>
|
After Width: | Height: | Size: 1.2 KiB |
38
docs/assets/images/icons/gitlab.348cdb3a.svg
Normal file
|
@ -0,0 +1,38 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" width="500" height="500"
|
||||
viewBox="0 0 500 500" id="gitlab">
|
||||
<g transform="translate(156.197863, 1.160267)">
|
||||
<path fill="currentColor"
|
||||
d="M93.667,473.347L93.667,473.347l90.684-279.097H2.983L93.667,
|
||||
473.347L93.667,473.347z" />
|
||||
</g>
|
||||
<g transform="translate(28.531199, 1.160800)" opacity="0.7">
|
||||
<path fill="currentColor"
|
||||
d="M221.333,473.345L130.649,194.25H3.557L221.333,473.345L221.333,
|
||||
473.345z" />
|
||||
</g>
|
||||
<g transform="translate(0.088533, 0.255867)" opacity="0.5">
|
||||
<path fill="currentColor"
|
||||
d="M32,195.155L32,195.155L4.441,279.97c-2.513,7.735,0.24,16.21,6.821,
|
||||
20.99l238.514,173.29 L32,195.155L32,195.155z" />
|
||||
</g>
|
||||
<g transform="translate(29.421866, 280.255593)">
|
||||
<path fill="currentColor"
|
||||
d="M2.667-84.844h127.092L75.14-252.942c-2.811-8.649-15.047-8.649-17.856,
|
||||
0L2.667-84.844 L2.667-84.844z" />
|
||||
</g>
|
||||
<g transform="translate(247.197860, 1.160800)" opacity="0.7">
|
||||
<path fill="currentColor"
|
||||
d="M2.667,473.345L93.351,194.25h127.092L2.667,473.345L2.667,
|
||||
473.345z" />
|
||||
</g>
|
||||
<g transform="translate(246.307061, 0.255867)" opacity="0.5">
|
||||
<path fill="currentColor"
|
||||
d="M221.334,195.155L221.334,195.155l27.559,84.815c2.514,7.735-0.24,
|
||||
16.21-6.821,20.99 L3.557,474.25L221.334,195.155L221.334,195.155z" />
|
||||
</g>
|
||||
<g transform="translate(336.973725, 280.255593)">
|
||||
<path fill="currentColor"
|
||||
d="M130.667-84.844H3.575l54.618-168.098c2.811-8.649,15.047-8.649,
|
||||
17.856,0L130.667-84.844 L130.667-84.844z" />
|
||||
</g>
|
||||
</svg>
|
After Width: | Height: | Size: 1.6 KiB |
1
docs/assets/javascripts/application.8eb9be28.js
Normal file
File diff suppressed because one or more lines are too long
1
docs/assets/javascripts/lunr/lunr.da.js
Normal file
|
@ -0,0 +1 @@
|
|||
!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r,i,n;e.da=function(){this.pipeline.reset(),this.pipeline.add(e.da.trimmer,e.da.stopWordFilter,e.da.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.da.stemmer))},e.da.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.da.trimmer=e.trimmerSupport.generateTrimmer(e.da.wordCharacters),e.Pipeline.registerFunction(e.da.trimmer,"trimmer-da"),e.da.stemmer=(r=e.stemmerSupport.Among,i=e.stemmerSupport.SnowballProgram,n=new function(){var e,n,t,s=[new r("hed",-1,1),new r("ethed",0,1),new r("ered",-1,1),new r("e",-1,1),new r("erede",3,1),new r("ende",3,1),new r("erende",5,1),new r("ene",3,1),new r("erne",3,1),new r("ere",3,1),new r("en",-1,1),new r("heden",10,1),new r("eren",10,1),new r("er",-1,1),new r("heder",13,1),new r("erer",13,1),new r("s",-1,2),new r("heds",16,1),new r("es",16,1),new r("endes",18,1),new r("erendes",19,1),new r("enes",18,1),new r("ernes",18,1),new r("eres",18,1),new r("ens",16,1),new r("hedens",24,1),new r("erens",24,1),new r("ers",16,1),new r("ets",16,1),new r("erets",28,1),new r("et",-1,1),new r("eret",30,1)],o=[new r("gd",-1,-1),new r("dt",-1,-1),new r("gt",-1,-1),new r("kt",-1,-1)],a=[new r("ig",-1,1),new r("lig",0,1),new r("elig",1,1),new r("els",-1,1),new r("løst",-1,2)],d=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128],u=[239,254,42,3,0,0,0,0,0,0,0,0,0,0,0,0,16],c=new i;function l(){var e,r=c.limit-c.cursor;c.cursor>=n&&(e=c.limit_backward,c.limit_backward=n,c.ket=c.cursor,c.find_among_b(o,4)?(c.bra=c.cursor,c.limit_backward=e,c.cursor=c.limit-r,c.cursor>c.limit_backward&&(c.cursor--,c.bra=c.cursor,c.slice_del())):c.limit_backward=e)}this.setCurrent=function(e){c.setCurrent(e)},this.getCurrent=function(){return c.getCurrent()},this.stem=function(){var r,i=c.cursor;return function(){var r,i=c.cursor+3;if(n=c.limit,0<=i&&i<=c.limit){for(e=i;;){if(r=c.cursor,c.in_grouping(d,97,248)){c.cursor=r;break}if(c.cursor=r,r>=c.limit)return;c.cursor++}for(;!c.out_grouping(d,97,248);){if(c.cursor>=c.limit)return;c.cursor++}(n=c.cursor)<e&&(n=e)}}(),c.limit_backward=i,c.cursor=c.limit,function(){var e,r;if(c.cursor>=n&&(r=c.limit_backward,c.limit_backward=n,c.ket=c.cursor,e=c.find_among_b(s,32),c.limit_backward=r,e))switch(c.bra=c.cursor,e){case 1:c.slice_del();break;case 2:c.in_grouping_b(u,97,229)&&c.slice_del()}}(),c.cursor=c.limit,l(),c.cursor=c.limit,function(){var e,r,i,t=c.limit-c.cursor;if(c.ket=c.cursor,c.eq_s_b(2,"st")&&(c.bra=c.cursor,c.eq_s_b(2,"ig")&&c.slice_del()),c.cursor=c.limit-t,c.cursor>=n&&(r=c.limit_backward,c.limit_backward=n,c.ket=c.cursor,e=c.find_among_b(a,5),c.limit_backward=r,e))switch(c.bra=c.cursor,e){case 1:c.slice_del(),i=c.limit-c.cursor,l(),c.cursor=c.limit-i;break;case 2:c.slice_from("løs")}}(),c.cursor=c.limit,c.cursor>=n&&(r=c.limit_backward,c.limit_backward=n,c.ket=c.cursor,c.out_grouping_b(d,97,248)?(c.bra=c.cursor,t=c.slice_to(t),c.limit_backward=r,c.eq_v_b(t)&&c.slice_del()):c.limit_backward=r),!0}},function(e){return"function"==typeof e.update?e.update(function(e){return 
n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}),e.Pipeline.registerFunction(e.da.stemmer,"stemmer-da"),e.da.stopWordFilter=e.generateStopWordFilter("ad af alle alt anden at blev blive bliver da de dem den denne der deres det dette dig din disse dog du efter eller en end er et for fra ham han hans har havde have hende hendes her hos hun hvad hvis hvor i ikke ind jeg jer jo kunne man mange med meget men mig min mine mit mod ned noget nogle nu når og også om op os over på selv sig sin sine sit skal skulle som sådan thi til ud under var vi vil ville vor være været".split(" ")),e.Pipeline.registerFunction(e.da.stopWordFilter,"stopWordFilter-da")}});
|
1
docs/assets/javascripts/lunr/lunr.de.js
Normal file
File diff suppressed because one or more lines are too long
1
docs/assets/javascripts/lunr/lunr.du.js
Normal file
File diff suppressed because one or more lines are too long
1
docs/assets/javascripts/lunr/lunr.es.js
Normal file
File diff suppressed because one or more lines are too long
1
docs/assets/javascripts/lunr/lunr.fi.js
Normal file
File diff suppressed because one or more lines are too long
1
docs/assets/javascripts/lunr/lunr.fr.js
Normal file
File diff suppressed because one or more lines are too long
1
docs/assets/javascripts/lunr/lunr.hu.js
Normal file
File diff suppressed because one or more lines are too long
1
docs/assets/javascripts/lunr/lunr.it.js
Normal file
File diff suppressed because one or more lines are too long
1
docs/assets/javascripts/lunr/lunr.jp.js
Normal file
|
@ -0,0 +1 @@
|
|||
!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r="2"==e.version[0];e.jp=function(){this.pipeline.reset(),this.pipeline.add(e.jp.stopWordFilter,e.jp.stemmer),r?this.tokenizer=e.jp.tokenizer:(e.tokenizer&&(e.tokenizer=e.jp.tokenizer),this.tokenizerFn&&(this.tokenizerFn=e.jp.tokenizer))};var t=new e.TinySegmenter;e.jp.tokenizer=function(n){if(!arguments.length||null==n||null==n)return[];if(Array.isArray(n))return n.map(function(t){return r?new e.Token(t.toLowerCase()):t.toLowerCase()});for(var i=n.toString().toLowerCase().replace(/^\s+/,""),o=i.length-1;o>=0;o--)if(/\S/.test(i.charAt(o))){i=i.substring(0,o+1);break}return t.segment(i).filter(function(e){return!!e}).map(function(t){return r?new e.Token(t):t})},e.jp.stemmer=function(e){return e},e.Pipeline.registerFunction(e.jp.stemmer,"stemmer-jp"),e.jp.wordCharacters="一二三四五六七八九十百千万億兆一-龠々〆ヵヶぁ-んァ-ヴーア-ン゙a-zA-Za-zA-Z0-90-9",e.jp.stopWordFilter=function(t){if(-1===e.jp.stopWordFilter.stopWords.indexOf(r?t.toString():t))return t},e.jp.stopWordFilter=e.generateStopWordFilter("これ それ あれ この その あの ここ そこ あそこ こちら どこ だれ なに なん 何 私 貴方 貴方方 我々 私達 あの人 あのかた 彼女 彼 です あります おります います は が の に を で え から まで より も どの と し それで しかし".split(" ")),e.Pipeline.registerFunction(e.jp.stopWordFilter,"stopWordFilter-jp")}});
|
1
docs/assets/javascripts/lunr/lunr.multi.js
Normal file
|
@ -0,0 +1 @@
|
|||
!function(e,i){"function"==typeof define&&define.amd?define(i):"object"==typeof exports?module.exports=i():i()(e.lunr)}(this,function(){return function(e){e.multiLanguage=function(){for(var i=Array.prototype.slice.call(arguments),t=i.join("-"),r="",n=[],s=[],p=0;p<i.length;++p)"en"==i[p]?(r+="\\w",n.unshift(e.stopWordFilter),n.push(e.stemmer),s.push(e.stemmer)):(r+=e[i[p]].wordCharacters,n.unshift(e[i[p]].stopWordFilter),n.push(e[i[p]].stemmer),s.push(e[i[p]].stemmer));var o=e.trimmerSupport.generateTrimmer(r);return e.Pipeline.registerFunction(o,"lunr-multi-trimmer-"+t),n.unshift(o),function(){this.pipeline.reset(),this.pipeline.add.apply(this.pipeline,n),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add.apply(this.searchPipeline,s))}}}});
|
1
docs/assets/javascripts/lunr/lunr.no.js
Normal file
|
@ -0,0 +1 @@
|
|||
!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r,n,i;e.no=function(){this.pipeline.reset(),this.pipeline.add(e.no.trimmer,e.no.stopWordFilter,e.no.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.no.stemmer))},e.no.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.no.trimmer=e.trimmerSupport.generateTrimmer(e.no.wordCharacters),e.Pipeline.registerFunction(e.no.trimmer,"trimmer-no"),e.no.stemmer=(r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,i=new function(){var e,i,t=[new r("a",-1,1),new r("e",-1,1),new r("ede",1,1),new r("ande",1,1),new r("ende",1,1),new r("ane",1,1),new r("ene",1,1),new r("hetene",6,1),new r("erte",1,3),new r("en",-1,1),new r("heten",9,1),new r("ar",-1,1),new r("er",-1,1),new r("heter",12,1),new r("s",-1,2),new r("as",14,1),new r("es",14,1),new r("edes",16,1),new r("endes",16,1),new r("enes",16,1),new r("hetenes",19,1),new r("ens",14,1),new r("hetens",21,1),new r("ers",14,1),new r("ets",14,1),new r("et",-1,1),new r("het",25,1),new r("ert",-1,3),new r("ast",-1,1)],o=[new r("dt",-1,-1),new r("vt",-1,-1)],s=[new r("leg",-1,1),new r("eleg",0,1),new r("ig",-1,1),new r("eig",2,1),new r("lig",2,1),new r("elig",4,1),new r("els",-1,1),new r("lov",-1,1),new r("elov",7,1),new r("slov",7,1),new r("hetslov",9,1)],a=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128],m=[119,125,149,1],l=new n;this.setCurrent=function(e){l.setCurrent(e)},this.getCurrent=function(){return l.getCurrent()},this.stem=function(){var r,n,u,d,c=l.cursor;return function(){var r,n=l.cursor+3;if(i=l.limit,0<=n||n<=l.limit){for(e=n;;){if(r=l.cursor,l.in_grouping(a,97,248)){l.cursor=r;break}if(r>=l.limit)return;l.cursor=r+1}for(;!l.out_grouping(a,97,248);){if(l.cursor>=l.limit)return;l.cursor++}(i=l.cursor)<e&&(i=e)}}(),l.limit_backward=c,l.cursor=l.limit,function(){var e,r,n;if(l.cursor>=i&&(r=l.limit_backward,l.limit_backward=i,l.ket=l.cursor,e=l.find_among_b(t,29),l.limit_backward=r,e))switch(l.bra=l.cursor,e){case 1:l.slice_del();break;case 2:n=l.limit-l.cursor,l.in_grouping_b(m,98,122)?l.slice_del():(l.cursor=l.limit-n,l.eq_s_b(1,"k")&&l.out_grouping_b(a,97,248)&&l.slice_del());break;case 3:l.slice_from("er")}}(),l.cursor=l.limit,n=l.limit-l.cursor,l.cursor>=i&&(r=l.limit_backward,l.limit_backward=i,l.ket=l.cursor,l.find_among_b(o,2)?(l.bra=l.cursor,l.limit_backward=r,l.cursor=l.limit-n,l.cursor>l.limit_backward&&(l.cursor--,l.bra=l.cursor,l.slice_del())):l.limit_backward=r),l.cursor=l.limit,l.cursor>=i&&(d=l.limit_backward,l.limit_backward=i,l.ket=l.cursor,(u=l.find_among_b(s,11))?(l.bra=l.cursor,l.limit_backward=d,1==u&&l.slice_del()):l.limit_backward=d),!0}},function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}),e.Pipeline.registerFunction(e.no.stemmer,"stemmer-no"),e.no.stopWordFilter=e.generateStopWordFilter("alle at av bare begge ble blei bli blir blitt både båe da de deg dei deim deira deires dem den denne der dere deres det dette di din disse ditt du dykk dykkar då eg ein eit eitt eller elles en enn er et ett etter for fordi fra før ha hadde 
han hans har hennar henne hennes her hjå ho hoe honom hoss hossen hun hva hvem hver hvilke hvilken hvis hvor hvordan hvorfor i ikke ikkje ikkje ingen ingi inkje inn inni ja jeg kan kom korleis korso kun kunne kva kvar kvarhelst kven kvi kvifor man mange me med medan meg meget mellom men mi min mine mitt mot mykje ned no noe noen noka noko nokon nokor nokre nå når og også om opp oss over på samme seg selv si si sia sidan siden sin sine sitt sjøl skal skulle slik so som som somme somt så sånn til um upp ut uten var vart varte ved vere verte vi vil ville vore vors vort vår være være vært å".split(" ")),e.Pipeline.registerFunction(e.no.stopWordFilter,"stopWordFilter-no")}});
|
1
docs/assets/javascripts/lunr/lunr.pt.js
Normal file
File diff suppressed because one or more lines are too long
1
docs/assets/javascripts/lunr/lunr.ro.js
Normal file
File diff suppressed because one or more lines are too long
1
docs/assets/javascripts/lunr/lunr.ru.js
Normal file
File diff suppressed because one or more lines are too long
1
docs/assets/javascripts/lunr/lunr.stemmer.support.js
Normal file
|
@ -0,0 +1 @@
|
|||
!function(r,t){"function"==typeof define&&define.amd?define(t):"object"==typeof exports?module.exports=t():t()(r.lunr)}(this,function(){return function(r){r.stemmerSupport={Among:function(r,t,i,s){if(this.toCharArray=function(r){for(var t=r.length,i=new Array(t),s=0;s<t;s++)i[s]=r.charCodeAt(s);return i},!r&&""!=r||!t&&0!=t||!i)throw"Bad Among initialisation: s:"+r+", substring_i: "+t+", result: "+i;this.s_size=r.length,this.s=this.toCharArray(r),this.substring_i=t,this.result=i,this.method=s},SnowballProgram:function(){var r;return{bra:0,ket:0,limit:0,cursor:0,limit_backward:0,setCurrent:function(t){r=t,this.cursor=0,this.limit=t.length,this.limit_backward=0,this.bra=this.cursor,this.ket=this.limit},getCurrent:function(){var t=r;return r=null,t},in_grouping:function(t,i,s){if(this.cursor<this.limit){var e=r.charCodeAt(this.cursor);if(e<=s&&e>=i&&t[(e-=i)>>3]&1<<(7&e))return this.cursor++,!0}return!1},in_grouping_b:function(t,i,s){if(this.cursor>this.limit_backward){var e=r.charCodeAt(this.cursor-1);if(e<=s&&e>=i&&t[(e-=i)>>3]&1<<(7&e))return this.cursor--,!0}return!1},out_grouping:function(t,i,s){if(this.cursor<this.limit){var e=r.charCodeAt(this.cursor);if(e>s||e<i)return this.cursor++,!0;if(!(t[(e-=i)>>3]&1<<(7&e)))return this.cursor++,!0}return!1},out_grouping_b:function(t,i,s){if(this.cursor>this.limit_backward){var e=r.charCodeAt(this.cursor-1);if(e>s||e<i)return this.cursor--,!0;if(!(t[(e-=i)>>3]&1<<(7&e)))return this.cursor--,!0}return!1},eq_s:function(t,i){if(this.limit-this.cursor<t)return!1;for(var s=0;s<t;s++)if(r.charCodeAt(this.cursor+s)!=i.charCodeAt(s))return!1;return this.cursor+=t,!0},eq_s_b:function(t,i){if(this.cursor-this.limit_backward<t)return!1;for(var s=0;s<t;s++)if(r.charCodeAt(this.cursor-t+s)!=i.charCodeAt(s))return!1;return this.cursor-=t,!0},find_among:function(t,i){for(var s=0,e=i,n=this.cursor,u=this.limit,o=0,h=0,c=!1;;){for(var a=s+(e-s>>1),f=0,l=o<h?o:h,_=t[a],m=l;m<_.s_size;m++){if(n+l==u){f=-1;break}if(f=r.charCodeAt(n+l)-_.s[m])break;l++}if(f<0?(e=a,h=l):(s=a,o=l),e-s<=1){if(s>0||e==s||c)break;c=!0}}for(;;){if(o>=(_=t[s]).s_size){if(this.cursor=n+_.s_size,!_.method)return _.result;var b=_.method();if(this.cursor=n+_.s_size,b)return _.result}if((s=_.substring_i)<0)return 0}},find_among_b:function(t,i){for(var s=0,e=i,n=this.cursor,u=this.limit_backward,o=0,h=0,c=!1;;){for(var a=s+(e-s>>1),f=0,l=o<h?o:h,_=(m=t[a]).s_size-1-l;_>=0;_--){if(n-l==u){f=-1;break}if(f=r.charCodeAt(n-1-l)-m.s[_])break;l++}if(f<0?(e=a,h=l):(s=a,o=l),e-s<=1){if(s>0||e==s||c)break;c=!0}}for(;;){var m;if(o>=(m=t[s]).s_size){if(this.cursor=n-m.s_size,!m.method)return m.result;var b=m.method();if(this.cursor=n-m.s_size,b)return m.result}if((s=m.substring_i)<0)return 0}},replace_s:function(t,i,s){var e=s.length-(i-t),n=r.substring(0,t),u=r.substring(i);return r=n+s+u,this.limit+=e,this.cursor>=i?this.cursor+=e:this.cursor>t&&(this.cursor=t),e},slice_check:function(){if(this.bra<0||this.bra>this.ket||this.ket>this.limit||this.limit>r.length)throw"faulty slice operation"},slice_from:function(r){this.slice_check(),this.replace_s(this.bra,this.ket,r)},slice_del:function(){this.slice_from("")},insert:function(r,t,i){var s=this.replace_s(r,t,i);r<=this.bra&&(this.bra+=s),r<=this.ket&&(this.ket+=s)},slice_to:function(){return this.slice_check(),r.substring(this.bra,this.ket)},eq_v_b:function(r){return this.eq_s_b(r.length,r)}}}},r.trimmerSupport={generateTrimmer:function(r){var t=new RegExp("^[^"+r+"]+"),i=new RegExp("[^"+r+"]+$");return function(r){return"function"==typeof 
r.update?r.update(function(r){return r.replace(t,"").replace(i,"")}):r.replace(t,"").replace(i,"")}}}}});
|
1
docs/assets/javascripts/lunr/lunr.sv.js
Normal file
|
@ -0,0 +1 @@
|
|||
!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r,n,t;e.sv=function(){this.pipeline.reset(),this.pipeline.add(e.sv.trimmer,e.sv.stopWordFilter,e.sv.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.sv.stemmer))},e.sv.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.sv.trimmer=e.trimmerSupport.generateTrimmer(e.sv.wordCharacters),e.Pipeline.registerFunction(e.sv.trimmer,"trimmer-sv"),e.sv.stemmer=(r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,t=new function(){var e,t,i=[new r("a",-1,1),new r("arna",0,1),new r("erna",0,1),new r("heterna",2,1),new r("orna",0,1),new r("ad",-1,1),new r("e",-1,1),new r("ade",6,1),new r("ande",6,1),new r("arne",6,1),new r("are",6,1),new r("aste",6,1),new r("en",-1,1),new r("anden",12,1),new r("aren",12,1),new r("heten",12,1),new r("ern",-1,1),new r("ar",-1,1),new r("er",-1,1),new r("heter",18,1),new r("or",-1,1),new r("s",-1,2),new r("as",21,1),new r("arnas",22,1),new r("ernas",22,1),new r("ornas",22,1),new r("es",21,1),new r("ades",26,1),new r("andes",26,1),new r("ens",21,1),new r("arens",29,1),new r("hetens",29,1),new r("erns",21,1),new r("at",-1,1),new r("andet",-1,1),new r("het",-1,1),new r("ast",-1,1)],s=[new r("dd",-1,-1),new r("gd",-1,-1),new r("nn",-1,-1),new r("dt",-1,-1),new r("gt",-1,-1),new r("kt",-1,-1),new r("tt",-1,-1)],a=[new r("ig",-1,1),new r("lig",0,1),new r("els",-1,1),new r("fullt",-1,3),new r("löst",-1,2)],o=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,24,0,32],u=[119,127,149],m=new n;this.setCurrent=function(e){m.setCurrent(e)},this.getCurrent=function(){return m.getCurrent()},this.stem=function(){var r,n=m.cursor;return function(){var r,n=m.cursor+3;if(t=m.limit,0<=n||n<=m.limit){for(e=n;;){if(r=m.cursor,m.in_grouping(o,97,246)){m.cursor=r;break}if(m.cursor=r,m.cursor>=m.limit)return;m.cursor++}for(;!m.out_grouping(o,97,246);){if(m.cursor>=m.limit)return;m.cursor++}(t=m.cursor)<e&&(t=e)}}(),m.limit_backward=n,m.cursor=m.limit,function(){var e,r=m.limit_backward;if(m.cursor>=t&&(m.limit_backward=t,m.cursor=m.limit,m.ket=m.cursor,e=m.find_among_b(i,37),m.limit_backward=r,e))switch(m.bra=m.cursor,e){case 1:m.slice_del();break;case 2:m.in_grouping_b(u,98,121)&&m.slice_del()}}(),m.cursor=m.limit,r=m.limit_backward,m.cursor>=t&&(m.limit_backward=t,m.cursor=m.limit,m.find_among_b(s,7)&&(m.cursor=m.limit,m.ket=m.cursor,m.cursor>m.limit_backward&&(m.bra=--m.cursor,m.slice_del())),m.limit_backward=r),m.cursor=m.limit,function(){var e,r;if(m.cursor>=t){if(r=m.limit_backward,m.limit_backward=t,m.cursor=m.limit,m.ket=m.cursor,e=m.find_among_b(a,5))switch(m.bra=m.cursor,e){case 1:m.slice_del();break;case 2:m.slice_from("lös");break;case 3:m.slice_from("full")}m.limit_backward=r}}(),!0}},function(e){return"function"==typeof e.update?e.update(function(e){return t.setCurrent(e),t.stem(),t.getCurrent()}):(t.setCurrent(e),t.stem(),t.getCurrent())}),e.Pipeline.registerFunction(e.sv.stemmer,"stemmer-sv"),e.sv.stopWordFilter=e.generateStopWordFilter("alla allt att av blev bli blir blivit de dem den denna deras dess dessa det detta dig din dina ditt du där då efter ej eller en er era ert ett från för ha hade han 
hans har henne hennes hon honom hur här i icke ingen inom inte jag ju kan kunde man med mellan men mig min mina mitt mot mycket ni nu när någon något några och om oss på samma sedan sig sin sina sitta själv skulle som så sådan sådana sådant till under upp ut utan vad var vara varför varit varje vars vart vem vi vid vilka vilkas vilken vilket vår våra vårt än är åt över".split(" ")),e.Pipeline.registerFunction(e.sv.stopWordFilter,"stopWordFilter-sv")}});
|
1
docs/assets/javascripts/lunr/lunr.tr.js
Normal file
File diff suppressed because one or more lines are too long
1
docs/assets/javascripts/lunr/tinyseg.js
Normal file
File diff suppressed because one or more lines are too long
1
docs/assets/javascripts/modernizr.1aa3b519.js
Normal file
File diff suppressed because one or more lines are too long
2
docs/assets/stylesheets/application-palette.6079476c.css
Normal file
File diff suppressed because one or more lines are too long
2
docs/assets/stylesheets/application.78aab2dc.css
Normal file
File diff suppressed because one or more lines are too long
2587
docs/cli/index.html
Normal file
File diff suppressed because it is too large
2313
docs/index.html
Normal file
File diff suppressed because it is too large
584
docs/search/search_index.json
Normal file
File diff suppressed because one or more lines are too long
|
@ -4,9 +4,8 @@
|
|||
share_usage_data: True
|
||||
|
||||
lbryum_servers:
|
||||
- lbryumx1.lbry.com:50001
|
||||
- lbryumx2.lbry.com:50001
|
||||
- lbryumx4.lbry.com:50001
|
||||
- lbryumx1.lbry.io:50001
|
||||
- lbryumx2.lbry.io:50001
|
||||
|
||||
blockchain_name: lbrycrd_main
|
||||
|
||||
|
|
|
@ -1,2 +0,0 @@
|
|||
__version__ = "0.113.0"
|
||||
version = tuple(map(int, __version__.split('.'))) # pylint: disable=invalid-name
|
|
@ -1,6 +0,0 @@
|
|||
from lbry.utils import get_lbry_hash_obj
|
||||
|
||||
MAX_BLOB_SIZE = 2 * 2 ** 20
|
||||
|
||||
# digest_size is in bytes, and blob hashes are hex encoded
|
||||
BLOBHASH_LENGTH = get_lbry_hash_obj().digest_size * 2
|
|
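A quick sanity check of those constants, under the assumption that get_lbry_hash_obj() is sha384 (a 48-byte digest), which would make hex-encoded blob hashes 96 characters long:

import hashlib

assert 2 * 2 ** 20 == 2097152                   # MAX_BLOB_SIZE, i.e. 2 MiB
assert hashlib.sha384().digest_size * 2 == 96   # matches BLOBHASH_LENGTH if the blob hash is sha384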
@ -1,77 +0,0 @@
|
|||
import asyncio
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DiskSpaceManager:
|
||||
|
||||
def __init__(self, config, db, blob_manager, cleaning_interval=30 * 60, analytics=None):
|
||||
self.config = config
|
||||
self.db = db
|
||||
self.blob_manager = blob_manager
|
||||
self.cleaning_interval = cleaning_interval
|
||||
self.running = False
|
||||
self.task = None
|
||||
self.analytics = analytics
|
||||
self._used_space_bytes = None
|
||||
|
||||
async def get_free_space_mb(self, is_network_blob=False):
|
||||
limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
|
||||
space_used_mb = await self.get_space_used_mb()
|
||||
space_used_mb = space_used_mb['network_storage'] if is_network_blob else space_used_mb['content_storage']
|
||||
return max(0, limit_mb - space_used_mb)
|
||||
|
||||
async def get_space_used_bytes(self):
|
||||
self._used_space_bytes = await self.db.get_stored_blob_disk_usage()
|
||||
return self._used_space_bytes
|
||||
|
||||
async def get_space_used_mb(self, cached=True):
|
||||
cached = cached and self._used_space_bytes is not None
|
||||
space_used_bytes = self._used_space_bytes if cached else await self.get_space_used_bytes()
|
||||
return {key: int(value/1024.0/1024.0) for key, value in space_used_bytes.items()}
|
||||
|
||||
async def clean(self):
|
||||
await self._clean(False)
|
||||
await self._clean(True)
|
||||
|
||||
async def _clean(self, is_network_blob=False):
|
||||
space_used_mb = await self.get_space_used_mb(cached=False)
|
||||
if is_network_blob:
|
||||
space_used_mb = space_used_mb['network_storage']
|
||||
else:
|
||||
space_used_mb = space_used_mb['content_storage'] + space_used_mb['private_storage']
|
||||
storage_limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
|
||||
if self.analytics:
|
||||
asyncio.create_task(
|
||||
self.analytics.send_disk_space_used(space_used_mb, storage_limit_mb, is_network_blob)
|
||||
)
|
||||
delete = []
|
||||
available = storage_limit_mb - space_used_mb
|
||||
# content blobs: skip cleaning when no limit is configured; network blobs: skip when already under the limit
if storage_limit_mb == 0 if not is_network_blob else available >= 0:
|
||||
return 0
|
||||
for blob_hash, file_size, _ in await self.db.get_stored_blobs(is_mine=False, is_network_blob=is_network_blob):
|
||||
delete.append(blob_hash)
|
||||
available += int(file_size/1024.0/1024.0)
|
||||
if available >= 0:
|
||||
break
|
||||
if delete:
|
||||
await self.db.stop_all_files()
|
||||
await self.blob_manager.delete_blobs(delete, delete_from_db=True)
|
||||
self._used_space_bytes = None
|
||||
return len(delete)
|
||||
|
||||
async def cleaning_loop(self):
|
||||
while self.running:
|
||||
await asyncio.sleep(self.cleaning_interval)
|
||||
await self.clean()
|
||||
|
||||
async def start(self):
|
||||
self.running = True
|
||||
self.task = asyncio.create_task(self.cleaning_loop())
|
||||
self.task.add_done_callback(lambda _: log.info("Stopping blob cleanup service."))
|
||||
|
||||
async def stop(self):
|
||||
if self.running:
|
||||
self.running = False
|
||||
self.task.cancel()
|
|
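The core of _clean() above is a "delete stored blobs until usage drops back under the limit" loop; a minimal synchronous sketch of that bookkeeping (the real class drives the async database and blob manager APIs):

def blobs_to_evict(stored_blobs, space_used_mb, storage_limit_mb):
    """Pick blob hashes to delete until usage is back under the limit (sizes in bytes)."""
    delete = []
    available = storage_limit_mb - space_used_mb  # negative when over the limit
    if available >= 0:
        return delete
    for blob_hash, file_size_bytes in stored_blobs:
        delete.append(blob_hash)
        available += int(file_size_bytes / 1024.0 / 1024.0)
        if available >= 0:
            break
    return delete

# e.g. blobs_to_evict([("aa", 3 * 2 ** 20), ("bb", 2 * 2 ** 20)], space_used_mb=10, storage_limit_mb=8) == ["aa"]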
@ -1,141 +0,0 @@
|
|||
import asyncio
|
||||
import typing
|
||||
import logging
|
||||
from lbry.utils import cache_concurrent
|
||||
from lbry.blob_exchange.client import request_blob
|
||||
from lbry.dht.node import get_kademlia_peers_from_hosts
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.conf import Config
|
||||
from lbry.dht.node import Node
|
||||
from lbry.dht.peer import KademliaPeer
|
||||
from lbry.blob.blob_manager import BlobManager
|
||||
from lbry.blob.blob_file import AbstractBlob
|
||||
from lbry.blob_exchange.client import BlobExchangeClientProtocol
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BlobDownloader:
|
||||
BAN_FACTOR = 2.0 # fixme: when connection manager gets implemented, move it out from here
|
||||
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, config: 'Config', blob_manager: 'BlobManager',
|
||||
peer_queue: asyncio.Queue):
|
||||
self.loop = loop
|
||||
self.config = config
|
||||
self.blob_manager = blob_manager
|
||||
self.peer_queue = peer_queue
|
||||
self.active_connections: typing.Dict['KademliaPeer', asyncio.Task] = {} # active request_blob calls
|
||||
self.ignored: typing.Dict['KademliaPeer', int] = {}
|
||||
self.scores: typing.Dict['KademliaPeer', int] = {}
|
||||
self.failures: typing.Dict['KademliaPeer', int] = {}
|
||||
self.connection_failures: typing.Set['KademliaPeer'] = set()
|
||||
self.connections: typing.Dict['KademliaPeer', 'BlobExchangeClientProtocol'] = {}
|
||||
self.is_running = asyncio.Event()
|
||||
|
||||
def should_race_continue(self, blob: 'AbstractBlob'):
|
||||
max_probes = self.config.max_connections_per_download * (1 if self.connections else 10)
|
||||
if len(self.active_connections) >= max_probes:
|
||||
return False
|
||||
# keep racing only while the blob is still writeable and not yet verified
return not (blob.get_is_verified() or not blob.is_writeable())
|
||||
|
||||
async def request_blob_from_peer(self, blob: 'AbstractBlob', peer: 'KademliaPeer', connection_id: int = 0,
|
||||
just_probe: bool = False):
|
||||
if blob.get_is_verified():
|
||||
return
|
||||
start = self.loop.time()
|
||||
bytes_received, protocol = await request_blob(
|
||||
self.loop, blob if not just_probe else None, peer.address, peer.tcp_port, self.config.peer_connect_timeout,
|
||||
self.config.blob_download_timeout, connected_protocol=self.connections.get(peer),
|
||||
connection_id=connection_id, connection_manager=self.blob_manager.connection_manager
|
||||
)
|
||||
if not bytes_received and not protocol and peer not in self.connection_failures:
|
||||
self.connection_failures.add(peer)
|
||||
if not protocol and peer not in self.ignored:
|
||||
self.ignored[peer] = self.loop.time()
|
||||
log.debug("drop peer %s:%i", peer.address, peer.tcp_port)
|
||||
self.failures[peer] = self.failures.get(peer, 0) + 1
|
||||
if peer in self.connections:
|
||||
del self.connections[peer]
|
||||
elif protocol:
|
||||
log.debug("keep peer %s:%i", peer.address, peer.tcp_port)
|
||||
self.failures[peer] = 0
|
||||
self.connections[peer] = protocol
|
||||
elapsed = self.loop.time() - start
|
||||
self.scores[peer] = bytes_received / elapsed if bytes_received and elapsed else 1
|
||||
|
||||
async def new_peer_or_finished(self):
|
||||
active_tasks = list(self.active_connections.values()) + [asyncio.create_task(asyncio.sleep(1))]
|
||||
await asyncio.wait(active_tasks, return_when='FIRST_COMPLETED')
|
||||
|
||||
def cleanup_active(self):
|
||||
if not self.active_connections and not self.connections:
|
||||
self.clearbanned()
|
||||
to_remove = [peer for (peer, task) in self.active_connections.items() if task.done()]
|
||||
for peer in to_remove:
|
||||
del self.active_connections[peer]
|
||||
|
||||
def clearbanned(self):
|
||||
now = self.loop.time()
|
||||
self.ignored = {
|
||||
peer: when for (peer, when) in self.ignored.items()
|
||||
if (now - when) < min(30.0, (self.failures.get(peer, 0) ** self.BAN_FACTOR))
|
||||
}
|
||||
|
||||
@cache_concurrent
|
||||
async def download_blob(self, blob_hash: str, length: typing.Optional[int] = None,
|
||||
connection_id: int = 0) -> 'AbstractBlob':
|
||||
blob = self.blob_manager.get_blob(blob_hash, length)
|
||||
if blob.get_is_verified():
|
||||
return blob
|
||||
self.is_running.set()
|
||||
try:
|
||||
while not blob.get_is_verified() and self.is_running.is_set():
|
||||
batch: typing.Set['KademliaPeer'] = set(self.connections.keys())
|
||||
while not self.peer_queue.empty():
|
||||
batch.update(self.peer_queue.get_nowait())
|
||||
log.debug(
|
||||
"%s running, %d peers, %d ignored, %d active, %s connections", blob_hash[:6],
|
||||
len(batch), len(self.ignored), len(self.active_connections), len(self.connections)
|
||||
)
|
||||
for peer in sorted(batch, key=lambda peer: self.scores.get(peer, 0), reverse=True):
|
||||
if peer in self.ignored:
|
||||
continue
|
||||
if peer in self.active_connections or not self.should_race_continue(blob):
|
||||
continue
|
||||
log.debug("request %s from %s:%i", blob_hash[:8], peer.address, peer.tcp_port)
|
||||
t = self.loop.create_task(self.request_blob_from_peer(blob, peer, connection_id))
|
||||
self.active_connections[peer] = t
|
||||
self.peer_queue.put_nowait(list(batch))
|
||||
await self.new_peer_or_finished()
|
||||
self.cleanup_active()
|
||||
log.debug("downloaded %s", blob_hash[:8])
|
||||
return blob
|
||||
finally:
|
||||
blob.close()
|
||||
if self.loop.is_running():
|
||||
self.loop.call_soon(self.cleanup_active)
|
||||
|
||||
def close(self):
|
||||
self.connection_failures.clear()
|
||||
self.scores.clear()
|
||||
self.ignored.clear()
|
||||
self.is_running.clear()
|
||||
for protocol in self.connections.values():
|
||||
protocol.close()
|
||||
|
||||
|
||||
async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node',
|
||||
blob_hash: str) -> 'AbstractBlob':
|
||||
search_queue = asyncio.Queue(maxsize=config.max_connections_per_download)
|
||||
search_queue.put_nowait(blob_hash)
|
||||
peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue)
|
||||
fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers)
|
||||
if fixed_peers:
|
||||
loop.call_later(config.fixed_peer_delay, peer_queue.put_nowait, fixed_peers)
|
||||
downloader = BlobDownloader(loop, config, blob_manager, peer_queue)
|
||||
try:
|
||||
return await downloader.download_blob(blob_hash)
|
||||
finally:
|
||||
if accumulate_task and not accumulate_task.done():
|
||||
accumulate_task.cancel()
|
||||
downloader.close()
|
|
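download_blob() above races connections but always walks each batch in order of measured throughput (self.scores maps a peer to bytes per second); the ranking itself is just:

def rank_peers(batch, scores):
    # highest observed bytes-per-second first; peers never measured default to 0
    return sorted(batch, key=lambda peer: scores.get(peer, 0), reverse=True)

# e.g. rank_peers(["peer_a", "peer_b"], {"peer_b": 1500.0}) == ["peer_b", "peer_a"]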
@ -1,194 +0,0 @@
|
|||
import asyncio
|
||||
import binascii
|
||||
import logging
|
||||
import socket
|
||||
import typing
|
||||
from json.decoder import JSONDecodeError
|
||||
from lbry.blob_exchange.serialization import BlobResponse, BlobRequest, blob_response_types
|
||||
from lbry.blob_exchange.serialization import BlobAvailabilityResponse, BlobPriceResponse, BlobDownloadResponse, \
|
||||
BlobPaymentAddressResponse
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.blob.blob_manager import BlobManager
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# a standard request will be 295 bytes
|
||||
MAX_REQUEST_SIZE = 1200
|
||||
|
||||
|
||||
class BlobServerProtocol(asyncio.Protocol):
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, blob_manager: 'BlobManager', lbrycrd_address: str,
|
||||
idle_timeout: float = 30.0, transfer_timeout: float = 60.0):
|
||||
self.loop = loop
|
||||
self.blob_manager = blob_manager
|
||||
self.idle_timeout = idle_timeout
|
||||
self.transfer_timeout = transfer_timeout
|
||||
self.server_task: typing.Optional[asyncio.Task] = None
|
||||
self.started_listening = asyncio.Event()
|
||||
self.buf = b''
|
||||
self.transport: typing.Optional[asyncio.Transport] = None
|
||||
self.lbrycrd_address = lbrycrd_address
|
||||
self.peer_address_and_port: typing.Optional[str] = None
|
||||
self.started_transfer = asyncio.Event()
|
||||
self.transfer_finished = asyncio.Event()
|
||||
self.close_on_idle_task: typing.Optional[asyncio.Task] = None
|
||||
|
||||
async def close_on_idle(self):
|
||||
while self.transport:
|
||||
try:
|
||||
await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout)
|
||||
except asyncio.TimeoutError:
|
||||
log.debug("closing idle connection from %s", self.peer_address_and_port)
|
||||
return self.close()
|
||||
self.started_transfer.clear()
|
||||
await self.transfer_finished.wait()
|
||||
self.transfer_finished.clear()
|
||||
|
||||
def close(self):
|
||||
if self.transport:
|
||||
self.transport.close()
|
||||
|
||||
def connection_made(self, transport):
|
||||
self.transport = transport
|
||||
self.close_on_idle_task = self.loop.create_task(self.close_on_idle())
|
||||
self.peer_address_and_port = "%s:%i" % self.transport.get_extra_info('peername')
|
||||
self.blob_manager.connection_manager.connection_received(self.peer_address_and_port)
|
||||
log.debug("received connection from %s", self.peer_address_and_port)
|
||||
|
||||
def connection_lost(self, exc: typing.Optional[Exception]) -> None:
|
||||
log.debug("lost connection from %s", self.peer_address_and_port)
|
||||
self.blob_manager.connection_manager.incoming_connection_lost(self.peer_address_and_port)
|
||||
self.transport = None
|
||||
if self.close_on_idle_task and not self.close_on_idle_task.done():
|
||||
self.close_on_idle_task.cancel()
|
||||
self.close_on_idle_task = None
|
||||
|
||||
def send_response(self, responses: typing.List[blob_response_types]):
|
||||
to_send = []
|
||||
while responses:
|
||||
to_send.append(responses.pop())
|
||||
serialized = BlobResponse(to_send).serialize()
|
||||
self.transport.write(serialized)
|
||||
self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, len(serialized))
|
||||
|
||||
async def handle_request(self, request: BlobRequest):
|
||||
addr = self.transport.get_extra_info('peername')
|
||||
peer_address, peer_port = addr
|
||||
|
||||
responses = []
|
||||
address_request = request.get_address_request()
|
||||
if address_request:
|
||||
responses.append(BlobPaymentAddressResponse(lbrycrd_address=self.lbrycrd_address))
|
||||
availability_request = request.get_availability_request()
|
||||
if availability_request:
|
||||
responses.append(BlobAvailabilityResponse(available_blobs=list(set(
|
||||
filter(lambda blob_hash: blob_hash in self.blob_manager.completed_blob_hashes,
|
||||
availability_request.requested_blobs)
|
||||
))))
|
||||
price_request = request.get_price_request()
|
||||
if price_request:
|
||||
responses.append(BlobPriceResponse(blob_data_payment_rate='RATE_ACCEPTED'))
|
||||
download_request = request.get_blob_request()
|
||||
|
||||
if download_request:
|
||||
blob = self.blob_manager.get_blob(download_request.requested_blob)
|
||||
if blob.get_is_verified():
|
||||
incoming_blob = {'blob_hash': blob.blob_hash, 'length': blob.length}
|
||||
responses.append(BlobDownloadResponse(incoming_blob=incoming_blob))
|
||||
self.send_response(responses)
|
||||
blob_hash = blob.blob_hash[:8]
|
||||
log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
|
||||
self.started_transfer.set()
|
||||
try:
|
||||
sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout)
|
||||
if sent and sent > 0:
|
||||
self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent)
|
||||
log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port)
|
||||
else:
|
||||
self.close()
|
||||
log.debug("stopped sending %s to %s:%i", blob_hash, peer_address, peer_port)
|
||||
return
|
||||
except (OSError, ValueError, asyncio.TimeoutError) as err:
|
||||
if isinstance(err, asyncio.TimeoutError):
|
||||
log.debug("timed out sending blob %s to %s", blob_hash, peer_address)
|
||||
else:
|
||||
log.warning("could not read blob %s to send %s:%i", blob_hash, peer_address, peer_port)
|
||||
self.close()
|
||||
return
|
||||
finally:
|
||||
self.transfer_finished.set()
|
||||
else:
|
||||
log.info("don't have %s to send %s:%i", blob.blob_hash[:8], peer_address, peer_port)
|
||||
if responses and not self.transport.is_closing():
|
||||
self.send_response(responses)
|
||||
|
||||
def data_received(self, data):
|
||||
request = None
|
||||
if len(self.buf) + len(data or b'') >= MAX_REQUEST_SIZE:
|
||||
log.warning("request from %s is too large", self.peer_address_and_port)
|
||||
self.close()
|
||||
return
|
||||
if data:
|
||||
self.blob_manager.connection_manager.received_data(self.peer_address_and_port, len(data))
|
||||
_, separator, remainder = data.rpartition(b'}')
|
||||
if not separator:
|
||||
self.buf += data
|
||||
return
|
||||
try:
|
||||
request = BlobRequest.deserialize(self.buf + data)
|
||||
self.buf = remainder
|
||||
except (UnicodeDecodeError, JSONDecodeError):
|
||||
log.error("request from %s is not valid json (%i bytes): %s", self.peer_address_and_port,
|
||||
len(self.buf + data), '' if not data else binascii.hexlify(self.buf + data).decode())
|
||||
self.close()
|
||||
return
|
||||
if not request.requests:
|
||||
log.error("failed to decode request from %s (%i bytes): %s", self.peer_address_and_port,
|
||||
len(self.buf + data), '' if not data else binascii.hexlify(self.buf + data).decode())
|
||||
self.close()
|
||||
return
|
||||
self.loop.create_task(self.handle_request(request))
|
||||
|
||||
|
||||
class BlobServer:
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, blob_manager: 'BlobManager', lbrycrd_address: str,
|
||||
idle_timeout: float = 30.0, transfer_timeout: float = 60.0):
|
||||
self.loop = loop
|
||||
self.blob_manager = blob_manager
|
||||
self.server_task: typing.Optional[asyncio.Task] = None
|
||||
self.started_listening = asyncio.Event()
|
||||
self.lbrycrd_address = lbrycrd_address
|
||||
self.idle_timeout = idle_timeout
|
||||
self.transfer_timeout = transfer_timeout
|
||||
self.server_protocol_class = BlobServerProtocol
|
||||
|
||||
def start_server(self, port: int, interface: typing.Optional[str] = '0.0.0.0'):
|
||||
if self.server_task is not None:
|
||||
raise Exception("already running")
|
||||
|
||||
async def _start_server():
|
||||
# checking if the port is in use
|
||||
# thx https://stackoverflow.com/a/52872579
|
||||
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
||||
if s.connect_ex(('localhost', port)) == 0:
|
||||
# the port is already in use!
|
||||
log.error("Failed to bind TCP %s:%d", interface, port)
|
||||
|
||||
server = await self.loop.create_server(
|
||||
lambda: self.server_protocol_class(self.loop, self.blob_manager, self.lbrycrd_address,
|
||||
self.idle_timeout, self.transfer_timeout),
|
||||
interface, port
|
||||
)
|
||||
self.started_listening.set()
|
||||
log.info("Blob server listening on TCP %s:%i", interface, port)
|
||||
async with server:
|
||||
await server.serve_forever()
|
||||
|
||||
self.server_task = self.loop.create_task(_start_server())
|
||||
|
||||
def stop_server(self):
|
||||
if self.server_task:
|
||||
self.server_task.cancel()
|
||||
self.server_task = None
|
||||
log.info("Stopped blob server")
|
|
@ -1,105 +0,0 @@
|
|||
import time
|
||||
import asyncio
|
||||
import typing
|
||||
import collections
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
CONNECTED_EVENT = "connected"
|
||||
DISCONNECTED_EVENT = "disconnected"
|
||||
TRANSFERRED_EVENT = "transferred"
|
||||
|
||||
|
||||
class ConnectionManager:
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop):
|
||||
self.loop = loop
|
||||
self.incoming_connected: typing.Set[str] = set()
|
||||
self.incoming: typing.DefaultDict[str, int] = collections.defaultdict(int)
|
||||
self.outgoing_connected: typing.Set[str] = set()
|
||||
self.outgoing: typing.DefaultDict[str, int] = collections.defaultdict(int)
|
||||
self._max_incoming_mbs = 0.0
|
||||
self._max_outgoing_mbs = 0.0
|
||||
self._status = {}
|
||||
self._running = False
|
||||
self._task: typing.Optional[asyncio.Task] = None
|
||||
|
||||
@property
|
||||
def status(self):
|
||||
return self._status
|
||||
|
||||
def sent_data(self, host_and_port: str, size: int):
|
||||
if self._running:
|
||||
self.outgoing[host_and_port] += size
|
||||
|
||||
def received_data(self, host_and_port: str, size: int):
|
||||
if self._running:
|
||||
self.incoming[host_and_port] += size
|
||||
|
||||
def connection_made(self, host_and_port: str):
|
||||
if self._running:
|
||||
self.outgoing_connected.add(host_and_port)
|
||||
|
||||
def connection_received(self, host_and_port: str):
|
||||
# self.incoming_connected.add(host_and_port)
|
||||
pass
|
||||
|
||||
def outgoing_connection_lost(self, host_and_port: str):
|
||||
if self._running and host_and_port in self.outgoing_connected:
|
||||
self.outgoing_connected.remove(host_and_port)
|
||||
|
||||
def incoming_connection_lost(self, host_and_port: str):
|
||||
if self._running and host_and_port in self.incoming_connected:
|
||||
self.incoming_connected.remove(host_and_port)
|
||||
|
||||
async def _update(self):
|
||||
self._status = {
|
||||
'incoming_bps': {},
|
||||
'outgoing_bps': {},
|
||||
'total_incoming_mbs': 0.0,
|
||||
'total_outgoing_mbs': 0.0,
|
||||
'total_sent': 0,
|
||||
'total_received': 0,
|
||||
'max_incoming_mbs': 0.0,
|
||||
'max_outgoing_mbs': 0.0
|
||||
}
|
||||
|
||||
while True:
|
||||
last = time.perf_counter()
|
||||
await asyncio.sleep(0.1)
|
||||
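# per-peer byte counters are drained on every pass; the *_bps values below are bytes per second over the ~0.1s sample window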
self._status['incoming_bps'].clear()
|
||||
self._status['outgoing_bps'].clear()
|
||||
now = time.perf_counter()
|
||||
while self.outgoing:
|
||||
k, sent = self.outgoing.popitem()
|
||||
self._status['total_sent'] += sent
|
||||
self._status['outgoing_bps'][k] = sent / (now - last)
|
||||
while self.incoming:
|
||||
k, received = self.incoming.popitem()
|
||||
self._status['total_received'] += received
|
||||
self._status['incoming_bps'][k] = received / (now - last)
|
||||
self._status['total_outgoing_mbs'] = int(sum(list(self._status['outgoing_bps'].values())
|
||||
)) / 1000000.0
|
||||
self._status['total_incoming_mbs'] = int(sum(list(self._status['incoming_bps'].values())
|
||||
)) / 1000000.0
|
||||
self._max_incoming_mbs = max(self._max_incoming_mbs, self._status['total_incoming_mbs'])
|
||||
self._max_outgoing_mbs = max(self._max_outgoing_mbs, self._status['total_outgoing_mbs'])
|
||||
self._status['max_incoming_mbs'] = self._max_incoming_mbs
|
||||
self._status['max_outgoing_mbs'] = self._max_outgoing_mbs
|
||||
|
||||
def stop(self):
|
||||
if self._task:
|
||||
self._task.cancel()
|
||||
self._task = None
|
||||
self.outgoing.clear()
|
||||
self.outgoing_connected.clear()
|
||||
self.incoming.clear()
|
||||
self.incoming_connected.clear()
|
||||
self._status.clear()
|
||||
self._running = False
|
||||
|
||||
def start(self):
|
||||
self.stop()
|
||||
self._running = True
|
||||
self._task = self.loop.create_task(self._update())
|
|
@ -1,2 +0,0 @@
CENT = 1000000
COIN = 100*CENT
|
|
@ -1,86 +0,0 @@
|
|||
from lbry.crypto.hash import double_sha256
|
||||
from lbry.crypto.util import bytes_to_int, int_to_bytes
|
||||
|
||||
|
||||
class Base58Error(Exception):
|
||||
""" Exception used for Base58 errors. """
|
||||
|
||||
|
||||
class Base58:
|
||||
""" Class providing base 58 functionality. """
|
||||
|
||||
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
|
||||
assert len(chars) == 58
|
||||
char_map = {c: n for n, c in enumerate(chars)}
|
||||
|
||||
@classmethod
|
||||
def char_value(cls, c):
|
||||
val = cls.char_map.get(c)
|
||||
if val is None:
|
||||
raise Base58Error(f'invalid base 58 character "{c}"')
|
||||
return val
|
||||
|
||||
@classmethod
|
||||
def decode(cls, txt):
|
||||
""" Decodes txt into a big-endian bytearray. """
|
||||
if isinstance(txt, memoryview):
|
||||
txt = str(txt)
|
||||
|
||||
if isinstance(txt, bytes):
|
||||
txt = txt.decode()
|
||||
|
||||
if not isinstance(txt, str):
|
||||
raise TypeError('a string is required')
|
||||
|
||||
if not txt:
|
||||
raise Base58Error('string cannot be empty')
|
||||
|
||||
value = 0
|
||||
for c in txt:
|
||||
value = value * 58 + cls.char_value(c)
|
||||
|
||||
result = int_to_bytes(value)
|
||||
|
||||
# Prepend leading zero bytes if necessary
|
||||
count = 0
|
||||
for c in txt:
|
||||
if c != '1':
|
||||
break
|
||||
count += 1
|
||||
if count:
|
||||
result = bytes((0,)) * count + result
|
||||
|
||||
return result
|
||||
|
||||
@classmethod
|
||||
def encode(cls, be_bytes):
|
||||
"""Converts a big-endian bytearray into a base58 string."""
|
||||
value = bytes_to_int(be_bytes)
|
||||
|
||||
txt = ''
|
||||
while value:
|
||||
value, mod = divmod(value, 58)
|
||||
txt += cls.chars[mod]
|
||||
|
||||
for byte in be_bytes:
|
||||
if byte != 0:
|
||||
break
|
||||
txt += '1'
|
||||
|
||||
return txt[::-1]
|
||||
|
||||
@classmethod
|
||||
def decode_check(cls, txt, hash_fn=double_sha256):
|
||||
""" Decodes a Base58Check-encoded string to a payload. The version prefixes it. """
|
||||
be_bytes = cls.decode(txt)
|
||||
result, check = be_bytes[:-4], be_bytes[-4:]
|
||||
if check != hash_fn(result)[:4]:
|
||||
raise Base58Error(f'invalid base 58 checksum for {txt}')
|
||||
return result
|
||||
|
||||
@classmethod
|
||||
def encode_check(cls, payload, hash_fn=double_sha256):
|
||||
""" Encodes a payload bytearray (which includes the version byte(s))
|
||||
into a Base58Check string."""
|
||||
be_bytes = payload + hash_fn(payload)[:4]
|
||||
return cls.encode(be_bytes)
|
|
@ -1,71 +0,0 @@
|
|||
import os
|
||||
import base64
|
||||
import typing
|
||||
from cryptography.hazmat.primitives.kdf.scrypt import Scrypt
|
||||
from cryptography.hazmat.primitives.ciphers import Cipher, modes
|
||||
from cryptography.hazmat.primitives.ciphers.algorithms import AES
|
||||
from cryptography.hazmat.primitives.padding import PKCS7
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
|
||||
from lbry.error import InvalidPasswordError
|
||||
from lbry.crypto.hash import double_sha256
|
||||
|
||||
|
||||
def aes_encrypt(secret: str, value: str, init_vector: bytes = None) -> str:
|
||||
if init_vector is not None:
|
||||
assert len(init_vector) == 16
|
||||
else:
|
||||
init_vector = os.urandom(16)
|
||||
key = double_sha256(secret.encode())
|
||||
encryptor = Cipher(AES(key), modes.CBC(init_vector), default_backend()).encryptor()
|
||||
padder = PKCS7(AES.block_size).padder()
|
||||
padded_data = padder.update(value.encode()) + padder.finalize()
|
||||
encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
|
||||
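# the 16-byte IV is prepended to the ciphertext so aes_decrypt can recover it from the first 16 decoded bytes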
return base64.b64encode(init_vector + encrypted_data).decode()
|
||||
|
||||
|
||||
def aes_decrypt(secret: str, value: str) -> typing.Tuple[str, bytes]:
|
||||
try:
|
||||
data = base64.b64decode(value.encode())
|
||||
key = double_sha256(secret.encode())
|
||||
init_vector, data = data[:16], data[16:]
|
||||
decryptor = Cipher(AES(key), modes.CBC(init_vector), default_backend()).decryptor()
|
||||
unpadder = PKCS7(AES.block_size).unpadder()
|
||||
result = unpadder.update(decryptor.update(data)) + unpadder.finalize()
|
||||
return result.decode(), init_vector
|
||||
except UnicodeDecodeError:
|
||||
raise InvalidPasswordError()
|
||||
except ValueError as e:
|
||||
if e.args[0] == 'Invalid padding bytes.':
|
||||
raise InvalidPasswordError()
|
||||
raise
|
||||
|
||||
|
||||
def better_aes_encrypt(secret: str, value: bytes) -> bytes:
|
||||
init_vector = os.urandom(16)
|
||||
key = scrypt(secret.encode(), salt=init_vector)
|
||||
encryptor = Cipher(AES(key), modes.CBC(init_vector), default_backend()).encryptor()
|
||||
padder = PKCS7(AES.block_size).padder()
|
||||
padded_data = padder.update(value) + padder.finalize()
|
||||
encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
|
||||
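# the b's:8192:16:1:' prefix records the scrypt parameters used (n=8192, r=16, p=1); the IV also serves as the scrypt salt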
return base64.b64encode(b's:8192:16:1:' + init_vector + encrypted_data)
|
||||
|
||||
|
||||
def better_aes_decrypt(secret: str, value: bytes) -> bytes:
|
||||
try:
|
||||
data = base64.b64decode(value)
|
||||
_, scrypt_n, scrypt_r, scrypt_p, data = data.split(b':', maxsplit=4)
|
||||
init_vector, data = data[:16], data[16:]
|
||||
key = scrypt(secret.encode(), init_vector, int(scrypt_n), int(scrypt_r), int(scrypt_p))
|
||||
decryptor = Cipher(AES(key), modes.CBC(init_vector), default_backend()).decryptor()
|
||||
unpadder = PKCS7(AES.block_size).unpadder()
|
||||
return unpadder.update(decryptor.update(data)) + unpadder.finalize()
|
||||
except ValueError as e:
|
||||
if e.args[0] == 'Invalid padding bytes.':
|
||||
raise InvalidPasswordError()
|
||||
raise
|
||||
|
||||
|
||||
def scrypt(passphrase, salt, scrypt_n=1<<13, scrypt_r=16, scrypt_p=1):
|
||||
kdf = Scrypt(salt, length=32, n=scrypt_n, r=scrypt_r, p=scrypt_p, backend=default_backend())
|
||||
return kdf.derive(passphrase)
|
|
@ -1,47 +0,0 @@
import hashlib
import hmac
from binascii import hexlify, unhexlify


def sha256(x):
    """ Simple wrapper of hashlib sha256. """
    return hashlib.sha256(x).digest()


def sha512(x):
    """ Simple wrapper of hashlib sha512. """
    return hashlib.sha512(x).digest()


def ripemd160(x):
    """ Simple wrapper of hashlib ripemd160. """
    h = hashlib.new('ripemd160')
    h.update(x)
    return h.digest()


def double_sha256(x):
    """ SHA-256 of SHA-256, as used extensively in bitcoin. """
    return sha256(sha256(x))


def hmac_sha512(key, msg):
    """ Use SHA-512 to provide an HMAC. """
    return hmac.new(key, msg, hashlib.sha512).digest()


def hash160(x):
    """ RIPEMD-160 of SHA-256.
        Used to make bitcoin addresses from pubkeys. """
    return ripemd160(sha256(x))


def hash_to_hex_str(x):
    """ Convert a big-endian binary hash to displayed hex string.
        Display form of a binary hash is reversed and converted to hex. """
    return hexlify(x[::-1])


def hex_str_to_hash(x):
    """ Convert a displayed hex string to a binary hash. """
    return unhexlify(x)[::-1]
|
|
@ -1,13 +0,0 @@
from binascii import unhexlify, hexlify


def bytes_to_int(be_bytes):
    """ Interprets a big-endian sequence of bytes as an integer. """
    return int(hexlify(be_bytes), 16)


def int_to_bytes(value):
    """ Converts an integer to a big-endian sequence of bytes. """
    length = (value.bit_length() + 7) // 8
    s = '%x' % value
    return unhexlify(('0' * (len(s) % 2) + s).zfill(length * 2))
|
|
@ -1,78 +0,0 @@
|
|||
import asyncio
|
||||
import typing
|
||||
import logging
|
||||
|
||||
from prometheus_client import Counter, Gauge
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.dht.node import Node
|
||||
from lbry.extras.daemon.storage import SQLiteStorage
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BlobAnnouncer:
|
||||
announcements_sent_metric = Counter(
|
||||
"announcements_sent", "Number of announcements sent and their respective status.", namespace="dht_node",
|
||||
labelnames=("peers", "error"),
|
||||
)
|
||||
announcement_queue_size_metric = Gauge(
|
||||
"announcement_queue_size", "Number of hashes waiting to be announced.", namespace="dht_node",
|
||||
labelnames=("scope",)
|
||||
)
|
||||
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, node: 'Node', storage: 'SQLiteStorage'):
|
||||
self.loop = loop
|
||||
self.node = node
|
||||
self.storage = storage
|
||||
self.announce_task: asyncio.Task = None
|
||||
self.announce_queue: typing.List[str] = []
|
||||
self._done = asyncio.Event()
|
||||
self.announced = set()
|
||||
|
||||
async def _run_consumer(self):
|
||||
while self.announce_queue:
|
||||
try:
|
||||
blob_hash = self.announce_queue.pop()
|
||||
peers = len(await self.node.announce_blob(blob_hash))
|
||||
self.announcements_sent_metric.labels(peers=peers, error=False).inc()
|
||||
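# an announcement only counts as successful if it was stored to more than 4 peers; otherwise the blob stays eligible and is retried in a later round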
if peers > 4:
|
||||
self.announced.add(blob_hash)
|
||||
else:
|
||||
log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
|
||||
except Exception as err:
|
||||
self.announcements_sent_metric.labels(peers=0, error=True).inc()
|
||||
log.warning("error announcing %s: %s", blob_hash[:8], str(err))
|
||||
|
||||
async def _announce(self, batch_size: typing.Optional[int] = 10):
|
||||
while batch_size:
|
||||
if not self.node.joined.is_set():
|
||||
await self.node.joined.wait()
|
||||
await asyncio.sleep(60)
|
||||
if not self.node.protocol.routing_table.get_peers():
|
||||
log.warning("No peers in DHT, announce round skipped")
|
||||
continue
|
||||
self.announce_queue.extend(await self.storage.get_blobs_to_announce())
|
||||
self.announcement_queue_size_metric.labels(scope="global").set(len(self.announce_queue))
|
||||
log.debug("announcer task wake up, %d blobs to announce", len(self.announce_queue))
|
||||
while len(self.announce_queue) > 0:
|
||||
log.info("%i blobs to announce", len(self.announce_queue))
|
||||
await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)])
|
||||
announced = list(filter(None, self.announced))
|
||||
if announced:
|
||||
await self.storage.update_last_announced_blobs(announced)
|
||||
log.info("announced %i blobs", len(announced))
|
||||
self.announced.clear()
|
||||
self._done.set()
|
||||
self._done.clear()
|
||||
|
||||
def start(self, batch_size: typing.Optional[int] = 10):
|
||||
assert not self.announce_task or self.announce_task.done(), "already running"
|
||||
self.announce_task = self.loop.create_task(self._announce(batch_size))
|
||||
|
||||
def stop(self):
|
||||
if self.announce_task and not self.announce_task.done():
|
||||
self.announce_task.cancel()
|
||||
|
||||
def wait(self):
|
||||
return self._done.wait()
|
|
@ -1,40 +0,0 @@
|
|||
import hashlib
|
||||
import os
|
||||
|
||||
HASH_CLASS = hashlib.sha384 # pylint: disable=invalid-name
|
||||
HASH_LENGTH = HASH_CLASS().digest_size
|
||||
HASH_BITS = HASH_LENGTH * 8
|
||||
ALPHA = 5
|
||||
K = 8
|
||||
SPLIT_BUCKETS_UNDER_INDEX = 1
|
||||
REPLACEMENT_CACHE_SIZE = 8
|
||||
RPC_TIMEOUT = 5.0
|
||||
RPC_ATTEMPTS = 5
|
||||
RPC_ATTEMPTS_PRUNING_WINDOW = 600
|
||||
ITERATIVE_LOOKUP_DELAY = RPC_TIMEOUT / 2.0 # TODO: use config val / 2 if rpc timeout is provided
|
||||
REFRESH_INTERVAL = 3600 # 1 hour
|
||||
REPLICATE_INTERVAL = REFRESH_INTERVAL
|
||||
DATA_EXPIRATION = 86400 # 24 hours
|
||||
TOKEN_SECRET_REFRESH_INTERVAL = 300 # 5 minutes
|
||||
MAYBE_PING_DELAY = 300 # 5 minutes
|
||||
CHECK_REFRESH_INTERVAL = REFRESH_INTERVAL / 5
|
||||
RPC_ID_LENGTH = 20
|
||||
PROTOCOL_VERSION = 1
|
||||
MSG_SIZE_LIMIT = 1400
|
||||
|
||||
|
||||
def digest(data: bytes) -> bytes:
|
||||
h = HASH_CLASS()
|
||||
h.update(data)
|
||||
return h.digest()
|
||||
|
||||
|
||||
def generate_id(num=None) -> bytes:
|
||||
if num is not None:
|
||||
return digest(str(num).encode())
|
||||
else:
|
||||
return digest(os.urandom(32))
|
||||
|
||||
|
||||
def generate_rpc_id(num=None) -> bytes:
|
||||
return generate_id(num)[:RPC_ID_LENGTH]
|
lbry/dht/node.py
|
@ -1,282 +0,0 @@
|
|||
import logging
|
||||
import asyncio
|
||||
import typing
|
||||
import socket
|
||||
|
||||
from prometheus_client import Gauge
|
||||
|
||||
from lbry.utils import aclosing, resolve_host
|
||||
from lbry.dht import constants
|
||||
from lbry.dht.peer import make_kademlia_peer
|
||||
from lbry.dht.protocol.distance import Distance
|
||||
from lbry.dht.protocol.iterative_find import IterativeNodeFinder, IterativeValueFinder
|
||||
from lbry.dht.protocol.protocol import KademliaProtocol
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.dht.peer import PeerManager
|
||||
from lbry.dht.peer import KademliaPeer
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Node:
|
||||
storing_peers_metric = Gauge(
|
||||
"storing_peers", "Number of peers storing blobs announced to this node", namespace="dht_node",
|
||||
labelnames=("scope",),
|
||||
)
|
||||
stored_blob_with_x_bytes_colliding = Gauge(
|
||||
"stored_blobs_x_bytes_colliding", "Number of blobs with at least X bytes colliding with this node id prefix",
|
||||
namespace="dht_node", labelnames=("amount",)
|
||||
)
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, udp_port: int,
|
||||
internal_udp_port: int, peer_port: int, external_ip: str, rpc_timeout: float = constants.RPC_TIMEOUT,
|
||||
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False,
|
||||
storage: typing.Optional['SQLiteStorage'] = None):
|
||||
self.loop = loop
|
||||
self.internal_udp_port = internal_udp_port
|
||||
self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout,
|
||||
split_buckets_under_index, is_bootstrap_node)
|
||||
self.listening_port: asyncio.DatagramTransport = None
|
||||
self.joined = asyncio.Event()
|
||||
self._join_task: asyncio.Task = None
|
||||
self._refresh_task: asyncio.Task = None
|
||||
self._storage = storage
|
||||
|
||||
@property
|
||||
def stored_blob_hashes(self):
|
||||
return self.protocol.data_store.keys()
|
||||
|
||||
async def refresh_node(self, force_once=False):
|
||||
while True:
|
||||
# remove peers with expired blob announcements from the datastore
|
||||
self.protocol.data_store.removed_expired_peers()
|
||||
|
||||
total_peers: typing.List['KademliaPeer'] = []
|
||||
# add all peers in the routing table
|
||||
total_peers.extend(self.protocol.routing_table.get_peers())
|
||||
# add all the peers who have announced blobs to us
|
||||
storing_peers = self.protocol.data_store.get_storing_contacts()
|
||||
self.storing_peers_metric.labels("global").set(len(storing_peers))
|
||||
total_peers.extend(storing_peers)
|
||||
|
||||
counts = {0: 0, 1: 0, 2: 0}
|
||||
node_id = self.protocol.node_id
|
||||
for blob_hash in self.protocol.data_store.keys():
|
||||
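# count how many leading bytes (0, 1 or 2) of each stored blob hash collide with this node's id prefix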
bytes_colliding = 0 if blob_hash[0] != node_id[0] else 2 if blob_hash[1] == node_id[1] else 1
|
||||
counts[bytes_colliding] += 1
|
||||
self.stored_blob_with_x_bytes_colliding.labels(amount=0).set(counts[0])
|
||||
self.stored_blob_with_x_bytes_colliding.labels(amount=1).set(counts[1])
|
||||
self.stored_blob_with_x_bytes_colliding.labels(amount=2).set(counts[2])
|
||||
|
||||
# get ids falling in the midpoint of each bucket that hasn't been recently updated
|
||||
node_ids = self.protocol.routing_table.get_refresh_list(0, True)
|
||||
|
||||
if self.protocol.routing_table.get_peers():
|
||||
# if we have node ids to look up, perform the iterative search until we have k results
|
||||
while node_ids:
|
||||
peers = await self.peer_search(node_ids.pop())
|
||||
total_peers.extend(peers)
|
||||
else:
|
||||
if force_once:
|
||||
break
|
||||
fut = asyncio.Future()
|
||||
self.loop.call_later(constants.REFRESH_INTERVAL // 4, fut.set_result, None)
|
||||
await fut
|
||||
continue
|
||||
|
||||
# ping the set of peers; upon success/failure the routing table and last replied/failed time will be updated
|
||||
to_ping = [peer for peer in set(total_peers) if self.protocol.peer_manager.peer_is_good(peer) is not True]
|
||||
if to_ping:
|
||||
self.protocol.ping_queue.enqueue_maybe_ping(*to_ping, delay=0)
|
||||
if self._storage:
|
||||
await self._storage.save_kademlia_peers(self.protocol.routing_table.get_peers())
|
||||
if force_once:
|
||||
break
|
||||
|
||||
fut = asyncio.Future()
|
||||
self.loop.call_later(constants.REFRESH_INTERVAL, fut.set_result, None)
|
||||
await fut
|
||||
|
||||
async def announce_blob(self, blob_hash: str) -> typing.List[bytes]:
|
||||
hash_value = bytes.fromhex(blob_hash)
|
||||
assert len(hash_value) == constants.HASH_LENGTH
|
||||
peers = await self.peer_search(hash_value)
|
||||
|
||||
if not self.protocol.external_ip:
|
||||
raise Exception("Cannot determine external IP")
|
||||
log.debug("Store to %i peers", len(peers))
|
||||
for peer in peers:
|
||||
log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
|
||||
stored_to_tup = await asyncio.gather(
|
||||
*(self.protocol.store_to_peer(hash_value, peer) for peer in peers)
|
||||
)
|
||||
stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
|
||||
if stored_to:
|
||||
log.debug(
|
||||
"Stored %s to %i of %i attempted peers", hash_value.hex()[:8],
|
||||
len(stored_to), len(peers)
|
||||
)
|
||||
else:
|
||||
log.debug("Failed announcing %s, stored to 0 peers", blob_hash[:8])
|
||||
return stored_to
|
||||
|
||||
def stop(self) -> None:
|
||||
if self.joined.is_set():
|
||||
self.joined.clear()
|
||||
if self._join_task:
|
||||
self._join_task.cancel()
|
||||
if self._refresh_task and not (self._refresh_task.done() or self._refresh_task.cancelled()):
|
||||
self._refresh_task.cancel()
|
||||
if self.protocol and self.protocol.ping_queue.running:
|
||||
self.protocol.ping_queue.stop()
|
||||
self.protocol.stop()
|
||||
if self.listening_port is not None:
|
||||
self.listening_port.close()
|
||||
self._join_task = None
|
||||
self.listening_port = None
|
||||
log.info("Stopped DHT node")
|
||||
|
||||
async def start_listening(self, interface: str = '0.0.0.0') -> None:
|
||||
if not self.listening_port:
|
||||
self.listening_port, _ = await self.loop.create_datagram_endpoint(
|
||||
lambda: self.protocol, (interface, self.internal_udp_port)
|
||||
)
|
||||
log.info("DHT node listening on UDP %s:%i", interface, self.internal_udp_port)
|
||||
self.protocol.start()
|
||||
else:
|
||||
log.warning("Already bound to port %s", self.listening_port)
|
||||
|
||||
async def join_network(self, interface: str = '0.0.0.0',
|
||||
known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
|
||||
def peers_from_urls(urls: typing.Optional[typing.List[typing.Tuple[bytes, str, int, int]]]):
|
||||
peer_addresses = []
|
||||
for node_id, address, udp_port, tcp_port in urls:
|
||||
if (node_id, address, udp_port, tcp_port) not in peer_addresses and \
|
||||
(address, udp_port) != (self.protocol.external_ip, self.protocol.udp_port):
|
||||
peer_addresses.append((node_id, address, udp_port, tcp_port))
|
||||
return [make_kademlia_peer(*peer_address) for peer_address in peer_addresses]
|
||||
|
||||
if not self.listening_port:
|
||||
await self.start_listening(interface)
|
||||
self.protocol.ping_queue.start()
|
||||
self._refresh_task = self.loop.create_task(self.refresh_node())
|
||||
|
||||
while True:
|
||||
if self.protocol.routing_table.get_peers():
|
||||
if not self.joined.is_set():
|
||||
self.joined.set()
|
||||
log.info(
|
||||
"joined dht, %i peers known in %i buckets", len(self.protocol.routing_table.get_peers()),
|
||||
self.protocol.routing_table.buckets_with_contacts()
|
||||
)
|
||||
else:
|
||||
if self.joined.is_set():
|
||||
self.joined.clear()
|
||||
seed_peers = peers_from_urls(
|
||||
await self._storage.get_persisted_kademlia_peers()
|
||||
) if self._storage else []
|
||||
if not seed_peers:
|
||||
try:
|
||||
seed_peers.extend(peers_from_urls([
|
||||
(None, await resolve_host(address, udp_port, 'udp'), udp_port, None)
|
||||
for address, udp_port in known_node_urls or []
|
||||
]))
|
||||
except socket.gaierror:
|
||||
await asyncio.sleep(30)
|
||||
continue
|
||||
|
||||
self.protocol.peer_manager.reset()
|
||||
self.protocol.ping_queue.enqueue_maybe_ping(*seed_peers, delay=0.0)
|
||||
await self.peer_search(self.protocol.node_id, shortlist=seed_peers, count=32)
|
||||
|
||||
await asyncio.sleep(1)
|
||||
|
||||
def start(self, interface: str, known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
|
||||
self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls))
|
||||
|
||||
def get_iterative_node_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
|
||||
max_results: int = constants.K) -> IterativeNodeFinder:
|
||||
shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
|
||||
return IterativeNodeFinder(self.loop, self.protocol, key, max_results, shortlist)
|
||||
|
||||
def get_iterative_value_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
|
||||
max_results: int = -1) -> IterativeValueFinder:
|
||||
shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
|
||||
return IterativeValueFinder(self.loop, self.protocol, key, max_results, shortlist)
|
||||
|
||||
async def peer_search(self, node_id: bytes, count=constants.K, max_results=constants.K * 2,
|
||||
shortlist: typing.Optional[typing.List['KademliaPeer']] = None
|
||||
) -> typing.List['KademliaPeer']:
|
||||
peers = []
|
||||
async with aclosing(self.get_iterative_node_finder(
|
||||
node_id, shortlist=shortlist, max_results=max_results)) as node_finder:
|
||||
async for iteration_peers in node_finder:
|
||||
peers.extend(iteration_peers)
|
||||
distance = Distance(node_id)
|
||||
peers.sort(key=lambda peer: distance(peer.node_id))
|
||||
return peers[:count]
|
||||
|
||||
async def _accumulate_peers_for_value(self, search_queue: asyncio.Queue, result_queue: asyncio.Queue):
|
||||
tasks = []
|
||||
try:
|
||||
while True:
|
||||
blob_hash = await search_queue.get()
|
||||
tasks.append(self.loop.create_task(self._peers_for_value_producer(blob_hash, result_queue)))
|
||||
finally:
|
||||
for task in tasks:
|
||||
task.cancel()
|
||||
|
||||
async def _peers_for_value_producer(self, blob_hash: str, result_queue: asyncio.Queue):
|
||||
async def put_into_result_queue_after_pong(_peer):
|
||||
try:
|
||||
await self.protocol.get_rpc_peer(_peer).ping()
|
||||
result_queue.put_nowait([_peer])
|
||||
log.debug("pong from %s:%i for %s", _peer.address, _peer.udp_port, blob_hash)
|
||||
except asyncio.TimeoutError:
|
||||
pass
|
||||
|
||||
# prioritize peers who reply to a dht ping first
|
||||
# this minimizes attempting to make tcp connections that won't work later to dead or unreachable peers
|
||||
async with aclosing(self.get_iterative_value_finder(bytes.fromhex(blob_hash))) as value_finder:
|
||||
async for results in value_finder:
|
||||
to_put = []
|
||||
for peer in results:
|
||||
if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
|
||||
continue
|
||||
is_good = self.protocol.peer_manager.peer_is_good(peer)
|
||||
if is_good:
|
||||
# the peer has replied recently over UDP, it can probably be reached on the TCP port
|
||||
to_put.append(peer)
|
||||
elif is_good is None:
|
||||
if not peer.udp_port:
|
||||
# TODO: use the same port for TCP and UDP
|
||||
# the udp port must be guessed
|
||||
# default to the ports being the same. if the TCP port appears to be <=0.48.0 default,
|
||||
# including on a network with several nodes, then assume the udp port is proportionately
|
||||
# based on a starting port of 4444
|
||||
udp_port_to_try = peer.tcp_port
|
||||
if 3400 > peer.tcp_port > 3332:
|
||||
udp_port_to_try = (peer.tcp_port - 3333) + 4444
|
||||
self.loop.create_task(put_into_result_queue_after_pong(
|
||||
make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)
|
||||
))
|
||||
else:
|
||||
self.loop.create_task(put_into_result_queue_after_pong(peer))
|
||||
else:
|
||||
# the peer is known to be bad/unreachable, skip trying to connect to it over TCP
|
||||
log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
|
||||
if to_put:
|
||||
result_queue.put_nowait(to_put)
|
||||
|
||||
def accumulate_peers(self, search_queue: asyncio.Queue,
|
||||
peer_queue: typing.Optional[asyncio.Queue] = None
|
||||
) -> typing.Tuple[asyncio.Queue, asyncio.Task]:
|
||||
queue = peer_queue or asyncio.Queue()
|
||||
return queue, self.loop.create_task(self._accumulate_peers_for_value(search_queue, queue))
|
||||
|
||||
|
||||
async def get_kademlia_peers_from_hosts(peer_list: typing.List[typing.Tuple[str, int]]) -> typing.List['KademliaPeer']:
|
||||
peer_address_list = [(await resolve_host(url, port, proto='tcp'), port) for url, port in peer_list]
|
||||
kademlia_peer_list = [make_kademlia_peer(None, address, None, tcp_port=port, allow_localhost=True)
|
||||
for address, port in peer_address_list]
|
||||
return kademlia_peer_list
|
|
@ -1,76 +0,0 @@
|
|||
import asyncio
|
||||
import typing
|
||||
|
||||
from lbry.dht import constants
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.dht.peer import KademliaPeer, PeerManager
|
||||
|
||||
|
||||
class DictDataStore:
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager'):
|
||||
# Dictionary format:
|
||||
# { <key>: [(<contact>, <age>), ...] }
|
||||
self._data_store: typing.Dict[bytes, typing.List[typing.Tuple['KademliaPeer', float]]] = {}
|
||||
|
||||
self.loop = loop
|
||||
self._peer_manager = peer_manager
|
||||
self.completed_blobs: typing.Set[str] = set()
|
||||
|
||||
def keys(self):
|
||||
return self._data_store.keys()
|
||||
|
||||
def __len__(self):
|
||||
return self._data_store.__len__()
|
||||
|
||||
def removed_expired_peers(self):
|
||||
now = self.loop.time()
|
||||
keys = list(self._data_store.keys())
|
||||
for key in keys:
|
||||
to_remove = []
|
||||
for (peer, ts) in self._data_store[key]:
|
||||
if ts + constants.DATA_EXPIRATION < now or self._peer_manager.peer_is_good(peer) is False:
|
||||
to_remove.append((peer, ts))
|
||||
for item in to_remove:
|
||||
self._data_store[key].remove(item)
|
||||
if not self._data_store[key]:
|
||||
del self._data_store[key]
|
||||
|
||||
def filter_bad_and_expired_peers(self, key: bytes) -> typing.Iterator['KademliaPeer']:
|
||||
"""
|
||||
Returns only non-expired and unknown/good peers
|
||||
"""
|
||||
for peer in self.filter_expired_peers(key):
|
||||
if self._peer_manager.peer_is_good(peer) is not False:
|
||||
yield peer
|
||||
|
||||
def filter_expired_peers(self, key: bytes) -> typing.Iterator['KademliaPeer']:
|
||||
"""
|
||||
Returns only non-expired peers
|
||||
"""
|
||||
now = self.loop.time()
|
||||
for (peer, ts) in self._data_store.get(key, []):
|
||||
if ts + constants.DATA_EXPIRATION > now:
|
||||
yield peer
|
||||
|
||||
def has_peers_for_blob(self, key: bytes) -> bool:
|
||||
return key in self._data_store
|
||||
|
||||
def add_peer_to_blob(self, contact: 'KademliaPeer', key: bytes) -> None:
|
||||
now = self.loop.time()
|
||||
if key in self._data_store:
|
||||
current = list(filter(lambda x: x[0] == contact, self._data_store[key]))
|
||||
if len(current) > 0:
|
||||
self._data_store[key][self._data_store[key].index(current[0])] = contact, now
|
||||
else:
|
||||
self._data_store[key].append((contact, now))
|
||||
else:
|
||||
self._data_store[key] = [(contact, now)]
|
||||
|
||||
def get_peers_for_blob(self, key: bytes) -> typing.List['KademliaPeer']:
|
||||
return list(self.filter_bad_and_expired_peers(key))
|
||||
|
||||
def get_storing_contacts(self) -> typing.List['KademliaPeer']:
|
||||
peers = set()
|
||||
for _, stored in self._data_store.items():
|
||||
peers.update(set(map(lambda tup: tup[0], stored)))
|
||||
return list(peers)
|
|
@ -1,361 +0,0 @@
|
|||
import asyncio
|
||||
from itertools import chain
|
||||
from collections import defaultdict, OrderedDict
|
||||
from collections.abc import AsyncIterator
|
||||
import typing
|
||||
import logging
|
||||
from typing import TYPE_CHECKING
|
||||
from lbry.dht import constants
|
||||
from lbry.dht.error import RemoteException, TransportNotConnected
|
||||
from lbry.dht.protocol.distance import Distance
|
||||
from lbry.dht.peer import make_kademlia_peer, decode_tcp_peer_from_compact_address
|
||||
from lbry.dht.serialization.datagram import PAGE_KEY
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from lbry.dht.protocol.protocol import KademliaProtocol
|
||||
from lbry.dht.peer import PeerManager, KademliaPeer
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FindResponse:
|
||||
@property
|
||||
def found(self) -> bool:
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_close_kademlia_peers(self, peer_info) -> typing.Generator[typing.Iterator['KademliaPeer'], None, None]:
|
||||
for contact_triple in self.get_close_triples():
|
||||
node_id, address, udp_port = contact_triple
|
||||
try:
|
||||
yield make_kademlia_peer(node_id, address, udp_port)
|
||||
except ValueError:
|
||||
log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer_info.address,
|
||||
peer_info.udp_port, address, udp_port)
|
||||
|
||||
|
||||
class FindNodeResponse(FindResponse):
|
||||
def __init__(self, key: bytes, close_triples: typing.List[typing.Tuple[bytes, str, int]]):
|
||||
self.key = key
|
||||
self.close_triples = close_triples
|
||||
|
||||
@property
|
||||
def found(self) -> bool:
|
||||
return self.key in [triple[0] for triple in self.close_triples]
|
||||
|
||||
def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
|
||||
return self.close_triples
|
||||
|
||||
|
||||
class FindValueResponse(FindResponse):
|
||||
def __init__(self, key: bytes, result_dict: typing.Dict):
|
||||
self.key = key
|
||||
self.token = result_dict[b'token']
|
||||
self.close_triples: typing.List[typing.Tuple[bytes, bytes, int]] = result_dict.get(b'contacts', [])
|
||||
self.found_compact_addresses = result_dict.get(key, [])
|
||||
self.pages = int(result_dict.get(PAGE_KEY, 0))
|
||||
|
||||
@property
|
||||
def found(self) -> bool:
|
||||
return len(self.found_compact_addresses) > 0
|
||||
|
||||
def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
|
||||
return [(node_id, address.decode(), port) for node_id, address, port in self.close_triples]
|
||||
|
||||
|
||||
class IterativeFinder(AsyncIterator):
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop,
|
||||
protocol: 'KademliaProtocol', key: bytes,
|
||||
max_results: typing.Optional[int] = constants.K,
|
||||
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
|
||||
if len(key) != constants.HASH_LENGTH:
|
||||
raise ValueError("invalid key length: %i" % len(key))
|
||||
self.loop = loop
|
||||
self.peer_manager = protocol.peer_manager
|
||||
self.protocol = protocol
|
||||
|
||||
self.key = key
|
||||
self.max_results = max(constants.K, max_results)
|
||||
|
||||
self.active: typing.Dict['KademliaPeer', int] = OrderedDict() # peer: distance, sorted
|
||||
self.contacted: typing.Set['KademliaPeer'] = set()
|
||||
self.distance = Distance(key)
|
||||
|
||||
self.iteration_queue = asyncio.Queue()
|
||||
|
||||
self.running_probes: typing.Dict['KademliaPeer', asyncio.Task] = {}
|
||||
self.iteration_count = 0
|
||||
self.running = False
|
||||
self.tasks: typing.List[asyncio.Task] = []
|
||||
for peer in shortlist:
|
||||
if peer.node_id:
|
||||
self._add_active(peer, force=True)
|
||||
else:
|
||||
# seed nodes
|
||||
self._schedule_probe(peer)
|
||||
|
||||
async def send_probe(self, peer: 'KademliaPeer') -> FindResponse:
|
||||
"""
|
||||
Send the rpc request to the peer and return an object with the FindResponse interface
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def search_exhausted(self):
|
||||
"""
|
||||
This method ends the iterator because there are no more peers left to contact.
|
||||
Override to provide any final results before stopping.
|
||||
"""
|
||||
self.iteration_queue.put_nowait(None)
|
||||
|
||||
def check_result_ready(self, response: FindResponse):
|
||||
"""
|
||||
Called after adding peers from an rpc result to the shortlist.
|
||||
This method is responsible for putting a result for the generator into the Queue
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_initial_result(self) -> typing.List['KademliaPeer']: #pylint: disable=no-self-use
|
||||
"""
|
||||
Get an initial or cached result to be put into the Queue. Used for findValue requests where the blob
|
||||
has peers in the local data store of blobs announced to us
|
||||
"""
|
||||
return []
|
||||
|
||||
def _add_active(self, peer, force=False):
|
||||
if not force and self.peer_manager.peer_is_good(peer) is False:
|
||||
return
|
||||
if peer in self.contacted:
|
||||
return
|
||||
if peer not in self.active and peer.node_id and peer.node_id != self.protocol.node_id:
|
||||
self.active[peer] = self.distance(peer.node_id)
|
||||
self.active = OrderedDict(sorted(self.active.items(), key=lambda item: item[1]))
|
||||
|
||||
async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
|
||||
self._add_active(peer)
|
||||
for new_peer in response.get_close_kademlia_peers(peer):
|
||||
self._add_active(new_peer)
|
||||
self.check_result_ready(response)
|
||||
self._log_state(reason="check result")
|
||||
|
||||
def _reset_closest(self, peer):
|
||||
if peer in self.active:
|
||||
del self.active[peer]
|
||||
|
||||
async def _send_probe(self, peer: 'KademliaPeer'):
|
||||
try:
|
||||
response = await self.send_probe(peer)
|
||||
except asyncio.TimeoutError:
|
||||
self._reset_closest(peer)
|
||||
return
|
||||
except asyncio.CancelledError:
|
||||
log.debug("%s[%x] cancelled probe",
|
||||
type(self).__name__, id(self))
|
||||
raise
|
||||
except ValueError as err:
|
||||
log.warning(str(err))
|
||||
self._reset_closest(peer)
|
||||
return
|
||||
except TransportNotConnected:
|
||||
await self._aclose(reason="not connected")
|
||||
return
|
||||
except RemoteException:
|
||||
self._reset_closest(peer)
|
||||
return
|
||||
return await self._handle_probe_result(peer, response)
|
||||
|
||||
def _search_round(self):
|
||||
"""
|
||||
Send up to constants.alpha (5) probes to closest active peers
|
||||
"""
|
||||
|
||||
added = 0
|
||||
for index, peer in enumerate(self.active.keys()):
|
||||
if index == 0:
|
||||
log.debug("%s[%x] closest to probe: %s",
|
||||
type(self).__name__, id(self),
|
||||
peer.node_id.hex()[:8])
|
||||
if peer in self.contacted:
|
||||
continue
|
||||
if len(self.running_probes) >= constants.ALPHA:
|
||||
break
|
||||
if index > (constants.K + len(self.running_probes)):
|
||||
break
|
||||
origin_address = (peer.address, peer.udp_port)
|
||||
if peer.node_id == self.protocol.node_id:
|
||||
continue
|
||||
if origin_address == (self.protocol.external_ip, self.protocol.udp_port):
|
||||
continue
|
||||
self._schedule_probe(peer)
|
||||
added += 1
|
||||
log.debug("%s[%x] running %d probes for key %s",
|
||||
type(self).__name__, id(self),
|
||||
len(self.running_probes), self.key.hex()[:8])
|
||||
if not added and not self.running_probes:
|
||||
log.debug("%s[%x] search for %s exhausted",
|
||||
type(self).__name__, id(self),
|
||||
self.key.hex()[:8])
|
||||
self.search_exhausted()
|
||||
|
||||
def _schedule_probe(self, peer: 'KademliaPeer'):
|
||||
self.contacted.add(peer)
|
||||
|
||||
t = self.loop.create_task(self._send_probe(peer))
|
||||
|
||||
def callback(_):
|
||||
self.running_probes.pop(peer, None)
|
||||
if self.running:
|
||||
self._search_round()
|
||||
|
||||
t.add_done_callback(callback)
|
||||
self.running_probes[peer] = t
|
||||
|
||||
def _log_state(self, reason="?"):
|
||||
log.debug("%s[%x] [%s] %s: %i active nodes %i contacted %i produced %i queued",
|
||||
type(self).__name__, id(self), self.key.hex()[:8],
|
||||
reason, len(self.active), len(self.contacted),
|
||||
self.iteration_count, self.iteration_queue.qsize())
|
||||
|
||||
def __aiter__(self):
|
||||
if self.running:
|
||||
raise Exception("already running")
|
||||
self.running = True
|
||||
self.loop.call_soon(self._search_round)
|
||||
return self
|
||||
|
||||
async def __anext__(self) -> typing.List['KademliaPeer']:
|
||||
try:
|
||||
if self.iteration_count == 0:
|
||||
result = self.get_initial_result() or await self.iteration_queue.get()
|
||||
else:
|
||||
result = await self.iteration_queue.get()
|
||||
if not result:
|
||||
raise StopAsyncIteration
|
||||
self.iteration_count += 1
|
||||
return result
|
||||
except asyncio.CancelledError:
|
||||
await self._aclose(reason="cancelled")
|
||||
raise
|
||||
except StopAsyncIteration:
|
||||
await self._aclose(reason="no more results")
|
||||
raise
|
||||
|
||||
async def _aclose(self, reason="?"):
|
||||
log.debug("%s[%x] [%s] shutdown because %s: %i active nodes %i contacted %i produced %i queued",
|
||||
type(self).__name__, id(self), self.key.hex()[:8],
|
||||
reason, len(self.active), len(self.contacted),
|
||||
self.iteration_count, self.iteration_queue.qsize())
|
||||
self.running = False
|
||||
self.iteration_queue.put_nowait(None)
|
||||
for task in chain(self.tasks, self.running_probes.values()):
|
||||
task.cancel()
|
||||
self.tasks.clear()
|
||||
self.running_probes.clear()
|
||||
|
||||
async def aclose(self):
|
||||
if self.running:
|
||||
await self._aclose(reason="aclose")
|
||||
log.debug("%s[%x] [%s] async close completed",
|
||||
type(self).__name__, id(self), self.key.hex()[:8])
|
||||
|
||||
class IterativeNodeFinder(IterativeFinder):
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop,
|
||||
protocol: 'KademliaProtocol', key: bytes,
|
||||
max_results: typing.Optional[int] = constants.K,
|
||||
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
|
||||
super().__init__(loop, protocol, key, max_results, shortlist)
|
||||
self.yielded_peers: typing.Set['KademliaPeer'] = set()
|
||||
|
||||
async def send_probe(self, peer: 'KademliaPeer') -> FindNodeResponse:
|
||||
log.debug("probe %s:%d (%s) for NODE %s",
|
||||
peer.address, peer.udp_port, peer.node_id.hex()[:8] if peer.node_id else '', self.key.hex()[:8])
|
||||
response = await self.protocol.get_rpc_peer(peer).find_node(self.key)
|
||||
return FindNodeResponse(self.key, response)
|
||||
|
||||
def search_exhausted(self):
|
||||
self.put_result(self.active.keys(), finish=True)
|
||||
|
||||
def put_result(self, from_iter: typing.Iterable['KademliaPeer'], finish=False):
|
||||
not_yet_yielded = [
|
||||
peer for peer in from_iter
|
||||
if peer not in self.yielded_peers
|
||||
and peer.node_id != self.protocol.node_id
|
||||
and self.peer_manager.peer_is_good(peer) is True # return only peers who answered
|
||||
]
|
||||
not_yet_yielded.sort(key=lambda peer: self.distance(peer.node_id))
|
||||
to_yield = not_yet_yielded[:max(constants.K, self.max_results)]
|
||||
if to_yield:
|
||||
self.yielded_peers.update(to_yield)
|
||||
self.iteration_queue.put_nowait(to_yield)
|
||||
if finish:
|
||||
self.iteration_queue.put_nowait(None)
|
||||
|
||||
def check_result_ready(self, response: FindNodeResponse):
|
||||
found = response.found and self.key != self.protocol.node_id
|
||||
|
||||
if found:
|
||||
log.debug("found")
|
||||
return self.put_result(self.active.keys(), finish=True)
|
||||
|
||||
|
||||
class IterativeValueFinder(IterativeFinder):
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop,
|
||||
protocol: 'KademliaProtocol', key: bytes,
|
||||
max_results: typing.Optional[int] = constants.K,
|
||||
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
|
||||
super().__init__(loop, protocol, key, max_results, shortlist)
|
||||
self.blob_peers: typing.Set['KademliaPeer'] = set()
|
||||
# this tracks the index of the most recent page we requested from each peer
|
||||
self.peer_pages: typing.DefaultDict['KademliaPeer', int] = defaultdict(int)
|
||||
# this tracks the set of blob peers returned by each peer
|
||||
self.discovered_peers: typing.Dict['KademliaPeer', typing.Set['KademliaPeer']] = defaultdict(set)
|
||||
|
||||
async def send_probe(self, peer: 'KademliaPeer') -> FindValueResponse:
|
||||
log.debug("probe %s:%d (%s) for VALUE %s",
|
||||
peer.address, peer.udp_port, peer.node_id.hex()[:8], self.key.hex()[:8])
|
||||
page = self.peer_pages[peer]
|
||||
response = await self.protocol.get_rpc_peer(peer).find_value(self.key, page=page)
|
||||
parsed = FindValueResponse(self.key, response)
|
||||
if not parsed.found:
|
||||
return parsed
|
||||
already_known = len(self.discovered_peers[peer])
|
||||
decoded_peers = set()
|
||||
for compact_addr in parsed.found_compact_addresses:
|
||||
try:
|
||||
decoded_peers.add(decode_tcp_peer_from_compact_address(compact_addr))
|
||||
except ValueError:
|
||||
log.warning("misbehaving peer %s:%i returned invalid peer for blob",
|
||||
peer.address, peer.udp_port)
|
||||
self.peer_manager.report_failure(peer.address, peer.udp_port)
|
||||
parsed.found_compact_addresses.clear()
|
||||
return parsed
|
||||
self.discovered_peers[peer].update(decoded_peers)
|
||||
log.debug("probed %s:%i page %i, %i known", peer.address, peer.udp_port, page,
|
||||
already_known + len(parsed.found_compact_addresses))
|
||||
if len(self.discovered_peers[peer]) != already_known + len(parsed.found_compact_addresses):
|
||||
log.warning("misbehaving peer %s:%i returned duplicate peers for blob", peer.address, peer.udp_port)
|
||||
elif len(parsed.found_compact_addresses) >= constants.K and self.peer_pages[peer] < parsed.pages:
|
||||
# the peer returned a full page and indicates it has more
|
||||
self.peer_pages[peer] += 1
|
||||
if peer in self.contacted:
|
||||
# the peer must be removed from self.contacted so that it will be probed for the next page
|
||||
self.contacted.remove(peer)
|
||||
return parsed
|
||||
|
||||
def check_result_ready(self, response: FindValueResponse):
|
||||
if response.found:
|
||||
blob_peers = [decode_tcp_peer_from_compact_address(compact_addr)
|
||||
for compact_addr in response.found_compact_addresses]
|
||||
to_yield = []
|
||||
for blob_peer in blob_peers:
|
||||
if blob_peer not in self.blob_peers:
|
||||
self.blob_peers.add(blob_peer)
|
||||
to_yield.append(blob_peer)
|
||||
if to_yield:
|
||||
self.iteration_queue.put_nowait(to_yield)
|
||||
|
||||
def get_initial_result(self) -> typing.List['KademliaPeer']:
|
||||
if self.protocol.data_store.has_peers_for_blob(self.key):
|
||||
return self.protocol.data_store.get_peers_for_blob(self.key)
|
||||
return []
|
|
@ -1,5 +0,0 @@
generate:
	python generate.py generate > __init__.py

analyze:
	python generate.py analyze
@ -1,95 +0,0 @@
# Exceptions

Exceptions in LBRY are defined and generated from the Markdown table at the end of this README.

## Guidelines

When possible, use [built-in Python exceptions](https://docs.python.org/3/library/exceptions.html) or `aiohttp` [general client](https://docs.aiohttp.org/en/latest/client_reference.html#client-exceptions) / [HTTP](https://docs.aiohttp.org/en/latest/web_exceptions.html) exceptions, unless:
1. You want to provide a better error message (extend the closest built-in/`aiohttp` exception in this case).
2. You need to represent a new situation.

When defining your own exceptions, consider:
1. Extending a built-in Python or `aiohttp` exception.
2. Using contextual variables in the error message.

## Table Column Definitions

Column | Meaning
---|---
Code | Codes are used only to define the hierarchy of exceptions and do not end up in the generated output; it is okay to re-number things as necessary at any time to achieve the desired hierarchy.
Name | Becomes the class name of the exception with "Error" appended to the end. Changing names of existing exceptions makes the API backwards incompatible. When extending other exceptions you must specify the full class name, manually adding "Error" as necessary (if extending another SDK exception).
Message | User-friendly error message explaining the exceptional event. Supports Python formatted strings: any variables used in the string will be generated as arguments in the `__init__` method. Use `--` to provide a doc string after the error message to be added to the class definition.
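For example, the `101 | CommandDoesNotExist | Command '{command}' does not exist.` row in the table below generates a class along these lines (it matches the generated module that appears later in this diff):

```python
class CommandDoesNotExistError(CommandError):

    def __init__(self, command):
        self.command = command
        super().__init__(f"Command '{command}' does not exist.")
```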
## Exceptions Table

Code | Name | Message
---:|---|---
**1xx** | UserInput | User input errors.
**10x** | Command | Errors preparing to execute commands.
101 | CommandDoesNotExist | Command '{command}' does not exist.
102 | CommandDeprecated | Command '{command}' is deprecated.
103 | CommandInvalidArgument | Invalid argument '{argument}' to command '{command}'.
104 | CommandTemporarilyUnavailable | Command '{command}' is temporarily unavailable. -- Such as waiting for required components to start.
105 | CommandPermanentlyUnavailable | Command '{command}' is permanently unavailable. -- Such as when a required component was intentionally configured not to start.
**11x** | InputValue(ValueError) | Invalid argument value provided to command.
111 | GenericInputValue | The value '{value}' for argument '{argument}' is not valid.
112 | InputValueIsNone | None or null is not a valid value for argument '{argument}'.
113 | ConflictingInputValue | Only '{first_argument}' or '{second_argument}' is allowed, not both.
114 | InputStringIsBlank | {argument} cannot be blank.
115 | EmptyPublishedFile | Cannot publish empty file: {file_path}
116 | MissingPublishedFile | File does not exist: {file_path}
117 | InvalidStreamURL | Invalid LBRY stream URL: '{url}' -- When a URL cannot be downloaded, such as '@Channel/' or a collection
**2xx** | Configuration | Configuration errors.
201 | ConfigWrite | Cannot write configuration file '{path}'. -- When writing the default config fails on startup, such as due to permission issues.
202 | ConfigRead | Cannot find provided configuration file '{path}'. -- Can't open the config file user provided via command line args.
203 | ConfigParse | Failed to parse the configuration file '{path}'. -- Includes the syntax error / line number to help user fix it.
204 | ConfigMissing | Configuration file '{path}' is missing setting that has no default / fallback.
205 | ConfigInvalid | Configuration file '{path}' has setting with invalid value.
**3xx** | Network | **Networking**
301 | NoInternet | No internet connection.
302 | NoUPnPSupport | Router does not support UPnP.
**4xx** | Wallet | **Wallet Errors**
401 | TransactionRejected | Transaction rejected, unknown reason.
402 | TransactionFeeTooLow | Fee too low.
403 | TransactionInvalidSignature | Invalid signature.
404 | InsufficientFunds | Not enough funds to cover this transaction. -- determined by wallet prior to attempting to broadcast a tx; this is different for example from a TX being created and sent but then rejected by lbrycrd for unspendable utxos.
405 | ChannelKeyNotFound | Channel signing key not found.
406 | ChannelKeyInvalid | Channel signing key is out of date. -- For example, channel was updated but you don't have the updated key.
407 | DataDownload | Failed to download blob. *generic*
408 | PrivateKeyNotFound | Couldn't find private key for {key} '{value}'.
410 | Resolve | Failed to resolve '{url}'.
411 | ResolveTimeout | Failed to resolve '{url}' within the timeout.
411 | ResolveCensored | Resolve of '{url}' was censored by channel with claim id '{censor_id}'.
420 | KeyFeeAboveMaxAllowed | {message}
421 | InvalidPassword | Password is invalid.
422 | IncompatibleWalletServer | '{server}:{port}' has an incompatibly old version.
423 | TooManyClaimSearchParameters | {key} can't have more than {limit} items.
424 | AlreadyPurchased | You already have a purchase for claim_id '{claim_id_hex}'. Use --allow-duplicate-purchase flag to override.
431 | ServerPaymentInvalidAddress | Invalid address from wallet server: '{address}' - skipping payment round.
432 | ServerPaymentWalletLocked | Cannot spend funds with locked wallet, skipping payment round.
433 | ServerPaymentFeeAboveMaxAllowed | Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.
434 | WalletNotLoaded | Wallet {wallet_id} is not loaded.
435 | WalletAlreadyLoaded | Wallet {wallet_path} is already loaded.
436 | WalletNotFound | Wallet not found at {wallet_path}.
437 | WalletAlreadyExists | Wallet {wallet_path} already exists, use `wallet_add` to load it.
**5xx** | Blob | **Blobs**
500 | BlobNotFound | Blob not found.
501 | BlobPermissionDenied | Permission denied to read blob.
502 | BlobTooBig | Blob is too big.
503 | BlobEmpty | Blob is empty.
510 | BlobFailedDecryption | Failed to decrypt blob.
511 | CorruptBlob | Blob is corrupted.
520 | BlobFailedEncryption | Failed to encrypt blob.
531 | DownloadCancelled | Download was canceled.
532 | DownloadSDTimeout | Failed to download sd blob {download} within timeout.
533 | DownloadDataTimeout | Failed to download data blobs for sd hash {download} within timeout.
534 | InvalidStreamDescriptor | {message}
535 | InvalidData | {message}
536 | InvalidBlobHash | {message}
**6xx** | Component | **Components**
601 | ComponentStartConditionNotMet | Unresolved dependencies for: {components}
602 | ComponentsNotStarted | {message}
**7xx** | CurrencyExchange | **Currency Exchange**
701 | InvalidExchangeRateResponse | Failed to get exchange rate from {source}: {reason}
702 | CurrencyConversion | {message}
703 | InvalidCurrency | Invalid currency: {currency} is not a supported currency.
|
|
@ -1,494 +0,0 @@
|
|||
from .base import BaseError, claim_id
|
||||
|
||||
|
||||
class UserInputError(BaseError):
|
||||
"""
|
||||
User input errors.
|
||||
"""
|
||||
|
||||
|
||||
class CommandError(UserInputError):
|
||||
"""
|
||||
Errors preparing to execute commands.
|
||||
"""
|
||||
|
||||
|
||||
class CommandDoesNotExistError(CommandError):
|
||||
|
||||
def __init__(self, command):
|
||||
self.command = command
|
||||
super().__init__(f"Command '{command}' does not exist.")
|
||||
|
||||
|
||||
class CommandDeprecatedError(CommandError):
|
||||
|
||||
def __init__(self, command):
|
||||
self.command = command
|
||||
super().__init__(f"Command '{command}' is deprecated.")
|
||||
|
||||
|
||||
class CommandInvalidArgumentError(CommandError):
|
||||
|
||||
def __init__(self, argument, command):
|
||||
self.argument = argument
|
||||
self.command = command
|
||||
super().__init__(f"Invalid argument '{argument}' to command '{command}'.")
|
||||
|
||||
|
||||
class CommandTemporarilyUnavailableError(CommandError):
|
||||
"""
|
||||
Such as waiting for required components to start.
|
||||
"""
|
||||
|
||||
def __init__(self, command):
|
||||
self.command = command
|
||||
super().__init__(f"Command '{command}' is temporarily unavailable.")
|
||||
|
||||
|
||||
class CommandPermanentlyUnavailableError(CommandError):
|
||||
"""
|
||||
Such as when a required component was intentionally configured not to start.
|
||||
"""
|
||||
|
||||
def __init__(self, command):
|
||||
self.command = command
|
||||
super().__init__(f"Command '{command}' is permanently unavailable.")
|
||||
|
||||
|
||||
class InputValueError(UserInputError, ValueError):
|
||||
"""
|
||||
Invalid argument value provided to command.
|
||||
"""
|
||||
|
||||
|
||||
class GenericInputValueError(InputValueError):
|
||||
|
||||
def __init__(self, value, argument):
|
||||
self.value = value
|
||||
self.argument = argument
|
||||
super().__init__(f"The value '{value}' for argument '{argument}' is not valid.")
|
||||
|
||||
|
||||
class InputValueIsNoneError(InputValueError):
|
||||
|
||||
def __init__(self, argument):
|
||||
self.argument = argument
|
||||
super().__init__(f"None or null is not valid value for argument '{argument}'.")
|
||||
|
||||
|
||||
class ConflictingInputValueError(InputValueError):
|
||||
|
||||
def __init__(self, first_argument, second_argument):
|
||||
self.first_argument = first_argument
|
||||
self.second_argument = second_argument
|
||||
super().__init__(f"Only '{first_argument}' or '{second_argument}' is allowed, not both.")
|
||||
|
||||
|
||||
class InputStringIsBlankError(InputValueError):
|
||||
|
||||
def __init__(self, argument):
|
||||
self.argument = argument
|
||||
super().__init__(f"{argument} cannot be blank.")
|
||||
|
||||
|
||||
class EmptyPublishedFileError(InputValueError):
|
||||
|
||||
def __init__(self, file_path):
|
||||
self.file_path = file_path
|
||||
super().__init__(f"Cannot publish empty file: {file_path}")
|
||||
|
||||
|
||||
class MissingPublishedFileError(InputValueError):
|
||||
|
||||
def __init__(self, file_path):
|
||||
self.file_path = file_path
|
||||
super().__init__(f"File does not exist: {file_path}")
|
||||
|
||||
|
||||
class InvalidStreamURLError(InputValueError):
|
||||
"""
|
||||
When a URL cannot be downloaded, such as '@Channel/' or a collection.
|
||||
"""
|
||||
|
||||
def __init__(self, url):
|
||||
self.url = url
|
||||
super().__init__(f"Invalid LBRY stream URL: '{url}'")
|
||||
|
||||
|
||||
class ConfigurationError(BaseError):
|
||||
"""
|
||||
Configuration errors.
|
||||
"""
|
||||
|
||||
|
||||
class ConfigWriteError(ConfigurationError):
|
||||
"""
|
||||
When writing the default config fails on startup, such as due to permission issues.
|
||||
"""
|
||||
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
super().__init__(f"Cannot write configuration file '{path}'.")
|
||||
|
||||
|
||||
class ConfigReadError(ConfigurationError):
|
||||
"""
|
||||
Can't open the config file user provided via command line args.
|
||||
"""
|
||||
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
super().__init__(f"Cannot find provided configuration file '{path}'.")
|
||||
|
||||
|
||||
class ConfigParseError(ConfigurationError):
|
||||
"""
|
||||
Includes the syntax error / line number to help user fix it.
|
||||
"""
|
||||
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
super().__init__(f"Failed to parse the configuration file '{path}'.")
|
||||
|
||||
|
||||
class ConfigMissingError(ConfigurationError):
|
||||
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
super().__init__(f"Configuration file '{path}' is missing setting that has no default / fallback.")
|
||||
|
||||
|
||||
class ConfigInvalidError(ConfigurationError):
|
||||
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
super().__init__(f"Configuration file '{path}' has setting with invalid value.")
|
||||
|
||||
|
||||
class NetworkError(BaseError):
|
||||
"""
|
||||
**Networking**
|
||||
"""
|
||||
|
||||
|
||||
class NoInternetError(NetworkError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("No internet connection.")
|
||||
|
||||
|
||||
class NoUPnPSupportError(NetworkError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Router does not support UPnP.")
|
||||
|
||||
|
||||
class WalletError(BaseError):
|
||||
"""
|
||||
**Wallet Errors**
|
||||
"""
|
||||
|
||||
|
||||
class TransactionRejectedError(WalletError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Transaction rejected, unknown reason.")
|
||||
|
||||
|
||||
class TransactionFeeTooLowError(WalletError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Fee too low.")
|
||||
|
||||
|
||||
class TransactionInvalidSignatureError(WalletError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Invalid signature.")
|
||||
|
||||
|
||||
class InsufficientFundsError(WalletError):
|
||||
"""
|
||||
determined by wallet prior to attempting to broadcast a tx; this is different for example from a TX
|
||||
being created and sent but then rejected by lbrycrd for unspendable utxos.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Not enough funds to cover this transaction.")
|
||||
|
||||
|
||||
class ChannelKeyNotFoundError(WalletError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Channel signing key not found.")
|
||||
|
||||
|
||||
class ChannelKeyInvalidError(WalletError):
|
||||
"""
|
||||
For example, channel was updated but you don't have the updated key.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Channel signing key is out of date.")
|
||||
|
||||
|
||||
class DataDownloadError(WalletError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Failed to download blob. *generic*")
|
||||
|
||||
|
||||
class PrivateKeyNotFoundError(WalletError):
|
||||
|
||||
def __init__(self, key, value):
|
||||
self.key = key
|
||||
self.value = value
|
||||
super().__init__(f"Couldn't find private key for {key} '{value}'.")
|
||||
|
||||
|
||||
class ResolveError(WalletError):
|
||||
|
||||
def __init__(self, url):
|
||||
self.url = url
|
||||
super().__init__(f"Failed to resolve '{url}'.")
|
||||
|
||||
|
||||
class ResolveTimeoutError(WalletError):
|
||||
|
||||
def __init__(self, url):
|
||||
self.url = url
|
||||
super().__init__(f"Failed to resolve '{url}' within the timeout.")
|
||||
|
||||
|
||||
class ResolveCensoredError(WalletError):
|
||||
|
||||
def __init__(self, url, censor_id, censor_row):
|
||||
self.url = url
|
||||
self.censor_id = censor_id
|
||||
self.censor_row = censor_row
|
||||
super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{censor_id}'.")
|
||||
|
||||
|
||||
class KeyFeeAboveMaxAllowedError(WalletError):
|
||||
|
||||
def __init__(self, message):
|
||||
self.message = message
|
||||
super().__init__(f"{message}")
|
||||
|
||||
|
||||
class InvalidPasswordError(WalletError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Password is invalid.")
|
||||
|
||||
|
||||
class IncompatibleWalletServerError(WalletError):
|
||||
|
||||
def __init__(self, server, port):
|
||||
self.server = server
|
||||
self.port = port
|
||||
super().__init__(f"'{server}:{port}' has an incompatibly old version.")
|
||||
|
||||
|
||||
class TooManyClaimSearchParametersError(WalletError):
|
||||
|
||||
def __init__(self, key, limit):
|
||||
self.key = key
|
||||
self.limit = limit
|
||||
super().__init__(f"{key} cant have more than {limit} items.")
|
||||
|
||||
|
||||
class AlreadyPurchasedError(WalletError):
|
||||
"""
|
||||
allow-duplicate-purchase flag to override.
|
||||
"""
|
||||
|
||||
def __init__(self, claim_id_hex):
|
||||
self.claim_id_hex = claim_id_hex
|
||||
super().__init__(f"You already have a purchase for claim_id '{claim_id_hex}'. Use")
|
||||
|
||||
|
||||
class ServerPaymentInvalidAddressError(WalletError):
|
||||
|
||||
def __init__(self, address):
|
||||
self.address = address
|
||||
super().__init__(f"Invalid address from wallet server: '{address}' - skipping payment round.")
|
||||
|
||||
|
||||
class ServerPaymentWalletLockedError(WalletError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Cannot spend funds with locked wallet, skipping payment round.")
|
||||
|
||||
|
||||
class ServerPaymentFeeAboveMaxAllowedError(WalletError):
|
||||
|
||||
def __init__(self, daily_fee, max_fee):
|
||||
self.daily_fee = daily_fee
|
||||
self.max_fee = max_fee
|
||||
super().__init__(f"Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.")
|
||||
|
||||
|
||||
class WalletNotLoadedError(WalletError):
|
||||
|
||||
def __init__(self, wallet_id):
|
||||
self.wallet_id = wallet_id
|
||||
super().__init__(f"Wallet {wallet_id} is not loaded.")
|
||||
|
||||
|
||||
class WalletAlreadyLoadedError(WalletError):
|
||||
|
||||
def __init__(self, wallet_path):
|
||||
self.wallet_path = wallet_path
|
||||
super().__init__(f"Wallet {wallet_path} is already loaded.")
|
||||
|
||||
|
||||
class WalletNotFoundError(WalletError):
|
||||
|
||||
def __init__(self, wallet_path):
|
||||
self.wallet_path = wallet_path
|
||||
super().__init__(f"Wallet not found at {wallet_path}.")
|
||||
|
||||
|
||||
class WalletAlreadyExistsError(WalletError):
|
||||
|
||||
def __init__(self, wallet_path):
|
||||
self.wallet_path = wallet_path
|
||||
super().__init__(f"Wallet {wallet_path} already exists, use `wallet_add` to load it.")
|
||||
|
||||
|
||||
class BlobError(BaseError):
|
||||
"""
|
||||
**Blobs**
|
||||
"""
|
||||
|
||||
|
||||
class BlobNotFoundError(BlobError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Blob not found.")
|
||||
|
||||
|
||||
class BlobPermissionDeniedError(BlobError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Permission denied to read blob.")
|
||||
|
||||
|
||||
class BlobTooBigError(BlobError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Blob is too big.")
|
||||
|
||||
|
||||
class BlobEmptyError(BlobError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Blob is empty.")
|
||||
|
||||
|
||||
class BlobFailedDecryptionError(BlobError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Failed to decrypt blob.")
|
||||
|
||||
|
||||
class CorruptBlobError(BlobError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Blobs is corrupted.")
|
||||
|
||||
|
||||
class BlobFailedEncryptionError(BlobError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Failed to encrypt blob.")
|
||||
|
||||
|
||||
class DownloadCancelledError(BlobError):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__("Download was canceled.")
|
||||
|
||||
|
||||
class DownloadSDTimeoutError(BlobError):
|
||||
|
||||
def __init__(self, download):
|
||||
self.download = download
|
||||
super().__init__(f"Failed to download sd blob {download} within timeout.")
|
||||
|
||||
|
||||
class DownloadDataTimeoutError(BlobError):
|
||||
|
||||
def __init__(self, download):
|
||||
self.download = download
|
||||
super().__init__(f"Failed to download data blobs for sd hash {download} within timeout.")
|
||||
|
||||
|
||||
class InvalidStreamDescriptorError(BlobError):
|
||||
|
||||
def __init__(self, message):
|
||||
self.message = message
|
||||
super().__init__(f"{message}")
|
||||
|
||||
|
||||
class InvalidDataError(BlobError):
|
||||
|
||||
def __init__(self, message):
|
||||
self.message = message
|
||||
super().__init__(f"{message}")
|
||||
|
||||
|
||||
class InvalidBlobHashError(BlobError):
|
||||
|
||||
def __init__(self, message):
|
||||
self.message = message
|
||||
super().__init__(f"{message}")
|
||||
|
||||
|
||||
class ComponentError(BaseError):
|
||||
"""
|
||||
**Components**
|
||||
"""
|
||||
|
||||
|
||||
class ComponentStartConditionNotMetError(ComponentError):
|
||||
|
||||
def __init__(self, components):
|
||||
self.components = components
|
||||
super().__init__(f"Unresolved dependencies for: {components}")
|
||||
|
||||
|
||||
class ComponentsNotStartedError(ComponentError):
|
||||
|
||||
def __init__(self, message):
|
||||
self.message = message
|
||||
super().__init__(f"{message}")
|
||||
|
||||
|
||||
class CurrencyExchangeError(BaseError):
|
||||
"""
|
||||
**Currency Exchange**
|
||||
"""
|
||||
|
||||
|
||||
class InvalidExchangeRateResponseError(CurrencyExchangeError):
|
||||
|
||||
def __init__(self, source, reason):
|
||||
self.source = source
|
||||
self.reason = reason
|
||||
super().__init__(f"Failed to get exchange rate from {source}: {reason}")
|
||||
|
||||
|
||||
class CurrencyConversionError(CurrencyExchangeError):
|
||||
|
||||
def __init__(self, message):
|
||||
self.message = message
|
||||
super().__init__(f"{message}")
|
||||
|
||||
|
||||
class InvalidCurrencyError(CurrencyExchangeError):
|
||||
|
||||
def __init__(self, currency):
|
||||
self.currency = currency
|
||||
super().__init__(f"Invalid currency: {currency} is not a supported currency.")
|
|
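These exception classes mirror the table above one-to-one, and every leaf error inherits from its section's base class (WalletError, BlobError, and so on), so callers can catch a whole family at once. Below is a minimal usage sketch, assuming the package is importable as `lbry.error` as in this repository; `FakeWallet` and `send_payment` are invented purely for illustration.

```python
from lbry.error import WalletError, InsufficientFundsError


class FakeWallet:
    """Stand-in object used only for this illustration."""
    balance = 5


def send_payment(wallet, address, amount):
    # Hypothetical helper, not part of the SDK.
    if wallet.balance < amount:
        raise InsufficientFundsError()
    return True


try:
    send_payment(FakeWallet(), "bExampleAddress", 10)
except WalletError as e:
    # Catches any 4xx wallet error, including InsufficientFundsError.
    print("wallet failure:", e)
```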
@ -1,9 +0,0 @@
|
|||
from binascii import hexlify
|
||||
|
||||
|
||||
def claim_id(claim_hash):
|
||||
return hexlify(claim_hash[::-1]).decode()
|
||||
|
||||
|
||||
class BaseError(Exception):
|
||||
pass
|
|
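The `claim_id` helper above converts a claim hash into its display form by reversing the byte order before hex-encoding, which is why claim IDs read "backwards" relative to the raw hash. A small standalone sketch of that transformation, using only the standard library and a fabricated 20-byte hash:

```python
from binascii import hexlify, unhexlify


def claim_id(claim_hash):
    # Same transformation as base.py above: reverse the byte order, then hex-encode.
    return hexlify(claim_hash[::-1]).decode()


# Made-up 20-byte hash used only to show the byte reversal.
raw = unhexlify("00" * 19 + "ff")
print(claim_id(raw))  # 'ff' followed by 38 zeros: the last byte becomes the first hex pair
```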
@ -1,167 +0,0 @@
|
|||
import re
|
||||
import sys
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from textwrap import fill, indent
|
||||
|
||||
|
||||
INDENT = ' ' * 4
|
||||
|
||||
CLASS = """
|
||||
|
||||
class {name}({parents}):{doc}
|
||||
"""
|
||||
|
||||
INIT = """
|
||||
def __init__({args}):{fields}
|
||||
super().__init__({format}"{message}")
|
||||
"""
|
||||
|
||||
FUNCTIONS = ['claim_id']
|
||||
|
||||
|
||||
class ErrorClass:
|
||||
|
||||
def __init__(self, hierarchy, name, message):
|
||||
self.hierarchy = hierarchy.replace('**', '')
|
||||
self.other_parents = []
|
||||
if '(' in name:
|
||||
assert ')' in name, f"Missing closing parenthesis in '{name}'."
|
||||
self.other_parents = name[name.find('(')+1:name.find(')')].split(',')
|
||||
name = name[:name.find('(')]
|
||||
self.name = name
|
||||
self.class_name = name+'Error'
|
||||
self.message = message
|
||||
self.comment = ""
|
||||
if '--' in message:
|
||||
self.message, self.comment = message.split('--')
|
||||
self.message = self.message.strip()
|
||||
self.comment = self.comment.strip()
|
||||
|
||||
@property
|
||||
def is_leaf(self):
|
||||
return 'x' not in self.hierarchy
|
||||
|
||||
@property
|
||||
def code(self):
|
||||
return self.hierarchy.replace('x', '')
|
||||
|
||||
@property
|
||||
def parent_codes(self):
|
||||
return self.hierarchy[0:2], self.hierarchy[0]
|
||||
|
||||
def get_arguments(self):
|
||||
args = ['self']
|
||||
for arg in re.findall('{([a-z0-9_()]+)}', self.message):
|
||||
for func in FUNCTIONS:
|
||||
if arg.startswith(f'{func}('):
|
||||
arg = arg[len(f'{func}('):-1]
|
||||
break
|
||||
args.append(arg)
|
||||
return args
|
||||
|
||||
@staticmethod
|
||||
def get_fields(args):
|
||||
if len(args) > 1:
|
||||
return ''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:])
|
||||
return ''
|
||||
|
||||
@staticmethod
|
||||
def get_doc_string(doc):
|
||||
if doc:
|
||||
return f'\n{INDENT}"""\n{indent(fill(doc, 100), INDENT)}\n{INDENT}"""'
|
||||
return ""
|
||||
|
||||
def render(self, out, parent):
|
||||
if not parent:
|
||||
parents = ['BaseError']
|
||||
else:
|
||||
parents = [parent.class_name]
|
||||
parents += self.other_parents
|
||||
args = self.get_arguments()
|
||||
if self.is_leaf:
|
||||
out.write((CLASS + INIT).format(
|
||||
name=self.class_name, parents=', '.join(parents),
|
||||
args=', '.join(args), fields=self.get_fields(args),
|
||||
message=self.message, doc=self.get_doc_string(self.comment), format='f' if len(args) > 1 else ''
|
||||
))
|
||||
else:
|
||||
out.write(CLASS.format(
|
||||
name=self.class_name, parents=', '.join(parents),
|
||||
doc=self.get_doc_string(self.comment or self.message)
|
||||
))
|
||||
|
||||
|
||||
def get_errors():
|
||||
with open('README.md', 'r') as readme:
|
||||
lines = iter(readme.readlines())
|
||||
for line in lines:
|
||||
if line.startswith('## Exceptions Table'):
|
||||
break
|
||||
for line in lines:
|
||||
if line.startswith('---:|'):
|
||||
break
|
||||
for line in lines:
|
||||
if not line:
|
||||
break
|
||||
yield ErrorClass(*[c.strip() for c in line.split('|')])
|
||||
|
||||
|
||||
def find_parent(stack, child):
|
||||
for parent_code in child.parent_codes:
|
||||
parent = stack.get(parent_code)
|
||||
if parent:
|
||||
return parent
|
||||
|
||||
|
||||
def generate(out):
|
||||
out.write(f"from .base import BaseError, {', '.join(FUNCTIONS)}\n")
|
||||
stack = {}
|
||||
for error in get_errors():
|
||||
error.render(out, find_parent(stack, error))
|
||||
if not error.is_leaf:
|
||||
assert error.code not in stack, f"Duplicate code: {error.code}"
|
||||
stack[error.code] = error
|
||||
|
||||
|
||||
def analyze():
|
||||
errors = {e.class_name: [] for e in get_errors() if e.is_leaf}
|
||||
here = Path(__file__).absolute().parents[0]
|
||||
module = here.parent
|
||||
for file_path in module.glob('**/*.py'):
|
||||
if here in file_path.parents:
|
||||
continue
|
||||
with open(file_path) as src_file:
|
||||
src = src_file.read()
|
||||
for error in errors.keys():
|
||||
found = src.count(error)
|
||||
if found > 0:
|
||||
errors[error].append((file_path, found))
|
||||
|
||||
print('Used Errors:\n')
|
||||
for error, used in errors.items():
|
||||
if used:
|
||||
print(f' - {error}')
|
||||
for use in used:
|
||||
print(f' {use[0].relative_to(module.parent)} {use[1]}')
|
||||
print('')
|
||||
|
||||
print('')
|
||||
print('Unused Errors:')
|
||||
for error, used in errors.items():
|
||||
if not used:
|
||||
print(f' - {error}')
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("action", choices=['generate', 'analyze'])
|
||||
args = parser.parse_args()
|
||||
if args.action == "analyze":
|
||||
analyze()
|
||||
elif args.action == "generate":
|
||||
generate(sys.stdout)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
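The generator parses rows of the exceptions table ("code | Name | message -- optional comment"), turns each `{placeholder}` in the message into a constructor argument, and picks the parent class from the leading digits of the code. The sketch below pushes one fabricated row through `ErrorClass.render()`; it assumes the script above is importable (the module path `lbry.error.generate` reflects this repository's layout) and registers no parents, so the generated class falls back to `BaseError` instead of `WalletError`.

```python
import io

from lbry.error.generate import ErrorClass

# One fabricated table row in the same format the get_errors() loop expects.
row = "437 | WalletAlreadyExists | Wallet {wallet_path} already exists, use `wallet_add` to load it."

error = ErrorClass(*[c.strip() for c in row.split('|')])
out = io.StringIO()
error.render(out, None)  # no parent registered in this standalone sketch
print(out.getvalue())
# Expected shape (parent is BaseError here because the 43x/4xx parents are not in scope):
#
# class WalletAlreadyExistsError(BaseError):
#
#     def __init__(self, wallet_path):
#         self.wallet_path = wallet_path
#         super().__init__(f"Wallet {wallet_path} already exists, use `wallet_add` to load it.")
```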
@ -1,750 +0,0 @@
|
|||
import math
|
||||
import os
|
||||
import asyncio
|
||||
import logging
|
||||
import binascii
|
||||
import typing
|
||||
|
||||
import base58
|
||||
|
||||
from aioupnp import __version__ as aioupnp_version
|
||||
from aioupnp.upnp import UPnP
|
||||
from aioupnp.fault import UPnPError
|
||||
|
||||
from lbry import utils
|
||||
from lbry.dht.node import Node
|
||||
from lbry.dht.peer import is_valid_public_ipv4
|
||||
from lbry.dht.blob_announcer import BlobAnnouncer
|
||||
from lbry.blob.blob_manager import BlobManager
|
||||
from lbry.blob.disk_space_manager import DiskSpaceManager
|
||||
from lbry.blob_exchange.server import BlobServer
|
||||
from lbry.stream.background_downloader import BackgroundDownloader
|
||||
from lbry.stream.stream_manager import StreamManager
|
||||
from lbry.file.file_manager import FileManager
|
||||
from lbry.extras.daemon.component import Component
|
||||
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
|
||||
from lbry.extras.daemon.storage import SQLiteStorage
|
||||
from lbry.torrent.torrent_manager import TorrentManager
|
||||
from lbry.wallet import WalletManager
|
||||
from lbry.wallet.usage_payment import WalletServerPayer
|
||||
from lbry.torrent.tracker import TrackerClient
|
||||
from lbry.torrent.session import TorrentSession
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# settings must be initialized before this file is imported
|
||||
|
||||
DATABASE_COMPONENT = "database"
|
||||
BLOB_COMPONENT = "blob_manager"
|
||||
WALLET_COMPONENT = "wallet"
|
||||
WALLET_SERVER_PAYMENTS_COMPONENT = "wallet_server_payments"
|
||||
DHT_COMPONENT = "dht"
|
||||
HASH_ANNOUNCER_COMPONENT = "hash_announcer"
|
||||
FILE_MANAGER_COMPONENT = "file_manager"
|
||||
DISK_SPACE_COMPONENT = "disk_space"
|
||||
BACKGROUND_DOWNLOADER_COMPONENT = "background_downloader"
|
||||
PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
|
||||
UPNP_COMPONENT = "upnp"
|
||||
EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
|
||||
TRACKER_ANNOUNCER_COMPONENT = "tracker_announcer_component"
|
||||
LIBTORRENT_COMPONENT = "libtorrent_component"
|
||||
|
||||
|
||||
class DatabaseComponent(Component):
|
||||
component_name = DATABASE_COMPONENT
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.storage = None
|
||||
|
||||
@property
|
||||
def component(self):
|
||||
return self.storage
|
||||
|
||||
@staticmethod
|
||||
def get_current_db_revision():
|
||||
return 15
|
||||
|
||||
@property
|
||||
def revision_filename(self):
|
||||
return os.path.join(self.conf.data_dir, 'db_revision')
|
||||
|
||||
def _write_db_revision_file(self, version_num):
|
||||
with open(self.revision_filename, mode='w') as db_revision:
|
||||
db_revision.write(str(version_num))
|
||||
|
||||
async def start(self):
|
||||
# check directories exist, create them if they don't
|
||||
log.info("Loading databases")
|
||||
|
||||
if not os.path.exists(self.revision_filename):
|
||||
log.info("db_revision file not found. Creating it")
|
||||
self._write_db_revision_file(self.get_current_db_revision())
|
||||
|
||||
# check the db migration and run any needed migrations
|
||||
with open(self.revision_filename, "r") as revision_read_handle:
|
||||
old_revision = int(revision_read_handle.read().strip())
|
||||
|
||||
if old_revision > self.get_current_db_revision():
|
||||
raise Exception('This version of lbrynet is not compatible with the database\n'
|
||||
'Your database is revision %i, expected %i' %
|
||||
(old_revision, self.get_current_db_revision()))
|
||||
if old_revision < self.get_current_db_revision():
|
||||
from lbry.extras.daemon.migrator import dbmigrator # pylint: disable=import-outside-toplevel
|
||||
log.info("Upgrading your databases (revision %i to %i)", old_revision, self.get_current_db_revision())
|
||||
await asyncio.get_event_loop().run_in_executor(
|
||||
None, dbmigrator.migrate_db, self.conf, old_revision, self.get_current_db_revision()
|
||||
)
|
||||
self._write_db_revision_file(self.get_current_db_revision())
|
||||
log.info("Finished upgrading the databases.")
|
||||
|
||||
self.storage = SQLiteStorage(
|
||||
self.conf, os.path.join(self.conf.data_dir, "lbrynet.sqlite")
|
||||
)
|
||||
await self.storage.open()
|
||||
|
||||
async def stop(self):
|
||||
await self.storage.close()
|
||||
self.storage = None
|
||||
|
||||
|
||||
class WalletComponent(Component):
|
||||
component_name = WALLET_COMPONENT
|
||||
depends_on = [DATABASE_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.wallet_manager = None
|
||||
|
||||
@property
|
||||
def component(self):
|
||||
return self.wallet_manager
|
||||
|
||||
async def get_status(self):
|
||||
if self.wallet_manager is None:
|
||||
return
|
||||
is_connected = self.wallet_manager.ledger.network.is_connected
|
||||
sessions = []
|
||||
connected = None
|
||||
if is_connected:
|
||||
addr, port = self.wallet_manager.ledger.network.client.server
|
||||
connected = f"{addr}:{port}"
|
||||
sessions.append(self.wallet_manager.ledger.network.client)
|
||||
|
||||
result = {
|
||||
'connected': connected,
|
||||
'connected_features': self.wallet_manager.ledger.network.server_features,
|
||||
'servers': [
|
||||
{
|
||||
'host': session.server[0],
|
||||
'port': session.server[1],
|
||||
'latency': session.connection_latency,
|
||||
'availability': session.available,
|
||||
} for session in sessions
|
||||
],
|
||||
'known_servers': len(self.wallet_manager.ledger.network.known_hubs),
|
||||
'available_servers': 1 if is_connected else 0
|
||||
}
|
||||
|
||||
if self.wallet_manager.ledger.network.remote_height:
|
||||
local_height = self.wallet_manager.ledger.local_height_including_downloaded_height
|
||||
disk_height = len(self.wallet_manager.ledger.headers)
|
||||
remote_height = self.wallet_manager.ledger.network.remote_height
|
||||
download_height, target_height = local_height - disk_height, remote_height - disk_height
|
||||
if target_height > 0:
|
||||
progress = min(max(math.ceil(float(download_height) / float(target_height) * 100), 0), 100)
|
||||
else:
|
||||
progress = 100
|
||||
best_hash = await self.wallet_manager.get_best_blockhash()
|
||||
result.update({
|
||||
'headers_synchronization_progress': progress,
|
||||
'blocks': max(local_height, 0),
|
||||
'blocks_behind': max(remote_height - local_height, 0),
|
||||
'best_blockhash': best_hash,
|
||||
})
|
||||
|
||||
return result
|
||||
|
||||
async def start(self):
|
||||
log.info("Starting wallet")
|
||||
self.wallet_manager = await WalletManager.from_lbrynet_config(self.conf)
|
||||
await self.wallet_manager.start()
|
||||
|
||||
async def stop(self):
|
||||
await self.wallet_manager.stop()
|
||||
self.wallet_manager = None
|
||||
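The headers-synchronization percentage reported by `get_status()` above is the share of headers applied since the on-disk header count, clamped to the 0-100 range. A worked example with fabricated heights:

```python
import math

# Fabricated heights used only to illustrate the formula in get_status().
local_height, disk_height, remote_height = 900_000, 880_000, 920_000

download_height = local_height - disk_height   # 20_000 headers already applied
target_height = remote_height - disk_height    # 40_000 headers needed to reach the tip
progress = min(max(math.ceil(download_height / target_height * 100), 0), 100)
print(progress)  # 50
```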
|
||||
|
||||
class WalletServerPaymentsComponent(Component):
|
||||
component_name = WALLET_SERVER_PAYMENTS_COMPONENT
|
||||
depends_on = [WALLET_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.usage_payment_service = WalletServerPayer(
|
||||
max_fee=self.conf.max_wallet_server_fee, analytics_manager=self.component_manager.analytics_manager,
|
||||
)
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[WalletServerPayer]:
|
||||
return self.usage_payment_service
|
||||
|
||||
async def start(self):
|
||||
wallet_manager = self.component_manager.get_component(WALLET_COMPONENT)
|
||||
await self.usage_payment_service.start(wallet_manager.ledger, wallet_manager.default_wallet)
|
||||
|
||||
async def stop(self):
|
||||
await self.usage_payment_service.stop()
|
||||
|
||||
async def get_status(self):
|
||||
return {
|
||||
'max_fee': self.usage_payment_service.max_fee,
|
||||
'running': self.usage_payment_service.running
|
||||
}
|
||||
|
||||
|
||||
class BlobComponent(Component):
|
||||
component_name = BLOB_COMPONENT
|
||||
depends_on = [DATABASE_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.blob_manager: typing.Optional[BlobManager] = None
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[BlobManager]:
|
||||
return self.blob_manager
|
||||
|
||||
async def start(self):
|
||||
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
||||
data_store = None
|
||||
if DHT_COMPONENT not in self.component_manager.skip_components:
|
||||
dht_node: Node = self.component_manager.get_component(DHT_COMPONENT)
|
||||
if dht_node:
|
||||
data_store = dht_node.protocol.data_store
|
||||
blob_dir = os.path.join(self.conf.data_dir, 'blobfiles')
|
||||
if not os.path.isdir(blob_dir):
|
||||
os.mkdir(blob_dir)
|
||||
self.blob_manager = BlobManager(self.component_manager.loop, blob_dir, storage, self.conf, data_store)
|
||||
return await self.blob_manager.setup()
|
||||
|
||||
async def stop(self):
|
||||
self.blob_manager.stop()
|
||||
|
||||
async def get_status(self):
|
||||
count = 0
|
||||
if self.blob_manager:
|
||||
count = len(self.blob_manager.completed_blob_hashes)
|
||||
return {
|
||||
'finished_blobs': count,
|
||||
'connections': {} if not self.blob_manager else self.blob_manager.connection_manager.status
|
||||
}
|
||||
|
||||
|
||||
class DHTComponent(Component):
|
||||
component_name = DHT_COMPONENT
|
||||
depends_on = [UPNP_COMPONENT, DATABASE_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.dht_node: typing.Optional[Node] = None
|
||||
self.external_udp_port = None
|
||||
self.external_peer_port = None
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[Node]:
|
||||
return self.dht_node
|
||||
|
||||
async def get_status(self):
|
||||
return {
|
||||
'node_id': None if not self.dht_node else binascii.hexlify(self.dht_node.protocol.node_id),
|
||||
'peers_in_routing_table': 0 if not self.dht_node else len(self.dht_node.protocol.routing_table.get_peers())
|
||||
}
|
||||
|
||||
def get_node_id(self):
|
||||
node_id_filename = os.path.join(self.conf.data_dir, "node_id")
|
||||
if os.path.isfile(node_id_filename):
|
||||
with open(node_id_filename, "r") as node_id_file:
|
||||
return base58.b58decode(str(node_id_file.read()).strip())
|
||||
node_id = utils.generate_id()
|
||||
with open(node_id_filename, "w") as node_id_file:
|
||||
node_id_file.write(base58.b58encode(node_id).decode())
|
||||
return node_id
|
||||
|
||||
async def start(self):
|
||||
log.info("start the dht")
|
||||
upnp_component = self.component_manager.get_component(UPNP_COMPONENT)
|
||||
self.external_peer_port = upnp_component.upnp_redirects.get("TCP", self.conf.tcp_port)
|
||||
self.external_udp_port = upnp_component.upnp_redirects.get("UDP", self.conf.udp_port)
|
||||
external_ip = upnp_component.external_ip
|
||||
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
||||
if not external_ip:
|
||||
external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
|
||||
if not external_ip:
|
||||
log.warning("failed to get external ip")
|
||||
|
||||
self.dht_node = Node(
|
||||
self.component_manager.loop,
|
||||
self.component_manager.peer_manager,
|
||||
node_id=self.get_node_id(),
|
||||
internal_udp_port=self.conf.udp_port,
|
||||
udp_port=self.external_udp_port,
|
||||
external_ip=external_ip,
|
||||
peer_port=self.external_peer_port,
|
||||
rpc_timeout=self.conf.node_rpc_timeout,
|
||||
split_buckets_under_index=self.conf.split_buckets_under_index,
|
||||
is_bootstrap_node=self.conf.is_bootstrap_node,
|
||||
storage=storage
|
||||
)
|
||||
self.dht_node.start(self.conf.network_interface, self.conf.known_dht_nodes)
|
||||
log.info("Started the dht")
|
||||
|
||||
async def stop(self):
|
||||
self.dht_node.stop()
|
||||
|
||||
|
||||
class HashAnnouncerComponent(Component):
|
||||
component_name = HASH_ANNOUNCER_COMPONENT
|
||||
depends_on = [DHT_COMPONENT, DATABASE_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.hash_announcer: typing.Optional[BlobAnnouncer] = None
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[BlobAnnouncer]:
|
||||
return self.hash_announcer
|
||||
|
||||
async def start(self):
|
||||
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
||||
dht_node = self.component_manager.get_component(DHT_COMPONENT)
|
||||
self.hash_announcer = BlobAnnouncer(self.component_manager.loop, dht_node, storage)
|
||||
self.hash_announcer.start(self.conf.concurrent_blob_announcers)
|
||||
log.info("Started blob announcer")
|
||||
|
||||
async def stop(self):
|
||||
self.hash_announcer.stop()
|
||||
log.info("Stopped blob announcer")
|
||||
|
||||
async def get_status(self):
|
||||
return {
|
||||
'announce_queue_size': 0 if not self.hash_announcer else len(self.hash_announcer.announce_queue)
|
||||
}
|
||||
|
||||
|
||||
class FileManagerComponent(Component):
|
||||
component_name = FILE_MANAGER_COMPONENT
|
||||
depends_on = [BLOB_COMPONENT, DATABASE_COMPONENT, WALLET_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.file_manager: typing.Optional[FileManager] = None
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[FileManager]:
|
||||
return self.file_manager
|
||||
|
||||
async def get_status(self):
|
||||
if not self.file_manager:
|
||||
return
|
||||
return {
|
||||
'managed_files': len(self.file_manager.get_filtered()),
|
||||
}
|
||||
|
||||
async def start(self):
|
||||
blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
|
||||
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
||||
wallet = self.component_manager.get_component(WALLET_COMPONENT)
|
||||
node = self.component_manager.get_component(DHT_COMPONENT) \
|
||||
if self.component_manager.has_component(DHT_COMPONENT) else None
|
||||
log.info('Starting the file manager')
|
||||
loop = asyncio.get_event_loop()
|
||||
self.file_manager = FileManager(
|
||||
loop, self.conf, wallet, storage, self.component_manager.analytics_manager
|
||||
)
|
||||
self.file_manager.source_managers['stream'] = StreamManager(
|
||||
loop, self.conf, blob_manager, wallet, storage, node,
|
||||
)
|
||||
if self.component_manager.has_component(LIBTORRENT_COMPONENT):
|
||||
torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT)
|
||||
self.file_manager.source_managers['torrent'] = TorrentManager(
|
||||
loop, self.conf, torrent, storage, self.component_manager.analytics_manager
|
||||
)
|
||||
await self.file_manager.start()
|
||||
log.info('Done setting up file manager')
|
||||
|
||||
async def stop(self):
|
||||
await self.file_manager.stop()
|
||||
|
||||
|
||||
class BackgroundDownloaderComponent(Component):
|
||||
MIN_PREFIX_COLLIDING_BITS = 8
|
||||
component_name = BACKGROUND_DOWNLOADER_COMPONENT
|
||||
depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT, DISK_SPACE_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.background_task: typing.Optional[asyncio.Task] = None
|
||||
self.download_loop_delay_seconds = 60
|
||||
self.ongoing_download: typing.Optional[asyncio.Task] = None
|
||||
self.space_manager: typing.Optional[DiskSpaceManager] = None
|
||||
self.blob_manager: typing.Optional[BlobManager] = None
|
||||
self.background_downloader: typing.Optional[BackgroundDownloader] = None
|
||||
self.dht_node: typing.Optional[Node] = None
|
||||
self.space_available: typing.Optional[int] = None
|
||||
|
||||
@property
|
||||
def is_busy(self):
|
||||
return bool(self.ongoing_download and not self.ongoing_download.done())
|
||||
|
||||
@property
|
||||
def component(self) -> 'BackgroundDownloaderComponent':
|
||||
return self
|
||||
|
||||
async def get_status(self):
|
||||
return {'running': self.background_task is not None and not self.background_task.done(),
|
||||
'available_free_space_mb': self.space_available,
|
||||
'ongoing_download': self.is_busy}
|
||||
|
||||
async def download_blobs_in_background(self):
|
||||
while True:
|
||||
self.space_available = await self.space_manager.get_free_space_mb(True)
|
||||
if not self.is_busy and self.space_available > 10:
|
||||
self._download_next_close_blob_hash()
|
||||
await asyncio.sleep(self.download_loop_delay_seconds)
|
||||
|
||||
def _download_next_close_blob_hash(self):
|
||||
node_id = self.dht_node.protocol.node_id
|
||||
for blob_hash in self.dht_node.stored_blob_hashes:
|
||||
if blob_hash.hex() in self.blob_manager.completed_blob_hashes:
|
||||
continue
|
||||
if utils.get_colliding_prefix_bits(node_id, blob_hash) >= self.MIN_PREFIX_COLLIDING_BITS:
|
||||
self.ongoing_download = asyncio.create_task(self.background_downloader.download_blobs(blob_hash.hex()))
|
||||
return
|
||||
|
||||
async def start(self):
|
||||
self.space_manager: DiskSpaceManager = self.component_manager.get_component(DISK_SPACE_COMPONENT)
|
||||
if not self.component_manager.has_component(DHT_COMPONENT):
|
||||
return
|
||||
self.dht_node = self.component_manager.get_component(DHT_COMPONENT)
|
||||
self.blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
|
||||
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
||||
self.background_downloader = BackgroundDownloader(self.conf, storage, self.blob_manager, self.dht_node)
|
||||
self.background_task = asyncio.create_task(self.download_blobs_in_background())
|
||||
|
||||
async def stop(self):
|
||||
if self.ongoing_download and not self.ongoing_download.done():
|
||||
self.ongoing_download.cancel()
|
||||
if self.background_task:
|
||||
self.background_task.cancel()
|
||||
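`_download_next_close_blob_hash()` above only queues a blob when its hash shares at least `MIN_PREFIX_COLLIDING_BITS` (8) leading bits with the node's DHT id, so the node volunteers to seed content that is "close" to it in the keyspace. The sketch below is a from-scratch illustration of that prefix test, not the SDK's `lbry.utils.get_colliding_prefix_bits` implementation, and the ids are fabricated.

```python
def colliding_prefix_bits(a: bytes, b: bytes) -> int:
    """Count how many leading bits the two byte strings have in common."""
    count = 0
    for byte_a, byte_b in zip(a, b):
        diff = byte_a ^ byte_b
        if diff == 0:
            count += 8
            continue
        # Leading zero bits of the XOR are the still-matching bits in this byte.
        count += 8 - diff.bit_length()
        break
    return count


node_id = bytes.fromhex("ab" * 48)        # made-up 384-bit node id
close = bytes.fromhex("ab" + "00" * 47)   # shares the first 8 bits
far = bytes.fromhex("54" + "00" * 47)     # differs in the very first bit

print(colliding_prefix_bits(node_id, close))  # 8 -> eligible for background download
print(colliding_prefix_bits(node_id, far))    # 0 -> skipped
```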
|
||||
|
||||
class DiskSpaceComponent(Component):
|
||||
component_name = DISK_SPACE_COMPONENT
|
||||
depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.disk_space_manager: typing.Optional[DiskSpaceManager] = None
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[DiskSpaceManager]:
|
||||
return self.disk_space_manager
|
||||
|
||||
async def get_status(self):
|
||||
if self.disk_space_manager:
|
||||
space_used = await self.disk_space_manager.get_space_used_mb(cached=True)
|
||||
return {
|
||||
'total_used_mb': space_used['total'],
|
||||
'published_blobs_storage_used_mb': space_used['private_storage'],
|
||||
'content_blobs_storage_used_mb': space_used['content_storage'],
|
||||
'seed_blobs_storage_used_mb': space_used['network_storage'],
|
||||
'running': self.disk_space_manager.running,
|
||||
}
|
||||
return {'space_used': '0', 'network_seeding_space_used': '0', 'running': False}
|
||||
|
||||
async def start(self):
|
||||
db = self.component_manager.get_component(DATABASE_COMPONENT)
|
||||
blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
|
||||
self.disk_space_manager = DiskSpaceManager(
|
||||
self.conf, db, blob_manager,
|
||||
analytics=self.component_manager.analytics_manager
|
||||
)
|
||||
await self.disk_space_manager.start()
|
||||
|
||||
async def stop(self):
|
||||
await self.disk_space_manager.stop()
|
||||
|
||||
|
||||
class TorrentComponent(Component):
|
||||
component_name = LIBTORRENT_COMPONENT
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.torrent_session = None
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[TorrentSession]:
|
||||
return self.torrent_session
|
||||
|
||||
async def get_status(self):
|
||||
if not self.torrent_session:
|
||||
return
|
||||
return {
|
||||
'running': True, # TODO: what to return here?
|
||||
}
|
||||
|
||||
async def start(self):
|
||||
self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
|
||||
await self.torrent_session.bind() # TODO: specify host/port
|
||||
|
||||
async def stop(self):
|
||||
if self.torrent_session:
|
||||
await self.torrent_session.pause()
|
||||
|
||||
|
||||
class PeerProtocolServerComponent(Component):
|
||||
component_name = PEER_PROTOCOL_SERVER_COMPONENT
|
||||
depends_on = [UPNP_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.blob_server: typing.Optional[BlobServer] = None
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[BlobServer]:
|
||||
return self.blob_server
|
||||
|
||||
async def start(self):
|
||||
log.info("start blob server")
|
||||
blob_manager: BlobManager = self.component_manager.get_component(BLOB_COMPONENT)
|
||||
wallet: WalletManager = self.component_manager.get_component(WALLET_COMPONENT)
|
||||
peer_port = self.conf.tcp_port
|
||||
address = await wallet.get_unused_address()
|
||||
self.blob_server = BlobServer(asyncio.get_event_loop(), blob_manager, address)
|
||||
self.blob_server.start_server(peer_port, interface=self.conf.network_interface)
|
||||
await self.blob_server.started_listening.wait()
|
||||
|
||||
async def stop(self):
|
||||
if self.blob_server:
|
||||
self.blob_server.stop_server()
|
||||
|
||||
|
||||
class UPnPComponent(Component):
|
||||
component_name = UPNP_COMPONENT
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self._int_peer_port = self.conf.tcp_port
|
||||
self._int_dht_node_port = self.conf.udp_port
|
||||
self.use_upnp = self.conf.use_upnp
|
||||
self.upnp: typing.Optional[UPnP] = None
|
||||
self.upnp_redirects = {}
|
||||
self.external_ip: typing.Optional[str] = None
|
||||
self._maintain_redirects_task = None
|
||||
|
||||
@property
|
||||
def component(self) -> 'UPnPComponent':
|
||||
return self
|
||||
|
||||
async def _repeatedly_maintain_redirects(self, now=True):
|
||||
while True:
|
||||
if now:
|
||||
await self._maintain_redirects()
|
||||
await asyncio.sleep(360)
|
||||
|
||||
async def _maintain_redirects(self):
|
||||
# setup the gateway if necessary
|
||||
if not self.upnp:
|
||||
try:
|
||||
self.upnp = await UPnP.discover(loop=self.component_manager.loop)
|
||||
log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
|
||||
except Exception as err:
|
||||
log.warning("upnp discovery failed: %s", err)
|
||||
self.upnp = None
|
||||
|
||||
# update the external ip
|
||||
external_ip = None
|
||||
if self.upnp:
|
||||
try:
|
||||
external_ip = await self.upnp.get_external_ip()
|
||||
if external_ip != "0.0.0.0" and not self.external_ip:
|
||||
log.info("got external ip from UPnP: %s", external_ip)
|
||||
except (asyncio.TimeoutError, UPnPError, NotImplementedError):
|
||||
pass
|
||||
if external_ip and not is_valid_public_ipv4(external_ip):
|
||||
log.warning("UPnP returned a private/reserved ip - %s, checking lbry.com fallback", external_ip)
|
||||
external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
|
||||
if self.external_ip and self.external_ip != external_ip:
|
||||
log.info("external ip changed from %s to %s", self.external_ip, external_ip)
|
||||
if external_ip:
|
||||
self.external_ip = external_ip
|
||||
dht_component = self.component_manager.get_component(DHT_COMPONENT)
|
||||
if dht_component:
|
||||
dht_node = dht_component.component
|
||||
dht_node.protocol.external_ip = external_ip
|
||||
# assert self.external_ip is not None # TODO: handle going/starting offline
|
||||
|
||||
if not self.upnp_redirects and self.upnp: # setup missing redirects
|
||||
log.info("add UPnP port mappings")
|
||||
upnp_redirects = {}
|
||||
if PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components:
|
||||
try:
|
||||
upnp_redirects["TCP"] = await self.upnp.get_next_mapping(
|
||||
self._int_peer_port, "TCP", "LBRY peer port", self._int_peer_port
|
||||
)
|
||||
except (UPnPError, asyncio.TimeoutError, NotImplementedError):
|
||||
pass
|
||||
if DHT_COMPONENT not in self.component_manager.skip_components:
|
||||
try:
|
||||
upnp_redirects["UDP"] = await self.upnp.get_next_mapping(
|
||||
self._int_dht_node_port, "UDP", "LBRY DHT port", self._int_dht_node_port
|
||||
)
|
||||
except (UPnPError, asyncio.TimeoutError, NotImplementedError):
|
||||
pass
|
||||
if upnp_redirects:
|
||||
log.info("set up redirects: %s", upnp_redirects)
|
||||
self.upnp_redirects.update(upnp_redirects)
|
||||
elif self.upnp: # check existing redirects are still active
|
||||
found = set()
|
||||
mappings = await self.upnp.get_redirects()
|
||||
for mapping in mappings:
|
||||
proto = mapping.protocol
|
||||
if proto in self.upnp_redirects and mapping.external_port == self.upnp_redirects[proto]:
|
||||
if mapping.lan_address == self.upnp.lan_address:
|
||||
found.add(proto)
|
||||
if 'UDP' not in found and DHT_COMPONENT not in self.component_manager.skip_components:
|
||||
try:
|
||||
udp_port = await self.upnp.get_next_mapping(self._int_dht_node_port, "UDP", "LBRY DHT port")
|
||||
self.upnp_redirects['UDP'] = udp_port
|
||||
log.info("refreshed upnp redirect for dht port: %i", udp_port)
|
||||
except (asyncio.TimeoutError, UPnPError, NotImplementedError):
|
||||
del self.upnp_redirects['UDP']
|
||||
if 'TCP' not in found and PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components:
|
||||
try:
|
||||
tcp_port = await self.upnp.get_next_mapping(self._int_peer_port, "TCP", "LBRY peer port")
|
||||
self.upnp_redirects['TCP'] = tcp_port
|
||||
log.info("refreshed upnp redirect for peer port: %i", tcp_port)
|
||||
except (asyncio.TimeoutError, UPnPError, NotImplementedError):
|
||||
del self.upnp_redirects['TCP']
|
||||
if ('TCP' in self.upnp_redirects and
|
||||
PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components) and \
|
||||
('UDP' in self.upnp_redirects and DHT_COMPONENT not in self.component_manager.skip_components):
|
||||
if self.upnp_redirects:
|
||||
log.debug("upnp redirects are still active")
|
||||
|
||||
async def start(self):
|
||||
log.info("detecting external ip")
|
||||
if not self.use_upnp:
|
||||
self.external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
|
||||
return
|
||||
success = False
|
||||
await self._maintain_redirects()
|
||||
if self.upnp:
|
||||
if not self.upnp_redirects and not all(
|
||||
x in self.component_manager.skip_components
|
||||
for x in (DHT_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT)
|
||||
):
|
||||
log.error("failed to setup upnp")
|
||||
else:
|
||||
success = True
|
||||
if self.upnp_redirects:
|
||||
log.debug("set up upnp port redirects for gateway: %s", self.upnp.gateway.manufacturer_string)
|
||||
else:
|
||||
log.error("failed to setup upnp")
|
||||
if not self.external_ip:
|
||||
self.external_ip, probed_url = await utils.get_external_ip(self.conf.lbryum_servers)
|
||||
if self.external_ip:
|
||||
log.info("detected external ip using %s fallback", probed_url)
|
||||
if self.component_manager.analytics_manager:
|
||||
self.component_manager.loop.create_task(
|
||||
self.component_manager.analytics_manager.send_upnp_setup_success_fail(
|
||||
success, await self.get_status()
|
||||
)
|
||||
)
|
||||
self._maintain_redirects_task = self.component_manager.loop.create_task(
|
||||
self._repeatedly_maintain_redirects(now=False)
|
||||
)
|
||||
|
||||
async def stop(self):
|
||||
if self.upnp_redirects:
|
||||
log.info("Removing upnp redirects: %s", self.upnp_redirects)
|
||||
await asyncio.wait([
|
||||
self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
|
||||
])
|
||||
if self._maintain_redirects_task and not self._maintain_redirects_task.done():
|
||||
self._maintain_redirects_task.cancel()
|
||||
|
||||
async def get_status(self):
|
||||
return {
|
||||
'aioupnp_version': aioupnp_version,
|
||||
'redirects': self.upnp_redirects,
|
||||
'gateway': 'No gateway found' if not self.upnp else self.upnp.gateway.manufacturer_string,
|
||||
'dht_redirect_set': 'UDP' in self.upnp_redirects,
|
||||
'peer_redirect_set': 'TCP' in self.upnp_redirects,
|
||||
'external_ip': self.external_ip
|
||||
}
|
||||
|
||||
|
||||
class ExchangeRateManagerComponent(Component):
|
||||
component_name = EXCHANGE_RATE_MANAGER_COMPONENT
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.exchange_rate_manager = ExchangeRateManager()
|
||||
|
||||
@property
|
||||
def component(self) -> ExchangeRateManager:
|
||||
return self.exchange_rate_manager
|
||||
|
||||
async def start(self):
|
||||
self.exchange_rate_manager.start()
|
||||
|
||||
async def stop(self):
|
||||
self.exchange_rate_manager.stop()
|
||||
|
||||
|
||||
class TrackerAnnouncerComponent(Component):
|
||||
component_name = TRACKER_ANNOUNCER_COMPONENT
|
||||
depends_on = [FILE_MANAGER_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.file_manager = None
|
||||
self.announce_task = None
|
||||
self.tracker_client: typing.Optional[TrackerClient] = None
|
||||
|
||||
@property
|
||||
def component(self):
|
||||
return self.tracker_client
|
||||
|
||||
@property
|
||||
def running(self):
|
||||
return self._running and self.announce_task and not self.announce_task.done()
|
||||
|
||||
async def announce_forever(self):
|
||||
while True:
|
||||
sleep_seconds = 60.0
|
||||
announce_sd_hashes = []
|
||||
for file in self.file_manager.get_filtered():
|
||||
if not file.downloader:
|
||||
continue
|
||||
announce_sd_hashes.append(bytes.fromhex(file.sd_hash))
|
||||
await self.tracker_client.announce_many(*announce_sd_hashes)
|
||||
await asyncio.sleep(sleep_seconds)
|
||||
|
||||
async def start(self):
|
||||
node = self.component_manager.get_component(DHT_COMPONENT) \
|
||||
if self.component_manager.has_component(DHT_COMPONENT) else None
|
||||
node_id = node.protocol.node_id if node else None
|
||||
self.tracker_client = TrackerClient(node_id, self.conf.tcp_port, lambda: self.conf.tracker_servers)
|
||||
await self.tracker_client.start()
|
||||
self.file_manager = self.component_manager.get_component(FILE_MANAGER_COMPONENT)
|
||||
self.announce_task = asyncio.create_task(self.announce_forever())
|
||||
|
||||
async def stop(self):
|
||||
self.file_manager = None
|
||||
if self.announce_task and not self.announce_task.done():
|
||||
self.announce_task.cancel()
|
||||
self.announce_task = None
|
||||
self.tracker_client.stop()
|
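Every class in this file follows the same contract: declare `component_name` and `depends_on`, expose the managed object through the `component` property, and implement async `start`/`stop` (plus an optional `get_status`) so the component manager can order startup by dependencies. Below is a minimal sketch of a new component following that pattern; `MyServiceComponent`, `MyService`, and `MY_SERVICE_COMPONENT` are invented for illustration.

```python
from lbry.extras.daemon.component import Component

DATABASE_COMPONENT = "database"       # same constant as defined near the top of this file
MY_SERVICE_COMPONENT = "my_service"   # invented name for this sketch


class MyService:
    """Hypothetical service managed by the component below."""
    def __init__(self, storage):
        self.storage = storage

    async def start(self): ...
    async def stop(self): ...


class MyServiceComponent(Component):
    component_name = MY_SERVICE_COMPONENT
    depends_on = [DATABASE_COMPONENT]  # the component manager starts dependencies first

    def __init__(self, component_manager):
        super().__init__(component_manager)
        self.service = None

    @property
    def component(self):
        return self.service

    async def start(self):
        # Dependencies are guaranteed to have started before this runs.
        storage = self.component_manager.get_component(DATABASE_COMPONENT)
        self.service = MyService(storage)
        await self.service.start()

    async def stop(self):
        await self.service.stop()
        self.service = None

    async def get_status(self):
        return {'running': self.service is not None}
```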
File diff suppressed because it is too large
|
@ -1,248 +0,0 @@
|
|||
import json
|
||||
import time
|
||||
import asyncio
|
||||
import logging
|
||||
from statistics import median
|
||||
from decimal import Decimal
|
||||
from typing import Optional, Iterable, Type
|
||||
from aiohttp.client_exceptions import ContentTypeError, ClientConnectionError
|
||||
from lbry.error import InvalidExchangeRateResponseError, CurrencyConversionError
|
||||
from lbry.utils import aiohttp_request
|
||||
from lbry.wallet.dewies import lbc_to_dewies
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ExchangeRate:
|
||||
def __init__(self, market, spot, ts):
|
||||
if not int(time.time()) - ts < 600:
|
||||
raise ValueError('The timestamp is too dated.')
|
||||
if not spot > 0:
|
||||
raise ValueError('Spot must be greater than 0.')
|
||||
self.currency_pair = (market[0:3], market[3:6])
|
||||
self.spot = spot
|
||||
self.ts = ts
|
||||
|
||||
def __repr__(self):
|
||||
return f"Currency pair:{self.currency_pair}, spot:{self.spot}, ts:{self.ts}"
|
||||
|
||||
def as_dict(self):
|
||||
return {'spot': self.spot, 'ts': self.ts}
|
||||
|
||||
|
||||
class MarketFeed:
|
||||
name: str = ""
|
||||
market: str = ""
|
||||
url: str = ""
|
||||
params = {}
|
||||
fee = 0
|
||||
|
||||
update_interval = 300
|
||||
request_timeout = 50
|
||||
|
||||
def __init__(self):
|
||||
self.rate: Optional[float] = None
|
||||
self.last_check = 0
|
||||
self._last_response = None
|
||||
self._task: Optional[asyncio.Task] = None
|
||||
self.event = asyncio.Event()
|
||||
|
||||
@property
|
||||
def has_rate(self):
|
||||
return self.rate is not None
|
||||
|
||||
@property
|
||||
def is_online(self):
|
||||
return self.last_check+self.update_interval+self.request_timeout > time.time()
|
||||
|
||||
def get_rate_from_response(self, json_response):
|
||||
raise NotImplementedError()
|
||||
|
||||
async def get_response(self):
|
||||
async with aiohttp_request(
|
||||
'get', self.url, params=self.params,
|
||||
timeout=self.request_timeout, headers={"User-Agent": "lbrynet"}
|
||||
) as response:
|
||||
try:
|
||||
self._last_response = await response.json(content_type=None)
|
||||
except ContentTypeError as e:
|
||||
self._last_response = {}
|
||||
log.warning("Could not parse exchange rate response from %s: %s", self.name, e.message)
|
||||
log.debug(await response.text())
|
||||
return self._last_response
|
||||
|
||||
async def get_rate(self):
|
||||
try:
|
||||
data = await self.get_response()
|
||||
rate = self.get_rate_from_response(data)
|
||||
rate = rate / (1.0 - self.fee)
|
||||
log.debug("Saving rate update %f for %s from %s", rate, self.market, self.name)
|
||||
self.rate = ExchangeRate(self.market, rate, int(time.time()))
|
||||
self.last_check = time.time()
|
||||
return self.rate
|
||||
except asyncio.TimeoutError:
|
||||
log.warning("Timed out fetching exchange rate from %s.", self.name)
|
||||
except json.JSONDecodeError as e:
|
||||
msg = e.doc if '<html>' not in e.doc else 'unexpected content type.'
|
||||
log.warning("Could not parse exchange rate response from %s: %s", self.name, msg)
|
||||
log.debug(e.doc)
|
||||
except InvalidExchangeRateResponseError as e:
|
||||
log.warning(str(e))
|
||||
except ClientConnectionError as e:
|
||||
log.warning("Error trying to connect to exchange rate %s: %s", self.name, str(e))
|
||||
except Exception as e:
|
||||
log.exception("Exchange rate error (%s from %s):", self.market, self.name)
|
||||
finally:
|
||||
self.event.set()
|
||||
|
||||
async def keep_updated(self):
|
||||
while True:
|
||||
await self.get_rate()
|
||||
await asyncio.sleep(self.update_interval)
|
||||
|
||||
def start(self):
|
||||
if not self._task:
|
||||
self._task = asyncio.create_task(self.keep_updated())
|
||||
|
||||
def stop(self):
|
||||
if self._task and not self._task.done():
|
||||
self._task.cancel()
|
||||
self._task = None
|
||||
self.event.clear()
|
||||
|
||||
|
||||
class BaseBittrexFeed(MarketFeed):
|
||||
name = "Bittrex"
|
||||
market = None
|
||||
url = None
|
||||
fee = 0.0025
|
||||
|
||||
def get_rate_from_response(self, json_response):
|
||||
if 'lastTradeRate' not in json_response:
|
||||
raise InvalidExchangeRateResponseError(self.name, 'result not found')
|
||||
return 1.0 / float(json_response['lastTradeRate'])
|
||||
|
||||
|
||||
class BittrexBTCFeed(BaseBittrexFeed):
|
||||
market = "BTCLBC"
|
||||
url = "https://api.bittrex.com/v3/markets/LBC-BTC/ticker"
|
||||
|
||||
|
||||
class BittrexUSDFeed(BaseBittrexFeed):
|
||||
market = "USDLBC"
|
||||
url = "https://api.bittrex.com/v3/markets/LBC-USD/ticker"
|
||||
|
||||
|
||||
class BaseCoinExFeed(MarketFeed):
|
||||
name = "CoinEx"
|
||||
market = None
|
||||
url = None
|
||||
|
||||
def get_rate_from_response(self, json_response):
|
||||
if 'data' not in json_response or \
|
||||
'ticker' not in json_response['data'] or \
|
||||
'last' not in json_response['data']['ticker']:
|
||||
raise InvalidExchangeRateResponseError(self.name, 'result not found')
|
||||
return 1.0 / float(json_response['data']['ticker']['last'])
|
||||
|
||||
|
||||
class CoinExBTCFeed(BaseCoinExFeed):
|
||||
market = "BTCLBC"
|
||||
url = "https://api.coinex.com/v1/market/ticker?market=LBCBTC"
|
||||
|
||||
|
||||
class CoinExUSDFeed(BaseCoinExFeed):
|
||||
market = "USDLBC"
|
||||
url = "https://api.coinex.com/v1/market/ticker?market=LBCUSDT"
|
||||
|
||||
|
||||
class BaseHotbitFeed(MarketFeed):
|
||||
name = "hotbit"
|
||||
market = None
|
||||
url = "https://api.hotbit.io/api/v1/market.last"
|
||||
|
||||
def get_rate_from_response(self, json_response):
|
||||
if 'result' not in json_response:
|
||||
raise InvalidExchangeRateResponseError(self.name, 'result not found')
|
||||
return 1.0 / float(json_response['result'])
|
||||
|
||||
|
||||
class HotbitBTCFeed(BaseHotbitFeed):
|
||||
market = "BTCLBC"
|
||||
params = {"market": "LBC/BTC"}
|
||||
|
||||
|
||||
class HotbitUSDFeed(BaseHotbitFeed):
|
||||
market = "USDLBC"
|
||||
params = {"market": "LBC/USDT"}
|
||||
|
||||
|
||||
class UPbitBTCFeed(MarketFeed):
|
||||
name = "UPbit"
|
||||
market = "BTCLBC"
|
||||
url = "https://api.upbit.com/v1/ticker"
|
||||
params = {"markets": "BTC-LBC"}
|
||||
|
||||
def get_rate_from_response(self, json_response):
|
||||
if "error" in json_response or len(json_response) != 1 or 'trade_price' not in json_response[0]:
|
||||
raise InvalidExchangeRateResponseError(self.name, 'result not found')
|
||||
return 1.0 / float(json_response[0]['trade_price'])
|
||||
|
||||
|
||||
FEEDS: Iterable[Type[MarketFeed]] = (
|
||||
BittrexBTCFeed,
|
||||
BittrexUSDFeed,
|
||||
CoinExBTCFeed,
|
||||
CoinExUSDFeed,
|
||||
# HotbitBTCFeed,
|
||||
# HotbitUSDFeed,
|
||||
# UPbitBTCFeed,
|
||||
)
|
||||
|
||||
|
||||
class ExchangeRateManager:
|
||||
def __init__(self, feeds=FEEDS):
|
||||
self.market_feeds = [Feed() for Feed in feeds]
|
||||
|
||||
def wait(self):
|
||||
return asyncio.wait(
|
||||
[feed.event.wait() for feed in self.market_feeds],
|
||||
)
|
||||
|
||||
def start(self):
|
||||
log.info("Starting exchange rate manager")
|
||||
for feed in self.market_feeds:
|
||||
feed.start()
|
||||
|
||||
def stop(self):
|
||||
log.info("Stopping exchange rate manager")
|
||||
for source in self.market_feeds:
|
||||
source.stop()
|
||||
|
||||
def convert_currency(self, from_currency, to_currency, amount):
|
||||
log.debug(
|
||||
"Converting %f %s to %s, rates: %s",
|
||||
amount, from_currency, to_currency,
|
||||
[market.rate for market in self.market_feeds]
|
||||
)
|
||||
if from_currency == to_currency:
|
||||
return round(amount, 8)
|
||||
|
||||
rates = []
|
||||
for market in self.market_feeds:
|
||||
if (market.has_rate and market.is_online and
|
||||
market.rate.currency_pair == (from_currency, to_currency)):
|
||||
rates.append(market.rate.spot)
|
||||
|
||||
if rates:
|
||||
return round(amount * Decimal(median(rates)), 8)
|
||||
|
||||
raise CurrencyConversionError(
|
||||
f'Unable to convert {amount} from {from_currency} to {to_currency}')
|
||||
|
||||
def to_dewies(self, currency, amount) -> int:
|
||||
converted = self.convert_currency(currency, "LBC", amount)
|
||||
return lbc_to_dewies(str(converted))
|
||||
|
||||
def fee_dict(self):
|
||||
return {market: market.rate.as_dict() for market in self.market_feeds}
|
|
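`convert_currency()` above takes the median spot rate across every online feed whose `currency_pair` matches the request and rounds the converted amount to 8 decimal places (raising `CurrencyConversionError` when no feed qualifies). A worked example with fabricated USDLBC rates:

```python
from decimal import Decimal
from statistics import median

# Hypothetical spot rates (LBC per USD) reported by three online feeds.
rates = [2.10, 2.25, 1.95]
amount_usd = Decimal("4.99")

# Same arithmetic as convert_currency(): median rate, rounded to 8 places.
lbc = round(amount_usd * Decimal(median(rates)), 8)
print(lbc)  # 4.99 * 2.10 ~= 10.479 LBC
```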
@ -1,361 +0,0 @@
|
|||
import logging
|
||||
from decimal import Decimal
|
||||
from binascii import hexlify, unhexlify
|
||||
from datetime import datetime
|
||||
from json import JSONEncoder
|
||||
|
||||
from google.protobuf.message import DecodeError
|
||||
|
||||
from lbry.schema.claim import Claim
|
||||
from lbry.schema.support import Support
|
||||
from lbry.torrent.torrent_manager import TorrentSource
|
||||
from lbry.wallet import Wallet, Ledger, Account, Transaction, Output
|
||||
from lbry.wallet.bip32 import PublicKey
|
||||
from lbry.wallet.dewies import dewies_to_lbc
|
||||
from lbry.stream.managed_stream import ManagedStream
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def encode_txo_doc():
|
||||
return {
|
||||
'txid': "hash of transaction in hex",
|
||||
'nout': "position in the transaction",
|
||||
'height': "block where transaction was recorded",
|
||||
'amount': "value of the txo as a decimal",
|
||||
'address': "address of who can spend the txo",
|
||||
'confirmations': "number of confirmed blocks",
|
||||
'is_change': "payment to change address, only available when it can be determined",
|
||||
'is_received': "true if txo was sent from external account to this account",
|
||||
'is_spent': "true if txo is spent",
|
||||
'is_mine': "payment to one of your accounts, only available when it can be determined",
|
||||
'type': "one of 'claim', 'support' or 'purchase'",
|
||||
'name': "when type is 'claim' or 'support', this is the claim name",
|
||||
'claim_id': "when type is 'claim', 'support' or 'purchase', this is the claim id",
|
||||
'claim_op': "when type is 'claim', this determines if it is 'create' or 'update'",
|
||||
'value': "when type is 'claim' or 'support' with payload, this is the decoded protobuf payload",
|
||||
'value_type': "determines the type of the 'value' field: 'channel', 'stream', etc",
|
||||
'protobuf': "hex encoded raw protobuf version of 'value' field",
|
||||
'permanent_url': "when type is 'claim' or 'support', this is the long permanent claim URL",
|
||||
'claim': "for purchase outputs only, metadata of purchased claim",
|
||||
'reposted_claim': "for repost claims only, metadata of claim being reposted",
|
||||
'signing_channel': "for signed claims only, metadata of signing channel",
|
||||
'is_channel_signature_valid': "for signed claims only, whether signature is valid",
|
||||
'purchase_receipt': "metadata for the purchase transaction associated with this claim"
|
||||
}
|
||||
|
||||
|
||||
def encode_tx_doc():
|
||||
return {
|
||||
'txid': "hash of transaction in hex",
|
||||
'height': "block where transaction was recorded",
|
||||
'inputs': [encode_txo_doc()],
|
||||
'outputs': [encode_txo_doc()],
|
||||
'total_input': "sum of inputs as a decimal",
|
||||
'total_output': "sum of outputs, sans fee, as a decimal",
|
||||
'total_fee': "fee amount",
|
||||
'hex': "entire transaction encoded in hex",
|
||||
}
|
||||
|
||||
|
||||
def encode_account_doc():
|
||||
return {
|
||||
'id': 'account_id',
|
||||
'is_default': 'this account is used by default',
|
||||
'ledger': 'name of crypto currency and network',
|
||||
'name': 'optional account name',
|
||||
'seed': 'human friendly words from which account can be recreated',
|
||||
'encrypted': 'if account is encrypted',
|
||||
'private_key': 'extended private key',
|
||||
'public_key': 'extended public key',
|
||||
'address_generator': 'settings for generating addresses',
|
||||
'modified_on': 'date of last modification to account settings'
|
||||
}
|
||||
|
||||
|
||||
def encode_wallet_doc():
|
||||
return {
|
||||
'id': 'wallet_id',
|
||||
'name': 'optional wallet name',
|
||||
}
|
||||
|
||||
|
||||
def encode_file_doc():
|
||||
return {
|
||||
'streaming_url': '(str) url to stream the file using range requests',
|
||||
'completed': '(bool) true if download is completed',
|
||||
'file_name': '(str) name of file',
|
||||
'download_directory': '(str) download directory',
|
||||
'points_paid': '(float) credit paid to download file',
|
||||
'stopped': '(bool) true if download is stopped',
|
||||
'stream_hash': '(str) stream hash of file',
|
||||
'stream_name': '(str) stream name',
|
||||
'suggested_file_name': '(str) suggested file name',
|
||||
'sd_hash': '(str) sd hash of file',
|
||||
'download_path': '(str) download path of file',
|
||||
'mime_type': '(str) mime type of file',
|
||||
'key': '(str) key attached to file',
|
||||
'total_bytes_lower_bound': '(int) lower bound file size in bytes',
|
||||
'total_bytes': '(int) file upper bound size in bytes',
|
||||
'written_bytes': '(int) written size in bytes',
|
||||
'blobs_completed': '(int) number of fully downloaded blobs',
|
||||
'blobs_in_stream': '(int) total blobs on stream',
|
||||
'blobs_remaining': '(int) total blobs remaining to download',
|
||||
'status': '(str) downloader status',
|
||||
'claim_id': '(str) None if claim is not found else the claim id',
|
||||
'txid': '(str) None if claim is not found else the transaction id',
|
||||
'nout': '(int) None if claim is not found else the transaction output index',
|
||||
'outpoint': '(str) None if claim is not found else the tx and output',
|
||||
'metadata': '(dict) None if claim is not found else the claim metadata',
|
||||
'channel_claim_id': '(str) None if claim is not found or not signed',
|
||||
'channel_name': '(str) None if claim is not found or not signed',
|
||||
'claim_name': '(str) None if claim is not found else the claim name',
|
||||
'reflector_progress': '(int) reflector upload progress, 0 to 100',
|
||||
'uploading_to_reflector': '(bool) set to True when currently uploading to reflector'
|
||||
}
|
||||
|
||||
|
||||
class JSONResponseEncoder(JSONEncoder):
|
||||
|
||||
def __init__(self, *args, ledger: Ledger, include_protobuf=False, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.ledger = ledger
|
||||
self.include_protobuf = include_protobuf
|
||||
|
||||
def default(self, obj): # pylint: disable=method-hidden,arguments-renamed,too-many-return-statements
|
||||
if isinstance(obj, Account):
|
||||
return self.encode_account(obj)
|
||||
if isinstance(obj, Wallet):
|
||||
return self.encode_wallet(obj)
|
||||
if isinstance(obj, (ManagedStream, TorrentSource)):
|
||||
return self.encode_file(obj)
|
||||
if isinstance(obj, Transaction):
|
||||
return self.encode_transaction(obj)
|
||||
if isinstance(obj, Output):
|
||||
return self.encode_output(obj)
|
||||
if isinstance(obj, Claim):
|
||||
return self.encode_claim(obj)
|
||||
if isinstance(obj, Support):
|
||||
return obj.to_dict()
|
||||
if isinstance(obj, PublicKey):
|
||||
return obj.extended_key_string()
|
||||
if isinstance(obj, datetime):
|
||||
return obj.strftime("%Y%m%dT%H:%M:%S")
|
||||
if isinstance(obj, Decimal):
|
||||
return float(obj)
|
||||
if isinstance(obj, bytes):
|
||||
return obj.decode()
|
||||
return super().default(obj)
|
||||
|
||||
def encode_transaction(self, tx):
|
||||
return {
|
||||
'txid': tx.id,
|
||||
'height': tx.height,
|
||||
'inputs': [self.encode_input(txo) for txo in tx.inputs],
|
||||
'outputs': [self.encode_output(txo) for txo in tx.outputs],
|
||||
'total_input': dewies_to_lbc(tx.input_sum),
|
||||
'total_output': dewies_to_lbc(tx.input_sum - tx.fee),
|
||||
'total_fee': dewies_to_lbc(tx.fee),
|
||||
'hex': hexlify(tx.raw).decode(),
|
||||
}
|
||||
|
||||
def encode_output(self, txo, check_signature=True):
|
||||
if not txo:
|
||||
return
|
||||
tx_height = txo.tx_ref.height
|
||||
best_height = self.ledger.headers.height
|
||||
output = {
|
||||
'txid': txo.tx_ref.id,
|
||||
'nout': txo.position,
|
||||
'height': tx_height,
|
||||
'amount': dewies_to_lbc(txo.amount),
|
||||
'address': txo.get_address(self.ledger) if txo.has_address else None,
|
||||
'confirmations': (best_height+1) - tx_height if tx_height > 0 else tx_height,
|
||||
'timestamp': self.ledger.headers.estimated_timestamp(tx_height)
|
||||
}
|
||||
if txo.is_spent is not None:
|
||||
output['is_spent'] = txo.is_spent
|
||||
if txo.is_my_output is not None:
|
||||
output['is_my_output'] = txo.is_my_output
|
||||
if txo.is_my_input is not None:
|
||||
output['is_my_input'] = txo.is_my_input
|
||||
if txo.sent_supports is not None:
|
||||
output['sent_supports'] = dewies_to_lbc(txo.sent_supports)
|
||||
if txo.sent_tips is not None:
|
||||
output['sent_tips'] = dewies_to_lbc(txo.sent_tips)
|
||||
if txo.received_tips is not None:
|
||||
output['received_tips'] = dewies_to_lbc(txo.received_tips)
|
||||
if txo.is_internal_transfer is not None:
|
||||
output['is_internal_transfer'] = txo.is_internal_transfer
|
||||
|
||||
if txo.script.is_claim_name:
|
||||
output['type'] = 'claim'
|
||||
output['claim_op'] = 'create'
|
||||
elif txo.script.is_update_claim:
|
||||
output['type'] = 'claim'
|
||||
output['claim_op'] = 'update'
|
||||
elif txo.script.is_support_claim:
|
||||
output['type'] = 'support'
|
||||
elif txo.script.is_return_data:
|
||||
output['type'] = 'data'
|
||||
elif txo.purchase is not None:
|
||||
output['type'] = 'purchase'
|
||||
output['claim_id'] = txo.purchased_claim_id
|
||||
if txo.purchased_claim is not None:
|
||||
output['claim'] = self.encode_output(txo.purchased_claim)
|
||||
else:
|
||||
output['type'] = 'payment'
|
||||
|
||||
if txo.script.is_claim_involved:
|
||||
output.update({
|
||||
'name': txo.claim_name,
|
||||
'normalized_name': txo.normalized_name,
|
||||
'claim_id': txo.claim_id,
|
||||
'permanent_url': txo.permanent_url,
|
||||
'meta': self.encode_claim_meta(txo.meta.copy())
|
||||
})
|
||||
if 'short_url' in output['meta']:
|
||||
output['short_url'] = output['meta'].pop('short_url')
|
||||
if 'canonical_url' in output['meta']:
|
||||
output['canonical_url'] = output['meta'].pop('canonical_url')
|
||||
if txo.claims is not None:
|
||||
output['claims'] = [self.encode_output(o) for o in txo.claims]
|
||||
if txo.reposted_claim is not None:
|
||||
output['reposted_claim'] = self.encode_output(txo.reposted_claim)
|
||||
if txo.script.is_claim_name or txo.script.is_update_claim or txo.script.is_support_claim_data:
|
||||
try:
|
||||
output['value'] = txo.signable
|
||||
if self.include_protobuf:
|
||||
output['protobuf'] = hexlify(txo.signable.to_bytes())
|
||||
if txo.purchase_receipt is not None:
|
||||
output['purchase_receipt'] = self.encode_output(txo.purchase_receipt)
|
||||
if txo.script.is_claim_name or txo.script.is_update_claim:
|
||||
output['value_type'] = txo.claim.claim_type
|
||||
if txo.claim.is_channel:
|
||||
output['has_signing_key'] = txo.has_private_key
|
||||
if check_signature and txo.signable.is_signed:
|
||||
if txo.channel is not None:
|
||||
output['signing_channel'] = self.encode_output(txo.channel)
|
||||
output['is_channel_signature_valid'] = txo.is_signed_by(txo.channel, self.ledger)
|
||||
else:
|
||||
output['signing_channel'] = {'channel_id': txo.signable.signing_channel_id}
|
||||
output['is_channel_signature_valid'] = False
|
||||
except DecodeError:
|
||||
pass
|
||||
return output
|
||||
|
||||
def encode_claim_meta(self, meta):
|
||||
for key, value in meta.items():
|
||||
if key.endswith('_amount'):
|
||||
if isinstance(value, int):
|
||||
meta[key] = dewies_to_lbc(value)
|
||||
if 0 < meta.get('creation_height', 0) <= self.ledger.headers.height:
|
||||
meta['creation_timestamp'] = self.ledger.headers.estimated_timestamp(meta['creation_height'])
|
||||
return meta
|
||||
|
||||
def encode_input(self, txi):
|
||||
return self.encode_output(txi.txo_ref.txo, False) if txi.txo_ref.txo is not None else {
|
||||
'txid': txi.txo_ref.tx_ref.id,
|
||||
'nout': txi.txo_ref.position
|
||||
}
|
||||
|
||||
def encode_account(self, account):
|
||||
result = account.to_dict()
|
||||
result['id'] = account.id
|
||||
result.pop('certificates', None)
|
||||
result['is_default'] = self.ledger.accounts[0] == account
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def encode_wallet(wallet):
|
||||
return {
|
||||
'id': wallet.id,
|
||||
'name': wallet.name
|
||||
}
|
||||
|
||||
def encode_file(self, managed_stream):
|
||||
output_exists = managed_stream.output_file_exists
|
||||
tx_height = managed_stream.stream_claim_info.height
|
||||
best_height = self.ledger.headers.height
|
||||
is_stream = hasattr(managed_stream, 'stream_hash')
|
||||
if is_stream:
|
||||
total_bytes_lower_bound = managed_stream.descriptor.lower_bound_decrypted_length()
|
||||
total_bytes = managed_stream.descriptor.upper_bound_decrypted_length()
|
||||
else:
|
||||
total_bytes_lower_bound = total_bytes = managed_stream.torrent_length
|
||||
result = {
|
||||
'streaming_url': None,
|
||||
'completed': managed_stream.completed,
|
||||
'file_name': None,
|
||||
'download_directory': None,
|
||||
'download_path': None,
|
||||
'points_paid': 0.0,
|
||||
'stopped': not managed_stream.running,
|
||||
'stream_hash': None,
|
||||
'stream_name': None,
|
||||
'suggested_file_name': None,
|
||||
'sd_hash': None,
|
||||
'mime_type': None,
|
||||
'key': None,
|
||||
'total_bytes_lower_bound': total_bytes_lower_bound,
|
||||
'total_bytes': total_bytes,
|
||||
'written_bytes': managed_stream.written_bytes,
|
||||
'blobs_completed': None,
|
||||
'blobs_in_stream': None,
|
||||
'blobs_remaining': None,
|
||||
'status': managed_stream.status,
|
||||
'claim_id': managed_stream.claim_id,
|
||||
'txid': managed_stream.txid,
|
||||
'nout': managed_stream.nout,
|
||||
'outpoint': managed_stream.outpoint,
|
||||
'metadata': managed_stream.metadata,
|
||||
'protobuf': managed_stream.metadata_protobuf,
|
||||
'channel_claim_id': managed_stream.channel_claim_id,
|
||||
'channel_name': managed_stream.channel_name,
|
||||
'claim_name': managed_stream.claim_name,
|
||||
'content_fee': managed_stream.content_fee,
|
||||
'purchase_receipt': self.encode_output(managed_stream.purchase_receipt),
|
||||
'added_on': managed_stream.added_on,
|
||||
'height': tx_height,
|
||||
'confirmations': (best_height + 1) - tx_height if tx_height > 0 else tx_height,
|
||||
'timestamp': self.ledger.headers.estimated_timestamp(tx_height),
|
||||
'is_fully_reflected': False,
|
||||
'reflector_progress': False,
|
||||
'uploading_to_reflector': False
|
||||
}
|
||||
if is_stream:
|
||||
result.update({
|
||||
'streaming_url': managed_stream.stream_url,
|
||||
'stream_hash': managed_stream.stream_hash,
|
||||
'stream_name': managed_stream.stream_name,
|
||||
'suggested_file_name': managed_stream.suggested_file_name,
|
||||
'sd_hash': managed_stream.descriptor.sd_hash,
|
||||
'mime_type': managed_stream.mime_type,
|
||||
'key': managed_stream.descriptor.key,
|
||||
'blobs_completed': managed_stream.blobs_completed,
|
||||
'blobs_in_stream': managed_stream.blobs_in_stream,
|
||||
'blobs_remaining': managed_stream.blobs_remaining,
|
||||
'is_fully_reflected': managed_stream.is_fully_reflected,
|
||||
'reflector_progress': managed_stream.reflector_progress,
|
||||
'uploading_to_reflector': managed_stream.uploading_to_reflector
|
||||
})
|
||||
else:
|
||||
result.update({
|
||||
'streaming_url': f'file://{managed_stream.full_path}',
|
||||
})
|
||||
if output_exists:
|
||||
result.update({
|
||||
'file_name': managed_stream.file_name,
|
||||
'download_directory': managed_stream.download_directory,
|
||||
'download_path': managed_stream.full_path,
|
||||
})
|
||||
return result
|
||||
|
||||
def encode_claim(self, claim):
|
||||
encoded = getattr(claim, claim.claim_type).to_dict()
|
||||
if 'public_key' in encoded:
|
||||
encoded['public_key_id'] = self.ledger.public_key_to_address(
|
||||
unhexlify(encoded['public_key'])
|
||||
)
|
||||
return encoded
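
Because JSONResponseEncoder is a plain json.JSONEncoder subclass, extra keyword arguments given to json.dumps are forwarded to its constructor; a hedged sketch, with ledger and tx standing in for live daemon objects:

    import json
    body = json.dumps(
        {"result": tx}, cls=JSONResponseEncoder,
        ledger=ledger, include_protobuf=True
    )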
@@ -1,74 +0,0 @@
# pylint: skip-file
import os
import sys
import logging

log = logging.getLogger(__name__)


def migrate_db(conf, start, end):
    current = start
    while current < end:
        if current == 1:
            from .migrate1to2 import do_migration
        elif current == 2:
            from .migrate2to3 import do_migration
        elif current == 3:
            from .migrate3to4 import do_migration
        elif current == 4:
            from .migrate4to5 import do_migration
        elif current == 5:
            from .migrate5to6 import do_migration
        elif current == 6:
            from .migrate6to7 import do_migration
        elif current == 7:
            from .migrate7to8 import do_migration
        elif current == 8:
            from .migrate8to9 import do_migration
        elif current == 9:
            from .migrate9to10 import do_migration
        elif current == 10:
            from .migrate10to11 import do_migration
        elif current == 11:
            from .migrate11to12 import do_migration
        elif current == 12:
            from .migrate12to13 import do_migration
        elif current == 13:
            from .migrate13to14 import do_migration
        elif current == 14:
            from .migrate14to15 import do_migration
        elif current == 15:
            from .migrate15to16 import do_migration
        else:
            raise Exception(f"DB migration of version {current} to {current+1} is not available")
        try:
            do_migration(conf)
        except Exception:
            log.exception("failed to migrate database")
            if os.path.exists(os.path.join(conf.data_dir, "lbrynet.sqlite")):
                backup_name = f"rev_{current}_unmigrated_database"
                count = 0
                while os.path.exists(os.path.join(conf.data_dir, backup_name + ".sqlite")):
                    count += 1
                    backup_name = f"rev_{current}_unmigrated_database_{count}"
                backup_path = os.path.join(conf.data_dir, backup_name + ".sqlite")
                os.rename(os.path.join(conf.data_dir, "lbrynet.sqlite"), backup_path)
                log.info("made a backup of the unmigrated database: %s", backup_path)
            if os.path.isfile(os.path.join(conf.data_dir, "db_revision")):
                os.remove(os.path.join(conf.data_dir, "db_revision"))
            return None
        current += 1
        log.info("successfully migrated the database from revision %i to %i", current - 1, current)
    return None


def run_migration_script():
    log_format = "(%(asctime)s)[%(filename)s:%(lineno)s] %(funcName)s(): %(message)s"
    logging.basicConfig(level=logging.DEBUG, format=log_format, filename="migrator.log")
    sys.stdout = open("migrator.out.log", 'w')
    sys.stderr = open("migrator.err.log", 'w')
    migrate_db(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))


if __name__ == "__main__":
    run_migration_script()
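
migrate_db expects a Config-like object exposing data_dir and applies one revision at a time, backing up lbrynet.sqlite if a step fails; a hedged sketch of driving it directly from code:

    from lbry.conf import Config

    conf = Config()            # hypothetical setup; any object with a data_dir attribute works
    migrate_db(conf, 14, 16)   # runs migrate14to15, then migrate15to16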
@@ -1,54 +0,0 @@
import sqlite3
|
||||
import os
|
||||
import binascii
|
||||
|
||||
|
||||
def do_migration(conf):
|
||||
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
||||
connection = sqlite3.connect(db_path)
|
||||
cursor = connection.cursor()
|
||||
|
||||
current_columns = []
|
||||
for col_info in cursor.execute("pragma table_info('file');").fetchall():
|
||||
current_columns.append(col_info[1])
|
||||
if 'content_fee' in current_columns or 'saved_file' in current_columns:
|
||||
connection.close()
|
||||
print("already migrated")
|
||||
return
|
||||
|
||||
cursor.execute(
|
||||
"pragma foreign_keys=off;"
|
||||
)
|
||||
|
||||
cursor.execute("""
|
||||
create table if not exists new_file (
|
||||
stream_hash text primary key not null references stream,
|
||||
file_name text,
|
||||
download_directory text,
|
||||
blob_data_rate real not null,
|
||||
status text not null,
|
||||
saved_file integer not null,
|
||||
content_fee text
|
||||
);
|
||||
""")
|
||||
for (stream_hash, file_name, download_dir, data_rate, status) in cursor.execute("select * from file").fetchall():
|
||||
saved_file = 0
|
||||
if download_dir != '{stream}' and file_name != '{stream}':
|
||||
try:
|
||||
if os.path.isfile(os.path.join(binascii.unhexlify(download_dir).decode(),
|
||||
binascii.unhexlify(file_name).decode())):
|
||||
saved_file = 1
|
||||
else:
|
||||
download_dir, file_name = None, None
|
||||
except Exception:
|
||||
download_dir, file_name = None, None
|
||||
else:
|
||||
download_dir, file_name = None, None
|
||||
cursor.execute(
|
||||
"insert into new_file values (?, ?, ?, ?, ?, ?, NULL)",
|
||||
(stream_hash, file_name, download_dir, data_rate, status, saved_file)
|
||||
)
|
||||
cursor.execute("drop table file")
|
||||
cursor.execute("alter table new_file rename to file")
|
||||
connection.commit()
|
||||
connection.close()
@@ -1,69 +0,0 @@
import sqlite3
|
||||
import os
|
||||
import time
|
||||
|
||||
|
||||
def do_migration(conf):
|
||||
db_path = os.path.join(conf.data_dir, 'lbrynet.sqlite')
|
||||
connection = sqlite3.connect(db_path)
|
||||
connection.row_factory = sqlite3.Row
|
||||
cursor = connection.cursor()
|
||||
|
||||
current_columns = []
|
||||
for col_info in cursor.execute("pragma table_info('file');").fetchall():
|
||||
current_columns.append(col_info[1])
|
||||
|
||||
if 'added_on' in current_columns:
|
||||
connection.close()
|
||||
print('already migrated')
|
||||
return
|
||||
|
||||
# follow 12 step schema change procedure
|
||||
cursor.execute("pragma foreign_keys=off")
|
||||
|
||||
# we don't have any indexes, views or triggers, so step 3 is skipped.
|
||||
cursor.execute("drop table if exists new_file")
|
||||
cursor.execute("""
|
||||
create table if not exists new_file (
|
||||
stream_hash text not null primary key references stream,
|
||||
file_name text,
|
||||
download_directory text,
|
||||
blob_data_rate text not null,
|
||||
status text not null,
|
||||
saved_file integer not null,
|
||||
content_fee text,
|
||||
added_on integer not null
|
||||
);
|
||||
|
||||
|
||||
""")
|
||||
|
||||
# step 5: transfer content from old to new
|
||||
select = "select * from file"
|
||||
for (stream_hash, file_name, download_dir, blob_rate, status, saved_file, fee) \
|
||||
in cursor.execute(select).fetchall():
|
||||
added_on = int(time.time())
|
||||
cursor.execute(
|
||||
"insert into new_file values (?, ?, ?, ?, ?, ?, ?, ?)",
|
||||
(stream_hash, file_name, download_dir, blob_rate, status, saved_file, fee, added_on)
|
||||
)
|
||||
|
||||
# step 6: drop old table
|
||||
cursor.execute("drop table file")
|
||||
|
||||
# step 7: rename new table to old table
|
||||
cursor.execute("alter table new_file rename to file")
|
||||
|
||||
# step 8: we aren't using indexes, views or triggers so skip
|
||||
# step 9: no views so skip
|
||||
# step 10: foreign key check
|
||||
cursor.execute("pragma foreign_key_check;")
|
||||
|
||||
# step 11: commit transaction
|
||||
connection.commit()
|
||||
|
||||
# step 12: re-enable foreign keys
|
||||
connection.execute("pragma foreign_keys=on;")
|
||||
|
||||
# done :)
|
||||
connection.close()
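
The migration above is an instance of SQLite's documented 12-step ALTER TABLE procedure; a hedged skeleton of the same pattern, shown on a hypothetical table t(a, b) with the same sqlite3 cursor and connection objects (time imported as in the migration above):

    cursor.execute("pragma foreign_keys=off")
    cursor.execute("drop table if exists new_t")
    cursor.execute("create table new_t (a text primary key, b integer not null, added_on integer not null)")
    for a, b in cursor.execute("select a, b from t").fetchall():
        cursor.execute("insert into new_t values (?, ?, ?)", (a, b, int(time.time())))
    cursor.execute("drop table t")                    # drop the old table
    cursor.execute("alter table new_t rename to t")   # rename the new table into place
    cursor.execute("pragma foreign_key_check;")       # verify nothing dangles
    connection.commit()
    connection.execute("pragma foreign_keys=on;")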
@@ -1,80 +0,0 @@
import os
|
||||
import sqlite3
|
||||
|
||||
|
||||
def do_migration(conf):
|
||||
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
||||
connection = sqlite3.connect(db_path)
|
||||
cursor = connection.cursor()
|
||||
|
||||
current_columns = []
|
||||
for col_info in cursor.execute("pragma table_info('file');").fetchall():
|
||||
current_columns.append(col_info[1])
|
||||
if 'bt_infohash' in current_columns:
|
||||
connection.close()
|
||||
print("already migrated")
|
||||
return
|
||||
|
||||
cursor.executescript("""
|
||||
pragma foreign_keys=off;
|
||||
|
||||
create table if not exists torrent (
|
||||
bt_infohash char(20) not null primary key,
|
||||
tracker text,
|
||||
length integer not null,
|
||||
name text not null
|
||||
);
|
||||
|
||||
create table if not exists torrent_node ( -- BEP-0005
|
||||
bt_infohash char(20) not null references torrent,
|
||||
host text not null,
|
||||
port integer not null
|
||||
);
|
||||
|
||||
create table if not exists torrent_tracker ( -- BEP-0012
|
||||
bt_infohash char(20) not null references torrent,
|
||||
tracker text not null
|
||||
);
|
||||
|
||||
create table if not exists torrent_http_seed ( -- BEP-0017
|
||||
bt_infohash char(20) not null references torrent,
|
||||
http_seed text not null
|
||||
);
|
||||
|
||||
create table if not exists new_file (
|
||||
stream_hash char(96) references stream,
|
||||
bt_infohash char(20) references torrent,
|
||||
file_name text,
|
||||
download_directory text,
|
||||
blob_data_rate real not null,
|
||||
status text not null,
|
||||
saved_file integer not null,
|
||||
content_fee text,
|
||||
added_on integer not null
|
||||
);
|
||||
|
||||
create table if not exists new_content_claim (
|
||||
stream_hash char(96) references stream,
|
||||
bt_infohash char(20) references torrent,
|
||||
claim_outpoint text unique not null references claim
|
||||
);
|
||||
|
||||
insert into new_file (stream_hash, bt_infohash, file_name, download_directory, blob_data_rate, status,
|
||||
saved_file, content_fee, added_on) select
|
||||
stream_hash, NULL, file_name, download_directory, blob_data_rate, status, saved_file, content_fee,
|
||||
added_on
|
||||
from file;
|
||||
|
||||
insert or ignore into new_content_claim (stream_hash, bt_infohash, claim_outpoint)
|
||||
select stream_hash, NULL, claim_outpoint from content_claim;
|
||||
|
||||
drop table file;
|
||||
drop table content_claim;
|
||||
alter table new_file rename to file;
|
||||
alter table new_content_claim rename to content_claim;
|
||||
|
||||
pragma foreign_keys=on;
|
||||
""")
|
||||
|
||||
connection.commit()
|
||||
connection.close()
@@ -1,21 +0,0 @@
import os
import sqlite3


def do_migration(conf):
    db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
    connection = sqlite3.connect(db_path)
    cursor = connection.cursor()

    cursor.executescript("""
        create table if not exists peer (
            node_id char(96) not null primary key,
            address text not null,
            udp_port integer not null,
            tcp_port integer,
            unique (address, udp_port)
        );
    """)

    connection.commit()
    connection.close()
@@ -1,16 +0,0 @@
import os
import sqlite3


def do_migration(conf):
    db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
    connection = sqlite3.connect(db_path)
    cursor = connection.cursor()

    cursor.executescript("""
        alter table blob add column added_on integer not null default 0;
        alter table blob add column is_mine integer not null default 1;
    """)

    connection.commit()
    connection.close()
@@ -1,17 +0,0 @@
import os
import sqlite3


def do_migration(conf):
    db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
    connection = sqlite3.connect(db_path)
    cursor = connection.cursor()

    cursor.executescript("""
        update blob set should_announce=0
        where should_announce=1 and
        blob.blob_hash in (select stream_blob.blob_hash from stream_blob where position=0);
    """)

    connection.commit()
    connection.close()
@@ -1,20 +0,0 @@
import sqlite3
import os


def do_migration(conf):
    db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
    connection = sqlite3.connect(db_path)
    cursor = connection.cursor()

    query = "select stream_hash, sd_hash from main.stream"
    for stream_hash, sd_hash in cursor.execute(query).fetchall():
        head_blob_hash = cursor.execute(
            "select blob_hash from stream_blob where position = 0 and stream_hash = ?",
            (stream_hash,)
        ).fetchone()
        if not head_blob_hash:
            continue
        cursor.execute("update blob set should_announce=1 where blob_hash in (?, ?)", (sd_hash, head_blob_hash[0],))
    connection.commit()
    connection.close()
@@ -1,31 +0,0 @@
import logging
from aiohttp import web

log = logging.getLogger(__name__)


def ensure_request_allowed(request, conf):
    if is_request_allowed(request, conf):
        return
    if conf.allowed_origin:
        log.warning(
            "API requests with Origin '%s' are not allowed, "
            "configuration 'allowed_origin' limits requests to: '%s'",
            request.headers.get('Origin'), conf.allowed_origin
        )
    else:
        log.warning(
            "API requests with Origin '%s' are not allowed, "
            "update configuration 'allowed_origin' to enable this origin.",
            request.headers.get('Origin')
        )
    raise web.HTTPForbidden()


def is_request_allowed(request, conf) -> bool:
    origin = request.headers.get('Origin')
    return (
        origin is None or
        origin == conf.allowed_origin or
        conf.allowed_origin == '*'
    )
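
A hedged illustration of the policy above, using stand-in request and config objects: requests with no Origin header always pass, otherwise the Origin must match allowed_origin exactly unless it is set to '*':

    class FakeConf:
        allowed_origin = "https://app.example.com"   # hypothetical value

    class FakeRequest:
        headers = {"Origin": "https://app.example.com"}

    assert is_request_allowed(FakeRequest(), FakeConf())
    FakeRequest.headers = {"Origin": "https://evil.example"}
    assert not is_request_allowed(FakeRequest(), FakeConf())
    FakeRequest.headers = {}
    assert is_request_allowed(FakeRequest(), FakeConf())     # no Origin header sent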
@@ -1,29 +0,0 @@
import platform
import os
import logging.handlers

from lbry import build_info, __version__ as lbrynet_version

log = logging.getLogger(__name__)


def get_platform() -> dict:
    os_system = platform.system()
    if os.environ and 'ANDROID_ARGUMENT' in os.environ:
        os_system = 'android'
    d = {
        "processor": platform.processor(),
        "python_version": platform.python_version(),
        "platform": platform.platform(),
        "os_release": platform.release(),
        "os_system": os_system,
        "lbrynet_version": lbrynet_version,
        "version": lbrynet_version,
        "build": build_info.BUILD,  # CI server sets this during build step
    }
    if d["os_system"] == "Linux":
        import distro  # pylint: disable=import-outside-toplevel
        d["distro"] = distro.info()
        d["desktop"] = os.environ.get('XDG_CURRENT_DESKTOP', 'Unknown')

    return d
@@ -1,307 +0,0 @@
import asyncio
|
||||
import logging
|
||||
import typing
|
||||
from typing import Optional
|
||||
from aiohttp.web import Request
|
||||
from lbry.error import ResolveError, DownloadSDTimeoutError, InsufficientFundsError
|
||||
from lbry.error import ResolveTimeoutError, DownloadDataTimeoutError, KeyFeeAboveMaxAllowedError
|
||||
from lbry.error import InvalidStreamURLError
|
||||
from lbry.stream.managed_stream import ManagedStream
|
||||
from lbry.torrent.torrent_manager import TorrentSource
|
||||
from lbry.utils import cache_concurrent
|
||||
from lbry.schema.url import URL
|
||||
from lbry.wallet.dewies import dewies_to_lbc
|
||||
from lbry.file.source_manager import SourceManager
|
||||
from lbry.file.source import ManagedDownloadSource
|
||||
from lbry.extras.daemon.storage import StoredContentClaim
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.conf import Config
|
||||
from lbry.extras.daemon.analytics import AnalyticsManager
|
||||
from lbry.extras.daemon.storage import SQLiteStorage
|
||||
from lbry.wallet import WalletManager
|
||||
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FileManager:
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, config: 'Config', wallet_manager: 'WalletManager',
|
||||
storage: 'SQLiteStorage', analytics_manager: Optional['AnalyticsManager'] = None):
|
||||
self.loop = loop
|
||||
self.config = config
|
||||
self.wallet_manager = wallet_manager
|
||||
self.storage = storage
|
||||
self.analytics_manager = analytics_manager
|
||||
self.source_managers: typing.Dict[str, SourceManager] = {}
|
||||
self.started = asyncio.Event()
|
||||
|
||||
@property
|
||||
def streams(self):
|
||||
return self.source_managers['stream']._sources
|
||||
|
||||
async def create_stream(self, file_path: str, key: Optional[bytes] = None, **kwargs) -> ManagedDownloadSource:
|
||||
if 'stream' in self.source_managers:
|
||||
return await self.source_managers['stream'].create(file_path, key, **kwargs)
|
||||
raise NotImplementedError
|
||||
|
||||
async def start(self):
|
||||
await asyncio.gather(*(source_manager.start() for source_manager in self.source_managers.values()))
|
||||
for manager in self.source_managers.values():
|
||||
await manager.started.wait()
|
||||
self.started.set()
|
||||
|
||||
async def stop(self):
|
||||
for manager in self.source_managers.values():
|
||||
# fixme: pop or not?
|
||||
await manager.stop()
|
||||
self.started.clear()
|
||||
|
||||
@cache_concurrent
|
||||
async def download_from_uri(self, uri, exchange_rate_manager: 'ExchangeRateManager',
|
||||
timeout: Optional[float] = None, file_name: Optional[str] = None,
|
||||
download_directory: Optional[str] = None,
|
||||
save_file: Optional[bool] = None, resolve_timeout: float = 3.0,
|
||||
wallet: Optional['Wallet'] = None) -> ManagedDownloadSource:
|
||||
|
||||
wallet = wallet or self.wallet_manager.default_wallet
|
||||
timeout = timeout or self.config.download_timeout
|
||||
start_time = self.loop.time()
|
||||
resolved_time = None
|
||||
stream = None
|
||||
claim = None
|
||||
error = None
|
||||
outpoint = None
|
||||
if save_file is None:
|
||||
save_file = self.config.save_files
|
||||
if file_name and not save_file:
|
||||
save_file = True
|
||||
if save_file:
|
||||
download_directory = download_directory or self.config.download_dir
|
||||
else:
|
||||
download_directory = None
|
||||
|
||||
payment = None
|
||||
try:
|
||||
# resolve the claim
|
||||
try:
|
||||
if not URL.parse(uri).has_stream:
|
||||
raise InvalidStreamURLError(uri)
|
||||
except ValueError:
|
||||
raise InvalidStreamURLError(uri)
|
||||
try:
|
||||
resolved_result = await asyncio.wait_for(
|
||||
self.wallet_manager.ledger.resolve(
|
||||
wallet.accounts, [uri],
|
||||
include_purchase_receipt=True,
|
||||
include_is_my_output=True
|
||||
), resolve_timeout
|
||||
)
|
||||
except asyncio.TimeoutError:
|
||||
raise ResolveTimeoutError(uri)
|
||||
except Exception as err:
|
||||
log.exception("Unexpected error resolving stream:")
|
||||
raise ResolveError(f"Unexpected error resolving stream: {str(err)}")
|
||||
if 'error' in resolved_result:
|
||||
raise ResolveError(f"Unexpected error resolving uri for download: {resolved_result['error']}")
|
||||
if not resolved_result or uri not in resolved_result:
|
||||
raise ResolveError(f"Failed to resolve stream at '{uri}'")
|
||||
txo = resolved_result[uri]
|
||||
if isinstance(txo, dict):
|
||||
raise ResolveError(f"Failed to resolve stream at '{uri}': {txo}")
|
||||
claim = txo.claim
|
||||
outpoint = f"{txo.tx_ref.id}:{txo.position}"
|
||||
resolved_time = self.loop.time() - start_time
|
||||
await self.storage.save_claim_from_output(self.wallet_manager.ledger, txo)
|
||||
|
||||
####################
|
||||
# update or replace
|
||||
####################
|
||||
|
||||
if claim.stream.source.bt_infohash:
|
||||
source_manager = self.source_managers['torrent']
|
||||
existing = source_manager.get_filtered(bt_infohash=claim.stream.source.bt_infohash)
|
||||
elif claim.stream.source.sd_hash:
|
||||
source_manager = self.source_managers['stream']
|
||||
existing = source_manager.get_filtered(sd_hash=claim.stream.source.sd_hash)
|
||||
else:
|
||||
raise ResolveError(f"There is nothing to download at {uri} - Source is unknown or unset")
|
||||
|
||||
# resume or update an existing stream, if the stream changed: download it and delete the old one after
|
||||
to_replace, updated_stream = None, None
|
||||
if existing and existing[0].claim_id != txo.claim_id:
|
||||
raise ResolveError(f"stream for {existing[0].claim_id} collides with existing download {txo.claim_id}")
|
||||
if existing:
|
||||
log.info("claim contains a metadata only update to a stream we have")
|
||||
if claim.stream.source.bt_infohash:
|
||||
await self.storage.save_torrent_content_claim(
|
||||
existing[0].identifier, outpoint, existing[0].torrent_length, existing[0].torrent_name
|
||||
)
|
||||
claim_info = await self.storage.get_content_claim_for_torrent(existing[0].identifier)
|
||||
existing[0].set_claim(claim_info, claim)
|
||||
else:
|
||||
await self.storage.save_content_claim(
|
||||
existing[0].stream_hash, outpoint
|
||||
)
|
||||
await source_manager._update_content_claim(existing[0])
|
||||
updated_stream = existing[0]
|
||||
else:
|
||||
existing_for_claim_id = self.get_filtered(claim_id=txo.claim_id)
|
||||
if existing_for_claim_id:
|
||||
log.info("claim contains an update to a stream we have, downloading it")
|
||||
if save_file and existing_for_claim_id[0].output_file_exists:
|
||||
save_file = False
|
||||
if not claim.stream.source.bt_infohash:
|
||||
existing_for_claim_id[0].downloader.node = source_manager.node
|
||||
await existing_for_claim_id[0].start(timeout=timeout, save_now=save_file)
|
||||
if not existing_for_claim_id[0].output_file_exists and (
|
||||
save_file or file_name or download_directory):
|
||||
await existing_for_claim_id[0].save_file(
|
||||
file_name=file_name, download_directory=download_directory
|
||||
)
|
||||
to_replace = existing_for_claim_id[0]
|
||||
|
||||
# resume or update an existing stream, if the stream changed: download it and delete the old one after
|
||||
if updated_stream:
|
||||
log.info("already have stream for %s", uri)
|
||||
if save_file and updated_stream.output_file_exists:
|
||||
save_file = False
|
||||
if not claim.stream.source.bt_infohash:
|
||||
updated_stream.downloader.node = source_manager.node
|
||||
await updated_stream.start(timeout=timeout, save_now=save_file)
|
||||
if not updated_stream.output_file_exists and (save_file or file_name or download_directory):
|
||||
await updated_stream.save_file(
|
||||
file_name=file_name, download_directory=download_directory
|
||||
)
|
||||
return updated_stream
|
||||
|
||||
####################
|
||||
# pay fee
|
||||
####################
|
||||
|
||||
needs_purchasing = (
|
||||
not to_replace and
|
||||
not txo.is_my_output and
|
||||
txo.has_price and
|
||||
not txo.purchase_receipt
|
||||
)
|
||||
|
||||
if needs_purchasing:
|
||||
payment = await self.wallet_manager.create_purchase_transaction(
|
||||
wallet.accounts, txo, exchange_rate_manager
|
||||
)
|
||||
|
||||
####################
|
||||
# make downloader and wait for start
|
||||
####################
|
||||
# temporary with fields we know so downloader can start. Missing fields are populated later.
|
||||
stored_claim = StoredContentClaim(outpoint=outpoint, claim_id=txo.claim_id, name=txo.claim_name,
|
||||
amount=txo.amount, height=txo.tx_ref.height,
|
||||
serialized=claim.to_bytes().hex())
|
||||
|
||||
if not claim.stream.source.bt_infohash:
|
||||
# fixme: this shouldnt be here
|
||||
stream = ManagedStream(
|
||||
self.loop, self.config, source_manager.blob_manager, claim.stream.source.sd_hash,
|
||||
download_directory, file_name, ManagedStream.STATUS_RUNNING, content_fee=payment,
|
||||
analytics_manager=self.analytics_manager, claim=stored_claim
|
||||
)
|
||||
stream.downloader.node = source_manager.node
|
||||
else:
|
||||
stream = TorrentSource(
|
||||
self.loop, self.config, self.storage, identifier=claim.stream.source.bt_infohash,
|
||||
file_name=file_name, download_directory=download_directory or self.config.download_dir,
|
||||
status=ManagedStream.STATUS_RUNNING, claim=stored_claim, analytics_manager=self.analytics_manager,
|
||||
torrent_session=source_manager.torrent_session
|
||||
)
|
||||
log.info("starting download for %s", uri)
|
||||
|
||||
before_download = self.loop.time()
|
||||
await stream.start(timeout, save_file)
|
||||
|
||||
####################
|
||||
# success case: delete to_replace if applicable, broadcast fee payment
|
||||
####################
|
||||
|
||||
if to_replace: # delete old stream now that the replacement has started downloading
|
||||
await source_manager.delete(to_replace)
|
||||
|
||||
if payment is not None:
|
||||
await self.wallet_manager.broadcast_or_release(payment)
|
||||
payment = None # to avoid releasing in `finally` later
|
||||
log.info("paid fee of %s for %s", dewies_to_lbc(stream.content_fee.outputs[0].amount), uri)
|
||||
await self.storage.save_content_fee(stream.stream_hash, stream.content_fee)
|
||||
|
||||
source_manager.add(stream)
|
||||
|
||||
if not claim.stream.source.bt_infohash:
|
||||
await self.storage.save_content_claim(stream.stream_hash, outpoint)
|
||||
else:
|
||||
await self.storage.save_torrent_content_claim(
|
||||
stream.identifier, outpoint, stream.torrent_length, stream.torrent_name
|
||||
)
|
||||
claim_info = await self.storage.get_content_claim_for_torrent(stream.identifier)
|
||||
stream.set_claim(claim_info, claim)
|
||||
if save_file:
|
||||
await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download))
|
||||
return stream
|
||||
except asyncio.TimeoutError:
|
||||
error = DownloadDataTimeoutError(stream.sd_hash)
|
||||
raise error
|
||||
except (Exception, asyncio.CancelledError) as err: # forgive data timeout, don't delete stream
|
||||
expected = (DownloadSDTimeoutError, DownloadDataTimeoutError, InsufficientFundsError,
|
||||
KeyFeeAboveMaxAllowedError, ResolveError, InvalidStreamURLError)
|
||||
if isinstance(err, expected):
|
||||
log.warning("Failed to download %s: %s", uri, str(err))
|
||||
elif isinstance(err, asyncio.CancelledError):
|
||||
pass
|
||||
else:
|
||||
log.exception("Unexpected error downloading stream:")
|
||||
error = err
|
||||
raise
|
||||
finally:
|
||||
if payment is not None:
|
||||
# payment is set to None after broadcasting, if we're here an exception probably happened
|
||||
await self.wallet_manager.ledger.release_tx(payment)
|
||||
if self.analytics_manager and claim and claim.stream.source.bt_infohash:
|
||||
# TODO: analytics for torrents
|
||||
pass
|
||||
elif self.analytics_manager and (error or (stream and (stream.downloader.time_to_descriptor or
|
||||
stream.downloader.time_to_first_bytes))):
|
||||
server = self.wallet_manager.ledger.network.client.server
|
||||
self.loop.create_task(
|
||||
self.analytics_manager.send_time_to_first_bytes(
|
||||
resolved_time, self.loop.time() - start_time, None if not stream else stream.download_id,
|
||||
uri, outpoint,
|
||||
None if not stream else len(stream.downloader.blob_downloader.active_connections),
|
||||
None if not stream else len(stream.downloader.blob_downloader.scores),
|
||||
None if not stream else len(stream.downloader.blob_downloader.connection_failures),
|
||||
False if not stream else stream.downloader.added_fixed_peers,
|
||||
self.config.fixed_peer_delay if not stream else stream.downloader.fixed_peers_delay,
|
||||
None if not stream else stream.sd_hash,
|
||||
None if not stream else stream.downloader.time_to_descriptor,
|
||||
None if not (stream and stream.descriptor) else stream.descriptor.blobs[0].blob_hash,
|
||||
None if not (stream and stream.descriptor) else stream.descriptor.blobs[0].length,
|
||||
None if not stream else stream.downloader.time_to_first_bytes,
|
||||
None if not error else error.__class__.__name__,
|
||||
None if not error else str(error),
|
||||
None if not server else f"{server[0]}:{server[1]}"
|
||||
)
|
||||
)
|
||||
|
||||
async def stream_partial_content(self, request: Request, sd_hash: str):
|
||||
return await self.source_managers['stream'].stream_partial_content(request, sd_hash)
|
||||
|
||||
def get_filtered(self, *args, **kwargs) -> typing.List[ManagedDownloadSource]:
|
||||
"""
|
||||
Get a list of filtered and sorted ManagedStream objects
|
||||
|
||||
:param sort_by: field to sort by
|
||||
:param reverse: reverse sorting
|
||||
:param comparison: comparison operator used for filtering
|
||||
:param search_by: fields and values to filter by
|
||||
"""
|
||||
return sum((manager.get_filtered(*args, **kwargs) for manager in self.source_managers.values()), [])
|
||||
|
||||
async def delete(self, source: ManagedDownloadSource, delete_file=False):
|
||||
for manager in self.source_managers.values():
|
||||
await manager.delete(source, delete_file)
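
A hedged sketch of the common call path through the manager above, assuming the daemon has already registered and started the stream and torrent source managers; the surrounding object names are illustrative only:

    source = await file_manager.download_from_uri(
        "lbry://@SomeChannel/some-stream",   # hypothetical claim URL
        exchange_rate_manager,               # only consulted when the claim carries a fee
        timeout=60.0, save_file=True,
    )
    print(source.claim_name, source.full_path)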
@@ -1,162 +0,0 @@
import os
|
||||
import asyncio
|
||||
import typing
|
||||
import logging
|
||||
import binascii
|
||||
from typing import Optional
|
||||
from lbry.utils import generate_id
|
||||
from lbry.extras.daemon.storage import StoredContentClaim
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.conf import Config
|
||||
from lbry.extras.daemon.analytics import AnalyticsManager
|
||||
from lbry.wallet.transaction import Transaction
|
||||
from lbry.extras.daemon.storage import SQLiteStorage
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ManagedDownloadSource:
|
||||
STATUS_RUNNING = "running"
|
||||
STATUS_STOPPED = "stopped"
|
||||
STATUS_FINISHED = "finished"
|
||||
|
||||
SAVING_ID = 1
|
||||
STREAMING_ID = 2
|
||||
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, config: 'Config', storage: 'SQLiteStorage', identifier: str,
|
||||
file_name: Optional[str] = None, download_directory: Optional[str] = None,
|
||||
status: Optional[str] = STATUS_STOPPED, claim: Optional[StoredContentClaim] = None,
|
||||
download_id: Optional[str] = None, rowid: Optional[int] = None,
|
||||
content_fee: Optional['Transaction'] = None,
|
||||
analytics_manager: Optional['AnalyticsManager'] = None,
|
||||
added_on: Optional[int] = None):
|
||||
self.loop = loop
|
||||
self.storage = storage
|
||||
self.config = config
|
||||
self.identifier = identifier
|
||||
self.download_directory = download_directory
|
||||
self._file_name = file_name
|
||||
self._status = status
|
||||
self.stream_claim_info = claim
|
||||
self.download_id = download_id or binascii.hexlify(generate_id()).decode()
|
||||
self.rowid = rowid
|
||||
self.content_fee = content_fee
|
||||
self.purchase_receipt = None
|
||||
self._added_on = added_on
|
||||
self.analytics_manager = analytics_manager
|
||||
self.downloader = None
|
||||
|
||||
self.saving = asyncio.Event()
|
||||
self.finished_writing = asyncio.Event()
|
||||
self.started_writing = asyncio.Event()
|
||||
self.finished_write_attempt = asyncio.Event()
|
||||
|
||||
# @classmethod
|
||||
# async def create(cls, loop: asyncio.AbstractEventLoop, config: 'Config', file_path: str,
|
||||
# key: Optional[bytes] = None,
|
||||
# iv_generator: Optional[typing.Generator[bytes, None, None]] = None) -> 'ManagedDownloadSource':
|
||||
# raise NotImplementedError()
|
||||
|
||||
async def start(self, timeout: Optional[float] = None, save_now: Optional[bool] = False):
|
||||
raise NotImplementedError()
|
||||
|
||||
async def stop(self, finished: bool = False):
|
||||
raise NotImplementedError()
|
||||
|
||||
async def save_file(self, file_name: Optional[str] = None, download_directory: Optional[str] = None):
|
||||
raise NotImplementedError()
|
||||
|
||||
async def stop_tasks(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def set_claim(self, claim_info: typing.Dict, claim: 'Claim'):
|
||||
self.stream_claim_info = StoredContentClaim(
|
||||
f"{claim_info['txid']}:{claim_info['nout']}", claim_info['claim_id'],
|
||||
claim_info['name'], claim_info['amount'], claim_info['height'],
|
||||
binascii.hexlify(claim.to_bytes()).decode(), claim.signing_channel_id, claim_info['address'],
|
||||
claim_info['claim_sequence'], claim_info.get('channel_name')
|
||||
)
|
||||
|
||||
# async def update_content_claim(self, claim_info: Optional[typing.Dict] = None):
|
||||
# if not claim_info:
|
||||
# claim_info = await self.blob_manager.storage.get_content_claim(self.stream_hash)
|
||||
# self.set_claim(claim_info, claim_info['value'])
|
||||
|
||||
@property
|
||||
def file_name(self) -> Optional[str]:
|
||||
return self._file_name
|
||||
|
||||
@property
|
||||
def added_on(self) -> Optional[int]:
|
||||
return self._added_on
|
||||
|
||||
@property
|
||||
def status(self) -> str:
|
||||
return self._status
|
||||
|
||||
@property
|
||||
def completed(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
# @property
|
||||
# def stream_url(self):
|
||||
# return f"http://{self.config.streaming_host}:{self.config.streaming_port}/stream/{self.sd_hash}
|
||||
|
||||
@property
|
||||
def finished(self) -> bool:
|
||||
return self.status == self.STATUS_FINISHED
|
||||
|
||||
@property
|
||||
def running(self) -> bool:
|
||||
return self.status == self.STATUS_RUNNING
|
||||
|
||||
@property
|
||||
def claim_id(self) -> Optional[str]:
|
||||
return None if not self.stream_claim_info else self.stream_claim_info.claim_id
|
||||
|
||||
@property
|
||||
def txid(self) -> Optional[str]:
|
||||
return None if not self.stream_claim_info else self.stream_claim_info.txid
|
||||
|
||||
@property
|
||||
def nout(self) -> Optional[int]:
|
||||
return None if not self.stream_claim_info else self.stream_claim_info.nout
|
||||
|
||||
@property
|
||||
def outpoint(self) -> Optional[str]:
|
||||
return None if not self.stream_claim_info else self.stream_claim_info.outpoint
|
||||
|
||||
@property
|
||||
def claim_height(self) -> Optional[int]:
|
||||
return None if not self.stream_claim_info else self.stream_claim_info.height
|
||||
|
||||
@property
|
||||
def channel_claim_id(self) -> Optional[str]:
|
||||
return None if not self.stream_claim_info else self.stream_claim_info.channel_claim_id
|
||||
|
||||
@property
|
||||
def channel_name(self) -> Optional[str]:
|
||||
return None if not self.stream_claim_info else self.stream_claim_info.channel_name
|
||||
|
||||
@property
|
||||
def claim_name(self) -> Optional[str]:
|
||||
return None if not self.stream_claim_info else self.stream_claim_info.claim_name
|
||||
|
||||
@property
|
||||
def metadata(self) -> Optional[typing.Dict]:
|
||||
return None if not self.stream_claim_info else self.stream_claim_info.claim.stream.to_dict()
|
||||
|
||||
@property
|
||||
def metadata_protobuf(self) -> bytes:
|
||||
if self.stream_claim_info:
|
||||
return binascii.hexlify(self.stream_claim_info.claim.to_bytes())
|
||||
|
||||
@property
|
||||
def full_path(self) -> Optional[str]:
|
||||
return os.path.join(self.download_directory, os.path.basename(self.file_name)) \
|
||||
if self.file_name and self.download_directory else None
|
||||
|
||||
@property
|
||||
def output_file_exists(self):
|
||||
return os.path.isfile(self.full_path) if self.full_path else False
|
@@ -1,138 +0,0 @@
import os
|
||||
import asyncio
|
||||
import logging
|
||||
import typing
|
||||
from typing import Optional
|
||||
from lbry.file.source import ManagedDownloadSource
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.conf import Config
|
||||
from lbry.extras.daemon.analytics import AnalyticsManager
|
||||
from lbry.extras.daemon.storage import SQLiteStorage
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
COMPARISON_OPERATORS = {
|
||||
'eq': lambda a, b: a == b,
|
||||
'ne': lambda a, b: a != b,
|
||||
'g': lambda a, b: a > b,
|
||||
'l': lambda a, b: a < b,
|
||||
'ge': lambda a, b: a >= b,
|
||||
'le': lambda a, b: a <= b,
|
||||
}
|
||||
|
||||
|
||||
class SourceManager:
|
||||
filter_fields = {
|
||||
'rowid',
|
||||
'status',
|
||||
'file_name',
|
||||
'added_on',
|
||||
'download_path',
|
||||
'claim_name',
|
||||
'claim_height',
|
||||
'claim_id',
|
||||
'outpoint',
|
||||
'txid',
|
||||
'nout',
|
||||
'channel_claim_id',
|
||||
'channel_name',
|
||||
'completed'
|
||||
}
|
||||
|
||||
set_filter_fields = {
|
||||
"claim_ids": "claim_id",
|
||||
"channel_claim_ids": "channel_claim_id",
|
||||
"outpoints": "outpoint"
|
||||
}
|
||||
|
||||
source_class = ManagedDownloadSource
|
||||
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, config: 'Config', storage: 'SQLiteStorage',
|
||||
analytics_manager: Optional['AnalyticsManager'] = None):
|
||||
self.loop = loop
|
||||
self.config = config
|
||||
self.storage = storage
|
||||
self.analytics_manager = analytics_manager
|
||||
self._sources: typing.Dict[str, ManagedDownloadSource] = {}
|
||||
self.started = asyncio.Event()
|
||||
|
||||
def add(self, source: ManagedDownloadSource):
|
||||
self._sources[source.identifier] = source
|
||||
|
||||
async def remove(self, source: ManagedDownloadSource):
|
||||
if source.identifier not in self._sources:
|
||||
return
|
||||
self._sources.pop(source.identifier)
|
||||
await source.stop_tasks()
|
||||
|
||||
async def initialize_from_database(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
async def start(self):
|
||||
await self.initialize_from_database()
|
||||
self.started.set()
|
||||
|
||||
async def stop(self):
|
||||
while self._sources:
|
||||
_, source = self._sources.popitem()
|
||||
await source.stop_tasks()
|
||||
self.started.clear()
|
||||
|
||||
async def create(self, file_path: str, key: Optional[bytes] = None,
|
||||
iv_generator: Optional[typing.Generator[bytes, None, None]] = None) -> ManagedDownloadSource:
|
||||
raise NotImplementedError()
|
||||
|
||||
async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):
|
||||
await self.remove(source)
|
||||
if delete_file and source.output_file_exists:
|
||||
os.remove(source.full_path)
|
||||
|
||||
def get_filtered(self, sort_by: Optional[str] = None, reverse: Optional[bool] = False,
|
||||
comparison: Optional[str] = None, **search_by) -> typing.List[ManagedDownloadSource]:
|
||||
"""
|
||||
Get a list of filtered and sorted ManagedStream objects
|
||||
|
||||
:param sort_by: field to sort by
|
||||
:param reverse: reverse sorting
|
||||
:param comparison: comparison operator used for filtering
|
||||
:param search_by: fields and values to filter by
|
||||
"""
|
||||
if sort_by and sort_by not in self.filter_fields:
|
||||
raise ValueError(f"'{sort_by}' is not a valid field to sort by")
|
||||
if comparison and comparison not in COMPARISON_OPERATORS:
|
||||
raise ValueError(f"'{comparison}' is not a valid comparison")
|
||||
if 'full_status' in search_by:
|
||||
del search_by['full_status']
|
||||
|
||||
for search in search_by:
|
||||
if search not in self.filter_fields:
|
||||
raise ValueError(f"'{search}' is not a valid search operation")
|
||||
|
||||
compare_sets = {}
|
||||
if isinstance(search_by.get('claim_id'), list):
|
||||
compare_sets['claim_ids'] = search_by.pop('claim_id')
|
||||
if isinstance(search_by.get('outpoint'), list):
|
||||
compare_sets['outpoints'] = search_by.pop('outpoint')
|
||||
if isinstance(search_by.get('channel_claim_id'), list):
|
||||
compare_sets['channel_claim_ids'] = search_by.pop('channel_claim_id')
|
||||
|
||||
if search_by or compare_sets:
|
||||
comparison = comparison or 'eq'
|
||||
streams = []
|
||||
for stream in self._sources.values():
|
||||
if compare_sets and not all(
|
||||
getattr(stream, self.set_filter_fields[set_search]) in val
|
||||
for set_search, val in compare_sets.items()):
|
||||
continue
|
||||
if search_by and not all(
|
||||
COMPARISON_OPERATORS[comparison](getattr(stream, search), val)
|
||||
for search, val in search_by.items()):
|
||||
continue
|
||||
streams.append(stream)
|
||||
else:
|
||||
streams = list(self._sources.values())
|
||||
if sort_by:
|
||||
streams.sort(key=lambda s: getattr(s, sort_by) or "")
|
||||
if reverse:
|
||||
streams.reverse()
|
||||
return streams
|
@@ -1,456 +0,0 @@
import asyncio
import json
import logging
import os
import pathlib
import platform
import re
import shlex
import shutil
import subprocess
from math import ceil

import lbry.utils
from lbry.conf import TranscodeConfig

log = logging.getLogger(__name__)


class VideoFileAnalyzer:

    def _replace_or_pop_env(self, variable):
        if variable + '_ORIG' in self._env_copy:
            self._env_copy[variable] = self._env_copy[variable + '_ORIG']
        else:
            self._env_copy.pop(variable, None)

    def __init__(self, conf: TranscodeConfig):
        self._conf = conf
        self._available_encoders = ""
        self._ffmpeg_installed = None
        self._which_ffmpeg = None
        self._which_ffprobe = None
        self._env_copy = dict(os.environ)
        self._checked_ffmpeg = False
        if lbry.utils.is_running_from_bundle():
            # handle the situation where PyInstaller overrides our runtime environment:
            self._replace_or_pop_env('LD_LIBRARY_PATH')

    @staticmethod
    def _execute(command, environment):
        # log.debug("Executing: %s", command)
        try:
            with subprocess.Popen(
                shlex.split(command) if platform.system() != 'Windows' else command,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=environment
            ) as process:
                (stdout, stderr) = process.communicate()  # blocks until the process exits
                return stdout.decode(errors='replace') + stderr.decode(errors='replace'), process.returncode
        except subprocess.SubprocessError as e:
            return str(e), -1

    # This create_subprocess_exec call is broken in Windows Python 3.7, but it's prettier than what's here.
    # The recommended fix is switching to ProactorEventLoop, but that breaks UDP in Linux Python 3.7.
    # We work around that issue here by using run_in_executor. Check it again in Python 3.8.
    async def _execute_ffmpeg(self, arguments):
        arguments = self._which_ffmpeg + " " + arguments
        return await asyncio.get_event_loop().run_in_executor(None, self._execute, arguments, self._env_copy)

    async def _execute_ffprobe(self, arguments):
        arguments = self._which_ffprobe + " " + arguments
        return await asyncio.get_event_loop().run_in_executor(None, self._execute, arguments, self._env_copy)

    async def _verify_executables(self):
        try:
            await self._execute_ffprobe("-version")
            version, code = await self._execute_ffmpeg("-version")
        except Exception as e:
            code = -1
            version = str(e)
        if code != 0 or not version.startswith("ffmpeg"):
            log.warning("Unable to run ffmpeg, but it was requested. Code: %d; Message: %s", code, version)
            raise FileNotFoundError("Unable to locate or run ffmpeg or ffprobe. Please install FFmpeg "
                                    "and ensure that it is callable via PATH or conf.ffmpeg_path")
        log.debug("Using %s at %s", version.splitlines()[0].split(" Copyright")[0], self._which_ffmpeg)
        return version

    @staticmethod
    def _which_ffmpeg_and_ffmprobe(path):
        return shutil.which("ffmpeg", path=path), shutil.which("ffprobe", path=path)

    async def _verify_ffmpeg_installed(self):
        if self._ffmpeg_installed:
            return
        self._ffmpeg_installed = False
        path = self._conf.ffmpeg_path
        if hasattr(self._conf, "data_dir"):
            path += os.path.pathsep + os.path.join(getattr(self._conf, "data_dir"), "ffmpeg", "bin")
        path += os.path.pathsep + self._env_copy.get("PATH", "")
        self._which_ffmpeg, self._which_ffprobe = await asyncio.get_running_loop().run_in_executor(
            None, self._which_ffmpeg_and_ffmprobe, path
        )
        if not self._which_ffmpeg:
            log.warning("Unable to locate ffmpeg executable. Path: %s", path)
            raise FileNotFoundError(f"Unable to locate ffmpeg executable. Path: {path}")
        if not self._which_ffprobe:
            log.warning("Unable to locate ffprobe executable. Path: %s", path)
            raise FileNotFoundError(f"Unable to locate ffprobe executable. Path: {path}")
        if os.path.dirname(self._which_ffmpeg) != os.path.dirname(self._which_ffprobe):
            log.warning("ffmpeg and ffprobe are in different folders!")

        await self._verify_executables()
        self._ffmpeg_installed = True

    async def status(self, reset=False, recheck=False):
        if reset:
            self._available_encoders = ""
            self._ffmpeg_installed = None
        if self._checked_ffmpeg and not recheck:
            pass
        elif self._ffmpeg_installed is None:
            try:
                await self._verify_ffmpeg_installed()
            except FileNotFoundError:
                pass
            self._checked_ffmpeg = True
        return {
            "available": self._ffmpeg_installed,
            "which": self._which_ffmpeg,
            "analyze_audio_volume": int(self._conf.volume_analysis_time) > 0
        }

    @staticmethod
    def _verify_container(scan_data: json):
        container = scan_data["format"]["format_name"]
        log.debug(" Detected container is %s", container)
        splits = container.split(",")
        if not {"webm", "mp4", "3gp", "ogg"}.intersection(splits):
            return "Container format is not in the approved list of WebM, MP4. " \
                   f"Actual: {container} [{scan_data['format']['format_long_name']}]"

        if "matroska" in splits:
            for stream in scan_data["streams"]:
                if stream["codec_type"] == "video":
                    codec = stream["codec_name"]
                    if not {"vp8", "vp9", "av1"}.intersection(codec.split(",")):
                        return "WebM format requires VP8/9 or AV1 video. " \
                               f"Actual: {codec} [{stream['codec_long_name']}]"
                elif stream["codec_type"] == "audio":
                    codec = stream["codec_name"]
                    if not {"vorbis", "opus"}.intersection(codec.split(",")):
                        return "WebM format requires Vorbis or Opus audio. " \
                               f"Actual: {codec} [{stream['codec_long_name']}]"

        return ""

    @staticmethod
    def _verify_video_encoding(scan_data: json):
        for stream in scan_data["streams"]:
            if stream["codec_type"] != "video":
                continue
            codec = stream["codec_name"]
            log.debug(" Detected video codec is %s, format is %s", codec, stream["pix_fmt"])
            if not {"h264", "vp8", "vp9", "av1", "theora"}.intersection(codec.split(",")):
                return "Video codec is not in the approved list of H264, VP8, VP9, AV1, Theora. " \
                       f"Actual: {codec} [{stream['codec_long_name']}]"

            if "h264" in codec.split(",") and stream["pix_fmt"] != "yuv420p":
                return "Video codec is H264, but its pixel format does not match the approved yuv420p. " \
                       f"Actual: {stream['pix_fmt']}"

        return ""

    def _verify_bitrate(self, scan_data: json, file_path):
        bit_rate_max = float(self._conf.video_bitrate_maximum)
        if bit_rate_max <= 0:
            return ""

        if "bit_rate" in scan_data["format"]:
            bit_rate = float(scan_data["format"]["bit_rate"])
        else:
            bit_rate = os.stat(file_path).st_size / float(scan_data["format"]["duration"])
        log.debug(" Detected bitrate is %s Mbps. Allowed max: %s Mbps",
                  str(bit_rate / 1000000.0), str(bit_rate_max / 1000000.0))

        if bit_rate > bit_rate_max:
            return "The bit rate is above the configured maximum. Actual: " \
                   f"{bit_rate / 1000000.0} Mbps; Allowed max: {bit_rate_max / 1000000.0} Mbps"

        return ""

    async def _verify_fast_start(self, scan_data: json, video_file):
        container = scan_data["format"]["format_name"]
        if {"webm", "ogg"}.intersection(container.split(",")):
            return ""

        result, _ = await self._execute_ffprobe(f'-v debug "{video_file}"')
        match = re.search(r"Before avformat_find_stream_info.+?\s+seeks:(\d+)\s+", result)
        if match and int(match.group(1)) != 0:
            return "Video stream descriptors are not at the start of the file (the faststart flag was not used)."
        return ""

    @staticmethod
    def _verify_audio_encoding(scan_data: json):
        for stream in scan_data["streams"]:
            if stream["codec_type"] != "audio":
                continue
            codec = stream["codec_name"]
            log.debug(" Detected audio codec is %s", codec)
            if not {"aac", "mp3", "flac", "vorbis", "opus"}.intersection(codec.split(",")):
                return "Audio codec is not in the approved list of AAC, FLAC, MP3, Vorbis, and Opus. " \
                       f"Actual: {codec} [{stream['codec_long_name']}]"
            if int(stream['sample_rate']) > 48000:
                return "Sample rate out of range"

        return ""

    async def _verify_audio_volume(self, seconds, video_file):
        try:
            validate_volume = int(seconds) > 0
        except ValueError:
            validate_volume = False

        if not validate_volume:
            return ""

        result, _ = await self._execute_ffmpeg(f'-i "{video_file}" -t {seconds} '
                                               f'-af volumedetect -vn -sn -dn -f null "{os.devnull}"')
        try:
            mean_volume = float(re.search(r"mean_volume:\s+([-+]?\d*\.\d+|\d+)", result).group(1))
            max_volume = float(re.search(r"max_volume:\s+([-+]?\d*\.\d+|\d+)", result).group(1))
        except Exception as e:
            log.debug(" Failure in volume analysis. Message: %s", str(e))
            return ""

        if max_volume < -5.0 and mean_volume < -22.0:
            return "Audio is at least five dB lower than prime. " \
                   f"Actual max: {max_volume}, mean: {mean_volume}"

        log.debug(" Detected audio volume has mean, max of %f, %f dB", mean_volume, max_volume)

        return ""

    @staticmethod
    def _compute_crf(scan_data):
        height = 240.0
        for stream in scan_data["streams"]:
            if stream["codec_type"] == "video":
                height = max(height, float(stream["height"]))

        # https://developers.google.com/media/vp9/settings/vod/
        return int(-0.011 * height + 40)

    def _get_video_scaler(self):
        return self._conf.video_scaler

    async def _get_video_encoder(self, scan_data):
        # use what the user said if it's there:
        # if it's not there, use h264 if we can because it's way faster than the others
        # if we don't have h264 use vp9; it's fairly compatible even though it's slow

        if not self._available_encoders:
            self._available_encoders, _ = await self._execute_ffmpeg("-encoders -v quiet")

        encoder = self._conf.video_encoder.split(" ", 1)[0]
        if re.search(fr"^\s*V..... {encoder} ", self._available_encoders, re.MULTILINE):
            return self._conf.video_encoder

        if re.search(r"^\s*V..... libx264 ", self._available_encoders, re.MULTILINE):
            if encoder:
                log.warning(" Using libx264 since the requested encoder was unavailable. Requested: %s", encoder)
            return 'libx264 -crf 19 -vf "format=yuv420p"'

        if not encoder:
            encoder = "libx264"

        if re.search(r"^\s*V..... libvpx-vp9 ", self._available_encoders, re.MULTILINE):
            log.warning(" Using libvpx-vp9 since the requested encoder was unavailable. Requested: %s", encoder)
            crf = self._compute_crf(scan_data)
            return f"libvpx-vp9 -crf {crf} -b:v 0"

        if re.search(r"^\s*V..... libtheora", self._available_encoders, re.MULTILINE):
            log.warning(" Using libtheora since the requested encoder was unavailable. Requested: %s", encoder)
            return "libtheora -q:v 7"

        raise Exception(f"The video encoder is not available. Requested: {encoder}")

    async def _get_audio_encoder(self, extension):
        # if the video encoding is theora or av1/vp8/vp9 use opus (or fallback to vorbis)
        # or we don't have a video encoding but we have an ogg or webm container use opus
        # if we need to use opus/vorbis see if the conf file has it else use our own params
        # else use the user-set value if it exists
        # else use aac

        wants_opus = extension != "mp4"
        if not self._available_encoders:
            self._available_encoders, _ = await self._execute_ffmpeg("-encoders -v quiet")

        encoder = self._conf.audio_encoder.split(" ", 1)[0]
        if wants_opus and 'opus' in encoder:
            return self._conf.audio_encoder

        if wants_opus and re.search(r"^\s*A..... libopus ", self._available_encoders, re.MULTILINE):
            return "libopus -b:a 160k"

        if wants_opus and 'vorbis' in encoder:
            return self._conf.audio_encoder

        if wants_opus and re.search(r"^\s*A..... libvorbis ", self._available_encoders, re.MULTILINE):
            return "libvorbis -q:a 6"

        if re.search(fr"^\s*A..... {encoder} ", self._available_encoders, re.MULTILINE):
            return self._conf.audio_encoder

        if re.search(r"^\s*A..... aac ", self._available_encoders, re.MULTILINE):
            return "aac -b:a 192k"

        raise Exception(f"The audio encoder is not available. Requested: {encoder or 'aac'}")

    @staticmethod
    def _get_best_container_extension(scan_data, video_encoder):
        # the container is chosen by the video format
        # if we are theora-encoded, we want ogg
        # if we are vp8/vp9/av1 we want webm
        # use mp4 for anything else

        if video_encoder:  # not re-encoding video
            if "theora" in video_encoder:
                return "ogv"
            if re.search(r"vp[89x]|av1", video_encoder.split(" ", 1)[0]):
                return "webm"
            return "mp4"

        for stream in scan_data["streams"]:
            if stream["codec_type"] != "video":
                continue
            codec = stream["codec_name"].split(",")
            if "theora" in codec:
                return "ogv"
            if {"vp8", "vp9", "av1"}.intersection(codec):
                return "webm"

        return "mp4"

    async def _get_scan_data(self, validate, file_path):
        arguments = f'-v quiet -print_format json -show_format -show_streams "{file_path}"'
        result, _ = await self._execute_ffprobe(arguments)
        try:
            scan_data = json.loads(result)
        except Exception as e:
            log.debug("Failure in JSON parsing ffprobe results. Message: %s", str(e))
            raise ValueError(f'Absent or unreadable video file: {file_path}')

        if "format" not in scan_data or "duration" not in scan_data["format"]:
            log.debug("Format data is missing from ffprobe results for: %s", file_path)
            raise ValueError(f'Media file does not appear to contain video content: {file_path}')

        if float(scan_data["format"]["duration"]) < 0.1:
            log.debug("Media file appears to be an image: %s", file_path)
            raise ValueError(f'Assuming image file at: {file_path}')

        return scan_data

    @staticmethod
    def _build_spec(scan_data):
        assert scan_data

        duration = ceil(float(scan_data["format"]["duration"]))  # existence verified when scan_data made
        width = -1
        height = -1
        for stream in scan_data["streams"]:
            if stream["codec_type"] != "video":
                continue
            width = max(width, int(stream["width"]))
            height = max(height, int(stream["height"]))

        log.debug(" Detected duration: %d sec. with resolution: %d x %d", duration, width, height)

        spec = {"duration": duration}
        if height >= 0:
            spec["height"] = height
        if width >= 0:
            spec["width"] = width
        return spec

    async def verify_or_repair(self, validate, repair, file_path, ignore_non_video=False):
        if not validate and not repair:
            return file_path, {}

        if ignore_non_video and not file_path:
            return file_path, {}

        await self._verify_ffmpeg_installed()
        try:
            scan_data = await self._get_scan_data(validate, file_path)
        except ValueError:
            if ignore_non_video:
                return file_path, {}
            raise

        fast_start_msg = await self._verify_fast_start(scan_data, file_path)
        log.debug("Analyzing %s:", file_path)
        spec = self._build_spec(scan_data)
        log.debug(" Detected faststart is %s", "false" if fast_start_msg else "true")
        container_msg = self._verify_container(scan_data)
        bitrate_msg = self._verify_bitrate(scan_data, file_path)
        video_msg = self._verify_video_encoding(scan_data)
        audio_msg = self._verify_audio_encoding(scan_data)
        volume_msg = await self._verify_audio_volume(self._conf.volume_analysis_time, file_path)
        messages = [container_msg, bitrate_msg, fast_start_msg, video_msg, audio_msg, volume_msg]

        if not any(messages):
            return file_path, spec

        if not repair:
            errors = ["Streamability verification failed:"]
            errors.extend(filter(None, messages))
            raise Exception("\n ".join(errors))

        # the plan for transcoding:
        # we have to re-encode the video if it is in a nonstandard format
        # we also re-encode if we are h264 but not yuv420p (both errors caught in video_msg)
        # we also re-encode if our bitrate or sample rate is too high

        try:
            transcode_command = [f'-i "{file_path}" -y -c:s copy -c:d copy -c:v']

            video_encoder = ""
            if video_msg or bitrate_msg:
                video_encoder = await self._get_video_encoder(scan_data)
                transcode_command.append(video_encoder)
                # could do the scaling only if bitrate_msg, but if we're going to the effort to re-encode anyway...
                transcode_command.append(self._get_video_scaler())
            else:
                transcode_command.append("copy")

            transcode_command.append("-movflags +faststart -c:a")
            extension = self._get_best_container_extension(scan_data, video_encoder)

            if audio_msg or volume_msg:
                audio_encoder = await self._get_audio_encoder(extension)
                transcode_command.append(audio_encoder)
                if volume_msg and self._conf.volume_filter:
                    transcode_command.append(self._conf.volume_filter)
                if audio_msg == "Sample rate out of range":
                    transcode_command.append(" -ar 48000 ")
            else:
                transcode_command.append("copy")

            # TODO: put it in a temp folder and delete it after we upload?
            path = pathlib.Path(file_path)
            output = path.parent / f"{path.stem}_fixed.{extension}"
            transcode_command.append(f'"{output}"')

            ffmpeg_command = " ".join(transcode_command)
            log.info("Proceeding on transcode via: ffmpeg %s", ffmpeg_command)
            result, code = await self._execute_ffmpeg(ffmpeg_command)
            if code != 0:
                raise Exception(f"Failure to complete the transcode command. Output: {result}")
        except Exception as e:
            if validate:
                raise
            log.info("Unable to transcode %s . Message: %s", file_path, str(e))
            # TODO: delete partial output file here if it exists?
            return file_path, spec

        return str(output), spec
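Editor's aside (not part of the diff): a minimal usage sketch for the analyzer above. The import path is hypothetical and constructing TranscodeConfig() with defaults is an assumption; adjust both to however the module is actually packaged.

# Editor's sketch: driving the analyzer directly, validate-only (no repair).
import asyncio
from lbry.conf import TranscodeConfig
from lbry.file_analysis import VideoFileAnalyzer  # hypothetical module path

async def check(path):
    analyzer = VideoFileAnalyzer(TranscodeConfig())
    # validate=True, repair=False: raises if the file fails the streamability checks above,
    # otherwise returns the original path and the duration/width/height spec
    _, spec = await analyzer.verify_or_repair(validate=True, repair=False, file_path=path)
    return spec

print(asyncio.run(check("example.mp4")))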
@ -1,68 +0,0 @@
import time
import logging
import asyncio
import asyncio.tasks
from aiohttp import web
from prometheus_client import generate_latest as prom_generate_latest
from prometheus_client import Counter, Histogram, Gauge


PROBES_IN_FLIGHT = Counter("probes_in_flight", "Number of loop probes in flight", namespace='asyncio')
PROBES_FINISHED = Counter("probes_finished", "Number of finished loop probes", namespace='asyncio')
PROBE_TIMES = Histogram("probe_times", "Loop probe times", namespace='asyncio')
TASK_COUNT = Gauge("running_tasks", "Number of running tasks", namespace='asyncio')


def get_loop_metrics(delay=1):
    loop = asyncio.get_event_loop()

    def callback(started):
        PROBE_TIMES.observe(time.perf_counter() - started - delay)
        PROBES_FINISHED.inc()

    async def monitor_loop_responsiveness():
        while True:
            now = time.perf_counter()
            loop.call_later(delay, callback, now)
            PROBES_IN_FLIGHT.inc()
            TASK_COUNT.set(len(asyncio.tasks._all_tasks))
            await asyncio.sleep(delay)

    return loop.create_task(monitor_loop_responsiveness())


class PrometheusServer:
    def __init__(self, logger=None):
        self.runner = None
        self.logger = logger or logging.getLogger(__name__)
        self._monitor_loop_task = None

    async def start(self, interface: str, port: int):
        self.logger.info("start prometheus metrics")
        prom_app = web.Application()
        prom_app.router.add_get('/metrics', self.handle_metrics_get_request)
        self.runner = web.AppRunner(prom_app)
        await self.runner.setup()

        metrics_site = web.TCPSite(self.runner, interface, port, shutdown_timeout=.5)
        await metrics_site.start()
        self.logger.info(
            'prometheus metrics server listening on %s:%i', *metrics_site._server.sockets[0].getsockname()[:2]
        )
        self._monitor_loop_task = get_loop_metrics()

    async def handle_metrics_get_request(self, request: web.Request):
        try:
            return web.Response(
                text=prom_generate_latest().decode(),
                content_type='text/plain; version=0.0.4'
            )
        except Exception:
            self.logger.exception('could not generate prometheus data')
            raise

    async def stop(self):
        if self._monitor_loop_task and not self._monitor_loop_task.done():
            self._monitor_loop_task.cancel()
            self._monitor_loop_task = None
        await self.runner.cleanup()
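Editor's aside (not part of the diff): a minimal sketch of running the metrics server above on its own. The import path is hypothetical and the interface/port are arbitrary choices for illustration.

# Editor's sketch: start the server, serve /metrics for a while, then shut down.
import asyncio
from lbry.prometheus import PrometheusServer  # hypothetical module path

async def main():
    server = PrometheusServer()
    await server.start("127.0.0.1", 2112)  # metrics exposed at http://127.0.0.1:2112/metrics
    try:
        await asyncio.sleep(3600)  # keep serving in this toy example
    finally:
        await server.stop()

asyncio.run(main())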