Compare commits

..

1 commit

Author SHA1 Message Date
Lex Berezhny
e01fbc6f39 initial import of android lbrysdk build 2019-01-27 22:45:32 -05:00
468 changed files with 30425 additions and 71466 deletions

View file

@@ -2,6 +2,6 @@
.tox
__pycache__
dist
lbry.egg-info
lbrynet.egg-info
docs
tests

1
.gitattributes vendored Normal file
View file

@@ -0,0 +1 @@
/CHANGELOG.md merge=union

74
.github/ISSUE_TEMPLATE.md vendored Normal file
View file

@@ -0,0 +1,74 @@
<!--
Thanks for reporting an issue to LBRY and helping us improve!
To make it possible for us to help you, please fill out the information below carefully.
Before reporting any issues, please make sure that you're using the latest version.
- App: https://github.com/lbryio/lbry-desktop/releases
- Daemon: https://github.com/lbryio/lbry/releases
We are also available on Discord at https://chat.lbry.io
-->
## The Issue
In order to <achieve some value>,
as a <type of user>,
I want <some functionality>.
### Steps to reproduce
1.
2.
3.
### Expected behaviour
Tell us what should happen
### Actual behaviour
Tell us what happens instead
## System Configuration
<!-- For the app, this info is in the About section at the bottom of the Help page.
You can include a screenshot instead of typing it out -->
<!-- For the daemon, run:
curl 'http://localhost:5279' --data '{"method":"version"}'
and include the full output -->
- LBRY Daemon version:
- LBRY App version:
- LBRY Installation ID:
- Operating system:
## Anything Else
<!-- Include anything else that does not fit into the above sections -->
## Screenshots
<!-- If a screenshot would help explain the bug, please include one or two here -->
## Internal Use
### Acceptance Criteria
1.
2.
3.
### Definition of Done
- [ ] Tested against acceptance criteria
- [ ] Tested against the assumptions of user story
- [ ] The project builds without errors
- [ ] Unit tests are written and passing
- [ ] Tests on devices/browsers listed in the issue have passed
- [ ] QA performed & issues resolved
- [ ] Refactoring completed
- [ ] Any configuration or build changes documented
- [ ] Documentation updated
- [ ] Peer Code Review performed

38
.github/PULL_REQUEST_TEMPLATE.md vendored Normal file
View file

@@ -0,0 +1,38 @@
## PR Checklist
Please check all that apply to this PR using "x":
- [ ] I have checked that this PR is not a duplicate of an existing PR (open, closed or merged)
- [ ] I have checked that this PR does not introduce a breaking change
- [ ] This PR introduces breaking changes and I have provided a detailed explanation below
## PR Type
What kind of change does this PR introduce?
Why is this change necessary?
<!-- Please check all that apply to this PR using "x". -->
- [ ] Bugfix
- [ ] Feature
- [ ] Breaking changes (bugfix or feature that introduces breaking changes)
- [ ] Code style update (formatting)
- [ ] Refactoring (no functional changes)
- [ ] Documentation changes
- [ ] Other - Please describe:
## Fixes
Issue Number: N/A
## What is the current behavior?
## What is the new behavior?
## Other information
<!-- If this PR contains a breaking change, please describe the impact and solution strategy for existing applications below. -->

View file

@@ -1,206 +0,0 @@
name: ci
on: ["push", "pull_request", "workflow_dispatch"]

jobs:

  lint:
    name: lint
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - name: extract pip cache
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
          restore-keys: ${{ runner.os }}-pip-
      - run: pip install --user --upgrade pip wheel
      - run: pip install -e .[lint]
      - run: make lint

  tests-unit:
    name: "tests / unit"
    strategy:
      matrix:
        os:
          - ubuntu-20.04
          - macos-latest
          - windows-latest
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - name: set pip cache dir
        shell: bash
        run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
      - name: extract pip cache
        uses: actions/cache@v3
        with:
          path: ${{ env.PIP_CACHE_DIR }}
          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
          restore-keys: ${{ runner.os }}-pip-
      - id: os-name
        uses: ASzc/change-string-case-action@v5
        with:
          string: ${{ runner.os }}
      - run: python -m pip install --user --upgrade pip wheel
      - if: startsWith(runner.os, 'linux')
        run: pip install -e .[test]
      - if: startsWith(runner.os, 'linux')
        env:
          HOME: /tmp
        run: make test-unit-coverage
      - if: startsWith(runner.os, 'linux') != true
        run: pip install -e .[test]
      - if: startsWith(runner.os, 'linux') != true
        env:
          HOME: /tmp
        run: coverage run --source=lbry -m unittest tests/unit/test_conf.py
      - name: submit coverage report
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          COVERALLS_FLAG_NAME: tests-unit-${{ steps.os-name.outputs.lowercase }}
          COVERALLS_PARALLEL: true
        run: |
          pip install coveralls
          coveralls --service=github

  tests-integration:
    name: "tests / integration"
    runs-on: ubuntu-20.04
    strategy:
      matrix:
        test:
          - datanetwork
          - blockchain
          - claims
          - takeovers
          - transactions
          - other
    steps:
      - name: Configure sysctl limits
        run: |
          sudo swapoff -a
          sudo sysctl -w vm.swappiness=1
          sudo sysctl -w fs.file-max=262144
          sudo sysctl -w vm.max_map_count=262144
      - name: Runs Elasticsearch
        uses: elastic/elastic-github-actions/elasticsearch@master
        with:
          stack-version: 7.12.1
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - if: matrix.test == 'other'
        run: |
          sudo apt-get update
          sudo apt-get install -y --no-install-recommends ffmpeg
      - name: extract pip cache
        uses: actions/cache@v3
        with:
          path: ./.tox
          key: tox-integration-${{ matrix.test }}-${{ hashFiles('setup.py') }}
          restore-keys: tox-integration-${{ matrix.test }}-
      - run: pip install tox coverage coveralls
      - if: matrix.test == 'claims'
        run: rm -rf .tox
      - run: tox -e ${{ matrix.test }}
      - name: submit coverage report
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          COVERALLS_FLAG_NAME: tests-integration-${{ matrix.test }}
          COVERALLS_PARALLEL: true
        run: |
          coverage combine tests
          coveralls --service=github

  coverage:
    needs: ["tests-unit", "tests-integration"]
    runs-on: ubuntu-20.04
    steps:
      - name: finalize coverage report submission
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          pip install coveralls
          coveralls --service=github --finish

  build:
    needs: ["lint", "tests-unit", "tests-integration"]
    name: "build / binary"
    strategy:
      matrix:
        os:
          - ubuntu-20.04
          - macos-latest
          - windows-latest
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - id: os-name
        uses: ASzc/change-string-case-action@v5
        with:
          string: ${{ runner.os }}
      - name: set pip cache dir
        shell: bash
        run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
      - name: extract pip cache
        uses: actions/cache@v3
        with:
          path: ${{ env.PIP_CACHE_DIR }}
          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
          restore-keys: ${{ runner.os }}-pip-
      - run: pip install pyinstaller==4.6
      - run: pip install -e .
      - if: startsWith(github.ref, 'refs/tags/v')
        run: python docker/set_build.py
      - if: startsWith(runner.os, 'linux') || startsWith(runner.os, 'mac')
        name: Build & Run (Unix)
        run: |
          pyinstaller --onefile --name lbrynet lbry/extras/cli.py
          dist/lbrynet --version
      - if: startsWith(runner.os, 'windows')
        name: Build & Run (Windows)
        run: |
          pip install pywin32==301
          pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py
          dist/lbrynet.exe --version
      - uses: actions/upload-artifact@v3
        with:
          name: lbrynet-${{ steps.os-name.outputs.lowercase }}
          path: dist/

  release:
    name: "release"
    if: startsWith(github.ref, 'refs/tags/v')
    needs: ["build"]
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v1
      - uses: actions/download-artifact@v2
      - name: upload binaries
        env:
          GITHUB_TOKEN: ${{ secrets.RELEASE_API_TOKEN }}
        run: |
          pip install githubrelease
          chmod +x lbrynet-macos/lbrynet
          chmod +x lbrynet-linux/lbrynet
          zip --junk-paths lbrynet-mac.zip lbrynet-macos/lbrynet
          zip --junk-paths lbrynet-linux.zip lbrynet-linux/lbrynet
          zip --junk-paths lbrynet-windows.zip lbrynet-windows/lbrynet.exe
          ls -lh
          githubrelease release lbryio/lbry-sdk info ${GITHUB_REF#refs/tags/}
          githubrelease asset lbryio/lbry-sdk upload ${GITHUB_REF#refs/tags/} \
            lbrynet-mac.zip lbrynet-linux.zip lbrynet-windows.zip
          githubrelease release lbryio/lbry-sdk publish ${GITHUB_REF#refs/tags/}

View file

@@ -1,22 +0,0 @@
name: slack

on:
  release:
    types: [published]

jobs:
  release:
    name: "slack notification"
    runs-on: ubuntu-20.04
    steps:
      - uses: LoveToKnow/slackify-markdown-action@v1.0.0
        id: markdown
        with:
          text: "There is a new SDK release: ${{github.event.release.html_url}}\n${{ github.event.release.body }}"
      - uses: slackapi/slack-github-action@v1.14.0
        env:
          CHANGELOG: '<!channel> ${{ steps.markdown.outputs.text }}'
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_RELEASE_BOT_WEBHOOK }}
        with:
          payload: '{"type": "mrkdwn", "text": ${{ toJSON(env.CHANGELOG) }} }'

19
.gitignore vendored
View file

@@ -1,22 +1,9 @@
/.idea
/.DS_Store
/build
/dist
/.tox
/.coverage*
/lbry-venv
/venv
/lbry/blockchain
/.idea
/.coverage
lbry.egg-info
lbrynet.egg-info
__pycache__
_trial_temp/
trending*.log
/tests/integration/claims/files
/tests/.coverage.*
/lbry/wallet/bin
/.vscode
/.gitignore

447
.pylintrc Normal file
View file

@@ -0,0 +1,447 @@
[MASTER]
# Specify a configuration file.
#rcfile=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS,schema
# Add files or directories matching the regex patterns to the
# blacklist. The regex matches against base names, not paths.
# `\.#.*` - add emacs tmp files to the blacklist
ignore-patterns=\.#.*
# Pickle collected data for later comparisons.
persistent=yes
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
# Use multiple processes to speed up Pylint.
jobs=1
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code
extension-pkg-whitelist=
miniupnpc,
unqlite
# Allow optimization of some AST trees. This will activate a peephole AST
# optimizer, which will apply various small optimizations. For instance, it can
# be used to obtain the result of joining multiple strings with the addition
# operator. Joining a lot of strings can lead to a maximum recursion error in
# Pylint and this flag can prevent that. It has one side effect, the resulting
# AST will be different than the one from reality.
optimize-ast=no
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
confidence=
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
#enable=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
disable=
anomalous-backslash-in-string,
arguments-differ,
attribute-defined-outside-init,
bad-continuation,
bare-except,
broad-except,
cell-var-from-loop,
consider-iterating-dictionary,
dangerous-default-value,
duplicate-code,
fixme,
global-statement,
inherit-non-class,
invalid-name,
len-as-condition,
locally-disabled,
logging-not-lazy,
missing-docstring,
no-else-return,
no-init,
no-member,
no-self-use,
protected-access,
redefined-builtin,
redefined-outer-name,
redefined-variable-type,
relative-import,
signature-differs,
super-init-not-called,
too-few-public-methods,
too-many-arguments,
too-many-branches,
too-many-instance-attributes,
too-many-lines,
too-many-locals,
too-many-nested-blocks,
too-many-public-methods,
too-many-return-statements,
too-many-statements,
trailing-newlines,
undefined-loop-variable,
ungrouped-imports,
unnecessary-lambda,
unused-argument,
unused-variable,
wildcard-import,
wrong-import-order,
wrong-import-position,
deprecated-lambda,
simplifiable-if-statement,
unidiomatic-typecheck,
global-at-module-level,
inconsistent-return-statements,
keyword-arg-before-vararg,
assignment-from-no-return,
useless-return,
assignment-from-none,
stop-iteration-return
[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html. You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
output-format=text
# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]".
files-output=no
# Tells whether to display a full report or only the messages
reports=no
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
[VARIABLES]
# Tells whether we should check for unused import in __init__ files.
init-import=no
# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=_$|dummy
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,_cb
[LOGGING]
# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging
[BASIC]
# List of builtins function names that should not be used, separated by a comma
bad-functions=map,filter,input
# Good variable names which should always be accepted, separated by a comma
# allow `d` as its used frequently for deferred callback chains
good-names=i,j,k,ex,Run,_,d
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Include a hint for the correct naming format with invalid-name
include-naming-hint=no
# Regular expression matching correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for function names
function-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for variable names
variable-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct constant names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Naming hint for constant names
const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Regular expression matching correct attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for attribute names
attr-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for argument names
argument-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Naming hint for class attribute names
class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Regular expression matching correct inline iteration names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
# Naming hint for inline iteration names
inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
# Regular expression matching correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$
# Naming hint for class names
class-name-hint=[A-Z_][a-zA-Z0-9]+$
# Regular expression matching correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Naming hint for module names
module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Regular expression matching correct method names
method-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for method names
method-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
[ELIF]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
[SPELLING]
# Spelling dictionary name. Available dictionaries: none. To make it working
# install python-enchant package.
spelling-dict=
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=120
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,dict-separator
# Maximum number of lines in a module
max-module-lines=1000
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO
[SIMILARITIES]
# Minimum lines number of a similarity.
min-similarity-lines=4
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
[TYPECHECK]
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=leveldb,distutils
# Ignoring distutils because: https://github.com/PyCQA/pylint/issues/73
# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set). This supports can work
# with qualified names.
# ignored-classes=
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=lbrynet.lbrynet_daemon.LBRYDaemon.Parameters
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,TERMIOS,Bastion,rexec
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
[DESIGN]
# Maximum number of arguments for function / method
max-args=10
# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*
# Maximum number of locals for function / method body
max-locals=15
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of branch for function / method body
max-branches=12
# Maximum number of statements in function / method body
max-statements=50
# Maximum number of parents for a class (see R0901).
max-parents=8
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of boolean expressions in a if statement
max-bool-expr=5
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception
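The `evaluation` expression above is the formula behind pylint's familiar `rated at x.xx/10` score. A quick sketch of the arithmetic in Python (the function name here is ours, for illustration only):

```python
def pylint_score(error, warning, refactor, convention, statement):
    # Same formula as the evaluation= line above: errors are weighted 5x,
    # and the penalty is normalized by the number of statements analyzed.
    return 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)

# e.g. 2 errors and 5 warnings across 500 statements:
print(pylint_score(2, 5, 0, 0, 500))  # -> 9.7
```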

140
.travis.yml Normal file
View file

@@ -0,0 +1,140 @@
sudo: required
dist: xenial
language: python
python: "3.7"

jobs:
  include:

    - stage: code quality
      name: "pylint lbrynet"
      install:
        - pip install astroid==2.0.4 aiohttp==3.4.4
        # newer astroid and aiohttp fail in pylint, so we pre-install older versions
        - pip install pylint
        - pip install git+https://github.com/lbryio/torba.git#egg=torba
        - pip install -e .
      script: pylint lbrynet

    - stage: test
      name: "Unit Tests"
      install:
        - pip install coverage
        - pip install git+https://github.com/lbryio/torba.git#egg=torba
        - pip install -e .
      script:
        - HOME=/tmp coverage run -p --source=lbrynet -m unittest discover -vv tests.unit
      after_success:
        - coverage combine
        - bash <(curl -s https://codecov.io/bash)

    - name: "Integration Tests"
      install:
        - pip install tox-travis coverage
        - pushd .. && git clone https://github.com/lbryio/torba.git && popd
      script: tox
      after_success:
        - coverage combine tests/
        - bash <(curl -s https://codecov.io/bash)

    - stage: build
      name: "Windows"
      language: generic
      services:
        - docker
      install:
        - docker pull lbry/pyinstaller34_32bits:py371
      script:
        - python scripts/set_build.py
        - docker run -v "$(pwd):/src/lbry" lbry/pyinstaller34_32bits:py371 lbry/scripts/wine_build.sh
        - sudo zip -j dist/lbrynet-windows.zip dist/lbrynet.exe
      deploy:
        provider: releases
        api_key: $GITHUB_OAUTH_TOKEN
        file: dist/lbrynet-windows.zip
        skip_cleanup: true
        overwrite: true
        draft: true
        on:
          tags: true
      addons:
        artifacts:
          working_dir: dist
          paths:
            - lbrynet-windows.zip
          target_paths:
            - /daemon/build-${TRAVIS_BUILD_NUMBER}_commit-${TRAVIS_COMMIT:0:7}_branch-${TRAVIS_BRANCH}$([ ! -z ${TRAVIS_TAG} ] && echo _tag-${TRAVIS_TAG})

    - &build
      name: "Linux"
      env: OS=linux
      install:
        - pip3 install pyinstaller
        - pip3 install git+https://github.com/lbryio/torba.git
        - python scripts/set_build.py
        - pip3 install -e .
      script:
        - pyinstaller -F -n lbrynet lbrynet/extras/cli.py
        - chmod +x dist/lbrynet
        - zip -j dist/lbrynet-${OS}.zip dist/lbrynet
        - ./dist/lbrynet --version
      deploy:
        provider: releases
        api_key: $GITHUB_OAUTH_TOKEN
        file: dist/lbrynet-${OS}.zip
        skip_cleanup: true
        overwrite: true
        draft: true
        on:
          tags: true
      addons:
        artifacts:
          working_dir: dist
          paths:
            - lbrynet-${OS}.zip
          # artifact uploader thinks lbrynet is a directory, https://github.com/travis-ci/artifacts/issues/78
          target_paths:
            - /daemon/build-${TRAVIS_BUILD_NUMBER}_commit-${TRAVIS_COMMIT:0:7}_branch-${TRAVIS_BRANCH}$([ ! -z ${TRAVIS_TAG} ] && echo _tag-${TRAVIS_TAG})

    - <<: *build
      name: "Mac"
      os: osx
      osx_image: xcode8.3
      language: generic
      env: OS=mac
      cache: false
      before_install:
        - brew upgrade python || true
        - brew upgrade python || true
      install:
        - python3 --version
        - pip3 --version
        - pip3 install pyinstaller
        - git clone https://github.com/lbryio/torba.git --depth 1
        - sed -i -e "s/'plyvel',//" torba/setup.py
        - cd torba && pip3 install -e . && cd ..
        - python3 scripts/set_build.py
        - pip3 install -e .

    - if: tag IS present
      stage: build
      name: "Wallet Server Docker Image - Tagged Release"
      script:
        - set -e
        - echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
        - travis_retry docker build -t lbry/wallet-server:$TRAVIS_TAG -f scripts/Dockerfile.wallet_server .
        - docker push lbry/wallet-server:$TRAVIS_TAG

    - if: tag IS blank AND branch = master
      stage: build
      name: "Wallet Server Docker Image - Master"
      script:
        - set -e
        - echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
        - travis_retry docker build -t lbry/wallet-server:master -f scripts/Dockerfile.wallet_server .
        - docker push lbry/wallet-server:master

cache:
  directories:
    - $HOME/.cache/pip
    - $HOME/Library/Caches/pip
    - $TRAVIS_BUILD_DIR/.tox
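The `- &build` entry and the `<<: *build` entry above are plain YAML anchors and merge keys: the Mac job inherits every key from the Linux job and overrides only the ones it restates. A minimal sketch of that behavior, assuming PyYAML is available:

```python
import yaml  # assumption: PyYAML (pip install pyyaml)

doc = """
jobs:
  - &build
    name: Linux
    env: OS=linux
    install: [pip3 install -e .]
  - <<: *build
    name: Mac
    env: OS=mac
"""
jobs = yaml.safe_load(doc)["jobs"]
print(jobs[1]["install"])  # ['pip3 install -e .'], inherited from &build
print(jobs[1]["name"])     # 'Mac', overridden locally
```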

File diff suppressed because it is too large

View file

@@ -1,3 +1,3 @@
## Contributing to LBRY
https://lbry.tech/contribute
https://lbry.io/faq/contributing

6
Dangerfile Normal file
View file

@@ -0,0 +1,6 @@
# Add a CHANGELOG entry for app changes
has_app_changes = !git.modified_files.grep(/lbrynet/).empty?
if !git.modified_files.include?("CHANGELOG.md") && has_app_changes
fail("Please include a CHANGELOG entry.")
message "See http://keepachangelog.com/en/0.3.0/ for details on good changelog guidelines"
end

View file

@@ -1,6 +1,6 @@
# Installing LBRY
If only the JSON-RPC API server is needed, the recommended way to install LBRY is to use a pre-built binary. We provide binaries for all major operating systems. See the [README](README.md)!
If only the JSON-RPC API server is needed, the recommended way to install LBRY is to use a pre-built binary. We provide binaries for all major operating systems. See the [README](README.md).
These instructions are for installing LBRY from source, which is recommended if you are interested in doing development work or LBRY is not available on your operating system (godspeed, TempleOS users).
@@ -9,47 +9,36 @@ Here's a video walkthrough of this setup, which is itself hosted by the LBRY net
## Prerequisites
Running `lbrynet` from source requires Python 3.7. Get the installer for your OS [here](https://www.python.org/downloads/release/python-370/).
Running `lbrynet` from source requires Python 3.6 or higher (3.7 is preferred). Get the installer for your OS [here](https://www.python.org/downloads/release/python-370/)
After installing Python 3.7, you'll need to install some additional libraries depending on your operating system.
After installing python 3, you'll need to install some additional libraries depending on your operating system.
### Virtualenv
Once python 3 is installed run `python3 -m pip install virtualenv` to install virtualenv.
### Windows
Windows users will need to install `Visual C++ Build Tools`, which are available as part of Microsoft Build Tools 2015.
Because of [issue #2769](https://github.com/lbryio/lbry-sdk/issues/2769)
at the moment the `lbrynet` daemon will only work correctly with Python 3.7.
If Python 3.8+ is used, the daemon will start but the RPC server
may not accept messages, returning the following:
```
Could not connect to daemon. Are you sure it's running?
```
### macOS
macOS users will need to install [xcode command line tools](https://developer.xamarin.com/guides/testcloud/calabash/configuring/osx/install-xcode-command-line-tools/) and [homebrew](http://brew.sh/).
These environment variables also need to be set:
```
PYTHONUNBUFFERED=1
EVENT_NOKQUEUE=1
```
Remaining dependencies can then be installed by running:
```
brew install python protobuf
```
Assistance installing Python3: https://docs.python-guide.org/starting/install3/osx/.
```
brew install python3 protobuf
```
### Linux
On Ubuntu (we recommend 18.04 or 20.04), install the following:
```
sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt-get update
sudo apt-get install build-essential python3.7 python3.7-dev git python3.7-venv libssl-dev python-protobuf
```
On Ubuntu (we recommend 18.04), install the following:
The [deadsnakes PPA](https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa) provides Python 3.7
for those Ubuntu distributions that no longer have it in their
official repositories.
```
sudo apt-get install build-essential python3.7 python3.7-dev git python-virtualenv libssl-dev python-protobuf
```
On Raspbian, you will also need to install `python-pyparsing`.
@@ -57,121 +46,32 @@ If you're running another Linux distro, install the equivalent of the above pack
## Installation
### Linux/Mac
To install:
Clone the repository:
```bash
git clone https://github.com/lbryio/lbry-sdk.git
cd lbry-sdk
```
```
git clone https://github.com/lbryio/lbry.git
cd lbry
Create a Python virtual environment for lbry-sdk:
```bash
python3.7 -m venv lbry-venv
```
virtualenv lbry-venv --python=python3.7
source lbry-venv/bin/activate
Activate virtual environment:
```bash
source lbry-venv/bin/activate
```
python --version # Python 2 is not supported. Make sure you're on Python 3.7
Make sure you're on Python 3.7+ as default in the virtual environment:
```bash
python --version
```
pip install -e .
```
Install packages:
```bash
make install
```
If you are on Linux and using PyCharm, generate the initial configs:
```bash
make idea
```
To verify your installation, `which lbrynet` should return a path inside
of the `lbry-venv` folder.
```bash
(lbry-venv) $ which lbrynet
/opt/lbry-sdk/lbry-venv/bin/lbrynet
```
To exit the virtual environment simply use the command `deactivate`.
### Windows
Clone the repository:
```bash
git clone https://github.com/lbryio/lbry-sdk.git
cd lbry-sdk
```
Create a Python virtual environment for lbry-sdk:
```bash
python -m venv lbry-venv
```
Activate virtual environment:
```bash
lbry-venv\Scripts\activate
```
Install packages:
```bash
pip install -e .
```
To verify your installation, `which lbrynet` should return a path inside of the `lbry-venv` folder created by the `virtualenv` command.
## Run the tests
### Elasticsearch
To run the integration tests, Elasticsearch must be available at localhost:9200.
The easiest way to start it is using docker with:
```bash
make elastic-docker
```
Alternative installation methods are available [at Elasticsearch website](https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html).
To run the unit and integration tests from the repo directory:
```
python -m unittest discover tests.unit
python -m unittest discover tests.integration
```
To run the unit tests from the repo directory:
```
trial --reactor=asyncio tests.unit
```
## Usage
To start the API server:
```
lbrynet start
```
`lbrynet start`
Whenever the code inside [lbry-sdk/lbry](./lbry)
is modified we should run `make install` to recompile the `lbrynet`
executable with the newest code.
## Development
When developing, remember to enter the virtual environment and, if you wish, start the server interactively.
```bash
$ source lbry-venv/bin/activate
(lbry-venv) $ python lbry/extras/cli.py start
```
Parameters can be passed in the same way.
```bash
(lbry-venv) $ python lbry/extras/cli.py wallet balance
```
If a Python debugger (`pdb` or `ipdb`) is installed we can also start it
in this way, set up break points, and step through the code.
```bash
(lbry-venv) $ pip install ipdb
(lbry-venv) $ ipdb lbry/extras/cli.py
```
Happy hacking!

View file

@@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2015-2022 LBRY Inc
Copyright (c) 2015-2018 LBRY Inc
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,

View file

@@ -1,4 +0,0 @@
include README.md
include CHANGELOG.md
include LICENSE
recursive-include lbry *.txt *.py

View file

@@ -1,26 +0,0 @@
.PHONY: install tools lint test test-unit test-unit-coverage test-integration idea

install:
	pip install -e .

lint:
	pylint --rcfile=setup.cfg lbry
	#mypy --ignore-missing-imports lbry

test: test-unit test-integration

test-unit:
	python -m unittest discover tests.unit

test-unit-coverage:
	coverage run --source=lbry -m unittest discover -vv tests.unit

test-integration:
	tox

idea:
	mkdir -p .idea
	cp -r scripts/idea/* .idea

elastic-docker:
	docker run -d -v lbryhub:/usr/share/elasticsearch/data -p 9200:9200 -p 9300:9300 -e "ES_JAVA_OPTS=-Xms512m -Xmx512m" -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.12.1

View file

@@ -1,19 +1,19 @@
# <img src="https://raw.githubusercontent.com/lbryio/lbry-sdk/master/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![build](https://github.com/lbryio/lbry-sdk/actions/workflows/main.yml/badge.svg)](https://github.com/lbryio/lbry-sdk/actions/workflows/main.yml) [![coverage](https://coveralls.io/repos/github/lbryio/lbry-sdk/badge.svg)](https://coveralls.io/github/lbryio/lbry-sdk)
# <img src="https://raw.githubusercontent.com/lbryio/lbry/master/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![Build Status](https://travis-ci.org/lbryio/lbry.svg?branch=master)](https://travis-ci.org/lbryio/lbry) [![Test Coverage](https://codecov.io/gh/lbryio/lbry/branch/master/graph/badge.svg)](https://codecov.io/gh/lbryio/lbry)
LBRY is a decentralized peer-to-peer protocol for publishing and accessing digital content. It utilizes the [LBRY blockchain](https://github.com/lbryio/lbrycrd) as a global namespace and database of digital content. Blockchain entries contain searchable content metadata, identities, rights and access rules. LBRY also provides a data network that consists of peers (seeders) uploading and downloading data from other peers, possibly in exchange for payments, as well as a distributed hash table used by peers to discover other peers.
LBRY is a decentralized peer-to-peer network providing distribution, discovery, and purchase of digital content (data). It utilizes the [LBRY blockchain](https://github.com/lbryio/lbrycrd) as a global namespace and database of digital content. Blockchain entries contain searchable content metadata, identities, rights and access rules. LBRY also provides a data network that consists of peers (seeders) uploading and downloading data from other peers, possibly in exchange for payments, as well as a distributed hash table used by peers to discover other peers.
LBRY SDK for Python is currently the most fully featured implementation of the LBRY Network protocols and includes many useful components and tools for building decentralized applications. Primary features and components include:
LBRY SDK for Python is currently the most full featured implementation of the LBRY Network protocols and includes many useful components and tools for building decentralized applications. Primary features and components:
* Built on Python 3.7 and `asyncio`.
* Kademlia DHT (Distributed Hash Table) implementation for finding peers to download from and announcing to peers what we have to host ([lbry.dht](https://github.com/lbryio/lbry-sdk/tree/master/lbry/dht)).
* Blob exchange protocol for transferring encrypted blobs of content and negotiating payments ([lbry.blob_exchange](https://github.com/lbryio/lbry-sdk/tree/master/lbry/blob_exchange)).
* Protobuf schema for encoding and decoding metadata stored on the blockchain ([lbry.schema](https://github.com/lbryio/lbry-sdk/tree/master/lbry/schema)).
* Wallet implementation for the LBRY blockchain ([lbry.wallet](https://github.com/lbryio/lbry-sdk/tree/master/lbry/wallet)).
* Daemon with a JSON-RPC API to ease building end user applications in any language and for automating various tasks ([lbry.extras.daemon](https://github.com/lbryio/lbry-sdk/tree/master/lbry/extras/daemon)).
* Built on Python 3.7+ and `asyncio`.
* DHT (Distributed Hash Table) implementation for finding peers ([lbrynet.dht](https://github.com/lbryio/lbry/tree/master/lbrynet/dht)).
* Blob exchange protocol for downloading content and negotiating payments ([lbrynet.blob_exchange](https://github.com/lbryio/lbry/tree/master/lbrynet/blob_exchange)).
* Protobuf schema for encoding and decoding metadata stored on the blockchain ([lbrynet.schema](https://github.com/lbryio/lbry/tree/master/lbrynet/schema)).
* Wallet implementation for the LBRY blockchain ([lbrynet.extras.wallet](https://github.com/lbryio/lbry/tree/master/lbrynet/extras/wallet)).
* Daemon with a JSON-RPC API to ease building end user applications in any language and for automating various tasks ([lbrynet.extras.daemon](https://github.com/lbryio/lbry/tree/master/lbrynet/extras/daemon)).
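As a rough illustration of how the JSON-RPC daemon is consumed, here is a minimal client call, assuming a daemon started with `lbrynet start` and listening on the default port 5279 (the same endpoint the issue template's `curl` example uses):

```python
import json
from urllib import request

# Assumes a running daemon on the default JSON-RPC port (5279).
req = request.Request(
    "http://localhost:5279",
    data=json.dumps({"method": "version"}).encode(),
    headers={"Content-Type": "application/json"},
)
with request.urlopen(req) as resp:
    print(json.load(resp))  # daemon build/platform info
```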
## Installation
Our [releases page](https://github.com/lbryio/lbry-sdk/releases) contains pre-built binaries of the latest release, pre-releases, and past releases for macOS, Debian-based Linux, and Windows. [Automated travis builds](http://build.lbry.io/daemon/) are also available for testing.
Our [releases page](https://github.com/lbryio/lbry/releases) contains pre-built binaries of the latest release, pre-releases, and past releases for macOS, Debian-based Linux, and Windows. [Automated travis builds](http://build.lbry.io/daemon/) are also available for testing.
## Usage
@@ -33,7 +33,7 @@ Installing from source is also relatively painless. Full instructions are in [IN
## Contributing
Contributions to this project are welcome, encouraged, and compensated. For more details, please check [this](https://lbry.tech/contribute) link.
Contributions to this project are welcome, encouraged, and compensated. For more details, please check [this](https://lbry.io/faq/contributing) link.
## License
@@ -41,11 +41,11 @@ This project is MIT licensed. For the full license, see [LICENSE](LICENSE).
## Security
We take security seriously. Please contact security@lbry.com regarding any security issues. [Our PGP key is here](https://lbry.com/faq/pgp-key) if you need it.
We take security seriously. Please contact security@lbry.io regarding any security issues. [Our GPG key is here](https://lbry.io/faq/gpg-key) if you need it.
## Contact
The primary contact for this project is [@eukreign](mailto:lex@lbry.com).
The primary contact for this project is [@eukreign](mailto:lex@lbry.io).
## Additional information and links
@@ -53,4 +53,4 @@ The documentation for the API can be found [here](https://lbry.tech/api/sdk).
Daemon defaults, ports, and other settings are documented [here](https://lbry.tech/resources/daemon-settings).
Settings can be configured using a daemon-settings.yml file. An example can be found [here](https://github.com/lbryio/lbry-sdk/blob/master/example_daemon_settings.yml).
Settings can be configured using a daemon-settings.yml file. An example can be found [here](https://github.com/lbryio/lbry/blob/master/example_daemon_settings.yml).

View file

@@ -1,9 +0,0 @@
# Security Policy
## Supported Versions
While we are not at v1.0 yet, only the latest release will be supported.
## Reporting a Vulnerability
See https://lbry.com/faq/security

5
android/Makefile Normal file
View file

@@ -0,0 +1,5 @@
build:
	python setup.py apk
	adb install -r lbry-0.1-service-debug.apk

.PHONY: build

View file

@@ -0,0 +1,49 @@
import inspect
from os.path import dirname, join
import sh
from pythonforandroid.util import ensure_dir
from pythonforandroid.toolchain import (
    Bootstrap, current_directory, info, info_main, shprint
)


class LBRYServiceBootstrap(Bootstrap):
    name = 'lbry-service'
    recipe_depends = ['genericndkbuild', 'python3']
    bootstrap_dir = dirname(__file__)

    def get_common_dir(self):
        return join(dirname(inspect.getfile(Bootstrap)), 'bootstraps', 'common')

    def run_distribute(self):
        info_main('# Creating Android project from build and {} bootstrap'.format(
            self.name))
        info('This currently just copies the build stuff straight from the build dir.')
        shprint(sh.rm, '-rf', self.dist_dir)
        shprint(sh.cp, '-r', self.build_dir, self.dist_dir)
        with current_directory(self.dist_dir):
            with open('local.properties', 'w') as fileh:
                fileh.write('sdk.dir={}'.format(self.ctx.sdk_dir))

        arch = self.ctx.archs[0]
        if len(self.ctx.archs) > 1:
            raise ValueError('built for more than one arch, but bootstrap cannot handle that yet')
        info('Bootstrap running with arch {}'.format(arch))

        with current_directory(self.dist_dir):
            info('Copying python distribution')

            self.distribute_libs(arch, [self.ctx.get_libs_dir(arch.arch)])
            self.distribute_aars(arch)
            self.distribute_javaclasses(self.ctx.javaclass_dir)

            python_bundle_dir = join('_python_bundle', '_python_bundle')
            ensure_dir(python_bundle_dir)
            site_packages_dir = self.ctx.python_recipe.create_python_bundle(
                join(self.dist_dir, python_bundle_dir), arch)

        self.strip_libraries(arch)
        self.fry_eggs(site_packages_dir)
        super().run_distribute()

View file

@@ -0,0 +1,89 @@
# prevent users from including invalid extensions
*.apk
*.pxd
# eggs
*.egg-info
# unit test
unittest/*
# python config
config/makesetup
# unused kivy files (platform specific)
kivy/input/providers/wm_*
kivy/input/providers/mactouch*
kivy/input/providers/probesysfs*
kivy/input/providers/mtdev*
kivy/input/providers/hidinput*
kivy/core/camera/camera_videocapture*
kivy/core/spelling/*osx*
kivy/core/video/video_pyglet*
kivy/tools
kivy/tests/*
kivy/*/*.h
kivy/*/*.pxi
# unused encodings
lib-dynload/*codec*
encodings/cp*.pyo
encodings/tis*
encodings/shift*
encodings/bz2*
encodings/iso*
encodings/undefined*
encodings/johab*
encodings/p*
encodings/m*
encodings/euc*
encodings/k*
encodings/unicode_internal*
encodings/quo*
encodings/gb*
encodings/big5*
encodings/hp*
encodings/hz*
# unused python modules
bsddb/*
wsgiref/*
hotshot/*
pydoc_data/*
tty.pyo
anydbm.pyo
nturl2path.pyo
LICENCE.txt
macurl2path.pyo
dummy_threading.pyo
audiodev.pyo
antigravity.pyo
dumbdbm.pyo
sndhdr.pyo
__phello__.foo.pyo
sunaudio.pyo
os2emxpath.pyo
multiprocessing/dummy*
# unused binaries python modules
lib-dynload/termios.so
lib-dynload/_lsprof.so
lib-dynload/*audioop.so
lib-dynload/_hotshot.so
lib-dynload/_heapq.so
lib-dynload/_json.so
lib-dynload/grp.so
lib-dynload/resource.so
lib-dynload/pyexpat.so
lib-dynload/_ctypes_test.so
lib-dynload/_testcapi.so
# odd files
plat-linux3/regen
#>sqlite3
# conditional include, depending on whether some recipes are included or not.
sqlite3/*
lib-dynload/_sqlite3.so
#<sqlite3

View file

@@ -0,0 +1 @@
include $(call all-subdir-makefiles)

View file

@@ -0,0 +1,7 @@
# Uncomment this if you're using STL in your project
# See CPLUSPLUS-SUPPORT.html in the NDK documentation for more information
# APP_STL := stlport_static
# APP_ABI := armeabi armeabi-v7a x86
APP_ABI := $(ARCH)

View file

@@ -0,0 +1,22 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := main
# Add your application source files here...
LOCAL_SRC_FILES := start.c pyjniusjni.c
LOCAL_CFLAGS += -I$(LOCAL_PATH)/../../../../../other_builds/$(MK_PYTHON_INCLUDE_ROOT) $(EXTRA_CFLAGS)
LOCAL_SHARED_LIBRARIES := python_shared
LOCAL_LDLIBS := -llog $(EXTRA_LDLIBS)
LOCAL_LDFLAGS += -L$(LOCAL_PATH)/../../../../../other_builds/$(MK_PYTHON_LINK_ROOT) $(APPLICATION_ADDITIONAL_LDFLAGS)
include $(BUILD_SHARED_LIBRARY)
ifdef CRYSTAX_PYTHON_VERSION
$(call import-module,python/$(CRYSTAX_PYTHON_VERSION))
endif

View file

@@ -0,0 +1,10 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := main
LOCAL_SRC_FILES := YourSourceHere.c
include $(BUILD_SHARED_LIBRARY)
$(call import-module,SDL)LOCAL_PATH := $(call my-dir)

View file

@@ -0,0 +1,6 @@
#define BOOTSTRAP_NAME_SERVICEONLY
#define BOOTSTRAP_USES_NO_SDL_HEADERS
const char bootstrap_name[] = "service_only";

View file

@@ -0,0 +1,103 @@
#include <pthread.h>
#include <jni.h>

#define LOGI(...) do {} while (0)
#define LOGE(...) do {} while (0)

#include "android/log.h"

/* These JNI management functions are taken from SDL2, but modified to refer to pyjnius */

/* #define LOG(n, x) __android_log_write(ANDROID_LOG_INFO, (n), (x)) */
/* #define LOGP(x) LOG("python", (x)) */
#define LOG_TAG "Python_android"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)

/* Function headers */
JNIEnv* Android_JNI_GetEnv(void);
static void Android_JNI_ThreadDestroyed(void*);

static pthread_key_t mThreadKey;
static JavaVM* mJavaVM;

int Android_JNI_SetupThread(void)
{
    Android_JNI_GetEnv();
    return 1;
}

/* Library init */
JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void* reserved)
{
    JNIEnv *env;
    mJavaVM = vm;
    LOGI("JNI_OnLoad called");
    if ((*mJavaVM)->GetEnv(mJavaVM, (void**) &env, JNI_VERSION_1_4) != JNI_OK) {
        LOGE("Failed to get the environment using GetEnv()");
        return -1;
    }
    /*
     * Create mThreadKey so we can keep track of the JNIEnv assigned to each thread
     * Refer to http://developer.android.com/guide/practices/design/jni.html for the rationale behind this
     */
    if (pthread_key_create(&mThreadKey, Android_JNI_ThreadDestroyed) != 0) {
        __android_log_print(ANDROID_LOG_ERROR, "pyjniusjni", "Error initializing pthread key");
    }
    Android_JNI_SetupThread();
    return JNI_VERSION_1_4;
}

JNIEnv* Android_JNI_GetEnv(void)
{
    /* From http://developer.android.com/guide/practices/jni.html
     * All threads are Linux threads, scheduled by the kernel.
     * They're usually started from managed code (using Thread.start), but they can also be created elsewhere and then
     * attached to the JavaVM. For example, a thread started with pthread_create can be attached with the
     * JNI AttachCurrentThread or AttachCurrentThreadAsDaemon functions. Until a thread is attached, it has no JNIEnv,
     * and cannot make JNI calls.
     * Attaching a natively-created thread causes a java.lang.Thread object to be constructed and added to the "main"
     * ThreadGroup, making it visible to the debugger. Calling AttachCurrentThread on an already-attached thread
     * is a no-op.
     * Note: You can call this function any number of times for the same thread, there's no harm in it
     */
    JNIEnv *env;
    int status = (*mJavaVM)->AttachCurrentThread(mJavaVM, &env, NULL);
    if (status < 0) {
        LOGE("failed to attach current thread");
        return 0;
    }
    /* From http://developer.android.com/guide/practices/jni.html
     * Threads attached through JNI must call DetachCurrentThread before they exit. If coding this directly is awkward,
     * in Android 2.0 (Eclair) and higher you can use pthread_key_create to define a destructor function that will be
     * called before the thread exits, and call DetachCurrentThread from there. (Use that key with pthread_setspecific
     * to store the JNIEnv in thread-local-storage; that way it'll be passed into your destructor as the argument.)
     * Note: The destructor is not called unless the stored value is != NULL
     * Note: You can call this function any number of times for the same thread, there's no harm in it
     * (except for some lost CPU cycles)
     */
    pthread_setspecific(mThreadKey, (void*) env);
    return env;
}

static void Android_JNI_ThreadDestroyed(void* value)
{
    /* The thread is being destroyed, detach it from the Java VM and set the mThreadKey value to NULL as required */
    JNIEnv *env = (JNIEnv*) value;
    if (env != NULL) {
        (*mJavaVM)->DetachCurrentThread(mJavaVM);
        pthread_setspecific(mThreadKey, NULL);
    }
}

void *WebView_AndroidGetJNIEnv()
{
    return Android_JNI_GetEnv();
}

View file

@@ -0,0 +1,141 @@
/**
 * Copyright 2012 Kamran Zafar
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package org.kamranzafar.jtar;

/**
 * @author Kamran Zafar
 *
 */
public class Octal {

    /**
     * Parse an octal string from a header buffer. This is used for the file
     * permission mode value.
     *
     * @param header
     *            The header buffer from which to parse.
     * @param offset
     *            The offset into the buffer from which to parse.
     * @param length
     *            The number of header bytes to parse.
     *
     * @return The long value of the octal string.
     */
    public static long parseOctal(byte[] header, int offset, int length) {
        long result = 0;
        boolean stillPadding = true;

        int end = offset + length;
        for (int i = offset; i < end; ++i) {
            if (header[i] == 0)
                break;

            if (header[i] == (byte) ' ' || header[i] == '0') {
                if (stillPadding)
                    continue;

                if (header[i] == (byte) ' ')
                    break;
            }

            stillPadding = false;
            result = (result << 3) + (header[i] - '0');
        }

        return result;
    }

    /**
     * Write an octal integer into a header buffer.
     *
     * @param value
     *            The value to encode.
     * @param buf
     *            The header buffer to write into.
     * @param offset
     *            The offset into the buffer at which to write.
     * @param length
     *            The number of header bytes to write.
     *
     * @return The offset just past the written field.
     */
    public static int getOctalBytes(long value, byte[] buf, int offset, int length) {
        int idx = length - 1;

        buf[offset + idx] = 0;
        --idx;
        buf[offset + idx] = (byte) ' ';
        --idx;

        if (value == 0) {
            buf[offset + idx] = (byte) '0';
            --idx;
        } else {
            for (long val = value; idx >= 0 && val > 0; --idx) {
                buf[offset + idx] = (byte) ((byte) '0' + (byte) (val & 7));
                val = val >> 3;
            }
        }

        for (; idx >= 0; --idx) {
            buf[offset + idx] = (byte) ' ';
        }

        return offset + length;
    }

    /**
     * Write the checksum octal integer into a header buffer, NUL and space
     * terminated.
     *
     * @param value
     *            The checksum to encode.
     * @param buf
     *            The header buffer to write into.
     * @param offset
     *            The offset into the buffer at which to write.
     * @param length
     *            The number of header bytes to write.
     * @return The offset just past the written field.
     */
    public static int getCheckSumOctalBytes(long value, byte[] buf, int offset, int length) {
        getOctalBytes(value, buf, offset, length);
        buf[offset + length - 1] = (byte) ' ';
        buf[offset + length - 2] = 0;
        return offset + length;
    }

    /**
     * Write an octal long integer into a header buffer.
     *
     * @param value
     *            The value to encode.
     * @param buf
     *            The header buffer to write into.
     * @param offset
     *            The offset into the buffer at which to write.
     * @param length
     *            The number of header bytes to write.
     *
     * @return The offset just past the written field.
     */
    public static int getLongOctalBytes(long value, byte[] buf, int offset, int length) {
        byte[] temp = new byte[length + 1];
        getOctalBytes(value, temp, 0, length + 1);
        System.arraycopy(temp, 0, buf, offset, length);
        return offset + length;
    }
}
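Tar headers store every numeric field as ASCII octal text, which is what `parseOctal` decodes. A simplified Python equivalent, for illustration only:

```python
def parse_octal(header: bytes, offset: int, length: int) -> int:
    # Like Octal.parseOctal above: the field ends at the first NUL,
    # and space/zero padding around the digits is ignored.
    field = header[offset:offset + length].split(b"\0", 1)[0].strip(b" ")
    return int(field, 8) if field else 0

# The 8-byte mode field b"0000644 " decodes to 0o644 (rw-r--r--):
assert parse_octal(b"0000644 ", 0, 8) == 0o644
```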

View file

@@ -0,0 +1,28 @@
/**
 * Copyright 2012 Kamran Zafar
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package org.kamranzafar.jtar;

/**
 * @author Kamran Zafar
 *
 */
public class TarConstants {
    public static final int EOF_BLOCK = 1024;
    public static final int DATA_BLOCK = 512;
    public static final int HEADER_BLOCK = 512;
}

View file

@@ -0,0 +1,284 @@
/**
* Copyright 2012 Kamran Zafar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.kamranzafar.jtar;
import java.io.File;
import java.util.Date;
/**
* @author Kamran Zafar
*
*/
public class TarEntry {
protected File file;
protected TarHeader header;
private TarEntry() {
this.file = null;
header = new TarHeader();
}
public TarEntry(File file, String entryName) {
this();
this.file = file;
this.extractTarHeader(entryName);
}
public TarEntry(byte[] headerBuf) {
this();
this.parseTarHeader(headerBuf);
}
/**
* Constructor to create an entry from an existing TarHeader object.
*
* This method is useful to add new entries programmatically (e.g. for
* adding files or directories that do not exist in the file system).
*
* @param header
*
*/
public TarEntry(TarHeader header) {
this.file = null;
this.header = header;
}
public boolean equals(TarEntry it) {
return header.name.toString().equals(it.header.name.toString());
}
public boolean isDescendent(TarEntry desc) {
return desc.header.name.toString().startsWith(header.name.toString());
}
public TarHeader getHeader() {
return header;
}
public String getName() {
String name = header.name.toString();
if (header.namePrefix != null && !header.namePrefix.toString().equals("")) {
name = header.namePrefix.toString() + "/" + name;
}
return name;
}
public void setName(String name) {
header.name = new StringBuffer(name);
}
public int getUserId() {
return header.userId;
}
public void setUserId(int userId) {
header.userId = userId;
}
public int getGroupId() {
return header.groupId;
}
public void setGroupId(int groupId) {
header.groupId = groupId;
}
public String getUserName() {
return header.userName.toString();
}
public void setUserName(String userName) {
header.userName = new StringBuffer(userName);
}
public String getGroupName() {
return header.groupName.toString();
}
public void setGroupName(String groupName) {
header.groupName = new StringBuffer(groupName);
}
public void setIds(int userId, int groupId) {
this.setUserId(userId);
this.setGroupId(groupId);
}
public void setModTime(long time) {
header.modTime = time / 1000;
}
public void setModTime(Date time) {
header.modTime = time.getTime() / 1000;
}
public Date getModTime() {
return new Date(header.modTime * 1000);
}
public File getFile() {
return this.file;
}
public long getSize() {
return header.size;
}
public void setSize(long size) {
header.size = size;
}
/**
* Checks if the org.kamrazafar.jtar entry is a directory
*
* @return
*/
public boolean isDirectory() {
if (this.file != null)
return this.file.isDirectory();
if (header != null) {
if (header.linkFlag == TarHeader.LF_DIR)
return true;
if (header.name.toString().endsWith("/"))
return true;
}
return false;
}
/**
* Extract header from File
*
* @param entryName
*/
public void extractTarHeader(String entryName) {
header = TarHeader.createHeader(entryName, file.length(), file.lastModified() / 1000, file.isDirectory());
}
/**
* Calculate checksum
*
* @param buf
* @return
*/
public long computeCheckSum(byte[] buf) {
long sum = 0;
for (int i = 0; i < buf.length; ++i) {
sum += 255 & buf[i];
}
return sum;
}
/**
* Writes the header to the byte buffer
*
* @param outbuf
*/
public void writeEntryHeader(byte[] outbuf) {
int offset = 0;
offset = TarHeader.getNameBytes(header.name, outbuf, offset, TarHeader.NAMELEN);
offset = Octal.getOctalBytes(header.mode, outbuf, offset, TarHeader.MODELEN);
offset = Octal.getOctalBytes(header.userId, outbuf, offset, TarHeader.UIDLEN);
offset = Octal.getOctalBytes(header.groupId, outbuf, offset, TarHeader.GIDLEN);
long size = header.size;
offset = Octal.getLongOctalBytes(size, outbuf, offset, TarHeader.SIZELEN);
offset = Octal.getLongOctalBytes(header.modTime, outbuf, offset, TarHeader.MODTIMELEN);
int csOffset = offset;
for (int c = 0; c < TarHeader.CHKSUMLEN; ++c)
outbuf[offset++] = (byte) ' ';
outbuf[offset++] = header.linkFlag;
offset = TarHeader.getNameBytes(header.linkName, outbuf, offset, TarHeader.NAMELEN);
offset = TarHeader.getNameBytes(header.magic, outbuf, offset, TarHeader.USTAR_MAGICLEN);
offset = TarHeader.getNameBytes(header.userName, outbuf, offset, TarHeader.USTAR_USER_NAMELEN);
offset = TarHeader.getNameBytes(header.groupName, outbuf, offset, TarHeader.USTAR_GROUP_NAMELEN);
offset = Octal.getOctalBytes(header.devMajor, outbuf, offset, TarHeader.USTAR_DEVLEN);
offset = Octal.getOctalBytes(header.devMinor, outbuf, offset, TarHeader.USTAR_DEVLEN);
offset = TarHeader.getNameBytes(header.namePrefix, outbuf, offset, TarHeader.USTAR_FILENAME_PREFIX);
for (; offset < outbuf.length;)
outbuf[offset++] = 0;
long checkSum = this.computeCheckSum(outbuf);
Octal.getCheckSumOctalBytes(checkSum, outbuf, csOffset, TarHeader.CHKSUMLEN);
}
/**
* Parses the tar header from the given byte buffer
*
* @param bh
* The header block to parse.
*/
public void parseTarHeader(byte[] bh) {
int offset = 0;
header.name = TarHeader.parseName(bh, offset, TarHeader.NAMELEN);
offset += TarHeader.NAMELEN;
header.mode = (int) Octal.parseOctal(bh, offset, TarHeader.MODELEN);
offset += TarHeader.MODELEN;
header.userId = (int) Octal.parseOctal(bh, offset, TarHeader.UIDLEN);
offset += TarHeader.UIDLEN;
header.groupId = (int) Octal.parseOctal(bh, offset, TarHeader.GIDLEN);
offset += TarHeader.GIDLEN;
header.size = Octal.parseOctal(bh, offset, TarHeader.SIZELEN);
offset += TarHeader.SIZELEN;
header.modTime = Octal.parseOctal(bh, offset, TarHeader.MODTIMELEN);
offset += TarHeader.MODTIMELEN;
header.checkSum = (int) Octal.parseOctal(bh, offset, TarHeader.CHKSUMLEN);
offset += TarHeader.CHKSUMLEN;
header.linkFlag = bh[offset++];
header.linkName = TarHeader.parseName(bh, offset, TarHeader.NAMELEN);
offset += TarHeader.NAMELEN;
header.magic = TarHeader.parseName(bh, offset, TarHeader.USTAR_MAGICLEN);
offset += TarHeader.USTAR_MAGICLEN;
header.userName = TarHeader.parseName(bh, offset, TarHeader.USTAR_USER_NAMELEN);
offset += TarHeader.USTAR_USER_NAMELEN;
header.groupName = TarHeader.parseName(bh, offset, TarHeader.USTAR_GROUP_NAMELEN);
offset += TarHeader.USTAR_GROUP_NAMELEN;
header.devMajor = (int) Octal.parseOctal(bh, offset, TarHeader.USTAR_DEVLEN);
offset += TarHeader.USTAR_DEVLEN;
header.devMinor = (int) Octal.parseOctal(bh, offset, TarHeader.USTAR_DEVLEN);
offset += TarHeader.USTAR_DEVLEN;
header.namePrefix = TarHeader.parseName(bh, offset, TarHeader.USTAR_FILENAME_PREFIX);
}
}
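
As a quick illustration of the constructors above, here is a minimal sketch that builds an entry programmatically from a TarHeader rather than from a file on disk; the entry name and timestamp are invented for the example:

```
import org.kamranzafar.jtar.TarEntry;
import org.kamranzafar.jtar.TarHeader;

public class EntryFromHeaderExample {
    public static void main(String[] args) {
        // A directory entry with no backing file: dir = true sets mode 040755,
        // LF_DIR, size 0, and guarantees a trailing '/' on the name.
        TarHeader header = TarHeader.createHeader(
                "logs/", 0, System.currentTimeMillis() / 1000, true);
        TarEntry entry = new TarEntry(header);
        System.out.println(entry.getName() + " dir=" + entry.isDirectory());
    }
}
```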

View file

@ -0,0 +1,243 @@
/**
* Copyright 2012 Kamran Zafar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.kamranzafar.jtar;
import java.io.File;
/**
* Header
*
* <pre>
* Offset Size Field
* 0 100 File name
* 100 8 File mode
* 108 8 Owner's numeric user ID
* 116 8 Group's numeric user ID
* 124 12 File size in bytes
* 136 12 Last modification time in numeric Unix time format
* 148 8 Checksum for header block
* 156 1 Link indicator (file type)
* 157 100 Name of linked file
* </pre>
*
*
* File Types
*
* <pre>
* Value Meaning
* '0' Normal file
* (ASCII NUL) Normal file (now obsolete)
* '1' Hard link
* '2' Symbolic link
* '3' Character special
* '4' Block special
* '5' Directory
* '6' FIFO
* '7' Contiguous
* </pre>
*
*
*
* Ustar header
*
* <pre>
* Offset Size Field
* 257 6 UStar indicator "ustar"
* 263 2 UStar version "00"
* 265 32 Owner user name
* 297 32 Owner group name
* 329 8 Device major number
* 337 8 Device minor number
* 345 155 Filename prefix
* </pre>
*/
public class TarHeader {
/*
* Header
*/
public static final int NAMELEN = 100;
public static final int MODELEN = 8;
public static final int UIDLEN = 8;
public static final int GIDLEN = 8;
public static final int SIZELEN = 12;
public static final int MODTIMELEN = 12;
public static final int CHKSUMLEN = 8;
public static final byte LF_OLDNORM = 0;
/*
* File Types
*/
public static final byte LF_NORMAL = (byte) '0';
public static final byte LF_LINK = (byte) '1';
public static final byte LF_SYMLINK = (byte) '2';
public static final byte LF_CHR = (byte) '3';
public static final byte LF_BLK = (byte) '4';
public static final byte LF_DIR = (byte) '5';
public static final byte LF_FIFO = (byte) '6';
public static final byte LF_CONTIG = (byte) '7';
/*
* Ustar header
*/
public static final String USTAR_MAGIC = "ustar"; // POSIX
public static final int USTAR_MAGICLEN = 8;
public static final int USTAR_USER_NAMELEN = 32;
public static final int USTAR_GROUP_NAMELEN = 32;
public static final int USTAR_DEVLEN = 8;
public static final int USTAR_FILENAME_PREFIX = 155;
// Header values
public StringBuffer name;
public int mode;
public int userId;
public int groupId;
public long size;
public long modTime;
public int checkSum;
public byte linkFlag;
public StringBuffer linkName;
public StringBuffer magic; // ustar indicator and version
public StringBuffer userName;
public StringBuffer groupName;
public int devMajor;
public int devMinor;
public StringBuffer namePrefix;
public TarHeader() {
this.magic = new StringBuffer(TarHeader.USTAR_MAGIC);
this.name = new StringBuffer();
this.linkName = new StringBuffer();
String user = System.getProperty("user.name", "");
if (user.length() > 31)
user = user.substring(0, 31);
this.userId = 0;
this.groupId = 0;
this.userName = new StringBuffer(user);
this.groupName = new StringBuffer("");
this.namePrefix = new StringBuffer();
}
/**
* Parse an entry name from a header buffer.
*
* @param header
* The header buffer from which to parse.
* @param offset
* The offset into the buffer from which to parse.
* @param length
* The number of header bytes to parse.
* @return The header's entry name.
*/
public static StringBuffer parseName(byte[] header, int offset, int length) {
StringBuffer result = new StringBuffer(length);
int end = offset + length;
for (int i = offset; i < end; ++i) {
if (header[i] == 0)
break;
result.append((char) header[i]);
}
return result;
}
/**
* Writes an entry name into a header buffer, padding the field with NULs.
*
* @param name
* The name to write.
* @param buf
* The buffer to write into.
* @param offset
* The offset into the buffer at which to write.
* @param length
* The length of the name field in bytes.
* @return The offset just past the written field (offset + length).
*/
public static int getNameBytes(StringBuffer name, byte[] buf, int offset, int length) {
int i;
for (i = 0; i < length && i < name.length(); ++i) {
buf[offset + i] = (byte) name.charAt(i);
}
for (; i < length; ++i) {
buf[offset + i] = 0;
}
return offset + length;
}
/**
* Creates a new header for a file/directory entry.
*
* @param entryName
* Entry name
* @param size
* File size in bytes
* @param modTime
* Last modification time in numeric Unix time format
* @param dir
* Is directory
* @return The populated header.
*/
public static TarHeader createHeader(String entryName, long size, long modTime, boolean dir) {
String name = entryName;
name = TarUtils.trim(name.replace(File.separatorChar, '/'), '/');
TarHeader header = new TarHeader();
header.linkName = new StringBuffer("");
if (name.length() > 100) {
header.namePrefix = new StringBuffer(name.substring(0, name.lastIndexOf('/')));
header.name = new StringBuffer(name.substring(name.lastIndexOf('/') + 1));
} else {
header.name = new StringBuffer(name);
}
if (dir) {
header.mode = 040755;
header.linkFlag = TarHeader.LF_DIR;
if (header.name.charAt(header.name.length() - 1) != '/') {
header.name.append("/");
}
header.size = 0;
} else {
header.mode = 0100644;
header.linkFlag = TarHeader.LF_NORMAL;
header.size = size;
}
header.modTime = modTime;
header.checkSum = 0;
header.devMajor = 0;
header.devMinor = 0;
return header;
}
}
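
The createHeader logic above splits paths longer than 100 characters at the last '/' into the ustar prefix and name fields. A hedged sketch of that behavior, with an invented path:

```
import org.kamranzafar.jtar.TarHeader;

public class LongNameExample {
    public static void main(String[] args) {
        // Build a path longer than NAMELEN (100 bytes); the segments are invented.
        StringBuilder dir = new StringBuilder();
        for (int i = 0; i < 12; i++) {
            dir.append("component-").append(i).append('/');
        }
        String path = dir + "file.bin"; // well over 100 chars
        TarHeader h = TarHeader.createHeader(path, 1234, 1548633600L, false);
        // Everything up to the last '/' lands in namePrefix; the rest in name.
        System.out.println("prefix=" + h.namePrefix);
        System.out.println("name=" + h.name);
    }
}
```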

View file

@ -0,0 +1,249 @@
/**
* Copyright 2012 Kamran Zafar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.kamranzafar.jtar;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
/**
* @author Kamran Zafar
*
*/
public class TarInputStream extends FilterInputStream {
private static final int SKIP_BUFFER_SIZE = 2048;
private TarEntry currentEntry;
private long currentFileSize;
private long bytesRead;
private boolean defaultSkip = false;
public TarInputStream(InputStream in) {
super(in);
currentFileSize = 0;
bytesRead = 0;
}
@Override
public boolean markSupported() {
return false;
}
/**
* Not supported
*
*/
@Override
public synchronized void mark(int readlimit) {
}
/**
* Not supported
*
*/
@Override
public synchronized void reset() throws IOException {
throw new IOException("mark/reset not supported");
}
/**
* Read a byte
*
* @see java.io.FilterInputStream#read()
*/
@Override
public int read() throws IOException {
byte[] buf = new byte[1];
int res = this.read(buf, 0, 1);
if (res != -1) {
return 0xFF & buf[0];
}
return res;
}
/**
* Reads up to the current entry's remaining size, clamping the requested
* length so a read never crosses an entry boundary, and updates the byte
* counters.
*
* @see java.io.FilterInputStream#read(byte[], int, int)
*/
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (currentEntry != null) {
if (currentFileSize == currentEntry.getSize()) {
return -1;
} else if ((currentEntry.getSize() - currentFileSize) < len) {
len = (int) (currentEntry.getSize() - currentFileSize);
}
}
int br = super.read(b, off, len);
if (br != -1) {
if (currentEntry != null) {
currentFileSize += br;
}
bytesRead += br;
}
return br;
}
/**
* Returns the next entry in the tar file
*
* @return TarEntry
* @throws IOException
*/
public TarEntry getNextEntry() throws IOException {
closeCurrentEntry();
byte[] header = new byte[TarConstants.HEADER_BLOCK];
byte[] theader = new byte[TarConstants.HEADER_BLOCK];
int tr = 0;
// Read full header
while (tr < TarConstants.HEADER_BLOCK) {
int res = read(theader, 0, TarConstants.HEADER_BLOCK - tr);
if (res < 0) {
break;
}
System.arraycopy(theader, 0, header, tr, res);
tr += res;
}
// Check if record is null
boolean eof = true;
for (byte b : header) {
if (b != 0) {
eof = false;
break;
}
}
if (!eof) {
currentEntry = new TarEntry(header);
}
return currentEntry;
}
/**
* Returns the current offset (in bytes) from the beginning of the stream.
* This can be used to find out at which point in a tar file an entry's content begins, for instance.
*/
public long getCurrentOffset() {
return bytesRead;
}
/**
* Closes the current tar entry
*
* @throws IOException
*/
protected void closeCurrentEntry() throws IOException {
if (currentEntry != null) {
if (currentEntry.getSize() > currentFileSize) {
// Not fully read, skip rest of the bytes
long bs = 0;
while (bs < currentEntry.getSize() - currentFileSize) {
long res = skip(currentEntry.getSize() - currentFileSize - bs);
if (res == 0 && currentEntry.getSize() - currentFileSize > 0) {
// skip() made no progress; the archive is likely truncated or corrupt
throw new IOException("Possible tar file corruption");
}
bs += res;
}
}
currentEntry = null;
currentFileSize = 0L;
skipPad();
}
}
/**
* Skips the padding at the end of each tar entry's file content
*
* @throws IOException
*/
protected void skipPad() throws IOException {
if (bytesRead > 0) {
int extra = (int) (bytesRead % TarConstants.DATA_BLOCK);
if (extra > 0) {
long bs = 0;
while (bs < TarConstants.DATA_BLOCK - extra) {
long res = skip(TarConstants.DATA_BLOCK - extra - bs);
bs += res;
}
}
}
}
/**
* Skips 'n' bytes on the InputStream<br>
* Overrides default implementation of skip
*
*/
@Override
public long skip(long n) throws IOException {
if (defaultSkip) {
// use skip method of parent stream
// may not work if skip not implemented by parent
long bs = super.skip(n);
bytesRead += bs;
return bs;
}
if (n <= 0) {
return 0;
}
long left = n;
byte[] sBuff = new byte[SKIP_BUFFER_SIZE];
while (left > 0) {
int res = read(sBuff, 0, (int) (left < SKIP_BUFFER_SIZE ? left : SKIP_BUFFER_SIZE));
if (res < 0) {
break;
}
left -= res;
}
return n - left;
}
public boolean isDefaultSkip() {
return defaultSkip;
}
public void setDefaultSkip(boolean defaultSkip) {
this.defaultSkip = defaultSkip;
}
}
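
For context, a minimal extraction loop over the stream above; "archive.tar" and the flat output layout are assumptions for the sketch (a real extractor would create parent directories and sanitize entry names):

```
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import org.kamranzafar.jtar.TarEntry;
import org.kamranzafar.jtar.TarInputStream;

public class UntarExample {
    public static void main(String[] args) throws IOException {
        TarInputStream tis = new TarInputStream(
                new BufferedInputStream(new FileInputStream("archive.tar")));
        byte[] buf = new byte[4096];
        TarEntry entry;
        // getNextEntry() returns null once an all-zero header block is seen.
        while ((entry = tis.getNextEntry()) != null) {
            if (entry.isDirectory()) {
                continue; // a real extractor would mkdirs() here
            }
            FileOutputStream out = new FileOutputStream(entry.getName());
            int n;
            // read() is clamped to the entry boundary, so -1 means end of entry.
            while ((n = tis.read(buf)) != -1) {
                out.write(buf, 0, n);
            }
            out.close();
        }
        tis.close();
    }
}
```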

View file

@ -0,0 +1,163 @@
/**
* Copyright 2012 Kamran Zafar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.kamranzafar.jtar;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.RandomAccessFile;
/**
* @author Kamran Zafar
*
*/
public class TarOutputStream extends OutputStream {
private final OutputStream out;
private long bytesWritten;
private long currentFileSize;
private TarEntry currentEntry;
public TarOutputStream(OutputStream out) {
this.out = out;
bytesWritten = 0;
currentFileSize = 0;
}
public TarOutputStream(final File fout) throws FileNotFoundException {
this.out = new BufferedOutputStream(new FileOutputStream(fout));
bytesWritten = 0;
currentFileSize = 0;
}
/**
* Opens a file for writing; when append is true, seeks past the existing
* content so new entries overwrite the trailing EOF record.
*/
public TarOutputStream(final File fout, final boolean append) throws IOException {
@SuppressWarnings("resource")
RandomAccessFile raf = new RandomAccessFile(fout, "rw");
final long fileSize = fout.length();
if (append && fileSize > TarConstants.EOF_BLOCK) {
raf.seek(fileSize - TarConstants.EOF_BLOCK);
}
out = new BufferedOutputStream(new FileOutputStream(raf.getFD()));
}
/**
* Appends the EOF record and closes the stream
*
* @see java.io.FilterOutputStream#close()
*/
@Override
public void close() throws IOException {
closeCurrentEntry();
write( new byte[TarConstants.EOF_BLOCK] );
out.close();
}
/**
* Writes a byte to the stream and updates byte counters
*
* @see java.io.FilterOutputStream#write(int)
*/
@Override
public void write(int b) throws IOException {
out.write( b );
bytesWritten += 1;
if (currentEntry != null) {
currentFileSize += 1;
}
}
/**
* Checks if the bytes being written exceed the current entry size.
*
* @see java.io.FilterOutputStream#write(byte[], int, int)
*/
@Override
public void write(byte[] b, int off, int len) throws IOException {
if (currentEntry != null && !currentEntry.isDirectory()) {
if (currentEntry.getSize() < currentFileSize + len) {
throw new IOException( "The current entry[" + currentEntry.getName() + "] size["
+ currentEntry.getSize() + "] is smaller than the bytes[" + ( currentFileSize + len )
+ "] being written." );
}
}
out.write( b, off, len );
bytesWritten += len;
if (currentEntry != null) {
currentFileSize += len;
}
}
/**
* Writes the next tar entry header on the stream
*
* @param entry
* @throws IOException
*/
public void putNextEntry(TarEntry entry) throws IOException {
closeCurrentEntry();
byte[] header = new byte[TarConstants.HEADER_BLOCK];
entry.writeEntryHeader( header );
write( header );
currentEntry = entry;
}
/**
* Closes the current tar entry
*
* @throws IOException
*/
protected void closeCurrentEntry() throws IOException {
if (currentEntry != null) {
if (currentEntry.getSize() > currentFileSize) {
throw new IOException( "The current entry[" + currentEntry.getName() + "] of size["
+ currentEntry.getSize() + "] has not been fully written." );
}
currentEntry = null;
currentFileSize = 0;
pad();
}
}
/**
* Pads the last content block
*
* @throws IOException
*/
protected void pad() throws IOException {
if (bytesWritten > 0) {
int extra = (int) ( bytesWritten % TarConstants.DATA_BLOCK );
if (extra > 0) {
write( new byte[TarConstants.DATA_BLOCK - extra] );
}
}
}
}
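
And the matching write side, as a sketch with invented file names; note the stream enforces that exactly getSize() bytes are written per entry:

```
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import org.kamranzafar.jtar.TarEntry;
import org.kamranzafar.jtar.TarOutputStream;

public class TarWriteExample {
    public static void main(String[] args) throws IOException {
        File src = new File("notes.txt");
        TarOutputStream tos = new TarOutputStream(new File("notes.tar"));
        // The header records src.length(); writing more than that throws,
        // and writing less makes closeCurrentEntry() throw.
        tos.putNextEntry(new TarEntry(src, src.getName()));
        BufferedInputStream in = new BufferedInputStream(new FileInputStream(src));
        byte[] buf = new byte[4096];
        int n;
        while ((n = in.read(buf)) != -1) {
            tos.write(buf, 0, n);
        }
        in.close();
        tos.close(); // pads the last block and appends the EOF record
    }
}
```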

View file

@ -0,0 +1,96 @@
/**
* Copyright 2012 Kamran Zafar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.kamranzafar.jtar;
import java.io.File;
/**
* @author Kamran
*
*/
public class TarUtils {
/**
* Determines the tar file size of the given folder/file path
*
* @param path
* @return
*/
public static long calculateTarSize(File path) {
return tarSize(path) + TarConstants.EOF_BLOCK;
}
private static long tarSize(File dir) {
long size = 0;
if (dir.isFile()) {
return entrySize(dir.length());
} else {
File[] subFiles = dir.listFiles();
if (subFiles != null && subFiles.length > 0) {
for (File file : subFiles) {
if (file.isFile()) {
size += entrySize(file.length());
} else {
size += tarSize(file);
}
}
} else {
// Empty folder header
return TarConstants.HEADER_BLOCK;
}
}
return size;
}
private static long entrySize(long fileSize) {
long size = 0;
size += TarConstants.HEADER_BLOCK; // Header
size += fileSize; // File size
long extra = size % TarConstants.DATA_BLOCK;
if (extra > 0) {
size += (TarConstants.DATA_BLOCK - extra); // pad
}
return size;
}
public static String trim(String s, char c) {
StringBuffer tmp = new StringBuffer(s);
// Delete from the ends without advancing an index, so runs of the trim
// character are fully removed (the old index-walking version skipped
// every other leading character after a delete).
while (tmp.length() > 0 && tmp.charAt(0) == c) {
tmp.deleteCharAt(0);
}
while (tmp.length() > 0 && tmp.charAt(tmp.length() - 1) == c) {
tmp.deleteCharAt(tmp.length() - 1);
}
return tmp.toString();
}
}
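
The size arithmetic above can be sanity-checked by hand. Assuming the usual tar block sizes for TarConstants (HEADER_BLOCK = 512, DATA_BLOCK = 512, EOF_BLOCK = 1024; these values are an assumption, not shown in this diff), a single 10-byte file costs one header block, one padded data block, and the EOF record:

```
import java.io.File;
import org.kamranzafar.jtar.TarUtils;

public class TarSizeCheck {
    public static void main(String[] args) {
        // Assuming TarConstants.HEADER_BLOCK == 512, DATA_BLOCK == 512 and
        // EOF_BLOCK == 1024: a 10-byte file costs
        //   512 (header) + 512 (padded payload) + 1024 (EOF) = 2048 bytes.
        File f = new File("ten-byte-file.bin"); // hypothetical 10-byte file
        System.out.println(TarUtils.calculateTarSize(f)); // expected: 2048
    }
}
```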

View file

@ -0,0 +1,400 @@
package org.kivy.android;
import java.net.Socket;
import java.net.InetSocketAddress;
import android.os.SystemClock;
import java.io.InputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.ArrayList;
import android.app.*;
import android.content.*;
import android.view.*;
import android.view.SurfaceView;
import android.app.Activity;
import android.content.Intent;
import android.util.Log;
import android.widget.Toast;
import android.os.AsyncTask;
import android.os.Bundle;
import android.os.PowerManager;
import android.graphics.PixelFormat;
import android.view.SurfaceHolder;
import android.content.Context;
import android.content.pm.ActivityInfo;
import android.content.pm.PackageManager;
import android.content.pm.ApplicationInfo;
import android.content.Intent;
import android.widget.ImageView;
import java.io.InputStream;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Color;
import android.widget.AbsoluteLayout;
import android.webkit.WebViewClient;
import android.webkit.WebView;
import org.kivy.android.PythonUtil;
import org.renpy.android.ResourceManager;
import org.renpy.android.AssetExtract;
public class PythonActivity extends Activity {
// This activity is modified from a mixture of the SDLActivity and
// PythonActivity in the SDL2 bootstrap, with all the SDL2
// specifics removed.
private static final String TAG = "PythonActivity";
public static PythonActivity mActivity = null;
/** If shared libraries (e.g. the native application) could not be loaded. */
public static boolean mBrokenLibraries;
protected static Thread mPythonThread;
private ResourceManager resourceManager = null;
private Bundle mMetaData = null;
private PowerManager.WakeLock mWakeLock = null;
public String getAppRoot() {
String app_root = getFilesDir().getAbsolutePath() + "/app";
return app_root;
}
public static void initialize() {
// The static nature of the singleton and Android quirkiness force us to initialize everything here
// Otherwise, when exiting the app and returning to it, these variables *keep* their pre-exit values
mBrokenLibraries = false;
}
@Override
protected void onCreate(Bundle savedInstanceState) {
Log.v(TAG, "My oncreate running");
resourceManager = new ResourceManager(this);
Log.v(TAG, "Ready to unpack");
File app_root_file = new File(getAppRoot());
unpackData("private", app_root_file);
Log.v(TAG, "About to do super onCreate");
super.onCreate(savedInstanceState);
Log.v(TAG, "Did super onCreate");
this.mActivity = this;
//this.showLoadingScreen();
Log.v("Python", "Device: " + android.os.Build.DEVICE);
Log.v("Python", "Model: " + android.os.Build.MODEL);
//Log.v(TAG, "Ready to unpack");
//new UnpackFilesTask().execute(getAppRoot());
PythonActivity.initialize();
// Load shared libraries
String errorMsgBrokenLib = "";
try {
loadLibraries();
} catch(UnsatisfiedLinkError e) {
System.err.println(e.getMessage());
mBrokenLibraries = true;
errorMsgBrokenLib = e.getMessage();
} catch(Exception e) {
System.err.println(e.getMessage());
mBrokenLibraries = true;
errorMsgBrokenLib = e.getMessage();
}
if (mBrokenLibraries)
{
AlertDialog.Builder dlgAlert = new AlertDialog.Builder(this);
dlgAlert.setMessage("An error occurred while trying to load the application libraries. Please try again and/or reinstall."
+ System.getProperty("line.separator")
+ System.getProperty("line.separator")
+ "Error: " + errorMsgBrokenLib);
dlgAlert.setTitle("Python Error");
dlgAlert.setPositiveButton("Exit",
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog,int id) {
// if this button is clicked, close current activity
PythonActivity.mActivity.finish();
}
});
dlgAlert.setCancelable(false);
dlgAlert.create().show();
return;
}
// Set up the Python environment
String app_root_dir = getAppRoot();
String mFilesDirectory = mActivity.getFilesDir().getAbsolutePath();
Log.v(TAG, "Setting env vars for start.c and Python to use");
PythonActivity.nativeSetEnv("ANDROID_ENTRYPOINT", "main.pyo");
PythonActivity.nativeSetEnv("ANDROID_ARGUMENT", app_root_dir);
PythonActivity.nativeSetEnv("ANDROID_APP_PATH", app_root_dir);
PythonActivity.nativeSetEnv("ANDROID_PRIVATE", mFilesDirectory);
PythonActivity.nativeSetEnv("ANDROID_UNPACK", app_root_dir);
PythonActivity.nativeSetEnv("PYTHONHOME", app_root_dir);
PythonActivity.nativeSetEnv("PYTHONPATH", app_root_dir + ":" + app_root_dir + "/lib");
PythonActivity.nativeSetEnv("PYTHONOPTIMIZE", "2");
try {
Log.v(TAG, "Access to our meta-data...");
mActivity.mMetaData = mActivity.getPackageManager().getApplicationInfo(
mActivity.getPackageName(), PackageManager.GET_META_DATA).metaData;
PowerManager pm = (PowerManager) mActivity.getSystemService(Context.POWER_SERVICE);
if ( mActivity.mMetaData.getInt("wakelock") == 1 ) {
mActivity.mWakeLock = pm.newWakeLock(PowerManager.SCREEN_BRIGHT_WAKE_LOCK, "Screen On");
mActivity.mWakeLock.acquire();
}
} catch (PackageManager.NameNotFoundException e) {
}
final Thread pythonThread = new Thread(new PythonMain(), "PythonThread");
PythonActivity.mPythonThread = pythonThread;
pythonThread.start();
}
@Override
public void onDestroy() {
Log.i("Destroy", "end of app");
super.onDestroy();
// make sure all child threads (python_thread) are stopped
android.os.Process.killProcess(android.os.Process.myPid());
}
public void loadLibraries() {
String app_root = getAppRoot();
File app_root_file = new File(app_root);
PythonUtil.loadLibraries(app_root_file);
}
public void recursiveDelete(File f) {
if (f.isDirectory()) {
for (File r : f.listFiles()) {
recursiveDelete(r);
}
}
f.delete();
}
/**
* Show an error using a toast. (Only makes sense from non-UI
* threads.)
*/
public void toastError(final String msg) {
final Activity thisActivity = this;
runOnUiThread(new Runnable () {
public void run() {
Toast.makeText(thisActivity, msg, Toast.LENGTH_LONG).show();
}
});
// Wait to show the error.
synchronized (this) {
try {
this.wait(1000);
} catch (InterruptedException e) {
}
}
}
public void unpackData(final String resource, File target) {
Log.v(TAG, "UNPACKING!!! " + resource + " " + target.getName());
// The version of data in memory and on disk.
String data_version = resourceManager.getString(resource + "_version");
String disk_version = null;
Log.v(TAG, "Data version is " + data_version);
// If no version, no unpacking is necessary.
if (data_version == null) {
return;
}
// Check the current disk version, if any.
String filesDir = target.getAbsolutePath();
String disk_version_fn = filesDir + "/" + resource + ".version";
try {
byte buf[] = new byte[64];
InputStream is = new FileInputStream(disk_version_fn);
int len = is.read(buf);
disk_version = new String(buf, 0, len);
is.close();
} catch (Exception e) {
disk_version = "";
}
// If the disk data is out of date, extract it and write the
// version file.
if (! data_version.equals(disk_version)) {
Log.v(TAG, "Extracting " + resource + " assets.");
recursiveDelete(target);
target.mkdirs();
AssetExtract ae = new AssetExtract(this);
if (!ae.extractTar(resource + ".mp3", target.getAbsolutePath())) {
toastError("Could not extract " + resource + " data.");
}
try {
// Write .nomedia.
new File(target, ".nomedia").createNewFile();
// Write version file.
FileOutputStream os = new FileOutputStream(disk_version_fn);
os.write(data_version.getBytes());
os.close();
} catch (Exception e) {
Log.w("python", e);
}
}
}
long lastBackClick = SystemClock.elapsedRealtime();
@Override
public boolean onKeyDown(int keyCode, KeyEvent event) {
// Require two presses within two seconds to exit: the first press is
// consumed and shows a toast; the second bubbles up to the default
// system behavior (probably exit the activity)
if (SystemClock.elapsedRealtime() - lastBackClick > 2000){
lastBackClick = SystemClock.elapsedRealtime();
Toast.makeText(this, "Click again to close the app",
Toast.LENGTH_LONG).show();
return true;
}
lastBackClick = SystemClock.elapsedRealtime();
return super.onKeyDown(keyCode, event);
}
//----------------------------------------------------------------------------
// Listener interface for onNewIntent
//
public interface NewIntentListener {
void onNewIntent(Intent intent);
}
private List<NewIntentListener> newIntentListeners = null;
public void registerNewIntentListener(NewIntentListener listener) {
if ( this.newIntentListeners == null )
this.newIntentListeners = Collections.synchronizedList(new ArrayList<NewIntentListener>());
this.newIntentListeners.add(listener);
}
public void unregisterNewIntentListener(NewIntentListener listener) {
if ( this.newIntentListeners == null )
return;
this.newIntentListeners.remove(listener);
}
@Override
protected void onNewIntent(Intent intent) {
if ( this.newIntentListeners == null )
return;
this.onResume();
synchronized ( this.newIntentListeners ) {
Iterator<NewIntentListener> iterator = this.newIntentListeners.iterator();
while ( iterator.hasNext() ) {
(iterator.next()).onNewIntent(intent);
}
}
}
//----------------------------------------------------------------------------
// Listener interface for onActivityResult
//
public interface ActivityResultListener {
void onActivityResult(int requestCode, int resultCode, Intent data);
}
private List<ActivityResultListener> activityResultListeners = null;
public void registerActivityResultListener(ActivityResultListener listener) {
if ( this.activityResultListeners == null )
this.activityResultListeners = Collections.synchronizedList(new ArrayList<ActivityResultListener>());
this.activityResultListeners.add(listener);
}
public void unregisterActivityResultListener(ActivityResultListener listener) {
if ( this.activityResultListeners == null )
return;
this.activityResultListeners.remove(listener);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent intent) {
if ( this.activityResultListeners == null )
return;
this.onResume();
synchronized ( this.activityResultListeners ) {
Iterator<ActivityResultListener> iterator = this.activityResultListeners.iterator();
while ( iterator.hasNext() )
(iterator.next()).onActivityResult(requestCode, resultCode, intent);
}
}
public static void start_service(String serviceTitle, String serviceDescription,
String pythonServiceArgument) {
Intent serviceIntent = new Intent(PythonActivity.mActivity, PythonService.class);
String argument = PythonActivity.mActivity.getFilesDir().getAbsolutePath();
String filesDirectory = argument;
String app_root_dir = PythonActivity.mActivity.getAppRoot();
serviceIntent.putExtra("androidPrivate", argument);
serviceIntent.putExtra("androidArgument", app_root_dir);
serviceIntent.putExtra("serviceEntrypoint", "service/main.pyo");
serviceIntent.putExtra("pythonName", "python");
serviceIntent.putExtra("pythonHome", app_root_dir);
serviceIntent.putExtra("pythonPath", app_root_dir + ":" + app_root_dir + "/lib");
serviceIntent.putExtra("serviceTitle", serviceTitle);
serviceIntent.putExtra("serviceDescription", serviceDescription);
serviceIntent.putExtra("pythonServiceArgument", pythonServiceArgument);
PythonActivity.mActivity.startService(serviceIntent);
}
public static void stop_service() {
Intent serviceIntent = new Intent(PythonActivity.mActivity, PythonService.class);
PythonActivity.mActivity.stopService(serviceIntent);
}
public static native void nativeSetEnv(String j_name, String j_value);
public static native int nativeInit(Object arguments);
}
class PythonMain implements Runnable {
@Override
public void run() {
PythonActivity.nativeInit(new String[0]);
}
}
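
For reference, the service helpers above can be driven from Java once onCreate() has set mActivity; the title and description strings here are placeholders, not values from this codebase:

```
// Sketch: start the bundled Python service (service/main.pyo) and stop it
// later. Valid only after PythonActivity.onCreate() has set mActivity.
PythonActivity.start_service(
        "LBRY",                  // serviceTitle (placeholder)
        "LBRY network service",  // serviceDescription (placeholder)
        "");                     // pythonServiceArgument, surfaced to the Python side

// ... and when the work is done:
PythonActivity.stop_service();
```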

View file

@ -0,0 +1,45 @@
package org.kivy.android.concurrency;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
* Created by ryan on 3/28/14.
*/
public class PythonEvent {
private final Lock lock = new ReentrantLock();
private final Condition cond = lock.newCondition();
private boolean flag = false;
public void set() {
lock.lock();
try {
flag = true;
cond.signalAll();
} finally {
lock.unlock();
}
}
public void wait_() throws InterruptedException {
lock.lock();
try {
while (!flag) {
cond.await();
}
} finally {
lock.unlock();
}
}
public void clear() {
lock.lock();
try {
flag = false;
cond.signalAll();
} finally {
lock.unlock();
}
}
}
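
PythonEvent mirrors Python's threading.Event on top of a ReentrantLock and Condition. A minimal sketch of the handshake (thread name and usage invented):

```
import org.kivy.android.concurrency.PythonEvent;

public class EventDemo {
    public static void main(String[] args) throws InterruptedException {
        final PythonEvent ready = new PythonEvent();
        Thread producer = new Thread(new Runnable() {
            public void run() {
                // ... prepare some shared state ...
                ready.set(); // flips the flag and wakes all waiters
            }
        }, "producer");
        producer.start();
        ready.wait_(); // blocks until set() has been called
        ready.clear(); // re-arm the event for the next round
    }
}
```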

View file

@ -0,0 +1,19 @@
package org.kivy.android.concurrency;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
* Created by ryan on 3/28/14.
*/
public class PythonLock {
private final Lock lock = new ReentrantLock();
public void acquire() {
lock.lock();
}
public void release() {
lock.unlock();
}
}

View file

@ -0,0 +1,257 @@
package org.renpy.android;
import java.util.List;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.hardware.Sensor;
import android.hardware.SensorEvent;
import android.hardware.SensorEventListener;
import android.hardware.SensorManager;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.net.wifi.ScanResult;
import android.net.wifi.WifiManager;
import android.os.Vibrator;
import android.view.View;
/**
* Methods that are expected to be called via JNI, to access the device's
* non-screen hardware. (For example, the vibration and accelerometer.)
*/
public class Hardware {
// The context.
static Context context;
static View view;
/**
* Vibrate for s seconds.
*/
public static void vibrate(double s) {
Vibrator v = (Vibrator) context
.getSystemService(Context.VIBRATOR_SERVICE);
if (v != null) {
v.vibrate((int) (1000 * s));
}
}
/**
* Get an Overview of all Hardware Sensors of an Android Device
*/
public static String getHardwareSensors() {
SensorManager sm = (SensorManager) context
.getSystemService(Context.SENSOR_SERVICE);
List<Sensor> allSensors = sm.getSensorList(Sensor.TYPE_ALL);
if (allSensors != null) {
String resultString = "";
for (Sensor s : allSensors) {
// Plain concatenation; String.format() with no format specifiers would
// throw if a sensor name happened to contain a '%'.
resultString += "Name=" + s.getName();
resultString += ",Vendor=" + s.getVendor();
resultString += ",Version=" + s.getVersion();
resultString += ",MaximumRange=" + s.getMaximumRange();
// XXX getMinDelay() is not available on Android 2.2
// resultString += ",MinDelay=" + s.getMinDelay();
resultString += ",Power=" + s.getPower();
resultString += ",Type=" + s.getType() + "\n";
}
return resultString;
}
return "";
}
/**
* Generic access to the three-axis hardware sensors: accelerometer,
* orientation, and magnetic field
*/
public static class generic3AxisSensor implements SensorEventListener {
private final SensorManager sSensorManager;
private final Sensor sSensor;
private final int sSensorType;
SensorEvent sSensorEvent;
public generic3AxisSensor(int sensorType) {
sSensorType = sensorType;
sSensorManager = (SensorManager) context
.getSystemService(Context.SENSOR_SERVICE);
sSensor = sSensorManager.getDefaultSensor(sSensorType);
}
public void onAccuracyChanged(Sensor sensor, int accuracy) {
}
public void onSensorChanged(SensorEvent event) {
sSensorEvent = event;
}
/**
* Enable or disable the Sensor by registering/unregistering
*/
public void changeStatus(boolean enable) {
if (enable) {
sSensorManager.registerListener(this, sSensor,
SensorManager.SENSOR_DELAY_NORMAL);
} else {
sSensorManager.unregisterListener(this, sSensor);
}
}
/**
* Read the Sensor
*/
public float[] readSensor() {
if (sSensorEvent != null) {
return sSensorEvent.values;
} else {
float rv[] = { 0f, 0f, 0f };
return rv;
}
}
}
public static generic3AxisSensor accelerometerSensor = null;
public static generic3AxisSensor orientationSensor = null;
public static generic3AxisSensor magneticFieldSensor = null;
/**
* functions for backward compatibility reasons
*/
public static void accelerometerEnable(boolean enable) {
if (accelerometerSensor == null)
accelerometerSensor = new generic3AxisSensor(
Sensor.TYPE_ACCELEROMETER);
accelerometerSensor.changeStatus(enable);
}
public static float[] accelerometerReading() {
float rv[] = { 0f, 0f, 0f };
if (accelerometerSensor == null)
return rv;
return (float[]) accelerometerSensor.readSensor();
}
public static void orientationSensorEnable(boolean enable) {
if (orientationSensor == null)
orientationSensor = new generic3AxisSensor(Sensor.TYPE_ORIENTATION);
orientationSensor.changeStatus(enable);
}
public static float[] orientationSensorReading() {
float rv[] = { 0f, 0f, 0f };
if (orientationSensor == null)
return rv;
return (float[]) orientationSensor.readSensor();
}
public static void magneticFieldSensorEnable(boolean enable) {
if (magneticFieldSensor == null)
magneticFieldSensor = new generic3AxisSensor(
Sensor.TYPE_MAGNETIC_FIELD);
magneticFieldSensor.changeStatus(enable);
}
public static float[] magneticFieldSensorReading() {
float rv[] = { 0f, 0f, 0f };
if (magneticFieldSensor == null)
return rv;
return (float[]) magneticFieldSensor.readSensor();
}
/**
* Scan WiFi networks
*/
static List<ScanResult> latestResult;
public static void enableWifiScanner() {
IntentFilter i = new IntentFilter();
i.addAction(WifiManager.SCAN_RESULTS_AVAILABLE_ACTION);
context.registerReceiver(new BroadcastReceiver() {
@Override
public void onReceive(Context c, Intent i) {
// Code to execute when SCAN_RESULTS_AVAILABLE_ACTION event
// occurs
WifiManager w = (WifiManager) c
.getSystemService(Context.WIFI_SERVICE);
latestResult = w.getScanResults(); // Returns a <list> of
// scanResults
}
}, i);
}
public static String scanWifi() {
// Now you can call this and it should execute the broadcastReceiver's
// onReceive()
WifiManager wm = (WifiManager) context
.getSystemService(Context.WIFI_SERVICE);
wm.startScan();
if (latestResult != null) {
String latestResultString = "";
for (ScanResult result : latestResult) {
latestResultString += String.format("%s\t%s\t%d\n",
result.SSID, result.BSSID, result.level);
}
return latestResultString;
}
return "";
}
/**
* network state
*/
public static boolean network_state = false;
/**
* Check network state directly
*
* (only one connection can be active at a given moment; this detects any
* network type)
*/
public static boolean checkNetwork() {
final ConnectivityManager conMgr = (ConnectivityManager) context
.getSystemService(Context.CONNECTIVITY_SERVICE);
final NetworkInfo activeNetwork = conMgr.getActiveNetworkInfo();
return activeNetwork != null && activeNetwork.isConnected();
}
/**
* To receive network state changes
*/
public static void registerNetworkCheck() {
IntentFilter i = new IntentFilter();
i.addAction(ConnectivityManager.CONNECTIVITY_ACTION);
context.registerReceiver(new BroadcastReceiver() {
@Override
public void onReceive(Context c, Intent i) {
network_state = checkNetwork();
}
}, i);
}
}
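
The sensor helpers above are polled rather than callback-driven from the caller's point of view. A sketch of that pattern, assuming the runtime has already assigned Hardware.context (as the bootstrap does):

```
import org.renpy.android.Hardware;

public class AccelerometerPoll {
    public static void main(String[] args) throws InterruptedException {
        // Requires Hardware.context to have been set by the bootstrap first.
        Hardware.accelerometerEnable(true);   // registers the SensorEventListener
        Thread.sleep(100);                    // give the first SensorEvent time to arrive
        float[] xyz = Hardware.accelerometerReading(); // {0,0,0} until an event lands
        System.out.printf("x=%.2f y=%.2f z=%.2f%n", xyz[0], xyz[1], xyz[2]);
        Hardware.accelerometerEnable(false);  // unregister when done
    }
}
```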

View file

@ -0,0 +1,93 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="{{ args.package }}"
android:versionCode="{{ args.numeric_version }}"
android:versionName="{{ args.version }}"
android:installLocation="auto">
<supports-screens
android:smallScreens="true"
android:normalScreens="true"
android:largeScreens="true"
android:anyDensity="true"
{% if args.min_sdk_version >= 9 %}
android:xlargeScreens="true"
{% endif %}
/>
<!-- Android 2.3.3 -->
<uses-sdk android:minSdkVersion="{{ args.min_sdk_version }}" android:targetSdkVersion="{{ android_api }}" />
<!-- Set permissions -->
{% for perm in args.permissions %}
{% if '.' in perm %}
<uses-permission android:name="{{ perm }}" />
{% else %}
<uses-permission android:name="android.permission.{{ perm }}" />
{% endif %}
{% endfor %}
{% if args.wakelock %}
<uses-permission android:name="android.permission.WAKE_LOCK" />
{% endif %}
{% if args.billing_pubkey %}
<uses-permission android:name="com.android.vending.BILLING" />
{% endif %}
<!-- Create a Java class extending SDLActivity and place it in a
directory under src matching the package, e.g.
src/com/gamemaker/game/MyGame.java
then replace "SDLActivity" with the name of your class (e.g. "MyGame")
in the XML below.
An example Java class can be found in README-android.txt
-->
<application android:label="@string/app_name"
android:icon="@drawable/icon"
android:allowBackup="true"
android:theme="@android:style/Theme.NoTitleBar{% if not args.window %}.Fullscreen{% endif %}"
android:hardwareAccelerated="true" >
{% for m in args.meta_data %}
<meta-data android:name="{{ m.split('=', 1)[0] }}" android:value="{{ m.split('=', 1)[-1] }}"/>{% endfor %}
<meta-data android:name="wakelock" android:value="{% if args.wakelock %}1{% else %}0{% endif %}"/>
<activity android:name="org.kivy.android.PythonActivity"
android:label="@string/app_name"
android:configChanges="keyboardHidden|orientation{% if args.min_sdk_version >= 13 %}|screenSize{% endif %}"
>
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
{%- if args.intent_filters -%}
{{- args.intent_filters -}}
{%- endif -%}
</activity>
{% if service %}
<service android:name="org.kivy.android.PythonService"
android:process=":pythonservice" />
{% endif %}
{% for name in service_names %}
<service android:name="{{ args.package }}.Service{{ name|capitalize }}"
android:process=":service_{{ name }}" />
{% endfor %}
{% if args.billing_pubkey %}
<service android:name="org.kivy.android.billing.BillingReceiver"
android:process=":pythonbilling" />
<receiver android:name="org.kivy.android.billing.BillingReceiver"
android:process=":pythonbillingreceiver">
<intent-filter>
<action android:name="com.android.vending.billing.IN_APP_NOTIFY" />
<action android:name="com.android.vending.billing.RESPONSE_CODE" />
<action android:name="com.android.vending.billing.PURCHASE_STATE_CHANGED" />
</intent-filter>
</receiver>
{% endif %}
</application>
</manifest>

View file

@ -0,0 +1,74 @@
package {{ args.package }};
import android.os.Binder;
import android.os.IBinder;
import android.content.Intent;
import android.content.Context;
import org.kivy.android.PythonService;
public class Service{{ name|capitalize }} extends PythonService {
/**
* Binder given to clients
*/
private final IBinder mBinder = new Service{{ name|capitalize }}Binder();
{% if sticky %}
/**
* {@inheritDoc}
*/
@Override
public int getStartType() {
return START_STICKY;
}
{% endif %}
{% if foreground %}
/**
* {@inheritDoc}
*/
@Override
public boolean getStartForeground() {
return true;
}
{% endif %}
public static void start(Context ctx, String pythonServiceArgument) {
String argument = ctx.getFilesDir().getAbsolutePath() + "/app";
Intent intent = new Intent(ctx, Service{{ name|capitalize }}.class);
intent.putExtra("androidPrivate", argument);
intent.putExtra("androidArgument", argument);
intent.putExtra("serviceEntrypoint", "{{ entrypoint }}");
intent.putExtra("serviceTitle", "{{ name|capitalize }}");
intent.putExtra("serviceDescription", "");
intent.putExtra("pythonName", "{{ name }}");
intent.putExtra("pythonHome", argument);
intent.putExtra("androidUnpack", argument);
intent.putExtra("pythonPath", argument + ":" + argument + "/lib");
intent.putExtra("pythonServiceArgument", pythonServiceArgument);
ctx.startService(intent);
}
public static void stop(Context ctx) {
Intent intent = new Intent(ctx, Service{{ name|capitalize }}.class);
ctx.stopService(intent);
}
/**
* {@inheritDoc}
*/
@Override
public IBinder onBind(Intent intent) {
return mBinder;
}
/**
* Class used for the client Binder. Because we know this service always
* runs in the same process as its clients, we don't need to deal with IPC.
*/
public class Service{{ name|capitalize }}Binder extends Binder {
Service{{ name|capitalize }} getService() {
// Return this instance of Service{{ name|capitalize }} so clients can call public methods
return Service{{ name|capitalize }}.this;
}
}
}

View file

@ -0,0 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string name="app_name">{{ args.name }}</string>
<string name="private_version">{{ private_version }}</string>
</resources>

68
android/service/main.py Normal file
View file

@ -0,0 +1,68 @@
from lbrynet.extras.daemon.Daemon import Daemon
from jnius import PythonJavaClass, autoclass, java_method
JAVA_NAMESPACE = 'org.kivy.android'
JNI_NAMESPACE = 'org/kivy/android'
_activity = autoclass(JAVA_NAMESPACE + '.PythonActivity').mActivity
_callbacks = {
'on_new_intent': [],
'on_activity_result': [],
}
class NewIntentListener(PythonJavaClass):
__javainterfaces__ = [JNI_NAMESPACE + '/PythonActivity$NewIntentListener']
__javacontext__ = 'app'
def __init__(self, callback, **kwargs):
super(NewIntentListener, self).__init__(**kwargs)
self.callback = callback
@java_method('(Landroid/content/Intent;)V')
def onNewIntent(self, intent):
self.callback(intent)
class ActivityResultListener(PythonJavaClass):
__javainterfaces__ = [JNI_NAMESPACE + '/PythonActivity$ActivityResultListener']
__javacontext__ = 'app'
def __init__(self, callback):
super(ActivityResultListener, self).__init__()
self.callback = callback
@java_method('(IILandroid/content/Intent;)V')
def onActivityResult(self, requestCode, resultCode, intent):
self.callback(requestCode, resultCode, intent)
def bind(**kwargs):
for event, callback in kwargs.items():
if event not in _callbacks:
raise Exception('Unknown {!r} event'.format(event))
elif event == 'on_new_intent':
listener = NewIntentListener(callback)
_activity.registerNewIntentListener(listener)
_callbacks[event].append(listener)
elif event == 'on_activity_result':
listener = ActivityResultListener(callback)
_activity.registerActivityResultListener(listener)
_callbacks[event].append(listener)
def unbind(**kwargs):
for event, callback in kwargs.items():
if event not in _callbacks:
raise Exception('Unknown {!r} event'.format(event))
else:
for listener in _callbacks[event][:]:
if listener.callback == callback:
_callbacks[event].remove(listener)
if event == 'on_new_intent':
_activity.unregisterNewIntentListener(listener)
elif event == 'on_activity_result':
_activity.unregisterActivityResultListener(listener)
print('YEAH! Done all the things!!! WOOOHOOO!!!!')

49
android/setup.py Normal file
View file

@ -0,0 +1,49 @@
from os.path import join, dirname, abspath
from pythonforandroid.toolchain import Bootstrap
from setuptools import setup, find_packages
from bootstrap import LBRYServiceBootstrap
Bootstrap.bootstraps = {
'lbry-service': LBRYServiceBootstrap()
}
setup(
name='lbryservice',
version='0.1',
author="LBRY Inc.",
author_email="hello@lbry.io",
url="https://lbry.io",
description="Android Service for LBRY Network.",
license='MIT',
python_requires='>=3.7',
packages=find_packages(),
package_data={'service': ['*.py']},
options={
'apk': {
'dist_name': 'lbry-service',
'bootstrap': 'lbry-service',
'package': 'io.lbry.service',
'permissions': ['INTERNET'],
'requirements': ','.join([
# needed by aiohttp
'multidict', 'yarl', 'async_timeout', 'chardet',
# minimum needed by torba:
'aiohttp', 'coincurve', 'pbkdf2', 'cryptography', 'attrs',
abspath(join(dirname(__file__), '..', '..', 'torba')),
# minimum needed by lbrynet
'aioupnp', 'appdirs', 'distro', 'base58', 'jsonrpc', 'protobuf',
'msgpack', 'jsonschema', 'ecdsa', 'pyyaml', 'docopt',
abspath(join(dirname(__file__), '..')),
'genericndkbuild', 'pyjnius', 'sqlite3', 'python3'
]),
'android-api': '26',
'ndk-api': '21',
'ndk-version': 'r17c',
'arch': 'armeabi-v7a',
'sdk-dir': '/home/lex/projects/android',
'ndk-dir': '/home/lex/projects/android/android-ndk-r17c/'
}
}
)

View file

@ -1,43 +0,0 @@
FROM debian:10-slim
ARG user=lbry
ARG projects_dir=/home/$user
ARG db_dir=/database
ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
RUN apt-get update && \
apt-get -y --no-install-recommends install \
wget \
automake libtool \
tar unzip \
build-essential \
pkg-config \
libleveldb-dev \
python3.7 \
python3-dev \
python3-pip \
python3-wheel \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
COPY . $projects_dir
RUN chown -R $user:$user $projects_dir
RUN mkdir -p $db_dir
RUN chown -R $user:$user $db_dir
USER $user
WORKDIR $projects_dir
RUN python3 -m pip install -U setuptools pip
RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf
VOLUME $db_dir
ENTRYPOINT ["python3", "scripts/dht_node.py"]

View file

@ -1,56 +0,0 @@
FROM debian:10-slim
ARG user=lbry
ARG db_dir=/database
ARG projects_dir=/home/$user
ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
RUN apt-get update && \
apt-get -y --no-install-recommends install \
wget \
tar unzip \
build-essential \
automake libtool \
pkg-config \
libleveldb-dev \
python3.7 \
python3-dev \
python3-pip \
python3-wheel \
python3-cffi \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
RUN mkdir -p $db_dir
RUN chown -R $user:$user $db_dir
COPY . $projects_dir
RUN chown -R $user:$user $projects_dir
USER $user
WORKDIR $projects_dir
RUN pip install uvloop
RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf
# entry point
ARG host=0.0.0.0
ARG tcp_port=50001
ARG daemon_url=http://lbry:lbry@localhost:9245/
VOLUME $db_dir
ENV TCP_PORT=$tcp_port
ENV HOST=$host
ENV DAEMON_URL=$daemon_url
ENV DB_DIRECTORY=$db_dir
ENV MAX_SESSIONS=1000000000
ENV MAX_SEND=1000000000000000000
ENV EVENT_LOOP_POLICY=uvloop
COPY ./docker/wallet_server_entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

View file

@ -1,45 +0,0 @@
FROM debian:10-slim
ARG user=lbry
ARG downloads_dir=/database
ARG projects_dir=/home/$user
ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
RUN apt-get update && \
apt-get -y --no-install-recommends install \
wget \
automake libtool \
tar unzip \
build-essential \
pkg-config \
libleveldb-dev \
python3.7 \
python3-dev \
python3-pip \
python3-wheel \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
RUN mkdir -p $downloads_dir
RUN chown -R $user:$user $downloads_dir
COPY . $projects_dir
RUN chown -R $user:$user $projects_dir
USER $user
WORKDIR $projects_dir
RUN pip install uvloop
RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf
# entry point
VOLUME $downloads_dir
COPY ./docker/webconf.yaml /webconf.yaml
ENTRYPOINT ["/home/lbry/.local/bin/lbrynet", "start", "--config=/webconf.yaml"]

View file

@ -1,9 +0,0 @@
### How to run with docker-compose
1. Edit the config file, then fix its permissions with
```
sudo chown -R 999:999 webconf.yaml
```
2. Start SDK with
```
docker-compose up -d
```

View file

@ -1,49 +0,0 @@
version: "3"
volumes:
wallet_server:
es01:
services:
wallet_server:
depends_on:
- es01
image: lbry/wallet-server:${WALLET_SERVER_TAG:-latest-release}
restart: always
network_mode: host
ports:
- "50001:50001" # rpc port
- "2112:2112" # uncomment to enable prometheus
volumes:
- "wallet_server:/database"
environment:
- DAEMON_URL=http://lbry:lbry@127.0.0.1:9245
- MAX_QUERY_WORKERS=4
- CACHE_MB=1024
- CACHE_ALL_TX_HASHES=
- CACHE_ALL_CLAIM_TXOS=
- MAX_SEND=1000000000000000000
- MAX_RECEIVE=1000000000000000000
- MAX_SESSIONS=100000
- HOST=0.0.0.0
- TCP_PORT=50001
- PROMETHEUS_PORT=2112
- FILTERING_CHANNEL_IDS=770bd7ecba84fd2f7607fb15aedd2b172c2e153f 95e5db68a3101df19763f3a5182e4b12ba393ee8
- BLOCKING_CHANNEL_IDS=dd687b357950f6f271999971f43c785e8067c3a9 06871aa438032244202840ec59a469b303257cad b4a2528f436eca1bf3bf3e10ff3f98c57bd6c4c6
es01:
image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0
container_name: es01
environment:
- node.name=es01
- discovery.type=single-node
- indices.query.bool.max_clause_count=8192
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms4g -Xmx4g" # no more than 32, remember to disable swap
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- es01:/usr/share/elasticsearch/data
ports:
- 127.0.0.1:9200:9200

View file

@ -1,9 +0,0 @@
version: '3'
services:
websdk:
image: vshyba/websdk
ports:
- '5279:5279'
- '5280:5280'
volumes:
- ./webconf.yaml:/webconf.yaml

View file

@ -1,7 +0,0 @@
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "$DIR/../.." ## make sure we're in the right place. Docker Hub screws this up sometimes
echo "docker build dir: $(pwd)"
docker build --build-arg DOCKER_TAG=$DOCKER_TAG --build-arg DOCKER_COMMIT=$SOURCE_COMMIT -f $DOCKERFILE_PATH -t $IMAGE_NAME .

View file

@ -1,11 +0,0 @@
# requires powershell and .NET 4+. see https://chocolatey.org/install for more info.
$chocoVersion = powershell choco -v
if(-not($chocoVersion)){
Write-Output "Chocolatey is not installed, installing now"
Write-Output "IF YOU KEEP GETTING THIS MESSAGE ON EVERY BUILD, TRY RESTARTING THE GITLAB RUNNER SO IT GETS CHOCO INTO IT'S ENV"
Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
}
else{
Write-Output "Chocolatey version $chocoVersion is already installed"
}

View file

@ -1,44 +0,0 @@
import sys
import os
import re
import logging
import lbry.build_info as build_info_mod
log = logging.getLogger()
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
def _check_and_set(d: dict, key: str, value: str):
try:
d[key]
except KeyError:
raise Exception(f"{key} var does not exist in {build_info_mod.__file__}")
d[key] = value
def main():
build_info = {item: build_info_mod.__dict__[item] for item in dir(build_info_mod) if not item.startswith("__")}
commit_hash = os.getenv('DOCKER_COMMIT', os.getenv('GITHUB_SHA'))
if commit_hash is None:
raise ValueError("Commit hash not found in env vars")
_check_and_set(build_info, "COMMIT_HASH", commit_hash[:6])
docker_tag = os.getenv('DOCKER_TAG')
if docker_tag:
_check_and_set(build_info, "DOCKER_TAG", docker_tag)
_check_and_set(build_info, "BUILD", "docker")
else:
if re.match(r'refs/tags/v\d+\.\d+\.\d+$', str(os.getenv('GITHUB_REF'))):
_check_and_set(build_info, "BUILD", "release")
else:
_check_and_set(build_info, "BUILD", "qa")
log.debug("build info: %s", ", ".join([f"{k}={v}" for k, v in build_info.items()]))
with open(build_info_mod.__file__, 'w') as f:
f.write("\n".join([f"{k} = \"{v}\"" for k, v in build_info.items()]) + "\n")
if __name__ == '__main__':
sys.exit(main())

View file

@ -1,25 +0,0 @@
#!/bin/bash
# entrypoint for wallet server Docker image
set -euo pipefail
SNAPSHOT_URL="${SNAPSHOT_URL:-}" #off by default. latest snapshot at https://lbry.com/snapshot/wallet
if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/lbry-leveldb ]]; then
files="$(ls)"
echo "Downloading wallet snapshot from $SNAPSHOT_URL"
wget --no-verbose --trust-server-names --content-disposition "$SNAPSHOT_URL"
echo "Extracting snapshot..."
filename="$(grep -vf <(echo "$files") <(ls))" # finds the file that was not there before
case "$filename" in
*.tgz|*.tar.gz|*.tar.bz2 ) tar xvf "$filename" --directory /database ;;
*.zip ) unzip "$filename" -d /database ;;
* ) echo "Don't know how to extract ${filename}. SNAPSHOT COULD NOT BE LOADED" && exit 1 ;;
esac
rm "$filename"
fi
/home/lbry/.local/bin/lbry-hub-elastic-sync
echo 'starting server'
/home/lbry/.local/bin/lbry-hub "$@"

View file

@ -1,9 +0,0 @@
allowed_origin: "*"
max_key_fee: "0.0 USD"
save_files: false
save_blobs: false
streaming_server: "0.0.0.0:5280"
api: "0.0.0.0:5279"
data_dir: /tmp
download_dir: /tmp
wallet_dir: /tmp

307
docs/404.html Normal file
View file

@ -0,0 +1,307 @@
<!DOCTYPE html>
<html lang="en" class="no-js">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width,initial-scale=1">
<meta http-equiv="x-ua-compatible" content="ie=edge">
<meta name="lang:clipboard.copy" content="Copy to clipboard">
<meta name="lang:clipboard.copied" content="Copied to clipboard">
<meta name="lang:search.language" content="en">
<meta name="lang:search.pipeline.stopwords" content="True">
<meta name="lang:search.pipeline.trimmer" content="True">
<meta name="lang:search.result.none" content="No matching documents">
<meta name="lang:search.result.one" content="1 matching document">
<meta name="lang:search.result.other" content="# matching documents">
<meta name="lang:search.tokenizer" content="[\s\-]+">
<link rel="shortcut icon" href="/assets/images/favicon.png">
<meta name="generator" content="mkdocs-0.17.3, mkdocs-material-2.7.0">
<title>LBRY</title>
<link rel="stylesheet" href="/assets/stylesheets/application.78aab2dc.css">
<link rel="stylesheet" href="/assets/stylesheets/application-palette.6079476c.css">
<script src="/assets/javascripts/modernizr.1aa3b519.js"></script>
<link href="https://fonts.gstatic.com" rel="preconnect" crossorigin>
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,400i,700|Roboto+Mono">
<style>body,input{font-family:"Roboto","Helvetica Neue",Helvetica,Arial,sans-serif}code,kbd,pre{font-family:"Roboto Mono","Courier New",Courier,monospace}</style>
<link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons">
</head>
<body dir="ltr" data-md-color-primary="teal" data-md-color-accent="green">
<svg class="md-svg">
<defs>
<svg xmlns="http://www.w3.org/2000/svg" width="416" height="448"
viewBox="0 0 416 448" id="github">
<path fill="currentColor" d="M160 304q0 10-3.125 20.5t-10.75 19-18.125
8.5-18.125-8.5-10.75-19-3.125-20.5 3.125-20.5 10.75-19 18.125-8.5
18.125 8.5 10.75 19 3.125 20.5zM320 304q0 10-3.125 20.5t-10.75
19-18.125 8.5-18.125-8.5-10.75-19-3.125-20.5 3.125-20.5 10.75-19
18.125-8.5 18.125 8.5 10.75 19 3.125 20.5zM360
304q0-30-17.25-51t-46.75-21q-10.25 0-48.75 5.25-17.75 2.75-39.25
2.75t-39.25-2.75q-38-5.25-48.75-5.25-29.5 0-46.75 21t-17.25 51q0 22 8
38.375t20.25 25.75 30.5 15 35 7.375 37.25 1.75h42q20.5 0
37.25-1.75t35-7.375 30.5-15 20.25-25.75 8-38.375zM416 260q0 51.75-15.25
82.75-9.5 19.25-26.375 33.25t-35.25 21.5-42.5 11.875-42.875 5.5-41.75
1.125q-19.5 0-35.5-0.75t-36.875-3.125-38.125-7.5-34.25-12.875-30.25-20.25-21.5-28.75q-15.5-30.75-15.5-82.75
0-59.25 34-99-6.75-20.5-6.75-42.5 0-29 12.75-54.5 27 0 47.5 9.875t47.25
30.875q36.75-8.75 77.25-8.75 37 0 70 8 26.25-20.5
46.75-30.25t47.25-9.75q12.75 25.5 12.75 54.5 0 21.75-6.75 42 34 40 34
99.5z" />
</svg>
</defs>
</svg>
<input class="md-toggle" data-md-toggle="drawer" type="checkbox" id="drawer">
<input class="md-toggle" data-md-toggle="search" type="checkbox" id="search">
<label class="md-overlay" data-md-component="overlay" for="drawer"></label>
<header class="md-header" data-md-component="header">
<nav class="md-header-nav md-grid">
<div class="md-flex">
<div class="md-flex__cell md-flex__cell--shrink">
<a href="/" title="LBRY" class="md-header-nav__button md-logo">
<img src="https://s3.amazonaws.com/files.lbry.io/logo-square-white-bookonly.png" alt="LBRY logo" width="24" height="24">
</a>
</div>
<div class="md-flex__cell md-flex__cell--shrink">
<label class="md-icon md-icon--menu md-header-nav__button" for="drawer"></label>
</div>
<div class="md-flex__cell md-flex__cell--stretch">
<div class="md-flex__ellipsis md-header-nav__title" data-md-component="title">
<span class="md-header-nav__topic">
LBRY
</span>
<span class="md-header-nav__topic">
</span>
</div>
</div>
<div class="md-flex__cell md-flex__cell--shrink">
<label class="md-icon md-icon--search md-header-nav__button" for="search"></label>
<div class="md-search" data-md-component="search" role="dialog">
<label class="md-search__overlay" for="search"></label>
<div class="md-search__inner" role="search">
<form class="md-search__form" name="search">
<input type="text" class="md-search__input" name="query" placeholder="Search" autocapitalize="off" autocorrect="off" autocomplete="off" spellcheck="false" data-md-component="query" data-md-state="active">
<label class="md-icon md-search__icon" for="search"></label>
<button type="reset" class="md-icon md-search__icon" data-md-component="reset" tabindex="-1">
&#xE5CD;
</button>
</form>
<div class="md-search__output">
<div class="md-search__scrollwrap" data-md-scrollfix>
<div class="md-search-result" data-md-component="result">
<div class="md-search-result__meta">
Type to start searching
</div>
<ol class="md-search-result__list"></ol>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="md-flex__cell md-flex__cell--shrink">
<div class="md-header-nav__source">
<a href="https://github.com/lbryio/lbry/" title="Go to repository" class="md-source" data-md-source="github">
<div class="md-source__icon">
<svg viewBox="0 0 24 24" width="24" height="24">
<use xlink:href="#github" width="24" height="24"></use>
</svg>
</div>
<div class="md-source__repository">
GitHub
</div>
</a>
</div>
</div>
</div>
</nav>
</header>
<div class="md-container">
<main class="md-main">
<div class="md-main__inner md-grid" data-md-component="container">
<div class="md-sidebar md-sidebar--primary" data-md-component="navigation">
<div class="md-sidebar__scrollwrap">
<div class="md-sidebar__inner">
<nav class="md-nav md-nav--primary" data-md-level="0">
<label class="md-nav__title md-nav__title--site" for="drawer">
<span class="md-nav__button md-logo">
<img src="https://s3.amazonaws.com/files.lbry.io/logo-square-white-bookonly.png" alt="LBRY logo" width="48" height="48">
</span>
LBRY
</label>
<div class="md-nav__source">
<a href="https://github.com/lbryio/lbry/" title="Go to repository" class="md-source" data-md-source="github">
<div class="md-source__icon">
<svg viewBox="0 0 24 24" width="24" height="24">
<use xlink:href="#github" width="24" height="24"></use>
</svg>
</div>
<div class="md-source__repository">
GitHub
</div>
</a>
</div>
<ul class="md-nav__list" data-md-scrollfix>
<li class="md-nav__item">
<a href="/" title="API" class="md-nav__link">
API
</a>
</li>
<li class="md-nav__item">
<a href="/cli/" title="CLI" class="md-nav__link">
CLI
</a>
</li>
</ul>
</nav>
</div>
</div>
</div>
<div class="md-content">
<article class="md-content__inner md-typeset">
<h1>404 - Not found</h1>
</article>
</div>
</div>
</main>
<footer class="md-footer">
<div class="md-footer-meta md-typeset">
<div class="md-footer-meta__inner md-grid">
<div class="md-footer-copyright">
powered by
<a href="http://www.mkdocs.org">MkDocs</a>
and
<a href="https://squidfunk.github.io/mkdocs-material/">
Material for MkDocs</a>
</div>
</div>
</div>
</footer>
</div>
<script src="/assets/javascripts/application.8eb9be28.js"></script>
<script>app.initialize({version:"0.17.3",url:{base:""}})</script>
<script>!function(e,a,t,n,o,c,i){e.GoogleAnalyticsObject=o,e.ga=e.ga||function(){(e.ga.q=e.ga.q||[]).push(arguments)},e.ga.l=1*new Date,c=a.createElement(t),i=a.getElementsByTagName(t)[0],c.async=1,c.src="https://www.google-analytics.com/analytics.js",i.parentNode.insertBefore(c,i)}(window,document,"script",0,"ga"),ga("create","UA-60403362-1","auto"),ga("set","anonymizeIp",!0),ga("send","pageview");var links=document.getElementsByTagName("a");if(Array.prototype.map.call(links,function(e){e.host!=document.location.host&&e.addEventListener("click",function(){var a=e.getAttribute("data-md-action")||"follow";ga("send","event","outbound",a,e.href)})}),document.forms.search){var query=document.forms.search.query;query.addEventListener("blur",function(){if(this.value){var e=document.location.pathname;ga("send","pageview",e+"?q="+this.value)}})}</script>
</body>
</html>

File diff suppressed because one or more lines are too long

Binary file not shown.

@ -0,0 +1,20 @@
<svg xmlns="http://www.w3.org/2000/svg" width="352" height="448"
viewBox="0 0 352 448" id="bitbucket">
<path fill="currentColor" d="M203.75 214.75q2 15.75-12.625 25.25t-27.875
1.5q-9.75-4.25-13.375-14.5t-0.125-20.5 13-14.5q9-4.5 18.125-3t16 8.875
6.875 16.875zM231.5 209.5q-3.5-26.75-28.25-41t-49.25-3.25q-15.75
7-25.125 22.125t-8.625 32.375q1 22.75 19.375 38.75t41.375 14q22.75-2
38-21t12.5-42zM291.25
74q-5-6.75-14-11.125t-14.5-5.5-17.75-3.125q-72.75-11.75-141.5 0.5-10.75
1.75-16.5 3t-13.75 5.5-12.5 10.75q7.5 7 19 11.375t18.375 5.5 21.875
2.875q57 7.25 112 0.25 15.75-2 22.375-3t18.125-5.375 18.75-11.625zM305.5
332.75q-2 6.5-3.875 19.125t-3.5 21-7.125 17.5-14.5 14.125q-21.5
12-47.375 17.875t-50.5 5.5-50.375-4.625q-11.5-2-20.375-4.5t-19.125-6.75-18.25-10.875-13-15.375q-6.25-24-14.25-73l1.5-4
4.5-2.25q55.75 37 126.625 37t126.875-37q5.25 1.5 6 5.75t-1.25 11.25-2
9.25zM350.75 92.5q-6.5 41.75-27.75 163.75-1.25 7.5-6.75 14t-10.875
10-13.625 7.75q-63 31.5-152.5
22-62-6.75-98.5-34.75-3.75-3-6.375-6.625t-4.25-8.75-2.25-8.5-1.5-9.875-1.375-8.75q-2.25-12.5-6.625-37.5t-7-40.375-5.875-36.875-5.5-39.5q0.75-6.5
4.375-12.125t7.875-9.375 11.25-7.5 11.5-5.625 12-4.625q31.25-11.5
78.25-16 94.75-9.25 169 12.5 38.75 11.5 53.75 30.5 4 5 4.125
12.75t-1.375 13.5z" />
</svg>

@ -0,0 +1,18 @@
<svg xmlns="http://www.w3.org/2000/svg" width="416" height="448"
viewBox="0 0 416 448" id="github">
<path fill="currentColor" d="M160 304q0 10-3.125 20.5t-10.75 19-18.125
8.5-18.125-8.5-10.75-19-3.125-20.5 3.125-20.5 10.75-19 18.125-8.5
18.125 8.5 10.75 19 3.125 20.5zM320 304q0 10-3.125 20.5t-10.75
19-18.125 8.5-18.125-8.5-10.75-19-3.125-20.5 3.125-20.5 10.75-19
18.125-8.5 18.125 8.5 10.75 19 3.125 20.5zM360
304q0-30-17.25-51t-46.75-21q-10.25 0-48.75 5.25-17.75 2.75-39.25
2.75t-39.25-2.75q-38-5.25-48.75-5.25-29.5 0-46.75 21t-17.25 51q0 22 8
38.375t20.25 25.75 30.5 15 35 7.375 37.25 1.75h42q20.5 0
37.25-1.75t35-7.375 30.5-15 20.25-25.75 8-38.375zM416 260q0 51.75-15.25
82.75-9.5 19.25-26.375 33.25t-35.25 21.5-42.5 11.875-42.875 5.5-41.75
1.125q-19.5 0-35.5-0.75t-36.875-3.125-38.125-7.5-34.25-12.875-30.25-20.25-21.5-28.75q-15.5-30.75-15.5-82.75
0-59.25 34-99-6.75-20.5-6.75-42.5 0-29 12.75-54.5 27 0 47.5 9.875t47.25
30.875q36.75-8.75 77.25-8.75 37 0 70 8 26.25-20.5
46.75-30.25t47.25-9.75q12.75 25.5 12.75 54.5 0 21.75-6.75 42 34 40 34
99.5z" />
</svg>

@ -0,0 +1,38 @@
<svg xmlns="http://www.w3.org/2000/svg" width="500" height="500"
viewBox="0 0 500 500" id="gitlab">
<g transform="translate(156.197863, 1.160267)">
<path fill="currentColor"
d="M93.667,473.347L93.667,473.347l90.684-279.097H2.983L93.667,
473.347L93.667,473.347z" />
</g>
<g transform="translate(28.531199, 1.160800)" opacity="0.7">
<path fill="currentColor"
d="M221.333,473.345L130.649,194.25H3.557L221.333,473.345L221.333,
473.345z" />
</g>
<g transform="translate(0.088533, 0.255867)" opacity="0.5">
<path fill="currentColor"
d="M32,195.155L32,195.155L4.441,279.97c-2.513,7.735,0.24,16.21,6.821,
20.99l238.514,173.29 L32,195.155L32,195.155z" />
</g>
<g transform="translate(29.421866, 280.255593)">
<path fill="currentColor"
d="M2.667-84.844h127.092L75.14-252.942c-2.811-8.649-15.047-8.649-17.856,
0L2.667-84.844 L2.667-84.844z" />
</g>
<g transform="translate(247.197860, 1.160800)" opacity="0.7">
<path fill="currentColor"
d="M2.667,473.345L93.351,194.25h127.092L2.667,473.345L2.667,
473.345z" />
</g>
<g transform="translate(246.307061, 0.255867)" opacity="0.5">
<path fill="currentColor"
d="M221.334,195.155L221.334,195.155l27.559,84.815c2.514,7.735-0.24,
16.21-6.821,20.99 L3.557,474.25L221.334,195.155L221.334,195.155z" />
</g>
<g transform="translate(336.973725, 280.255593)">
<path fill="currentColor"
d="M130.667-84.844H3.575l54.618-168.098c2.811-8.649,15.047-8.649,
17.856,0L130.667-84.844 L130.667-84.844z" />
</g>
</svg>


File diff suppressed because one or more lines are too long

@ -0,0 +1 @@
!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r,i,n;e.da=function(){this.pipeline.reset(),this.pipeline.add(e.da.trimmer,e.da.stopWordFilter,e.da.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.da.stemmer))},e.da.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA--",e.da.trimmer=e.trimmerSupport.generateTrimmer(e.da.wordCharacters),e.Pipeline.registerFunction(e.da.trimmer,"trimmer-da"),e.da.stemmer=(r=e.stemmerSupport.Among,i=e.stemmerSupport.SnowballProgram,n=new function(){var e,n,t,s=[new r("hed",-1,1),new r("ethed",0,1),new r("ered",-1,1),new r("e",-1,1),new r("erede",3,1),new r("ende",3,1),new r("erende",5,1),new r("ene",3,1),new r("erne",3,1),new r("ere",3,1),new r("en",-1,1),new r("heden",10,1),new r("eren",10,1),new r("er",-1,1),new r("heder",13,1),new r("erer",13,1),new r("s",-1,2),new r("heds",16,1),new r("es",16,1),new r("endes",18,1),new r("erendes",19,1),new r("enes",18,1),new r("ernes",18,1),new r("eres",18,1),new r("ens",16,1),new r("hedens",24,1),new r("erens",24,1),new r("ers",16,1),new r("ets",16,1),new r("erets",28,1),new r("et",-1,1),new r("eret",30,1)],o=[new r("gd",-1,-1),new r("dt",-1,-1),new r("gt",-1,-1),new r("kt",-1,-1)],a=[new r("ig",-1,1),new r("lig",0,1),new r("elig",1,1),new r("els",-1,1),new r("løst",-1,2)],d=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128],u=[239,254,42,3,0,0,0,0,0,0,0,0,0,0,0,0,16],c=new i;function l(){var e,r=c.limit-c.cursor;c.cursor>=n&&(e=c.limit_backward,c.limit_backward=n,c.ket=c.cursor,c.find_among_b(o,4)?(c.bra=c.cursor,c.limit_backward=e,c.cursor=c.limit-r,c.cursor>c.limit_backward&&(c.cursor--,c.bra=c.cursor,c.slice_del())):c.limit_backward=e)}this.setCurrent=function(e){c.setCurrent(e)},this.getCurrent=function(){return c.getCurrent()},this.stem=function(){var r,i=c.cursor;return function(){var r,i=c.cursor+3;if(n=c.limit,0<=i&&i<=c.limit){for(e=i;;){if(r=c.cursor,c.in_grouping(d,97,248)){c.cursor=r;break}if(c.cursor=r,r>=c.limit)return;c.cursor++}for(;!c.out_grouping(d,97,248);){if(c.cursor>=c.limit)return;c.cursor++}(n=c.cursor)<e&&(n=e)}}(),c.limit_backward=i,c.cursor=c.limit,function(){var e,r;if(c.cursor>=n&&(r=c.limit_backward,c.limit_backward=n,c.ket=c.cursor,e=c.find_among_b(s,32),c.limit_backward=r,e))switch(c.bra=c.cursor,e){case 1:c.slice_del();break;case 2:c.in_grouping_b(u,97,229)&&c.slice_del()}}(),c.cursor=c.limit,l(),c.cursor=c.limit,function(){var e,r,i,t=c.limit-c.cursor;if(c.ket=c.cursor,c.eq_s_b(2,"st")&&(c.bra=c.cursor,c.eq_s_b(2,"ig")&&c.slice_del()),c.cursor=c.limit-t,c.cursor>=n&&(r=c.limit_backward,c.limit_backward=n,c.ket=c.cursor,e=c.find_among_b(a,5),c.limit_backward=r,e))switch(c.bra=c.cursor,e){case 1:c.slice_del(),i=c.limit-c.cursor,l(),c.cursor=c.limit-i;break;case 2:c.slice_from("løs")}}(),c.cursor=c.limit,c.cursor>=n&&(r=c.limit_backward,c.limit_backward=n,c.ket=c.cursor,c.out_grouping_b(d,97,248)?(c.bra=c.cursor,t=c.slice_to(t),c.limit_backward=r,c.eq_v_b(t)&&c.slice_del()):c.limit_backward=r),!0}},function(e){return"function"==typeof e.update?e.update(function(e){return 
n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}),e.Pipeline.registerFunction(e.da.stemmer,"stemmer-da"),e.da.stopWordFilter=e.generateStopWordFilter("ad af alle alt anden at blev blive bliver da de dem den denne der deres det dette dig din disse dog du efter eller en end er et for fra ham han hans har havde have hende hendes her hos hun hvad hvis hvor i ikke ind jeg jer jo kunne man mange med meget men mig min mine mit mod ned noget nogle nu når og også om op os over på selv sig sin sine sit skal skulle som sådan thi til ud under var vi vil ville vor være været".split(" ")),e.Pipeline.registerFunction(e.da.stopWordFilter,"stopWordFilter-da")}});

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

@ -0,0 +1 @@
!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r="2"==e.version[0];e.jp=function(){this.pipeline.reset(),this.pipeline.add(e.jp.stopWordFilter,e.jp.stemmer),r?this.tokenizer=e.jp.tokenizer:(e.tokenizer&&(e.tokenizer=e.jp.tokenizer),this.tokenizerFn&&(this.tokenizerFn=e.jp.tokenizer))};var t=new e.TinySegmenter;e.jp.tokenizer=function(n){if(!arguments.length||null==n||null==n)return[];if(Array.isArray(n))return n.map(function(t){return r?new e.Token(t.toLowerCase()):t.toLowerCase()});for(var i=n.toString().toLowerCase().replace(/^\s+/,""),o=i.length-1;o>=0;o--)if(/\S/.test(i.charAt(o))){i=i.substring(0,o+1);break}return t.segment(i).filter(function(e){return!!e}).map(function(t){return r?new e.Token(t):t})},e.jp.stemmer=function(e){return e},e.Pipeline.registerFunction(e.jp.stemmer,"stemmer-jp"),e.jp.wordCharacters="一二三四五六七八九十百千万億兆一-龠々〆ヵヶぁ-んァ-ヴーア-ン゙a-zA-Z--0-9-",e.jp.stopWordFilter=function(t){if(-1===e.jp.stopWordFilter.stopWords.indexOf(r?t.toString():t))return t},e.jp.stopWordFilter=e.generateStopWordFilter("これ それ あれ この その あの ここ そこ あそこ こちら どこ だれ なに なん 何 私 貴方 貴方方 我々 私達 あの人 あのかた 彼女 彼 です あります おります います は が の に を で え から まで より も どの と し それで しかし".split(" ")),e.Pipeline.registerFunction(e.jp.stopWordFilter,"stopWordFilter-jp")}});

@ -0,0 +1 @@
!function(e,i){"function"==typeof define&&define.amd?define(i):"object"==typeof exports?module.exports=i():i()(e.lunr)}(this,function(){return function(e){e.multiLanguage=function(){for(var i=Array.prototype.slice.call(arguments),t=i.join("-"),r="",n=[],s=[],p=0;p<i.length;++p)"en"==i[p]?(r+="\\w",n.unshift(e.stopWordFilter),n.push(e.stemmer),s.push(e.stemmer)):(r+=e[i[p]].wordCharacters,n.unshift(e[i[p]].stopWordFilter),n.push(e[i[p]].stemmer),s.push(e[i[p]].stemmer));var o=e.trimmerSupport.generateTrimmer(r);return e.Pipeline.registerFunction(o,"lunr-multi-trimmer-"+t),n.unshift(o),function(){this.pipeline.reset(),this.pipeline.add.apply(this.pipeline,n),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add.apply(this.searchPipeline,s))}}}});

@ -0,0 +1 @@
!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r,n,i;e.no=function(){this.pipeline.reset(),this.pipeline.add(e.no.trimmer,e.no.stopWordFilter,e.no.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.no.stemmer))},e.no.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA--",e.no.trimmer=e.trimmerSupport.generateTrimmer(e.no.wordCharacters),e.Pipeline.registerFunction(e.no.trimmer,"trimmer-no"),e.no.stemmer=(r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,i=new function(){var e,i,t=[new r("a",-1,1),new r("e",-1,1),new r("ede",1,1),new r("ande",1,1),new r("ende",1,1),new r("ane",1,1),new r("ene",1,1),new r("hetene",6,1),new r("erte",1,3),new r("en",-1,1),new r("heten",9,1),new r("ar",-1,1),new r("er",-1,1),new r("heter",12,1),new r("s",-1,2),new r("as",14,1),new r("es",14,1),new r("edes",16,1),new r("endes",16,1),new r("enes",16,1),new r("hetenes",19,1),new r("ens",14,1),new r("hetens",21,1),new r("ers",14,1),new r("ets",14,1),new r("et",-1,1),new r("het",25,1),new r("ert",-1,3),new r("ast",-1,1)],o=[new r("dt",-1,-1),new r("vt",-1,-1)],s=[new r("leg",-1,1),new r("eleg",0,1),new r("ig",-1,1),new r("eig",2,1),new r("lig",2,1),new r("elig",4,1),new r("els",-1,1),new r("lov",-1,1),new r("elov",7,1),new r("slov",7,1),new r("hetslov",9,1)],a=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128],m=[119,125,149,1],l=new n;this.setCurrent=function(e){l.setCurrent(e)},this.getCurrent=function(){return l.getCurrent()},this.stem=function(){var r,n,u,d,c=l.cursor;return function(){var r,n=l.cursor+3;if(i=l.limit,0<=n||n<=l.limit){for(e=n;;){if(r=l.cursor,l.in_grouping(a,97,248)){l.cursor=r;break}if(r>=l.limit)return;l.cursor=r+1}for(;!l.out_grouping(a,97,248);){if(l.cursor>=l.limit)return;l.cursor++}(i=l.cursor)<e&&(i=e)}}(),l.limit_backward=c,l.cursor=l.limit,function(){var e,r,n;if(l.cursor>=i&&(r=l.limit_backward,l.limit_backward=i,l.ket=l.cursor,e=l.find_among_b(t,29),l.limit_backward=r,e))switch(l.bra=l.cursor,e){case 1:l.slice_del();break;case 2:n=l.limit-l.cursor,l.in_grouping_b(m,98,122)?l.slice_del():(l.cursor=l.limit-n,l.eq_s_b(1,"k")&&l.out_grouping_b(a,97,248)&&l.slice_del());break;case 3:l.slice_from("er")}}(),l.cursor=l.limit,n=l.limit-l.cursor,l.cursor>=i&&(r=l.limit_backward,l.limit_backward=i,l.ket=l.cursor,l.find_among_b(o,2)?(l.bra=l.cursor,l.limit_backward=r,l.cursor=l.limit-n,l.cursor>l.limit_backward&&(l.cursor--,l.bra=l.cursor,l.slice_del())):l.limit_backward=r),l.cursor=l.limit,l.cursor>=i&&(d=l.limit_backward,l.limit_backward=i,l.ket=l.cursor,(u=l.find_among_b(s,11))?(l.bra=l.cursor,l.limit_backward=d,1==u&&l.slice_del()):l.limit_backward=d),!0}},function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}),e.Pipeline.registerFunction(e.no.stemmer,"stemmer-no"),e.no.stopWordFilter=e.generateStopWordFilter("alle at av bare begge ble blei bli blir blitt både båe da de deg dei deim deira deires dem den denne der dere deres det dette di din disse ditt du dykk dykkar då eg ein eit eitt eller elles en enn er et ett etter for fordi fra før ha hadde han 
hans har hennar henne hennes her hjå ho hoe honom hoss hossen hun hva hvem hver hvilke hvilken hvis hvor hvordan hvorfor i ikke ikkje ikkje ingen ingi inkje inn inni ja jeg kan kom korleis korso kun kunne kva kvar kvarhelst kven kvi kvifor man mange me med medan meg meget mellom men mi min mine mitt mot mykje ned no noe noen noka noko nokon nokor nokre nå når og også om opp oss over på samme seg selv si si sia sidan siden sin sine sitt sjøl skal skulle slik so som som somme somt så sånn til um upp ut uten var vart varte ved vere verte vi vil ville vore vors vort vår være være vært å".split(" ")),e.Pipeline.registerFunction(e.no.stopWordFilter,"stopWordFilter-no")}});

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

@ -0,0 +1 @@
!function(r,t){"function"==typeof define&&define.amd?define(t):"object"==typeof exports?module.exports=t():t()(r.lunr)}(this,function(){return function(r){r.stemmerSupport={Among:function(r,t,i,s){if(this.toCharArray=function(r){for(var t=r.length,i=new Array(t),s=0;s<t;s++)i[s]=r.charCodeAt(s);return i},!r&&""!=r||!t&&0!=t||!i)throw"Bad Among initialisation: s:"+r+", substring_i: "+t+", result: "+i;this.s_size=r.length,this.s=this.toCharArray(r),this.substring_i=t,this.result=i,this.method=s},SnowballProgram:function(){var r;return{bra:0,ket:0,limit:0,cursor:0,limit_backward:0,setCurrent:function(t){r=t,this.cursor=0,this.limit=t.length,this.limit_backward=0,this.bra=this.cursor,this.ket=this.limit},getCurrent:function(){var t=r;return r=null,t},in_grouping:function(t,i,s){if(this.cursor<this.limit){var e=r.charCodeAt(this.cursor);if(e<=s&&e>=i&&t[(e-=i)>>3]&1<<(7&e))return this.cursor++,!0}return!1},in_grouping_b:function(t,i,s){if(this.cursor>this.limit_backward){var e=r.charCodeAt(this.cursor-1);if(e<=s&&e>=i&&t[(e-=i)>>3]&1<<(7&e))return this.cursor--,!0}return!1},out_grouping:function(t,i,s){if(this.cursor<this.limit){var e=r.charCodeAt(this.cursor);if(e>s||e<i)return this.cursor++,!0;if(!(t[(e-=i)>>3]&1<<(7&e)))return this.cursor++,!0}return!1},out_grouping_b:function(t,i,s){if(this.cursor>this.limit_backward){var e=r.charCodeAt(this.cursor-1);if(e>s||e<i)return this.cursor--,!0;if(!(t[(e-=i)>>3]&1<<(7&e)))return this.cursor--,!0}return!1},eq_s:function(t,i){if(this.limit-this.cursor<t)return!1;for(var s=0;s<t;s++)if(r.charCodeAt(this.cursor+s)!=i.charCodeAt(s))return!1;return this.cursor+=t,!0},eq_s_b:function(t,i){if(this.cursor-this.limit_backward<t)return!1;for(var s=0;s<t;s++)if(r.charCodeAt(this.cursor-t+s)!=i.charCodeAt(s))return!1;return this.cursor-=t,!0},find_among:function(t,i){for(var s=0,e=i,n=this.cursor,u=this.limit,o=0,h=0,c=!1;;){for(var a=s+(e-s>>1),f=0,l=o<h?o:h,_=t[a],m=l;m<_.s_size;m++){if(n+l==u){f=-1;break}if(f=r.charCodeAt(n+l)-_.s[m])break;l++}if(f<0?(e=a,h=l):(s=a,o=l),e-s<=1){if(s>0||e==s||c)break;c=!0}}for(;;){if(o>=(_=t[s]).s_size){if(this.cursor=n+_.s_size,!_.method)return _.result;var b=_.method();if(this.cursor=n+_.s_size,b)return _.result}if((s=_.substring_i)<0)return 0}},find_among_b:function(t,i){for(var s=0,e=i,n=this.cursor,u=this.limit_backward,o=0,h=0,c=!1;;){for(var a=s+(e-s>>1),f=0,l=o<h?o:h,_=(m=t[a]).s_size-1-l;_>=0;_--){if(n-l==u){f=-1;break}if(f=r.charCodeAt(n-1-l)-m.s[_])break;l++}if(f<0?(e=a,h=l):(s=a,o=l),e-s<=1){if(s>0||e==s||c)break;c=!0}}for(;;){var m;if(o>=(m=t[s]).s_size){if(this.cursor=n-m.s_size,!m.method)return m.result;var b=m.method();if(this.cursor=n-m.s_size,b)return m.result}if((s=m.substring_i)<0)return 0}},replace_s:function(t,i,s){var e=s.length-(i-t),n=r.substring(0,t),u=r.substring(i);return r=n+s+u,this.limit+=e,this.cursor>=i?this.cursor+=e:this.cursor>t&&(this.cursor=t),e},slice_check:function(){if(this.bra<0||this.bra>this.ket||this.ket>this.limit||this.limit>r.length)throw"faulty slice operation"},slice_from:function(r){this.slice_check(),this.replace_s(this.bra,this.ket,r)},slice_del:function(){this.slice_from("")},insert:function(r,t,i){var s=this.replace_s(r,t,i);r<=this.bra&&(this.bra+=s),r<=this.ket&&(this.ket+=s)},slice_to:function(){return this.slice_check(),r.substring(this.bra,this.ket)},eq_v_b:function(r){return this.eq_s_b(r.length,r)}}}},r.trimmerSupport={generateTrimmer:function(r){var t=new RegExp("^[^"+r+"]+"),i=new RegExp("[^"+r+"]+$");return function(r){return"function"==typeof 
r.update?r.update(function(r){return r.replace(t,"").replace(i,"")}):r.replace(t,"").replace(i,"")}}}}});

@ -0,0 +1 @@
!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r,n,t;e.sv=function(){this.pipeline.reset(),this.pipeline.add(e.sv.trimmer,e.sv.stopWordFilter,e.sv.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.sv.stemmer))},e.sv.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA--",e.sv.trimmer=e.trimmerSupport.generateTrimmer(e.sv.wordCharacters),e.Pipeline.registerFunction(e.sv.trimmer,"trimmer-sv"),e.sv.stemmer=(r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,t=new function(){var e,t,i=[new r("a",-1,1),new r("arna",0,1),new r("erna",0,1),new r("heterna",2,1),new r("orna",0,1),new r("ad",-1,1),new r("e",-1,1),new r("ade",6,1),new r("ande",6,1),new r("arne",6,1),new r("are",6,1),new r("aste",6,1),new r("en",-1,1),new r("anden",12,1),new r("aren",12,1),new r("heten",12,1),new r("ern",-1,1),new r("ar",-1,1),new r("er",-1,1),new r("heter",18,1),new r("or",-1,1),new r("s",-1,2),new r("as",21,1),new r("arnas",22,1),new r("ernas",22,1),new r("ornas",22,1),new r("es",21,1),new r("ades",26,1),new r("andes",26,1),new r("ens",21,1),new r("arens",29,1),new r("hetens",29,1),new r("erns",21,1),new r("at",-1,1),new r("andet",-1,1),new r("het",-1,1),new r("ast",-1,1)],s=[new r("dd",-1,-1),new r("gd",-1,-1),new r("nn",-1,-1),new r("dt",-1,-1),new r("gt",-1,-1),new r("kt",-1,-1),new r("tt",-1,-1)],a=[new r("ig",-1,1),new r("lig",0,1),new r("els",-1,1),new r("fullt",-1,3),new r("löst",-1,2)],o=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,24,0,32],u=[119,127,149],m=new n;this.setCurrent=function(e){m.setCurrent(e)},this.getCurrent=function(){return m.getCurrent()},this.stem=function(){var r,n=m.cursor;return function(){var r,n=m.cursor+3;if(t=m.limit,0<=n||n<=m.limit){for(e=n;;){if(r=m.cursor,m.in_grouping(o,97,246)){m.cursor=r;break}if(m.cursor=r,m.cursor>=m.limit)return;m.cursor++}for(;!m.out_grouping(o,97,246);){if(m.cursor>=m.limit)return;m.cursor++}(t=m.cursor)<e&&(t=e)}}(),m.limit_backward=n,m.cursor=m.limit,function(){var e,r=m.limit_backward;if(m.cursor>=t&&(m.limit_backward=t,m.cursor=m.limit,m.ket=m.cursor,e=m.find_among_b(i,37),m.limit_backward=r,e))switch(m.bra=m.cursor,e){case 1:m.slice_del();break;case 2:m.in_grouping_b(u,98,121)&&m.slice_del()}}(),m.cursor=m.limit,r=m.limit_backward,m.cursor>=t&&(m.limit_backward=t,m.cursor=m.limit,m.find_among_b(s,7)&&(m.cursor=m.limit,m.ket=m.cursor,m.cursor>m.limit_backward&&(m.bra=--m.cursor,m.slice_del())),m.limit_backward=r),m.cursor=m.limit,function(){var e,r;if(m.cursor>=t){if(r=m.limit_backward,m.limit_backward=t,m.cursor=m.limit,m.ket=m.cursor,e=m.find_among_b(a,5))switch(m.bra=m.cursor,e){case 1:m.slice_del();break;case 2:m.slice_from("lös");break;case 3:m.slice_from("full")}m.limit_backward=r}}(),!0}},function(e){return"function"==typeof e.update?e.update(function(e){return t.setCurrent(e),t.stem(),t.getCurrent()}):(t.setCurrent(e),t.stem(),t.getCurrent())}),e.Pipeline.registerFunction(e.sv.stemmer,"stemmer-sv"),e.sv.stopWordFilter=e.generateStopWordFilter("alla allt att av blev bli blir blivit de dem den denna deras dess dessa det detta dig din dina ditt du där då efter ej eller en er era ert ett från för ha hade han 
hans har henne hennes hon honom hur här i icke ingen inom inte jag ju kan kunde man med mellan men mig min mina mitt mot mycket ni nu när någon något några och om oss på samma sedan sig sin sina sitta själv skulle som så sådan sådana sådant till under upp ut utan vad var vara varför varit varje vars vart vem vi vid vilka vilkas vilken vilket vår våra vårt än är åt över".split(" ")),e.Pipeline.registerFunction(e.sv.stopWordFilter,"stopWordFilter-sv")}});

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

docs/cli/index.html Normal file (2589 lines)

File diff suppressed because it is too large

docs/index.html Normal file (2315 lines)

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

@ -4,22 +4,20 @@
share_usage_data: True
lbryum_servers:
- lbryumx1.lbry.com:50001
- lbryumx2.lbry.com:50001
- lbryumx4.lbry.com:50001
- lbryumx1.lbry.io:50001
- lbryumx2.lbry.io:50001
blockchain_name: lbrycrd_main
data_dir: /home/lbry/.lbrynet
download_directory: /home/lbry/downloads
save_blobs: true
save_files: false
delete_blobs_on_remove: True
dht_node_port: 4444
peer_port: 3333
use_upnp: true
use_upnp: True
#components_to_skip:
# - peer_protocol_server
# - hash_announcer
# - blob_server
# - dht

@ -1,2 +0,0 @@
__version__ = "0.113.0"
version = tuple(map(int, __version__.split('.'))) # pylint: disable=invalid-name
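The tuple form exists so that versions compare numerically rather than lexically; a quick illustration:

version = tuple(map(int, "0.113.0".split('.')))   # (0, 113, 0)
assert version > (0, 9, 0)                        # numeric comparison is correct
assert "0.113.0" < "0.9.0"                        # string comparison gets this wrong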

@ -1,6 +0,0 @@
from lbry.utils import get_lbry_hash_obj
MAX_BLOB_SIZE = 2 * 2 ** 20
# digest_size is in bytes, and blob hashes are hex encoded
BLOBHASH_LENGTH = get_lbry_hash_obj().digest_size * 2
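Blob hashes are hex-encoded sha384 digests (see the AbstractBlob docstring below), which is where the two constants come from. A quick check, assuming get_lbry_hash_obj wraps hashlib.sha384:

import hashlib

digest_size = hashlib.sha384().digest_size   # 48 bytes
assert digest_size * 2 == 96                 # hex encoding doubles it: BLOBHASH_LENGTH
assert 2 * 2 ** 20 == 2097152                # MAX_BLOB_SIZE is 2 MiB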

@ -1,366 +0,0 @@
import os
import re
import time
import asyncio
import binascii
import logging
import typing
import contextlib
from io import BytesIO
from cryptography.hazmat.primitives.ciphers import Cipher, modes
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.padding import PKCS7
from cryptography.hazmat.backends import default_backend
from lbry.utils import get_lbry_hash_obj
from lbry.error import DownloadCancelledError, InvalidBlobHashError, InvalidDataError
from lbry.blob import MAX_BLOB_SIZE, BLOBHASH_LENGTH
from lbry.blob.blob_info import BlobInfo
from lbry.blob.writer import HashBlobWriter
log = logging.getLogger(__name__)
HEXMATCH = re.compile("^[a-f0-9]+$")
BACKEND = default_backend()
def is_valid_blobhash(blobhash: str) -> bool:
"""Checks whether the blobhash is the correct length and contains only
valid characters (0-9, a-f)
@param blobhash: string, the blobhash to check
@return: True/False
"""
return len(blobhash) == BLOBHASH_LENGTH and HEXMATCH.match(blobhash) is not None
def encrypt_blob_bytes(key: bytes, iv: bytes, unencrypted: bytes) -> typing.Tuple[bytes, str]:
cipher = Cipher(AES(key), modes.CBC(iv), backend=BACKEND)
padder = PKCS7(AES.block_size).padder()
encryptor = cipher.encryptor()
encrypted = encryptor.update(padder.update(unencrypted) + padder.finalize()) + encryptor.finalize()
digest = get_lbry_hash_obj()
digest.update(encrypted)
return encrypted, digest.hexdigest()
def decrypt_blob_bytes(data: bytes, length: int, key: bytes, iv: bytes) -> bytes:
if len(data) != length:
raise ValueError("unexpected length")
cipher = Cipher(AES(key), modes.CBC(iv), backend=BACKEND)
unpadder = PKCS7(AES.block_size).unpadder()
decryptor = cipher.decryptor()
return unpadder.update(decryptor.update(data) + decryptor.finalize()) + unpadder.finalize()
class AbstractBlob:
"""
A chunk of data (up to 2MB) available on the network which is specified by a sha384 hash
This class is non-io specific
"""
__slots__ = [
'loop',
'blob_hash',
'length',
'blob_completed_callback',
'blob_directory',
'writers',
'verified',
'writing',
'readers',
'added_on',
'is_mine',
]
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False,
):
self.loop = loop
self.blob_hash = blob_hash
self.length = length
self.blob_completed_callback = blob_completed_callback
self.blob_directory = blob_directory
self.writers: typing.Dict[typing.Tuple[typing.Optional[str], typing.Optional[int]], HashBlobWriter] = {}
self.verified: asyncio.Event = asyncio.Event()
self.writing: asyncio.Event = asyncio.Event()
self.readers: typing.List[typing.BinaryIO] = []
self.added_on = added_on or time.time()
self.is_mine = is_mine
if not is_valid_blobhash(blob_hash):
raise InvalidBlobHashError(blob_hash)
def __del__(self):
if self.writers or self.readers:
log.warning("%s not closed before being garbage collected", self.blob_hash)
self.close()
@contextlib.contextmanager
def _reader_context(self) -> typing.ContextManager[typing.BinaryIO]:
raise NotImplementedError()
@contextlib.contextmanager
def reader_context(self) -> typing.ContextManager[typing.BinaryIO]:
if not self.is_readable():
raise OSError(f"{str(type(self))} not readable, {len(self.readers)} readers {len(self.writers)} writers")
with self._reader_context() as reader:
try:
self.readers.append(reader)
yield reader
finally:
if reader in self.readers:
self.readers.remove(reader)
def _write_blob(self, blob_bytes: bytes) -> asyncio.Task:
raise NotImplementedError()
def set_length(self, length) -> None:
if self.length is not None and length == self.length:
return
if self.length is None and 0 <= length <= MAX_BLOB_SIZE:
self.length = length
return
log.warning("Got an invalid length. Previous length: %s, Invalid length: %s", self.length, length)
def get_length(self) -> typing.Optional[int]:
return self.length
def get_is_verified(self) -> bool:
return self.verified.is_set()
def is_readable(self) -> bool:
return self.verified.is_set()
def is_writeable(self) -> bool:
return not self.writing.is_set()
def write_blob(self, blob_bytes: bytes):
if not self.is_writeable():
raise OSError("cannot open blob for writing")
try:
self.writing.set()
self._write_blob(blob_bytes)
finally:
self.writing.clear()
def close(self):
while self.writers:
_, writer = self.writers.popitem()
if writer and writer.finished and not writer.finished.done() and not self.loop.is_closed():
writer.finished.cancel()
while self.readers:
reader = self.readers.pop()
if reader:
reader.close()
def delete(self):
self.close()
self.verified.clear()
self.length = None
async def sendfile(self, writer: asyncio.StreamWriter) -> int:
"""
Read and send the file to the writer and return the number of bytes sent
"""
if not self.is_readable():
raise OSError('blob files cannot be read')
with self.reader_context() as handle:
try:
return await self.loop.sendfile(writer.transport, handle, count=self.get_length())
except (ConnectionError, BrokenPipeError, RuntimeError, OSError, AttributeError):
return -1
def decrypt(self, key: bytes, iv: bytes) -> bytes:
"""
Decrypt a BlobFile to plaintext bytes
"""
with self.reader_context() as reader:
return decrypt_blob_bytes(reader.read(), self.length, key, iv)
@classmethod
async def create_from_unencrypted(
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int, added_on: int, is_mine: bool,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None,
) -> BlobInfo:
"""
Create an encrypted BlobFile from plaintext bytes
"""
blob_bytes, blob_hash = encrypt_blob_bytes(key, iv, unencrypted)
length = len(blob_bytes)
blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir, added_on, is_mine)
writer = blob.get_blob_writer()
writer.write(blob_bytes)
await blob.verified.wait()
return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), added_on, blob_hash, is_mine)
def save_verified_blob(self, verified_bytes: bytes):
if self.verified.is_set():
return
def update_events(_):
self.verified.set()
self.writing.clear()
if self.is_writeable():
self.writing.set()
task = self._write_blob(verified_bytes)
task.add_done_callback(update_events)
if self.blob_completed_callback:
task.add_done_callback(lambda _: self.blob_completed_callback(self))
def get_blob_writer(self, peer_address: typing.Optional[str] = None,
peer_port: typing.Optional[int] = None) -> HashBlobWriter:
if (peer_address, peer_port) in self.writers and not self.writers[(peer_address, peer_port)].closed():
raise OSError(f"attempted to download blob twice from {peer_address}:{peer_port}")
fut = asyncio.Future()
writer = HashBlobWriter(self.blob_hash, self.get_length, fut)
self.writers[(peer_address, peer_port)] = writer
def remove_writer(_):
if (peer_address, peer_port) in self.writers:
del self.writers[(peer_address, peer_port)]
fut.add_done_callback(remove_writer)
def writer_finished_callback(finished: asyncio.Future):
try:
err = finished.exception()
if err:
raise err
verified_bytes = finished.result()
while self.writers:
_, other = self.writers.popitem()
if other is not writer:
other.close_handle()
self.save_verified_blob(verified_bytes)
except (InvalidBlobHashError, InvalidDataError) as error:
log.warning("writer error downloading %s: %s", self.blob_hash[:8], str(error))
except (DownloadCancelledError, asyncio.CancelledError, asyncio.TimeoutError):
pass
fut.add_done_callback(writer_finished_callback)
return writer
class BlobBuffer(AbstractBlob):
"""
An in-memory only blob
"""
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
):
self._verified_bytes: typing.Optional[BytesIO] = None
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)
@contextlib.contextmanager
def _reader_context(self) -> typing.ContextManager[typing.BinaryIO]:
if not self.is_readable():
raise OSError("cannot open blob for reading")
try:
yield self._verified_bytes
finally:
if self._verified_bytes:
self._verified_bytes.close()
self._verified_bytes = None
self.verified.clear()
def _write_blob(self, blob_bytes: bytes):
async def write():
if self._verified_bytes:
raise OSError("already have bytes for blob")
self._verified_bytes = BytesIO(blob_bytes)
return self.loop.create_task(write())
def delete(self):
if self._verified_bytes:
self._verified_bytes.close()
self._verified_bytes = None
return super().delete()
def __del__(self):
super().__del__()
if self._verified_bytes:
self.delete()
class BlobFile(AbstractBlob):
"""
A blob existing on the local file system
"""
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
):
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)
if not blob_directory or not os.path.isdir(blob_directory):
raise OSError(f"invalid blob directory '{blob_directory}'")
self.file_path = os.path.join(self.blob_directory, self.blob_hash)
if self.file_exists:
file_size = int(os.stat(self.file_path).st_size)
if length and length != file_size:
log.warning("expected %s to be %s bytes, file has %s", self.blob_hash, length, file_size)
self.delete()
else:
self.length = file_size
self.verified.set()
@property
def file_exists(self):
return os.path.isfile(self.file_path)
def is_writeable(self) -> bool:
return super().is_writeable() and not os.path.isfile(self.file_path)
def get_blob_writer(self, peer_address: typing.Optional[str] = None,
peer_port: typing.Optional[int] = None) -> HashBlobWriter:
if self.file_exists:
raise OSError(f"File already exists '{self.file_path}'")
return super().get_blob_writer(peer_address, peer_port)
@contextlib.contextmanager
def _reader_context(self) -> typing.ContextManager[typing.BinaryIO]:
handle = open(self.file_path, 'rb')
try:
yield handle
finally:
handle.close()
def _write_blob(self, blob_bytes: bytes):
def _write_blob():
with open(self.file_path, 'wb') as f:
f.write(blob_bytes)
async def write_blob():
await self.loop.run_in_executor(None, _write_blob)
return self.loop.create_task(write_blob())
def delete(self):
super().delete()
if os.path.isfile(self.file_path):
os.remove(self.file_path)
@classmethod
async def create_from_unencrypted(
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int, added_on: float, is_mine: bool,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None
) -> BlobInfo:
if not blob_dir or not os.path.isdir(blob_dir):
raise OSError(f"cannot create blob in directory: '{blob_dir}'")
return await super().create_from_unencrypted(
loop, blob_dir, key, iv, unencrypted, blob_num, added_on, is_mine, blob_completed_callback
)
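A round trip through the two helpers above shows the CBC/PKCS7 behavior: ciphertext is padded up to the 16-byte AES block size, the returned hash is the hex sha384 of the ciphertext, and decrypt_blob_bytes demands the exact ciphertext length. A sketch, assuming an AES-256 key and that the module is importable as lbry.blob.blob_file from the tree shown here:

import os
from lbry.blob.blob_file import encrypt_blob_bytes, decrypt_blob_bytes

key, iv = os.urandom(32), os.urandom(16)    # assumed sizes: AES-256 key, AES block IV
plaintext = b"hello lbry"

encrypted, blob_hash = encrypt_blob_bytes(key, iv, plaintext)
assert len(encrypted) % 16 == 0             # PKCS7 pads to the block size
assert len(blob_hash) == 96                 # hex sha384 of the ciphertext
assert decrypt_blob_bytes(encrypted, len(encrypted), key, iv) == plaintext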

@ -1,148 +0,0 @@
import os
import typing
import asyncio
import logging
from lbry.utils import LRUCacheWithMetrics
from lbry.blob.blob_file import is_valid_blobhash, BlobFile, BlobBuffer, AbstractBlob
from lbry.stream.descriptor import StreamDescriptor
from lbry.connection_manager import ConnectionManager
if typing.TYPE_CHECKING:
from lbry.conf import Config
from lbry.dht.protocol.data_store import DictDataStore
from lbry.extras.daemon.storage import SQLiteStorage
log = logging.getLogger(__name__)
class BlobManager:
def __init__(self, loop: asyncio.AbstractEventLoop, blob_dir: str, storage: 'SQLiteStorage', config: 'Config',
node_data_store: typing.Optional['DictDataStore'] = None):
"""
This class stores blobs on the hard disk
blob_dir - directory where blobs are stored
storage - SQLiteStorage object
"""
self.loop = loop
self.blob_dir = blob_dir
self.storage = storage
self._node_data_store = node_data_store
self.completed_blob_hashes: typing.Set[str] = set() if not self._node_data_store\
else self._node_data_store.completed_blobs
self.blobs: typing.Dict[str, AbstractBlob] = {}
self.config = config
self.decrypted_blob_lru_cache = None if not self.config.blob_lru_cache_size else LRUCacheWithMetrics(
self.config.blob_lru_cache_size)
self.connection_manager = ConnectionManager(loop)
def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None, is_mine: bool = False):
if self.config.save_blobs or (
is_valid_blobhash(blob_hash) and os.path.isfile(os.path.join(self.blob_dir, blob_hash))):
return BlobFile(
self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
)
return BlobBuffer(
self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
)
def get_blob(self, blob_hash, length: typing.Optional[int] = None, is_mine: bool = False):
if blob_hash in self.blobs:
if self.config.save_blobs and isinstance(self.blobs[blob_hash], BlobBuffer):
buffer = self.blobs.pop(blob_hash)
if blob_hash in self.completed_blob_hashes:
self.completed_blob_hashes.remove(blob_hash)
self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
if buffer.is_readable():
with buffer.reader_context() as reader:
self.blobs[blob_hash].write_blob(reader.read())
if length and self.blobs[blob_hash].length is None:
self.blobs[blob_hash].set_length(length)
else:
self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
return self.blobs[blob_hash]
def is_blob_verified(self, blob_hash: str, length: typing.Optional[int] = None) -> bool:
if not is_valid_blobhash(blob_hash):
raise ValueError(blob_hash)
if not os.path.isfile(os.path.join(self.blob_dir, blob_hash)):
return False
if blob_hash in self.blobs:
return self.blobs[blob_hash].get_is_verified()
return self._get_blob(blob_hash, length).get_is_verified()
async def setup(self) -> bool:
def get_files_in_blob_dir() -> typing.Set[str]:
if not self.blob_dir:
return set()
return {
item.name for item in os.scandir(self.blob_dir) if is_valid_blobhash(item.name)
}
in_blobfiles_dir = await self.loop.run_in_executor(None, get_files_in_blob_dir)
to_add = await self.storage.sync_missing_blobs(in_blobfiles_dir)
if to_add:
self.completed_blob_hashes.update(to_add)
# check blobs that aren't set as finished but were seen on disk
await self.ensure_completed_blobs_status(in_blobfiles_dir - to_add)
if self.config.track_bandwidth:
self.connection_manager.start()
return True
def stop(self):
self.connection_manager.stop()
while self.blobs:
_, blob = self.blobs.popitem()
blob.close()
self.completed_blob_hashes.clear()
def get_stream_descriptor(self, sd_hash):
return StreamDescriptor.from_stream_descriptor_blob(self.loop, self.blob_dir, self.get_blob(sd_hash))
def blob_completed(self, blob: AbstractBlob) -> asyncio.Task:
if blob.blob_hash is None:
raise Exception("Blob hash is None")
if not blob.length:
raise Exception("Blob has a length of 0")
if isinstance(blob, BlobFile):
if blob.blob_hash not in self.completed_blob_hashes:
self.completed_blob_hashes.add(blob.blob_hash)
return self.loop.create_task(self.storage.add_blobs(
(blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=True)
)
else:
return self.loop.create_task(self.storage.add_blobs(
(blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=False)
)
async def ensure_completed_blobs_status(self, blob_hashes: typing.Iterable[str]):
"""Ensures that completed blobs from a given list of blob hashes are set as 'finished' in the database."""
to_add = []
for blob_hash in blob_hashes:
if not self.is_blob_verified(blob_hash):
continue
blob = self.get_blob(blob_hash)
to_add.append((blob.blob_hash, blob.length, blob.added_on, blob.is_mine))
if len(to_add) > 500:
await self.storage.add_blobs(*to_add, finished=True)
to_add.clear()
return await self.storage.add_blobs(*to_add, finished=True)
def delete_blob(self, blob_hash: str):
if not is_valid_blobhash(blob_hash):
raise Exception("invalid blob hash to delete")
if blob_hash not in self.blobs:
if self.blob_dir and os.path.isfile(os.path.join(self.blob_dir, blob_hash)):
os.remove(os.path.join(self.blob_dir, blob_hash))
else:
self.blobs.pop(blob_hash).delete()
if blob_hash in self.completed_blob_hashes:
self.completed_blob_hashes.remove(blob_hash)
async def delete_blobs(self, blob_hashes: typing.List[str], delete_from_db: typing.Optional[bool] = True):
for blob_hash in blob_hashes:
self.delete_blob(blob_hash)
if delete_from_db:
await self.storage.delete_blobs_from_db(blob_hashes)
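ensure_completed_blobs_status flushes to the database every 500 rows instead of building one oversized statement; the batching pattern in isolation, with a hypothetical flush callback:

from typing import Callable, Iterable, List

def add_in_batches(rows: Iterable[tuple], flush: Callable[[List[tuple]], None],
                   batch_size: int = 500) -> None:
    to_add: List[tuple] = []
    for row in rows:
        to_add.append(row)
        if len(to_add) > batch_size:   # mirrors the len(to_add) > 500 check above
            flush(to_add)
            to_add.clear()
    flush(to_add)                      # final (possibly empty) batch, as above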

@ -1,77 +0,0 @@
import asyncio
import logging
log = logging.getLogger(__name__)
class DiskSpaceManager:
def __init__(self, config, db, blob_manager, cleaning_interval=30 * 60, analytics=None):
self.config = config
self.db = db
self.blob_manager = blob_manager
self.cleaning_interval = cleaning_interval
self.running = False
self.task = None
self.analytics = analytics
self._used_space_bytes = None
async def get_free_space_mb(self, is_network_blob=False):
limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
space_used_mb = await self.get_space_used_mb()
space_used_mb = space_used_mb['network_storage'] if is_network_blob else space_used_mb['content_storage']
return max(0, limit_mb - space_used_mb)
async def get_space_used_bytes(self):
self._used_space_bytes = await self.db.get_stored_blob_disk_usage()
return self._used_space_bytes
async def get_space_used_mb(self, cached=True):
cached = cached and self._used_space_bytes is not None
space_used_bytes = self._used_space_bytes if cached else await self.get_space_used_bytes()
return {key: int(value/1024.0/1024.0) for key, value in space_used_bytes.items()}
async def clean(self):
await self._clean(False)
await self._clean(True)
async def _clean(self, is_network_blob=False):
space_used_mb = await self.get_space_used_mb(cached=False)
if is_network_blob:
space_used_mb = space_used_mb['network_storage']
else:
space_used_mb = space_used_mb['content_storage'] + space_used_mb['private_storage']
storage_limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
if self.analytics:
asyncio.create_task(
self.analytics.send_disk_space_used(space_used_mb, storage_limit_mb, is_network_blob)
)
delete = []
available = storage_limit_mb - space_used_mb
# skip when content storage has no configured limit, or network storage is already under its limit
if (storage_limit_mb == 0) if not is_network_blob else (available >= 0):
return 0
for blob_hash, file_size, _ in await self.db.get_stored_blobs(is_mine=False, is_network_blob=is_network_blob):
delete.append(blob_hash)
available += int(file_size/1024.0/1024.0)
if available >= 0:
break
if delete:
await self.db.stop_all_files()
await self.blob_manager.delete_blobs(delete, delete_from_db=True)
self._used_space_bytes = None
return len(delete)
async def cleaning_loop(self):
while self.running:
await asyncio.sleep(self.cleaning_interval)
await self.clean()
async def start(self):
self.running = True
self.task = asyncio.create_task(self.cleaning_loop())
self.task.add_done_callback(lambda _: log.info("Stopping blob cleanup service."))
async def stop(self):
if self.running:
self.running = False
self.task.cancel()
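The cleanup accounting in _clean is easiest to see with numbers: with a 1000 MB limit and 1200 MB used, available starts at -200; deleting a 150 MB blob and then an 80 MB blob raises it to +30, so the loop stops after two deletions. The selection loop in isolation (the early return here is a simplification of the skip condition above):

def pick_blobs_to_delete(stored, limit_mb, used_mb):
    # stored: iterable of (blob_hash, file_size_in_bytes), oldest first
    available = limit_mb - used_mb
    delete = []
    if available >= 0:
        return delete                  # already under the limit
    for blob_hash, file_size in stored:
        delete.append(blob_hash)
        available += int(file_size / 1024.0 / 1024.0)
        if available >= 0:
            break
    return delete

# pick_blobs_to_delete([("a", 150 * 2**20), ("b", 80 * 2**20)], 1000, 1200) -> ["a", "b"]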

@ -1,255 +0,0 @@
import asyncio
import time
import logging
import typing
import binascii
from typing import Optional
from lbry.error import InvalidBlobHashError, InvalidDataError
from lbry.blob_exchange.serialization import BlobResponse, BlobRequest
from lbry.utils import cache_concurrent
if typing.TYPE_CHECKING:
from lbry.blob.blob_file import AbstractBlob
from lbry.blob.writer import HashBlobWriter
from lbry.connection_manager import ConnectionManager
log = logging.getLogger(__name__)
class BlobExchangeClientProtocol(asyncio.Protocol):
def __init__(self, loop: asyncio.AbstractEventLoop, peer_timeout: typing.Optional[float] = 10,
connection_manager: typing.Optional['ConnectionManager'] = None):
self.loop = loop
self.peer_port: typing.Optional[int] = None
self.peer_address: typing.Optional[str] = None
self.transport: typing.Optional[asyncio.Transport] = None
self.peer_timeout = peer_timeout
self.connection_manager = connection_manager
self.writer: typing.Optional['HashBlobWriter'] = None
self.blob: typing.Optional['AbstractBlob'] = None
self._blob_bytes_received = 0
self._response_fut: typing.Optional[asyncio.Future] = None
self.buf = b''
# this is here to handle the race when the downloader is closed right as response_fut gets a result
self.closed = asyncio.Event()
def data_received(self, data: bytes):
if self.connection_manager:
if not self.peer_address:
addr_info = self.transport.get_extra_info('peername')
self.peer_address, self.peer_port = addr_info
# assert self.peer_address is not None
self.connection_manager.received_data(f"{self.peer_address}:{self.peer_port}", len(data))
if not self.transport or self.transport.is_closing():
log.warning("transport closing, but got more bytes from %s:%i\n%s", self.peer_address, self.peer_port,
binascii.hexlify(data))
if self._response_fut and not self._response_fut.done():
self._response_fut.cancel()
return
if not self._response_fut:
log.warning("Protocol received data before expected, probable race on keep alive. Closing transport.")
return self.close()
if self._blob_bytes_received and not self.writer.closed():
return self._write(data)
response = BlobResponse.deserialize(self.buf + data)
if not response.responses and not self._response_fut.done():
self.buf += data
return
else:
self.buf = b''
if response.responses and self.blob:
blob_response = response.get_blob_response()
if blob_response and not blob_response.error and blob_response.blob_hash == self.blob.blob_hash:
# set the expected length for the incoming blob if we didn't know it
self.blob.set_length(blob_response.length)
elif blob_response and not blob_response.error and self.blob.blob_hash != blob_response.blob_hash:
# the server started sending a blob we didn't request
log.warning("%s started sending blob we didn't request %s instead of %s", self.peer_address,
blob_response.blob_hash, self.blob.blob_hash)
return
if response.responses:
log.debug("got response from %s:%i <- %s", self.peer_address, self.peer_port, response.to_dict())
# fire the Future with the response to our request
self._response_fut.set_result(response)
if response.blob_data and self.writer and not self.writer.closed():
# log.debug("got %i blob bytes from %s:%i", len(response.blob_data), self.peer_address, self.peer_port)
# write blob bytes if we're writing a blob and have blob bytes to write
self._write(response.blob_data)
def _write(self, data: bytes):
if len(data) > (self.blob.get_length() - self._blob_bytes_received):
data = data[:(self.blob.get_length() - self._blob_bytes_received)]
log.warning("got more than asked from %s:%d, probable sendfile bug", self.peer_address, self.peer_port)
self._blob_bytes_received += len(data)
try:
self.writer.write(data)
except OSError as err:
log.error("error downloading blob from %s:%i: %s", self.peer_address, self.peer_port, err)
if self._response_fut and not self._response_fut.done():
self._response_fut.set_exception(err)
except asyncio.TimeoutError as err:
log.error("%s downloading blob from %s:%i", str(err), self.peer_address, self.peer_port)
if self._response_fut and not self._response_fut.done():
self._response_fut.set_exception(err)
async def _download_blob(self) -> typing.Tuple[int, Optional['BlobExchangeClientProtocol']]: # pylint: disable=too-many-return-statements
"""
:return: number of bytes received (int), connected protocol if still usable (BlobExchangeClientProtocol or None)
"""
start_time = time.perf_counter()
request = BlobRequest.make_request_for_blob_hash(self.blob.blob_hash)
blob_hash = self.blob.blob_hash
if not self.peer_address:
addr_info = self.transport.get_extra_info('peername')
self.peer_address, self.peer_port = addr_info
try:
msg = request.serialize()
log.debug("send request to %s:%i -> %s", self.peer_address, self.peer_port, msg.decode())
self.transport.write(msg)
if self.connection_manager:
self.connection_manager.sent_data(f"{self.peer_address}:{self.peer_port}", len(msg))
response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout)
availability_response = response.get_availability_response()
price_response = response.get_price_response()
blob_response = response.get_blob_response()
if self.closed.is_set():
msg = f"cancelled blob request for {blob_hash} immediately after we got a response"
log.warning(msg)
raise asyncio.CancelledError(msg)
if (not blob_response or blob_response.error) and\
(not availability_response or not availability_response.available_blobs):
log.warning("%s not in availability response from %s:%i", self.blob.blob_hash, self.peer_address,
self.peer_port)
log.warning(response.to_dict())
return self._blob_bytes_received, self.close()
elif availability_response and availability_response.available_blobs and \
availability_response.available_blobs != [self.blob.blob_hash]:
log.warning("blob availability response doesn't match our request from %s:%i",
self.peer_address, self.peer_port)
return self._blob_bytes_received, self.close()
elif not availability_response:
log.warning("response from %s:%i did not include an availability response (we requested %s)",
self.peer_address, self.peer_port, blob_hash)
return self._blob_bytes_received, self.close()
if not price_response or price_response.blob_data_payment_rate != 'RATE_ACCEPTED':
log.warning("data rate rejected by %s:%i", self.peer_address, self.peer_port)
return self._blob_bytes_received, self.close()
if not blob_response or blob_response.error:
log.warning("blob can't be downloaded from %s:%i", self.peer_address, self.peer_port)
return self._blob_bytes_received, self.close()
if not blob_response.error and blob_response.blob_hash != self.blob.blob_hash:
log.warning("incoming blob hash mismatch from %s:%i", self.peer_address, self.peer_port)
return self._blob_bytes_received, self.close()
if self.blob.length is not None and self.blob.length != blob_response.length:
log.warning("incoming blob unexpected length from %s:%i", self.peer_address, self.peer_port)
return self._blob_bytes_received, self.close()
msg = f"downloading {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}," \
f" timeout in {self.peer_timeout}"
log.debug(msg)
msg = f"downloaded {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}"
await asyncio.wait_for(self.writer.finished, self.peer_timeout)
# wait for the io to finish
await self.blob.verified.wait()
log.info("%s at %fMB/s", msg,
round((float(self._blob_bytes_received) /
float(time.perf_counter() - start_time)) / 1000000.0, 2))
# await self.blob.finished_writing.wait() not necessary, but a dangerous change. TODO: is it needed?
return self._blob_bytes_received, self
except asyncio.TimeoutError:
return self._blob_bytes_received, self.close()
except (InvalidBlobHashError, InvalidDataError):
log.warning("invalid blob from %s:%i", self.peer_address, self.peer_port)
return self._blob_bytes_received, self.close()
def close(self):
self.closed.set()
if self._response_fut and not self._response_fut.done():
self._response_fut.cancel()
if self.writer and not self.writer.closed():
self.writer.close_handle()
self._response_fut = None
self.writer = None
self.blob = None
if self.transport:
self.transport.close()
self.transport = None
self.buf = b''
async def download_blob(self, blob: 'AbstractBlob') -> typing.Tuple[int, Optional['BlobExchangeClientProtocol']]:
self.closed.clear()
blob_hash = blob.blob_hash
if blob.get_is_verified() or not blob.is_writeable():
return 0, self
try:
self._blob_bytes_received = 0
self.blob, self.writer = blob, blob.get_blob_writer(self.peer_address, self.peer_port)
self._response_fut = asyncio.Future()
return await self._download_blob()
except OSError:
# i'm not sure how to fix this race condition - jack
log.warning("race happened downloading %s from %s:%s", blob_hash, self.peer_address, self.peer_port)
# return self._blob_bytes_received, self.transport
raise
except asyncio.TimeoutError:
if self._response_fut and not self._response_fut.done():
self._response_fut.cancel()
self.close()
return self._blob_bytes_received, None
except asyncio.CancelledError:
self.close()
raise
finally:
if self.writer and not self.writer.closed():
self.writer.close_handle()
self.writer = None
def connection_made(self, transport: asyncio.Transport):
addr = transport.get_extra_info('peername')
self.peer_address, self.peer_port = addr[0], addr[1]
self.transport = transport
if self.connection_manager:
self.connection_manager.connection_made(f"{self.peer_address}:{self.peer_port}")
log.debug("connection made to %s:%i", self.peer_address, self.peer_port)
def connection_lost(self, exc):
if self.connection_manager:
self.connection_manager.outgoing_connection_lost(f"{self.peer_address}:{self.peer_port}")
log.debug("connection lost to %s:%i (reason: %s, %s)", self.peer_address, self.peer_port, str(exc),
str(type(exc)))
self.close()
@cache_concurrent
async def request_blob(loop: asyncio.AbstractEventLoop, blob: Optional['AbstractBlob'], address: str,
tcp_port: int, peer_connect_timeout: float, blob_download_timeout: float,
connected_protocol: Optional['BlobExchangeClientProtocol'] = None,
connection_id: int = 0, connection_manager: Optional['ConnectionManager'] = None)\
-> typing.Tuple[int, Optional['BlobExchangeClientProtocol']]:
"""
Returns [<amount of bytes received>, <client protocol if connected>]
"""
protocol = connected_protocol
if not connected_protocol or not connected_protocol.transport or connected_protocol.transport.is_closing():
connected_protocol = None
protocol = BlobExchangeClientProtocol(
loop, blob_download_timeout, connection_manager
)
else:
log.debug("reusing connection for %s:%d", address, tcp_port)
try:
if not connected_protocol:
await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port),
peer_connect_timeout)
connected_protocol = protocol
if blob is None or blob.get_is_verified() or not blob.is_writeable():
# blob is None happens when we are just opening a connection
# file exists but not verified means someone is writing right now, give it time, come back later
return 0, connected_protocol
return await connected_protocol.download_blob(blob)
except (asyncio.TimeoutError, ConnectionRefusedError, ConnectionAbortedError, OSError):
return 0, None
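# Illustrative usage sketch, not part of the original module: fetch one blob from
# a single known peer. The peer address/port and timeout values are assumptions.
async def _example_fetch_blob(blob_manager: 'BlobManager', blob_hash: str) -> int:
    blob = blob_manager.get_blob(blob_hash)
    # request_blob returns (bytes received, protocol or None); a live protocol can
    # be passed back in as connected_protocol= to reuse the TCP connection
    bytes_received, protocol = await request_blob(
        asyncio.get_running_loop(), blob, '127.0.0.1', 4444,
        peer_connect_timeout=3.0, blob_download_timeout=30.0
    )
    if protocol is not None:
        protocol.close()
    return bytes_received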


@@ -1,141 +0,0 @@
import asyncio
import typing
import logging
from lbry.utils import cache_concurrent
from lbry.blob_exchange.client import request_blob
from lbry.dht.node import get_kademlia_peers_from_hosts
if typing.TYPE_CHECKING:
from lbry.conf import Config
from lbry.dht.node import Node
from lbry.dht.peer import KademliaPeer
from lbry.blob.blob_manager import BlobManager
from lbry.blob.blob_file import AbstractBlob
from lbry.blob_exchange.client import BlobExchangeClientProtocol
log = logging.getLogger(__name__)
class BlobDownloader:
BAN_FACTOR = 2.0 # fixme: when connection manager gets implemented, move it out from here
def __init__(self, loop: asyncio.AbstractEventLoop, config: 'Config', blob_manager: 'BlobManager',
peer_queue: asyncio.Queue):
self.loop = loop
self.config = config
self.blob_manager = blob_manager
self.peer_queue = peer_queue
self.active_connections: typing.Dict['KademliaPeer', asyncio.Task] = {} # active request_blob calls
self.ignored: typing.Dict['KademliaPeer', int] = {}
self.scores: typing.Dict['KademliaPeer', int] = {}
self.failures: typing.Dict['KademliaPeer', int] = {}
self.connection_failures: typing.Set['KademliaPeer'] = set()
self.connections: typing.Dict['KademliaPeer', 'BlobExchangeClientProtocol'] = {}
self.is_running = asyncio.Event()
def should_race_continue(self, blob: 'AbstractBlob'):
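        # probe aggressively (10x the configured limit) until at least one
        # connection has succeeded, then throttle to max_connections_per_download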
max_probes = self.config.max_connections_per_download * (1 if self.connections else 10)
if len(self.active_connections) >= max_probes:
return False
return not (blob.get_is_verified() or not blob.is_writeable())
async def request_blob_from_peer(self, blob: 'AbstractBlob', peer: 'KademliaPeer', connection_id: int = 0,
just_probe: bool = False):
if blob.get_is_verified():
return
start = self.loop.time()
bytes_received, protocol = await request_blob(
self.loop, blob if not just_probe else None, peer.address, peer.tcp_port, self.config.peer_connect_timeout,
self.config.blob_download_timeout, connected_protocol=self.connections.get(peer),
connection_id=connection_id, connection_manager=self.blob_manager.connection_manager
)
if not bytes_received and not protocol and peer not in self.connection_failures:
self.connection_failures.add(peer)
if not protocol and peer not in self.ignored:
self.ignored[peer] = self.loop.time()
log.debug("drop peer %s:%i", peer.address, peer.tcp_port)
self.failures[peer] = self.failures.get(peer, 0) + 1
if peer in self.connections:
del self.connections[peer]
elif protocol:
log.debug("keep peer %s:%i", peer.address, peer.tcp_port)
self.failures[peer] = 0
self.connections[peer] = protocol
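        # score peers by observed throughput in bytes/sec; the baseline of 1 ranks
        # probed-but-idle peers above peers that have never been scored (0)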
elapsed = self.loop.time() - start
self.scores[peer] = bytes_received / elapsed if bytes_received and elapsed else 1
async def new_peer_or_finished(self):
active_tasks = list(self.active_connections.values()) + [asyncio.create_task(asyncio.sleep(1))]
        await asyncio.wait(active_tasks, return_when=asyncio.FIRST_COMPLETED)
def cleanup_active(self):
if not self.active_connections and not self.connections:
self.clearbanned()
to_remove = [peer for (peer, task) in self.active_connections.items() if task.done()]
for peer in to_remove:
del self.active_connections[peer]
def clearbanned(self):
now = self.loop.time()
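        # quadratic backoff: a failing peer stays ignored for
        # failures ** BAN_FACTOR seconds, capped at 30 seconds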
self.ignored = {
peer: when for (peer, when) in self.ignored.items()
if (now - when) < min(30.0, (self.failures.get(peer, 0) ** self.BAN_FACTOR))
}
@cache_concurrent
async def download_blob(self, blob_hash: str, length: typing.Optional[int] = None,
connection_id: int = 0) -> 'AbstractBlob':
blob = self.blob_manager.get_blob(blob_hash, length)
if blob.get_is_verified():
return blob
self.is_running.set()
try:
while not blob.get_is_verified() and self.is_running.is_set():
batch: typing.Set['KademliaPeer'] = set(self.connections.keys())
while not self.peer_queue.empty():
batch.update(self.peer_queue.get_nowait())
log.debug(
"%s running, %d peers, %d ignored, %d active, %s connections", blob_hash[:6],
len(batch), len(self.ignored), len(self.active_connections), len(self.connections)
)
for peer in sorted(batch, key=lambda peer: self.scores.get(peer, 0), reverse=True):
if peer in self.ignored:
continue
if peer in self.active_connections or not self.should_race_continue(blob):
continue
log.debug("request %s from %s:%i", blob_hash[:8], peer.address, peer.tcp_port)
t = self.loop.create_task(self.request_blob_from_peer(blob, peer, connection_id))
self.active_connections[peer] = t
self.peer_queue.put_nowait(list(batch))
await self.new_peer_or_finished()
self.cleanup_active()
log.debug("downloaded %s", blob_hash[:8])
return blob
finally:
blob.close()
if self.loop.is_running():
self.loop.call_soon(self.cleanup_active)
def close(self):
self.connection_failures.clear()
self.scores.clear()
self.ignored.clear()
self.is_running.clear()
for protocol in self.connections.values():
protocol.close()
async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node',
blob_hash: str) -> 'AbstractBlob':
search_queue = asyncio.Queue(maxsize=config.max_connections_per_download)
search_queue.put_nowait(blob_hash)
peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue)
fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers)
if fixed_peers:
loop.call_later(config.fixed_peer_delay, peer_queue.put_nowait, fixed_peers)
downloader = BlobDownloader(loop, config, blob_manager, peer_queue)
try:
return await downloader.download_blob(blob_hash)
finally:
if accumulate_task and not accumulate_task.done():
accumulate_task.cancel()
downloader.close()
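# Illustrative sketch, not part of the original module: the typical entry point,
# assuming `config`, `blob_manager` and a started DHT `node` already exist.
async def _example_download(config: 'Config', blob_manager: 'BlobManager',
                            node: 'Node', blob_hash: str) -> 'AbstractBlob':
    # races DHT peers (plus any configured fixed peers) until the blob verifies
    return await download_blob(asyncio.get_running_loop(), config, blob_manager, node, blob_hash)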


@@ -1,194 +0,0 @@
import asyncio
import binascii
import logging
import socket
import typing
from json.decoder import JSONDecodeError
from lbry.blob_exchange.serialization import BlobResponse, BlobRequest, blob_response_types
from lbry.blob_exchange.serialization import BlobAvailabilityResponse, BlobPriceResponse, BlobDownloadResponse, \
BlobPaymentAddressResponse
if typing.TYPE_CHECKING:
from lbry.blob.blob_manager import BlobManager
log = logging.getLogger(__name__)
# a standard request will be 295 bytes
MAX_REQUEST_SIZE = 1200
class BlobServerProtocol(asyncio.Protocol):
def __init__(self, loop: asyncio.AbstractEventLoop, blob_manager: 'BlobManager', lbrycrd_address: str,
idle_timeout: float = 30.0, transfer_timeout: float = 60.0):
self.loop = loop
self.blob_manager = blob_manager
self.idle_timeout = idle_timeout
self.transfer_timeout = transfer_timeout
self.server_task: typing.Optional[asyncio.Task] = None
self.started_listening = asyncio.Event()
self.buf = b''
self.transport: typing.Optional[asyncio.Transport] = None
self.lbrycrd_address = lbrycrd_address
self.peer_address_and_port: typing.Optional[str] = None
self.started_transfer = asyncio.Event()
self.transfer_finished = asyncio.Event()
self.close_on_idle_task: typing.Optional[asyncio.Task] = None
async def close_on_idle(self):
while self.transport:
try:
await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout)
except asyncio.TimeoutError:
log.debug("closing idle connection from %s", self.peer_address_and_port)
return self.close()
self.started_transfer.clear()
await self.transfer_finished.wait()
self.transfer_finished.clear()
def close(self):
if self.transport:
self.transport.close()
def connection_made(self, transport):
self.transport = transport
self.close_on_idle_task = self.loop.create_task(self.close_on_idle())
self.peer_address_and_port = "%s:%i" % self.transport.get_extra_info('peername')
self.blob_manager.connection_manager.connection_received(self.peer_address_and_port)
log.debug("received connection from %s", self.peer_address_and_port)
def connection_lost(self, exc: typing.Optional[Exception]) -> None:
log.debug("lost connection from %s", self.peer_address_and_port)
self.blob_manager.connection_manager.incoming_connection_lost(self.peer_address_and_port)
self.transport = None
if self.close_on_idle_task and not self.close_on_idle_task.done():
self.close_on_idle_task.cancel()
self.close_on_idle_task = None
def send_response(self, responses: typing.List[blob_response_types]):
to_send = []
while responses:
to_send.append(responses.pop())
serialized = BlobResponse(to_send).serialize()
self.transport.write(serialized)
self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, len(serialized))
async def handle_request(self, request: BlobRequest):
addr = self.transport.get_extra_info('peername')
peer_address, peer_port = addr
responses = []
address_request = request.get_address_request()
if address_request:
responses.append(BlobPaymentAddressResponse(lbrycrd_address=self.lbrycrd_address))
availability_request = request.get_availability_request()
if availability_request:
responses.append(BlobAvailabilityResponse(available_blobs=list(set(
filter(lambda blob_hash: blob_hash in self.blob_manager.completed_blob_hashes,
availability_request.requested_blobs)
))))
price_request = request.get_price_request()
if price_request:
responses.append(BlobPriceResponse(blob_data_payment_rate='RATE_ACCEPTED'))
download_request = request.get_blob_request()
if download_request:
blob = self.blob_manager.get_blob(download_request.requested_blob)
if blob.get_is_verified():
incoming_blob = {'blob_hash': blob.blob_hash, 'length': blob.length}
responses.append(BlobDownloadResponse(incoming_blob=incoming_blob))
self.send_response(responses)
blob_hash = blob.blob_hash[:8]
log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
self.started_transfer.set()
try:
sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout)
if sent and sent > 0:
self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent)
log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port)
else:
self.close()
log.debug("stopped sending %s to %s:%i", blob_hash, peer_address, peer_port)
return
except (OSError, ValueError, asyncio.TimeoutError) as err:
if isinstance(err, asyncio.TimeoutError):
log.debug("timed out sending blob %s to %s", blob_hash, peer_address)
else:
log.warning("could not read blob %s to send %s:%i", blob_hash, peer_address, peer_port)
self.close()
return
finally:
self.transfer_finished.set()
else:
log.info("don't have %s to send %s:%i", blob.blob_hash[:8], peer_address, peer_port)
if responses and not self.transport.is_closing():
self.send_response(responses)
def data_received(self, data):
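        # requests are bare JSON objects with no length prefix; buffer partial reads
        # until a closing '}' arrives, then try to deserialize everything received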
request = None
if len(self.buf) + len(data or b'') >= MAX_REQUEST_SIZE:
log.warning("request from %s is too large", self.peer_address_and_port)
self.close()
return
if data:
self.blob_manager.connection_manager.received_data(self.peer_address_and_port, len(data))
_, separator, remainder = data.rpartition(b'}')
if not separator:
self.buf += data
return
try:
request = BlobRequest.deserialize(self.buf + data)
self.buf = remainder
except (UnicodeDecodeError, JSONDecodeError):
log.error("request from %s is not valid json (%i bytes): %s", self.peer_address_and_port,
len(self.buf + data), '' if not data else binascii.hexlify(self.buf + data).decode())
self.close()
return
if not request.requests:
log.error("failed to decode request from %s (%i bytes): %s", self.peer_address_and_port,
len(self.buf + data), '' if not data else binascii.hexlify(self.buf + data).decode())
self.close()
return
self.loop.create_task(self.handle_request(request))
class BlobServer:
def __init__(self, loop: asyncio.AbstractEventLoop, blob_manager: 'BlobManager', lbrycrd_address: str,
idle_timeout: float = 30.0, transfer_timeout: float = 60.0):
self.loop = loop
self.blob_manager = blob_manager
self.server_task: typing.Optional[asyncio.Task] = None
self.started_listening = asyncio.Event()
self.lbrycrd_address = lbrycrd_address
self.idle_timeout = idle_timeout
self.transfer_timeout = transfer_timeout
self.server_protocol_class = BlobServerProtocol
def start_server(self, port: int, interface: typing.Optional[str] = '0.0.0.0'):
if self.server_task is not None:
raise Exception("already running")
async def _start_server():
# checking if the port is in use
# thx https://stackoverflow.com/a/52872579
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if s.connect_ex(('localhost', port)) == 0:
# the port is already in use!
log.error("Failed to bind TCP %s:%d", interface, port)
server = await self.loop.create_server(
lambda: self.server_protocol_class(self.loop, self.blob_manager, self.lbrycrd_address,
self.idle_timeout, self.transfer_timeout),
interface, port
)
self.started_listening.set()
log.info("Blob server listening on TCP %s:%i", interface, port)
async with server:
await server.serve_forever()
self.server_task = self.loop.create_task(_start_server())
def stop_server(self):
if self.server_task:
self.server_task.cancel()
self.server_task = None
log.info("Stopped blob server")


@@ -1,835 +0,0 @@
import os
import re
import sys
import logging
from typing import List, Dict, Tuple, Union, TypeVar, Generic, Optional
from argparse import ArgumentParser
from contextlib import contextmanager
from appdirs import user_data_dir, user_config_dir
import yaml
from lbry.error import InvalidCurrencyError
from lbry.dht import constants
from lbry.wallet.coinselection import STRATEGIES
log = logging.getLogger(__name__)
NOT_SET = type('NOT_SET', (object,), {}) # pylint: disable=invalid-name
T = TypeVar('T')
CURRENCIES = {
'BTC': {'type': 'crypto'},
'LBC': {'type': 'crypto'},
'USD': {'type': 'fiat'},
}
class Setting(Generic[T]):
def __init__(self, doc: str, default: Optional[T] = None,
previous_names: Optional[List[str]] = None,
metavar: Optional[str] = None):
self.doc = doc
self.default = default
self.previous_names = previous_names or []
self.metavar = metavar
def __set_name__(self, owner, name):
self.name = name # pylint: disable=attribute-defined-outside-init
@property
def cli_name(self):
return f"--{self.name.replace('_', '-')}"
@property
def no_cli_name(self):
return f"--no-{self.name.replace('_', '-')}"
def __get__(self, obj: Optional['BaseConfig'], owner) -> T:
if obj is None:
return self
for location in obj.search_order:
if self.name in location:
return location[self.name]
return self.default
def __set__(self, obj: 'BaseConfig', val: Union[T, NOT_SET]):
if val == NOT_SET:
for location in obj.modify_order:
if self.name in location:
del location[self.name]
else:
self.validate(val)
for location in obj.modify_order:
location[self.name] = val
def is_set(self, obj: 'BaseConfig') -> bool:
for location in obj.search_order:
if self.name in location:
return True
return False
def is_set_to_default(self, obj: 'BaseConfig') -> bool:
for location in obj.search_order:
if self.name in location:
return location[self.name] == self.default
return False
def validate(self, value):
raise NotImplementedError()
def deserialize(self, value): # pylint: disable=no-self-use
return value
def serialize(self, value): # pylint: disable=no-self-use
return value
def contribute_to_argparse(self, parser: ArgumentParser):
parser.add_argument(
self.cli_name,
help=self.doc,
metavar=self.metavar,
default=NOT_SET
)
class String(Setting[str]):
def validate(self, value):
assert isinstance(value, str), \
f"Setting '{self.name}' must be a string."
    # TODO: remove this after pylint starts to understand generics
def __get__(self, obj: Optional['BaseConfig'], owner) -> str: # pylint: disable=useless-super-delegation
return super().__get__(obj, owner)
class Integer(Setting[int]):
def validate(self, value):
assert isinstance(value, int), \
f"Setting '{self.name}' must be an integer."
def deserialize(self, value):
return int(value)
class Float(Setting[float]):
def validate(self, value):
assert isinstance(value, float), \
f"Setting '{self.name}' must be a decimal."
def deserialize(self, value):
return float(value)
class Toggle(Setting[bool]):
def validate(self, value):
assert isinstance(value, bool), \
f"Setting '{self.name}' must be a true/false value."
def contribute_to_argparse(self, parser: ArgumentParser):
parser.add_argument(
self.cli_name,
help=self.doc,
action="store_true",
default=NOT_SET
)
parser.add_argument(
self.no_cli_name,
help=f"Opposite of {self.cli_name}",
dest=self.name,
action="store_false",
default=NOT_SET
)
class Path(String):
def __init__(self, doc: str, *args, default: str = '', **kwargs):
super().__init__(doc, default, *args, **kwargs)
def __get__(self, obj, owner) -> str:
value = super().__get__(obj, owner)
if isinstance(value, str):
return os.path.expanduser(os.path.expandvars(value))
return value
class MaxKeyFee(Setting[dict]):
def validate(self, value):
if value is not None:
assert isinstance(value, dict) and set(value) == {'currency', 'amount'}, \
f"Setting '{self.name}' must be a dict like \"{{'amount': 50.0, 'currency': 'USD'}}\"."
if value["currency"] not in CURRENCIES:
raise InvalidCurrencyError(value["currency"])
@staticmethod
def _parse_list(l):
if l == ['null']:
return None
        assert len(l) == 2, (
            'Max key fee takes either two values, '
            '"AMOUNT CURRENCY", or "null" (to set no limit).'
        )
        try:
            amount = float(l[0])
        except ValueError as err:
            raise AssertionError('First value in max key fee must be a decimal: "AMOUNT CURRENCY"') from err
currency = str(l[1]).upper()
if currency not in CURRENCIES:
raise InvalidCurrencyError(currency)
return {'amount': amount, 'currency': currency}
def deserialize(self, value):
if value is None:
return
if isinstance(value, dict):
return {
'currency': value['currency'],
'amount': float(value['amount']),
}
if isinstance(value, str):
value = value.split()
if isinstance(value, list):
return self._parse_list(value)
raise AssertionError('Invalid max key fee.')
def contribute_to_argparse(self, parser: ArgumentParser):
parser.add_argument(
self.cli_name,
help=self.doc,
nargs='+',
metavar=('AMOUNT', 'CURRENCY'),
default=NOT_SET
)
parser.add_argument(
self.no_cli_name,
help="Disable maximum key fee check.",
dest=self.name,
const=None,
action="store_const",
default=NOT_SET
)
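    # Illustrative note, not part of the original module: deserialize() accepts a
    # dict, an "AMOUNT CURRENCY" string, or the CLI list form, e.g.
    #   MaxKeyFee('').deserialize('50.0 USD')  ->  {'amount': 50.0, 'currency': 'USD'}
    #   MaxKeyFee('').deserialize(['null'])    ->  None (no fee limit)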
class StringChoice(String):
def __init__(self, doc: str, valid_values: List[str], default: str, *args, **kwargs):
super().__init__(doc, default, *args, **kwargs)
if not valid_values:
raise ValueError("No valid values provided")
if default not in valid_values:
raise ValueError(f"Default value must be one of: {', '.join(valid_values)}")
self.valid_values = valid_values
def validate(self, value):
super().validate(value)
if value not in self.valid_values:
raise ValueError(f"Setting '{self.name}' value must be one of: {', '.join(self.valid_values)}")
class ListSetting(Setting[list]):
def validate(self, value):
assert isinstance(value, (tuple, list)), \
f"Setting '{self.name}' must be a tuple or list."
def contribute_to_argparse(self, parser: ArgumentParser):
parser.add_argument(
self.cli_name,
help=self.doc,
action='append'
)
class Servers(ListSetting):
def validate(self, value):
assert isinstance(value, (tuple, list)), \
f"Setting '{self.name}' must be a tuple or list of servers."
for idx, server in enumerate(value):
assert isinstance(server, (tuple, list)) and len(server) == 2, \
f"Server defined '{server}' at index {idx} in setting " \
f"'{self.name}' must be a tuple or list of two items."
            assert isinstance(server[0], str), \
                f"Server defined '{server}' at index {idx} in setting " \
                f"'{self.name}' must have the hostname as a string in the first position."
            assert isinstance(server[1], int), \
                f"Server defined '{server}' at index {idx} in setting " \
                f"'{self.name}' must have the port as an int in the second position."
def deserialize(self, value):
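        # accepts "host:port" strings, e.g. "reflector.lbry.com:5566";
        # malformed entries are silently dropped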
servers = []
if isinstance(value, list):
for server in value:
if isinstance(server, str) and server.count(':') == 1:
host, port = server.split(':')
try:
servers.append((host, int(port)))
except ValueError:
pass
return servers
def serialize(self, value):
if value:
return [f"{host}:{port}" for host, port in value]
return value
class Strings(ListSetting):
def validate(self, value):
assert isinstance(value, (tuple, list)), \
f"Setting '{self.name}' must be a tuple or list of strings."
for idx, string in enumerate(value):
assert isinstance(string, str), \
f"Value of '{string}' at index {idx} in setting " \
f"'{self.name}' must be a string."
class KnownHubsList:
def __init__(self, config: 'Config' = None, file_name: str = 'known_hubs.yml'):
self.file_name = file_name
self.path = os.path.join(config.wallet_dir, self.file_name) if config else None
self.hubs: Dict[Tuple[str, int], Dict] = {}
if self.exists:
self.load()
@property
def exists(self):
return self.path and os.path.exists(self.path)
@property
def serialized(self) -> Dict[str, Dict]:
return {f"{host}:{port}": details for (host, port), details in self.hubs.items()}
def filter(self, match_none=False, **kwargs):
if not kwargs:
return self.hubs
result = {}
for hub, details in self.hubs.items():
for key, constraint in kwargs.items():
value = details.get(key)
if value == constraint or (match_none and value is None):
result[hub] = details
break
return result
def load(self):
if self.path:
with open(self.path, 'r') as known_hubs_file:
raw = known_hubs_file.read()
for hub, details in yaml.safe_load(raw).items():
self.set(hub, details)
def save(self):
if self.path:
with open(self.path, 'w') as known_hubs_file:
known_hubs_file.write(yaml.safe_dump(self.serialized, default_flow_style=False))
def set(self, hub: str, details: Dict):
if hub and hub.count(':') == 1:
host, port = hub.split(':')
hub_parts = (host, int(port))
if hub_parts not in self.hubs:
self.hubs[hub_parts] = details
return hub
def add_hubs(self, hubs: List[str]):
added = False
for hub in hubs:
if self.set(hub, {}) is not None:
added = True
return added
def items(self):
return self.hubs.items()
def __bool__(self):
return len(self) > 0
def __len__(self):
return self.hubs.__len__()
def __iter__(self):
return iter(self.hubs)
class EnvironmentAccess:
PREFIX = 'LBRY_'
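    # settings map to environment variables by upper-casing the setting name with
    # this prefix, e.g. LBRY_TCP_PORT=4445 overrides the tcp_port setting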
def __init__(self, config: 'BaseConfig', environ: dict):
self.configuration = config
self.data = {}
if environ:
self.load(environ)
def load(self, environ):
for setting in self.configuration.get_settings():
value = environ.get(f'{self.PREFIX}{setting.name.upper()}', NOT_SET)
if value != NOT_SET and not (isinstance(setting, ListSetting) and value is None):
self.data[setting.name] = setting.deserialize(value)
def __contains__(self, item: str):
return item in self.data
def __getitem__(self, item: str):
return self.data[item]
class ArgumentAccess:
def __init__(self, config: 'BaseConfig', args: dict):
self.configuration = config
self.args = {}
if args:
self.load(args)
def load(self, args):
for setting in self.configuration.get_settings():
value = getattr(args, setting.name, NOT_SET)
if value != NOT_SET and not (isinstance(setting, ListSetting) and value is None):
self.args[setting.name] = setting.deserialize(value)
def __contains__(self, item: str):
return item in self.args
def __getitem__(self, item: str):
return self.args[item]
class ConfigFileAccess:
def __init__(self, config: 'BaseConfig', path: str):
self.configuration = config
self.path = path
self.data = {}
if self.exists:
self.load()
@property
def exists(self):
return self.path and os.path.exists(self.path)
def load(self):
cls = type(self.configuration)
with open(self.path, 'r') as config_file:
raw = config_file.read()
serialized = yaml.safe_load(raw) or {}
for key, value in serialized.items():
attr = getattr(cls, key, None)
if attr is None:
for setting in self.configuration.settings:
if key in setting.previous_names:
attr = setting
break
if attr is not None:
self.data[key] = attr.deserialize(value)
def save(self):
cls = type(self.configuration)
serialized = {}
for key, value in self.data.items():
attr = getattr(cls, key)
serialized[key] = attr.serialize(value)
with open(self.path, 'w') as config_file:
config_file.write(yaml.safe_dump(serialized, default_flow_style=False))
def upgrade(self) -> bool:
upgraded = False
for key in list(self.data):
for setting in self.configuration.settings:
if key in setting.previous_names:
self.data[setting.name] = self.data[key]
del self.data[key]
upgraded = True
break
return upgraded
def __contains__(self, item: str):
return item in self.data
def __getitem__(self, item: str):
return self.data[item]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
TBC = TypeVar('TBC', bound='BaseConfig')
class BaseConfig:
config = Path("Path to configuration file.", metavar='FILE')
def __init__(self, **kwargs):
self.runtime = {} # set internally or by various API calls
self.arguments = {} # from command line arguments
self.environment = {} # from environment variables
self.persisted = {} # from config file
self._updating_config = False
for key, value in kwargs.items():
setattr(self, key, value)
@contextmanager
def update_config(self):
self._updating_config = True
yield self
self._updating_config = False
if isinstance(self.persisted, ConfigFileAccess):
self.persisted.save()
@property
def modify_order(self):
locations = [self.runtime]
if self._updating_config:
locations.append(self.persisted)
return locations
@property
def search_order(self):
return [
self.runtime,
self.arguments,
self.environment,
self.persisted
]
@classmethod
def get_settings(cls):
for attr in dir(cls):
setting = getattr(cls, attr)
if isinstance(setting, Setting):
yield setting
@property
def settings(self):
return self.get_settings()
@property
def settings_dict(self):
return {
setting.name: getattr(self, setting.name) for setting in self.settings
}
@classmethod
def create_from_arguments(cls, args) -> TBC:
conf = cls()
conf.set_arguments(args)
conf.set_environment()
conf.set_persisted()
return conf
@classmethod
def contribute_to_argparse(cls, parser: ArgumentParser):
for setting in cls.get_settings():
setting.contribute_to_argparse(parser)
def set_arguments(self, args):
self.arguments = ArgumentAccess(self, args)
def set_environment(self, environ=None):
self.environment = EnvironmentAccess(self, environ or os.environ)
def set_persisted(self, config_file_path=None):
if config_file_path is None:
config_file_path = self.config
if not config_file_path:
return
ext = os.path.splitext(config_file_path)[1]
        assert ext in ('.yml', '.yaml'), \
            f"File extension '{ext}' is not supported, " \
            f"configuration file must be YAML (.yml or .yaml)."
self.persisted = ConfigFileAccess(self, config_file_path)
if self.persisted.upgrade():
self.persisted.save()
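# Illustrative sketch, not part of the original module: a minimal BaseConfig
# subclass. Values resolve runtime > CLI arguments > environment > config file > default.
class _ExampleConfig(BaseConfig):
    greeting = String("Message to print at startup.", "hello")
    retries = Integer("How many times to retry.", 3)

# parser = ArgumentParser()
# _ExampleConfig.contribute_to_argparse(parser)
# conf = _ExampleConfig.create_from_arguments(parser.parse_args([]))
# conf.greeting            # 'hello' -- falls through to the default
# conf.greeting = 'hi'     # stored in conf.runtime, the highest-precedence layer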
class TranscodeConfig(BaseConfig):
ffmpeg_path = String('A list of places to check for ffmpeg and ffprobe. '
f'$data_dir/ffmpeg/bin and $PATH are checked afterward. Separator: {os.pathsep}',
'', previous_names=['ffmpeg_folder'])
video_encoder = String('FFmpeg codec and parameters for the video encoding. '
'Example: libaom-av1 -crf 25 -b:v 0 -strict experimental',
'libx264 -crf 24 -preset faster -pix_fmt yuv420p')
video_bitrate_maximum = Integer('Maximum bits per second allowed for video streams (0 to disable).', 5_000_000)
video_scaler = String('FFmpeg scaling parameters for reducing bitrate. '
'Example: -vf "scale=-2:720,fps=24" -maxrate 5M -bufsize 3M',
r'-vf "scale=if(gte(iw\,ih)\,min(1920\,iw)\,-2):if(lt(iw\,ih)\,min(1920\,ih)\,-2)" '
r'-maxrate 5500K -bufsize 5000K')
audio_encoder = String('FFmpeg codec and parameters for the audio encoding. '
'Example: libopus -b:a 128k',
'aac -b:a 160k')
    volume_filter = String('FFmpeg filter for audio normalization. Example: -af loudnorm', '')
volume_analysis_time = Integer('Maximum seconds into the file that we examine audio volume (0 to disable).', 240)
class CLIConfig(TranscodeConfig):
api = String('Host name and port for lbrynet daemon API.', 'localhost:5279', metavar='HOST:PORT')
@property
def api_connection_url(self) -> str:
return f"http://{self.api}/lbryapi"
@property
def api_host(self):
return self.api.split(':')[0]
@property
def api_port(self):
return int(self.api.split(':')[1])
class Config(CLIConfig):
jurisdiction = String("Limit interactions to wallet server in this jurisdiction.")
# directories
data_dir = Path("Directory path to store blobs.", metavar='DIR')
download_dir = Path(
"Directory path to place assembled files downloaded from LBRY.",
previous_names=['download_directory'], metavar='DIR'
)
wallet_dir = Path(
"Directory containing a 'wallets' subdirectory with 'default_wallet' file.",
previous_names=['lbryum_wallet_dir'], metavar='DIR'
)
wallets = Strings(
"Wallet files in 'wallet_dir' to load at startup.",
['default_wallet']
)
# network
use_upnp = Toggle(
"Use UPnP to setup temporary port redirects for the DHT and the hosting of blobs. If you manually forward"
"ports or have firewall rules you likely want to disable this.", True
)
udp_port = Integer("UDP port for communicating on the LBRY DHT", 4444, previous_names=['dht_node_port'])
tcp_port = Integer("TCP port to listen for incoming blob requests", 4444, previous_names=['peer_port'])
prometheus_port = Integer("Port to expose prometheus metrics (off by default)", 0)
network_interface = String("Interface to use for the DHT and blob exchange", '0.0.0.0')
# routing table
split_buckets_under_index = Integer(
"Routing table bucket index below which we always split the bucket if given a new key to add to it and "
"the bucket is full. As this value is raised the depth of the routing table (and number of peers in it) "
"will increase. This setting is used by seed nodes, you probably don't want to change it during normal "
"use.", 2
)
is_bootstrap_node = Toggle(
"When running as a bootstrap node, disable all logic related to balancing the routing table, so we can "
"add as many peers as possible and better help first-runs.", False
)
# protocol timeouts
download_timeout = Float("Cumulative timeout for a stream to begin downloading before giving up", 30.0)
blob_download_timeout = Float("Timeout to download a blob from a peer", 30.0)
hub_timeout = Float("Timeout when making a hub request", 30.0)
peer_connect_timeout = Float("Timeout to establish a TCP connection to a peer", 3.0)
node_rpc_timeout = Float("Timeout when making a DHT request", constants.RPC_TIMEOUT)
# blob announcement and download
save_blobs = Toggle("Save encrypted blob files for hosting, otherwise download blobs to memory only.", True)
network_storage_limit = Integer("Disk space in MB to be allocated for helping the P2P network. 0 = disable", 0)
blob_storage_limit = Integer("Disk space in MB to be allocated for blob storage. 0 = no limit", 0)
blob_lru_cache_size = Integer(
"LRU cache size for decrypted downloaded blobs used to minimize re-downloading the same blobs when "
"replying to a range request. Set to 0 to disable.", 32
)
announce_head_and_sd_only = Toggle(
"Announce only the descriptor and first (rather than all) data blob for a stream to the DHT", True,
previous_names=['announce_head_blobs_only']
)
concurrent_blob_announcers = Integer(
"Number of blobs to iteratively announce at once, set to 0 to disable", 10,
previous_names=['concurrent_announcers']
)
max_connections_per_download = Integer(
"Maximum number of peers to connect to while downloading a blob", 4,
previous_names=['max_connections_per_stream']
)
concurrent_hub_requests = Integer("Maximum number of concurrent hub requests", 32)
fixed_peer_delay = Float(
"Amount of seconds before adding the reflector servers as potential peers to download from in case dht"
"peers are not found or are slow", 2.0
)
max_key_fee = MaxKeyFee(
"Don't download streams with fees exceeding this amount. When set to "
"null, the amount is unbounded.", {'currency': 'USD', 'amount': 50.0}
)
max_wallet_server_fee = String("Maximum daily LBC amount allowed as payment for wallet servers.", "0.0")
# reflector settings
reflect_streams = Toggle(
"Upload completed streams (published and downloaded) reflector in order to re-host them", True,
previous_names=['reflect_uploads']
)
concurrent_reflector_uploads = Integer(
"Maximum number of streams to upload to a reflector server at a time", 10
)
# servers
reflector_servers = Servers("Reflector re-hosting servers for mirroring publishes", [
('reflector.lbry.com', 5566)
])
fixed_peers = Servers("Fixed peers to fall back to if none are found on P2P for a blob", [
('cdn.reflector.lbry.com', 5567)
])
tracker_servers = Servers("BitTorrent-compatible (BEP15) UDP trackers for helping P2P discovery", [
('tracker.lbry.com', 9252),
('tracker.lbry.grin.io', 9252),
('tracker.lbry.pigg.es', 9252),
('tracker.lizard.technology', 9252),
('s1.lbry.network', 9252),
])
lbryum_servers = Servers("SPV wallet servers", [
('spv11.lbry.com', 50001),
('spv12.lbry.com', 50001),
('spv13.lbry.com', 50001),
('spv14.lbry.com', 50001),
('spv15.lbry.com', 50001),
('spv16.lbry.com', 50001),
('spv17.lbry.com', 50001),
('spv18.lbry.com', 50001),
('spv19.lbry.com', 50001),
('hub.lbry.grin.io', 50001),
('hub.lizard.technology', 50001),
('s1.lbry.network', 50001),
])
known_dht_nodes = Servers("Known nodes for bootstrapping connection to the DHT", [
('dht.lbry.grin.io', 4444), # Grin
('dht.lbry.madiator.com', 4444), # Madiator
('dht.lbry.pigg.es', 4444), # Pigges
('lbrynet1.lbry.com', 4444), # US EAST
('lbrynet2.lbry.com', 4444), # US WEST
('lbrynet3.lbry.com', 4444), # EU
('lbrynet4.lbry.com', 4444), # ASIA
('dht.lizard.technology', 4444), # Jack
('s2.lbry.network', 4444),
])
# blockchain
blockchain_name = String("Blockchain name - lbrycrd_main, lbrycrd_regtest, or lbrycrd_testnet", 'lbrycrd_main')
# daemon
save_files = Toggle("Save downloaded files when calling `get` by default", False)
    components_to_skip = Strings("Components which will be skipped during daemon start-up", [])
share_usage_data = Toggle(
"Whether to share usage stats and diagnostic info with LBRY.", False,
        previous_names=['upload_log', 'share_debug_info']
)
track_bandwidth = Toggle("Track bandwidth usage", True)
allowed_origin = String(
"Allowed `Origin` header value for API request (sent by browser), use * to allow "
"all hosts; default is to only allow API requests with no `Origin` value.", "")
# media server
streaming_server = String('Host name and port to serve streaming media over range requests',
'localhost:5280', metavar='HOST:PORT')
streaming_get = Toggle("Enable the /get endpoint for the streaming media server. "
"Disable to prevent new streams from being added.", True)
coin_selection_strategy = StringChoice(
"Strategy to use when selecting UTXOs for a transaction",
STRATEGIES, "prefer_confirmed"
)
transaction_cache_size = Integer("Transaction cache size", 2 ** 17)
save_resolved_claims = Toggle(
"Save content claims to the database when they are resolved to keep file_list up to date, "
"only disable this if file_x commands are not needed", True
)
@property
def streaming_host(self):
return self.streaming_server.split(':')[0]
@property
def streaming_port(self):
return int(self.streaming_server.split(':')[1])
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.set_default_paths()
self.known_hubs = KnownHubsList(self)
def set_default_paths(self):
if 'darwin' in sys.platform.lower():
get_directories = get_darwin_directories
elif 'win' in sys.platform.lower():
get_directories = get_windows_directories
elif 'linux' in sys.platform.lower():
get_directories = get_linux_directories
else:
return
cls = type(self)
cls.data_dir.default, cls.wallet_dir.default, cls.download_dir.default = get_directories()
cls.config.default = os.path.join(
self.data_dir, 'daemon_settings.yml'
)
@property
def log_file_path(self):
return os.path.join(self.data_dir, 'lbrynet.log')
def get_windows_directories() -> Tuple[str, str, str]:
from lbry.winpaths import get_path, FOLDERID, UserHandle, \
PathNotFoundException # pylint: disable=import-outside-toplevel
try:
download_dir = get_path(FOLDERID.Downloads, UserHandle.current)
except PathNotFoundException:
download_dir = os.getcwd()
# old
appdata = get_path(FOLDERID.RoamingAppData, UserHandle.current)
data_dir = os.path.join(appdata, 'lbrynet')
lbryum_dir = os.path.join(appdata, 'lbryum')
if os.path.isdir(data_dir) or os.path.isdir(lbryum_dir):
return data_dir, lbryum_dir, download_dir
# new
data_dir = user_data_dir('lbrynet', 'lbry')
lbryum_dir = user_data_dir('lbryum', 'lbry')
return data_dir, lbryum_dir, download_dir
def get_darwin_directories() -> Tuple[str, str, str]:
data_dir = user_data_dir('LBRY')
lbryum_dir = os.path.expanduser('~/.lbryum')
download_dir = os.path.expanduser('~/Downloads')
return data_dir, lbryum_dir, download_dir
def get_linux_directories() -> Tuple[str, str, str]:
    download_dir = None
    try:
with open(os.path.join(user_config_dir(), 'user-dirs.dirs'), 'r') as xdg:
down_dir = re.search(r'XDG_DOWNLOAD_DIR=(.+)', xdg.read())
if down_dir:
down_dir = re.sub(r'\$HOME', os.getenv('HOME') or os.path.expanduser("~/"), down_dir.group(1))
download_dir = re.sub('\"', '', down_dir)
except OSError:
download_dir = os.getenv('XDG_DOWNLOAD_DIR')
if not download_dir:
download_dir = os.path.expanduser('~/Downloads')
# old
data_dir = os.path.expanduser('~/.lbrynet')
lbryum_dir = os.path.expanduser('~/.lbryum')
if os.path.isdir(data_dir) or os.path.isdir(lbryum_dir):
return data_dir, lbryum_dir, download_dir
# new
return user_data_dir('lbry/lbrynet'), user_data_dir('lbry/lbryum'), download_dir


@@ -1,105 +0,0 @@
import time
import asyncio
import typing
import collections
import logging
log = logging.getLogger(__name__)
CONNECTED_EVENT = "connected"
DISCONNECTED_EVENT = "disconnected"
TRANSFERRED_EVENT = "transferred"
class ConnectionManager:
def __init__(self, loop: asyncio.AbstractEventLoop):
self.loop = loop
self.incoming_connected: typing.Set[str] = set()
self.incoming: typing.DefaultDict[str, int] = collections.defaultdict(int)
self.outgoing_connected: typing.Set[str] = set()
self.outgoing: typing.DefaultDict[str, int] = collections.defaultdict(int)
self._max_incoming_mbs = 0.0
self._max_outgoing_mbs = 0.0
self._status = {}
self._running = False
self._task: typing.Optional[asyncio.Task] = None
@property
def status(self):
return self._status
def sent_data(self, host_and_port: str, size: int):
if self._running:
self.outgoing[host_and_port] += size
def received_data(self, host_and_port: str, size: int):
if self._running:
self.incoming[host_and_port] += size
def connection_made(self, host_and_port: str):
if self._running:
self.outgoing_connected.add(host_and_port)
def connection_received(self, host_and_port: str):
# self.incoming_connected.add(host_and_port)
pass
def outgoing_connection_lost(self, host_and_port: str):
if self._running and host_and_port in self.outgoing_connected:
self.outgoing_connected.remove(host_and_port)
def incoming_connection_lost(self, host_and_port: str):
if self._running and host_and_port in self.incoming_connected:
self.incoming_connected.remove(host_and_port)
async def _update(self):
self._status = {
'incoming_bps': {},
'outgoing_bps': {},
'total_incoming_mbs': 0.0,
'total_outgoing_mbs': 0.0,
'total_sent': 0,
'total_received': 0,
'max_incoming_mbs': 0.0,
'max_outgoing_mbs': 0.0
}
while True:
last = time.perf_counter()
await asyncio.sleep(0.1)
self._status['incoming_bps'].clear()
self._status['outgoing_bps'].clear()
now = time.perf_counter()
while self.outgoing:
k, sent = self.outgoing.popitem()
self._status['total_sent'] += sent
self._status['outgoing_bps'][k] = sent / (now - last)
while self.incoming:
k, received = self.incoming.popitem()
self._status['total_received'] += received
self._status['incoming_bps'][k] = received / (now - last)
            self._status['total_outgoing_mbs'] = int(sum(self._status['outgoing_bps'].values())) / 1000000.0
            self._status['total_incoming_mbs'] = int(sum(self._status['incoming_bps'].values())) / 1000000.0
self._max_incoming_mbs = max(self._max_incoming_mbs, self._status['total_incoming_mbs'])
self._max_outgoing_mbs = max(self._max_outgoing_mbs, self._status['total_outgoing_mbs'])
self._status['max_incoming_mbs'] = self._max_incoming_mbs
self._status['max_outgoing_mbs'] = self._max_outgoing_mbs
def stop(self):
if self._task:
self._task.cancel()
self._task = None
self.outgoing.clear()
self.outgoing_connected.clear()
self.incoming.clear()
self.incoming_connected.clear()
self._status.clear()
self._running = False
def start(self):
self.stop()
self._running = True
self._task = self.loop.create_task(self._update())
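# Illustrative sketch, not part of the original module: sampling transfer stats.
# The host:port key and byte count below are made up for the example.
async def _example_stats():
    manager = ConnectionManager(asyncio.get_running_loop())
    manager.start()
    manager.sent_data('1.2.3.4:5567', 1024)  # normally reported by the protocols
    await asyncio.sleep(0.2)  # give _update() time to compute a rate sample
    print(manager.status['total_sent'], manager.status['max_outgoing_mbs'])
    manager.stop()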


@@ -1,2 +0,0 @@
CENT = 1000000
COIN = 100*CENT
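# i.e. 8 decimal places of precision: 1 coin == 100 * 1_000_000 == 100_000_000 base units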

Some files were not shown because too many files have changed in this diff.