Compare commits: master..v0.90.1

No commits in common. "master" and "v0.90.1" have entirely different histories.

213 changed files with 17126 additions and 10468 deletions


@ -1,206 +1,84 @@
name: ci
on: ["push", "pull_request", "workflow_dispatch"]
on: pull_request
jobs:
lint:
name: lint
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- uses: actions/checkout@v2
- uses: actions/setup-python@v1
with:
python-version: '3.9'
- name: extract pip cache
uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
restore-keys: ${{ runner.os }}-pip-
- run: pip install --user --upgrade pip wheel
- run: pip install -e .[lint]
python-version: '3.7'
- run: make install tools
- run: make lint
tests-unit:
name: "tests / unit"
strategy:
matrix:
os:
- ubuntu-20.04
- macos-latest
- windows-latest
runs-on: ${{ matrix.os }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- uses: actions/checkout@v2
- uses: actions/setup-python@v1
with:
python-version: '3.9'
- name: set pip cache dir
shell: bash
run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
- name: extract pip cache
uses: actions/cache@v3
with:
path: ${{ env.PIP_CACHE_DIR }}
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
restore-keys: ${{ runner.os }}-pip-
- id: os-name
uses: ASzc/change-string-case-action@v5
with:
string: ${{ runner.os }}
- run: python -m pip install --user --upgrade pip wheel
- if: startsWith(runner.os, 'linux')
run: pip install -e .[test]
- if: startsWith(runner.os, 'linux')
python-version: '3.7'
- run: make install tools
- working-directory: lbry
env:
HOME: /tmp
run: make test-unit-coverage
- if: startsWith(runner.os, 'linux') != true
run: pip install -e .[test]
- if: startsWith(runner.os, 'linux') != true
env:
HOME: /tmp
run: coverage run --source=lbry -m unittest tests/unit/test_conf.py
- name: submit coverage report
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_FLAG_NAME: tests-unit-${{ steps.os-name.outputs.lowercase }}
COVERALLS_PARALLEL: true
run: |
pip install coveralls
coveralls --service=github
run: coverage run -p --source=lbry -m unittest discover -vv tests.unit
tests-integration:
name: "tests / integration"
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
strategy:
matrix:
test:
- datanetwork
- blockchain
- claims
- takeovers
- transactions
- other
steps:
- name: Configure sysctl limits
run: |
sudo swapoff -a
sudo sysctl -w vm.swappiness=1
sudo sysctl -w fs.file-max=262144
sudo sysctl -w vm.max_map_count=262144
- name: Runs Elasticsearch
uses: elastic/elastic-github-actions/elasticsearch@master
- uses: actions/checkout@v2
- uses: actions/setup-python@v1
with:
stack-version: 7.12.1
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.9'
python-version: '3.7'
- if: matrix.test == 'other'
run: |
sudo apt-get update
sudo apt-get install -y --no-install-recommends ffmpeg
- name: extract pip cache
uses: actions/cache@v3
with:
path: ./.tox
key: tox-integration-${{ matrix.test }}-${{ hashFiles('setup.py') }}
restore-keys: txo-integration-${{ matrix.test }}-
- run: pip install tox coverage coveralls
- if: matrix.test == 'claims'
run: rm -rf .tox
- run: pip install tox-travis
- run: tox -e ${{ matrix.test }}
- name: submit coverage report
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_FLAG_NAME: tests-integration-${{ matrix.test }}
COVERALLS_PARALLEL: true
run: |
coverage combine tests
coveralls --service=github
coverage:
needs: ["tests-unit", "tests-integration"]
runs-on: ubuntu-20.04
steps:
- name: finalize coverage report submission
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
pip install coveralls
coveralls --service=github --finish
build:
needs: ["lint", "tests-unit", "tests-integration"]
name: "build / binary"
name: "build"
strategy:
matrix:
os:
- ubuntu-20.04
- ubuntu-latest
- macos-latest
- windows-latest
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- uses: actions/checkout@v2
- uses: actions/setup-python@v1
with:
python-version: '3.9'
- id: os-name
uses: ASzc/change-string-case-action@v5
with:
string: ${{ runner.os }}
- name: set pip cache dir
shell: bash
run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
- name: extract pip cache
uses: actions/cache@v3
with:
path: ${{ env.PIP_CACHE_DIR }}
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
restore-keys: ${{ runner.os }}-pip-
- run: pip install pyinstaller==4.6
- run: pip install -e .
- if: startsWith(github.ref, 'refs/tags/v')
run: python docker/set_build.py
- if: startsWith(runner.os, 'linux') || startsWith(runner.os, 'mac')
python-version: '3.7'
- name: Setup
run: |
pip install pyinstaller
pip install -e .
# https://stackoverflow.com/a/61693590
# https://github.com/pypa/setuptools/issues/1963
pip install --upgrade 'setuptools<45.0.0'
- if: startsWith(matrix.os, 'windows') == false
name: Build & Run (Unix)
run: |
pyinstaller --onefile --name lbrynet lbry/extras/cli.py
chmod +x dist/lbrynet
dist/lbrynet --version
- if: startsWith(runner.os, 'windows')
- if: startsWith(matrix.os, 'windows')
name: Build & Run (Windows)
run: |
pip install pywin32==301
pip install pywin32
pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py
dist/lbrynet.exe --version
- uses: actions/upload-artifact@v3
with:
name: lbrynet-${{ steps.os-name.outputs.lowercase }}
path: dist/
release:
name: "release"
if: startsWith(github.ref, 'refs/tags/v')
needs: ["build"]
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v1
- uses: actions/download-artifact@v2
- name: upload binaries
env:
GITHUB_TOKEN: ${{ secrets.RELEASE_API_TOKEN }}
run: |
pip install githubrelease
chmod +x lbrynet-macos/lbrynet
chmod +x lbrynet-linux/lbrynet
zip --junk-paths lbrynet-mac.zip lbrynet-macos/lbrynet
zip --junk-paths lbrynet-linux.zip lbrynet-linux/lbrynet
zip --junk-paths lbrynet-windows.zip lbrynet-windows/lbrynet.exe
ls -lh
githubrelease release lbryio/lbry-sdk info ${GITHUB_REF#refs/tags/}
githubrelease asset lbryio/lbry-sdk upload ${GITHUB_REF#refs/tags/} \
lbrynet-mac.zip lbrynet-linux.zip lbrynet-windows.zip
githubrelease release lbryio/lbry-sdk publish ${GITHUB_REF#refs/tags/}
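Aside: the release job above extracts the tag name with the shell expansion `${GITHUB_REF#refs/tags/}`, which strips the `refs/tags/` prefix. A rough Python equivalent is sketched below; `tag_from_ref` is a hypothetical helper, not part of the workflow.

```python
import os

# Hypothetical helper mirroring the shell expansion ${GITHUB_REF#refs/tags/} used by the
# release job: strip the "refs/tags/" prefix from the ref to recover the tag name.
def tag_from_ref(ref: str) -> str:
    prefix = "refs/tags/"
    return ref[len(prefix):] if ref.startswith(prefix) else ref

if __name__ == "__main__":
    ref = os.getenv("GITHUB_REF", "refs/tags/v0.90.1")  # example value when run outside CI
    print(tag_from_ref(ref))  # -> v0.90.1
```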


@ -1,22 +0,0 @@
name: slack
on:
release:
types: [published]
jobs:
release:
name: "slack notification"
runs-on: ubuntu-20.04
steps:
- uses: LoveToKnow/slackify-markdown-action@v1.0.0
id: markdown
with:
text: "There is a new SDK release: ${{github.event.release.html_url}}\n${{ github.event.release.body }}"
- uses: slackapi/slack-github-action@v1.14.0
env:
CHANGELOG: '<!channel> ${{ steps.markdown.outputs.text }}'
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_RELEASE_BOT_WEBHOOK }}
with:
payload: '{"type": "mrkdwn", "text": ${{ toJSON(env.CHANGELOG) }} }'

.gitignore (2 changes)

@ -13,7 +13,7 @@ __pycache__
_trial_temp/
trending*.log
/tests/integration/claims/files
/tests/integration/blockchain/files
/tests/.coverage.*
/lbry/wallet/bin

.gitlab-ci.yml (new file, 213 lines)

@ -0,0 +1,213 @@
default:
image: python:3.7
#cache:
# directories:
# - $HOME/venv
# - $HOME/.cache/pip
# - $HOME/Library/Caches/pip
# - $HOME/Library/Caches/Homebrew
# - $TRAVIS_BUILD_DIR/.tox
stages:
- test
- build
- assets
- release
.tagged:
rules:
- if: '$CI_COMMIT_TAG =~ /^v[0-9\.]+$/'
when: on_success
test:lint:
stage: test
script:
- make install tools
- make lint
test:unit:
stage: test
script:
- make install tools
- HOME=/tmp coverage run -p --source=lbry -m unittest discover -vv tests.unit
test:datanetwork-integration:
stage: test
script:
- pip install tox-travis
- tox -e datanetwork --recreate
test:blockchain-integration:
stage: test
script:
- pip install tox-travis
- tox -e blockchain
test:other-integration:
stage: test
script:
- apt-get update
- apt-get install -y --no-install-recommends ffmpeg
- pip install tox-travis
- tox -e other
test:json-api:
stage: test
script:
- make install tools
- HOME=/tmp coverage run -p --source=lbry scripts/generate_json_api.py
.build:
stage: build
artifacts:
expire_in: 1 day
paths:
- lbrynet-${OS}.zip
script:
- pip install --upgrade 'setuptools<45.0.0'
- pip install pyinstaller
- pip install -e .
- python3.7 docker/set_build.py # must come after lbry is installed because it imports lbry
- pyinstaller --onefile --name lbrynet lbry/extras/cli.py
- chmod +x dist/lbrynet
- zip --junk-paths ${CI_PROJECT_DIR}/lbrynet-${OS}.zip dist/lbrynet # gitlab expects artifacts to be in $CI_PROJECT_DIR
- openssl dgst -sha256 ${CI_PROJECT_DIR}/lbrynet-${OS}.zip | egrep -o [0-9a-f]+$ # get sha256 of asset. works on mac and ubuntu
- dist/lbrynet --version
build:linux:
extends: .build
image: ubuntu:16.04
variables:
OS: linux
before_script:
- apt-get update
- apt-get install -y --no-install-recommends software-properties-common zip curl build-essential
- add-apt-repository -y ppa:deadsnakes/ppa
- apt-get update
- apt-get install -y --no-install-recommends python3.7-dev
- python3.7 <(curl -q https://bootstrap.pypa.io/get-pip.py) # make sure we get pip with python3.7
- pip install lbry-libtorrent
build:mac:
extends: .build
tags: [macos] # makes gitlab use the mac runner
variables:
OS: mac
GIT_DEPTH: 5
VENV: /tmp/gitlab-lbry-sdk-venv
before_script:
# - brew upgrade python || true
- python3 --version | grep -q '^Python 3\.7\.' # dont upgrade python on every run. just make sure we're on the right Python
# - pip3 install --user --upgrade pip virtualenv
- pip3 --version | grep -q '\(python 3\.7\)'
- virtualenv --python=python3.7 "${VENV}"
- source "${VENV}/bin/activate"
after_script:
- rm -rf "${VENV}"
build:windows:
extends: .build
tags: [windows] # makes gitlab use the windows runner
variables:
OS: windows
GIT_DEPTH: 5
before_script:
- ./docker/install_choco.ps1
- choco install -y --x64 python --version=3.7.9
- choco install -y 7zip checksum
- python --version # | findstr /B "Python 3\.7\." # dont upgrade python on every run. just make sure we're on the right Python
- pip --version # | findstr /E '\(python 3\.7\)'
- python -c "import sys;print(f'{str(64 if sys.maxsize > 2**32 else 32)} bit python\n{sys.platform}')"
- pip install virtualenv pywin32
- virtualenv venv
- venv/Scripts/activate.ps1
- pip install pip==19.3.1; $true # $true ignores errors. need this to get the correct coincurve wheel. see commit notes for details.
after_script:
- rmdir -Recurse venv
script:
- pip install --upgrade 'setuptools<45.0.0'
- pip install pyinstaller==3.5
- pip install -e .
- python docker/set_build.py # must come after lbry is installed because it imports lbry
- pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico -F -n lbrynet lbry/extras/cli.py
- 7z a -tzip $env:CI_PROJECT_DIR/lbrynet-${OS}.zip ./dist/lbrynet.exe
- checksum --type=sha256 --file=$env:CI_PROJECT_DIR/lbrynet-${OS}.zip
- dist/lbrynet.exe --version
# s3 = upload asset to s3 (build.lbry.io)
.s3:
stage: assets
variables:
GIT_STRATEGY: none
script:
- "[ -f lbrynet-${OS}.zip ]" # check that asset exists before trying to upload
- pip install awscli
- S3_PATH="daemon/gitlab-build-${CI_PIPELINE_ID}_commit-${CI_COMMIT_SHA:0:7}$( if [ ! -z ${CI_COMMIT_TAG} ]; then echo _tag-${CI_COMMIT_TAG}; else echo _branch-${CI_COMMIT_REF_NAME}; fi )"
- AWS_ACCESS_KEY_ID=${ARTIFACTS_KEY} AWS_SECRET_ACCESS_KEY=${ARTIFACTS_SECRET} AWS_REGION=${ARTIFACTS_REGION}
aws s3 cp lbrynet-${OS}.zip s3://${ARTIFACTS_BUCKET}/${S3_PATH}/lbrynet-${OS}.zip
s3:linux:
extends: .s3
variables: {OS: linux}
needs: ["build:linux"]
s3:mac:
extends: .s3
variables: {OS: mac}
needs: ["build:mac"]
s3:windows:
extends: .s3
variables: {OS: windows}
needs: ["build:windows"]
# github = upload assets to github when there's a tagged release
.github:
extends: .tagged
stage: assets
variables:
GIT_STRATEGY: none
script:
- "[ -f lbrynet-${OS}.zip ]" # check that asset exists before trying to upload. githubrelease won't error if its missing
- pip install githubrelease
- githubrelease --no-progress --github-token ${GITHUB_CI_USER_ACCESS_TOKEN} asset lbryio/lbry-sdk upload ${CI_COMMIT_TAG} lbrynet-${OS}.zip
github:linux:
extends: .github
variables: {OS: linux}
needs: ["build:linux"]
github:mac:
extends: .github
variables: {OS: mac}
needs: ["build:mac"]
github:windows:
extends: .github
variables: {OS: windows}
needs: ["build:windows"]
publish:
extends: .tagged
stage: release
variables:
GIT_STRATEGY: none
script:
- pip install githubrelease
- githubrelease --no-progress --github-token ${GITHUB_CI_USER_ACCESS_TOKEN} release lbryio/lbry-sdk publish ${CI_COMMIT_TAG}
- >
curl -X POST -H 'Content-type: application/json' --data '{"text":"<!channel> There is a new SDK release: https://github.com/lbryio/lbry-sdk/releases/tag/'"${CI_COMMIT_TAG}"'\n'"$(curl -s "https://api.github.com/repos/lbryio/lbry-sdk/releases/tags/${CI_COMMIT_TAG}" | egrep '\w*\"body\":' | cut -d':' -f 2- | tail -c +3 | head -c -2)"'", "channel":"tech"}' "$(echo ${SLACK_WEBHOOK_URL_BASE64} | base64 -d)"
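Aside: the `.s3` job above builds its upload path from GitLab CI variables. A minimal sketch of that string assembly in Python, using placeholder values (`s3_path` is a hypothetical helper):

```python
# Sketch of how the .s3 job assembles S3_PATH from CI variables; the values are placeholders.
def s3_path(pipeline_id: str, commit_sha: str, tag: str = "", branch: str = "") -> str:
    suffix = f"_tag-{tag}" if tag else f"_branch-{branch}"
    return f"daemon/gitlab-build-{pipeline_id}_commit-{commit_sha[:7]}{suffix}"

print(s3_path("12345", "0123456789abcdef", branch="master"))
# -> daemon/gitlab-build-12345_commit-0123456_branch-master
```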


@ -9,29 +9,20 @@ Here's a video walkthrough of this setup, which is itself hosted by the LBRY net
## Prerequisites
Running `lbrynet` from source requires Python 3.7. Get the installer for your OS [here](https://www.python.org/downloads/release/python-370/).
Running `lbrynet` from source requires Python 3.7 or higher. Get the installer for your OS [here](https://www.python.org/downloads/release/python-370/).
After installing Python 3.7, you'll need to install some additional libraries depending on your operating system.
Because of [issue #2769](https://github.com/lbryio/lbry-sdk/issues/2769)
at the moment the `lbrynet` daemon will only work correctly with Python 3.7.
If Python 3.8+ is used, the daemon will start but the RPC server
may not accept messages, returning the following:
```
Could not connect to daemon. Are you sure it's running?
```
After installing python 3, you'll need to install some additional libraries depending on your operating system.
### macOS
macOS users will need to install [xcode command line tools](https://developer.xamarin.com/guides/testcloud/calabash/configuring/osx/install-xcode-command-line-tools/) and [homebrew](http://brew.sh/).
These environment variables also need to be set:
```
PYTHONUNBUFFERED=1
EVENT_NOKQUEUE=1
```
1. PYTHONUNBUFFERED=1
2. EVENT_NOKQUEUE=1
Remaining dependencies can then be installed by running:
```
brew install python protobuf
```
@ -40,17 +31,14 @@ Assistance installing Python3: https://docs.python-guide.org/starting/install3/o
### Linux
On Ubuntu (we recommend 18.04 or 20.04), install the following:
On Ubuntu (16.04 minimum, we recommend 18.04), install the following:
```
sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt-get update
sudo apt-get install build-essential python3.7 python3.7-dev git python3.7-venv libssl-dev python-protobuf
```
The [deadsnakes PPA](https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa) provides Python 3.7
for those Ubuntu distributions that no longer have it in their
official repositories.
On Raspbian, you will also need to install `python-pyparsing`.
If you're running another Linux distro, install the equivalent of the above packages for your system.
@ -59,119 +47,62 @@ If you're running another Linux distro, install the equivalent of the above pack
### Linux/Mac
Clone the repository:
```bash
git clone https://github.com/lbryio/lbry-sdk.git
cd lbry-sdk
```
To install on Linux/Mac:
Create a Python virtual environment for lbry-sdk:
```bash
python3.7 -m venv lbry-venv
```
```
Clone the repository:
$ git clone https://github.com/lbryio/lbry-sdk.git
$ cd lbry-sdk
Activate virtual environment:
```bash
source lbry-venv/bin/activate
```
Create a Python virtual environment for lbry-sdk:
$ python3.7 -m venv lbry-venv
Activating lbry-sdk virtual environment:
$ source lbry-venv/bin/activate
Make sure you're on Python 3.7+ (as the default Python in virtual environment):
$ python --version
Make sure you're on Python 3.7+ as default in the virtual environment:
```bash
python --version
```
Install packages:
$ make install
Install packages:
```bash
make install
```
If you are on Linux and using PyCharm, generate the initial configs:
$ make idea
```
If you are on Linux and using PyCharm, generate the initial configs:
```bash
make idea
```
To verify your installation, `which lbrynet` should return a path inside
of the `lbry-venv` folder.
```bash
(lbry-venv) $ which lbrynet
/opt/lbry-sdk/lbry-venv/bin/lbrynet
```
To exit the virtual environment simply use the command `deactivate`.
To verify your installation, `which lbrynet` should return a path inside of the `lbry-venv` folder created by the `python3.7 -m venv lbry-venv` command.
### Windows
Clone the repository:
```bash
git clone https://github.com/lbryio/lbry-sdk.git
cd lbry-sdk
```
To install on Windows:
Create a Python virtual environment for lbry-sdk:
```bash
python -m venv lbry-venv
```
```
Clone the repository:
> git clone https://github.com/lbryio/lbry-sdk.git
> cd lbry-sdk
Activate virtual environment:
```bash
lbry-venv\Scripts\activate
```
Create a Python virtual environment for lbry-sdk:
> python -m venv lbry-venv
Install packages:
```bash
pip install -e .
```
Activating lbry-sdk virtual environment:
> lbry-venv\Scripts\activate
Install packages:
> pip install -e .
```
## Run the tests
### Elasticsearch
For running integration tests, Elasticsearch is required to be available at localhost:9200/
To run the unit tests from the repo directory:
The easiest way to start it is using docker with:
```bash
make elastic-docker
```
Alternative installation methods are available [at Elasticsearch website](https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html).
To run the unit and integration tests from the repo directory:
```
python -m unittest discover tests.unit
python -m unittest discover tests.integration
```
```
python -m unittest discover tests.unit
```
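Aside: the integration tests expect Elasticsearch on localhost:9200. As an optional sanity check (not part of the documented steps), a short Python probe could look like this:

```python
import json
from urllib.request import urlopen

# Optional probe: confirm the local Elasticsearch used by the integration tests is answering.
try:
    with urlopen("http://localhost:9200/", timeout=5) as resp:
        info = json.load(resp)
    print("Elasticsearch", info.get("version", {}).get("number"), "is up")
except OSError:
    print("Elasticsearch is not reachable on localhost:9200")
```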
## Usage
To start the API server:
```
lbrynet start
```
`lbrynet start`
Whenever the code inside [lbry-sdk/lbry](./lbry)
is modified we should run `make install` to recompile the `lbrynet`
executable with the newest code.
## Development
When developing, remember to enter the environment,
and, if you wish, start the server interactively.
```bash
$ source lbry-venv/bin/activate
(lbry-venv) $ python lbry/extras/cli.py start
```
Parameters can be passed in the same way.
```bash
(lbry-venv) $ python lbry/extras/cli.py wallet balance
```
If a Python debugger (`pdb` or `ipdb`) is installed we can also start it
in this way, set up break points, and step through the code.
```bash
(lbry-venv) $ pip install ipdb
(lbry-venv) $ ipdb lbry/extras/cli.py
```
Happy hacking!


@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2015-2022 LBRY Inc
Copyright (c) 2015-2020 LBRY Inc
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,


@ -1,26 +1,24 @@
.PHONY: install tools lint test test-unit test-unit-coverage test-integration idea
.PHONY: install tools lint test idea
install:
pip install https://s3.amazonaws.com/files.lbry.io/python_libtorrent-1.2.4-py3-none-any.whl
CFLAGS="-DSQLITE_MAX_VARIABLE_NUMBER=2500000" pip install -U https://github.com/rogerbinns/apsw/releases/download/3.30.1-r1/apsw-3.30.1-r1.zip \
--global-option=fetch \
--global-option=--version --global-option=3.30.1 --global-option=--all \
--global-option=build --global-option=--enable --global-option=fts5
pip install -e .
tools:
pip install mypy==0.701 pylint==2.4.4
pip install coverage astroid pylint
lint:
pylint --rcfile=setup.cfg lbry
#mypy --ignore-missing-imports lbry
test: test-unit test-integration
test-unit:
python -m unittest discover tests.unit
test-unit-coverage:
coverage run --source=lbry -m unittest discover -vv tests.unit
test-integration:
test:
tox
idea:
mkdir -p .idea
cp -r scripts/idea/* .idea
elastic-docker:
docker run -d -v lbryhub:/usr/share/elasticsearch/data -p 9200:9200 -p 9300:9300 -e"ES_JAVA_OPTS=-Xms512m -Xmx512m" -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.12.1


@ -1,10 +1,10 @@
# <img src="https://raw.githubusercontent.com/lbryio/lbry-sdk/master/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![build](https://github.com/lbryio/lbry-sdk/actions/workflows/main.yml/badge.svg)](https://github.com/lbryio/lbry-sdk/actions/workflows/main.yml) [![coverage](https://coveralls.io/repos/github/lbryio/lbry-sdk/badge.svg)](https://coveralls.io/github/lbryio/lbry-sdk)
# <img src="https://raw.githubusercontent.com/lbryio/lbry-sdk/master/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![Gitlab CI Badge](https://ci.lbry.tech/lbry/lbry-sdk/badges/master/pipeline.svg)](https://ci.lbry.tech/lbry/lbry-sdk)
LBRY is a decentralized peer-to-peer protocol for publishing and accessing digital content. It utilizes the [LBRY blockchain](https://github.com/lbryio/lbrycrd) as a global namespace and database of digital content. Blockchain entries contain searchable content metadata, identities, rights and access rules. LBRY also provides a data network that consists of peers (seeders) uploading and downloading data from other peers, possibly in exchange for payments, as well as a distributed hash table used by peers to discover other peers.
LBRY SDK for Python is currently the most fully featured implementation of the LBRY Network protocols and includes many useful components and tools for building decentralized applications. Primary features and components include:
* Built on Python 3.7 and `asyncio`.
* Built on Python 3.7+ and `asyncio`.
* Kademlia DHT (Distributed Hash Table) implementation for finding peers to download from and announcing to peers what we have to host ([lbry.dht](https://github.com/lbryio/lbry-sdk/tree/master/lbry/dht)).
* Blob exchange protocol for transferring encrypted blobs of content and negotiating payments ([lbry.blob_exchange](https://github.com/lbryio/lbry-sdk/tree/master/lbry/blob_exchange)).
* Protobuf schema for encoding and decoding metadata stored on the blockchain ([lbry.schema](https://github.com/lbryio/lbry-sdk/tree/master/lbry/schema)).
@ -41,7 +41,7 @@ This project is MIT licensed. For the full license, see [LICENSE](LICENSE).
## Security
We take security seriously. Please contact security@lbry.com regarding any security issues. [Our PGP key is here](https://lbry.com/faq/pgp-key) if you need it.
We take security seriously. Please contact security@lbry.com regarding any security issues. [Our GPG key is here](https://lbry.com/faq/gpg-key) if you need it.
## Contact


@ -1,9 +0,0 @@
# Security Policy
## Supported Versions
While we are not at v1.0 yet, only the latest release will be supported.
## Reporting a Vulnerability
See https://lbry.com/faq/security


@ -1,43 +0,0 @@
FROM debian:10-slim
ARG user=lbry
ARG projects_dir=/home/$user
ARG db_dir=/database
ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
RUN apt-get update && \
apt-get -y --no-install-recommends install \
wget \
automake libtool \
tar unzip \
build-essential \
pkg-config \
libleveldb-dev \
python3.7 \
python3-dev \
python3-pip \
python3-wheel \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
COPY . $projects_dir
RUN chown -R $user:$user $projects_dir
RUN mkdir -p $db_dir
RUN chown -R $user:$user $db_dir
USER $user
WORKDIR $projects_dir
RUN python3 -m pip install -U setuptools pip
RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf
VOLUME $db_dir
ENTRYPOINT ["python3", "scripts/dht_node.py"]


@ -1,4 +1,4 @@
FROM debian:10-slim
FROM ubuntu:20.04
ARG user=lbry
ARG db_dir=/database
@ -13,14 +13,10 @@ RUN apt-get update && \
wget \
tar unzip \
build-essential \
automake libtool \
pkg-config \
libleveldb-dev \
python3.7 \
python3 \
python3-dev \
python3-pip \
python3-wheel \
python3-cffi \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*


@ -1,45 +0,0 @@
FROM debian:10-slim
ARG user=lbry
ARG downloads_dir=/database
ARG projects_dir=/home/$user
ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
RUN apt-get update && \
apt-get -y --no-install-recommends install \
wget \
automake libtool \
tar unzip \
build-essential \
pkg-config \
libleveldb-dev \
python3.7 \
python3-dev \
python3-pip \
python3-wheel \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
RUN mkdir -p $downloads_dir
RUN chown -R $user:$user $downloads_dir
COPY . $projects_dir
RUN chown -R $user:$user $projects_dir
USER $user
WORKDIR $projects_dir
RUN pip install uvloop
RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf
# entry point
VOLUME $downloads_dir
COPY ./docker/webconf.yaml /webconf.yaml
ENTRYPOINT ["/home/lbry/.local/bin/lbrynet", "start", "--config=/webconf.yaml"]


@ -1,9 +0,0 @@
### How to run with docker-compose
1. Edit config file and after that fix permissions with
```
sudo chown -R 999:999 webconf.yaml
```
2. Start SDK with
```
docker-compose up -d
```


@ -1,49 +1,36 @@
version: "3"
volumes:
lbrycrd:
wallet_server:
es01:
services:
wallet_server:
depends_on:
- es01
image: lbry/wallet-server:${WALLET_SERVER_TAG:-latest-release}
lbrycrd:
image: lbry/lbrycrd:${LBRYCRD_TAG:-latest-release}
restart: always
ports: # accessible from host
- "9246:9246" # rpc port
expose: # internal to docker network. also this doesn't do anything. its for documentation only.
- "9245" # node-to-node comms port
volumes:
- "lbrycrd:/data/.lbrycrd"
environment:
- RUN_MODE=default
# Currently no snapshot provided
#- SNAPSHOT_URL=${LBRYCRD_SNAPSHOT_URL-https://lbry.com/snapshot/blockchain}
- RPC_ALLOW_IP=0.0.0.0/0
wallet_server:
image: lbry/wallet-server:${WALLET_SERVER_TAG:-latest-release}
depends_on:
- lbrycrd
restart: always
network_mode: host
ports:
- "50001:50001" # rpc port
- "2112:2112" # uncomment to enable prometheus
- "50005:50005" # websocket port
#- "2112:2112" # uncomment to enable prometheus
volumes:
- "wallet_server:/database"
environment:
- DAEMON_URL=http://lbry:lbry@127.0.0.1:9245
- MAX_QUERY_WORKERS=4
- CACHE_MB=1024
- CACHE_ALL_TX_HASHES=
- CACHE_ALL_CLAIM_TXOS=
- MAX_SEND=1000000000000000000
- MAX_RECEIVE=1000000000000000000
- MAX_SESSIONS=100000
- HOST=0.0.0.0
- TCP_PORT=50001
- PROMETHEUS_PORT=2112
- FILTERING_CHANNEL_IDS=770bd7ecba84fd2f7607fb15aedd2b172c2e153f 95e5db68a3101df19763f3a5182e4b12ba393ee8
- BLOCKING_CHANNEL_IDS=dd687b357950f6f271999971f43c785e8067c3a9 06871aa438032244202840ec59a469b303257cad b4a2528f436eca1bf3bf3e10ff3f98c57bd6c4c6
es01:
image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0
container_name: es01
environment:
- node.name=es01
- discovery.type=single-node
- indices.query.bool.max_clause_count=8192
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms4g -Xmx4g" # no more than 32, remember to disable swap
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- es01:/usr/share/elasticsearch/data
ports:
- 127.0.0.1:9200:9200
# Currently no snapshot provided
# - SNAPSHOT_URL=${WALLET_SERVER_SNAPSHOT_URL-https://lbry.com/snapshot/wallet}
- DAEMON_URL=http://lbry:lbry@lbrycrd:9245
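Aside: the image tags above use shell-style defaults such as `${WALLET_SERVER_TAG:-latest-release}`. A rough Python analog of that fallback, for illustration only:

```python
import os

# ${WALLET_SERVER_TAG:-latest-release} falls back when the variable is unset or empty;
# "or" reproduces that behaviour here.
tag = os.getenv("WALLET_SERVER_TAG") or "latest-release"
print(f"lbry/wallet-server:{tag}")
```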


@ -1,9 +0,0 @@
version: '3'
services:
websdk:
image: vshyba/websdk
ports:
- '5279:5279'
- '5280:5280'
volumes:
- ./webconf.yaml:/webconf.yaml


@ -20,7 +20,7 @@ def _check_and_set(d: dict, key: str, value: str):
def main():
build_info = {item: build_info_mod.__dict__[item] for item in dir(build_info_mod) if not item.startswith("__")}
commit_hash = os.getenv('DOCKER_COMMIT', os.getenv('GITHUB_SHA'))
commit_hash = os.getenv('DOCKER_COMMIT', os.getenv('CI_COMMIT_SHA', os.getenv('TRAVIS_COMMIT')))
if commit_hash is None:
raise ValueError("Commit hash not found in env vars")
_check_and_set(build_info, "COMMIT_HASH", commit_hash[:6])
@ -30,10 +30,8 @@ def main():
_check_and_set(build_info, "DOCKER_TAG", docker_tag)
_check_and_set(build_info, "BUILD", "docker")
else:
if re.match(r'refs/tags/v\d+\.\d+\.\d+$', str(os.getenv('GITHUB_REF'))):
_check_and_set(build_info, "BUILD", "release")
else:
_check_and_set(build_info, "BUILD", "qa")
ci_tag = os.getenv('CI_COMMIT_TAG', os.getenv('TRAVIS_TAG'))
_check_and_set(build_info, "BUILD", "release" if re.match(r'v\d+\.\d+\.\d+$', str(ci_tag)) else "qa")
log.debug("build info: %s", ", ".join([f"{k}={v}" for k, v in build_info.items()]))
with open(build_info_mod.__file__, 'w') as f:
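Aside: the hunk above changes how `docker/set_build.py` decides between a release and a QA build. A minimal sketch of the master-side decision (release only for `refs/tags/vX.Y.Z`):

```python
import re

# Release only when GITHUB_REF points at a vX.Y.Z tag, otherwise mark the build as qa,
# matching the regex shown in the hunk above.
def build_type(github_ref: str) -> str:
    return "release" if re.match(r'refs/tags/v\d+\.\d+\.\d+$', github_ref) else "qa"

print(build_type("refs/tags/v0.113.0"))  # -> release
print(build_type("refs/heads/master"))   # -> qa
```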


@ -6,7 +6,7 @@ set -euo pipefail
SNAPSHOT_URL="${SNAPSHOT_URL:-}" #off by default. latest snapshot at https://lbry.com/snapshot/wallet
if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/lbry-leveldb ]]; then
if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/claims.db ]]; then
files="$(ls)"
echo "Downloading wallet snapshot from $SNAPSHOT_URL"
wget --no-verbose --trust-server-names --content-disposition "$SNAPSHOT_URL"
@ -20,6 +20,4 @@ if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/lbry-leveldb ]]; then
rm "$filename"
fi
/home/lbry/.local/bin/lbry-hub-elastic-sync
echo 'starting server'
/home/lbry/.local/bin/lbry-hub "$@"
/home/lbry/.local/bin/torba-server "$@"


@ -1,9 +0,0 @@
allowed_origin: "*"
max_key_fee: "0.0 USD"
save_files: false
save_blobs: false
streaming_server: "0.0.0.0:5280"
api: "0.0.0.0:5279"
data_dir: /tmp
download_dir: /tmp
wallet_dir: /tmp

File diff suppressed because one or more lines are too long


@ -1,2 +1,2 @@
__version__ = "0.113.0"
__version__ = "0.90.1"
version = tuple(map(int, __version__.split('.'))) # pylint: disable=invalid-name
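For reference, the `version` tuple is just the dotted string split and converted to integers; evaluating the line above with the v0.90.1 value gives:

```python
__version__ = "0.90.1"
version = tuple(map(int, __version__.split('.')))  # same expression as in the diff
print(version)  # -> (0, 90, 1)
```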


@ -1,6 +1,5 @@
import os
import re
import time
import asyncio
import binascii
import logging
@ -71,27 +70,21 @@ class AbstractBlob:
'writers',
'verified',
'writing',
'readers',
'added_on',
'is_mine',
'readers'
]
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False,
):
def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None):
self.loop = loop
self.blob_hash = blob_hash
self.length = length
self.blob_completed_callback = blob_completed_callback
self.blob_directory = blob_directory
self.writers: typing.Dict[typing.Tuple[typing.Optional[str], typing.Optional[int]], HashBlobWriter] = {}
self.verified: asyncio.Event = asyncio.Event()
self.writing: asyncio.Event = asyncio.Event()
self.verified: asyncio.Event = asyncio.Event(loop=self.loop)
self.writing: asyncio.Event = asyncio.Event(loop=self.loop)
self.readers: typing.List[typing.BinaryIO] = []
self.added_on = added_on or time.time()
self.is_mine = is_mine
if not is_valid_blobhash(blob_hash):
raise InvalidBlobHashError(blob_hash)
@ -187,21 +180,20 @@ class AbstractBlob:
@classmethod
async def create_from_unencrypted(
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int, added_on: int, is_mine: bool,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None,
) -> BlobInfo:
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None) -> BlobInfo:
"""
Create an encrypted BlobFile from plaintext bytes
"""
blob_bytes, blob_hash = encrypt_blob_bytes(key, iv, unencrypted)
length = len(blob_bytes)
blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir, added_on, is_mine)
blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir)
writer = blob.get_blob_writer()
writer.write(blob_bytes)
await blob.verified.wait()
return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), added_on, blob_hash, is_mine)
return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), blob_hash)
def save_verified_blob(self, verified_bytes: bytes):
if self.verified.is_set():
@ -222,7 +214,7 @@ class AbstractBlob:
peer_port: typing.Optional[int] = None) -> HashBlobWriter:
if (peer_address, peer_port) in self.writers and not self.writers[(peer_address, peer_port)].closed():
raise OSError(f"attempted to download blob twice from {peer_address}:{peer_port}")
fut = asyncio.Future()
fut = asyncio.Future(loop=self.loop)
writer = HashBlobWriter(self.blob_hash, self.get_length, fut)
self.writers[(peer_address, peer_port)] = writer
@ -256,13 +248,11 @@ class BlobBuffer(AbstractBlob):
"""
An in-memory only blob
"""
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
):
def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None):
self._verified_bytes: typing.Optional[BytesIO] = None
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory)
@contextlib.contextmanager
def _reader_context(self) -> typing.ContextManager[typing.BinaryIO]:
@ -299,12 +289,10 @@ class BlobFile(AbstractBlob):
"""
A blob existing on the local file system
"""
def __init__(
self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
):
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)
def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
blob_directory: typing.Optional[str] = None):
super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory)
if not blob_directory or not os.path.isdir(blob_directory):
raise OSError(f"invalid blob directory '{blob_directory}'")
self.file_path = os.path.join(self.blob_directory, self.blob_hash)
@ -355,12 +343,12 @@ class BlobFile(AbstractBlob):
@classmethod
async def create_from_unencrypted(
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int, added_on: float, is_mine: bool,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None
) -> BlobInfo:
cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
unencrypted: bytes, blob_num: int,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'],
asyncio.Task]] = None) -> BlobInfo:
if not blob_dir or not os.path.isdir(blob_dir):
raise OSError(f"cannot create blob in directory: '{blob_dir}'")
return await super().create_from_unencrypted(
loop, blob_dir, key, iv, unencrypted, blob_num, added_on, is_mine, blob_completed_callback
loop, blob_dir, key, iv, unencrypted, blob_num, blob_completed_callback
)
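Aside: several changes in this file drop the explicit `loop=` argument. The `loop=` parameter of synchronization primitives such as `asyncio.Event` was deprecated in Python 3.8 and removed in 3.10; primitives created inside a running coroutine bind to the running loop automatically. A minimal illustration (not lbry code):

```python
import asyncio

async def main():
    verified = asyncio.Event()                        # no loop= argument on Python 3.10+
    fut = asyncio.get_running_loop().create_future()  # preferred over asyncio.Future(loop=...)
    verified.set()
    fut.set_result("done")
    await verified.wait()
    print(await fut)

asyncio.run(main())
```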


@ -7,19 +7,13 @@ class BlobInfo:
'blob_num',
'length',
'iv',
'added_on',
'is_mine'
]
def __init__(
self, blob_num: int, length: int, iv: str, added_on,
blob_hash: typing.Optional[str] = None, is_mine=False):
def __init__(self, blob_num: int, length: int, iv: str, blob_hash: typing.Optional[str] = None):
self.blob_hash = blob_hash
self.blob_num = blob_num
self.length = length
self.iv = iv
self.added_on = added_on
self.is_mine = is_mine
def as_dict(self) -> typing.Dict:
d = {


@ -36,30 +36,30 @@ class BlobManager:
self.config.blob_lru_cache_size)
self.connection_manager = ConnectionManager(loop)
def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None, is_mine: bool = False):
def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None):
if self.config.save_blobs or (
is_valid_blobhash(blob_hash) and os.path.isfile(os.path.join(self.blob_dir, blob_hash))):
return BlobFile(
self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
self.loop, blob_hash, length, self.blob_completed, self.blob_dir
)
return BlobBuffer(
self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
self.loop, blob_hash, length, self.blob_completed, self.blob_dir
)
def get_blob(self, blob_hash, length: typing.Optional[int] = None, is_mine: bool = False):
def get_blob(self, blob_hash, length: typing.Optional[int] = None):
if blob_hash in self.blobs:
if self.config.save_blobs and isinstance(self.blobs[blob_hash], BlobBuffer):
buffer = self.blobs.pop(blob_hash)
if blob_hash in self.completed_blob_hashes:
self.completed_blob_hashes.remove(blob_hash)
self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
self.blobs[blob_hash] = self._get_blob(blob_hash, length)
if buffer.is_readable():
with buffer.reader_context() as reader:
self.blobs[blob_hash].write_blob(reader.read())
if length and self.blobs[blob_hash].length is None:
self.blobs[blob_hash].set_length(length)
else:
self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
self.blobs[blob_hash] = self._get_blob(blob_hash, length)
return self.blobs[blob_hash]
def is_blob_verified(self, blob_hash: str, length: typing.Optional[int] = None) -> bool:
@ -83,8 +83,6 @@ class BlobManager:
to_add = await self.storage.sync_missing_blobs(in_blobfiles_dir)
if to_add:
self.completed_blob_hashes.update(to_add)
# check blobs that aren't set as finished but were seen on disk
await self.ensure_completed_blobs_status(in_blobfiles_dir - to_add)
if self.config.track_bandwidth:
self.connection_manager.start()
return True
@ -107,26 +105,13 @@ class BlobManager:
if isinstance(blob, BlobFile):
if blob.blob_hash not in self.completed_blob_hashes:
self.completed_blob_hashes.add(blob.blob_hash)
return self.loop.create_task(self.storage.add_blobs(
(blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=True)
)
return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=True))
else:
return self.loop.create_task(self.storage.add_blobs(
(blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=False)
)
return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=False))
async def ensure_completed_blobs_status(self, blob_hashes: typing.Iterable[str]):
"""Ensures that completed blobs from a given list of blob hashes are set as 'finished' in the database."""
to_add = []
for blob_hash in blob_hashes:
if not self.is_blob_verified(blob_hash):
continue
blob = self.get_blob(blob_hash)
to_add.append((blob.blob_hash, blob.length, blob.added_on, blob.is_mine))
if len(to_add) > 500:
await self.storage.add_blobs(*to_add, finished=True)
to_add.clear()
return await self.storage.add_blobs(*to_add, finished=True)
def check_completed_blobs(self, blob_hashes: typing.List[str]) -> typing.List[str]:
"""Returns of the blobhashes_to_check, which are valid"""
return [blob_hash for blob_hash in blob_hashes if self.is_blob_verified(blob_hash)]
def delete_blob(self, blob_hash: str):
if not is_valid_blobhash(blob_hash):
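Aside: `ensure_completed_blobs_status` above flushes database writes in batches of roughly 500 rows. A generic sketch of that batching pattern, with `flush_in_batches` as a made-up helper standing in for `storage.add_blobs(*rows, finished=True)`:

```python
from typing import Callable, Iterable, List, Tuple

def flush_in_batches(rows: Iterable[Tuple], flush: Callable[[List[Tuple]], None],
                     batch_size: int = 500) -> None:
    buffer: List[Tuple] = []
    for row in rows:
        buffer.append(row)
        if len(buffer) > batch_size:  # flush once more than batch_size rows have accumulated
            flush(buffer)
            buffer = []
    flush(buffer)  # the original also issues a final call with whatever is left

if __name__ == "__main__":
    flush_in_batches(((f"hash{i}", 2097152) for i in range(1200)),
                     lambda batch: print(f"flushed {len(batch)} rows"))
```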


@ -1,77 +0,0 @@
import asyncio
import logging
log = logging.getLogger(__name__)
class DiskSpaceManager:
def __init__(self, config, db, blob_manager, cleaning_interval=30 * 60, analytics=None):
self.config = config
self.db = db
self.blob_manager = blob_manager
self.cleaning_interval = cleaning_interval
self.running = False
self.task = None
self.analytics = analytics
self._used_space_bytes = None
async def get_free_space_mb(self, is_network_blob=False):
limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
space_used_mb = await self.get_space_used_mb()
space_used_mb = space_used_mb['network_storage'] if is_network_blob else space_used_mb['content_storage']
return max(0, limit_mb - space_used_mb)
async def get_space_used_bytes(self):
self._used_space_bytes = await self.db.get_stored_blob_disk_usage()
return self._used_space_bytes
async def get_space_used_mb(self, cached=True):
cached = cached and self._used_space_bytes is not None
space_used_bytes = self._used_space_bytes if cached else await self.get_space_used_bytes()
return {key: int(value/1024.0/1024.0) for key, value in space_used_bytes.items()}
async def clean(self):
await self._clean(False)
await self._clean(True)
async def _clean(self, is_network_blob=False):
space_used_mb = await self.get_space_used_mb(cached=False)
if is_network_blob:
space_used_mb = space_used_mb['network_storage']
else:
space_used_mb = space_used_mb['content_storage'] + space_used_mb['private_storage']
storage_limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
if self.analytics:
asyncio.create_task(
self.analytics.send_disk_space_used(space_used_mb, storage_limit_mb, is_network_blob)
)
delete = []
available = storage_limit_mb - space_used_mb
if storage_limit_mb == 0 if not is_network_blob else available >= 0:
return 0
for blob_hash, file_size, _ in await self.db.get_stored_blobs(is_mine=False, is_network_blob=is_network_blob):
delete.append(blob_hash)
available += int(file_size/1024.0/1024.0)
if available >= 0:
break
if delete:
await self.db.stop_all_files()
await self.blob_manager.delete_blobs(delete, delete_from_db=True)
self._used_space_bytes = None
return len(delete)
async def cleaning_loop(self):
while self.running:
await asyncio.sleep(self.cleaning_interval)
await self.clean()
async def start(self):
self.running = True
self.task = asyncio.create_task(self.cleaning_loop())
self.task.add_done_callback(lambda _: log.info("Stopping blob cleanup service."))
async def stop(self):
if self.running:
self.running = False
self.task.cancel()
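Aside: the deleted `DiskSpaceManager._clean` works by computing the (possibly negative) free space and deleting non-owned blobs until it is non-negative again. A worked example with made-up numbers:

```python
# Made-up numbers: 1000 MB limit, 1250 MB used, three candidate blobs (sizes in bytes).
storage_limit_mb = 1000
space_used_mb = 1250
candidate_blob_sizes = [300 * 1024 * 1024, 100 * 1024 * 1024, 50 * 1024 * 1024]

available = storage_limit_mb - space_used_mb  # -250: over the limit
deleted = 0
for size in candidate_blob_sizes:
    deleted += 1
    available += int(size / 1024.0 / 1024.0)  # same MB conversion as in the deleted code
    if available >= 0:
        break
print(deleted, "blob(s) deleted,", available, "MB headroom")  # -> 1 blob(s) deleted, 50 MB headroom
```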


@ -32,7 +32,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
self.buf = b''
# this is here to handle the race when the downloader is closed right as response_fut gets a result
self.closed = asyncio.Event()
self.closed = asyncio.Event(loop=self.loop)
def data_received(self, data: bytes):
if self.connection_manager:
@ -111,7 +111,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
self.transport.write(msg)
if self.connection_manager:
self.connection_manager.sent_data(f"{self.peer_address}:{self.peer_port}", len(msg))
response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout)
response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout, loop=self.loop)
availability_response = response.get_availability_response()
price_response = response.get_price_response()
blob_response = response.get_blob_response()
@ -151,7 +151,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
f" timeout in {self.peer_timeout}"
log.debug(msg)
msg = f"downloaded {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}"
await asyncio.wait_for(self.writer.finished, self.peer_timeout)
await asyncio.wait_for(self.writer.finished, self.peer_timeout, loop=self.loop)
# wait for the io to finish
await self.blob.verified.wait()
log.info("%s at %fMB/s", msg,
@ -187,7 +187,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
try:
self._blob_bytes_received = 0
self.blob, self.writer = blob, blob.get_blob_writer(self.peer_address, self.peer_port)
self._response_fut = asyncio.Future()
self._response_fut = asyncio.Future(loop=self.loop)
return await self._download_blob()
except OSError:
# i'm not sure how to fix this race condition - jack
@ -244,7 +244,7 @@ async def request_blob(loop: asyncio.AbstractEventLoop, blob: Optional['Abstract
try:
if not connected_protocol:
await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port),
peer_connect_timeout)
peer_connect_timeout, loop=loop)
connected_protocol = protocol
if blob is None or blob.get_is_verified() or not blob.is_writeable():
# blob is None happens when we are just opening a connection


@ -3,7 +3,6 @@ import typing
import logging
from lbry.utils import cache_concurrent
from lbry.blob_exchange.client import request_blob
from lbry.dht.node import get_kademlia_peers_from_hosts
if typing.TYPE_CHECKING:
from lbry.conf import Config
from lbry.dht.node import Node
@ -30,7 +29,7 @@ class BlobDownloader:
self.failures: typing.Dict['KademliaPeer', int] = {}
self.connection_failures: typing.Set['KademliaPeer'] = set()
self.connections: typing.Dict['KademliaPeer', 'BlobExchangeClientProtocol'] = {}
self.is_running = asyncio.Event()
self.is_running = asyncio.Event(loop=self.loop)
def should_race_continue(self, blob: 'AbstractBlob'):
max_probes = self.config.max_connections_per_download * (1 if self.connections else 10)
@ -64,8 +63,8 @@ class BlobDownloader:
self.scores[peer] = bytes_received / elapsed if bytes_received and elapsed else 1
async def new_peer_or_finished(self):
active_tasks = list(self.active_connections.values()) + [asyncio.create_task(asyncio.sleep(1))]
await asyncio.wait(active_tasks, return_when='FIRST_COMPLETED')
active_tasks = list(self.active_connections.values()) + [asyncio.sleep(1)]
await asyncio.wait(active_tasks, loop=self.loop, return_when='FIRST_COMPLETED')
def cleanup_active(self):
if not self.active_connections and not self.connections:
@ -88,6 +87,7 @@ class BlobDownloader:
if blob.get_is_verified():
return blob
self.is_running.set()
tried_for_this_blob: typing.Set['KademliaPeer'] = set()
try:
while not blob.get_is_verified() and self.is_running.is_set():
batch: typing.Set['KademliaPeer'] = set(self.connections.keys())
@ -97,15 +97,24 @@ class BlobDownloader:
"%s running, %d peers, %d ignored, %d active, %s connections", blob_hash[:6],
len(batch), len(self.ignored), len(self.active_connections), len(self.connections)
)
re_add: typing.Set['KademliaPeer'] = set()
for peer in sorted(batch, key=lambda peer: self.scores.get(peer, 0), reverse=True):
if peer in self.ignored:
continue
if peer in self.active_connections or not self.should_race_continue(blob):
if peer in tried_for_this_blob:
continue
if peer in self.active_connections:
if peer not in re_add:
re_add.add(peer)
continue
if not self.should_race_continue(blob):
break
log.debug("request %s from %s:%i", blob_hash[:8], peer.address, peer.tcp_port)
t = self.loop.create_task(self.request_blob_from_peer(blob, peer, connection_id))
self.active_connections[peer] = t
self.peer_queue.put_nowait(list(batch))
tried_for_this_blob.add(peer)
if not re_add:
self.peer_queue.put_nowait(list(batch))
await self.new_peer_or_finished()
self.cleanup_active()
log.debug("downloaded %s", blob_hash[:8])
@ -124,14 +133,11 @@ class BlobDownloader:
protocol.close()
async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node',
async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', node: 'Node',
blob_hash: str) -> 'AbstractBlob':
search_queue = asyncio.Queue(maxsize=config.max_connections_per_download)
search_queue = asyncio.Queue(loop=loop, maxsize=config.max_connections_per_download)
search_queue.put_nowait(blob_hash)
peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue)
fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers)
if fixed_peers:
loop.call_later(config.fixed_peer_delay, peer_queue.put_nowait, fixed_peers)
peer_queue, accumulate_task = node.accumulate_peers(search_queue)
downloader = BlobDownloader(loop, config, blob_manager, peer_queue)
try:
return await downloader.download_blob(blob_hash)
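Aside: in `new_peer_or_finished` the master side wraps `asyncio.sleep(1)` in `create_task` before calling `asyncio.wait`. Passing bare coroutines to `asyncio.wait` was deprecated in Python 3.8 and removed in 3.11, so only Tasks and Futures are accepted. A minimal illustration (not lbry code):

```python
import asyncio

async def main():
    active_tasks = [asyncio.create_task(asyncio.sleep(0.1)),
                    asyncio.create_task(asyncio.sleep(5))]
    done, pending = await asyncio.wait(active_tasks, return_when=asyncio.FIRST_COMPLETED)
    print(f"{len(done)} done, {len(pending)} pending")
    for task in pending:
        task.cancel()  # mirror cleanup of the still-running attempts

asyncio.run(main())
```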


@ -1,7 +1,6 @@
import asyncio
import binascii
import logging
import socket
import typing
from json.decoder import JSONDecodeError
from lbry.blob_exchange.serialization import BlobResponse, BlobRequest, blob_response_types
@ -25,19 +24,19 @@ class BlobServerProtocol(asyncio.Protocol):
self.idle_timeout = idle_timeout
self.transfer_timeout = transfer_timeout
self.server_task: typing.Optional[asyncio.Task] = None
self.started_listening = asyncio.Event()
self.started_listening = asyncio.Event(loop=self.loop)
self.buf = b''
self.transport: typing.Optional[asyncio.Transport] = None
self.lbrycrd_address = lbrycrd_address
self.peer_address_and_port: typing.Optional[str] = None
self.started_transfer = asyncio.Event()
self.transfer_finished = asyncio.Event()
self.started_transfer = asyncio.Event(loop=self.loop)
self.transfer_finished = asyncio.Event(loop=self.loop)
self.close_on_idle_task: typing.Optional[asyncio.Task] = None
async def close_on_idle(self):
while self.transport:
try:
await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout)
await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout, loop=self.loop)
except asyncio.TimeoutError:
log.debug("closing idle connection from %s", self.peer_address_and_port)
return self.close()
@ -101,7 +100,7 @@ class BlobServerProtocol(asyncio.Protocol):
log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
self.started_transfer.set()
try:
sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout)
sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout, loop=self.loop)
if sent and sent > 0:
self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent)
log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port)
@ -138,7 +137,7 @@ class BlobServerProtocol(asyncio.Protocol):
try:
request = BlobRequest.deserialize(self.buf + data)
self.buf = remainder
except (UnicodeDecodeError, JSONDecodeError):
except JSONDecodeError:
log.error("request from %s is not valid json (%i bytes): %s", self.peer_address_and_port,
len(self.buf + data), '' if not data else binascii.hexlify(self.buf + data).decode())
self.close()
@ -157,7 +156,7 @@ class BlobServer:
self.loop = loop
self.blob_manager = blob_manager
self.server_task: typing.Optional[asyncio.Task] = None
self.started_listening = asyncio.Event()
self.started_listening = asyncio.Event(loop=self.loop)
self.lbrycrd_address = lbrycrd_address
self.idle_timeout = idle_timeout
self.transfer_timeout = transfer_timeout
@ -168,13 +167,6 @@ class BlobServer:
raise Exception("already running")
async def _start_server():
# checking if the port is in use
# thx https://stackoverflow.com/a/52872579
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if s.connect_ex(('localhost', port)) == 0:
# the port is already in use!
log.error("Failed to bind TCP %s:%d", interface, port)
server = await self.loop.create_server(
lambda: self.server_protocol_class(self.loop, self.blob_manager, self.lbrycrd_address,
self.idle_timeout, self.transfer_timeout),
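Aside: the master side of this diff adds a pre-bind probe that logs an error when the blob server port is already taken. The check relies on `connect_ex` returning 0 when something is listening; a standalone sketch:

```python
import socket

def port_in_use(host: str, port: int) -> bool:
    # connect_ex returns 0 when a connection succeeds, i.e. the port is already in use.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex((host, port)) == 0

if __name__ == "__main__":
    print(port_in_use("localhost", 4444))  # True if a blob server is already bound there
```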


@ -1,8 +1,8 @@
import os
import re
import sys
import typing
import logging
from typing import List, Dict, Tuple, Union, TypeVar, Generic, Optional
from argparse import ArgumentParser
from contextlib import contextmanager
from appdirs import user_data_dir, user_config_dir
@ -15,7 +15,7 @@ log = logging.getLogger(__name__)
NOT_SET = type('NOT_SET', (object,), {}) # pylint: disable=invalid-name
T = TypeVar('T')
T = typing.TypeVar('T')
CURRENCIES = {
'BTC': {'type': 'crypto'},
@ -24,11 +24,11 @@ CURRENCIES = {
}
class Setting(Generic[T]):
class Setting(typing.Generic[T]):
def __init__(self, doc: str, default: Optional[T] = None,
previous_names: Optional[List[str]] = None,
metavar: Optional[str] = None):
def __init__(self, doc: str, default: typing.Optional[T] = None,
previous_names: typing.Optional[typing.List[str]] = None,
metavar: typing.Optional[str] = None):
self.doc = doc
self.default = default
self.previous_names = previous_names or []
@ -45,7 +45,7 @@ class Setting(Generic[T]):
def no_cli_name(self):
return f"--no-{self.name.replace('_', '-')}"
def __get__(self, obj: Optional['BaseConfig'], owner) -> T:
def __get__(self, obj: typing.Optional['BaseConfig'], owner) -> T:
if obj is None:
return self
for location in obj.search_order:
@ -53,7 +53,7 @@ class Setting(Generic[T]):
return location[self.name]
return self.default
def __set__(self, obj: 'BaseConfig', val: Union[T, NOT_SET]):
def __set__(self, obj: 'BaseConfig', val: typing.Union[T, NOT_SET]):
if val == NOT_SET:
for location in obj.modify_order:
if self.name in location:
@ -63,18 +63,6 @@ class Setting(Generic[T]):
for location in obj.modify_order:
location[self.name] = val
def is_set(self, obj: 'BaseConfig') -> bool:
for location in obj.search_order:
if self.name in location:
return True
return False
def is_set_to_default(self, obj: 'BaseConfig') -> bool:
for location in obj.search_order:
if self.name in location:
return location[self.name] == self.default
return False
def validate(self, value):
raise NotImplementedError()
@ -99,7 +87,7 @@ class String(Setting[str]):
f"Setting '{self.name}' must be a string."
# TODO: removes this after pylint starts to understand generics
def __get__(self, obj: Optional['BaseConfig'], owner) -> str: # pylint: disable=useless-super-delegation
def __get__(self, obj: typing.Optional['BaseConfig'], owner) -> str: # pylint: disable=useless-super-delegation
return super().__get__(obj, owner)
@ -203,7 +191,7 @@ class MaxKeyFee(Setting[dict]):
)
parser.add_argument(
self.no_cli_name,
help="Disable maximum key fee check.",
help=f"Disable maximum key fee check.",
dest=self.name,
const=None,
action="store_const",
@ -212,7 +200,7 @@ class MaxKeyFee(Setting[dict]):
class StringChoice(String):
def __init__(self, doc: str, valid_values: List[str], default: str, *args, **kwargs):
def __init__(self, doc: str, valid_values: typing.List[str], default: str, *args, **kwargs):
super().__init__(doc, default, *args, **kwargs)
if not valid_values:
raise ValueError("No valid values provided")
@ -285,75 +273,6 @@ class Strings(ListSetting):
f"'{self.name}' must be a string."
class KnownHubsList:
def __init__(self, config: 'Config' = None, file_name: str = 'known_hubs.yml'):
self.file_name = file_name
self.path = os.path.join(config.wallet_dir, self.file_name) if config else None
self.hubs: Dict[Tuple[str, int], Dict] = {}
if self.exists:
self.load()
@property
def exists(self):
return self.path and os.path.exists(self.path)
@property
def serialized(self) -> Dict[str, Dict]:
return {f"{host}:{port}": details for (host, port), details in self.hubs.items()}
def filter(self, match_none=False, **kwargs):
if not kwargs:
return self.hubs
result = {}
for hub, details in self.hubs.items():
for key, constraint in kwargs.items():
value = details.get(key)
if value == constraint or (match_none and value is None):
result[hub] = details
break
return result
def load(self):
if self.path:
with open(self.path, 'r') as known_hubs_file:
raw = known_hubs_file.read()
for hub, details in yaml.safe_load(raw).items():
self.set(hub, details)
def save(self):
if self.path:
with open(self.path, 'w') as known_hubs_file:
known_hubs_file.write(yaml.safe_dump(self.serialized, default_flow_style=False))
def set(self, hub: str, details: Dict):
if hub and hub.count(':') == 1:
host, port = hub.split(':')
hub_parts = (host, int(port))
if hub_parts not in self.hubs:
self.hubs[hub_parts] = details
return hub
def add_hubs(self, hubs: List[str]):
added = False
for hub in hubs:
if self.set(hub, {}) is not None:
added = True
return added
def items(self):
return self.hubs.items()
def __bool__(self):
return len(self) > 0
def __len__(self):
return self.hubs.__len__()
def __iter__(self):
return iter(self.hubs)
class EnvironmentAccess:
PREFIX = 'LBRY_'
@ -458,7 +377,7 @@ class ConfigFileAccess:
del self.data[key]
TBC = TypeVar('TBC', bound='BaseConfig')
TBC = typing.TypeVar('TBC', bound='BaseConfig')
class BaseConfig:
@ -589,9 +508,6 @@ class CLIConfig(TranscodeConfig):
class Config(CLIConfig):
jurisdiction = String("Limit interactions to wallet server in this jurisdiction.")
# directories
data_dir = Path("Directory path to store blobs.", metavar='DIR')
download_dir = Path(
@ -613,7 +529,7 @@ class Config(CLIConfig):
"ports or have firewall rules you likely want to disable this.", True
)
udp_port = Integer("UDP port for communicating on the LBRY DHT", 4444, previous_names=['dht_node_port'])
tcp_port = Integer("TCP port to listen for incoming blob requests", 4444, previous_names=['peer_port'])
tcp_port = Integer("TCP port to listen for incoming blob requests", 3333, previous_names=['peer_port'])
prometheus_port = Integer("Port to expose prometheus metrics (off by default)", 0)
network_interface = String("Interface to use for the DHT and blob exchange", '0.0.0.0')
@ -622,24 +538,17 @@ class Config(CLIConfig):
"Routing table bucket index below which we always split the bucket if given a new key to add to it and "
"the bucket is full. As this value is raised the depth of the routing table (and number of peers in it) "
"will increase. This setting is used by seed nodes, you probably don't want to change it during normal "
"use.", 2
)
is_bootstrap_node = Toggle(
"When running as a bootstrap node, disable all logic related to balancing the routing table, so we can "
"add as many peers as possible and better help first-runs.", False
"use.", 1
)
# protocol timeouts
download_timeout = Float("Cumulative timeout for a stream to begin downloading before giving up", 30.0)
blob_download_timeout = Float("Timeout to download a blob from a peer", 30.0)
hub_timeout = Float("Timeout when making a hub request", 30.0)
peer_connect_timeout = Float("Timeout to establish a TCP connection to a peer", 3.0)
node_rpc_timeout = Float("Timeout when making a DHT request", constants.RPC_TIMEOUT)
# blob announcement and download
save_blobs = Toggle("Save encrypted blob files for hosting, otherwise download blobs to memory only.", True)
network_storage_limit = Integer("Disk space in MB to be allocated for helping the P2P network. 0 = disable", 0)
blob_storage_limit = Integer("Disk space in MB to be allocated for blob storage. 0 = no limit", 0)
blob_lru_cache_size = Integer(
"LRU cache size for decrypted downloaded blobs used to minimize re-downloading the same blobs when "
"replying to a range request. Set to 0 to disable.", 32
@ -656,7 +565,6 @@ class Config(CLIConfig):
"Maximum number of peers to connect to while downloading a blob", 4,
previous_names=['max_connections_per_stream']
)
concurrent_hub_requests = Integer("Maximum number of concurrent hub requests", 32)
fixed_peer_delay = Float(
"Amount of seconds before adding the reflector servers as potential peers to download from in case dht"
"peers are not found or are slow", 2.0
@ -685,14 +593,6 @@ class Config(CLIConfig):
('cdn.reflector.lbry.com', 5567)
])
tracker_servers = Servers("BitTorrent-compatible (BEP15) UDP trackers for helping P2P discovery", [
('tracker.lbry.com', 9252),
('tracker.lbry.grin.io', 9252),
('tracker.lbry.pigg.es', 9252),
('tracker.lizard.technology', 9252),
('s1.lbry.network', 9252),
])
lbryum_servers = Servers("SPV wallet servers", [
('spv11.lbry.com', 50001),
('spv12.lbry.com', 50001),
@ -703,27 +603,21 @@ class Config(CLIConfig):
('spv17.lbry.com', 50001),
('spv18.lbry.com', 50001),
('spv19.lbry.com', 50001),
('hub.lbry.grin.io', 50001),
('hub.lizard.technology', 50001),
('s1.lbry.network', 50001),
])
known_dht_nodes = Servers("Known nodes for bootstrapping connection to the DHT", [
('dht.lbry.grin.io', 4444), # Grin
('dht.lbry.madiator.com', 4444), # Madiator
('dht.lbry.pigg.es', 4444), # Pigges
('lbrynet1.lbry.com', 4444), # US EAST
('lbrynet2.lbry.com', 4444), # US WEST
('lbrynet3.lbry.com', 4444), # EU
('lbrynet4.lbry.com', 4444), # ASIA
('dht.lizard.technology', 4444), # Jack
('s2.lbry.network', 4444),
('lbrynet4.lbry.com', 4444) # ASIA
])
comment_server = String("Comment server API URL", "https://comments.lbry.com/api/v2")
# blockchain
blockchain_name = String("Blockchain name - lbrycrd_main, lbrycrd_regtest, or lbrycrd_testnet", 'lbrycrd_main')
# daemon
save_files = Toggle("Save downloaded files when calling `get` by default", False)
save_files = Toggle("Save downloaded files when calling `get` by default", True)
components_to_skip = Strings("components which will be skipped during start-up of daemon", [])
share_usage_data = Toggle(
"Whether to share usage stats and diagnostic info with LBRY.", False,
@ -742,8 +636,7 @@ class Config(CLIConfig):
coin_selection_strategy = StringChoice(
"Strategy to use when selecting UTXOs for a transaction",
STRATEGIES, "prefer_confirmed"
)
STRATEGIES, "standard")
transaction_cache_size = Integer("Transaction cache size", 2 ** 17)
save_resolved_claims = Toggle(
@ -762,7 +655,6 @@ class Config(CLIConfig):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.set_default_paths()
self.known_hubs = KnownHubsList(self)
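Since every setting above is a typed descriptor with a default, overrides can be passed straight to the constructor. A minimal sketch, with illustrative paths and the defaults as listed in this file:
# Sketch: constructing a Config with a few overrides and reading settings back.
from lbry.conf import Config  # import path assumed

conf = Config(
    data_dir="/tmp/lbry-data",          # hypothetical paths, purely for illustration
    download_dir="/tmp/lbry-downloads",
    tcp_port=3333,                      # the v0.90.1 default; master defaults to 4444
)
print(conf.udp_port)     # unset, so the declared default (4444) is returned
print(conf.save_files)   # Toggle whose default differs between the two sides of this diff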
def set_default_paths(self):
if 'darwin' in sys.platform.lower():
@ -784,7 +676,7 @@ class Config(CLIConfig):
return os.path.join(self.data_dir, 'lbrynet.log')
def get_windows_directories() -> Tuple[str, str, str]:
def get_windows_directories() -> typing.Tuple[str, str, str]:
from lbry.winpaths import get_path, FOLDERID, UserHandle, \
PathNotFoundException # pylint: disable=import-outside-toplevel
@ -806,14 +698,14 @@ def get_windows_directories() -> Tuple[str, str, str]:
return data_dir, lbryum_dir, download_dir
def get_darwin_directories() -> Tuple[str, str, str]:
def get_darwin_directories() -> typing.Tuple[str, str, str]:
data_dir = user_data_dir('LBRY')
lbryum_dir = os.path.expanduser('~/.lbryum')
download_dir = os.path.expanduser('~/Downloads')
return data_dir, lbryum_dir, download_dir
def get_linux_directories() -> Tuple[str, str, str]:
def get_linux_directories() -> typing.Tuple[str, str, str]:
try:
with open(os.path.join(user_config_dir(), 'user-dirs.dirs'), 'r') as xdg:
down_dir = re.search(r'XDG_DOWNLOAD_DIR=(.+)', xdg.read())

View file

@ -67,7 +67,7 @@ class ConnectionManager:
while True:
last = time.perf_counter()
await asyncio.sleep(0.1)
await asyncio.sleep(0.1, loop=self.loop)
self._status['incoming_bps'].clear()
self._status['outgoing_bps'].clear()
now = time.perf_counter()

View file

@ -1,9 +1,6 @@
import asyncio
import typing
import logging
from prometheus_client import Counter, Gauge
if typing.TYPE_CHECKING:
from lbry.dht.node import Node
from lbry.extras.daemon.storage import SQLiteStorage
@ -12,59 +9,45 @@ log = logging.getLogger(__name__)
class BlobAnnouncer:
announcements_sent_metric = Counter(
"announcements_sent", "Number of announcements sent and their respective status.", namespace="dht_node",
labelnames=("peers", "error"),
)
announcement_queue_size_metric = Gauge(
"announcement_queue_size", "Number of hashes waiting to be announced.", namespace="dht_node",
labelnames=("scope",)
)
def __init__(self, loop: asyncio.AbstractEventLoop, node: 'Node', storage: 'SQLiteStorage'):
self.loop = loop
self.node = node
self.storage = storage
self.announce_task: asyncio.Task = None
self.announce_queue: typing.List[str] = []
self._done = asyncio.Event()
self.announced = set()
async def _run_consumer(self):
while self.announce_queue:
try:
blob_hash = self.announce_queue.pop()
peers = len(await self.node.announce_blob(blob_hash))
self.announcements_sent_metric.labels(peers=peers, error=False).inc()
if peers > 4:
self.announced.add(blob_hash)
else:
log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
except Exception as err:
self.announcements_sent_metric.labels(peers=0, error=True).inc()
log.warning("error announcing %s: %s", blob_hash[:8], str(err))
async def _submit_announcement(self, blob_hash):
try:
peers = len(await self.node.announce_blob(blob_hash))
if peers > 4:
return blob_hash
else:
log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise err
log.warning("error announcing %s: %s", blob_hash[:8], str(err))
async def _announce(self, batch_size: typing.Optional[int] = 10):
while batch_size:
if not self.node.joined.is_set():
await self.node.joined.wait()
await asyncio.sleep(60)
await asyncio.sleep(60, loop=self.loop)
if not self.node.protocol.routing_table.get_peers():
log.warning("No peers in DHT, announce round skipped")
continue
self.announce_queue.extend(await self.storage.get_blobs_to_announce())
self.announcement_queue_size_metric.labels(scope="global").set(len(self.announce_queue))
log.debug("announcer task wake up, %d blobs to announce", len(self.announce_queue))
while len(self.announce_queue) > 0:
log.info("%i blobs to announce", len(self.announce_queue))
await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)])
announced = list(filter(None, self.announced))
announced = await asyncio.gather(*[
self._submit_announcement(
self.announce_queue.pop()) for _ in range(batch_size) if self.announce_queue
], loop=self.loop)
announced = list(filter(None, announced))
if announced:
await self.storage.update_last_announced_blobs(announced)
log.info("announced %i blobs", len(announced))
self.announced.clear()
self._done.set()
self._done.clear()
def start(self, batch_size: typing.Optional[int] = 10):
assert not self.announce_task or self.announce_task.done(), "already running"
@ -73,6 +56,3 @@ class BlobAnnouncer:
def stop(self):
if self.announce_task and not self.announce_task.done():
self.announce_task.cancel()
def wait(self):
return self._done.wait()
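Taken together, the announcer is driven by three calls: start() schedules the periodic announce loop, stop() cancels it, and wait() (master side only) blocks until a round has completed. A rough usage sketch, assuming a joined Node and an initialized SQLiteStorage are already available:
# Sketch: wiring a BlobAnnouncer to an existing node and storage (module path assumed).
import asyncio
from lbry.dht.blob_announcer import BlobAnnouncer

async def announce_round(node, storage):
    loop = asyncio.get_event_loop()
    announcer = BlobAnnouncer(loop, node, storage)
    announcer.start(batch_size=10)   # number of concurrent announcements per round
    try:
        await announcer.wait()       # master-only helper: returns after one completed round
    finally:
        announcer.stop()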

View file

@ -20,6 +20,7 @@ MAYBE_PING_DELAY = 300 # 5 minutes
CHECK_REFRESH_INTERVAL = REFRESH_INTERVAL / 5
RPC_ID_LENGTH = 20
PROTOCOL_VERSION = 1
BOTTOM_OUT_LIMIT = 3
MSG_SIZE_LIMIT = 1400

View file

@ -1,11 +1,9 @@
import logging
import asyncio
import typing
import binascii
import socket
from prometheus_client import Gauge
from lbry.utils import aclosing, resolve_host
from lbry.utils import resolve_host
from lbry.dht import constants
from lbry.dht.peer import make_kademlia_peer
from lbry.dht.protocol.distance import Distance
@ -20,32 +18,20 @@ log = logging.getLogger(__name__)
class Node:
storing_peers_metric = Gauge(
"storing_peers", "Number of peers storing blobs announced to this node", namespace="dht_node",
labelnames=("scope",),
)
stored_blob_with_x_bytes_colliding = Gauge(
"stored_blobs_x_bytes_colliding", "Number of blobs with at least X bytes colliding with this node id prefix",
namespace="dht_node", labelnames=("amount",)
)
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, udp_port: int,
internal_udp_port: int, peer_port: int, external_ip: str, rpc_timeout: float = constants.RPC_TIMEOUT,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX,
storage: typing.Optional['SQLiteStorage'] = None):
self.loop = loop
self.internal_udp_port = internal_udp_port
self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout,
split_buckets_under_index, is_bootstrap_node)
split_buckets_under_index)
self.listening_port: asyncio.DatagramTransport = None
self.joined = asyncio.Event()
self.joined = asyncio.Event(loop=self.loop)
self._join_task: asyncio.Task = None
self._refresh_task: asyncio.Task = None
self._storage = storage
@property
def stored_blob_hashes(self):
return self.protocol.data_store.keys()
async def refresh_node(self, force_once=False):
while True:
# remove peers with expired blob announcements from the datastore
@ -55,21 +41,17 @@ class Node:
# add all peers in the routing table
total_peers.extend(self.protocol.routing_table.get_peers())
# add all the peers who have announced blobs to us
storing_peers = self.protocol.data_store.get_storing_contacts()
self.storing_peers_metric.labels("global").set(len(storing_peers))
total_peers.extend(storing_peers)
counts = {0: 0, 1: 0, 2: 0}
node_id = self.protocol.node_id
for blob_hash in self.protocol.data_store.keys():
bytes_colliding = 0 if blob_hash[0] != node_id[0] else 2 if blob_hash[1] == node_id[1] else 1
counts[bytes_colliding] += 1
self.stored_blob_with_x_bytes_colliding.labels(amount=0).set(counts[0])
self.stored_blob_with_x_bytes_colliding.labels(amount=1).set(counts[1])
self.stored_blob_with_x_bytes_colliding.labels(amount=2).set(counts[2])
total_peers.extend(self.protocol.data_store.get_storing_contacts())
# get ids falling in the midpoint of each bucket that hasn't been recently updated
node_ids = self.protocol.routing_table.get_refresh_list(0, True)
# if we have 3 or fewer populated buckets get two random ids in the range of each to try and
# populate/split the buckets further
buckets_with_contacts = self.protocol.routing_table.buckets_with_contacts()
if buckets_with_contacts <= 3:
for i in range(buckets_with_contacts):
node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
if self.protocol.routing_table.get_peers():
# if we have node ids to look up, perform the iterative search until we have k results
@ -79,7 +61,7 @@ class Node:
else:
if force_once:
break
fut = asyncio.Future()
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(constants.REFRESH_INTERVAL // 4, fut.set_result, None)
await fut
continue
@ -93,12 +75,12 @@ class Node:
if force_once:
break
fut = asyncio.Future()
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(constants.REFRESH_INTERVAL, fut.set_result, None)
await fut
async def announce_blob(self, blob_hash: str) -> typing.List[bytes]:
hash_value = bytes.fromhex(blob_hash)
hash_value = binascii.unhexlify(blob_hash.encode())
assert len(hash_value) == constants.HASH_LENGTH
peers = await self.peer_search(hash_value)
@ -108,12 +90,12 @@ class Node:
for peer in peers:
log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
stored_to_tup = await asyncio.gather(
*(self.protocol.store_to_peer(hash_value, peer) for peer in peers)
*(self.protocol.store_to_peer(hash_value, peer) for peer in peers), loop=self.loop
)
stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
if stored_to:
log.debug(
"Stored %s to %i of %i attempted peers", hash_value.hex()[:8],
"Stored %s to %i of %i attempted peers", binascii.hexlify(hash_value).decode()[:8],
len(stored_to), len(peers)
)
else:
@ -182,36 +164,39 @@ class Node:
for address, udp_port in known_node_urls or []
]))
except socket.gaierror:
await asyncio.sleep(30)
await asyncio.sleep(30, loop=self.loop)
continue
self.protocol.peer_manager.reset()
self.protocol.ping_queue.enqueue_maybe_ping(*seed_peers, delay=0.0)
await self.peer_search(self.protocol.node_id, shortlist=seed_peers, count=32)
await asyncio.sleep(1)
await asyncio.sleep(1, loop=self.loop)
def start(self, interface: str, known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls))
def get_iterative_node_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
bottom_out_limit: int = constants.BOTTOM_OUT_LIMIT,
max_results: int = constants.K) -> IterativeNodeFinder:
shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
return IterativeNodeFinder(self.loop, self.protocol, key, max_results, shortlist)
return IterativeNodeFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
key, bottom_out_limit, max_results, None, shortlist)
def get_iterative_value_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
bottom_out_limit: int = 40,
max_results: int = -1) -> IterativeValueFinder:
shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
return IterativeValueFinder(self.loop, self.protocol, key, max_results, shortlist)
return IterativeValueFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
key, bottom_out_limit, max_results, None, shortlist)
async def peer_search(self, node_id: bytes, count=constants.K, max_results=constants.K * 2,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None
bottom_out_limit=20, shortlist: typing.Optional[typing.List['KademliaPeer']] = None
) -> typing.List['KademliaPeer']:
peers = []
async with aclosing(self.get_iterative_node_finder(
node_id, shortlist=shortlist, max_results=max_results)) as node_finder:
async for iteration_peers in node_finder:
peers.extend(iteration_peers)
async for iteration_peers in self.get_iterative_node_finder(
node_id, shortlist=shortlist, bottom_out_limit=bottom_out_limit, max_results=max_results):
peers.extend(iteration_peers)
distance = Distance(node_id)
peers.sort(key=lambda peer: distance(peer.node_id))
return peers[:count]
@ -237,46 +222,39 @@ class Node:
# prioritize peers who reply to a dht ping first
# this minimizes attempting to make tcp connections that won't work later to dead or unreachable peers
async with aclosing(self.get_iterative_value_finder(bytes.fromhex(blob_hash))) as value_finder:
async for results in value_finder:
to_put = []
for peer in results:
if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
continue
is_good = self.protocol.peer_manager.peer_is_good(peer)
if is_good:
# the peer has replied recently over UDP, it can probably be reached on the TCP port
to_put.append(peer)
elif is_good is None:
if not peer.udp_port:
# TODO: use the same port for TCP and UDP
# the udp port must be guessed
# default to the ports being the same. if the TCP port appears to be <=0.48.0 default,
# including on a network with several nodes, then assume the udp port is proportionately
# based on a starting port of 4444
udp_port_to_try = peer.tcp_port
if 3400 > peer.tcp_port > 3332:
udp_port_to_try = (peer.tcp_port - 3333) + 4444
self.loop.create_task(put_into_result_queue_after_pong(
make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)
))
else:
self.loop.create_task(put_into_result_queue_after_pong(peer))
async for results in self.get_iterative_value_finder(binascii.unhexlify(blob_hash.encode())):
to_put = []
for peer in results:
if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
continue
is_good = self.protocol.peer_manager.peer_is_good(peer)
if is_good:
# the peer has replied recently over UDP, it can probably be reached on the TCP port
to_put.append(peer)
elif is_good is None:
if not peer.udp_port:
# TODO: use the same port for TCP and UDP
# the udp port must be guessed
# default to the ports being the same. if the TCP port appears to be <=0.48.0 default,
# including on a network with several nodes, then assume the udp port is proportionately
# based on a starting port of 4444
udp_port_to_try = peer.tcp_port
if 3400 > peer.tcp_port > 3332:
udp_port_to_try = (peer.tcp_port - 3333) + 4444
self.loop.create_task(put_into_result_queue_after_pong(
make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)
))
else:
# the peer is known to be bad/unreachable, skip trying to connect to it over TCP
log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
if to_put:
result_queue.put_nowait(to_put)
self.loop.create_task(put_into_result_queue_after_pong(peer))
else:
# the peer is known to be bad/unreachable, skip trying to connect to it over TCP
log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
if to_put:
result_queue.put_nowait(to_put)
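The port-guessing branch above encodes a simple heuristic for peers that advertise a TCP port in the old 3333-based range but no UDP port: keep the same offset, rebased from 3333 onto the newer 4444 default. Worked out in isolation (standalone sketch, the function name is illustrative):
# Standalone illustration of the UDP-port guess used above.
def guess_udp_port(tcp_port: int) -> int:
    # ports strictly between 3332 and 3400 are treated as the old <=0.48.0 defaults
    if 3400 > tcp_port > 3332:
        return (tcp_port - 3333) + 4444   # same offset, new 4444 base
    return tcp_port                       # otherwise assume TCP and UDP ports match

assert guess_udp_port(3333) == 4444
assert guess_udp_port(3340) == 4451
assert guess_udp_port(4444) == 4444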
def accumulate_peers(self, search_queue: asyncio.Queue,
peer_queue: typing.Optional[asyncio.Queue] = None
) -> typing.Tuple[asyncio.Queue, asyncio.Task]:
queue = peer_queue or asyncio.Queue()
queue = peer_queue or asyncio.Queue(loop=self.loop)
return queue, self.loop.create_task(self._accumulate_peers_for_value(search_queue, queue))
async def get_kademlia_peers_from_hosts(peer_list: typing.List[typing.Tuple[str, int]]) -> typing.List['KademliaPeer']:
peer_address_list = [(await resolve_host(url, port, proto='tcp'), port) for url, port in peer_list]
kademlia_peer_list = [make_kademlia_peer(None, address, None, tcp_port=port, allow_localhost=True)
for address, port in peer_address_list]
return kademlia_peer_list
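A sketch of how these pieces can be driven from application code: build a Node, join the DHT through known bootstrap hosts, then announce a blob. The external IP, ports, and bootstrap host are illustrative; the constructor arguments mirror the signature above:
# Sketch: joining the DHT and announcing a blob (values are illustrative).
import asyncio
from lbry.dht.node import Node
from lbry.dht.peer import PeerManager
from lbry.dht import constants

async def announce_once(blob_hash: str):
    loop = asyncio.get_event_loop()
    node = Node(
        loop, PeerManager(loop), node_id=constants.generate_id(),
        udp_port=4444, internal_udp_port=4444, peer_port=4444, external_ip="34.231.10.9",
    )
    node.start("0.0.0.0", known_node_urls=[("dht.lbry.madiator.com", 4444)])
    await node.joined.wait()                           # set once the node has joined the DHT
    stored_to = await node.announce_blob(blob_hash)    # node ids that accepted the announcement
    print("announced to %i peers" % len(stored_to))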

View file

@ -1,21 +1,18 @@
import typing
import asyncio
import logging
from binascii import hexlify
from dataclasses import dataclass, field
from functools import lru_cache
from prometheus_client import Gauge
from lbry.utils import is_valid_public_ipv4 as _is_valid_public_ipv4, LRUCache
from lbry.utils import is_valid_public_ipv4 as _is_valid_public_ipv4
from lbry.dht import constants
from lbry.dht.serialization.datagram import make_compact_address, make_compact_ip, decode_compact_address
ALLOW_LOCALHOST = False
CACHE_SIZE = 16384
log = logging.getLogger(__name__)
@lru_cache(CACHE_SIZE)
@lru_cache(1024)
def make_kademlia_peer(node_id: typing.Optional[bytes], address: typing.Optional[str],
udp_port: typing.Optional[int] = None,
tcp_port: typing.Optional[int] = None,
@ -29,26 +26,17 @@ def is_valid_public_ipv4(address, allow_localhost: bool = False):
class PeerManager:
peer_manager_keys_metric = Gauge(
"peer_manager_keys", "Number of keys tracked by PeerManager dicts (sum)", namespace="dht_node",
labelnames=("scope",)
)
def __init__(self, loop: asyncio.AbstractEventLoop):
self._loop = loop
self._rpc_failures: typing.Dict[
typing.Tuple[str, int], typing.Tuple[typing.Optional[float], typing.Optional[float]]
] = LRUCache(CACHE_SIZE)
self._last_replied: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
self._last_sent: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
self._last_requested: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = LRUCache(CACHE_SIZE)
self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = LRUCache(CACHE_SIZE)
self._node_tokens: typing.Dict[bytes, (float, bytes)] = LRUCache(CACHE_SIZE)
def count_cache_keys(self):
return len(self._rpc_failures) + len(self._last_replied) + len(self._last_sent) + len(
self._last_requested) + len(self._node_id_mapping) + len(self._node_id_reverse_mapping) + len(
self._node_tokens)
] = {}
self._last_replied: typing.Dict[typing.Tuple[str, int], float] = {}
self._last_sent: typing.Dict[typing.Tuple[str, int], float] = {}
self._last_requested: typing.Dict[typing.Tuple[str, int], float] = {}
self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = {}
self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = {}
self._node_tokens: typing.Dict[bytes, (float, bytes)] = {}
def reset(self):
for statistic in (self._rpc_failures, self._last_replied, self._last_sent, self._last_requested):
@ -98,10 +86,6 @@ class PeerManager:
self._node_id_mapping.pop(self._node_id_reverse_mapping.pop(node_id))
self._node_id_mapping[(address, udp_port)] = node_id
self._node_id_reverse_mapping[node_id] = (address, udp_port)
self.peer_manager_keys_metric.labels("global").set(self.count_cache_keys())
def get_node_id_for_endpoint(self, address, port):
return self._node_id_mapping.get((address, port))
def prune(self): # TODO: periodically call this
now = self._loop.time()
@ -153,10 +137,9 @@ class PeerManager:
def peer_is_good(self, peer: 'KademliaPeer'):
return self.contact_triple_is_good(peer.node_id, peer.address, peer.udp_port)
def decode_tcp_peer_from_compact_address(compact_address: bytes) -> 'KademliaPeer': # pylint: disable=no-self-use
node_id, address, tcp_port = decode_compact_address(compact_address)
return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)
def decode_tcp_peer_from_compact_address(self, compact_address: bytes) -> 'KademliaPeer': # pylint: disable=no-self-use
node_id, address, tcp_port = decode_compact_address(compact_address)
return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)
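Both variants above unpack a "compact address": the peer's IPv4 address and TCP port followed by its 48-byte node id, the layout make_compact_address is expected to produce. A hedged sketch of that packing, assuming the conventional big-endian port encoding (not confirmed by the lines shown here):
# Assumed compact-address layout: 4 bytes IPv4 + 2 bytes big-endian port + 48 bytes node id.
import os
import socket

node_id = os.urandom(48)   # HASH_LENGTH bytes
compact = socket.inet_aton("34.231.10.9") + (3333).to_bytes(2, "big") + node_id
assert len(compact) == 4 + 2 + 48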
@dataclass(unsafe_hash=True)
@ -171,11 +154,11 @@ class KademliaPeer:
def __post_init__(self):
if self._node_id is not None:
if not len(self._node_id) == constants.HASH_LENGTH:
raise ValueError("invalid node_id: {}".format(self._node_id.hex()))
if self.udp_port is not None and not 1024 <= self.udp_port <= 65535:
raise ValueError(f"invalid udp port: {self.address}:{self.udp_port}")
if self.tcp_port is not None and not 1024 <= self.tcp_port <= 65535:
raise ValueError(f"invalid tcp port: {self.address}:{self.tcp_port}")
raise ValueError("invalid node_id: {}".format(hexlify(self._node_id).decode()))
if self.udp_port is not None and not 1 <= self.udp_port <= 65535:
raise ValueError("invalid udp port")
if self.tcp_port is not None and not 1 <= self.tcp_port <= 65535:
raise ValueError("invalid tcp port")
if not is_valid_public_ipv4(self.address, self.allow_localhost):
raise ValueError(f"invalid ip address: '{self.address}'")
@ -194,6 +177,3 @@ class KademliaPeer:
def compact_ip(self):
return make_compact_ip(self.address)
def __str__(self):
return f"{self.__class__.__name__}({self.node_id.hex()[:8]}@{self.address}:{self.udp_port}-{self.tcp_port})"

View file

@ -16,12 +16,6 @@ class DictDataStore:
self._peer_manager = peer_manager
self.completed_blobs: typing.Set[str] = set()
def keys(self):
return self._data_store.keys()
def __len__(self):
return self._data_store.__len__()
def removed_expired_peers(self):
now = self.loop.time()
keys = list(self._data_store.keys())

View file

@ -1,17 +1,18 @@
import asyncio
from binascii import hexlify
from itertools import chain
from collections import defaultdict, OrderedDict
from collections.abc import AsyncIterator
from collections import defaultdict
import typing
import logging
from typing import TYPE_CHECKING
from lbry.dht import constants
from lbry.dht.error import RemoteException, TransportNotConnected
from lbry.dht.protocol.distance import Distance
from lbry.dht.peer import make_kademlia_peer, decode_tcp_peer_from_compact_address
from lbry.dht.peer import make_kademlia_peer
from lbry.dht.serialization.datagram import PAGE_KEY
if TYPE_CHECKING:
from lbry.dht.protocol.routing_table import TreeRoutingTable
from lbry.dht.protocol.protocol import KademliaProtocol
from lbry.dht.peer import PeerManager, KademliaPeer
@ -26,15 +27,6 @@ class FindResponse:
def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
raise NotImplementedError()
def get_close_kademlia_peers(self, peer_info) -> typing.Generator[typing.Iterator['KademliaPeer'], None, None]:
for contact_triple in self.get_close_triples():
node_id, address, udp_port = contact_triple
try:
yield make_kademlia_peer(node_id, address, udp_port)
except ValueError:
log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer_info.address,
peer_info.udp_port, address, udp_port)
class FindNodeResponse(FindResponse):
def __init__(self, key: bytes, close_triples: typing.List[typing.Tuple[bytes, str, int]]):
@ -65,33 +57,57 @@ class FindValueResponse(FindResponse):
return [(node_id, address.decode(), port) for node_id, address, port in self.close_triples]
class IterativeFinder(AsyncIterator):
def __init__(self, loop: asyncio.AbstractEventLoop,
protocol: 'KademliaProtocol', key: bytes,
max_results: typing.Optional[int] = constants.K,
def get_shortlist(routing_table: 'TreeRoutingTable', key: bytes,
shortlist: typing.Optional[typing.List['KademliaPeer']]) -> typing.List['KademliaPeer']:
"""
If not provided, initialize the shortlist of peers to probe to the (up to) k closest peers in the routing table
:param routing_table: a TreeRoutingTable
:param key: a 48 byte hash
:param shortlist: optional manually provided shortlist, this is done during bootstrapping when there are no
peers in the routing table. During bootstrap the shortlist is set to be the seed nodes.
"""
if len(key) != constants.HASH_LENGTH:
raise ValueError("invalid key length: %i" % len(key))
return shortlist or routing_table.find_close_peers(key)
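The Distance object used throughout the finder is the Kademlia XOR metric: a peer's distance to the target key is the XOR of the two ids read as big-endian integers, and shortlists are sorted ascending by it. A tiny standalone sketch, assumed equivalent to lbry.dht.protocol.distance.Distance but shortened to 4-byte ids for readability:
# Minimal XOR-distance sketch; the real implementation works on 48-byte node ids.
def xor_distance(key: bytes, node_id: bytes) -> int:
    return int.from_bytes(key, "big") ^ int.from_bytes(node_id, "big")

key = bytes.fromhex("00000000")
peers = [bytes.fromhex("0000ff00"), bytes.fromhex("00000001"), bytes.fromhex("80000000")]
peers.sort(key=lambda node_id: xor_distance(key, node_id))
# -> 00000001 (closest), 0000ff00, 80000000 (farthest): the order in which the finder probes peers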
class IterativeFinder:
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
if len(key) != constants.HASH_LENGTH:
raise ValueError("invalid key length: %i" % len(key))
self.loop = loop
self.peer_manager = protocol.peer_manager
self.peer_manager = peer_manager
self.routing_table = routing_table
self.protocol = protocol
self.key = key
self.max_results = max(constants.K, max_results)
self.bottom_out_limit = bottom_out_limit
self.max_results = max_results
self.exclude = exclude or []
self.active: typing.Dict['KademliaPeer', int] = OrderedDict() # peer: distance, sorted
self.active: typing.Set['KademliaPeer'] = set()
self.contacted: typing.Set['KademliaPeer'] = set()
self.distance = Distance(key)
self.iteration_queue = asyncio.Queue()
self.closest_peer: typing.Optional['KademliaPeer'] = None
self.prev_closest_peer: typing.Optional['KademliaPeer'] = None
self.running_probes: typing.Dict['KademliaPeer', asyncio.Task] = {}
self.iteration_queue = asyncio.Queue(loop=self.loop)
self.running_probes: typing.Set[asyncio.Task] = set()
self.iteration_count = 0
self.bottom_out_count = 0
self.running = False
self.tasks: typing.List[asyncio.Task] = []
for peer in shortlist:
self.delayed_calls: typing.List[asyncio.Handle] = []
for peer in get_shortlist(routing_table, key, shortlist):
if peer.node_id:
self._add_active(peer, force=True)
self._add_active(peer)
else:
# seed nodes
self._schedule_probe(peer)
@ -123,79 +139,66 @@ class IterativeFinder(AsyncIterator):
"""
return []
def _add_active(self, peer, force=False):
if not force and self.peer_manager.peer_is_good(peer) is False:
return
if peer in self.contacted:
return
def _is_closer(self, peer: 'KademliaPeer') -> bool:
return not self.closest_peer or self.distance.is_closer(peer.node_id, self.closest_peer.node_id)
def _add_active(self, peer):
if peer not in self.active and peer.node_id and peer.node_id != self.protocol.node_id:
self.active[peer] = self.distance(peer.node_id)
self.active = OrderedDict(sorted(self.active.items(), key=lambda item: item[1]))
self.active.add(peer)
if self._is_closer(peer):
self.prev_closest_peer = self.closest_peer
self.closest_peer = peer
async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
self._add_active(peer)
for new_peer in response.get_close_kademlia_peers(peer):
self._add_active(new_peer)
for contact_triple in response.get_close_triples():
node_id, address, udp_port = contact_triple
try:
self._add_active(make_kademlia_peer(node_id, address, udp_port))
except ValueError:
log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer.address,
peer.udp_port, address, udp_port)
self.check_result_ready(response)
self._log_state(reason="check result")
def _reset_closest(self, peer):
if peer in self.active:
del self.active[peer]
async def _send_probe(self, peer: 'KademliaPeer'):
try:
response = await self.send_probe(peer)
except asyncio.TimeoutError:
self._reset_closest(peer)
self.active.discard(peer)
return
except asyncio.CancelledError:
log.debug("%s[%x] cancelled probe",
type(self).__name__, id(self))
raise
except ValueError as err:
log.warning(str(err))
self._reset_closest(peer)
self.active.discard(peer)
return
except TransportNotConnected:
await self._aclose(reason="not connected")
return
return self.aclose()
except RemoteException:
self._reset_closest(peer)
return
return await self._handle_probe_result(peer, response)
def _search_round(self):
async def _search_round(self):
"""
Send up to constants.alpha (5) probes to closest active peers
"""
added = 0
for index, peer in enumerate(self.active.keys()):
if index == 0:
log.debug("%s[%x] closest to probe: %s",
type(self).__name__, id(self),
peer.node_id.hex()[:8])
if peer in self.contacted:
continue
if len(self.running_probes) >= constants.ALPHA:
break
if index > (constants.K + len(self.running_probes)):
to_probe = list(self.active - self.contacted)
to_probe.sort(key=lambda peer: self.distance(self.key))
for peer in to_probe:
if added >= constants.ALPHA:
break
origin_address = (peer.address, peer.udp_port)
if origin_address in self.exclude:
continue
if peer.node_id == self.protocol.node_id:
continue
if origin_address == (self.protocol.external_ip, self.protocol.udp_port):
continue
self._schedule_probe(peer)
added += 1
log.debug("%s[%x] running %d probes for key %s",
type(self).__name__, id(self),
len(self.running_probes), self.key.hex()[:8])
log.debug("running %d probes", len(self.running_probes))
if not added and not self.running_probes:
log.debug("%s[%x] search for %s exhausted",
type(self).__name__, id(self),
self.key.hex()[:8])
log.debug("search for %s exhausted", hexlify(self.key)[:8])
self.search_exhausted()
def _schedule_probe(self, peer: 'KademliaPeer'):
@ -204,24 +207,33 @@ class IterativeFinder(AsyncIterator):
t = self.loop.create_task(self._send_probe(peer))
def callback(_):
self.running_probes.pop(peer, None)
if self.running:
self._search_round()
self.running_probes.difference_update({
probe for probe in self.running_probes if probe.done() or probe == t
})
if not self.running_probes:
self.tasks.append(self.loop.create_task(self._search_task(0.0)))
t.add_done_callback(callback)
self.running_probes[peer] = t
self.running_probes.add(t)
def _log_state(self, reason="?"):
log.debug("%s[%x] [%s] %s: %i active nodes %i contacted %i produced %i queued",
type(self).__name__, id(self), self.key.hex()[:8],
reason, len(self.active), len(self.contacted),
self.iteration_count, self.iteration_queue.qsize())
async def _search_task(self, delay: typing.Optional[float] = constants.ITERATIVE_LOOKUP_DELAY):
try:
if self.running:
await self._search_round()
if self.running:
self.delayed_calls.append(self.loop.call_later(delay, self._search))
except (asyncio.CancelledError, StopAsyncIteration, TransportNotConnected):
if self.running:
self.loop.call_soon(self.aclose)
def _search(self):
self.tasks.append(self.loop.create_task(self._search_task()))
def __aiter__(self):
if self.running:
raise Exception("already running")
self.running = True
self.loop.call_soon(self._search_round)
self._search()
return self
async def __anext__(self) -> typing.List['KademliaPeer']:
@ -234,57 +246,47 @@ class IterativeFinder(AsyncIterator):
raise StopAsyncIteration
self.iteration_count += 1
return result
except asyncio.CancelledError:
await self._aclose(reason="cancelled")
raise
except StopAsyncIteration:
await self._aclose(reason="no more results")
except (asyncio.CancelledError, StopAsyncIteration):
self.loop.call_soon(self.aclose)
raise
async def _aclose(self, reason="?"):
log.debug("%s[%x] [%s] shutdown because %s: %i active nodes %i contacted %i produced %i queued",
type(self).__name__, id(self), self.key.hex()[:8],
reason, len(self.active), len(self.contacted),
self.iteration_count, self.iteration_queue.qsize())
def aclose(self):
self.running = False
self.iteration_queue.put_nowait(None)
for task in chain(self.tasks, self.running_probes.values()):
for task in chain(self.tasks, self.running_probes, self.delayed_calls):
task.cancel()
self.tasks.clear()
self.running_probes.clear()
self.delayed_calls.clear()
async def aclose(self):
if self.running:
await self._aclose(reason="aclose")
log.debug("%s[%x] [%s] async close completed",
type(self).__name__, id(self), self.key.hex()[:8])
class IterativeNodeFinder(IterativeFinder):
def __init__(self, loop: asyncio.AbstractEventLoop,
protocol: 'KademliaProtocol', key: bytes,
max_results: typing.Optional[int] = constants.K,
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
super().__init__(loop, protocol, key, max_results, shortlist)
super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
shortlist)
self.yielded_peers: typing.Set['KademliaPeer'] = set()
async def send_probe(self, peer: 'KademliaPeer') -> FindNodeResponse:
log.debug("probe %s:%d (%s) for NODE %s",
peer.address, peer.udp_port, peer.node_id.hex()[:8] if peer.node_id else '', self.key.hex()[:8])
log.debug("probing %s:%d %s", peer.address, peer.udp_port, hexlify(peer.node_id)[:8] if peer.node_id else '')
response = await self.protocol.get_rpc_peer(peer).find_node(self.key)
return FindNodeResponse(self.key, response)
def search_exhausted(self):
self.put_result(self.active.keys(), finish=True)
self.put_result(self.active, finish=True)
def put_result(self, from_iter: typing.Iterable['KademliaPeer'], finish=False):
not_yet_yielded = [
peer for peer in from_iter
if peer not in self.yielded_peers
and peer.node_id != self.protocol.node_id
and self.peer_manager.peer_is_good(peer) is True # return only peers who answered
and self.peer_manager.peer_is_good(peer) is not False
]
not_yet_yielded.sort(key=lambda peer: self.distance(peer.node_id))
to_yield = not_yet_yielded[:max(constants.K, self.max_results)]
to_yield = not_yet_yielded[:min(constants.K, len(not_yet_yielded))]
if to_yield:
self.yielded_peers.update(to_yield)
self.iteration_queue.put_nowait(to_yield)
@ -296,15 +298,27 @@ class IterativeNodeFinder(IterativeFinder):
if found:
log.debug("found")
return self.put_result(self.active.keys(), finish=True)
return self.put_result(self.active, finish=True)
if self.prev_closest_peer and self.closest_peer and not self._is_closer(self.prev_closest_peer):
# log.info("improving, %i %i %i %i %i", len(self.shortlist), len(self.active), len(self.contacted),
# self.bottom_out_count, self.iteration_count)
self.bottom_out_count = 0
elif self.prev_closest_peer and self.closest_peer:
self.bottom_out_count += 1
log.info("bottom out %i %i %i", len(self.active), len(self.contacted), self.bottom_out_count)
if self.bottom_out_count >= self.bottom_out_limit or self.iteration_count >= self.bottom_out_limit:
log.info("limit hit")
self.put_result(self.active, True)
class IterativeValueFinder(IterativeFinder):
def __init__(self, loop: asyncio.AbstractEventLoop,
protocol: 'KademliaProtocol', key: bytes,
max_results: typing.Optional[int] = constants.K,
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
super().__init__(loop, protocol, key, max_results, shortlist)
super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
shortlist)
self.blob_peers: typing.Set['KademliaPeer'] = set()
# this tracks the index of the most recent page we requested from each peer
self.peer_pages: typing.DefaultDict['KademliaPeer', int] = defaultdict(int)
@ -312,8 +326,6 @@ class IterativeValueFinder(IterativeFinder):
self.discovered_peers: typing.Dict['KademliaPeer', typing.Set['KademliaPeer']] = defaultdict(set)
async def send_probe(self, peer: 'KademliaPeer') -> FindValueResponse:
log.debug("probe %s:%d (%s) for VALUE %s",
peer.address, peer.udp_port, peer.node_id.hex()[:8], self.key.hex()[:8])
page = self.peer_pages[peer]
response = await self.protocol.get_rpc_peer(peer).find_value(self.key, page=page)
parsed = FindValueResponse(self.key, response)
@ -323,7 +335,7 @@ class IterativeValueFinder(IterativeFinder):
decoded_peers = set()
for compact_addr in parsed.found_compact_addresses:
try:
decoded_peers.add(decode_tcp_peer_from_compact_address(compact_addr))
decoded_peers.add(self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr))
except ValueError:
log.warning("misbehaving peer %s:%i returned invalid peer for blob",
peer.address, peer.udp_port)
@ -335,6 +347,7 @@ class IterativeValueFinder(IterativeFinder):
already_known + len(parsed.found_compact_addresses))
if len(self.discovered_peers[peer]) != already_known + len(parsed.found_compact_addresses):
log.warning("misbehaving peer %s:%i returned duplicate peers for blob", peer.address, peer.udp_port)
parsed.found_compact_addresses.clear()
elif len(parsed.found_compact_addresses) >= constants.K and self.peer_pages[peer] < parsed.pages:
# the peer returned a full page and indicates it has more
self.peer_pages[peer] += 1
@ -345,15 +358,26 @@ class IterativeValueFinder(IterativeFinder):
def check_result_ready(self, response: FindValueResponse):
if response.found:
blob_peers = [decode_tcp_peer_from_compact_address(compact_addr)
blob_peers = [self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr)
for compact_addr in response.found_compact_addresses]
to_yield = []
self.bottom_out_count = 0
for blob_peer in blob_peers:
if blob_peer not in self.blob_peers:
self.blob_peers.add(blob_peer)
to_yield.append(blob_peer)
if to_yield:
# log.info("found %i new peers for blob", len(to_yield))
self.iteration_queue.put_nowait(to_yield)
# if self.max_results and len(self.blob_peers) >= self.max_results:
# log.info("enough blob peers found")
# if not self.finished.is_set():
# self.finished.set()
elif self.prev_closest_peer and self.closest_peer:
self.bottom_out_count += 1
if self.bottom_out_count >= self.bottom_out_limit:
log.info("blob peer search bottomed out")
self.iteration_queue.put_nowait(None)
def get_initial_result(self) -> typing.List['KademliaPeer']:
if self.protocol.data_store.has_peers_for_blob(self.key):

View file

@ -3,14 +3,12 @@ import socket
import functools
import hashlib
import asyncio
import time
import typing
import binascii
import random
from asyncio.protocols import DatagramProtocol
from asyncio.transports import DatagramTransport
from prometheus_client import Gauge, Counter, Histogram
from lbry.dht import constants
from lbry.dht.serialization.bencoding import DecodeError
from lbry.dht.serialization.datagram import decode_datagram, ErrorDatagram, ResponseDatagram, RequestDatagram
@ -33,11 +31,6 @@ OLD_PROTOCOL_ERRORS = {
class KademliaRPC:
stored_blob_metric = Gauge(
"stored_blobs", "Number of blobs announced by other peers", namespace="dht_node",
labelnames=("scope",),
)
def __init__(self, protocol: 'KademliaProtocol', loop: asyncio.AbstractEventLoop, peer_port: int = 3333):
self.protocol = protocol
self.loop = loop
@ -69,7 +62,6 @@ class KademliaRPC:
self.protocol.data_store.add_peer_to_blob(
rpc_contact, blob_hash
)
self.stored_blob_metric.labels("global").set(len(self.protocol.data_store))
return b'OK'
def find_node(self, rpc_contact: 'KademliaPeer', key: bytes) -> typing.List[typing.Tuple[bytes, str, int]]:
@ -105,7 +97,7 @@ class KademliaRPC:
if not rpc_contact.tcp_port or peer.compact_address_tcp() != rpc_contact.compact_address_tcp()
]
# if we don't have k storing peers to return and we have this hash locally, include our contact information
if len(peers) < constants.K and key.hex() in self.protocol.data_store.completed_blobs:
if len(peers) < constants.K and binascii.hexlify(key).decode() in self.protocol.data_store.completed_blobs:
peers.append(self.compact_address())
if not peers:
response[PAGE_KEY] = 0
@ -218,10 +210,6 @@ class PingQueue:
def running(self):
return self._running
@property
def busy(self):
return self._running and (any(self._running_pings) or any(self._pending_contacts))
def enqueue_maybe_ping(self, *peers: 'KademliaPeer', delay: typing.Optional[float] = None):
delay = delay if delay is not None else self._default_delay
now = self._loop.time()
@ -233,7 +221,7 @@ class PingQueue:
async def ping_task():
try:
if self._protocol.peer_manager.peer_is_good(peer):
if not self._protocol.routing_table.get_peer(peer.node_id):
if peer not in self._protocol.routing_table.get_peers():
self._protocol.add_peer(peer)
return
await self._protocol.get_rpc_peer(peer).ping()
@ -253,7 +241,7 @@ class PingQueue:
del self._pending_contacts[peer]
self.maybe_ping(peer)
break
await asyncio.sleep(1)
await asyncio.sleep(1, loop=self._loop)
def start(self):
assert not self._running
@ -272,33 +260,9 @@ class PingQueue:
class KademliaProtocol(DatagramProtocol):
request_sent_metric = Counter(
"request_sent", "Number of requests send from DHT RPC protocol", namespace="dht_node",
labelnames=("method",),
)
request_success_metric = Counter(
"request_success", "Number of successful requests", namespace="dht_node",
labelnames=("method",),
)
request_error_metric = Counter(
"request_error", "Number of errors returned from request to other peers", namespace="dht_node",
labelnames=("method",),
)
HISTOGRAM_BUCKETS = (
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 3.0, 3.5, 4.0, 4.50, 5.0, 5.50, 6.0, float('inf')
)
response_time_metric = Histogram(
"response_time", "Response times of DHT RPC requests", namespace="dht_node", buckets=HISTOGRAM_BUCKETS,
labelnames=("method",)
)
received_request_metric = Counter(
"received_request", "Number of received DHT RPC requests", namespace="dht_node",
labelnames=("method",),
)
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, external_ip: str,
udp_port: int, peer_port: int, rpc_timeout: float = constants.RPC_TIMEOUT,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_boostrap_node: bool = False):
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
self.peer_manager = peer_manager
self.loop = loop
self.node_id = node_id
@ -313,16 +277,15 @@ class KademliaProtocol(DatagramProtocol):
self.transport: DatagramTransport = None
self.old_token_secret = constants.generate_id()
self.token_secret = constants.generate_id()
self.routing_table = TreeRoutingTable(
self.loop, self.peer_manager, self.node_id, split_buckets_under_index, is_bootstrap_node=is_boostrap_node)
self.routing_table = TreeRoutingTable(self.loop, self.peer_manager, self.node_id, split_buckets_under_index)
self.data_store = DictDataStore(self.loop, self.peer_manager)
self.ping_queue = PingQueue(self.loop, self)
self.node_rpc = KademliaRPC(self, self.loop, self.peer_port)
self.rpc_timeout = rpc_timeout
self._split_lock = asyncio.Lock()
self._split_lock = asyncio.Lock(loop=self.loop)
self._to_remove: typing.Set['KademliaPeer'] = set()
self._to_add: typing.Set['KademliaPeer'] = set()
self._wakeup_routing_task = asyncio.Event()
self._wakeup_routing_task = asyncio.Event(loop=self.loop)
self.maintaing_routing_task: typing.Optional[asyncio.Task] = None
@functools.lru_cache(128)
@ -361,10 +324,72 @@ class KademliaProtocol(DatagramProtocol):
return args, {}
async def _add_peer(self, peer: 'KademliaPeer'):
async def probe(some_peer: 'KademliaPeer'):
rpc_peer = self.get_rpc_peer(some_peer)
await rpc_peer.ping()
return await self.routing_table.add_peer(peer, probe)
if not peer.node_id:
log.warning("Tried adding a peer with no node id!")
return False
for my_peer in self.routing_table.get_peers():
if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
self.routing_table.remove_peer(my_peer)
self.routing_table.join_buckets()
bucket_index = self.routing_table.kbucket_index(peer.node_id)
if self.routing_table.buckets[bucket_index].add_peer(peer):
return True
# The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
if self.routing_table.should_split(bucket_index, peer.node_id):
self.routing_table.split_bucket(bucket_index)
# Retry the insertion attempt
result = await self._add_peer(peer)
self.routing_table.join_buckets()
return result
else:
# We can't split the k-bucket
#
# The 13 page kademlia paper specifies that the least recently contacted node in the bucket
# shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
# the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
#
# A reasonable extension to this is BEP 0005, which extends the above:
#
# Not all nodes that we learn about are equal. Some are "good" and some are not.
# Many nodes using the DHT are able to send queries and receive responses,
# but are not able to respond to queries from other nodes. It is important that
# each node's routing table must contain only known good nodes. A good node is
# a node has responded to one of our queries within the last 15 minutes. A node
# is also good if it has ever responded to one of our queries and has sent us a
# query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
# questionable. Nodes become bad when they fail to respond to multiple queries
# in a row. Nodes that we know are good are given priority over nodes with unknown status.
#
# When there are bad or questionable nodes in the bucket, the least recent is selected for
# potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
# contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
# is ignored if the pinged node replies.
not_good_contacts = self.routing_table.buckets[bucket_index].get_bad_or_unknown_peers()
not_recently_replied = []
for my_peer in not_good_contacts:
last_replied = self.peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
if not last_replied or last_replied + 60 < self.loop.time():
not_recently_replied.append(my_peer)
if not_recently_replied:
to_replace = not_recently_replied[0]
else:
to_replace = self.routing_table.buckets[bucket_index].peers[0]
last_replied = self.peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
if last_replied and last_replied + 60 > self.loop.time():
return False
log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
try:
to_replace_rpc = self.get_rpc_peer(to_replace)
await to_replace_rpc.ping()
return False
except asyncio.TimeoutError:
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
if to_replace in self.routing_table.buckets[bucket_index]:
self.routing_table.buckets[bucket_index].remove_peer(to_replace)
return await self._add_peer(peer)
def add_peer(self, peer: 'KademliaPeer'):
if peer.node_id == self.node_id:
@ -382,15 +407,16 @@ class KademliaProtocol(DatagramProtocol):
async with self._split_lock:
peer = self._to_remove.pop()
self.routing_table.remove_peer(peer)
self.routing_table.join_buckets()
while self._to_add:
async with self._split_lock:
await self._add_peer(self._to_add.pop())
await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1))
await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1, loop=self.loop), loop=self.loop)
self._wakeup_routing_task.clear()
def _handle_rpc(self, sender_contact: 'KademliaPeer', message: RequestDatagram):
assert sender_contact.node_id != self.node_id, (sender_contact.node_id.hex()[:8],
self.node_id.hex()[:8])
assert sender_contact.node_id != self.node_id, (binascii.hexlify(sender_contact.node_id)[:8].decode(),
binascii.hexlify(self.node_id)[:8].decode())
method = message.method
if method not in [b'ping', b'store', b'findNode', b'findValue']:
raise AttributeError('Invalid method: %s' % message.method.decode())
@ -422,15 +448,11 @@ class KademliaProtocol(DatagramProtocol):
def handle_request_datagram(self, address: typing.Tuple[str, int], request_datagram: RequestDatagram):
# This is an RPC method request
self.received_request_metric.labels(method=request_datagram.method).inc()
self.peer_manager.report_last_requested(address[0], address[1])
peer = self.routing_table.get_peer(request_datagram.node_id)
if not peer:
try:
peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
except ValueError as err:
log.warning("error replying to %s: %s", address[0], str(err))
return
try:
peer = self.routing_table.get_peer(request_datagram.node_id)
except IndexError:
peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
try:
self._handle_rpc(peer, request_datagram)
# if the contact is not known to be bad (yet) and we haven't yet queried it, send it a ping so that it
@ -530,12 +552,12 @@ class KademliaProtocol(DatagramProtocol):
address[0], address[1], OLD_PROTOCOL_ERRORS[error_datagram.response]
)
def datagram_received(self, datagram: bytes, address: typing.Tuple[str, int]) -> None: # pylint: disable=arguments-renamed
def datagram_received(self, datagram: bytes, address: typing.Tuple[str, int]) -> None: # pylint: disable=arguments-differ
try:
message = decode_datagram(datagram)
except (ValueError, TypeError, DecodeError):
self.peer_manager.report_failure(address[0], address[1])
log.warning("Couldn't decode dht datagram from %s: %s", address, datagram.hex())
log.warning("Couldn't decode dht datagram from %s: %s", address, binascii.hexlify(datagram).decode())
return
if isinstance(message, RequestDatagram):
@ -550,19 +572,14 @@ class KademliaProtocol(DatagramProtocol):
self._send(peer, request)
response_fut = self.sent_messages[request.rpc_id][1]
try:
self.request_sent_metric.labels(method=request.method).inc()
start = time.perf_counter()
response = await asyncio.wait_for(response_fut, self.rpc_timeout)
self.response_time_metric.labels(method=request.method).observe(time.perf_counter() - start)
self.peer_manager.report_last_replied(peer.address, peer.udp_port)
self.request_success_metric.labels(method=request.method).inc()
return response
except asyncio.CancelledError:
if not response_fut.done():
response_fut.cancel()
raise
except (asyncio.TimeoutError, RemoteException):
self.request_error_metric.labels(method=request.method).inc()
self.peer_manager.report_failure(peer.address, peer.udp_port)
if self.peer_manager.peer_is_good(peer) is False:
self.remove_peer(peer)
@ -582,7 +599,7 @@ class KademliaProtocol(DatagramProtocol):
if len(data) > constants.MSG_SIZE_LIMIT:
log.warning("cannot send datagram larger than %i bytes (packet is %i bytes)",
constants.MSG_SIZE_LIMIT, len(data))
log.debug("Packet is too large to send: %s", data[:3500].hex())
log.debug("Packet is too large to send: %s", binascii.hexlify(data[:3500]).decode())
raise ValueError(
f"cannot send datagram larger than {constants.MSG_SIZE_LIMIT} bytes (packet is {len(data)} bytes)"
)
@ -642,13 +659,13 @@ class KademliaProtocol(DatagramProtocol):
res = await self.get_rpc_peer(peer).store(hash_value)
if res != b"OK":
raise ValueError(res)
log.debug("Stored %s to %s", hash_value.hex()[:8], peer)
log.debug("Stored %s to %s", binascii.hexlify(hash_value).decode()[:8], peer)
return peer.node_id, True
try:
return await __store()
except asyncio.TimeoutError:
log.debug("Timeout while storing blob_hash %s at %s", hash_value.hex()[:8], peer)
log.debug("Timeout while storing blob_hash %s at %s", binascii.hexlify(hash_value).decode()[:8], peer)
return peer.node_id, False
except ValueError as err:
log.error("Unexpected response: %s", err)

View file

@ -4,11 +4,7 @@ import logging
import typing
import itertools
from prometheus_client import Gauge
from lbry import utils
from lbry.dht import constants
from lbry.dht.error import RemoteException
from lbry.dht.protocol.distance import Distance
if typing.TYPE_CHECKING:
from lbry.dht.peer import KademliaPeer, PeerManager
@ -17,20 +13,10 @@ log = logging.getLogger(__name__)
class KBucket:
""" Description - later
"""
Kademlia K-bucket implementation.
"""
peer_in_routing_table_metric = Gauge(
"peers_in_routing_table", "Number of peers on routing table", namespace="dht_node",
labelnames=("scope",)
)
peer_with_x_bit_colliding_metric = Gauge(
"peer_x_bit_colliding", "Number of peers with at least X bits colliding with this node id",
namespace="dht_node", labelnames=("amount",)
)
def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int,
node_id: bytes, capacity: int = constants.K):
def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int, node_id: bytes):
"""
@param range_min: The lower boundary for the range in the n-bit ID
space covered by this k-bucket
@ -38,12 +24,12 @@ class KBucket:
covered by this k-bucket
"""
self._peer_manager = peer_manager
self.last_accessed = 0
self.range_min = range_min
self.range_max = range_max
self.peers: typing.List['KademliaPeer'] = []
self._node_id = node_id
self._distance_to_self = Distance(node_id)
self.capacity = capacity
def add_peer(self, peer: 'KademliaPeer') -> bool:
""" Add contact to _contact list in the right order. This will move the
@ -64,25 +50,24 @@ class KBucket:
self.peers.append(peer)
return True
else:
for i, _ in enumerate(self.peers):
for i in range(len(self.peers)):
local_peer = self.peers[i]
if local_peer.node_id == peer.node_id:
self.peers.remove(local_peer)
self.peers.append(peer)
return True
if len(self.peers) < self.capacity:
if len(self.peers) < constants.K:
self.peers.append(peer)
self.peer_in_routing_table_metric.labels("global").inc()
bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).inc()
return True
else:
return False
# raise BucketFull("No space in bucket to insert contact")
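The logic in this hunk keeps a bucket ordered from least to most recently seen: a known peer is moved to the tail, a new peer is appended while there is room, and otherwise the add is refused so the caller can decide whether to evict. A standalone sketch of that policy (simplified; it drops the metrics and the peer manager, and `capacity` stands in for `constants.K`):

```python
from typing import List

class MiniBucket:
    def __init__(self, capacity: int = 8):
        self.capacity = capacity
        self.peers: List[bytes] = []  # node ids, least recently seen first

    def add_peer(self, node_id: bytes) -> bool:
        if node_id in self.peers:            # already known: move to the tail
            self.peers.remove(node_id)
            self.peers.append(node_id)
            return True
        if len(self.peers) < self.capacity:  # room left: append
            self.peers.append(node_id)
            return True
        return False                         # full: caller must ping/evict

bucket = MiniBucket(capacity=2)
assert bucket.add_peer(b"a") and bucket.add_peer(b"b")
assert bucket.add_peer(b"a")         # refresh moves b"a" to the tail
assert bucket.peers == [b"b", b"a"]
assert not bucket.add_peer(b"c")     # bucket is full, insertion refused
```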
def get_peer(self, node_id: bytes) -> 'KademliaPeer':
for peer in self.peers:
if peer.node_id == node_id:
return peer
raise IndexError(node_id)
def get_peers(self, count=-1, exclude_contact=None, sort_distance_to=None) -> typing.List['KademliaPeer']:
""" Returns a list containing up to the first count number of contacts
@ -139,9 +124,6 @@ class KBucket:
def remove_peer(self, peer: 'KademliaPeer') -> None:
self.peers.remove(peer)
self.peer_in_routing_table_metric.labels("global").dec()
bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).dec()
def key_in_range(self, key: bytes) -> bool:
""" Tests whether the specified key (i.e. node ID) is in the range
@ -179,36 +161,24 @@ class TreeRoutingTable:
version of the Kademlia paper, in section 2.4. It does, however, use the
ping RPC-based k-bucket eviction algorithm described in section 2.2 of
that paper.
BOOTSTRAP MODE: if set to True, we always add all peers. This keeps a
bootstrap node from being biased towards its own node id, so the replies it
gives are the best it can provide (a joining peer learns its neighbors immediately).
Over time this will need to be optimized to use the disk, since holding
everything in memory won't stay feasible.
See: https://github.com/bittorrent/bootstrap-dht
"""
bucket_in_routing_table_metric = Gauge(
"buckets_in_routing_table", "Number of buckets on routing table", namespace="dht_node",
labelnames=("scope",)
)
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', parent_node_id: bytes,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False):
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
self._loop = loop
self._peer_manager = peer_manager
self._parent_node_id = parent_node_id
self._split_buckets_under_index = split_buckets_under_index
self.buckets: typing.List[KBucket] = [
KBucket(
self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id,
capacity=1 << 32 if is_bootstrap_node else constants.K
self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id
)
]
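The bootstrap-mode behaviour described in the docstring comes down to the `capacity=1 << 32 if is_bootstrap_node else constants.K` argument seen in this hunk: the single full-range bucket of a bootstrap node is given an effectively unlimited capacity. A reduced sketch of that decision (the concrete values of `K` and `HASH_BITS` here are assumptions for illustration):

```python
K = 8            # usual Kademlia bucket size (assumed value)
HASH_BITS = 384  # DHT node id width used by this project (assumed value)

def initial_bucket_capacity(is_bootstrap_node: bool) -> int:
    # a bootstrap node keeps every peer it hears about, so its first
    # (and only) bucket gets a practically unbounded capacity
    return 1 << 32 if is_bootstrap_node else K

assert initial_bucket_capacity(False) == 8
assert initial_bucket_capacity(True) == 2 ** 32
```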
def get_peers(self) -> typing.List['KademliaPeer']:
return list(itertools.chain.from_iterable(map(lambda bucket: bucket.peers, self.buckets)))
def _should_split(self, bucket_index: int, to_add: bytes) -> bool:
def should_split(self, bucket_index: int, to_add: bytes) -> bool:
# https://stackoverflow.com/questions/32129978/highly-unbalanced-kademlia-routing-table/32187456#32187456
if bucket_index < self._split_buckets_under_index:
return True
@ -233,32 +203,39 @@ class TreeRoutingTable:
return []
def get_peer(self, contact_id: bytes) -> 'KademliaPeer':
return self.buckets[self._kbucket_index(contact_id)].get_peer(contact_id)
"""
@raise IndexError: No contact with the specified contact ID is known
by this node
"""
return self.buckets[self.kbucket_index(contact_id)].get_peer(contact_id)
def get_refresh_list(self, start_index: int = 0, force: bool = False) -> typing.List[bytes]:
bucket_index = start_index
refresh_ids = []
for offset, _ in enumerate(self.buckets[start_index:]):
refresh_ids.append(self._midpoint_id_in_bucket_range(start_index + offset))
# if we have 3 or fewer populated buckets get two random ids in the range of each to try and
# populate/split the buckets further
buckets_with_contacts = self.buckets_with_contacts()
if buckets_with_contacts <= 3:
for i in range(buckets_with_contacts):
refresh_ids.append(self._random_id_in_bucket_range(i))
refresh_ids.append(self._random_id_in_bucket_range(i))
now = int(self._loop.time())
for bucket in self.buckets[start_index:]:
if force or now - bucket.last_accessed >= constants.REFRESH_INTERVAL:
to_search = self.midpoint_id_in_bucket_range(bucket_index)
refresh_ids.append(to_search)
bucket_index += 1
return refresh_ids
def remove_peer(self, peer: 'KademliaPeer') -> None:
if not peer.node_id:
return
bucket_index = self._kbucket_index(peer.node_id)
bucket_index = self.kbucket_index(peer.node_id)
try:
self.buckets[bucket_index].remove_peer(peer)
self._join_buckets()
except ValueError:
return
def _kbucket_index(self, key: bytes) -> int:
def touch_kbucket(self, key: bytes) -> None:
self.touch_kbucket_by_index(self.kbucket_index(key))
def touch_kbucket_by_index(self, bucket_index: int):
self.buckets[bucket_index].last_accessed = int(self._loop.time())
def kbucket_index(self, key: bytes) -> int:
i = 0
for bucket in self.buckets:
if bucket.key_in_range(key):
@ -267,19 +244,19 @@ class TreeRoutingTable:
i += 1
return i
def _random_id_in_bucket_range(self, bucket_index: int) -> bytes:
def random_id_in_bucket_range(self, bucket_index: int) -> bytes:
random_id = int(random.randrange(self.buckets[bucket_index].range_min, self.buckets[bucket_index].range_max))
return Distance(
self._parent_node_id
)(random_id.to_bytes(constants.HASH_LENGTH, 'big')).to_bytes(constants.HASH_LENGTH, 'big')
def _midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
def midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
half = int((self.buckets[bucket_index].range_max - self.buckets[bucket_index].range_min) // 2)
return Distance(self._parent_node_id)(
int(self.buckets[bucket_index].range_min + half).to_bytes(constants.HASH_LENGTH, 'big')
).to_bytes(constants.HASH_LENGTH, 'big')
def _split_bucket(self, old_bucket_index: int) -> None:
def split_bucket(self, old_bucket_index: int) -> None:
""" Splits the specified k-bucket into two new buckets which together
cover the same range in the key/ID space
@ -302,9 +279,8 @@ class TreeRoutingTable:
# ...and remove them from the old bucket
for contact in new_bucket.peers:
old_bucket.remove_peer(contact)
self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
def _join_buckets(self):
def join_buckets(self):
if len(self.buckets) == 1:
return
to_pop = [i for i, bucket in enumerate(self.buckets) if len(bucket) == 0]
@ -326,8 +302,14 @@ class TreeRoutingTable:
elif can_go_higher:
self.buckets[bucket_index_to_pop + 1].range_min = bucket.range_min
self.buckets.remove(bucket)
self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
return self._join_buckets()
return self.join_buckets()
def contact_in_routing_table(self, address_tuple: typing.Tuple[str, int]) -> bool:
for bucket in self.buckets:
for contact in bucket.get_peers(sort_distance_to=False):
if address_tuple[0] == contact.address and address_tuple[1] == contact.udp_port:
return True
return False
def buckets_with_contacts(self) -> int:
count = 0
@ -335,70 +317,3 @@ class TreeRoutingTable:
if len(bucket) > 0:
count += 1
return count
async def add_peer(self, peer: 'KademliaPeer', probe: typing.Callable[['KademliaPeer'], typing.Awaitable]):
if not peer.node_id:
log.warning("Tried adding a peer with no node id!")
return False
for my_peer in self.get_peers():
if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
self.remove_peer(my_peer)
self._join_buckets()
bucket_index = self._kbucket_index(peer.node_id)
if self.buckets[bucket_index].add_peer(peer):
return True
# The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
if self._should_split(bucket_index, peer.node_id):
self._split_bucket(bucket_index)
# Retry the insertion attempt
result = await self.add_peer(peer, probe)
self._join_buckets()
return result
else:
# We can't split the k-bucket
#
# The 13 page kademlia paper specifies that the least recently contacted node in the bucket
# shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
# the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
#
# A reasonable extension to this is BEP 0005, which extends the above:
#
# Not all nodes that we learn about are equal. Some are "good" and some are not.
# Many nodes using the DHT are able to send queries and receive responses,
# but are not able to respond to queries from other nodes. It is important that
# each node's routing table must contain only known good nodes. A good node is
a node that has responded to one of our queries within the last 15 minutes. A node
# is also good if it has ever responded to one of our queries and has sent us a
# query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
# questionable. Nodes become bad when they fail to respond to multiple queries
# in a row. Nodes that we know are good are given priority over nodes with unknown status.
#
# When there are bad or questionable nodes in the bucket, the least recent is selected for
# potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
# contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
# is ignored if the pinged node replies.
not_good_contacts = self.buckets[bucket_index].get_bad_or_unknown_peers()
not_recently_replied = []
for my_peer in not_good_contacts:
last_replied = self._peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
if not last_replied or last_replied + 60 < self._loop.time():
not_recently_replied.append(my_peer)
if not_recently_replied:
to_replace = not_recently_replied[0]
else:
to_replace = self.buckets[bucket_index].peers[0]
last_replied = self._peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
if last_replied and last_replied + 60 > self._loop.time():
return False
log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
try:
await probe(to_replace)
return False
except (asyncio.TimeoutError, RemoteException):
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
if to_replace in self.buckets[bucket_index]:
self.buckets[bucket_index].remove_peer(to_replace)
return await self.add_peer(peer, probe)
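The comment block above describes the BEP 0005 freshness rule used to pick an eviction candidate (the surrounding code additionally applies its own 60-second recency check before pinging). A minimal sketch of just the BEP 0005 "good node" rule, assuming we track per-peer `last_replied` and `last_queried` timestamps:

```python
import time

FIFTEEN_MINUTES = 15 * 60

def is_good(last_replied: float, last_queried: float, now: float = None) -> bool:
    """BEP 0005: a node is good if it replied to us within the last 15 minutes,
    or if it has ever replied and has queried us within the last 15 minutes."""
    now = time.monotonic() if now is None else now
    if last_replied and now - last_replied < FIFTEEN_MINUTES:
        return True
    return bool(last_replied) and bool(last_queried) and now - last_queried < FIFTEEN_MINUTES

# replied 20 minutes ago but queried us 5 minutes ago -> still good
now = 10_000.0
assert is_good(last_replied=now - 1200, last_queried=now - 300, now=now)
# never replied -> questionable/unknown, not good
assert not is_good(last_replied=0.0, last_queried=now - 10, now=now)
```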

View file

@ -181,7 +181,7 @@ def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDa
def make_compact_ip(address: str) -> bytearray:
compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), address.split('.'), bytearray())
if len(compact_ip) != 4:
raise ValueError("invalid IPv4 length")
raise ValueError(f"invalid IPv4 length")
return compact_ip
@ -190,7 +190,7 @@ def make_compact_address(node_id: bytes, address: str, port: int) -> bytearray:
if not 0 < port < 65536:
raise ValueError(f'Invalid port: {port}')
if len(node_id) != constants.HASH_BITS // 8:
raise ValueError("invalid node node_id length")
raise ValueError(f"invalid node node_id length")
return compact_ip + port.to_bytes(2, 'big') + node_id
@ -201,5 +201,5 @@ def decode_compact_address(compact_address: bytes) -> typing.Tuple[bytes, str, i
if not 0 < port < 65536:
raise ValueError(f'Invalid port: {port}')
if len(node_id) != constants.HASH_BITS // 8:
raise ValueError("invalid node node_id length")
raise ValueError(f"invalid node node_id length")
return node_id, address, port
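The serialization helpers in this file pack a peer as 4 IPv4 octets, a 2-byte big-endian port, and the raw node id (`HASH_BITS // 8` bytes). A self-contained sketch of that compact-address layout, assuming 48-byte node ids (an assumption based on the 384-bit ids used elsewhere in this DHT):

```python
NODE_ID_BYTES = 48  # HASH_BITS // 8, assuming 384-bit node ids

def make_compact_address(node_id: bytes, address: str, port: int) -> bytes:
    octets = [int(x) for x in address.split('.')]
    if len(octets) != 4 or not all(0 <= o <= 255 for o in octets):
        raise ValueError("invalid IPv4 address")
    if not 0 < port < 65536:
        raise ValueError(f"invalid port: {port}")
    if len(node_id) != NODE_ID_BYTES:
        raise ValueError("invalid node id length")
    return bytes(octets) + port.to_bytes(2, 'big') + node_id

def decode_compact_address(compact: bytes):
    address = '.'.join(str(b) for b in compact[:4])
    port = int.from_bytes(compact[4:6], 'big')
    return compact[6:], address, port

nid = bytes(range(48))
packed = make_compact_address(nid, "10.0.0.1", 4444)
assert decode_compact_address(packed) == (nid, "10.0.0.1", 4444)
```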

View file

@ -34,11 +34,6 @@ Code | Name | Message
**11x** | InputValue(ValueError) | Invalid argument value provided to command.
111 | GenericInputValue | The value '{value}' for argument '{argument}' is not valid.
112 | InputValueIsNone | None or null is not valid value for argument '{argument}'.
113 | ConflictingInputValue | Only '{first_argument}' or '{second_argument}' is allowed, not both.
114 | InputStringIsBlank | {argument} cannot be blank.
115 | EmptyPublishedFile | Cannot publish empty file: {file_path}
116 | MissingPublishedFile | File does not exist: {file_path}
117 | InvalidStreamURL | Invalid LBRY stream URL: '{url}' -- When a URL cannot be downloaded, such as '@Channel/' or a collection
**2xx** | Configuration | Configuration errors.
201 | ConfigWrite | Cannot write configuration file '{path}'. -- When writing the default config fails on startup, such as due to permission issues.
202 | ConfigRead | Cannot find provided configuration file '{path}'. -- Can't open the config file user provided via command line args.
@ -56,22 +51,15 @@ Code | Name | Message
405 | ChannelKeyNotFound | Channel signing key not found.
406 | ChannelKeyInvalid | Channel signing key is out of date. -- For example, channel was updated but you don't have the updated key.
407 | DataDownload | Failed to download blob. *generic*
408 | PrivateKeyNotFound | Couldn't find private key for {key} '{value}'.
410 | Resolve | Failed to resolve '{url}'.
411 | ResolveTimeout | Failed to resolve '{url}' within the timeout.
411 | ResolveCensored | Resolve of '{url}' was censored by channel with claim id '{censor_id}'.
411 | ResolveCensored | Resolve of '{url}' was censored by channel with claim id '{claim_id(censor_hash)}'.
420 | KeyFeeAboveMaxAllowed | {message}
421 | InvalidPassword | Password is invalid.
422 | IncompatibleWalletServer | '{server}:{port}' has an incompatibly old version.
423 | TooManyClaimSearchParameters | {key} cant have more than {limit} items.
424 | AlreadyPurchased | You already have a purchase for claim_id '{claim_id_hex}'. Use --allow-duplicate-purchase flag to override.
431 | ServerPaymentInvalidAddress | Invalid address from wallet server: '{address}' - skipping payment round.
432 | ServerPaymentWalletLocked | Cannot spend funds with locked wallet, skipping payment round.
433 | ServerPaymentFeeAboveMaxAllowed | Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.
434 | WalletNotLoaded | Wallet {wallet_id} is not loaded.
435 | WalletAlreadyLoaded | Wallet {wallet_path} is already loaded.
436 | WalletNotFound | Wallet not found at {wallet_path}.
437 | WalletAlreadyExists | Wallet {wallet_path} already exists, use `wallet_add` to load it.
**5xx** | Blob | **Blobs**
500 | BlobNotFound | Blob not found.
501 | BlobPermissionDenied | Permission denied to read blob.

View file

@ -76,45 +76,6 @@ class InputValueIsNoneError(InputValueError):
super().__init__(f"None or null is not valid value for argument '{argument}'.")
class ConflictingInputValueError(InputValueError):
def __init__(self, first_argument, second_argument):
self.first_argument = first_argument
self.second_argument = second_argument
super().__init__(f"Only '{first_argument}' or '{second_argument}' is allowed, not both.")
class InputStringIsBlankError(InputValueError):
def __init__(self, argument):
self.argument = argument
super().__init__(f"{argument} cannot be blank.")
class EmptyPublishedFileError(InputValueError):
def __init__(self, file_path):
self.file_path = file_path
super().__init__(f"Cannot publish empty file: {file_path}")
class MissingPublishedFileError(InputValueError):
def __init__(self, file_path):
self.file_path = file_path
super().__init__(f"File does not exist: {file_path}")
class InvalidStreamURLError(InputValueError):
"""
When a URL cannot be downloaded, such as '@Channel/' or a collection
"""
def __init__(self, url):
self.url = url
super().__init__(f"Invalid LBRY stream URL: '{url}'")
class ConfigurationError(BaseError):
"""
Configuration errors.
@ -238,14 +199,6 @@ class DataDownloadError(WalletError):
super().__init__("Failed to download blob. *generic*")
class PrivateKeyNotFoundError(WalletError):
def __init__(self, key, value):
self.key = key
self.value = value
super().__init__(f"Couldn't find private key for {key} '{value}'.")
class ResolveError(WalletError):
def __init__(self, url):
@ -262,11 +215,10 @@ class ResolveTimeoutError(WalletError):
class ResolveCensoredError(WalletError):
def __init__(self, url, censor_id, censor_row):
def __init__(self, url, censor_hash):
self.url = url
self.censor_id = censor_id
self.censor_row = censor_row
super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{censor_id}'.")
self.censor_hash = censor_hash
super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{claim_id(censor_hash)}'.")
class KeyFeeAboveMaxAllowedError(WalletError):
@ -290,24 +242,6 @@ class IncompatibleWalletServerError(WalletError):
super().__init__(f"'{server}:{port}' has an incompatibly old version.")
class TooManyClaimSearchParametersError(WalletError):
def __init__(self, key, limit):
self.key = key
self.limit = limit
super().__init__(f"{key} cant have more than {limit} items.")
class AlreadyPurchasedError(WalletError):
"""
allow-duplicate-purchase flag to override.
"""
def __init__(self, claim_id_hex):
self.claim_id_hex = claim_id_hex
super().__init__(f"You already have a purchase for claim_id '{claim_id_hex}'. Use")
class ServerPaymentInvalidAddressError(WalletError):
def __init__(self, address):
@ -329,34 +263,6 @@ class ServerPaymentFeeAboveMaxAllowedError(WalletError):
super().__init__(f"Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.")
class WalletNotLoadedError(WalletError):
def __init__(self, wallet_id):
self.wallet_id = wallet_id
super().__init__(f"Wallet {wallet_id} is not loaded.")
class WalletAlreadyLoadedError(WalletError):
def __init__(self, wallet_path):
self.wallet_path = wallet_path
super().__init__(f"Wallet {wallet_path} is already loaded.")
class WalletNotFoundError(WalletError):
def __init__(self, wallet_path):
self.wallet_path = wallet_path
super().__init__(f"Wallet not found at {wallet_path}.")
class WalletAlreadyExistsError(WalletError):
def __init__(self, wallet_path):
self.wallet_path = wallet_path
super().__init__(f"Wallet {wallet_path} already exists, use `wallet_add` to load it.")
class BlobError(BaseError):
"""
**Blobs**

View file

@ -63,7 +63,7 @@ class ErrorClass:
@staticmethod
def get_fields(args):
if len(args) > 1:
return ''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:])
return f''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:])
return ''
@staticmethod

View file

@ -101,7 +101,7 @@ class ArgumentParser(argparse.ArgumentParser):
self._optionals.title = 'Options'
if group_name is None:
self.epilog = (
"Run 'lbrynet COMMAND --help' for more information on a command or group."
f"Run 'lbrynet COMMAND --help' for more information on a command or group."
)
else:
self.epilog = (
@ -226,9 +226,6 @@ def get_argument_parser():
def ensure_directory_exists(path: str):
if not os.path.isdir(path):
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
use_effective_ids = os.access in os.supports_effective_ids
if not os.access(path, os.W_OK, effective_ids=use_effective_ids):
raise PermissionError(f"The following directory is not writable: {path}")
LOG_MODULES = 'lbry', 'aioupnp'

View file

@ -18,7 +18,6 @@ DOWNLOAD_STARTED = 'Download Started'
DOWNLOAD_ERRORED = 'Download Errored'
DOWNLOAD_FINISHED = 'Download Finished'
HEARTBEAT = 'Heartbeat'
DISK_SPACE = 'Disk Space'
CLAIM_ACTION = 'Claim Action' # publish/create/update/abandon
NEW_CHANNEL = 'New Channel'
CREDITS_SENT = 'Credits Sent'
@ -170,15 +169,6 @@ class AnalyticsManager:
})
)
async def send_disk_space_used(self, storage_used, storage_limit, is_from_network_quota):
await self.track(
self._event(DISK_SPACE, {
'used': storage_used,
'limit': storage_limit,
'from_network_quota': is_from_network_quota
})
)
async def send_server_startup(self):
await self.track(self._event(SERVER_STARTUP))

View file

@ -1,5 +1,5 @@
from lbry.extras.cli import execute_command
from lbry.conf import Config
from lbry.extras.cli import execute_command
def daemon_rpc(conf: Config, method: str, **kwargs):

View file

@ -0,0 +1,79 @@
import logging
import time
import hashlib
import binascii
import ecdsa
from lbry import utils
from lbry.crypto.hash import sha256
from lbry.wallet.transaction import Output
log = logging.getLogger(__name__)
def get_encoded_signature(signature):
signature = signature.encode() if isinstance(signature, str) else signature
r = int(signature[:int(len(signature) / 2)], 16)
s = int(signature[int(len(signature) / 2):], 16)
return ecdsa.util.sigencode_der(r, s, len(signature) * 4)
def cid2hash(claim_id: str) -> bytes:
return binascii.unhexlify(claim_id.encode())[::-1]
def is_comment_signed_by_channel(comment: dict, channel: Output, sign_comment_id=False):
if isinstance(channel, Output):
try:
signing_field = comment['comment_id'] if sign_comment_id else comment['comment']
return verify(channel, signing_field.encode(), comment, cid2hash(comment['channel_id']))
except KeyError:
pass
return False
def verify(channel, data, signature, channel_hash=None):
pieces = [
signature['signing_ts'].encode(),
channel_hash or channel.claim_hash,
data
]
return Output.is_signature_valid(
get_encoded_signature(signature['signature']),
sha256(b''.join(pieces)),
channel.claim.channel.public_key_bytes
)
def sign_comment(comment: dict, channel: Output, sign_comment_id=False):
signing_field = comment['comment_id'] if sign_comment_id else comment['comment']
comment.update(sign(channel, signing_field.encode()))
def sign(channel, data):
timestamp = str(int(time.time()))
pieces = [timestamp.encode(), channel.claim_hash, data]
digest = sha256(b''.join(pieces))
signature = channel.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
return {
'signature': binascii.hexlify(signature).decode(),
'signing_ts': timestamp
}
def sign_reaction(reaction: dict, channel: Output):
signing_field = reaction['channel_name']
reaction.update(sign(channel, signing_field.encode()))
async def jsonrpc_post(url: str, method: str, params: dict = None, **kwargs) -> any:
params = params or {}
params.update(kwargs)
json_body = {'jsonrpc': '2.0', 'id': 1, 'method': method, 'params': params}
async with utils.aiohttp_request('POST', url, json=json_body) as response:
try:
result = await response.json()
return result['result'] if 'result' in result else result
except Exception as cte:
log.exception('Unable to decode response from server: %s', cte)
return await response.text()
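A rough picture of what this new helper module does: a comment (or reaction) signature covers sha256(signing_ts + claim_hash + payload), produced deterministically with the channel's ECDSA key and hex-encoded alongside the timestamp. The round trip below uses the `ecdsa` package directly with a throwaway key standing in for the channel `Output`, so it illustrates the digest scheme rather than the project's API:

```python
import time, hashlib, binascii
import ecdsa

private_key = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
claim_hash = bytes(20)   # stand-in for the channel's claim hash
payload = b"first!"      # the comment body (or comment_id when sign_comment_id is used)

# sign: digest over (timestamp, claim_hash, payload), raw r||s signature, hex encoded
signing_ts = str(int(time.time()))
digest = hashlib.sha256(b''.join([signing_ts.encode(), claim_hash, payload])).digest()
signature = binascii.hexlify(
    private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
).decode()

# verify: rebuild the digest the same way and check the signature against the public key
digest2 = hashlib.sha256(b''.join([signing_ts.encode(), claim_hash, payload])).digest()
assert private_key.get_verifying_key().verify_digest(binascii.unhexlify(signature), digest2)
```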

View file

@ -37,7 +37,7 @@ class Component(metaclass=ComponentType):
def running(self):
return self._running
async def get_status(self): # pylint: disable=no-self-use
async def get_status(self):
return
async def start(self):

View file

@ -42,7 +42,7 @@ class ComponentManager:
self.analytics_manager = analytics_manager
self.component_classes = {}
self.components = set()
self.started = asyncio.Event()
self.started = asyncio.Event(loop=self.loop)
self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop())
for component_name, component_class in self.default_component_classes.items():
@ -118,7 +118,7 @@ class ComponentManager:
component._setup() for component in stage if not component.running
]
if needing_start:
await asyncio.wait(map(asyncio.create_task, needing_start))
await asyncio.wait(needing_start)
self.started.set()
async def stop(self):
@ -131,7 +131,7 @@ class ComponentManager:
component._stop() for component in stage if component.running
]
if needing_stop:
await asyncio.wait(map(asyncio.create_task, needing_stop))
await asyncio.wait(needing_stop)
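The difference between `asyncio.wait(map(asyncio.create_task, ...))` and `asyncio.wait(needing_start)` in these hunks tracks a Python change: passing bare coroutines to `asyncio.wait` was deprecated in 3.8 and removed in later releases, so coroutines now have to be wrapped in tasks first. A small illustration:

```python
import asyncio

async def setup(name: str) -> str:
    await asyncio.sleep(0)
    return f"{name} started"

async def main():
    pending = [setup("db"), setup("wallet")]
    # wrap each coroutine in a Task; modern asyncio.wait only accepts tasks/futures
    done, _ = await asyncio.wait([asyncio.create_task(c) for c in pending])
    print(sorted(t.result() for t in done))

asyncio.run(main())
```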
def all_components_running(self, *component_names):
"""

View file

@ -4,7 +4,6 @@ import asyncio
import logging
import binascii
import typing
import base58
from aioupnp import __version__ as aioupnp_version
@ -16,9 +15,7 @@ from lbry.dht.node import Node
from lbry.dht.peer import is_valid_public_ipv4
from lbry.dht.blob_announcer import BlobAnnouncer
from lbry.blob.blob_manager import BlobManager
from lbry.blob.disk_space_manager import DiskSpaceManager
from lbry.blob_exchange.server import BlobServer
from lbry.stream.background_downloader import BackgroundDownloader
from lbry.stream.stream_manager import StreamManager
from lbry.file.file_manager import FileManager
from lbry.extras.daemon.component import Component
@ -27,8 +24,10 @@ from lbry.extras.daemon.storage import SQLiteStorage
from lbry.torrent.torrent_manager import TorrentManager
from lbry.wallet import WalletManager
from lbry.wallet.usage_payment import WalletServerPayer
from lbry.torrent.tracker import TrackerClient
from lbry.torrent.session import TorrentSession
try:
from lbry.torrent.session import TorrentSession
except ImportError:
TorrentSession = None
log = logging.getLogger(__name__)
@ -41,12 +40,9 @@ WALLET_SERVER_PAYMENTS_COMPONENT = "wallet_server_payments"
DHT_COMPONENT = "dht"
HASH_ANNOUNCER_COMPONENT = "hash_announcer"
FILE_MANAGER_COMPONENT = "file_manager"
DISK_SPACE_COMPONENT = "disk_space"
BACKGROUND_DOWNLOADER_COMPONENT = "background_downloader"
PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
UPNP_COMPONENT = "upnp"
EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
TRACKER_ANNOUNCER_COMPONENT = "tracker_announcer_component"
LIBTORRENT_COMPONENT = "libtorrent_component"
@ -63,7 +59,7 @@ class DatabaseComponent(Component):
@staticmethod
def get_current_db_revision():
return 15
return 14
@property
def revision_filename(self):
@ -142,7 +138,7 @@ class WalletComponent(Component):
'availability': session.available,
} for session in sessions
],
'known_servers': len(self.wallet_manager.ledger.network.known_hubs),
'known_servers': len(self.wallet_manager.ledger.network.config['default_servers']),
'available_servers': 1 if is_connected else 0
}
@ -293,7 +289,6 @@ class DHTComponent(Component):
peer_port=self.external_peer_port,
rpc_timeout=self.conf.node_rpc_timeout,
split_buckets_under_index=self.conf.split_buckets_under_index,
is_bootstrap_node=self.conf.is_bootstrap_node,
storage=storage
)
self.dht_node.start(self.conf.network_interface, self.conf.known_dht_nodes)
@ -357,6 +352,10 @@ class FileManagerComponent(Component):
wallet = self.component_manager.get_component(WALLET_COMPONENT)
node = self.component_manager.get_component(DHT_COMPONENT) \
if self.component_manager.has_component(DHT_COMPONENT) else None
try:
torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT) if TorrentSession else None
except NameError:
torrent = None
log.info('Starting the file manager')
loop = asyncio.get_event_loop()
self.file_manager = FileManager(
@ -365,8 +364,7 @@ class FileManagerComponent(Component):
self.file_manager.source_managers['stream'] = StreamManager(
loop, self.conf, blob_manager, wallet, storage, node,
)
if self.component_manager.has_component(LIBTORRENT_COMPONENT):
torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT)
if TorrentSession and LIBTORRENT_COMPONENT not in self.conf.components_to_skip:
self.file_manager.source_managers['torrent'] = TorrentManager(
loop, self.conf, torrent, storage, self.component_manager.analytics_manager
)
@ -374,106 +372,7 @@ class FileManagerComponent(Component):
log.info('Done setting up file manager')
async def stop(self):
await self.file_manager.stop()
class BackgroundDownloaderComponent(Component):
MIN_PREFIX_COLLIDING_BITS = 8
component_name = BACKGROUND_DOWNLOADER_COMPONENT
depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT, DISK_SPACE_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.background_task: typing.Optional[asyncio.Task] = None
self.download_loop_delay_seconds = 60
self.ongoing_download: typing.Optional[asyncio.Task] = None
self.space_manager: typing.Optional[DiskSpaceManager] = None
self.blob_manager: typing.Optional[BlobManager] = None
self.background_downloader: typing.Optional[BackgroundDownloader] = None
self.dht_node: typing.Optional[Node] = None
self.space_available: typing.Optional[int] = None
@property
def is_busy(self):
return bool(self.ongoing_download and not self.ongoing_download.done())
@property
def component(self) -> 'BackgroundDownloaderComponent':
return self
async def get_status(self):
return {'running': self.background_task is not None and not self.background_task.done(),
'available_free_space_mb': self.space_available,
'ongoing_download': self.is_busy}
async def download_blobs_in_background(self):
while True:
self.space_available = await self.space_manager.get_free_space_mb(True)
if not self.is_busy and self.space_available > 10:
self._download_next_close_blob_hash()
await asyncio.sleep(self.download_loop_delay_seconds)
def _download_next_close_blob_hash(self):
node_id = self.dht_node.protocol.node_id
for blob_hash in self.dht_node.stored_blob_hashes:
if blob_hash.hex() in self.blob_manager.completed_blob_hashes:
continue
if utils.get_colliding_prefix_bits(node_id, blob_hash) >= self.MIN_PREFIX_COLLIDING_BITS:
self.ongoing_download = asyncio.create_task(self.background_downloader.download_blobs(blob_hash.hex()))
return
async def start(self):
self.space_manager: DiskSpaceManager = self.component_manager.get_component(DISK_SPACE_COMPONENT)
if not self.component_manager.has_component(DHT_COMPONENT):
return
self.dht_node = self.component_manager.get_component(DHT_COMPONENT)
self.blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
storage = self.component_manager.get_component(DATABASE_COMPONENT)
self.background_downloader = BackgroundDownloader(self.conf, storage, self.blob_manager, self.dht_node)
self.background_task = asyncio.create_task(self.download_blobs_in_background())
async def stop(self):
if self.ongoing_download and not self.ongoing_download.done():
self.ongoing_download.cancel()
if self.background_task:
self.background_task.cancel()
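The background downloader above only fetches blobs whose hashes share at least `MIN_PREFIX_COLLIDING_BITS` leading bits with our own node id, via `utils.get_colliding_prefix_bits`. A standalone sketch of that prefix comparison (an illustration of the idea, not necessarily the project's exact implementation):

```python
def colliding_prefix_bits(a: bytes, b: bytes) -> int:
    """Number of identical leading bits shared by two equal-length byte strings."""
    assert len(a) == len(b)
    bits = len(a) * 8
    diff = int.from_bytes(a, 'big') ^ int.from_bytes(b, 'big')
    return bits if diff == 0 else bits - diff.bit_length()

assert colliding_prefix_bits(b'\xff\x00', b'\xff\x00') == 16  # identical
assert colliding_prefix_bits(b'\xff\x00', b'\xff\x80') == 8   # first difference at bit 9
assert colliding_prefix_bits(b'\x00\x01', b'\x80\x01') == 0   # differ at the very first bit
```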
class DiskSpaceComponent(Component):
component_name = DISK_SPACE_COMPONENT
depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.disk_space_manager: typing.Optional[DiskSpaceManager] = None
@property
def component(self) -> typing.Optional[DiskSpaceManager]:
return self.disk_space_manager
async def get_status(self):
if self.disk_space_manager:
space_used = await self.disk_space_manager.get_space_used_mb(cached=True)
return {
'total_used_mb': space_used['total'],
'published_blobs_storage_used_mb': space_used['private_storage'],
'content_blobs_storage_used_mb': space_used['content_storage'],
'seed_blobs_storage_used_mb': space_used['network_storage'],
'running': self.disk_space_manager.running,
}
return {'space_used': '0', 'network_seeding_space_used': '0', 'running': False}
async def start(self):
db = self.component_manager.get_component(DATABASE_COMPONENT)
blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
self.disk_space_manager = DiskSpaceManager(
self.conf, db, blob_manager,
analytics=self.component_manager.analytics_manager
)
await self.disk_space_manager.start()
async def stop(self):
await self.disk_space_manager.stop()
self.file_manager.stop()
class TorrentComponent(Component):
@ -495,8 +394,9 @@ class TorrentComponent(Component):
}
async def start(self):
self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
await self.torrent_session.bind() # TODO: specify host/port
if TorrentSession:
self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
await self.torrent_session.bind() # TODO: specify host/port
async def stop(self):
if self.torrent_session:
@ -551,7 +451,7 @@ class UPnPComponent(Component):
while True:
if now:
await self._maintain_redirects()
await asyncio.sleep(360)
await asyncio.sleep(360, loop=self.component_manager.loop)
async def _maintain_redirects(self):
# setup the gateway if necessary
@ -560,6 +460,8 @@ class UPnPComponent(Component):
self.upnp = await UPnP.discover(loop=self.component_manager.loop)
log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.warning("upnp discovery failed: %s", err)
self.upnp = None
@ -579,10 +481,6 @@ class UPnPComponent(Component):
log.info("external ip changed from %s to %s", self.external_ip, external_ip)
if external_ip:
self.external_ip = external_ip
dht_component = self.component_manager.get_component(DHT_COMPONENT)
if dht_component:
dht_node = dht_component.component
dht_node.protocol.external_ip = external_ip
# assert self.external_ip is not None # TODO: handle going/starting offline
if not self.upnp_redirects and self.upnp: # setup missing redirects
@ -641,10 +539,8 @@ class UPnPComponent(Component):
success = False
await self._maintain_redirects()
if self.upnp:
if not self.upnp_redirects and not all(
x in self.component_manager.skip_components
for x in (DHT_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT)
):
if not self.upnp_redirects and not all([x in self.component_manager.skip_components for x in
(DHT_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT)]):
log.error("failed to setup upnp")
else:
success = True
@ -671,7 +567,7 @@ class UPnPComponent(Component):
log.info("Removing upnp redirects: %s", self.upnp_redirects)
await asyncio.wait([
self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
])
], loop=self.component_manager.loop)
if self._maintain_redirects_task and not self._maintain_redirects_task.done():
self._maintain_redirects_task.cancel()
@ -702,49 +598,3 @@ class ExchangeRateManagerComponent(Component):
async def stop(self):
self.exchange_rate_manager.stop()
class TrackerAnnouncerComponent(Component):
component_name = TRACKER_ANNOUNCER_COMPONENT
depends_on = [FILE_MANAGER_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.file_manager = None
self.announce_task = None
self.tracker_client: typing.Optional[TrackerClient] = None
@property
def component(self):
return self.tracker_client
@property
def running(self):
return self._running and self.announce_task and not self.announce_task.done()
async def announce_forever(self):
while True:
sleep_seconds = 60.0
announce_sd_hashes = []
for file in self.file_manager.get_filtered():
if not file.downloader:
continue
announce_sd_hashes.append(bytes.fromhex(file.sd_hash))
await self.tracker_client.announce_many(*announce_sd_hashes)
await asyncio.sleep(sleep_seconds)
async def start(self):
node = self.component_manager.get_component(DHT_COMPONENT) \
if self.component_manager.has_component(DHT_COMPONENT) else None
node_id = node.protocol.node_id if node else None
self.tracker_client = TrackerClient(node_id, self.conf.tcp_port, lambda: self.conf.tracker_servers)
await self.tracker_client.start()
self.file_manager = self.component_manager.get_component(FILE_MANAGER_COMPONENT)
self.announce_task = asyncio.create_task(self.announce_forever())
async def stop(self):
self.file_manager = None
if self.announce_task and not self.announce_task.done():
self.announce_task.cancel()
self.announce_task = None
self.tracker_client.stop()

File diff suppressed because it is too large

View file

@ -5,7 +5,7 @@ import logging
from statistics import median
from decimal import Decimal
from typing import Optional, Iterable, Type
from aiohttp.client_exceptions import ContentTypeError, ClientConnectionError
from aiohttp.client_exceptions import ContentTypeError
from lbry.error import InvalidExchangeRateResponseError, CurrencyConversionError
from lbry.utils import aiohttp_request
from lbry.wallet.dewies import lbc_to_dewies
@ -79,21 +79,18 @@ class MarketFeed:
log.debug("Saving rate update %f for %s from %s", rate, self.market, self.name)
self.rate = ExchangeRate(self.market, rate, int(time.time()))
self.last_check = time.time()
self.event.set()
return self.rate
except asyncio.CancelledError:
raise
except asyncio.TimeoutError:
log.warning("Timed out fetching exchange rate from %s.", self.name)
except json.JSONDecodeError as e:
msg = e.doc if '<html>' not in e.doc else 'unexpected content type.'
log.warning("Could not parse exchange rate response from %s: %s", self.name, msg)
log.debug(e.doc)
log.warning("Could not parse exchange rate response from %s: %s", self.name, e.doc)
except InvalidExchangeRateResponseError as e:
log.warning(str(e))
except ClientConnectionError as e:
log.warning("Error trying to connect to exchange rate %s: %s", self.name, str(e))
except Exception as e:
log.exception("Exchange rate error (%s from %s):", self.market, self.name)
finally:
self.event.set()
async def keep_updated(self):
while True:
@ -133,6 +130,27 @@ class BittrexUSDFeed(BaseBittrexFeed):
url = "https://api.bittrex.com/v3/markets/LBC-USD/ticker"
class BaseCryptonatorFeed(MarketFeed):
name = "Cryptonator"
market = None
url = None
def get_rate_from_response(self, json_response):
if 'ticker' not in json_response or 'price' not in json_response['ticker']:
raise InvalidExchangeRateResponseError(self.name, 'result not found')
return float(json_response['ticker']['price'])
class CryptonatorBTCFeed(BaseCryptonatorFeed):
market = "BTCLBC"
url = "https://api.cryptonator.com/api/ticker/btc-lbc"
class CryptonatorUSDFeed(BaseCryptonatorFeed):
market = "USDLBC"
url = "https://api.cryptonator.com/api/ticker/usd-lbc"
class BaseCoinExFeed(MarketFeed):
name = "CoinEx"
market = None
@ -184,7 +202,7 @@ class UPbitBTCFeed(MarketFeed):
params = {"markets": "BTC-LBC"}
def get_rate_from_response(self, json_response):
if "error" in json_response or len(json_response) != 1 or 'trade_price' not in json_response[0]:
if len(json_response) != 1 or 'trade_price' not in json_response[0]:
raise InvalidExchangeRateResponseError(self.name, 'result not found')
return 1.0 / float(json_response[0]['trade_price'])
@ -192,11 +210,13 @@ class UPbitBTCFeed(MarketFeed):
FEEDS: Iterable[Type[MarketFeed]] = (
BittrexBTCFeed,
BittrexUSDFeed,
CryptonatorBTCFeed,
CryptonatorUSDFeed,
CoinExBTCFeed,
CoinExUSDFeed,
# HotbitBTCFeed,
# HotbitUSDFeed,
# UPbitBTCFeed,
HotbitBTCFeed,
HotbitUSDFeed,
UPbitBTCFeed,
)

View file

@ -10,7 +10,7 @@ from lbry.schema.claim import Claim
from lbry.schema.support import Support
from lbry.torrent.torrent_manager import TorrentSource
from lbry.wallet import Wallet, Ledger, Account, Transaction, Output
from lbry.wallet.bip32 import PublicKey
from lbry.wallet.bip32 import PubKey
from lbry.wallet.dewies import dewies_to_lbc
from lbry.stream.managed_stream import ManagedStream
@ -123,7 +123,7 @@ class JSONResponseEncoder(JSONEncoder):
self.ledger = ledger
self.include_protobuf = include_protobuf
def default(self, obj): # pylint: disable=method-hidden,arguments-renamed,too-many-return-statements
def default(self, obj): # pylint: disable=method-hidden,arguments-differ,too-many-return-statements
if isinstance(obj, Account):
return self.encode_account(obj)
if isinstance(obj, Wallet):
@ -138,7 +138,7 @@ class JSONResponseEncoder(JSONEncoder):
return self.encode_claim(obj)
if isinstance(obj, Support):
return obj.to_dict()
if isinstance(obj, PublicKey):
if isinstance(obj, PubKey):
return obj.extended_key_string()
if isinstance(obj, datetime):
return obj.strftime("%Y%m%dT%H:%M:%S")
@ -234,6 +234,8 @@ class JSONResponseEncoder(JSONEncoder):
output['value_type'] = txo.claim.claim_type
if txo.claim.is_channel:
output['has_signing_key'] = txo.has_private_key
elif txo.script.is_support_claim_data:
output['value_type'] = 'emoji'
if check_signature and txo.signable.is_signed:
if txo.channel is not None:
output['signing_channel'] = self.encode_output(txo.channel)
@ -328,8 +330,8 @@ class JSONResponseEncoder(JSONEncoder):
result.update({
'streaming_url': managed_stream.stream_url,
'stream_hash': managed_stream.stream_hash,
'stream_name': managed_stream.stream_name,
'suggested_file_name': managed_stream.suggested_file_name,
'stream_name': managed_stream.descriptor.stream_name,
'suggested_file_name': managed_stream.descriptor.suggested_file_name,
'sd_hash': managed_stream.descriptor.sd_hash,
'mime_type': managed_stream.mime_type,
'key': managed_stream.descriptor.key,

View file

@ -35,10 +35,6 @@ def migrate_db(conf, start, end):
from .migrate12to13 import do_migration
elif current == 13:
from .migrate13to14 import do_migration
elif current == 14:
from .migrate14to15 import do_migration
elif current == 15:
from .migrate15to16 import do_migration
else:
raise Exception(f"DB migration of version {current} to {current+1} is not available")
try:

View file

@ -1,16 +0,0 @@
import os
import sqlite3
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
cursor.executescript("""
alter table blob add column added_on integer not null default 0;
alter table blob add column is_mine integer not null default 1;
""")
connection.commit()
connection.close()

View file

@ -1,17 +0,0 @@
import os
import sqlite3
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
cursor.executescript("""
update blob set should_announce=0
where should_announce=1 and
blob.blob_hash in (select stream_blob.blob_hash from stream_blob where position=0);
""")
connection.commit()
connection.close()

View file

@ -20,7 +20,7 @@ def do_migration(conf):
"left outer join blob b ON b.blob_hash=s.blob_hash order by s.position").fetchall()
blobs_by_stream = {}
for stream_hash, position, iv, blob_hash, blob_length in blobs:
blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, 0, blob_hash))
blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, blob_hash))
for stream_name, stream_key, suggested_filename, sd_hash, stream_hash in streams:
sd = StreamDescriptor(None, blob_dir, stream_name, stream_key, suggested_filename,

View file

@ -170,8 +170,8 @@ def get_all_lbry_files(transaction: sqlite3.Connection) -> typing.List[typing.Di
def store_stream(transaction: sqlite3.Connection, sd_blob: 'BlobFile', descriptor: 'StreamDescriptor'):
# add all blobs, except the last one, which is empty
transaction.executemany(
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
((blob.blob_hash, blob.length, 0, 0, "pending", 0, 0, blob.added_on, blob.is_mine)
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)",
((blob.blob_hash, blob.length, 0, 0, "pending", 0, 0)
for blob in (descriptor.blobs[:-1] if len(descriptor.blobs) > 1 else descriptor.blobs) + [sd_blob])
).fetchall()
# associate the blobs to the stream
@ -187,8 +187,8 @@ def store_stream(transaction: sqlite3.Connection, sd_blob: 'BlobFile', descripto
).fetchall()
# ensure should_announce is set regardless if insert was ignored
transaction.execute(
"update blob set should_announce=1 where blob_hash in (?)",
(sd_blob.blob_hash,)
"update blob set should_announce=1 where blob_hash in (?, ?)",
(sd_blob.blob_hash, descriptor.blobs[0].blob_hash,)
).fetchall()
@ -242,9 +242,7 @@ class SQLiteStorage(SQLiteMixin):
should_announce integer not null default 0,
status text not null,
last_announced_time integer,
single_announce integer,
added_on integer not null,
is_mine integer not null default 0
single_announce integer
);
create table if not exists stream (
@ -337,7 +335,6 @@ class SQLiteStorage(SQLiteMixin):
tcp_port integer,
unique (address, udp_port)
);
create index if not exists blob_data on blob(blob_hash, blob_length, is_mine);
"""
def __init__(self, conf: Config, path, loop=None, time_getter: typing.Optional[typing.Callable[[], float]] = None):
@ -359,19 +356,19 @@ class SQLiteStorage(SQLiteMixin):
# # # # # # # # # blob functions # # # # # # # # #
async def add_blobs(self, *blob_hashes_and_lengths: typing.Tuple[str, int, int, int], finished=False):
async def add_blobs(self, *blob_hashes_and_lengths: typing.Tuple[str, int], finished=False):
def _add_blobs(transaction: sqlite3.Connection):
transaction.executemany(
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)",
(
(blob_hash, length, 0, 0, "pending" if not finished else "finished", 0, 0, added_on, is_mine)
for blob_hash, length, added_on, is_mine in blob_hashes_and_lengths
(blob_hash, length, 0, 0, "pending" if not finished else "finished", 0, 0)
for blob_hash, length in blob_hashes_and_lengths
)
).fetchall()
if finished:
transaction.executemany(
"update blob set status='finished' where blob.blob_hash=?", (
(blob_hash, ) for blob_hash, _, _, _ in blob_hashes_and_lengths
(blob_hash, ) for blob_hash, _ in blob_hashes_and_lengths
)
).fetchall()
return await self.db.run(_add_blobs)
@ -381,11 +378,6 @@ class SQLiteStorage(SQLiteMixin):
"select status from blob where blob_hash=?", blob_hash
)
def set_announce(self, *blob_hashes):
return self.db.execute_fetchall(
"update blob set should_announce=1 where blob_hash in (?, ?)", blob_hashes
)
def update_last_announced_blobs(self, blob_hashes: typing.List[str]):
def _update_last_announced_blobs(transaction: sqlite3.Connection):
last_announced = self.time_getter()
@ -443,62 +435,6 @@ class SQLiteStorage(SQLiteMixin):
def get_all_blob_hashes(self):
return self.run_and_return_list("select blob_hash from blob")
async def get_stored_blobs(self, is_mine: bool, is_network_blob=False):
is_mine = 1 if is_mine else 0
if is_network_blob:
return await self.db.execute_fetchall(
"select blob.blob_hash, blob.blob_length, blob.added_on "
"from blob left join stream_blob using (blob_hash) "
"where stream_blob.stream_hash is null and blob.is_mine=? and blob.status='finished'"
"order by blob.blob_length desc, blob.added_on asc",
(is_mine,)
)
sd_blobs = await self.db.execute_fetchall(
"select blob.blob_hash, blob.blob_length, blob.added_on "
"from blob join stream on blob.blob_hash=stream.sd_hash join file using (stream_hash) "
"where blob.is_mine=? order by blob.added_on asc",
(is_mine,)
)
content_blobs = await self.db.execute_fetchall(
"select blob.blob_hash, blob.blob_length, blob.added_on "
"from blob join stream_blob using (blob_hash) cross join stream using (stream_hash)"
"cross join file using (stream_hash)"
"where blob.is_mine=? and blob.status='finished' order by blob.added_on asc, blob.blob_length asc",
(is_mine,)
)
return content_blobs + sd_blobs
async def get_stored_blob_disk_usage(self):
total, network_size, content_size, private_size = await self.db.execute_fetchone("""
select coalesce(sum(blob_length), 0) as total,
coalesce(sum(case when
stream_blob.stream_hash is null
then blob_length else 0 end), 0) as network_storage,
coalesce(sum(case when
stream_blob.blob_hash is not null and is_mine=0
then blob_length else 0 end), 0) as content_storage,
coalesce(sum(case when
is_mine=1
then blob_length else 0 end), 0) as private_storage
from blob left join stream_blob using (blob_hash)
where blob_hash not in (select sd_hash from stream) and blob.status="finished"
""")
return {
'network_storage': network_size,
'content_storage': content_size,
'private_storage': private_size,
'total': total
}
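The `get_stored_blob_disk_usage` query above sums blob sizes into buckets with conditional aggregation: blobs not attached to any stream count as network (seeding) storage, stream blobs we don't own count as content storage, and anything marked `is_mine` counts as private storage. A reduced sqlite sketch of the same pattern over a toy two-table schema:

```python
import sqlite3

db = sqlite3.connect(":memory:")
db.executescript("""
    create table blob (blob_hash text primary key, blob_length integer, is_mine integer);
    create table stream_blob (blob_hash text, stream_hash text);
    insert into blob values ('aa', 100, 0), ('bb', 200, 0), ('cc', 400, 1);
    insert into stream_blob values ('bb', 's1'), ('cc', 's1');
""")
total, network, content, private = db.execute("""
    select coalesce(sum(blob_length), 0),
           coalesce(sum(case when stream_blob.stream_hash is null then blob_length else 0 end), 0),
           coalesce(sum(case when stream_blob.blob_hash is not null and is_mine=0 then blob_length else 0 end), 0),
           coalesce(sum(case when is_mine=1 then blob_length else 0 end), 0)
    from blob left join stream_blob using (blob_hash)
""").fetchone()
assert (total, network, content, private) == (700, 100, 200, 400)
```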
async def update_blob_ownership(self, sd_hash, is_mine: bool):
is_mine = 1 if is_mine else 0
await self.db.execute_fetchall(
"update blob set is_mine = ? where blob_hash in ("
" select blob_hash from blob natural join stream_blob natural join stream where sd_hash = ?"
") OR blob_hash = ?", (is_mine, sd_hash, sd_hash)
)
def sync_missing_blobs(self, blob_files: typing.Set[str]) -> typing.Awaitable[typing.Set[str]]:
def _sync_blobs(transaction: sqlite3.Connection) -> typing.Set[str]:
finished_blob_hashes = tuple(
@ -534,8 +470,7 @@ class SQLiteStorage(SQLiteMixin):
def _get_blobs_for_stream(transaction):
crypt_blob_infos = []
stream_blobs = transaction.execute(
"select s.blob_hash, s.position, s.iv, b.added_on "
"from stream_blob s left outer join blob b on b.blob_hash=s.blob_hash where stream_hash=? "
"select blob_hash, position, iv from stream_blob where stream_hash=? "
"order by position asc", (stream_hash, )
).fetchall()
if only_completed:
@ -555,10 +490,9 @@ class SQLiteStorage(SQLiteMixin):
for blob_hash, length in lengths:
blob_length_dict[blob_hash] = length
current_time = time.time()
for blob_hash, position, iv, added_on in stream_blobs:
for blob_hash, position, iv in stream_blobs:
blob_length = blob_length_dict.get(blob_hash, 0)
crypt_blob_infos.append(BlobInfo(position, blob_length, iv, added_on or current_time, blob_hash))
crypt_blob_infos.append(BlobInfo(position, blob_length, iv, blob_hash))
if not blob_hash:
break
return crypt_blob_infos
@ -636,10 +570,6 @@ class SQLiteStorage(SQLiteMixin):
log.debug("update file status %s -> %s", stream_hash, new_status)
return self.db.execute_fetchall("update file set status=? where stream_hash=?", (new_status, stream_hash))
def stop_all_files(self):
log.debug("stopping all files")
return self.db.execute_fetchall("update file set status=?", ("stopped",))
async def change_file_download_dir_and_file_name(self, stream_hash: str, download_dir: typing.Optional[str],
file_name: typing.Optional[str]):
if not file_name or not download_dir:
@ -687,7 +617,7 @@ class SQLiteStorage(SQLiteMixin):
).fetchall()
download_dir = binascii.hexlify(self.conf.download_dir.encode()).decode()
transaction.executemany(
"update file set download_directory=? where stream_hash=?",
f"update file set download_directory=? where stream_hash=?",
((download_dir, stream_hash) for stream_hash in stream_hashes)
).fetchall()
await self.db.run_with_foreign_keys_disabled(_recover)
@ -793,7 +723,7 @@ class SQLiteStorage(SQLiteMixin):
await self.db.run(_save_claims)
if update_file_callbacks:
await asyncio.wait(map(asyncio.create_task, update_file_callbacks))
await asyncio.wait(update_file_callbacks)
if claim_id_to_supports:
await self.save_supports(claim_id_to_supports)
@ -931,6 +861,6 @@ class SQLiteStorage(SQLiteMixin):
transaction.execute('delete from peer').fetchall()
transaction.executemany(
'insert into peer(node_id, address, udp_port, tcp_port) values (?, ?, ?, ?)',
((binascii.hexlify(p.node_id), p.address, p.udp_port, p.tcp_port) for p in peers)
tuple([(binascii.hexlify(p.node_id), p.address, p.udp_port, p.tcp_port) for p in peers])
).fetchall()
return await self.db.run(_save_kademlia_peers)

View file

@ -5,7 +5,6 @@ from typing import Optional
from aiohttp.web import Request
from lbry.error import ResolveError, DownloadSDTimeoutError, InsufficientFundsError
from lbry.error import ResolveTimeoutError, DownloadDataTimeoutError, KeyFeeAboveMaxAllowedError
from lbry.error import InvalidStreamURLError
from lbry.stream.managed_stream import ManagedStream
from lbry.torrent.torrent_manager import TorrentSource
from lbry.utils import cache_concurrent
@ -13,12 +12,11 @@ from lbry.schema.url import URL
from lbry.wallet.dewies import dewies_to_lbc
from lbry.file.source_manager import SourceManager
from lbry.file.source import ManagedDownloadSource
from lbry.extras.daemon.storage import StoredContentClaim
if typing.TYPE_CHECKING:
from lbry.conf import Config
from lbry.extras.daemon.analytics import AnalyticsManager
from lbry.extras.daemon.storage import SQLiteStorage
from lbry.wallet import WalletManager
from lbry.wallet import WalletManager, Output
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
log = logging.getLogger(__name__)
@ -50,10 +48,10 @@ class FileManager:
await manager.started.wait()
self.started.set()
async def stop(self):
def stop(self):
for manager in self.source_managers.values():
# fixme: pop or not?
await manager.stop()
manager.stop()
self.started.clear()
@cache_concurrent
@ -83,11 +81,8 @@ class FileManager:
payment = None
try:
# resolve the claim
try:
if not URL.parse(uri).has_stream:
raise InvalidStreamURLError(uri)
except ValueError:
raise InvalidStreamURLError(uri)
if not URL.parse(uri).has_stream:
raise ResolveError("cannot download a channel claim, specify a /path")
try:
resolved_result = await asyncio.wait_for(
self.wallet_manager.ledger.resolve(
@ -99,6 +94,8 @@ class FileManager:
except asyncio.TimeoutError:
raise ResolveTimeoutError(uri)
except Exception as err:
if isinstance(err, asyncio.CancelledError):
raise
log.exception("Unexpected error resolving stream:")
raise ResolveError(f"Unexpected error resolving stream: {str(err)}")
if 'error' in resolved_result:
@ -120,11 +117,9 @@ class FileManager:
if claim.stream.source.bt_infohash:
source_manager = self.source_managers['torrent']
existing = source_manager.get_filtered(bt_infohash=claim.stream.source.bt_infohash)
elif claim.stream.source.sd_hash:
else:
source_manager = self.source_managers['stream']
existing = source_manager.get_filtered(sd_hash=claim.stream.source.sd_hash)
else:
raise ResolveError(f"There is nothing to download at {uri} - Source is unknown or unset")
# resume or update an existing stream, if the stream changed: download it and delete the old one after
to_replace, updated_stream = None, None
@ -193,24 +188,21 @@ class FileManager:
####################
# make downloader and wait for start
####################
# temporary with fields we know so downloader can start. Missing fields are populated later.
stored_claim = StoredContentClaim(outpoint=outpoint, claim_id=txo.claim_id, name=txo.claim_name,
amount=txo.amount, height=txo.tx_ref.height,
serialized=claim.to_bytes().hex())
if not claim.stream.source.bt_infohash:
# fixme: this shouldnt be here
stream = ManagedStream(
self.loop, self.config, source_manager.blob_manager, claim.stream.source.sd_hash,
download_directory, file_name, ManagedStream.STATUS_RUNNING, content_fee=payment,
analytics_manager=self.analytics_manager, claim=stored_claim
analytics_manager=self.analytics_manager
)
stream.downloader.node = source_manager.node
else:
stream = TorrentSource(
self.loop, self.config, self.storage, identifier=claim.stream.source.bt_infohash,
file_name=file_name, download_directory=download_directory or self.config.download_dir,
status=ManagedStream.STATUS_RUNNING, claim=stored_claim, analytics_manager=self.analytics_manager,
status=ManagedStream.STATUS_RUNNING,
analytics_manager=self.analytics_manager,
torrent_session=source_manager.torrent_session
)
log.info("starting download for %s", uri)
@ -242,14 +234,15 @@ class FileManager:
claim_info = await self.storage.get_content_claim_for_torrent(stream.identifier)
stream.set_claim(claim_info, claim)
if save_file:
await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download))
await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download),
loop=self.loop)
return stream
except asyncio.TimeoutError:
error = DownloadDataTimeoutError(stream.sd_hash)
raise error
except (Exception, asyncio.CancelledError) as err: # forgive data timeout, don't delete stream
except Exception as err: # forgive data timeout, don't delete stream
expected = (DownloadSDTimeoutError, DownloadDataTimeoutError, InsufficientFundsError,
KeyFeeAboveMaxAllowedError, ResolveError, InvalidStreamURLError)
KeyFeeAboveMaxAllowedError)
if isinstance(err, expected):
log.warning("Failed to download %s: %s", uri, str(err))
elif isinstance(err, asyncio.CancelledError):

View file

@ -45,12 +45,11 @@ class ManagedDownloadSource:
self.purchase_receipt = None
self._added_on = added_on
self.analytics_manager = analytics_manager
self.downloader = None
self.saving = asyncio.Event()
self.finished_writing = asyncio.Event()
self.started_writing = asyncio.Event()
self.finished_write_attempt = asyncio.Event()
self.saving = asyncio.Event(loop=self.loop)
self.finished_writing = asyncio.Event(loop=self.loop)
self.started_writing = asyncio.Event(loop=self.loop)
self.finished_write_attempt = asyncio.Event(loop=self.loop)
# @classmethod
# async def create(cls, loop: asyncio.AbstractEventLoop, config: 'Config', file_path: str,
@ -67,7 +66,7 @@ class ManagedDownloadSource:
async def save_file(self, file_name: Optional[str] = None, download_directory: Optional[str] = None):
raise NotImplementedError()
async def stop_tasks(self):
def stop_tasks(self):
raise NotImplementedError()
def set_claim(self, claim_info: typing.Dict, claim: 'Claim'):

View file

@ -54,16 +54,16 @@ class SourceManager:
self.storage = storage
self.analytics_manager = analytics_manager
self._sources: typing.Dict[str, ManagedDownloadSource] = {}
self.started = asyncio.Event()
self.started = asyncio.Event(loop=self.loop)
def add(self, source: ManagedDownloadSource):
self._sources[source.identifier] = source
async def remove(self, source: ManagedDownloadSource):
def remove(self, source: ManagedDownloadSource):
if source.identifier not in self._sources:
return
self._sources.pop(source.identifier)
await source.stop_tasks()
source.stop_tasks()
async def initialize_from_database(self):
raise NotImplementedError()
@ -72,10 +72,10 @@ class SourceManager:
await self.initialize_from_database()
self.started.set()
async def stop(self):
def stop(self):
while self._sources:
_, source = self._sources.popitem()
await source.stop_tasks()
source.stop_tasks()
self.started.clear()
async def create(self, file_path: str, key: Optional[bytes] = None,
@ -83,7 +83,7 @@ class SourceManager:
raise NotImplementedError()
async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):
await self.remove(source)
self.remove(source)
if delete_file and source.output_file_exists:
os.remove(source.full_path)
@ -132,7 +132,7 @@ class SourceManager:
else:
streams = list(self._sources.values())
if sort_by:
streams.sort(key=lambda s: getattr(s, sort_by) or "")
streams.sort(key=lambda s: getattr(s, sort_by))
if reverse:
streams.reverse()
return streams

View file

@ -69,8 +69,8 @@ class VideoFileAnalyzer:
version = str(e)
if code != 0 or not version.startswith("ffmpeg"):
log.warning("Unable to run ffmpeg, but it was requested. Code: %d; Message: %s", code, version)
raise FileNotFoundError("Unable to locate or run ffmpeg or ffprobe. Please install FFmpeg "
"and ensure that it is callable via PATH or conf.ffmpeg_path")
raise FileNotFoundError(f"Unable to locate or run ffmpeg or ffprobe. Please install FFmpeg "
f"and ensure that it is callable via PATH or conf.ffmpeg_path")
log.debug("Using %s at %s", version.splitlines()[0].split(" Copyright")[0], self._which_ffmpeg)
return version

View file

@ -2,5 +2,4 @@ build:
rm types/v2/* -rf
touch types/v2/__init__.py
cd types/v2/ && protoc --python_out=. -I ../../../../../types/v2/proto/ ../../../../../types/v2/proto/*.proto
cd types/v2/ && cp ../../../../../types/jsonschema/* ./
sed -e 's/^import\ \(.*\)_pb2\ /from . import\ \1_pb2\ /g' -i types/v2/*.py

View file

@ -1,24 +0,0 @@
Schema
=====
These files are generated from the [types repo](https://github.com/lbryio/types). If you are modifying or adding a type, make sure that repo is cloned in the same parent folder as the SDK repo, like:
```
repos/
- lbry-sdk/
- types/
```
Then [download protoc 3.2.0](https://github.com/protocolbuffers/protobuf/releases/tag/v3.2.0) and add it to your PATH. On Linux that looks like:
```bash
cd ~/.local/bin
wget https://github.com/protocolbuffers/protobuf/releases/download/v3.2.0/protoc-3.2.0-linux-x86_64.zip
unzip protoc-3.2.0-linux-x86_64.zip bin/protoc -d..
```
Finally, `make` should update everything in place.
### Why protoc 3.2.0?
Different or newer versions will generate larger diffs, and we need to make sure those diffs are sound. In theory we could just update to the latest release and everything would keep working, but it is good practice to verify blockchain data and backward compatibility before bumping versions (if you do, please update this section!).

View file

@ -10,7 +10,6 @@ from google.protobuf.json_format import MessageToDict
from lbry.crypto.base58 import Base58
from lbry.constants import COIN
from lbry.error import MissingPublishedFileError, EmptyPublishedFileError
from lbry.schema.mime_types import guess_media_type
from lbry.schema.base import Metadata, BaseMessageList
@ -33,17 +32,6 @@ def calculate_sha384_file_hash(file_path):
return sha384.digest()
def country_int_to_str(country: int) -> str:
r = LocationMessage.Country.Name(country)
return r[1:] if r.startswith('R') else r
def country_str_to_int(country: str) -> int:
if len(country) == 3:
country = 'R' + country
return LocationMessage.Country.Value(country)
class Dimmensional(Metadata):
__slots__ = ()
@ -140,10 +128,10 @@ class Source(Metadata):
self.name = os.path.basename(file_path)
self.media_type, stream_type = guess_media_type(file_path)
if not os.path.isfile(file_path):
raise MissingPublishedFileError(file_path)
raise Exception(f"File does not exist: {file_path}")
self.size = os.path.getsize(file_path)
if self.size == 0:
raise EmptyPublishedFileError(file_path)
raise Exception(f"Cannot publish empty file: {file_path}")
self.file_hash_bytes = calculate_sha384_file_hash(file_path)
return stream_type
@ -435,11 +423,14 @@ class Language(Metadata):
@property
def region(self) -> str:
if self.message.region:
return country_int_to_str(self.message.region)
r = LocationMessage.Country.Name(self.message.region)
return r[1:] if r.startswith('R') else r
@region.setter
def region(self, region: str):
self.message.region = country_str_to_int(region)
if len(region) == 3:
region = 'R'+region
self.message.region = LocationMessage.Country.Value(region)
class LanguageList(BaseMessageList[Language]):

View file

@ -2,9 +2,6 @@ import logging
from typing import List
from binascii import hexlify, unhexlify
from asn1crypto.keys import PublicKeyInfo
from coincurve import PublicKey as cPublicKey
from google.protobuf.json_format import MessageToDict
from google.protobuf.message import DecodeError
from hachoir.core.log import log as hachoir_log
@ -306,10 +303,6 @@ class Stream(BaseClaim):
def has_fee(self) -> bool:
return self.message.HasField('fee')
@property
def has_source(self) -> bool:
return self.message.HasField('source')
@property
def source(self) -> Source:
return Source(self.message.source)
@ -349,7 +342,7 @@ class Channel(BaseClaim):
@property
def public_key(self) -> str:
return hexlify(self.public_key_bytes).decode()
return hexlify(self.message.public_key).decode()
@public_key.setter
def public_key(self, sd_public_key: str):
@ -357,11 +350,7 @@ class Channel(BaseClaim):
@property
def public_key_bytes(self) -> bytes:
if len(self.message.public_key) == 33:
return self.message.public_key
public_key_info = PublicKeyInfo.load(self.message.public_key)
public_key = cPublicKey(public_key_info.native['public_key'])
return public_key.format(compressed=True)
return self.message.public_key
@public_key_bytes.setter
def public_key_bytes(self, public_key: bytes):
@ -398,12 +387,6 @@ class Repost(BaseClaim):
claim_type = Claim.REPOST
def to_dict(self):
claim = super().to_dict()
if claim.pop('claim_hash', None):
claim['claim_id'] = self.reference.claim_id
return claim
@property
def reference(self) -> ClaimReference:
return ClaimReference(self.message)

View file

@ -1,6 +1,4 @@
import os
import filetype
import logging
types_map = {
# http://www.iana.org/assignments/media-types
@ -168,38 +166,10 @@ types_map = {
'.wmv': ('video/x-ms-wmv', 'video')
}
# maps detected extensions to the possible analogs
# i.e. a .cbz file is actually a .zip
synonyms_map = {
'.zip': ['.cbz'],
'.rar': ['.cbr'],
'.ar': ['.a']
}
log = logging.getLogger(__name__)
def guess_media_type(path):
_, ext = os.path.splitext(path)
extension = ext.strip().lower()
try:
kind = filetype.guess(path)
if kind:
real_extension = f".{kind.extension}"
if extension != real_extension:
if extension:
log.warning(f"file extension does not match its contents: {path}, identified as {real_extension}")
else:
log.debug(f"file {path} does not have an extension, identified by its contents as {real_extension}")
if extension not in synonyms_map.get(real_extension, []):
extension = real_extension
except OSError as error:
pass
if extension[1:]:
if extension in types_map:
return types_map[extension]
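The synonym handling above is easier to follow in isolation. Below is a minimal, self-contained sketch of the same idea, not the repository's actual API: it only assumes the third-party `filetype` package (which the master-side code above already imports), and the helper name `reconcile_extension` is hypothetical.
```python
import os
import filetype  # content sniffer; guess() returns None for unrecognized files

# detected container -> declared extensions that count as acceptable synonyms
SYNONYMS = {'.zip': ['.cbz'], '.rar': ['.cbr'], '.ar': ['.a']}

def reconcile_extension(path: str) -> str:
    """Pick the extension to trust for `path`, preferring the file contents."""
    declared = os.path.splitext(path)[1].strip().lower()
    kind = filetype.guess(path)
    if kind is None:
        return declared
    detected = f".{kind.extension}"
    if declared != detected and declared not in SYNONYMS.get(detected, []):
        # the contents win unless the declared extension is a known synonym,
        # e.g. a .cbz really is a .zip and keeps its comic-book extension
        return detected
    return declared
```
With the extension settled this way, `guess_media_type` only has to look it up in `types_map`.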

View file

@ -1,5 +1,6 @@
import base64
from typing import List, Union, Optional, NamedTuple
import struct
from typing import List
from binascii import hexlify
from itertools import chain
@ -15,70 +16,54 @@ BLOCKED = ErrorMessage.Code.Name(ErrorMessage.BLOCKED)
def set_reference(reference, claim_hash, rows):
if claim_hash:
for txo in rows:
if claim_hash == txo.claim_hash:
reference.tx_hash = txo.tx_hash
reference.nout = txo.position
reference.height = txo.height
if claim_hash == txo['claim_hash']:
reference.tx_hash = txo['txo_hash'][:32]
reference.nout = struct.unpack('<I', txo['txo_hash'][32:])[0]
reference.height = txo['height']
return
class ResolveResult(NamedTuple):
name: str
normalized_name: str
claim_hash: bytes
tx_num: int
position: int
tx_hash: bytes
height: int
amount: int
short_url: str
is_controlling: bool
canonical_url: str
creation_height: int
activation_height: int
expiration_height: int
effective_amount: int
support_amount: int
reposted: int
last_takeover_height: Optional[int]
claims_in_channel: Optional[int]
channel_hash: Optional[bytes]
reposted_claim_hash: Optional[bytes]
signature_valid: Optional[bool]
class Censor:
NOT_CENSORED = 0
SEARCH = 1
RESOLVE = 2
__slots__ = 'streams', 'channels', 'limit_claims_per_channel', 'censored', 'claims_in_channel', 'total'
__slots__ = 'censor_type', 'censored'
def __init__(self, censor_type):
self.censor_type = censor_type
def __init__(self, streams: dict = None, channels: dict = None, limit_claims_per_channel: int = None):
self.streams = streams or {}
self.channels = channels or {}
self.limit_claims_per_channel = limit_claims_per_channel # doesn't count as censored
self.censored = {}
self.claims_in_channel = {}
self.total = 0
def is_censored(self, row):
return (row.get('censor_type') or self.NOT_CENSORED) >= self.censor_type
def censor(self, row) -> bool:
was_censored = False
for claim_hash, lookup in (
(row['claim_hash'], self.streams),
(row['claim_hash'], self.channels),
(row['channel_hash'], self.channels),
(row['reposted_claim_hash'], self.streams),
(row['reposted_claim_hash'], self.channels)):
censoring_channel_hash = lookup.get(claim_hash)
if censoring_channel_hash:
was_censored = True
self.censored.setdefault(censoring_channel_hash, 0)
self.censored[censoring_channel_hash] += 1
break
if was_censored:
self.total += 1
if not was_censored and self.limit_claims_per_channel is not None and row['channel_hash']:
self.claims_in_channel.setdefault(row['channel_hash'], 0)
self.claims_in_channel[row['channel_hash']] += 1
if self.claims_in_channel[row['channel_hash']] > self.limit_claims_per_channel:
return True
return was_censored
def apply(self, rows):
return [row for row in rows if not self.censor(row)]
def censor(self, row) -> Optional[bytes]:
if self.is_censored(row):
censoring_channel_hash = bytes.fromhex(row['censoring_channel_id'])[::-1]
self.censored.setdefault(censoring_channel_hash, set())
self.censored[censoring_channel_hash].add(row['tx_hash'])
return censoring_channel_hash
return None
def to_message(self, outputs: OutputsMessage, extra_txo_rows: dict):
def to_message(self, outputs: OutputsMessage, extra_txo_rows):
outputs.blocked_total = self.total
for censoring_channel_hash, count in self.censored.items():
blocked = outputs.blocked.add()
blocked.count = len(count)
blocked.count = count
set_reference(blocked.channel, censoring_channel_hash, extra_txo_rows)
outputs.blocked_total += len(count)
class Outputs:
@ -142,10 +127,10 @@ class Outputs:
'expiration_height': claim.expiration_height,
'effective_amount': claim.effective_amount,
'support_amount': claim.support_amount,
# 'trending_group': claim.trending_group,
# 'trending_mixed': claim.trending_mixed,
# 'trending_local': claim.trending_local,
# 'trending_global': claim.trending_global,
'trending_group': claim.trending_group,
'trending_mixed': claim.trending_mixed,
'trending_local': claim.trending_local,
'trending_global': claim.trending_global,
}
if claim.HasField('channel'):
txo.channel = tx_map[claim.channel.tx_hash].outputs[claim.channel.nout]
@ -189,54 +174,44 @@ class Outputs:
page.total = total
if blocked is not None:
blocked.to_message(page, extra_txo_rows)
for row in extra_txo_rows:
txo_message: 'OutputsMessage' = page.extra_txos.add()
if not isinstance(row, Exception):
if row.channel_hash:
set_reference(txo_message.claim.channel, row.channel_hash, extra_txo_rows)
if row.reposted_claim_hash:
set_reference(txo_message.claim.repost, row.reposted_claim_hash, extra_txo_rows)
cls.encode_txo(txo_message, row)
for row in txo_rows:
# cls.row_to_message(row, page.txos.add(), extra_txo_rows)
txo_message: 'OutputsMessage' = page.txos.add()
cls.encode_txo(txo_message, row)
if not isinstance(row, Exception):
if row.channel_hash:
set_reference(txo_message.claim.channel, row.channel_hash, extra_txo_rows)
if row.reposted_claim_hash:
set_reference(txo_message.claim.repost, row.reposted_claim_hash, extra_txo_rows)
elif isinstance(row, ResolveCensoredError):
set_reference(txo_message.error.blocked.channel, row.censor_id, extra_txo_rows)
cls.row_to_message(row, page.txos.add(), extra_txo_rows)
for row in extra_txo_rows:
cls.row_to_message(row, page.extra_txos.add(), extra_txo_rows)
return page.SerializeToString()
@classmethod
def encode_txo(cls, txo_message, resolve_result: Union['ResolveResult', Exception]):
if isinstance(resolve_result, Exception):
txo_message.error.text = resolve_result.args[0]
if isinstance(resolve_result, ValueError):
def row_to_message(cls, txo, txo_message, extra_txo_rows):
if isinstance(txo, Exception):
txo_message.error.text = txo.args[0]
if isinstance(txo, ValueError):
txo_message.error.code = ErrorMessage.INVALID
elif isinstance(resolve_result, LookupError):
elif isinstance(txo, LookupError):
txo_message.error.code = ErrorMessage.NOT_FOUND
elif isinstance(resolve_result, ResolveCensoredError):
elif isinstance(txo, ResolveCensoredError):
txo_message.error.code = ErrorMessage.BLOCKED
set_reference(txo_message.error.blocked.channel, txo.censor_hash, extra_txo_rows)
return
txo_message.tx_hash = resolve_result.tx_hash
txo_message.nout = resolve_result.position
txo_message.height = resolve_result.height
txo_message.claim.short_url = resolve_result.short_url
txo_message.claim.reposted = resolve_result.reposted
txo_message.claim.is_controlling = resolve_result.is_controlling
txo_message.claim.creation_height = resolve_result.creation_height
txo_message.claim.activation_height = resolve_result.activation_height
txo_message.claim.expiration_height = resolve_result.expiration_height
txo_message.claim.effective_amount = resolve_result.effective_amount
txo_message.claim.support_amount = resolve_result.support_amount
if resolve_result.canonical_url is not None:
txo_message.claim.canonical_url = resolve_result.canonical_url
if resolve_result.last_takeover_height is not None:
txo_message.claim.take_over_height = resolve_result.last_takeover_height
if resolve_result.claims_in_channel is not None:
txo_message.claim.claims_in_channel = resolve_result.claims_in_channel
txo_message.tx_hash = txo['txo_hash'][:32]
txo_message.nout, = struct.unpack('<I', txo['txo_hash'][32:])
txo_message.height = txo['height']
txo_message.claim.short_url = txo['short_url']
txo_message.claim.reposted = txo['reposted']
if txo['canonical_url'] is not None:
txo_message.claim.canonical_url = txo['canonical_url']
txo_message.claim.is_controlling = bool(txo['is_controlling'])
if txo['last_take_over_height'] is not None:
txo_message.claim.take_over_height = txo['last_take_over_height']
txo_message.claim.creation_height = txo['creation_height']
txo_message.claim.activation_height = txo['activation_height']
txo_message.claim.expiration_height = txo['expiration_height']
if txo['claims_in_channel'] is not None:
txo_message.claim.claims_in_channel = txo['claims_in_channel']
txo_message.claim.effective_amount = txo['effective_amount']
txo_message.claim.support_amount = txo['support_amount']
txo_message.claim.trending_group = txo['trending_group']
txo_message.claim.trending_mixed = txo['trending_mixed']
txo_message.claim.trending_local = txo['trending_local']
txo_message.claim.trending_global = txo['trending_global']
set_reference(txo_message.claim.channel, txo['channel_hash'], extra_txo_rows)
set_reference(txo_message.claim.repost, txo['reposted_claim_hash'], extra_txo_rows)
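For orientation, here is a hedged usage sketch of the dict-based `Censor` on the master side of this file. The hashes and the blocked-stream mapping are invented; only the constructor and `apply()` call pattern come from the code above, and the module path in the import is an assumption based on the master tree.
```python
from lbry.schema.result import Censor  # assumed module path

# hypothetical 20-byte hashes, for illustration only
blocked_stream = b'\xaa' * 20
censoring_channel = b'\x11' * 20

censor = Censor(
    streams={blocked_stream: censoring_channel},  # claim_hash -> censoring channel hash
    channels={},
    limit_claims_per_channel=10,
)

rows = [
    {'claim_hash': blocked_stream, 'channel_hash': None, 'reposted_claim_hash': None},
    {'claim_hash': b'\xbb' * 20, 'channel_hash': None, 'reposted_claim_hash': None},
]

visible = censor.apply(rows)            # only the second row survives
assert censor.total == 1                # one row was filtered out
assert censor.censored[censoring_channel] == 1
```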

View file

@ -13,11 +13,3 @@ class Support(Signable):
@emoji.setter
def emoji(self, emoji: str):
self.message.emoji = emoji
@property
def comment(self) -> str:
return self.message.comment
@comment.setter
def comment(self, comment: str):
self.message.comment = comment

View file

@ -1,11 +1,13 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: result.proto
"""Generated protocol buffer code."""
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
@ -17,10 +19,9 @@ DESCRIPTOR = _descriptor.FileDescriptor(
name='result.proto',
package='pb',
syntax='proto3',
serialized_options=b'Z$github.com/lbryio/hub/protobuf/go/pb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0cresult.proto\x12\x02pb\"\x97\x01\n\x07Outputs\x12\x18\n\x04txos\x18\x01 \x03(\x0b\x32\n.pb.Output\x12\x1e\n\nextra_txos\x18\x02 \x03(\x0b\x32\n.pb.Output\x12\r\n\x05total\x18\x03 \x01(\r\x12\x0e\n\x06offset\x18\x04 \x01(\r\x12\x1c\n\x07\x62locked\x18\x05 \x03(\x0b\x32\x0b.pb.Blocked\x12\x15\n\rblocked_total\x18\x06 \x01(\r\"{\n\x06Output\x12\x0f\n\x07tx_hash\x18\x01 \x01(\x0c\x12\x0c\n\x04nout\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x1e\n\x05\x63laim\x18\x07 \x01(\x0b\x32\r.pb.ClaimMetaH\x00\x12\x1a\n\x05\x65rror\x18\x0f \x01(\x0b\x32\t.pb.ErrorH\x00\x42\x06\n\x04meta\"\xe6\x02\n\tClaimMeta\x12\x1b\n\x07\x63hannel\x18\x01 \x01(\x0b\x32\n.pb.Output\x12\x1a\n\x06repost\x18\x02 \x01(\x0b\x32\n.pb.Output\x12\x11\n\tshort_url\x18\x03 \x01(\t\x12\x15\n\rcanonical_url\x18\x04 \x01(\t\x12\x16\n\x0eis_controlling\x18\x05 \x01(\x08\x12\x18\n\x10take_over_height\x18\x06 \x01(\r\x12\x17\n\x0f\x63reation_height\x18\x07 \x01(\r\x12\x19\n\x11\x61\x63tivation_height\x18\x08 \x01(\r\x12\x19\n\x11\x65xpiration_height\x18\t \x01(\r\x12\x19\n\x11\x63laims_in_channel\x18\n \x01(\r\x12\x10\n\x08reposted\x18\x0b \x01(\r\x12\x18\n\x10\x65\x66\x66\x65\x63tive_amount\x18\x14 \x01(\x04\x12\x16\n\x0esupport_amount\x18\x15 \x01(\x04\x12\x16\n\x0etrending_score\x18\x16 \x01(\x01\"\x94\x01\n\x05\x45rror\x12\x1c\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x0e.pb.Error.Code\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x1c\n\x07\x62locked\x18\x03 \x01(\x0b\x32\x0b.pb.Blocked\"A\n\x04\x43ode\x12\x10\n\x0cUNKNOWN_CODE\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07\x42LOCKED\x10\x03\"5\n\x07\x42locked\x12\r\n\x05\x63ount\x18\x01 \x01(\r\x12\x1b\n\x07\x63hannel\x18\x02 \x01(\x0b\x32\n.pb.OutputB&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3'
serialized_pb=_b('\n\x0cresult.proto\x12\x02pb\"\x97\x01\n\x07Outputs\x12\x18\n\x04txos\x18\x01 \x03(\x0b\x32\n.pb.Output\x12\x1e\n\nextra_txos\x18\x02 \x03(\x0b\x32\n.pb.Output\x12\r\n\x05total\x18\x03 \x01(\r\x12\x0e\n\x06offset\x18\x04 \x01(\r\x12\x1c\n\x07\x62locked\x18\x05 \x03(\x0b\x32\x0b.pb.Blocked\x12\x15\n\rblocked_total\x18\x06 \x01(\r\"{\n\x06Output\x12\x0f\n\x07tx_hash\x18\x01 \x01(\x0c\x12\x0c\n\x04nout\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x1e\n\x05\x63laim\x18\x07 \x01(\x0b\x32\r.pb.ClaimMetaH\x00\x12\x1a\n\x05\x65rror\x18\x0f \x01(\x0b\x32\t.pb.ErrorH\x00\x42\x06\n\x04meta\"\xaf\x03\n\tClaimMeta\x12\x1b\n\x07\x63hannel\x18\x01 \x01(\x0b\x32\n.pb.Output\x12\x1a\n\x06repost\x18\x02 \x01(\x0b\x32\n.pb.Output\x12\x11\n\tshort_url\x18\x03 \x01(\t\x12\x15\n\rcanonical_url\x18\x04 \x01(\t\x12\x16\n\x0eis_controlling\x18\x05 \x01(\x08\x12\x18\n\x10take_over_height\x18\x06 \x01(\r\x12\x17\n\x0f\x63reation_height\x18\x07 \x01(\r\x12\x19\n\x11\x61\x63tivation_height\x18\x08 \x01(\r\x12\x19\n\x11\x65xpiration_height\x18\t \x01(\r\x12\x19\n\x11\x63laims_in_channel\x18\n \x01(\r\x12\x10\n\x08reposted\x18\x0b \x01(\r\x12\x18\n\x10\x65\x66\x66\x65\x63tive_amount\x18\x14 \x01(\x04\x12\x16\n\x0esupport_amount\x18\x15 \x01(\x04\x12\x16\n\x0etrending_group\x18\x16 \x01(\r\x12\x16\n\x0etrending_mixed\x18\x17 \x01(\x02\x12\x16\n\x0etrending_local\x18\x18 \x01(\x02\x12\x17\n\x0ftrending_global\x18\x19 \x01(\x02\"\x94\x01\n\x05\x45rror\x12\x1c\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x0e.pb.Error.Code\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x1c\n\x07\x62locked\x18\x03 \x01(\x0b\x32\x0b.pb.Blocked\"A\n\x04\x43ode\x12\x10\n\x0cUNKNOWN_CODE\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07\x42LOCKED\x10\x03\"5\n\x07\x42locked\x12\r\n\x05\x63ount\x18\x01 \x01(\r\x12\x1b\n\x07\x63hannel\x18\x02 \x01(\x0b\x32\n.pb.Outputb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
@ -29,33 +30,28 @@ _ERROR_CODE = _descriptor.EnumDescriptor(
full_name='pb.Error.Code',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_CODE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_FOUND', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BLOCKED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=744,
serialized_end=809,
options=None,
serialized_start=817,
serialized_end=882,
)
_sym_db.RegisterEnumDescriptor(_ERROR_CODE)
@ -66,7 +62,6 @@ _OUTPUTS = _descriptor.Descriptor(
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='txos', full_name='pb.Outputs.txos', index=0,
@ -74,49 +69,49 @@ _OUTPUTS = _descriptor.Descriptor(
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='extra_txos', full_name='pb.Outputs.extra_txos', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='total', full_name='pb.Outputs.total', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='offset', full_name='pb.Outputs.offset', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='blocked', full_name='pb.Outputs.blocked', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='blocked_total', full_name='pb.Outputs.blocked_total', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
@ -133,59 +128,56 @@ _OUTPUT = _descriptor.Descriptor(
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='tx_hash', full_name='pb.Output.tx_hash', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='nout', full_name='pb.Output.nout', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='height', full_name='pb.Output.height', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='claim', full_name='pb.Output.claim', index=3,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='error', full_name='pb.Output.error', index=4,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='meta', full_name='pb.Output.meta',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
index=0, containing_type=None, fields=[]),
],
serialized_start=174,
serialized_end=297,
@ -198,7 +190,6 @@ _CLAIMMETA = _descriptor.Descriptor(
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='channel', full_name='pb.ClaimMeta.channel', index=0,
@ -206,112 +197,133 @@ _CLAIMMETA = _descriptor.Descriptor(
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='repost', full_name='pb.ClaimMeta.repost', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='short_url', full_name='pb.ClaimMeta.short_url', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='canonical_url', full_name='pb.ClaimMeta.canonical_url', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='is_controlling', full_name='pb.ClaimMeta.is_controlling', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='take_over_height', full_name='pb.ClaimMeta.take_over_height', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='creation_height', full_name='pb.ClaimMeta.creation_height', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='activation_height', full_name='pb.ClaimMeta.activation_height', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='expiration_height', full_name='pb.ClaimMeta.expiration_height', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='claims_in_channel', full_name='pb.ClaimMeta.claims_in_channel', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='reposted', full_name='pb.ClaimMeta.reposted', index=10,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='effective_amount', full_name='pb.ClaimMeta.effective_amount', index=11,
number=20, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='support_amount', full_name='pb.ClaimMeta.support_amount', index=12,
number=21, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='trending_score', full_name='pb.ClaimMeta.trending_score', index=13,
number=22, type=1, cpp_type=5, label=1,
name='trending_group', full_name='pb.ClaimMeta.trending_group', index=13,
number=22, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trending_mixed', full_name='pb.ClaimMeta.trending_mixed', index=14,
number=23, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='trending_local', full_name='pb.ClaimMeta.trending_local', index=15,
number=24, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trending_global', full_name='pb.ClaimMeta.trending_global', index=16,
number=25, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=300,
serialized_end=658,
serialized_end=731,
)
@ -321,7 +333,6 @@ _ERROR = _descriptor.Descriptor(
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='pb.Error.code', index=0,
@ -329,21 +340,21 @@ _ERROR = _descriptor.Descriptor(
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='text', full_name='pb.Error.text', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='blocked', full_name='pb.Error.blocked', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
],
extensions=[
],
@ -351,14 +362,14 @@ _ERROR = _descriptor.Descriptor(
enum_types=[
_ERROR_CODE,
],
serialized_options=None,
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=661,
serialized_end=809,
serialized_start=734,
serialized_end=882,
)
@ -368,7 +379,6 @@ _BLOCKED = _descriptor.Descriptor(
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='count', full_name='pb.Blocked.count', index=0,
@ -376,28 +386,28 @@ _BLOCKED = _descriptor.Descriptor(
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
_descriptor.FieldDescriptor(
name='channel', full_name='pb.Blocked.channel', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=811,
serialized_end=864,
serialized_start=884,
serialized_end=937,
)
_OUTPUTS.fields_by_name['txos'].message_type = _OUTPUT
@ -422,43 +432,41 @@ DESCRIPTOR.message_types_by_name['Output'] = _OUTPUT
DESCRIPTOR.message_types_by_name['ClaimMeta'] = _CLAIMMETA
DESCRIPTOR.message_types_by_name['Error'] = _ERROR
DESCRIPTOR.message_types_by_name['Blocked'] = _BLOCKED
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Outputs = _reflection.GeneratedProtocolMessageType('Outputs', (_message.Message,), {
'DESCRIPTOR' : _OUTPUTS,
'__module__' : 'result_pb2'
Outputs = _reflection.GeneratedProtocolMessageType('Outputs', (_message.Message,), dict(
DESCRIPTOR = _OUTPUTS,
__module__ = 'result_pb2'
# @@protoc_insertion_point(class_scope:pb.Outputs)
})
))
_sym_db.RegisterMessage(Outputs)
Output = _reflection.GeneratedProtocolMessageType('Output', (_message.Message,), {
'DESCRIPTOR' : _OUTPUT,
'__module__' : 'result_pb2'
Output = _reflection.GeneratedProtocolMessageType('Output', (_message.Message,), dict(
DESCRIPTOR = _OUTPUT,
__module__ = 'result_pb2'
# @@protoc_insertion_point(class_scope:pb.Output)
})
))
_sym_db.RegisterMessage(Output)
ClaimMeta = _reflection.GeneratedProtocolMessageType('ClaimMeta', (_message.Message,), {
'DESCRIPTOR' : _CLAIMMETA,
'__module__' : 'result_pb2'
ClaimMeta = _reflection.GeneratedProtocolMessageType('ClaimMeta', (_message.Message,), dict(
DESCRIPTOR = _CLAIMMETA,
__module__ = 'result_pb2'
# @@protoc_insertion_point(class_scope:pb.ClaimMeta)
})
))
_sym_db.RegisterMessage(ClaimMeta)
Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), {
'DESCRIPTOR' : _ERROR,
'__module__' : 'result_pb2'
Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), dict(
DESCRIPTOR = _ERROR,
__module__ = 'result_pb2'
# @@protoc_insertion_point(class_scope:pb.Error)
})
))
_sym_db.RegisterMessage(Error)
Blocked = _reflection.GeneratedProtocolMessageType('Blocked', (_message.Message,), {
'DESCRIPTOR' : _BLOCKED,
'__module__' : 'result_pb2'
Blocked = _reflection.GeneratedProtocolMessageType('Blocked', (_message.Message,), dict(
DESCRIPTOR = _BLOCKED,
__module__ = 'result_pb2'
# @@protoc_insertion_point(class_scope:pb.Blocked)
})
))
_sym_db.RegisterMessage(Blocked)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)

View file

@ -19,7 +19,7 @@ DESCRIPTOR = _descriptor.FileDescriptor(
name='support.proto',
package='pb',
syntax='proto3',
serialized_pb=_b('\n\rsupport.proto\x12\x02pb\")\n\x07Support\x12\r\n\x05\x65moji\x18\x01 \x01(\t\x12\x0f\n\x07\x63omment\x18\x02 \x01(\tb\x06proto3')
serialized_pb=_b('\n\rsupport.proto\x12\x02pb\"\x18\n\x07Support\x12\r\n\x05\x65moji\x18\x01 \x01(\tb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
@ -40,13 +40,6 @@ _SUPPORT = _descriptor.Descriptor(
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='comment', full_name='pb.Support.comment', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
@ -60,7 +53,7 @@ _SUPPORT = _descriptor.Descriptor(
oneofs=[
],
serialized_start=21,
serialized_end=62,
serialized_end=45,
)
DESCRIPTOR.message_types_by_name['Support'] = _SUPPORT

View file

@ -1,139 +0,0 @@
{
"title": "Wallet",
"description": "An LBC wallet",
"type": "object",
"required": ["name", "version", "accounts", "preferences"],
"additionalProperties": false,
"properties": {
"name": {
"description": "Human readable name for this wallet",
"type": "string"
},
"version": {
"description": "Wallet spec version",
"type": "integer",
"$comment": "Should this be a string? We may need some sort of decimal type if we want exact decimal versions."
},
"accounts": {
"description": "Accounts associated with this wallet",
"type": "array",
"items": {
"type": "object",
"required": ["address_generator", "certificates", "encrypted", "ledger", "modified_on", "name", "private_key", "public_key", "seed"],
"additionalProperties": false,
"properties": {
"address_generator": {
"description": "Higher level manager of either singular or deterministically generated addresses",
"type": "object",
"oneOf": [
{
"required": ["name", "change", "receiving"],
"additionalProperties": false,
"properties": {
"name": {
"description": "type of address generator: a deterministic chain of addresses",
"enum": ["deterministic-chain"],
"type": "string"
},
"change": {
"$ref": "#/$defs/address_manager",
"description": "Manager for deterministically generated change address (not used for single address)"
},
"receiving": {
"$ref": "#/$defs/address_manager",
"description": "Manager for deterministically generated receiving address (not used for single address)"
}
}
}, {
"required": ["name"],
"additionalProperties": false,
"properties": {
"name": {
"description": "type of address generator: a single address",
"enum": ["single-address"],
"type": "string"
}
}
}
]
},
"certificates": {
"type": "object",
"description": "Channel keys. Mapping from public key address to pem-formatted private key.",
"additionalProperties": {"type": "string"}
},
"encrypted": {
"type": "boolean",
"description": "Whether private key and seed are encrypted with a password"
},
"ledger": {
"description": "Which network to use",
"type": "string",
"examples": [
"lbc_mainnet",
"lbc_testnet"
]
},
"modified_on": {
"description": "last modified time in Unix Time",
"type": "integer"
},
"name": {
"description": "Name for account, possibly human readable",
"type": "string"
},
"private_key": {
"description": "Private key for address if `address_generator` is a single address. Root of chain of private keys for addresses if `address_generator` is a deterministic chain of addresses. Encrypted if `encrypted` is true.",
"type": "string"
},
"public_key": {
"description": "Public key for address if `address_generator` is a single address. Root of chain of public keys for addresses if `address_generator` is a deterministic chain of addresses.",
"type": "string"
},
"seed": {
"description": "Human readable representation of `private_key`. encrypted if `encrypted` is set to `true`",
"type": "string"
}
}
}
},
"preferences": {
"description": "Timestamped application-level preferences. Values can be objects or of a primitive type.",
"$comment": "enable-sync is seen in example wallet. encrypt-on-disk is seen in example wallet. they both have a boolean `value` field. Do we want them explicitly defined here? local and shared seem to have at least a similar structure (type, value [yes, again], version), value being the free-form part. Should we define those here? Or can there be any key under preferences, and `value` be literally be anything in any form?",
"type": "object",
"additionalProperties": {
"type": "object",
"required": ["ts", "value"],
"additionalProperties": false,
"properties": {
"ts": {
"type": "number",
"description": "When the item was set, in Unix time format.",
"$comment": "Do we want a string (decimal)?"
},
"value": {
"$comment": "Sometimes this has been an object, sometimes just a boolean. I don't want to prescribe anything."
}
}
}
}
},
"$defs": {
"address_manager": {
"description": "Manager for deterministically generated addresses",
"type": "object",
"required": ["gap", "maximum_uses_per_address"],
"additionalProperties": false,
"properties": {
"gap": {
"description": "Maximum allowed consecutive generated addresses with no transactions",
"type": "integer"
},
"maximum_uses_per_address": {
"description": "Maximum number of uses for each generated address",
"type": "integer"
}
}
}
}
}
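Since this schema is removed in the comparison, a concrete instance may help. Below is a minimal wallet document that satisfies it, written as a Python literal; every value is a placeholder, only the structure follows the schema.
```python
minimal_wallet = {
    "name": "My Wallet",
    "version": 1,
    "accounts": [
        {
            "address_generator": {"name": "single-address"},
            "certificates": {},          # address -> PEM-encoded channel private key
            "encrypted": False,
            "ledger": "lbc_mainnet",
            "modified_on": 1600000000,
            "name": "Account #1",
            "private_key": "placeholder-private-key",
            "public_key": "placeholder-public-key",
            "seed": "placeholder seed words",
        }
    ],
    "preferences": {
        "enable-sync": {"ts": 1600000000, "value": True},
    },
}
```
A deterministic-chain account would instead set `address_generator` to `{"name": "deterministic-chain", "change": {...}, "receiving": {...}}`, where `change` and `receiving` each supply the `gap` and `maximum_uses_per_address` fields defined under `$defs/address_manager`.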

View file

@ -55,14 +55,6 @@ class PathSegment(NamedTuple):
def normalized(self):
return normalize_name(self.name)
@property
def is_shortid(self):
return self.claim_id is not None and len(self.claim_id) < 40
@property
def is_fullid(self):
return self.claim_id is not None and len(self.claim_id) == 40
def to_dict(self):
q = {'name': self.name}
if self.claim_id is not None:

View file

@ -1,31 +0,0 @@
import asyncio
import logging
from lbry.stream.downloader import StreamDownloader
log = logging.getLogger(__name__)
class BackgroundDownloader:
def __init__(self, conf, storage, blob_manager, dht_node=None):
self.storage = storage
self.blob_manager = blob_manager
self.node = dht_node
self.conf = conf
async def download_blobs(self, sd_hash):
downloader = StreamDownloader(asyncio.get_running_loop(), self.conf, self.blob_manager, sd_hash)
try:
await downloader.start(self.node, save_stream=False)
for blob_info in downloader.descriptor.blobs[:-1]:
await downloader.download_stream_blob(blob_info)
except ValueError:
return
except asyncio.CancelledError:
log.debug("Cancelled background downloader")
raise
except Exception:
log.error("Unexpected download error on background downloader")
finally:
downloader.stop()
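A short, hedged usage sketch for the master-only `BackgroundDownloader` above; `conf`, `storage`, `blob_manager`, `node` and `sd_hash` are placeholders for objects a running daemon would already have.
```python
# inside an async context, e.g. a component's start() coroutine
downloader = BackgroundDownloader(conf, storage, blob_manager, dht_node=node)
# fetches the sd blob plus every data blob except the stream terminator,
# without writing a file; cancellation is re-raised, other errors are swallowed
await downloader.download_blobs(sd_hash)
```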

View file

@ -4,7 +4,6 @@ import binascii
import logging
import typing
import asyncio
import time
import re
from collections import OrderedDict
from cryptography.hazmat.primitives.ciphers.algorithms import AES
@ -153,19 +152,15 @@ class StreamDescriptor:
h.update(self.old_sort_json())
return h.hexdigest()
async def make_sd_blob(
self, blob_file_obj: typing.Optional[AbstractBlob] = None, old_sort: typing.Optional[bool] = False,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None,
added_on: float = None, is_mine: bool = False
):
async def make_sd_blob(self, blob_file_obj: typing.Optional[AbstractBlob] = None,
old_sort: typing.Optional[bool] = False,
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None):
sd_hash = self.calculate_sd_hash() if not old_sort else self.calculate_old_sort_sd_hash()
if not old_sort:
sd_data = self.as_json()
else:
sd_data = self.old_sort_json()
sd_blob = blob_file_obj or BlobFile(
self.loop, sd_hash, len(sd_data), blob_completed_callback, self.blob_dir, added_on, is_mine
)
sd_blob = blob_file_obj or BlobFile(self.loop, sd_hash, len(sd_data), blob_completed_callback, self.blob_dir)
if blob_file_obj:
blob_file_obj.set_length(len(sd_data))
if not sd_blob.get_is_verified():
@ -188,19 +183,18 @@ class StreamDescriptor:
raise InvalidStreamDescriptorError("Does not decode as valid JSON")
if decoded['blobs'][-1]['length'] != 0:
raise InvalidStreamDescriptorError("Does not end with a zero-length blob.")
if any(blob_info['length'] == 0 for blob_info in decoded['blobs'][:-1]):
if any([blob_info['length'] == 0 for blob_info in decoded['blobs'][:-1]]):
raise InvalidStreamDescriptorError("Contains zero-length data blob")
if 'blob_hash' in decoded['blobs'][-1]:
raise InvalidStreamDescriptorError("Stream terminator blob should not have a hash")
if any(i != blob_info['blob_num'] for i, blob_info in enumerate(decoded['blobs'])):
if any([i != blob_info['blob_num'] for i, blob_info in enumerate(decoded['blobs'])]):
raise InvalidStreamDescriptorError("Stream contains out of order or skipped blobs")
added_on = time.time()
descriptor = cls(
loop, blob_dir,
binascii.unhexlify(decoded['stream_name']).decode(),
decoded['key'],
binascii.unhexlify(decoded['suggested_file_name']).decode(),
[BlobInfo(info['blob_num'], info['length'], info['iv'], added_on, info.get('blob_hash'))
[BlobInfo(info['blob_num'], info['length'], info['iv'], info.get('blob_hash'))
for info in decoded['blobs']],
decoded['stream_hash'],
blob.blob_hash
@ -258,25 +252,20 @@ class StreamDescriptor:
iv_generator = iv_generator or random_iv_generator()
key = key or os.urandom(AES.block_size // 8)
blob_num = -1
added_on = time.time()
async for blob_bytes in file_reader(file_path):
blob_num += 1
blob_info = await BlobFile.create_from_unencrypted(
loop, blob_dir, key, next(iv_generator), blob_bytes, blob_num, added_on, True, blob_completed_callback
loop, blob_dir, key, next(iv_generator), blob_bytes, blob_num, blob_completed_callback
)
blobs.append(blob_info)
blobs.append(
# add the stream terminator
BlobInfo(len(blobs), 0, binascii.hexlify(next(iv_generator)).decode(), added_on, None, True)
)
BlobInfo(len(blobs), 0, binascii.hexlify(next(iv_generator)).decode())) # add the stream terminator
file_name = os.path.basename(file_path)
suggested_file_name = sanitize_file_name(file_name)
descriptor = cls(
loop, blob_dir, file_name, binascii.hexlify(key).decode(), suggested_file_name, blobs
)
sd_blob = await descriptor.make_sd_blob(
old_sort=old_sort, blob_completed_callback=blob_completed_callback, added_on=added_on, is_mine=True
)
sd_blob = await descriptor.make_sd_blob(old_sort=old_sort, blob_completed_callback=blob_completed_callback)
descriptor.sd_hash = sd_blob.blob_hash
return descriptor

View file

@ -3,13 +3,11 @@ import typing
import logging
import binascii
from lbry.dht.node import get_kademlia_peers_from_hosts
from lbry.dht.peer import make_kademlia_peer
from lbry.error import DownloadSDTimeoutError
from lbry.utils import lru_cache_concurrent
from lbry.utils import resolve_host, lru_cache_concurrent
from lbry.stream.descriptor import StreamDescriptor
from lbry.blob_exchange.downloader import BlobDownloader
from lbry.torrent.tracker import enqueue_tracker_search
if typing.TYPE_CHECKING:
from lbry.conf import Config
from lbry.dht.node import Node
@ -27,8 +25,8 @@ class StreamDownloader:
self.config = config
self.blob_manager = blob_manager
self.sd_hash = sd_hash
self.search_queue = asyncio.Queue() # blob hashes to feed into the iterative finder
self.peer_queue = asyncio.Queue() # new peers to try
self.search_queue = asyncio.Queue(loop=loop) # blob hashes to feed into the iterative finder
self.peer_queue = asyncio.Queue(loop=loop) # new peers to try
self.blob_downloader = BlobDownloader(self.loop, self.config, self.blob_manager, self.peer_queue)
self.descriptor: typing.Optional[StreamDescriptor] = descriptor
self.node: typing.Optional['Node'] = None
@ -50,19 +48,26 @@ class StreamDownloader:
self.cached_read_blob = cached_read_blob
async def add_fixed_peers(self):
def _add_fixed_peers(fixed_peers):
self.peer_queue.put_nowait(fixed_peers)
def _delayed_add_fixed_peers():
self.added_fixed_peers = True
self.peer_queue.put_nowait([
make_kademlia_peer(None, address, None, tcp_port=port, allow_localhost=True)
for address, port in addresses
])
if not self.config.fixed_peers:
return
addresses = [
(await resolve_host(url, port, proto='tcp'), port)
for url, port in self.config.fixed_peers
]
if 'dht' in self.config.components_to_skip or not self.node or not \
len(self.node.protocol.routing_table.get_peers()) > 0:
self.fixed_peers_delay = 0.0
else:
self.fixed_peers_delay = self.config.fixed_peer_delay
fixed_peers = await get_kademlia_peers_from_hosts(self.config.fixed_peers)
self.fixed_peers_handle = self.loop.call_later(self.fixed_peers_delay, _add_fixed_peers, fixed_peers)
self.fixed_peers_handle = self.loop.call_later(self.fixed_peers_delay, _delayed_add_fixed_peers)
async def load_descriptor(self, connection_id: int = 0):
# download or get the sd blob
@ -72,7 +77,7 @@ class StreamDownloader:
now = self.loop.time()
sd_blob = await asyncio.wait_for(
self.blob_downloader.download_blob(self.sd_hash, connection_id),
self.config.blob_download_timeout
self.config.blob_download_timeout, loop=self.loop
)
log.info("downloaded sd blob %s", self.sd_hash)
self.time_to_descriptor = self.loop.time() - now
@ -85,7 +90,7 @@ class StreamDownloader:
)
log.info("loaded stream manifest %s", self.sd_hash)
async def start(self, node: typing.Optional['Node'] = None, connection_id: int = 0, save_stream=True):
async def start(self, node: typing.Optional['Node'] = None, connection_id: int = 0):
# set up peer accumulation
self.node = node or self.node # fixme: this shouldn't be set here!
if self.node:
@ -93,7 +98,6 @@ class StreamDownloader:
self.accumulate_task.cancel()
_, self.accumulate_task = self.node.accumulate_peers(self.search_queue, self.peer_queue)
await self.add_fixed_peers()
enqueue_tracker_search(bytes.fromhex(self.sd_hash), self.peer_queue)
# start searching for peers for the sd hash
self.search_queue.put_nowait(self.sd_hash)
log.info("searching for peers for stream %s", self.sd_hash)
@ -101,7 +105,11 @@ class StreamDownloader:
if not self.descriptor:
await self.load_descriptor(connection_id)
if not await self.blob_manager.storage.stream_exists(self.sd_hash) and save_stream:
# add the head blob to the peer search
self.search_queue.put_nowait(self.descriptor.blobs[0].blob_hash)
log.info("added head blob to peer search for stream %s", self.sd_hash)
if not await self.blob_manager.storage.stream_exists(self.sd_hash):
await self.blob_manager.storage.store_stream(
self.blob_manager.get_blob(self.sd_hash, length=self.descriptor.length), self.descriptor
)
@ -111,7 +119,7 @@ class StreamDownloader:
raise ValueError(f"blob {blob_info.blob_hash} is not part of stream with sd hash {self.sd_hash}")
blob = await asyncio.wait_for(
self.blob_downloader.download_blob(blob_info.blob_hash, blob_info.length, connection_id),
self.config.blob_download_timeout * 10
self.config.blob_download_timeout * 10, loop=self.loop
)
return blob

View file

@ -16,8 +16,10 @@ from lbry.file.source import ManagedDownloadSource
if typing.TYPE_CHECKING:
from lbry.conf import Config
from lbry.schema.claim import Claim
from lbry.blob.blob_manager import BlobManager
from lbry.blob.blob_info import BlobInfo
from lbry.dht.node import Node
from lbry.extras.daemon.analytics import AnalyticsManager
from lbry.wallet.transaction import Transaction
@ -60,9 +62,9 @@ class ManagedStream(ManagedDownloadSource):
self.file_output_task: typing.Optional[asyncio.Task] = None
self.delayed_stop_task: typing.Optional[asyncio.Task] = None
self.streaming_responses: typing.List[typing.Tuple[Request, StreamResponse]] = []
self.fully_reflected = asyncio.Event()
self.streaming = asyncio.Event()
self._running = asyncio.Event()
self.fully_reflected = asyncio.Event(loop=self.loop)
self.streaming = asyncio.Event(loop=self.loop)
self._running = asyncio.Event(loop=self.loop)
@property
def sd_hash(self) -> str:
@ -82,19 +84,7 @@ class ManagedStream(ManagedDownloadSource):
@property
def file_name(self) -> Optional[str]:
return self._file_name or self.suggested_file_name
@property
def suggested_file_name(self) -> Optional[str]:
first_option = ((self.descriptor and self.descriptor.suggested_file_name) or '').strip()
return sanitize_file_name(first_option or (self.stream_claim_info and self.stream_claim_info.claim and
self.stream_claim_info.claim.stream.source.name))
@property
def stream_name(self) -> Optional[str]:
first_option = ((self.descriptor and self.descriptor.stream_name) or '').strip()
return first_option or (self.stream_claim_info and self.stream_claim_info.claim and
self.stream_claim_info.claim.stream.source.name)
return self._file_name or (self.descriptor.suggested_file_name if self.descriptor else None)
@property
def written_bytes(self) -> int:
@ -128,7 +118,7 @@ class ManagedStream(ManagedDownloadSource):
@property
def mime_type(self):
return guess_media_type(os.path.basename(self.suggested_file_name))[0]
return guess_media_type(os.path.basename(self.descriptor.suggested_file_name))[0]
@property
def download_path(self):
@ -161,7 +151,7 @@ class ManagedStream(ManagedDownloadSource):
log.info("start downloader for stream (sd hash: %s)", self.sd_hash)
self._running.set()
try:
await asyncio.wait_for(self.downloader.start(), timeout)
await asyncio.wait_for(self.downloader.start(), timeout, loop=self.loop)
except asyncio.TimeoutError:
self._running.clear()
raise DownloadSDTimeoutError(self.sd_hash)
@ -174,7 +164,7 @@ class ManagedStream(ManagedDownloadSource):
if not self._file_name:
self._file_name = await get_next_available_file_name(
self.loop, self.download_directory,
self._file_name or sanitize_file_name(self.suggested_file_name)
self._file_name or sanitize_file_name(self.descriptor.suggested_file_name)
)
file_name, download_dir = self._file_name, self.download_directory
else:
@ -191,7 +181,7 @@ class ManagedStream(ManagedDownloadSource):
Stop any running save/stream tasks as well as the downloader and update the status in the database
"""
await self.stop_tasks()
self.stop_tasks()
if (finished and self.status != self.STATUS_FINISHED) or self.status == self.STATUS_RUNNING:
await self.update_status(self.STATUS_FINISHED if finished else self.STATUS_STOPPED)
@ -264,7 +254,7 @@ class ManagedStream(ManagedDownloadSource):
self.finished_writing.clear()
self.started_writing.clear()
try:
open(output_path, 'wb').close() # pylint: disable=consider-using-with
open(output_path, 'wb').close()
async for blob_info, decrypted in self._aiter_read_stream(connection_id=self.SAVING_ID):
log.info("write blob %i/%i", blob_info.blob_num + 1, len(self.descriptor.blobs) - 1)
await self.loop.run_in_executor(None, self._write_decrypted_blob, output_path, decrypted)
@ -279,7 +269,7 @@ class ManagedStream(ManagedDownloadSource):
log.info("finished saving file for lbry://%s#%s (sd hash %s...) -> %s", self.claim_name, self.claim_id,
self.sd_hash[:6], self.full_path)
await self.blob_manager.storage.set_saved_file(self.stream_hash)
except (Exception, asyncio.CancelledError) as err:
except Exception as err:
if os.path.isfile(output_path):
log.warning("removing incomplete download %s for %s", output_path, self.sd_hash)
os.remove(output_path)
@ -306,14 +296,14 @@ class ManagedStream(ManagedDownloadSource):
self.download_directory = download_directory or self.download_directory or self.config.download_dir
if not self.download_directory:
raise ValueError("no directory to download to")
if not (file_name or self._file_name or self.suggested_file_name):
if not (file_name or self._file_name or self.descriptor.suggested_file_name):
raise ValueError("no file name to download to")
if not os.path.isdir(self.download_directory):
log.warning("download directory '%s' does not exist, attempting to make it", self.download_directory)
os.mkdir(self.download_directory)
self._file_name = await get_next_available_file_name(
self.loop, self.download_directory,
file_name or self._file_name or sanitize_file_name(self.suggested_file_name)
file_name or self._file_name or sanitize_file_name(self.descriptor.suggested_file_name)
)
await self.blob_manager.storage.change_file_download_dir_and_file_name(
self.stream_hash, self.download_directory, self.file_name
@@ -321,16 +311,15 @@ class ManagedStream(ManagedDownloadSource):
await self.update_status(ManagedStream.STATUS_RUNNING)
self.file_output_task = self.loop.create_task(self._save_file(self.full_path))
try:
await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout)
await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout, loop=self.loop)
except asyncio.TimeoutError:
log.warning("timeout starting to write data for lbry://%s#%s", self.claim_name, self.claim_id)
await self.stop_tasks()
self.stop_tasks()
await self.update_status(ManagedStream.STATUS_STOPPED)
async def stop_tasks(self):
def stop_tasks(self):
if self.file_output_task and not self.file_output_task.done():
self.file_output_task.cancel()
await asyncio.gather(self.file_output_task, return_exceptions=True)
self.file_output_task = None
while self.streaming_responses:
req, response = self.streaming_responses.pop()
@@ -367,7 +356,7 @@ class ManagedStream(ManagedDownloadSource):
return sent
except ConnectionError:
return sent
except (OSError, Exception, asyncio.CancelledError) as err:
except (OSError, Exception) as err:
if isinstance(err, asyncio.CancelledError):
log.warning("stopped uploading %s#%s to reflector", self.claim_name, self.claim_id)
elif isinstance(err, OSError):
@@ -383,6 +372,9 @@ class ManagedStream(ManagedDownloadSource):
protocol.transport.close()
self.uploading_to_reflector = False
if not self.fully_reflected.is_set():
self.fully_reflected.set()
await self.blob_manager.storage.update_reflected_stream(self.sd_hash, f"{host}:{port}")
return sent
async def update_content_claim(self, claim_info: Optional[typing.Dict] = None):
@@ -402,7 +394,7 @@ class ManagedStream(ManagedDownloadSource):
self.sd_hash[:6])
await self.stop()
return
await asyncio.sleep(1)
await asyncio.sleep(1, loop=self.loop)
def _prepare_range_response_headers(self, get_range: str) -> typing.Tuple[typing.Dict[str, str], int, int, int]:
if '=' in get_range:

View file

@@ -59,7 +59,7 @@ class StreamReflectorClient(asyncio.Protocol):
return
async def send_request(self, request_dict: typing.Dict, timeout: int = 180):
msg = json.dumps(request_dict, sort_keys=True)
msg = json.dumps(request_dict)
try:
self.transport.write(msg.encode())
self.pending_request = self.loop.create_task(asyncio.wait_for(self.response_queue.get(), timeout))

View file

@@ -17,11 +17,11 @@ log = logging.getLogger(__name__)
class ReflectorServerProtocol(asyncio.Protocol):
def __init__(self, blob_manager: 'BlobManager', response_chunk_size: int = 10000,
stop_event: asyncio.Event = None, incoming_event: asyncio.Event = None,
not_incoming_event: asyncio.Event = None, partial_event: asyncio.Event = None):
not_incoming_event: asyncio.Event = None):
self.loop = asyncio.get_event_loop()
self.blob_manager = blob_manager
self.server_task: asyncio.Task = None
self.started_listening = asyncio.Event()
self.started_listening = asyncio.Event(loop=self.loop)
self.buf = b''
self.transport: asyncio.StreamWriter = None
self.writer: typing.Optional['HashBlobWriter'] = None
@@ -29,12 +29,11 @@ class ReflectorServerProtocol(asyncio.Protocol):
self.descriptor: typing.Optional['StreamDescriptor'] = None
self.sd_blob: typing.Optional['BlobFile'] = None
self.received = []
self.incoming = incoming_event or asyncio.Event()
self.not_incoming = not_incoming_event or asyncio.Event()
self.stop_event = stop_event or asyncio.Event()
self.incoming = incoming_event or asyncio.Event(loop=self.loop)
self.not_incoming = not_incoming_event or asyncio.Event(loop=self.loop)
self.stop_event = stop_event or asyncio.Event(loop=self.loop)
self.chunk_size = response_chunk_size
self.wait_for_stop_task: typing.Optional[asyncio.Task] = None
self.partial_event = partial_event
async def wait_for_stop(self):
await self.stop_event.wait()
@@ -94,7 +93,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
self.incoming.set()
self.send_response({"send_sd_blob": True})
try:
await asyncio.wait_for(self.sd_blob.verified.wait(), 30)
await asyncio.wait_for(self.sd_blob.verified.wait(), 30, loop=self.loop)
self.descriptor = await StreamDescriptor.from_stream_descriptor_blob(
self.loop, self.blob_manager.blob_dir, self.sd_blob
)
@@ -116,14 +115,10 @@ class ReflectorServerProtocol(asyncio.Protocol):
if self.writer:
self.writer.close_handle()
self.writer = None
needs = [blob.blob_hash
for blob in self.descriptor.blobs[:-1]
if not self.blob_manager.get_blob(blob.blob_hash).get_is_verified()]
if needs and not self.partial_event.is_set():
needs = needs[:3]
self.partial_event.set()
self.send_response({"send_sd_blob": False, 'needed_blobs': needs})
self.send_response({"send_sd_blob": False, 'needed': [
blob.blob_hash for blob in self.descriptor.blobs[:-1]
if not self.blob_manager.get_blob(blob.blob_hash).get_is_verified()
]})
return
return
elif self.descriptor:
@@ -140,7 +135,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
self.incoming.set()
self.send_response({"send_blob": True})
try:
await asyncio.wait_for(blob.verified.wait(), 30)
await asyncio.wait_for(blob.verified.wait(), 30, loop=self.loop)
self.send_response({"received_blob": True})
except asyncio.TimeoutError:
self.send_response({"received_blob": False})
@@ -158,29 +153,29 @@ class ReflectorServerProtocol(asyncio.Protocol):
class ReflectorServer:
def __init__(self, blob_manager: 'BlobManager', response_chunk_size: int = 10000,
stop_event: asyncio.Event = None, incoming_event: asyncio.Event = None,
not_incoming_event: asyncio.Event = None, partial_needs=False):
not_incoming_event: asyncio.Event = None):
self.loop = asyncio.get_event_loop()
self.blob_manager = blob_manager
self.server_task: typing.Optional[asyncio.Task] = None
self.started_listening = asyncio.Event()
self.stopped_listening = asyncio.Event()
self.incoming_event = incoming_event or asyncio.Event()
self.not_incoming_event = not_incoming_event or asyncio.Event()
self.started_listening = asyncio.Event(loop=self.loop)
self.stopped_listening = asyncio.Event(loop=self.loop)
self.incoming_event = incoming_event or asyncio.Event(loop=self.loop)
self.not_incoming_event = not_incoming_event or asyncio.Event(loop=self.loop)
self.response_chunk_size = response_chunk_size
self.stop_event = stop_event
self.partial_needs = partial_needs # for testing cases where it doesn't know what it wants
def start_server(self, port: int, interface: typing.Optional[str] = '0.0.0.0'):
if self.server_task is not None:
raise Exception("already running")
async def _start_server():
partial_event = asyncio.Event()
if not self.partial_needs:
partial_event.set()
server = await self.loop.create_server(lambda: ReflectorServerProtocol(
self.blob_manager, self.response_chunk_size, self.stop_event, self.incoming_event,
self.not_incoming_event, partial_event), interface, port)
server = await self.loop.create_server(
lambda: ReflectorServerProtocol(
self.blob_manager, self.response_chunk_size, self.stop_event, self.incoming_event,
self.not_incoming_event
),
interface, port
)
self.started_listening.set()
self.stopped_listening.clear()
log.info("Reflector server listening on TCP %s:%i", interface, port)

View file

@@ -54,7 +54,7 @@ class StreamManager(SourceManager):
self.re_reflect_task: Optional[asyncio.Task] = None
self.update_stream_finished_futs: typing.List[asyncio.Future] = []
self.running_reflector_uploads: typing.Dict[str, asyncio.Task] = {}
self.started = asyncio.Event()
self.started = asyncio.Event(loop=self.loop)
@property
def streams(self):
@@ -70,7 +70,6 @@ class StreamManager(SourceManager):
async def recover_streams(self, file_infos: typing.List[typing.Dict]):
to_restore = []
to_check = []
async def recover_stream(sd_hash: str, stream_hash: str, stream_name: str,
suggested_file_name: str, key: str,
@@ -83,7 +82,6 @@ class StreamManager(SourceManager):
if not descriptor:
return
to_restore.append((descriptor, sd_blob, content_fee))
to_check.extend([sd_blob.blob_hash] + [blob.blob_hash for blob in descriptor.blobs[:-1]])
await asyncio.gather(*[
recover_stream(
@@ -95,8 +93,6 @@ class StreamManager(SourceManager):
if to_restore:
await self.storage.recover_streams(to_restore, self.config.download_dir)
if to_check:
await self.blob_manager.ensure_completed_blobs_status(to_check)
# if self.blob_manager._save_blobs:
# log.info("Recovered %i/%i attempted streams", len(to_restore), len(file_infos))
@@ -150,7 +146,7 @@ class StreamManager(SourceManager):
file_info['added_on'], file_info['fully_reflected']
)))
if add_stream_tasks:
await asyncio.gather(*add_stream_tasks)
await asyncio.gather(*add_stream_tasks, loop=self.loop)
log.info("Started stream manager with %i files", len(self._sources))
if not self.node:
log.info("no DHT node given, resuming downloads trusting that we can contact reflector")
@@ -159,11 +155,14 @@ class StreamManager(SourceManager):
self.resume_saving_task = asyncio.ensure_future(asyncio.gather(
*(self._sources[sd_hash].save_file(file_name, download_directory)
for (file_name, download_directory, sd_hash) in to_resume_saving),
loop=self.loop
))
async def reflect_streams(self):
try:
return await self._reflect_streams()
except asyncio.CancelledError:
raise
except Exception:
log.exception("reflector task encountered an unexpected error!")
@@ -183,21 +182,21 @@ class StreamManager(SourceManager):
batch.append(self.reflect_stream(stream))
if len(batch) >= self.config.concurrent_reflector_uploads:
log.debug("waiting for batch of %s reflecting streams", len(batch))
await asyncio.gather(*batch)
await asyncio.gather(*batch, loop=self.loop)
log.debug("done processing %s streams", len(batch))
batch = []
if batch:
log.debug("waiting for batch of %s reflecting streams", len(batch))
await asyncio.gather(*batch)
await asyncio.gather(*batch, loop=self.loop)
log.debug("done processing %s streams", len(batch))
await asyncio.sleep(300)
await asyncio.sleep(300, loop=self.loop)
async def start(self):
await super().start()
self.re_reflect_task = self.loop.create_task(self.reflect_streams())
async def stop(self):
await super().stop()
def stop(self):
super().stop()
if self.resume_saving_task and not self.resume_saving_task.done():
self.resume_saving_task.cancel()
if self.re_reflect_task and not self.re_reflect_task.done():
@@ -216,7 +215,7 @@ class StreamManager(SourceManager):
server, port = random.choice(self.config.reflector_servers)
if stream.sd_hash in self.running_reflector_uploads:
return self.running_reflector_uploads[stream.sd_hash]
task = self.loop.create_task(self._retriable_reflect_stream(stream, server, port))
task = self.loop.create_task(stream.upload_to_reflector(server, port))
self.running_reflector_uploads[stream.sd_hash] = task
task.add_done_callback(
lambda _: None if stream.sd_hash not in self.running_reflector_uploads else
@@ -224,14 +223,6 @@ class StreamManager(SourceManager):
)
return task
@staticmethod
async def _retriable_reflect_stream(stream, host, port):
sent = await stream.upload_to_reflector(host, port)
while not stream.is_fully_reflected and stream.reflector_progress > 0 and len(sent) > 0:
stream.reflector_progress = 0
sent = await stream.upload_to_reflector(host, port)
return sent
async def create(self, file_path: str, key: Optional[bytes] = None,
iv_generator: Optional[typing.Generator[bytes, None, None]] = None) -> ManagedStream:
descriptor = await StreamDescriptor.create_stream(
@@ -239,7 +230,7 @@ class StreamManager(SourceManager):
blob_completed_callback=self.blob_manager.blob_completed
)
await self.storage.store_stream(
self.blob_manager.get_blob(descriptor.sd_hash, is_mine=True), descriptor
self.blob_manager.get_blob(descriptor.sd_hash), descriptor
)
row_id = await self.storage.save_published_file(
descriptor.stream_hash, os.path.basename(file_path), os.path.dirname(file_path), 0
@@ -260,7 +251,7 @@ class StreamManager(SourceManager):
return
if source.identifier in self.running_reflector_uploads:
self.running_reflector_uploads[source.identifier].cancel()
await source.stop_tasks()
source.stop_tasks()
if source.identifier in self.streams:
del self.streams[source.identifier]
blob_hashes = [source.identifier] + [b.blob_hash for b in source.descriptor.blobs[:-1]]

View file

@@ -17,10 +17,8 @@ from functools import partial
from lbry.wallet import WalletManager, Wallet, Ledger, Account, Transaction
from lbry.conf import Config
from lbry.wallet.util import satoshis_to_coins
from lbry.wallet.dewies import lbc_to_dewies
from lbry.wallet.orchstr8 import Conductor
from lbry.wallet.orchstr8.node import LBCWalletNode, WalletNode
from lbry.schema.claim import Claim
from lbry.wallet.orchstr8.node import BlockchainNode, WalletNode
from lbry.extras.daemon.daemon import Daemon, jsonrpc_dumps_pretty
from lbry.extras.daemon.components import Component, WalletComponent
@@ -86,7 +84,6 @@ class AsyncioTestCase(unittest.TestCase):
# https://bugs.python.org/issue32972
LOOP_SLOW_CALLBACK_DURATION = 0.2
TIMEOUT = 120.0
maxDiff = None
@@ -134,18 +131,15 @@ class AsyncioTestCase(unittest.TestCase):
with outcome.testPartExecutor(self):
self.setUp()
self.add_timeout()
self.loop.run_until_complete(self.asyncSetUp())
if outcome.success:
outcome.expecting_failure = expecting_failure
with outcome.testPartExecutor(self, isTest=True):
maybe_coroutine = testMethod()
if asyncio.iscoroutine(maybe_coroutine):
self.add_timeout()
self.loop.run_until_complete(maybe_coroutine)
outcome.expecting_failure = False
with outcome.testPartExecutor(self):
self.add_timeout()
self.loop.run_until_complete(self.asyncTearDown())
self.tearDown()
@@ -193,25 +187,8 @@ class AsyncioTestCase(unittest.TestCase):
with outcome.testPartExecutor(self):
maybe_coroutine = function(*args, **kwargs)
if asyncio.iscoroutine(maybe_coroutine):
self.add_timeout()
self.loop.run_until_complete(maybe_coroutine)
def cancel(self):
for task in asyncio.all_tasks(self.loop):
if not task.done():
task.print_stack()
task.cancel()
def add_timeout(self):
if self.TIMEOUT:
self.loop.call_later(self.TIMEOUT, self.check_timeout, time())
def check_timeout(self, started):
if time() - started >= self.TIMEOUT:
self.cancel()
else:
self.loop.call_later(self.TIMEOUT, self.check_timeout, started)
class AdvanceTimeTestCase(AsyncioTestCase):
@@ -236,7 +213,7 @@ class IntegrationTestCase(AsyncioTestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.conductor: Optional[Conductor] = None
self.blockchain: Optional[LBCWalletNode] = None
self.blockchain: Optional[BlockchainNode] = None
self.wallet_node: Optional[WalletNode] = None
self.manager: Optional[WalletManager] = None
self.ledger: Optional[Ledger] = None
@@ -245,15 +222,13 @@ class IntegrationTestCase(AsyncioTestCase):
async def asyncSetUp(self):
self.conductor = Conductor(seed=self.SEED)
await self.conductor.start_lbcd()
self.addCleanup(self.conductor.stop_lbcd)
await self.conductor.start_lbcwallet()
self.addCleanup(self.conductor.stop_lbcwallet)
await self.conductor.start_blockchain()
self.addCleanup(self.conductor.stop_blockchain)
await self.conductor.start_spv()
self.addCleanup(self.conductor.stop_spv)
await self.conductor.start_wallet()
self.addCleanup(self.conductor.stop_wallet)
self.blockchain = self.conductor.lbcwallet_node
self.blockchain = self.conductor.blockchain_node
self.wallet_node = self.conductor.wallet_node
self.manager = self.wallet_node.manager
self.ledger = self.wallet_node.ledger
@@ -267,13 +242,6 @@ class IntegrationTestCase(AsyncioTestCase):
def broadcast(self, tx):
return self.ledger.broadcast(tx)
async def broadcast_and_confirm(self, tx, ledger=None):
ledger = ledger or self.ledger
notifications = asyncio.create_task(ledger.wait(tx))
await ledger.broadcast(tx)
await notifications
await self.generate_and_wait(1, [tx.id], ledger)
async def on_header(self, height):
if self.ledger.headers.height < height:
await self.ledger.on_header.where(
@@ -281,29 +249,11 @@ class IntegrationTestCase(AsyncioTestCase):
)
return True
async def send_to_address_and_wait(self, address, amount, blocks_to_generate=0, ledger=None):
tx_watch = []
txid = None
done = False
watcher = (ledger or self.ledger).on_transaction.where(
lambda e: e.tx.id == txid or done or tx_watch.append(e.tx.id)
def on_transaction_id(self, txid, ledger=None):
return (ledger or self.ledger).on_transaction.where(
lambda e: e.tx.id == txid
)
txid = await self.blockchain.send_to_address(address, amount)
done = txid in tx_watch
await watcher
await self.generate_and_wait(blocks_to_generate, [txid], ledger)
return txid
async def generate_and_wait(self, blocks_to_generate, txids, ledger=None):
if blocks_to_generate > 0:
watcher = (ledger or self.ledger).on_transaction.where(
lambda e: ((e.tx.id in txids and txids.remove(e.tx.id)), len(txids) <= 0)[-1] # multi-statement lambda
)
await self.generate(blocks_to_generate)
await watcher
def on_address_update(self, address):
return self.ledger.on_transaction.where(
lambda e: e.address == address
@@ -314,22 +264,6 @@ class IntegrationTestCase(AsyncioTestCase):
lambda e: e.tx.id == tx.id and e.address == address
)
async def generate(self, blocks):
""" Ask lbrycrd to generate some blocks and wait until ledger has them. """
prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
self.conductor.spv_node.server.synchronized.clear()
await self.blockchain.generate(blocks)
height = self.blockchain.block_expected
await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate
while True:
await self.conductor.spv_node.server.synchronized.wait()
self.conductor.spv_node.server.synchronized.clear()
if self.conductor.spv_node.server.db.db_height < height:
continue
if self.conductor.spv_node.server._es_height < height:
continue
break
class FakeExchangeRateManager(ExchangeRateManager):
@@ -390,28 +324,24 @@ class CommandTestCase(IntegrationTestCase):
self.skip_libtorrent = True
async def asyncSetUp(self):
await super().asyncSetUp()
logging.getLogger('lbry.blob_exchange').setLevel(self.VERBOSITY)
logging.getLogger('lbry.daemon').setLevel(self.VERBOSITY)
logging.getLogger('lbry.stream').setLevel(self.VERBOSITY)
logging.getLogger('lbry.wallet').setLevel(self.VERBOSITY)
await super().asyncSetUp()
self.daemon = await self.add_daemon(self.wallet_node)
await self.account.ensure_address_gap()
address = (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0]
await self.send_to_address_and_wait(address, 10, 6)
sendtxid = await self.blockchain.send_to_address(address, 10)
await self.confirm_tx(sendtxid)
await self.generate(5)
server_tmp_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, server_tmp_dir)
self.server_config = Config(
data_dir=server_tmp_dir,
wallet_dir=server_tmp_dir,
save_files=True,
download_dir=server_tmp_dir
)
self.server_config = Config()
self.server_config.transaction_cache_size = 10000
self.server_storage = SQLiteStorage(self.server_config, ':memory:')
await self.server_storage.open()
@@ -435,7 +365,6 @@ class CommandTestCase(IntegrationTestCase):
await daemon.stop()
async def add_daemon(self, wallet_node=None, seed=None):
start_wallet_node = False
if wallet_node is None:
wallet_node = WalletNode(
self.wallet_node.manager_class,
@@ -443,24 +372,22 @@ class CommandTestCase(IntegrationTestCase):
port=self.extra_wallet_node_port
)
self.extra_wallet_node_port += 1
start_wallet_node = True
await wallet_node.start(self.conductor.spv_node, seed=seed)
self.extra_wallet_nodes.append(wallet_node)
upload_dir = os.path.join(wallet_node.data_path, 'uploads')
os.mkdir(upload_dir)
conf = Config(
# needed during instantiation to access known_hubs path
data_dir=wallet_node.data_path,
wallet_dir=wallet_node.data_path,
save_files=True,
download_dir=wallet_node.data_path
)
conf = Config()
conf.data_dir = wallet_node.data_path
conf.wallet_dir = wallet_node.data_path
conf.download_dir = wallet_node.data_path
conf.upload_dir = upload_dir # not a real conf setting
conf.share_usage_data = False
conf.use_upnp = False
conf.reflect_streams = True
conf.blockchain_name = 'lbrycrd_regtest'
conf.lbryum_servers = [(self.conductor.spv_node.hostname, self.conductor.spv_node.port)]
conf.lbryum_servers = [('127.0.0.1', 50001)]
conf.reflector_servers = [('127.0.0.1', 5566)]
conf.fixed_peers = [('127.0.0.1', 5567)]
conf.known_dht_nodes = []
@@ -472,13 +399,7 @@ class CommandTestCase(IntegrationTestCase):
]
if self.skip_libtorrent:
conf.components_to_skip.append(LIBTORRENT_COMPONENT)
if start_wallet_node:
await wallet_node.start(self.conductor.spv_node, seed=seed, config=conf)
self.extra_wallet_nodes.append(wallet_node)
else:
wallet_node.manager.config = conf
wallet_node.manager.ledger.config['known_hubs'] = conf.known_hubs
wallet_node.manager.config = conf
def wallet_maker(component_manager):
wallet_component = WalletComponent(component_manager)
@@ -499,14 +420,9 @@ class CommandTestCase(IntegrationTestCase):
async def confirm_tx(self, txid, ledger=None):
""" Wait for tx to be in mempool, then generate a block, wait for tx to be in a block. """
# await (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid)
on_tx = (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid)
await asyncio.wait([self.generate(1), on_tx], timeout=5)
# # actually, if it's in the mempool or in the block we're fine
# await self.generate_and_wait(1, [txid], ledger=ledger)
# return txid
await self.on_transaction_id(txid, ledger)
await self.generate(1)
await self.on_transaction_id(txid, ledger)
return txid
async def on_transaction_dict(self, tx):
@@ -521,6 +437,11 @@ class CommandTestCase(IntegrationTestCase):
addresses.add(txo['address'])
return list(addresses)
async def generate(self, blocks):
""" Ask lbrycrd to generate some blocks and wait until ledger has them. """
await self.blockchain.generate(blocks)
await self.ledger.on_header.where(self.blockchain.is_expected_block)
async def blockchain_claim_name(self, name: str, value: str, amount: str, confirm=True):
txid = await self.blockchain._cli_cmnd('claimname', name, value, amount)
if confirm:
@@ -541,27 +462,12 @@ class CommandTestCase(IntegrationTestCase):
""" Synchronous version of `out` method. """
return json.loads(jsonrpc_dumps_pretty(value, ledger=self.ledger))['result']
async def confirm_and_render(self, awaitable, confirm, return_tx=False) -> Transaction:
async def confirm_and_render(self, awaitable, confirm) -> Transaction:
tx = await awaitable
if confirm:
await self.ledger.wait(tx)
await self.generate(1)
await self.ledger.wait(tx, self.blockchain.block_expected)
if not return_tx:
return self.sout(tx)
return tx
async def create_nondeterministic_channel(self, name, price, pubkey_bytes, daemon=None, blocking=False):
account = (daemon or self.daemon).wallet_manager.default_account
claim_address = await account.receiving.get_or_create_usable_address()
claim = Claim()
claim.channel.public_key_bytes = pubkey_bytes
tx = await Transaction.claim_create(
name, claim, lbc_to_dewies(price),
claim_address, [self.account], self.account
)
await tx.sign([self.account])
await (daemon or self.daemon).broadcast_or_release(tx, blocking)
return self.sout(tx)
def create_upload_file(self, data, prefix=None, suffix=None):
@@ -573,19 +479,19 @@ class CommandTestCase(IntegrationTestCase):
async def stream_create(
self, name='hovercraft', bid='1.0', file_path=None,
data=b'hi!', confirm=True, prefix=None, suffix=None, return_tx=False, **kwargs):
if file_path is None and data is not None:
data=b'hi!', confirm=True, prefix=None, suffix=None, **kwargs):
if file_path is None:
file_path = self.create_upload_file(data=data, prefix=prefix, suffix=suffix)
return await self.confirm_and_render(
self.daemon.jsonrpc_stream_create(name, bid, file_path=file_path, **kwargs), confirm, return_tx
self.daemon.jsonrpc_stream_create(name, bid, file_path=file_path, **kwargs), confirm
)
async def stream_update(
self, claim_id, data=None, prefix=None, suffix=None, confirm=True, return_tx=False, **kwargs):
self, claim_id, data=None, prefix=None, suffix=None, confirm=True, **kwargs):
if data is not None:
file_path = self.create_upload_file(data=data, prefix=prefix, suffix=suffix)
return await self.confirm_and_render(
self.daemon.jsonrpc_stream_update(claim_id, file_path=file_path, **kwargs), confirm, return_tx
self.daemon.jsonrpc_stream_update(claim_id, file_path=file_path, **kwargs), confirm
)
return await self.confirm_and_render(
self.daemon.jsonrpc_stream_update(claim_id, **kwargs), confirm
@@ -661,11 +567,6 @@ class CommandTestCase(IntegrationTestCase):
self.daemon.jsonrpc_support_abandon(*args, **kwargs), confirm
)
async def account_send(self, *args, confirm=True, **kwargs):
return await self.confirm_and_render(
self.daemon.jsonrpc_account_send(*args, **kwargs), confirm
)
async def wallet_send(self, *args, confirm=True, **kwargs):
return await self.confirm_and_render(
self.daemon.jsonrpc_wallet_send(*args, **kwargs), confirm
@@ -679,21 +580,12 @@ class CommandTestCase(IntegrationTestCase):
await asyncio.wait([self.ledger.wait(tx, self.blockchain.block_expected) for tx in txs])
return self.sout(txs)
async def blob_clean(self):
return await self.out(self.daemon.jsonrpc_blob_clean())
async def status(self):
return await self.out(self.daemon.jsonrpc_status())
async def resolve(self, uri, **kwargs):
return (await self.out(self.daemon.jsonrpc_resolve(uri, **kwargs)))[uri]
async def claim_search(self, **kwargs):
return (await self.out(self.daemon.jsonrpc_claim_search(**kwargs)))['items']
async def get_claim_by_claim_id(self, claim_id):
return await self.out(self.ledger.get_claim_by_claim_id(claim_id))
async def file_list(self, *args, **kwargs):
return (await self.out(self.daemon.jsonrpc_file_list(*args, **kwargs)))['items']
@@ -718,9 +610,6 @@ class CommandTestCase(IntegrationTestCase):
async def transaction_list(self, *args, **kwargs):
return (await self.out(self.daemon.jsonrpc_transaction_list(*args, **kwargs)))['items']
async def blob_list(self, *args, **kwargs):
return (await self.out(self.daemon.jsonrpc_blob_list(*args, **kwargs)))['items']
@staticmethod
def get_claim_id(tx):
return tx['outputs'][0]['claim_id']

View file

@@ -10,13 +10,47 @@ from typing import Optional
import libtorrent
NOTIFICATION_MASKS = [
"error",
"peer",
"port_mapping",
"storage",
"tracker",
"debug",
"status",
"progress",
"ip_block",
"dht",
"stats",
"session_log",
"torrent_log",
"peer_log",
"incoming_request",
"dht_log",
"dht_operation",
"port_mapping_log",
"picker_log",
"file_progress",
"piece_progress",
"upload",
"block_progress"
]
log = logging.getLogger(__name__)
DEFAULT_FLAGS = ( # fixme: somehow the logic here is inverted?
libtorrent.add_torrent_params_flags_t.flag_auto_managed
| libtorrent.add_torrent_params_flags_t.flag_update_subscribe
)
def get_notification_type(notification) -> str:
for i, notification_type in enumerate(NOTIFICATION_MASKS):
if (1 << i) & notification:
return notification_type
raise ValueError("unrecognized notification type")
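# Illustrative sketch (assumes the NOTIFICATION_MASKS list defined above): the helper maps a
# single libtorrent category bit to its name, e.g.
#   get_notification_type(1 << 0) -> "error"
#   get_notification_type(1 << 2) -> "port_mapping"
#   get_notification_type(0)      -> raises ValueError("unrecognized notification type")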
class TorrentHandle:
def __init__(self, loop, executor, handle):
self._loop = loop
@@ -87,7 +121,7 @@ class TorrentHandle:
self._show_status()
if self.finished.is_set():
break
await asyncio.sleep(0.1)
await asyncio.sleep(0.1, loop=self._loop)
async def pause(self):
await self._loop.run_in_executor(
@@ -122,8 +156,10 @@ class TorrentSession:
async def bind(self, interface: str = '0.0.0.0', port: int = 10889):
settings = {
'listen_interfaces': f"{interface}:{port}",
'enable_natpmp': False,
'enable_upnp': False
'enable_outgoing_utp': True,
'enable_incoming_utp': True,
'enable_outgoing_tcp': False,
'enable_incoming_tcp': False
}
self._session = await self._loop.run_in_executor(
self._executor, libtorrent.session, settings # pylint: disable=c-extension-no-member
@@ -150,7 +186,7 @@ class TorrentSession:
await self._loop.run_in_executor(
self._executor, self._pop_alerts
)
await asyncio.sleep(1)
await asyncio.sleep(1, loop=self._loop)
async def pause(self):
await self._loop.run_in_executor(

View file

@@ -36,7 +36,7 @@ class Torrent:
def __init__(self, loop, handle):
self._loop = loop
self._handle = handle
self.finished = asyncio.Event()
self.finished = asyncio.Event(loop=loop)
def _threaded_update_status(self):
status = self._handle.status()
@@ -58,7 +58,7 @@ class Torrent:
log.info("finished downloading torrent!")
await self.pause()
break
await asyncio.sleep(1)
await asyncio.sleep(1, loop=self._loop)
async def pause(self):
log.info("pause torrent")

View file

@@ -74,7 +74,7 @@ class TorrentSource(ManagedDownloadSource):
def bt_infohash(self):
return self.identifier
async def stop_tasks(self):
def stop_tasks(self):
pass
@property
@@ -118,8 +118,8 @@ class TorrentManager(SourceManager):
async def start(self):
await super().start()
async def stop(self):
await super().stop()
def stop(self):
super().stop()
log.info("finished stopping the torrent manager")
async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):

View file

@@ -1,285 +0,0 @@
import random
import socket
import string
import struct
import asyncio
import logging
import time
import ipaddress
from collections import namedtuple
from functools import reduce
from typing import Optional
from lbry.dht.node import get_kademlia_peers_from_hosts
from lbry.utils import resolve_host, async_timed_cache, cache_concurrent
from lbry.wallet.stream import StreamController
from lbry import version
log = logging.getLogger(__name__)
CONNECTION_EXPIRES_AFTER_SECONDS = 50
PREFIX = 'LB' # todo: PR BEP20 to add ourselves
DEFAULT_TIMEOUT_SECONDS = 10.0
DEFAULT_CONCURRENCY_LIMIT = 100
# see: http://bittorrent.org/beps/bep_0015.html and http://xbtt.sourceforge.net/udp_tracker_protocol.html
ConnectRequest = namedtuple("ConnectRequest", ["connection_id", "action", "transaction_id"])
ConnectResponse = namedtuple("ConnectResponse", ["action", "transaction_id", "connection_id"])
AnnounceRequest = namedtuple("AnnounceRequest",
["connection_id", "action", "transaction_id", "info_hash", "peer_id", "downloaded", "left",
"uploaded", "event", "ip_addr", "key", "num_want", "port"])
AnnounceResponse = namedtuple("AnnounceResponse",
["action", "transaction_id", "interval", "leechers", "seeders", "peers"])
CompactIPv4Peer = namedtuple("CompactPeer", ["address", "port"])
ScrapeRequest = namedtuple("ScrapeRequest", ["connection_id", "action", "transaction_id", "infohashes"])
ScrapeResponse = namedtuple("ScrapeResponse", ["action", "transaction_id", "items"])
ScrapeResponseItem = namedtuple("ScrapeResponseItem", ["seeders", "completed", "leechers"])
ErrorResponse = namedtuple("ErrorResponse", ["action", "transaction_id", "message"])
structs = {
ConnectRequest: struct.Struct(">QII"),
ConnectResponse: struct.Struct(">IIQ"),
AnnounceRequest: struct.Struct(">QII20s20sQQQIIIiH"),
AnnounceResponse: struct.Struct(">IIIII"),
CompactIPv4Peer: struct.Struct(">IH"),
ScrapeRequest: struct.Struct(">QII"),
ScrapeResponse: struct.Struct(">II"),
ScrapeResponseItem: struct.Struct(">III"),
ErrorResponse: struct.Struct(">II")
}
def decode(cls, data, offset=0):
decoder = structs[cls]
if cls is AnnounceResponse:
return AnnounceResponse(*decoder.unpack_from(data, offset),
peers=[decode(CompactIPv4Peer, data, index) for index in range(20, len(data), 6)])
elif cls is ScrapeResponse:
return ScrapeResponse(*decoder.unpack_from(data, offset),
items=[decode(ScrapeResponseItem, data, index) for index in range(8, len(data), 12)])
elif cls is ErrorResponse:
return ErrorResponse(*decoder.unpack_from(data, offset), data[decoder.size:])
return cls(*decoder.unpack_from(data, offset))
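# Illustrative example of the decoder above, using a hand-built 16-byte connect reply
# (">IIQ" unpacks action, transaction_id, connection_id):
#   data = b"\x00\x00\x00\x00" + b"\x00\x00\x00\x2a" + b"\x00" * 8
#   decode(ConnectResponse, data) -> ConnectResponse(action=0, transaction_id=42, connection_id=0)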
def encode(obj):
if isinstance(obj, ScrapeRequest):
return structs[ScrapeRequest].pack(*obj[:-1]) + b''.join(obj.infohashes)
elif isinstance(obj, ErrorResponse):
return structs[ErrorResponse].pack(*obj[:-1]) + obj.message
elif isinstance(obj, AnnounceResponse):
return structs[AnnounceResponse].pack(*obj[:-1]) + b''.join([encode(peer) for peer in obj.peers])
return structs[type(obj)].pack(*obj)
def make_peer_id(random_part: Optional[str] = None) -> bytes:
# see https://wiki.theory.org/BitTorrentSpecification#peer_id and https://www.bittorrent.org/beps/bep_0020.html
# not to be confused with the node id; the peer id uniquely identifies the software, version and instance
random_part = random_part or ''.join(random.choice(string.ascii_letters) for _ in range(20))
return f"{PREFIX}-{'-'.join(map(str, version))}-{random_part}"[:20].encode()
class UDPTrackerClientProtocol(asyncio.DatagramProtocol):
def __init__(self, timeout: float = DEFAULT_TIMEOUT_SECONDS):
self.transport = None
self.data_queue = {}
self.timeout = timeout
self.semaphore = asyncio.Semaphore(DEFAULT_CONCURRENCY_LIMIT)
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.transport = transport
async def request(self, obj, tracker_ip, tracker_port):
self.data_queue[obj.transaction_id] = asyncio.get_running_loop().create_future()
try:
async with self.semaphore:
self.transport.sendto(encode(obj), (tracker_ip, tracker_port))
return await asyncio.wait_for(self.data_queue[obj.transaction_id], self.timeout)
finally:
self.data_queue.pop(obj.transaction_id, None)
async def connect(self, tracker_ip, tracker_port):
transaction_id = random.getrandbits(32)
return decode(ConnectResponse,
await self.request(ConnectRequest(0x41727101980, 0, transaction_id), tracker_ip, tracker_port))
@cache_concurrent
@async_timed_cache(CONNECTION_EXPIRES_AFTER_SECONDS)
async def ensure_connection_id(self, peer_id, tracker_ip, tracker_port):
# peer_id is just to ensure cache coherency
return (await self.connect(tracker_ip, tracker_port)).connection_id
async def announce(self, info_hash, peer_id, port, tracker_ip, tracker_port, stopped=False):
connection_id = await self.ensure_connection_id(peer_id, tracker_ip, tracker_port)
# this should make the key deterministic but unique per info hash + peer id
key = int.from_bytes(info_hash[:4], "big") ^ int.from_bytes(peer_id[:4], "big") ^ port
transaction_id = random.getrandbits(32)
req = AnnounceRequest(
connection_id, 1, transaction_id, info_hash, peer_id, 0, 0, 0, 3 if stopped else 1, 0, key, -1, port)
return decode(AnnounceResponse, await self.request(req, tracker_ip, tracker_port))
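# Illustrative sketch of the announce key computed above, with hypothetical values:
#   info_hash[:4] = b"\x00\x00\x00\x0f" (15), peer_id[:4] = b"\x00\x00\x00\x01" (1), port = 6881
#   key = 15 ^ 1 ^ 6881 = 6895, i.e. stable for a given (info_hash, peer_id, port) triple.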
async def scrape(self, infohashes, tracker_ip, tracker_port, connection_id=None):
connection_id = await self.ensure_connection_id(None, tracker_ip, tracker_port)
transaction_id = random.getrandbits(32)
reply = await self.request(
ScrapeRequest(connection_id, 2, transaction_id, infohashes), tracker_ip, tracker_port)
return decode(ScrapeResponse, reply), connection_id
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
if len(data) < 8:
return
transaction_id = int.from_bytes(data[4:8], byteorder="big", signed=False)
if transaction_id in self.data_queue:
if not self.data_queue[transaction_id].done():
if data[3] == 3:
return self.data_queue[transaction_id].set_exception(Exception(decode(ErrorResponse, data).message))
return self.data_queue[transaction_id].set_result(data)
log.debug("unexpected packet (can be a response for a previously timed out request): %s", data.hex())
def connection_lost(self, exc: Exception = None) -> None:
self.transport = None
class TrackerClient:
event_controller = StreamController()
def __init__(self, node_id, announce_port, get_servers, timeout=10.0):
self.client = UDPTrackerClientProtocol(timeout=timeout)
self.transport = None
self.peer_id = make_peer_id(node_id.hex() if node_id else None)
self.announce_port = announce_port
self._get_servers = get_servers
self.results = {} # we can't probe the server before the interval, so we keep the result here until it expires
self.tasks = {}
async def start(self):
self.transport, _ = await asyncio.get_running_loop().create_datagram_endpoint(
lambda: self.client, local_addr=("0.0.0.0", 0))
self.event_controller.stream.listen(
lambda request: self.on_hash(request[1], request[2]) if request[0] == 'search' else None)
def stop(self):
while self.tasks:
self.tasks.popitem()[1].cancel()
if self.transport is not None:
self.transport.close()
self.client = None
self.transport = None
self.event_controller.close()
def on_hash(self, info_hash, on_announcement=None):
if info_hash not in self.tasks:
task = asyncio.create_task(self.get_peer_list(info_hash, on_announcement=on_announcement))
task.add_done_callback(lambda *_: self.tasks.pop(info_hash, None))
self.tasks[info_hash] = task
async def announce_many(self, *info_hashes, stopped=False):
await asyncio.gather(
*[self._announce_many(server, info_hashes, stopped=stopped) for server in self._get_servers()],
return_exceptions=True)
async def _announce_many(self, server, info_hashes, stopped=False):
tracker_ip = await resolve_host(*server, 'udp')
still_good_info_hashes = {
info_hash for (info_hash, (next_announcement, _)) in self.results.get(tracker_ip, {}).items()
if time.time() < next_announcement
}
results = await asyncio.gather(
*[self._probe_server(info_hash, tracker_ip, server[1], stopped=stopped)
for info_hash in info_hashes if info_hash not in still_good_info_hashes],
return_exceptions=True)
if results:
errors = sum([1 for result in results if result is None or isinstance(result, Exception)])
log.info("Tracker: finished announcing %d files to %s:%d, %d errors", len(results), *server, errors)
async def get_peer_list(self, info_hash, stopped=False, on_announcement=None, no_port=False):
found = []
probes = [self._probe_server(info_hash, *server, stopped, no_port) for server in self._get_servers()]
for done in asyncio.as_completed(probes):
result = await done
if result is not None:
await asyncio.gather(*filter(asyncio.iscoroutine, [on_announcement(result)] if on_announcement else []))
found.append(result)
return found
async def get_kademlia_peer_list(self, info_hash):
responses = await self.get_peer_list(info_hash, no_port=True)
return await announcement_to_kademlia_peers(*responses)
async def _probe_server(self, info_hash, tracker_host, tracker_port, stopped=False, no_port=False):
result = None
try:
tracker_host = await resolve_host(tracker_host, tracker_port, 'udp')
except socket.error:
log.warning("DNS failure while resolving tracker host: %s, skipping.", tracker_host)
return
self.results.setdefault(tracker_host, {})
if info_hash in self.results[tracker_host]:
next_announcement, result = self.results[tracker_host][info_hash]
if time.time() < next_announcement:
return result
try:
result = await self.client.announce(
info_hash, self.peer_id, 0 if no_port else self.announce_port, tracker_host, tracker_port, stopped)
self.results[tracker_host][info_hash] = (time.time() + result.interval, result)
except asyncio.TimeoutError: # todo: this is UDP, timeout is common, we need a better metric for failures
self.results[tracker_host][info_hash] = (time.time() + 60.0, result)
log.debug("Tracker timed out: %s:%d", tracker_host, tracker_port)
return None
log.debug("Announced: %s found %d peers for %s", tracker_host, len(result.peers), info_hash.hex()[:8])
return result
def enqueue_tracker_search(info_hash: bytes, peer_q: asyncio.Queue):
async def on_announcement(announcement: AnnounceResponse):
peers = await announcement_to_kademlia_peers(announcement)
log.info("Found %d peers from tracker for %s", len(peers), info_hash.hex()[:8])
peer_q.put_nowait(peers)
TrackerClient.event_controller.add(('search', info_hash, on_announcement))
def announcement_to_kademlia_peers(*announcements: AnnounceResponse):
peers = [
(str(ipaddress.ip_address(peer.address)), peer.port)
for announcement in announcements for peer in announcement.peers if peer.port > 1024 # no privileged or 0
]
return get_kademlia_peers_from_hosts(peers)
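# Illustrative note: CompactIPv4Peer.address is an unsigned 32-bit integer, so for example
#   str(ipaddress.ip_address(2130706433)) == "127.0.0.1"
# and peers announcing privileged or zero ports (<= 1024) are filtered out above.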
class UDPTrackerServerProtocol(asyncio.DatagramProtocol): # for testing. Not suitable for production
def __init__(self):
self.transport = None
self.known_conns = set()
self.peers = {}
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.transport = transport
def add_peer(self, info_hash, ip_address: str, port: int):
self.peers.setdefault(info_hash, [])
self.peers[info_hash].append(encode_peer(ip_address, port))
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
if len(data) < 16:
return
action = int.from_bytes(data[8:12], "big", signed=False)
if action == 0:
req = decode(ConnectRequest, data)
connection_id = random.getrandbits(32)
self.known_conns.add(connection_id)
return self.transport.sendto(encode(ConnectResponse(0, req.transaction_id, connection_id)), addr)
elif action == 1:
req = decode(AnnounceRequest, data)
if req.connection_id not in self.known_conns:
resp = encode(ErrorResponse(3, req.transaction_id, b'Connection ID missmatch.\x00'))
else:
compact_address = encode_peer(addr[0], req.port)
if req.event != 3:
self.add_peer(req.info_hash, addr[0], req.port)
elif compact_address in self.peers.get(req.info_hash, []):
self.peers[req.info_hash].remove(compact_address)
peers = [decode(CompactIPv4Peer, peer) for peer in self.peers[req.info_hash]]
resp = encode(AnnounceResponse(1, req.transaction_id, 1700, 0, len(peers), peers))
return self.transport.sendto(resp, addr)
def encode_peer(ip_address: str, port: int):
compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), ip_address.split('.'), bytearray())
return compact_ip + port.to_bytes(2, "big", signed=False)
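# Illustrative example: encode_peer("127.0.0.1", 6881) == b"\x7f\x00\x00\x01\x1a\xe1"
# (four IPv4 address bytes followed by a big-endian 16-bit port, the compact peer form
# used by the UDP tracker protocol).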

View file

@@ -21,6 +21,7 @@ import pkg_resources
import certifi
import aiohttp
from prometheus_client import Counter
from prometheus_client.registry import REGISTRY
from lbry.schema.claim import Claim
@@ -104,6 +105,10 @@ def check_connection(server="lbry.com", port=80, timeout=5) -> bool:
return False
async def async_check_connection(server="lbry.com", port=80, timeout=1) -> bool:
return await asyncio.get_event_loop().run_in_executor(None, check_connection, server, port, timeout)
def random_string(length=10, chars=string.ascii_lowercase):
return ''.join([random.choice(chars) for _ in range(length)])
@@ -130,16 +135,21 @@ def get_sd_hash(stream_info):
def json_dumps_pretty(obj, **kwargs):
return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '), **kwargs)
try:
# the standard contextlib.aclosing() is available in 3.10+
from contextlib import aclosing # pylint: disable=unused-import
except ImportError:
@contextlib.asynccontextmanager
async def aclosing(thing):
try:
yield thing
finally:
await thing.aclose()
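# Minimal usage sketch for the aclosing() helper above (the generator name is hypothetical):
#   async def numbers():
#       yield 1
#       yield 2
#   async with aclosing(numbers()) as gen:
#       async for n in gen:
#           ...  # gen.aclose() is awaited on exit, even if iteration stops early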
def cancel_task(task: typing.Optional[asyncio.Task]):
if task and not task.done():
task.cancel()
def cancel_tasks(tasks: typing.List[typing.Optional[asyncio.Task]]):
for task in tasks:
cancel_task(task)
def drain_tasks(tasks: typing.List[typing.Optional[asyncio.Task]]):
while tasks:
cancel_task(tasks.pop())
def async_timed_cache(duration: int):
def wrapper(func):
@@ -150,7 +160,7 @@ def async_timed_cache(duration: int):
async def _inner(*args, **kwargs) -> typing.Any:
loop = asyncio.get_running_loop()
time_now = loop.time()
key = (args, tuple(kwargs.items()))
key = tuple([args, tuple([tuple([k, kwargs[k]]) for k in kwargs])])
if key in cache and (time_now - cache[key][1] < duration):
return cache[key][0]
to_cache = await func(*args, **kwargs)
@@ -168,7 +178,7 @@ def cache_concurrent(async_fn):
@functools.wraps(async_fn)
async def wrapper(*args, **kwargs):
key = (args, tuple(kwargs.items()))
key = tuple([args, tuple([tuple([k, kwargs[k]]) for k in kwargs])])
cache[key] = cache.get(key) or asyncio.create_task(async_fn(*args, **kwargs))
try:
return await cache[key]
@@ -270,6 +280,12 @@ class LRUCacheWithMetrics:
def __del__(self):
self.clear()
if self._track_metrics: # needed for tests
try:
REGISTRY.unregister(self.hits)
REGISTRY.unregister(self.misses)
except AttributeError:
pass
class LRUCache:
@@ -298,14 +314,11 @@ class LRUCache:
self.cache.popitem(last=False)
self.cache[key] = value
def items(self):
return self.cache.items()
def clear(self):
self.cache.clear()
def pop(self, key, default=None):
return self.cache.pop(key, default)
def pop(self, key):
return self.cache.pop(key)
def __setitem__(self, key, value):
return self.set(key, value)
@@ -337,7 +350,7 @@ def lru_cache_concurrent(cache_size: typing.Optional[int] = None,
@functools.wraps(async_fn)
async def _inner(*args, **kwargs):
key = (args, tuple(kwargs.items()))
key = tuple([args, tuple([tuple([k, kwargs[k]]) for k in kwargs])])
if key in lru_cache:
return lru_cache.get(key)
@@ -371,15 +384,13 @@ CARRIER_GRADE_NAT_SUBNET = ipaddress.ip_network('100.64.0.0/10')
IPV4_TO_6_RELAY_SUBNET = ipaddress.ip_network('192.88.99.0/24')
def is_valid_public_ipv4(address, allow_localhost: bool = False, allow_lan: bool = False):
def is_valid_public_ipv4(address, allow_localhost: bool = False):
try:
parsed_ip = ipaddress.ip_address(address)
if parsed_ip.is_loopback and allow_localhost:
return True
if allow_lan and parsed_ip.is_private:
return True
if any((parsed_ip.version != 4, parsed_ip.is_unspecified, parsed_ip.is_link_local, parsed_ip.is_loopback,
parsed_ip.is_multicast, parsed_ip.is_reserved, parsed_ip.is_private)):
parsed_ip.is_multicast, parsed_ip.is_reserved, parsed_ip.is_private, parsed_ip.is_reserved)):
return False
else:
return not any((CARRIER_GRADE_NAT_SUBNET.supernet_of(ipaddress.ip_network(f"{address}/32")),
@@ -400,7 +411,7 @@ async def fallback_get_external_ip(): # used if spv servers can't be used for i
async def _get_external_ip(default_servers) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]:
# used if upnp is disabled or non-functioning
from lbry.wallet.udp import SPVStatusClientProtocol # pylint: disable=C0415
from lbry.wallet.server.udp import SPVStatusClientProtocol # pylint: disable=C0415
hostname_to_ip = {}
ip_to_hostnames = collections.defaultdict(list)
@@ -450,8 +461,8 @@ def is_running_from_bundle():
class LockWithMetrics(asyncio.Lock):
def __init__(self, acquire_metric, held_time_metric):
super().__init__()
def __init__(self, acquire_metric, held_time_metric, loop=None):
super().__init__(loop=loop)
self._acquire_metric = acquire_metric
self._lock_held_time_metric = held_time_metric
self._lock_acquired_time = None
@@ -469,18 +480,3 @@ class LockWithMetrics(asyncio.Lock):
return super().release()
finally:
self._lock_held_time_metric.observe(time.perf_counter() - self._lock_acquired_time)
def get_colliding_prefix_bits(first_value: bytes, second_value: bytes):
"""
Calculates the amount of colliding prefix bits between <first_value> and <second_value>.
This is given by the amount of bits that are the same until the first different one (via XOR),
starting from the most significant bit to the least significant bit.
:param first_value: first value to compare, bigger than size.
:param second_value: second value to compare, bigger than size.
:return: amount of prefix colliding bits.
"""
assert len(first_value) == len(second_value), "length should be the same"
size = len(first_value) * 8
first_value, second_value = int.from_bytes(first_value, "big"), int.from_bytes(second_value, "big")
return size - (first_value ^ second_value).bit_length()
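# Worked example for the prefix calculation above:
#   get_colliding_prefix_bits(b"\xf0", b"\xe0")
#   0b11110000 ^ 0b11100000 = 0b00010000, whose bit_length() is 5; size is 8, so 8 - 5 = 3
#   leading bits are shared.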

View file

@@ -1,23 +1,17 @@
__lbcd__ = 'lbcd'
__lbcctl__ = 'lbcctl'
__lbcwallet__ = 'lbcwallet'
__lbcd_url__ = (
'https://github.com/lbryio/lbcd/releases/download/' +
'v0.22.100-rc.0/lbcd_0.22.100-rc.0_TARGET_PLATFORM.tar.gz'
)
__lbcwallet_url__ = (
'https://github.com/lbryio/lbcwallet/releases/download/' +
'v0.13.100-alpha.0/lbcwallet_0.13.100-alpha.0_TARGET_PLATFORM.tar.gz'
__node_daemon__ = 'lbrycrdd'
__node_cli__ = 'lbrycrd-cli'
__node_bin__ = ''
__node_url__ = (
'https://github.com/lbryio/lbrycrd/releases/download/v0.17.4.6/lbrycrd-linux-1746.zip'
)
__spvserver__ = 'lbry.wallet.server.coin.LBCRegTest'
from lbry.wallet.wallet import Wallet, WalletStorage, TimestampedPreferences, ENCRYPT_ON_DISK
from lbry.wallet.manager import WalletManager
from lbry.wallet.network import Network
from lbry.wallet.ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
from lbry.wallet.account import Account, AddressManager, SingleKey, HierarchicalDeterministic, \
DeterministicChannelKeyManager
from lbry.wallet.transaction import Transaction, Output, Input
from lbry.wallet.script import OutputScript, InputScript
from lbry.wallet.database import SQLiteMixin, Database
from lbry.wallet.header import Headers
from .wallet import Wallet, WalletStorage, TimestampedPreferences, ENCRYPT_ON_DISK
from .manager import WalletManager
from .network import Network
from .ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
from .account import Account, AddressManager, SingleKey, HierarchicalDeterministic
from .transaction import Transaction, Output, Input
from .script import OutputScript, InputScript
from .database import SQLiteMixin, Database
from .header import Headers

View file

@@ -5,16 +5,18 @@ import logging
import typing
import asyncio
import random
from functools import partial
from hashlib import sha256
from string import hexdigits
from typing import Type, Dict, Tuple, Optional, Any, List
import ecdsa
from lbry.error import InvalidPasswordError
from lbry.crypto.crypt import aes_encrypt, aes_decrypt
from .bip32 import PrivateKey, PublicKey, KeyPath, from_extended_key_string
from .bip32 import PrivateKey, PubKey, from_extended_key_string
from .mnemonic import Mnemonic
from .constants import COIN, TXO_TYPES
from .constants import COIN, CLAIM_TYPES, TXO_TYPES
from .transaction import Transaction, Input, Output
if typing.TYPE_CHECKING:
@@ -33,49 +35,6 @@ def validate_claim_id(claim_id):
raise Exception("Claim id is not hex encoded")
class DeterministicChannelKeyManager:
def __init__(self, account: 'Account'):
self.account = account
self.last_known = 0
self.cache = {}
self._private_key: Optional[PrivateKey] = None
@property
def private_key(self):
if self._private_key is None:
if self.account.private_key is not None:
self._private_key = self.account.private_key.child(KeyPath.CHANNEL)
return self._private_key
def maybe_generate_deterministic_key_for_channel(self, txo):
if self.private_key is None:
return
next_private_key = self.private_key.child(self.last_known)
public_key = next_private_key.public_key
public_key_bytes = public_key.pubkey_bytes
if txo.claim.channel.public_key_bytes == public_key_bytes:
self.cache[public_key.address] = next_private_key
self.last_known += 1
async def ensure_cache_primed(self):
if self.private_key is not None:
await self.generate_next_key()
async def generate_next_key(self) -> PrivateKey:
db = self.account.ledger.db
while True:
next_private_key = self.private_key.child(self.last_known)
public_key = next_private_key.public_key
self.cache[public_key.address] = next_private_key
if not await db.is_channel_key_used(self.account, public_key):
return next_private_key
self.last_known += 1
def get_private_key_from_pubkey_hash(self, pubkey_hash) -> PrivateKey:
return self.cache.get(pubkey_hash)
class AddressManager:
name: str
@@ -121,7 +80,7 @@ class AddressManager:
def get_private_key(self, index: int) -> PrivateKey:
raise NotImplementedError
def get_public_key(self, index: int) -> PublicKey:
def get_public_key(self, index: int) -> PubKey:
raise NotImplementedError
async def get_max_gap(self):
@@ -138,8 +97,7 @@ class AddressManager:
return [r['address'] for r in records]
async def get_or_create_usable_address(self) -> str:
async with self.address_generator_lock:
addresses = await self.get_addresses(only_usable=True, limit=10)
addresses = await self.get_addresses(only_usable=True, limit=10)
if addresses:
return random.choice(addresses)
addresses = await self.ensure_address_gap()
@@ -161,8 +119,8 @@ class HierarchicalDeterministic(AddressManager):
@classmethod
def from_dict(cls, account: 'Account', d: dict) -> Tuple[AddressManager, AddressManager]:
return (
cls(account, KeyPath.RECEIVE, **d.get('receiving', {'gap': 20, 'maximum_uses_per_address': 1})),
cls(account, KeyPath.CHANGE, **d.get('change', {'gap': 6, 'maximum_uses_per_address': 1}))
cls(account, 0, **d.get('receiving', {'gap': 20, 'maximum_uses_per_address': 1})),
cls(account, 1, **d.get('change', {'gap': 6, 'maximum_uses_per_address': 1}))
)
def merge(self, d: dict):
@@ -175,7 +133,7 @@ class HierarchicalDeterministic(AddressManager):
def get_private_key(self, index: int) -> PrivateKey:
return self.account.private_key.child(self.chain_number).child(index)
def get_public_key(self, index: int) -> PublicKey:
def get_public_key(self, index: int) -> PubKey:
return self.account.public_key.child(self.chain_number).child(index)
async def get_max_gap(self) -> int:
@@ -235,7 +193,7 @@ class SingleKey(AddressManager):
@classmethod
def from_dict(cls, account: 'Account', d: dict) \
-> Tuple[AddressManager, AddressManager]:
same_address_manager = cls(account, account.public_key, KeyPath.RECEIVE)
same_address_manager = cls(account, account.public_key, 0)
return same_address_manager, same_address_manager
def to_dict_instance(self):
@@ -244,7 +202,7 @@ class SingleKey(AddressManager):
def get_private_key(self, index: int) -> PrivateKey:
return self.account.private_key
def get_public_key(self, index: int) -> PublicKey:
def get_public_key(self, index: int) -> PubKey:
return self.account.public_key
async def get_max_gap(self) -> int:
@@ -266,6 +224,9 @@ class SingleKey(AddressManager):
class Account:
mnemonic_class = Mnemonic
private_key_class = PrivateKey
public_key_class = PubKey
address_generators: Dict[str, Type[AddressManager]] = {
SingleKey.name: SingleKey,
HierarchicalDeterministic.name: HierarchicalDeterministic,
@@ -273,7 +234,7 @@ class Account:
def __init__(self, ledger: 'Ledger', wallet: 'Wallet', name: str,
seed: str, private_key_string: str, encrypted: bool,
private_key: Optional[PrivateKey], public_key: PublicKey,
private_key: Optional[PrivateKey], public_key: PubKey,
address_generator: dict, modified_on: float, channel_keys: dict) -> None:
self.ledger = ledger
self.wallet = wallet
@@ -284,14 +245,13 @@ class Account:
self.private_key_string = private_key_string
self.init_vectors: Dict[str, bytes] = {}
self.encrypted = encrypted
self.private_key: Optional[PrivateKey] = private_key
self.public_key: PublicKey = public_key
self.private_key = private_key
self.public_key = public_key
generator_name = address_generator.get('name', HierarchicalDeterministic.name)
self.address_generator = self.address_generators[generator_name]
self.receiving, self.change = self.address_generator.from_dict(self, address_generator)
self.address_managers = {am.chain_number: am for am in (self.receiving, self.change)}
self.address_managers = {am.chain_number: am for am in {self.receiving, self.change}}
self.channel_keys = channel_keys
self.deterministic_channel_keys = DeterministicChannelKeyManager(self)
ledger.add_account(self)
wallet.add_account(self)
@@ -306,19 +266,19 @@ class Account:
name: str = None, address_generator: dict = None):
return cls.from_dict(ledger, wallet, {
'name': name,
'seed': Mnemonic().make_seed(),
'seed': cls.mnemonic_class().make_seed(),
'address_generator': address_generator or {}
})
@classmethod
def get_private_key_from_seed(cls, ledger: 'Ledger', seed: str, password: str):
return PrivateKey.from_seed(
ledger, Mnemonic.mnemonic_to_seed(seed, password or 'lbryum')
return cls.private_key_class.from_seed(
ledger, cls.mnemonic_class.mnemonic_to_seed(seed, password or 'lbryum')
)
@classmethod
def keys_from_dict(cls, ledger: 'Ledger', d: dict) \
-> Tuple[str, Optional[PrivateKey], PublicKey]:
-> Tuple[str, Optional[PrivateKey], PubKey]:
seed = d.get('seed', '')
private_key_string = d.get('private_key', '')
private_key = None
@@ -489,7 +449,7 @@ class Account:
assert not self.encrypted, "Cannot get private key on encrypted wallet account."
return self.address_managers[chain].get_private_key(index)
def get_public_key(self, chain: int, index: int) -> PublicKey:
def get_public_key(self, chain: int, index: int) -> PubKey:
return self.address_managers[chain].get_public_key(index)
def get_balance(self, confirmations=0, include_claims=False, read_only=False, **constraints):
@@ -560,30 +520,33 @@ class Account:
return tx
async def generate_channel_private_key(self):
return await self.deterministic_channel_keys.generate_next_key()
def add_channel_private_key(self, private_key):
public_key_bytes = private_key.get_verifying_key().to_der()
channel_pubkey_hash = self.ledger.public_key_to_address(public_key_bytes)
self.channel_keys[channel_pubkey_hash] = private_key.to_pem().decode()
def add_channel_private_key(self, private_key: PrivateKey):
self.channel_keys[private_key.address] = private_key.to_pem().decode()
async def get_channel_private_key(self, public_key_bytes) -> PrivateKey:
async def get_channel_private_key(self, public_key_bytes):
channel_pubkey_hash = self.ledger.public_key_to_address(public_key_bytes)
private_key_pem = self.channel_keys.get(channel_pubkey_hash)
if private_key_pem:
return PrivateKey.from_pem(self.ledger, private_key_pem)
return self.deterministic_channel_keys.get_private_key_from_pubkey_hash(channel_pubkey_hash)
return await asyncio.get_event_loop().run_in_executor(
None, ecdsa.SigningKey.from_pem, private_key_pem, sha256
)
async def maybe_migrate_certificates(self):
def to_der(private_key_pem):
return ecdsa.SigningKey.from_pem(private_key_pem, hashfunc=sha256).get_verifying_key().to_der()
if not self.channel_keys:
return
channel_keys = {}
for private_key_pem in self.channel_keys.values():
if not isinstance(private_key_pem, str):
continue
if not private_key_pem.startswith("-----BEGIN"):
if "-----BEGIN EC PRIVATE KEY-----" not in private_key_pem:
continue
private_key = PrivateKey.from_pem(self.ledger, private_key_pem)
channel_keys[private_key.address] = private_key_pem
public_key_der = await asyncio.get_event_loop().run_in_executor(None, to_der, private_key_pem)
channel_keys[self.ledger.public_key_to_address(public_key_der)] = private_key_pem
if self.channel_keys != channel_keys:
self.channel_keys = channel_keys
self.wallet.save()
@ -603,14 +566,35 @@ class Account:
if gap_changed:
self.wallet.save()
async def get_detailed_balance(self, confirmations=0, read_only=False):
constraints = {}
if confirmations > 0:
height = self.ledger.headers.height - (confirmations-1)
constraints.update({'height__lte': height, 'height__gt': 0})
return await self.ledger.db.get_detailed_balance(
accounts=[self], read_only=read_only, **constraints
)
async def get_detailed_balance(self, confirmations=0, reserved_subtotals=False, read_only=False):
tips_balance, supports_balance, claims_balance = 0, 0, 0
get_total_balance = partial(self.get_balance, read_only=read_only, confirmations=confirmations,
include_claims=True)
total = await get_total_balance()
if reserved_subtotals:
claims_balance = await get_total_balance(txo_type__in=CLAIM_TYPES)
for txo in await self.get_support_summary():
if confirmations > 0 and not 0 < txo.tx_ref.height <= self.ledger.headers.height - (confirmations - 1):
continue
if txo.is_my_input:
supports_balance += txo.amount
else:
tips_balance += txo.amount
reserved = claims_balance + supports_balance + tips_balance
else:
reserved = await self.get_balance(
confirmations=confirmations, include_claims=True, txo_type__gt=0
)
return {
'total': total,
'available': total - reserved,
'reserved': reserved,
'reserved_subtotals': {
'claims': claims_balance,
'supports': supports_balance,
'tips': tips_balance
} if reserved_subtotals else None
}
def get_transaction_history(self, read_only=False, **constraints):
return self.ledger.get_transaction_history(

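The largest behavioural difference in the account module above is get_detailed_balance: master delegates the whole aggregation to one SQL query in the database layer, while v0.90.1 computes the reserved subtotals in Python from the account's support TXOs. The sketch below is a minimal, self-contained illustration of the split both versions compute (total, reserved, available, plus the claims/supports/tips breakdown); the Support tuple and the plain-number inputs are invented for illustration and are not part of the wallet API.

from typing import List, NamedTuple

class Support(NamedTuple):
    amount: int        # dewies
    is_my_input: bool  # True if this account funded the support itself; False means it is an incoming tip

def detailed_balance(total: int, claims: int, supports: List[Support],
                     reserved_subtotals: bool = True) -> dict:
    # Mirror the dict shape returned by Account.get_detailed_balance.
    supports_balance = sum(s.amount for s in supports if s.is_my_input)
    tips_balance = sum(s.amount for s in supports if not s.is_my_input)
    reserved = claims + supports_balance + tips_balance
    return {
        'total': total,
        'available': total - reserved,
        'reserved': reserved,
        'reserved_subtotals': {
            'claims': claims,
            'supports': supports_balance,
            'tips': tips_balance,
        } if reserved_subtotals else None,
    }

# Example: 10 LBC total, 2 LBC locked in claims, one own support and one incoming tip.
print(detailed_balance(10_00000000, 2_00000000,
                       [Support(1_00000000, True), Support(50_000_000, False)]))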
View file

@ -1,21 +1,10 @@
from asn1crypto.keys import PrivateKeyInfo, ECPrivateKey
from coincurve import PublicKey as cPublicKey, PrivateKey as cPrivateKey
from coincurve.utils import (
pem_to_der, lib as libsecp256k1, ffi as libsecp256k1_ffi
)
from coincurve.ecdsa import CDATA_SIG_LENGTH
from coincurve import PublicKey, PrivateKey as _PrivateKey
from lbry.crypto.hash import hmac_sha512, hash160, double_sha256
from lbry.crypto.base58 import Base58
from .util import cachedproperty
class KeyPath:
RECEIVE = 0
CHANGE = 1
CHANNEL = 2
class DerivationError(Exception):
""" Raised when an invalid derivation occurs. """
@ -57,11 +46,9 @@ class _KeyBase:
if len(raw_serkey) != 33:
raise ValueError('raw_serkey must have length 33')
return (
ver_bytes + bytes((self.depth,))
+ self.parent_fingerprint() + self.n.to_bytes(4, 'big')
+ self.chain_code + raw_serkey
)
return (ver_bytes + bytes((self.depth,))
+ self.parent_fingerprint() + self.n.to_bytes(4, 'big')
+ self.chain_code + raw_serkey)
def identifier(self):
raise NotImplementedError
@ -82,30 +69,26 @@ class _KeyBase:
return Base58.encode_check(self.extended_key())
class PublicKey(_KeyBase):
class PubKey(_KeyBase):
""" A BIP32 public key. """
def __init__(self, ledger, pubkey, chain_code, n, depth, parent=None):
super().__init__(ledger, chain_code, n, depth, parent)
if isinstance(pubkey, cPublicKey):
if isinstance(pubkey, PublicKey):
self.verifying_key = pubkey
else:
self.verifying_key = self._verifying_key_from_pubkey(pubkey)
@classmethod
def from_compressed(cls, public_key_bytes, ledger=None) -> 'PublicKey':
return cls(ledger, public_key_bytes, bytes((0,)*32), 0, 0)
@classmethod
def _verifying_key_from_pubkey(cls, pubkey):
""" Converts a 33-byte compressed pubkey into an coincurve.PublicKey object. """
""" Converts a 33-byte compressed pubkey into an PublicKey object. """
if not isinstance(pubkey, (bytes, bytearray)):
raise TypeError('pubkey must be raw bytes')
if len(pubkey) != 33:
raise ValueError('pubkey must be 33 bytes')
if pubkey[0] not in (2, 3):
raise ValueError('invalid pubkey prefix byte')
return cPublicKey(pubkey)
return PublicKey(pubkey)
@cachedproperty
def pubkey_bytes(self):
@ -120,7 +103,7 @@ class PublicKey(_KeyBase):
def ec_point(self):
return self.verifying_key.point()
def child(self, n: int) -> 'PublicKey':
def child(self, n: int):
""" Return the derived child extended pubkey at index N. """
if not 0 <= n < (1 << 31):
raise ValueError('invalid BIP32 public key child number')
@ -128,7 +111,7 @@ class PublicKey(_KeyBase):
msg = self.pubkey_bytes + n.to_bytes(4, 'big')
L_b, R_b = self._hmac_sha512(msg) # pylint: disable=invalid-name
derived_key = self.verifying_key.add(L_b)
return PublicKey(self.ledger, derived_key, R_b, n, self.depth + 1, self)
return PubKey(self.ledger, derived_key, R_b, n, self.depth + 1, self)
def identifier(self):
""" Return the key's identifier as 20 bytes. """
@ -141,36 +124,6 @@ class PublicKey(_KeyBase):
self.pubkey_bytes
)
def verify(self, signature, digest) -> bool:
""" Verify that a signature is valid for a 32 byte digest. """
if len(signature) != 64:
raise ValueError('Signature must be 64 bytes long.')
if len(digest) != 32:
raise ValueError('Digest must be 32 bytes long.')
key = self.verifying_key
raw_signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')
parsed = libsecp256k1.secp256k1_ecdsa_signature_parse_compact(
key.context.ctx, raw_signature, signature
)
assert parsed == 1
normalized_signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')
libsecp256k1.secp256k1_ecdsa_signature_normalize(
key.context.ctx, normalized_signature, raw_signature
)
verified = libsecp256k1.secp256k1_ecdsa_verify(
key.context.ctx, normalized_signature, digest, key.public_key
)
return bool(verified)
class PrivateKey(_KeyBase):
"""A BIP32 private key."""
@ -179,7 +132,7 @@ class PrivateKey(_KeyBase):
def __init__(self, ledger, privkey, chain_code, n, depth, parent=None):
super().__init__(ledger, chain_code, n, depth, parent)
if isinstance(privkey, cPrivateKey):
if isinstance(privkey, _PrivateKey):
self.signing_key = privkey
else:
self.signing_key = self._signing_key_from_privkey(privkey)
@ -187,7 +140,7 @@ class PrivateKey(_KeyBase):
@classmethod
def _signing_key_from_privkey(cls, private_key):
""" Converts a 32-byte private key into an coincurve.PrivateKey object. """
return cPrivateKey.from_int(PrivateKey._private_key_secret_exponent(private_key))
return _PrivateKey.from_int(PrivateKey._private_key_secret_exponent(private_key))
@classmethod
def _private_key_secret_exponent(cls, private_key):
@ -199,40 +152,24 @@ class PrivateKey(_KeyBase):
return int.from_bytes(private_key, 'big')
@classmethod
def from_seed(cls, ledger, seed) -> 'PrivateKey':
def from_seed(cls, ledger, seed):
# This hard-coded message string seems to be coin-independent...
hmac = hmac_sha512(b'Bitcoin seed', seed)
privkey, chain_code = hmac[:32], hmac[32:]
return cls(ledger, privkey, chain_code, 0, 0)
@classmethod
def from_pem(cls, ledger, pem) -> 'PrivateKey':
der = pem_to_der(pem.encode())
try:
key_int = ECPrivateKey.load(der).native['private_key']
except ValueError:
key_int = PrivateKeyInfo.load(der).native['private_key']['private_key']
private_key = cPrivateKey.from_int(key_int)
return cls(ledger, private_key, bytes((0,)*32), 0, 0)
@classmethod
def from_bytes(cls, ledger, key_bytes) -> 'PrivateKey':
return cls(ledger, cPrivateKey(key_bytes), bytes((0,)*32), 0, 0)
@cachedproperty
def private_key_bytes(self):
""" Return the serialized private key (no leading zero byte). """
return self.signing_key.secret
@cachedproperty
def public_key(self) -> PublicKey:
def public_key(self):
""" Return the corresponding extended public key. """
verifying_key = self.signing_key.public_key
parent_pubkey = self.parent.public_key if self.parent else None
return PublicKey(
self.ledger, verifying_key, self.chain_code,
self.n, self.depth, parent_pubkey
)
return PubKey(self.ledger, verifying_key, self.chain_code, self.n, self.depth,
parent_pubkey)
def ec_point(self):
return self.public_key.ec_point()
@ -245,12 +182,11 @@ class PrivateKey(_KeyBase):
""" Return the private key encoded in Wallet Import Format. """
return self.ledger.private_key_to_wif(self.private_key_bytes)
@property
def address(self):
""" The public key as a P2PKH address. """
return self.public_key.address
def child(self, n) -> 'PrivateKey':
def child(self, n):
""" Return the derived child extended private key at index N."""
if not 0 <= n < (1 << 32):
raise ValueError('invalid BIP32 private key child number')
@ -269,28 +205,6 @@ class PrivateKey(_KeyBase):
""" Produce a signature for piece of data by double hashing it and signing the hash. """
return self.signing_key.sign(data, hasher=double_sha256)
def sign_compact(self, digest):
""" Produce a compact signature. """
key = self.signing_key
signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')
signed = libsecp256k1.secp256k1_ecdsa_sign(
key.context.ctx, signature, digest, key.secret,
libsecp256k1_ffi.NULL, libsecp256k1_ffi.NULL
)
if not signed:
raise ValueError('The private key was invalid.')
serialized = libsecp256k1_ffi.new('unsigned char[%d]' % CDATA_SIG_LENGTH)
compacted = libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(
key.context.ctx, serialized, signature
)
if compacted != 1:
raise ValueError('The signature could not be compacted.')
return bytes(libsecp256k1_ffi.buffer(serialized, CDATA_SIG_LENGTH))
def identifier(self):
"""Return the key's identifier as 20 bytes."""
return self.public_key.identifier()
@ -302,12 +216,9 @@ class PrivateKey(_KeyBase):
b'\0' + self.private_key_bytes
)
def to_pem(self):
return self.signing_key.to_pem()
def _from_extended_key(ledger, ekey):
"""Return a PublicKey or PrivateKey from an extended key raw bytes."""
"""Return a PubKey or PrivateKey from an extended key raw bytes."""
if not isinstance(ekey, (bytes, bytearray)):
raise TypeError('extended key must be raw bytes')
if len(ekey) != 78:
@ -319,7 +230,7 @@ def _from_extended_key(ledger, ekey):
if ekey[:4] == ledger.extended_public_key_prefix:
pubkey = ekey[45:]
key = PublicKey(ledger, pubkey, chain_code, n, depth)
key = PubKey(ledger, pubkey, chain_code, n, depth)
elif ekey[:4] == ledger.extended_private_key_prefix:
if ekey[45] != 0:
raise ValueError('invalid extended private key prefix byte')
@ -337,6 +248,6 @@ def from_extended_key_string(ledger, ekey_str):
xpub6BsnM1W2Y7qLMiuhi7f7dbAwQZ5Cz5gYJCRzTNainXzQXYjFwtuQXHd
3qfi3t3KJtHxshXezfjft93w4UE7BGMtKwhqEHae3ZA7d823DVrL
return a PublicKey or PrivateKey.
return a PubKey or PrivateKey.
"""
return _from_extended_key(ledger, Base58.decode_check(ekey_str))
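For reference, the from_seed path shown above is identical on both branches apart from the return annotation: the seed is run through HMAC-SHA512 keyed with b'Bitcoin seed', the left 32 bytes become the master private key and the right 32 bytes the chain code, exactly as BIP32 specifies. A minimal sketch using only the standard library plus coincurve (which this module already imports on both sides); the names are illustrative, not the module's.

import hashlib
import hmac
import os
from coincurve import PrivateKey as CoincurvePrivateKey

def master_key_from_seed(seed: bytes):
    # Split HMAC-SHA512(b'Bitcoin seed', seed) into (signing key, chain code).
    digest = hmac.new(b'Bitcoin seed', seed, hashlib.sha512).digest()
    secret, chain_code = digest[:32], digest[32:]
    signing_key = CoincurvePrivateKey(secret)  # same coincurve type the wrapper stores
    compressed = signing_key.public_key.format(compressed=True)  # 33-byte compressed pubkey
    return signing_key, chain_code, compressed

sk, cc, pub = master_key_from_seed(os.urandom(64))
assert len(cc) == 32 and len(pub) == 33 and pub[0] in (2, 3)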

View file

@ -881,365 +881,4 @@ HASHES = {
879000: '0eb0810f4b81d1845b0a88f05449408df2e45715c9210a656f45278c5fdf7956',
880000: 'e7d613027e3b4ca38d09bbef07998b57db237c6d67f1e8ea50024d2e0d9a1a72',
881000: '21af4d355d8756b8bf0369b2d79b5c824148ae069026ba5c14f9dd6b7555e1db',
882000: 'bc26f028e547ec44fc3864925bd1493211773b5cb9a9583ba4c1909b89fe0d33',
883000: '170a624f4be04cd2fd435cfb6ba1f31b9ef5d7b084a25dfa23cd118c2752029e',
884000: '46cccb7a12b4d01d07c211b7b8db41321cd73f30069df27bcdb3bb600c0272b0',
885000: '7c27f79d5a99baf0f81f2b09eb5c1bf905976a0f872e02bd4ca9e82f0ed50cb0',
886000: '256e3e00cecc72dbbfef5cea627ecf1d43b56edd5fd1642a2bc4e97c17056f34',
887000: '658ebac7dfa62bc7a22b1a9ba4e5b425a866f7550a6b40fd07de47119fd1f7e8',
888000: '497a9d02868605b9ff6e7f15948a83a7e07606829107e63c2e091c90c7a7b4d4',
889000: '561daaa7ebc87e586d37a96ecfbc72484d7eb602824f38f484ed333e78208e9e',
890000: 'ab5a8cb625b28343f8fac858eab6576c856dab88bde8cda02b80b3edfd307d71',
891000: '2e81d9fc885ddc09222b298ac9efbb73638a5721802b9256de6505ecf122dbaa',
892000: '73be08881b8832e986c0bb9a06c70fff346edb2afaf69630e47e4a4a90c5fece',
893000: 'd39079dcaa4d8af1c26f0edf7e16df43cd857a31e0aa4c4123226793f1ab497f',
894000: '0a3b677d72c590d4b1ff7a9b4098d6b52d0dc10d64c30c2766d18e6eb02872cd',
895000: 'a3bbba831f48c5b68e494ee63015b487782c64c5c24bb29436283360c28fd1e0',
896000: '20af178a192ca43975ab6c838fe97ca42ba6c682682eddbc6481efd153ecb0a2',
897000: '8d0ee14b9fdb853a09ab2951d26b8f7cb8bc8038b09513bd330ee4b0bdcc4780',
898000: 'c97fbb70f804408b131a98f9fb4c04cdf2df1655d3e8ff2e0d58ed8537349f4e',
899000: 'eba2be80478e8dec2d66ca40b853580c5dad040351c64c177e3d8c25aff6c1b6',
900000: 'c4dc344a993558418b93b3f60aaef0030e2a4116086577fbf1e2f544bdbddae1',
901000: '36d84229afa63045875fc8fea0c55de8eb90694b3a37cceb825c87abf1fea998',
902000: '8ca4890ecfc5e3f9d767e4fcdf318a1e3e3597675bbcfe534d64e76bc4e8fbf4',
903000: '8b9f6a7514033c57668ca94fb3758cc6d1ef37ac982c2ff5a9f0f206fcd8d0a8',
904000: 'e9ae813991f35ca89af2fe1f1b6adf9e93c6b1dd6a74f003ebbe699a30b252ea',
905000: 'd426489d01d4f4c829f2eb68a67721d2c0e1c71e8c33ef9253593447e8603462',
906000: '63000bbed97451e68d64485c02c1c3d90b4156237dac315f4e012ffb538e375b',
907000: '96759653a4e514541effa7ef86d9f22a272ddde7b069149d17e9d9203a1edafb',
908000: 'eec6477d2f3b71bde76dc2380d6e06aa8aa306ca56ba1dd15a31c22ae0db501b',
909000: 'd5c2984cf130335aa29296ba5b17672d00360fe0ec73977326180014908c0b55',
910000: '7b99cb1c94144f606937903e173bd9ef63bfffd3db8110693fa4c2caa0abc21f',
911000: '95eed0d9dd9869ac6f83fa67863e77f24df69bcb90fef70918f30b2400e24ea8',
912000: '34c3c8780c54ecced50f0a6b394309d09ee6ce37cd98794699c63771d1d91144',
913000: '536052ddcd445702160288ef3f669ce56868c085315556c9f5ca081ef0c0b9e1',
914000: '1bcd1fe9632f93a0a1fe7d8a1891a4fc6ef1be40ccf887524a9095ed7aa9fa44',
915000: '139bad9fa12ec72a37b62ad8511300ebfda89330fa5d5a83861f864b6adeae67',
916000: '81d15282214ff83e2a034212eb58abeafcb5664d3734bff13b22b4c093b20fea',
917000: 'f31081031cebe450e4450ef397d91790fc0068e98e6746cd0aab86d17e4448f5',
918000: '4af8eb28616ef0e859b5471650c7f8e910cd692a6b4ff3a7171a709db2f18e4e',
919000: '78a197b5f9733e9e4dc9820e1c79bd335beb19f6b87056e48e8e21fbe27d83d6',
920000: '33d20f86d1367f07d6731e1e2cc9305252b281b1b092403133924cc1052f501d',
921000: '6926f1e31e7fe9b8f7a81efa73d5635f8f28c1db1708e4d57f6e7ead951a4beb',
922000: '811e2335798eb54696a4b11ca3a44b9d79486262119383d542491afa9ae80204',
923000: '8f47ac365bc380885db809f2818ffc7dd2076aaa0f9bf6c180df1b4358dc842e',
924000: '535e79802c10630c17fb8fddec3ba2bf85eedbc0c076f3575f8189fe887ba993',
925000: 'ca43bd24d17d75d55e72e45549384b395c62e1daf0d3f58f296e18168b918fbf',
926000: '9a03be89e0725877d42296e6c995d9c48bb5f4bbd971f5a9add191af2d1c144b',
927000: 'a14e0ef6bd1bc221dbba99031c16ddbbd76394186677c29bdf07b89fa2a6efac',
928000: 'b16931bd7392e9db26be975b072024210fb5fe6ee22fc0809d51980aa8068a98',
929000: '4da56a2e66fcd98a70039d9061ea5eb0fb6d9460b437d2191e47441182419a04',
930000: '87e820e2237a54c4ea100bdd0145598f05add92185cd3d0929aa2d5099f4d5e0',
931000: '515b22c91172157c443a47cf213014aff144181a77e276e291535ab3762bb1ae',
932000: 'e130c6a9eb416f96256d1f90256a148957daa32f56af228d2d9ce6ff27ce2011',
933000: '30c992ec7a9a320fb4db260373121efc7b5e7fc744f4b31defbe6a7608e0749e',
934000: 'ec490fa0de6b1d78a4121a5044f501bbb3bd9e448c18121cea87eb8e3cadba41',
935000: '603e4ae6a6d936c79b3f1c9f9e88305930953b9b390dac442976a6e8395fc520',
936000: '2b756fe2de4328e598ed511b8828e5c2c6b5cdda1b5e7c1c26f8e0424c81afa9',
937000: '1ae0f15f14a0d4819e34a6c18de9428a9e43e17d75383bffa9ffb18358e93b63',
938000: 'cbd7001825ec87b8c6917d6e9e7dc5c8d7767788b6ffd61a61d0c612dbe5de66',
939000: 'd770d0395aa79076044783fb37a1bb173cb95c93ff1ba82c34a72c4d8e425a03',
940000: '3341d0a0349d091d88d233cd6ea6e0ad553d52039b4d47af51b8a8e7573a7916',
941000: '16123b8758e99344ebe6670cd95826881b274c31d4da2a051052955a32bade3a',
942000: 'ac7430961e77f902918fe79a52cbf6b523e3f2804ec83d0b17908e131ea9ea68',
943000: '2ad08a6877e4687dcb7a623adeddc88403e8082efd6de28328b351282dc141e2',
944000: '81382e8c1f47fa7c03fa1726f9b09ed1cd38140fe50683896eaa1b403d7e5fe3',
945000: '152bfbb166da04dab16030af28ae65b3275819eed1d0bbfc11eba65616ebefd6',
946000: '25b3da0962f87a0d3e4aec8b16483efbcab9514893a42fd31f4cb544ddc45a1f',
947000: '2cb738ba342436628ff292797e3d36c4752d71bdc1af87fe758d469d06e36e0e',
948000: 'b3683e18570fcc8b986720514539181ec43fb5dbc20fe314c56ab6bd31ab766a',
949000: '94ced5bfba55ccffc909bf098d537e047d8d4cbb79f5e2a74146073f39804865',
950000: 'b11543cd2aedae27f6ddc3d2b431c897fdcfe59ed3c926b0777bc1e99de4d12a',
951000: '21508881a7f80fcd0b9b27bbcfba634b39c6525f5313968c4605cd55b4fec446',
952000: 'f9b3ed919c9ca20cd2927d899ee7a86c93c2dd919dafb6fdb792f2d9f1895cb0',
953000: 'cf578d8e80eec4102dc1b5321f10b36020b3b32f4b5d4664c90c412ca2ef6b42',
954000: 'ed17c919ae5c4be835966b47f667d6082c75917b95584b2d2aff0e32f5c8aa98',
955000: '948ea467fa01a20122e2146669214fdd3bb025038554609f7299ece5bca63e39',
956000: 'b50ff4c02957ed8764215d25f206f6f1fe6d0eb712a378b937ff952dd479afd2',
957000: '169922a3e51517ba6104a883d29aac03a9d20b4d448bd2773137b0d790e3db6b',
958000: '92258ac2e8b53167dc30436d93f385d432bd549711ab9790ba4e8263c5c54382',
959000: '7ca824697459eb302bcd7fba9d255fb269555abe7cf9d2dd5e54e196d751e682',
960000: '89f9ec925d23698076d84f9e852ab04fc956ac4465827303de0c3bb0b685eb32',
961000: '41cf75cd71bc12b93674c416e8b01b7410eb9e09eb8727ad93ff0b833c9966c9',
962000: '7db1f1dbff3e389713067879bfedf9513ec74bb1e128b13fc2fe23ad55fd0306',
963000: 'a35e71c611b2227adeac824d151d2f09bdbecd5765a4e62c6e74a3e4290abc66',
964000: 'dc1811130e249d2208d6f85838512b4e5482efb0bd2f619164a68a0c60d7f248',
965000: '92f5e25dd1c03102720dd0c3136b1a0769901bf89fcc0262a5e24405f349ca07',
966000: '08243d780d8ba96a940f409b87d9c6b8a95c92804173b9156ada0dad35b628dc',
967000: 'cb769a8935bb6faeb981da74f4079babbbb89476f825cc897f43e79790295260',
968000: 'ff3fc27d2998f4dc4ac1ff378afe14c7d0f43cc328deb9c978ec0e067d1dfaf9',
969000: 'e41a3452f45d5f025627d08c9c41017679e9c4804371dd1cc02f3ed49f85dbb2',
970000: 'f5eaaf7ba6b47245a4a8096a7785c7b25dc6db342ac2ccbba0c321e97ab58284',
971000: '75414062f1d4ed675dadc8f04ba10147a484aaca1ae316dc0b896a92809b3db6',
972000: '5bcf2ee00133774c7d060a1a1863dfccc20d5127ecb542470f607dec2504fe6f',
973000: '07d15b9656ecde2cd86a9d22c3de8b6505d6bab2aa5a94560b0db9119f1f6f6c',
974000: '2059e7924d7a210a88f5a65abc61152506a82edccd27416e796c81b9b8003f13',
975000: '7fcf5d8b2c0e51cfbdaa2502a9da0bdb323646899dad37dacc39af9f9e16fc5c',
976000: '02acb8cf87a0900436eccfca50371948531041d7b8b410a902205f84dd7fb88e',
977000: '2636dfd5a47016c893265473e78ecbf2000769d886f0d01ee7a91e9397210d15',
978000: 'ce92f52a35096b94bea73a7d4e113bc4564a4a589b66f1ab86f61c822cf9ee76',
979000: '21b8102f5b76be0c8e20d537ebc78ebe46bfcea6b6d2dda950ce5b48e85f72d7',
980000: 'f4df0bd63b36105705de62266d654612d9804bad7069d41344de269657e6f084',
981000: 'f006cd2718d98d774a5cd18394db7744c812fa149c8a63e76bab934aee89f571',
982000: 'da5d6609265d9153022d823b0260aa07e7511ceff7a3fd2ca7ce83cb3900a661',
983000: '3a26f3f02aa145fa8c5268fbe10dd9c3546d7dda57489ca5d4b161beb0d5a6e2',
984000: '968e8cd37a1137797d40f39f106cae62d1e252b46c7473b9434ad5f870ee88fb',
985000: '3129c3bf20deace1a9c92646a9d769da7a07f18dcd5b7a7b1e8cf5fd5390f8e1',
986000: '6ce830ca5da322ddbb97fc572ea03218913d070e5910516b33c6113b02b23c21',
987000: '7fb1a8635623847132ab766a99b792953379f782d1115b9649f5f9c5a742ca04',
988000: '5e8e6c6da7f271129c20c4dd891dcb1df4f9d690ee7cf391c6b7fbd028a0da4c',
989000: '12919e34bb9a9ac1d2a01e221eb8c511117fc4e1b3ae15355d95caf4673bdb08',
990000: '016f8b18227a0c09da55594a98638ad5b0fbb4896e2ab6163ac40b6015b2811e',
991000: 'ddf8cd6e2f4ee07530ae7567cef4fa2c2fd4a655cb20e20422e66fd49bde6489',
992000: 'dca77707c0caa3a9605f3dadf593402339c29448869907fb31f6c624e942dcbd',
993000: 'de9acc4c7c482ecac741fd6acbbc3a333afab52f3fe5eea4130c0770299a56dd',
994000: '54420631f8a801a1b8f391088f599ee22cedc06f24bf67f18272feb8fe70c682',
995000: '4b44b26e3e2495716dfd86fc42594cd4b1e4b70bdab4f0905cce4cb9556e008a',
996000: 'd6e41fd301fc5f519c343ceb39c9ff845656a4482e4e182abdcd3963fd5fde1c',
997000: 'd68b6a509d742b182ffb5a98b0e585a2320a5d3fe6977ad3e6cd06835ef2ea55',
998000: '1efcdcbadbec54ce3a93a1857253614536c34f05a0b1924f24bff194dc3392e1',
999000: '10a7713e46f47527f3819b4a9257a03f3e207d18e4917d6bcb43fdea3ba82b9a',
1000000: '1b4ddb1436df05f07807d6337b93ee1aa8b600fd6a910a8fd5313a39e0440eec',
1001000: 'cde0df1abdae26d2c2bdc111be15fb33231c5e167bb8b8f8eec667d71379fee4',
1002000: 'd7ce7a96a3ca73a4dfd6a1780e23f834f339142519ea7f45d256c113e27e4857',
1003000: 'b1a9b1c562ec62b9dd746d336b4211afc37482d0274ff692a44fa17ac9fe9a28',
1004000: '7afd6d0fb0014fbe16a31c84d3f1731736eaeef35e40bb1a1f232fb00345deae',
1005000: '4af61ce4cda5de58277f7a67cadea5d3f6ce56e54785b188e32306e00b0414df',
1006000: '08e1fb7295efd4a48cb999d899a3d481b682ddbce738fecd88a6d32cbe8234f0',
1007000: '14a367a41603dd690541daee8aa4a2882260059e3f85bd8978b7431e8f7db844',
1008000: 'e673230e62aaefad0678611f94ff35ee8a6e18eb96438bdfb4b614f54f54dba7',
1009000: 'e191af8fb71d0d91419abd19443af3d3f23ee4fe359bb8c390429cc838132bde',
1010000: 'ffdba58f184cf60838b75b7899b6633e7cfd34cf36eded572c0133d07387bc49',
1011000: '40801af3a5546cb9d53e05e21b74be09de9a421b762ca1d52d2266f5c2055ce8',
1012000: '552519acebed0e38102f5270dc60b1da7a123600b6b94169ae74462ae454693f',
1013000: '1eee96f48418929927eaa9642777bc806d326cfffaf077bc8695a7ecd438d631',
1014000: 'a471093e1de2a8db586412d7351c8d88e44ea890f46e9b43251af427a0a4a879',
1015000: '57532f5a522295cc139f008bdcb7a1e6d02e6035d5221b2687c7c216f06297a2',
1016000: 'ec46dba07addcb6e62f58456a53c513d876f1c49ae7d76d230adb8debd26027d',
1017000: '33ea8d25f342a7465ed71e4bab2b91007991e0994c61d321e3625301a1390322',
1018000: '4871c03cc95d4ce0a39bd2cebbb001b2ea1cce1b3561bb841d88f43bb9d12ffd',
1019000: 'f5248257576eb2ff4139d6374cc7ce34121cc942598cf9e04d2bd572e09189bb',
1020000: 'e7785286897c85cfb0276957bff216039eeb11bc1ebca89d0bb586022caa5750',
1021000: 'a30220f17d060634c5f6a1ddc5ea34b01c18fb5eb7e0e8267b66bf5a49525627',
1022000: '6083ea49e64ac0d4507c674237cf87d30b90b285ec63d082e626df0223eb7c9c',
1023000: '1dc5596d716bc33ee0f56fc40c1f073155a58a7692935c9e5854ef3b65b76828',
1024000: '065adfee40dc33abff07fb55339571712b959bc1830dc60b6691e36eab1508ae',
1025000: 'bb6903752d31278570e774b80a80782179c78f099e58c3dc4cba7afea7a471c4',
1026000: 'f3050f3c2f3a76f5084856b0f089383517caa3f51530fbc29335308f5f170625',
1027000: '746ed3701510d07958d11a06f22dbb839d9858373dc5a33249dd69e91bab01fd',
1028000: '43f7a96ea6a45b78c29ad4a2f8680ef184438c2bd3686172b0564e0ae6dd7ba1',
1029000: 'cbb9916099c59e14fe61d284374f4feaa3d43afec59e4698ed92143576f24b34',
1030000: '2e805fc2331e32e586ea692bc3d4e6b11e1ec3f1cab6e331b459f9f1ac9a1f1e',
1031000: '04f324f8f6d4f9901cf65f78dc91d6010ea6cf125f5ac0253b57b5f1f79e81e0',
1032000: '60ca62f52fdfd858b0ee0fdb380648bde85ca14e2a73565205ed4ee0bc861c77',
1033000: 'eb60aac23d599d3099cf98ed8fc3213f1bc06bc1c677429b303e9c81f79f1340',
1034000: 'f0328df2daf119ce673ddfa7a39a84576985f701f7a7dec3f56f58c2019ebd4d',
1035000: 'f9d3cbce3854de168d8835c96917c01be6244c8f82641e8d9398dfffec4e7107',
1036000: '7dca97e6e1d6ed70aa7805f74b768009a270e7ebe1dd951e8727d1d2f2d271f2',
1037000: '5329504126b2845b3044f423b521e77ff58d7d242f24bf87c87f4d8d4e03a947',
1038000: '5bad3ad55e3daa415f3182a1f2a099fe1767e8fae34e9bb95d47e242b8971434',
1039000: 'c29729b8ba49ac0043fe4aa6fc971f8ac3eda68ff92970957ada39a2989b2491',
1040000: 'f303aebfc9267600c081d0c021065743f93790df6f5c924a86b773788e0c45be',
1041000: 'a1cbe5059fa2275707785b77970c36d79b12c1ba93121bc9064ab9b64abacf7b',
1042000: '004b0dd4e438abc54ae832d733df32a6ba35b75e6d3e0c9c1dee5a7950507295',
1043000: '31893a3fe7bb4f6dd546c7a8de4a65990e94046aab442d18c68b6bf6acd54518',
1044000: '2c4dd479948acc42946f94050810000b0539864ad24a67a7251bff1c4971b035',
1045000: '1cea782d60df35a88b30ae205ce37e30abc7cad2b22181722be150bd92c53814',
1046000: 'ee808f0efb0f2ef93e8599d8b7f0e2e7c3cdc42353e4ea5165028b961f43d548',
1047000: '75f057e2a8cb1d46e5c943d63cc56936a6bac8b1cb89300593845a20baf39765',
1048000: '2abcd227f5314baed85e3c5b49d3888a60085c1845c955a8bf96aa3dd6394798',
1049000: '5d0ec24b9acd5ab21b42f68e1f3142b7bf83433b98f2fa9794586c8eff45893e',
1050000: '1d364b13a4c17bd67a6d1e5f77c26d02faa014d7cd152b4da70380f168b8e0ff',
1051000: 'b9a20cec21de84433be9b85817dd4803e875d9275dbc02907b29888431859bae',
1052000: '424cb56b00407d73b309b2081dd0bf89213cf024e3aafb3090506aa0ba10f835',
1053000: '6df3041a32fafd6a4e08778546d077cf591e1a2a16e77fe7a610efc2b542a9ff',
1054000: '78f8dee794f3d4366019339d7ba74ad2b543ecd25dc575620f66e1d535411971',
1055000: '43b8e9dae5addd58a7cccf62ba57ab46ffdaa2dcd113cc8ca537e9101b54c096',
1056000: '86b7f3741343f85d93410b78cc3fbf03d49b60a664e908703016aa56a206ae7e',
1057000: 'b033cf6ec622be6a99dff536a2cf73b36d3c3f8c3835ee17e0dd357403e85c41',
1058000: 'a65a6db692a8358e399a5ac3c818902fdb60595262ae05531084848febead249',
1059000: 'f6d781d2e2fdb4b7b074d1d8123875d899cdbd6be375cb4288e86f1d14a929f6',
1060000: 'cd9019bb1de4926cca16a7bef1a46786f10a3260d467cda0775f73361795abc9',
1061000: 'ed4f5dc6f475f95b40595632fafd9e7e5eef388b6cc15772204c0b0e9ee4e542',
1062000: 'c44d02a890aa66979b10d1cfa597c877f498841b4e12dd9a7bdf8d4a5fccab80',
1063000: '1c093734f5f241b36c1b9971e2759983f88f4033405a2588b4ebfd6998ac7465',
1064000: '9e354a83b71bbb9704053bfeea038a9c3d5daad080c6406c698b047c634706a6',
1065000: '563188accc4a6e311bd5046516a92a233f11f891b2304d37f151c5a6002b6958',
1066000: '333f1b4e996fac87e32dec667533715b31f1736b4342806a81d568b5c5238456',
1067000: 'df59a0b7319d5269bdf55043d91ec62bbb30829bb7054da623717a394b6ed678',
1068000: '06d8b674a205393edaf20c1d837baadc9caf0b0a675645246263cc163302241d',
1069000: 'ac065c48fad1383039d39e23c8367bad7cf9a37e07a5294cd7b04af5827b9961',
1070000: '90cd8b50f94208bc459081356474a961f6b764a1217f8fd291f5e4828081b730',
1071000: '3c0aa207ba9eea45458ab4fa26d6a027862592adb9bcce30915816e777dc6cfc',
1072000: '3d556c08f2300b67b704d3cbf46e22866e3ac164472b5930e2ada23b08475a0f',
1073000: 'a39b5c54c24efe3066aa203358b96baea405cd59aac6b0b48930e77799b4dd7d',
1074000: 'e8c8273d5a50a60e8744716c9f31496fb29eca87b4d68643f4ecd7ec4e400e23',
1075000: 'b8043ae41a1d0d7d4310c85764fcba1424733df347ffc2e8cbda1fe6ccbb5153',
1076000: '58468db1f91805e767d334824d6bffe54e0f900d1fb2a89b105086a493053b3d',
1077000: '04a78749b58465efa3a56d1735cd082c1f0f796e26486c7136950dbaf6effaa4',
1078000: 'e1dd6b58c75b01a67d4a4594dc7b4b2ee9e7d7fa7b25fd6246ce0e86eff33c75',
1079000: 'd239af017a6bb664485b14ad15e0eb703775e43018a045a8612b3697794460da',
1080000: '29ae5503f8c1249fefeb63fd967a71a70588ee0db1c97497e16366163a684341',
1081000: '05103ab27469e0859cbcd3daf42faa2bae798f522534697c7f2b34f7a050ee0f',
1082000: '4553d2cb7e90b6db11d242e287fe96822e6cd60e6388b94bf9006411f202ba03',
1083000: '97995acd178b2a142d571d5ae1c2a3deaf93a909fd91fb9c541d57f73e32dc99',
1084000: '9e3f23376af14d76ab24cd54e321dec019af73ad61067d959ff90043acc5ffcc',
1085000: '81c056b14f13cee0d6d6c8079fdd5a1a84c3a5c76cc9448612e8ef6d3531300e',
1086000: '8a0004f6809bdd075915a804e43991dfe8f22e05679d2fdaf8e373f101bac5c2',
1087000: '27c45a4c9ad24e038f2ebe40835a1c49ac7221d7185082866ee354351ba87c7a',
1088000: 'fd27e21747117b00b4ada1cba161ac49edb57cca540f86ac5ba885050f08f824',
1089000: 'bff867335767103bc3ed15ede5b9fde88016f8ede15dc5bf3e81ea40dcfc61ae',
1090000: '608f75016d1db08888dd59640f63e838c19bdfa833c0cc177ad3d2b818b0db5b',
1091000: '90750b452bd4dedaab6b57fecbfe88f71ce3d5437fad7f9ec0fdd270445c7526',
1092000: '98287b39f9f1233017dc5d932e5c77f0521ca84587eb3f39f0e7b6c297c749af',
1093000: '68a5846ed05c9bb142197849106838765f90f15c10b2cc938eef49b95eaa9d33',
1094000: '5660a1aac2fc763a417fc656c8887fc8186bf613ae1ccbb1a664fb43ce1fa1d6',
1095000: '62bad3db418b3f4cad3596881b645b72479c71deb0d39c7a4c8bd1577dc225fd',
1096000: 'e0e4b2b183591f10dd5614c289412f2fb5e320b7d3278f7c028f42f591872666',
1097000: 'a233a233fc2aa5dab9e75106d91388343ef969458ea974f1409a2ab5fc441911',
1098000: '16dfa5fa6cbd1188e562697b5f00ac206960d0851ed84adf37ae975fd5ffdd6a',
1099000: 'b8a870b7dc6d3263730c00f59d52aa6cce35dc59aa8fba715034cc2d14927260',
1100000: 'a3cd7749743da22a3846dcc2edbf1df21b938e829419389e3bc09284797c5b43',
1101000: '75b14c2a95e2a095949729b7c0b624bd725a2de98404a8e3247b60c977d0198e',
1102000: '4d3af64d37064dd5f57e25d61f248a1e21c1b1cadd7bb1404e35c9fbe06f1fd4',
1103000: 'd73c92bfed358dfcd7659228974ab75ea2fc86f2301ee47133adad8075203872',
1104000: '30cd82354f37bc0b412123867c7e1835206022a7501853bf8c0d3df02f291645',
1105000: '1d2ef984f26693dce77460cd2694e5da46e675077e91a1cea26051733b01a7ef',
1106000: '51c076c304222fe3ca308ba6968c46fef448f85be13a095cecb75b90e7954698',
1107000: '99e2221339e16acc34c9816f2ef7b866c2dd753aa3cbe484ae831959a23ece68',
1108000: '0f1227c250296bfe88eb7eb41703f99f633cfe02870816111e0cadfe778ddb19',
1109000: 'b35447f1ad76f95bc4f5886e4028d33acb3ad7b5000dd15516d3f11ce4baa990',
1110000: 'ac7baff996062bfaaaddd7d496b17e3ec1c8d34b2143095645ff22fb3888ae00',
1111000: '430bbbdcca36b2d69b6a2dd8b07c583a060a467e5f9acbc6de62462e1f7c7036',
1112000: 'e5274dea029dc44baff55c05b0555f91b74d29ffd40e3a8c4e2c5b57f9d40bef',
1113000: 'cf43863249fa42cfe108220dd40169dac702b0dd9cf5cb699cf2fc96feda8371',
1114000: 'fa1c0e551784d21c451564124d2d730e616724f3e535de3c186bcdeb47e80a8f',
1115000: '49fe6ecee35a397b83b5a704e950ad028cfb4b7e7a524021e789f4acc0fd6ffe',
1116000: '74ecded36751aa8b7901b31f0d16d75d111fc3c40b567f649c04f74ed028aa5c',
1117000: 'd9ca760a22190bdf545766b47d963c738a4edcc27f4d15ca801b35751577cfa7',
1118000: 'c28d42f871682800ac4e867608227cfb6bc4c00b618e83a8556f201a1c28813c',
1119000: 'c5fafc4e1785b0b9e84bb052e392154a5ba1aefe612998017e90772bcd554e08',
1120000: 'aa054d428bc9ccee0761da92163817163413065fe1e67ef79a056c5233ea3476',
1121000: '0df295bb944218503bd1bf66d2ece0c50fd22dae3391b80673a7ad1e4e5c3934',
1122000: 'a13abb350a26673b3933b1de307a60a6845ca594d502599548c6253e21a6d8e8',
1123000: 'a4bc6a3abf9ed1f4b14338ff0f03f83456312bc91a93fa89ae6db493050115e1',
1124000: '65869938df99adf0dda76200291ce09a54c9bcc787e4bb62cd72c367db58f4f0',
1125000: 'ea5e918233b14c3c73d488a906e3741c61bdcafe0393bd0404168fe80c950a46',
1126000: 'ce88cd35104fcec51bcee77302e03162dc694802536f5b668786b2245e61bca5',
1127000: 'ea19c0c8d205be4be87d02c5301c9ed331e7d75e25b93d1c2137c248882af515',
1128000: '006f32d63c2a3adcf4fbad0b0629c97f1beab6446a9c27fbde9472f2d066219e',
1129000: '218e5392e1ecf471c3bbc3d79c24dee30ac8db315dbeb61317318efb3f221163',
1130000: '30b9da0bd8364e9cd5551b2529341a01a3b7257a238d15b2560e2c99fdb324e8',
1131000: '8a7f382cfa023d2eba6639443e67206f8883b57d23ce7e1339234b8bb3098a82',
1132000: 'bf9af68a6fe2112d8fe311dfd52334ae2e7b0bac6675c9ebfddb1f386c212668',
1133000: '1a30951e2be633502a47c255a93ddbb9ed231d6bb4c55a807c0e910b437766b3',
1134000: 'a9bcaf3300b7915e701a8e396eb13f0c7287576323420be7aab3c3ba48020f76',
1135000: '337eed9ed072b5ad862af2d3d651f1b49fa852abc590b7e1c2dc381b496f438a',
1136000: '208761dbc29ec58302d722a05e937a3cf9e78bfb6495be395dd7b54f02e169dc',
1137000: '4e5b67ff3324b64e268049fdc3d82982b847ee359d409ade6368864c38a111e5',
1138000: '55d1d0833021a664e85eec8cc90a0985e67cc80d28841aaa8c2231ec28087ebb',
1139000: 'e750ada1ec9fa0f2f2461ed68958c7d116a699a82ec12911da5563139f8df19e',
1140000: '9cf81407b6ccc8046f0233f97484166945758f7392bb54841c912fcb34cf205c',
1141000: 'fccf32b2fae03e3b6b562483776625f9843cd68734c55659e2069cde7e383170',
1142000: 'c3608c215dd6569da6c1871c4d72a09ab1caa9663647f2a9454b5693d5d72a65',
1143000: 'bd39cb8c4e529d15bbea6baeec66afe52ca18afe32bd812f28fbb0676647cdff',
1144000: '6e42d02538565ce7e2d9bf31a304f1fd0ac122d35d17a030160575815901b0b1',
1145000: 'b9722e1de2904ce1219140fffb1f4f9f5a041f885faa634404238d103c738b4c',
1146000: 'd4de4271459966cee774f538a243d7db0689b213b296463d42e45c93194d7861',
1147000: '51fadf109f22bb85574d0fbcbd0b20992983e89aee3d415a7b1c37c44775d9a9',
1148000: '137e1fe8da31680d21a42e7421eb608a883a497314e4404625ce44b0edadde6a',
1149000: 'cb87867eb04203ce15e0763a2f4389376cea75e0a2877f55e2911c575bef07a8',
1150000: '977528ca7953a2c9c19fefaa3aab7ebdec3ac324d74a07d83764ba25d9be0689',
1151000: 'a09c51c832600ded63a19201df008075273ea248fd406886e93a2cbaa3bba46b',
1152000: '0e5367cfa0f00dd932a5bcc00dcc807fa6825161806bed588e16a57947b4b32d',
1153000: '55a9de3dcde2efb56a3c5fea7d22b98c1e180db9a4d4f4f6be7aae1f1cbd7608',
1154000: 'abc58cf71c4691ebfaef920252730cf69abbe9de88b424c03051b9b03e85d45a',
1155000: '4f074ce73c8a096620b8a32498362eb66a072eae95d561f2d53557cd513ae785',
1156000: '540a838a0f0a8834466b17dd456d35b8acae2ec8419f8bd9a704d9ea439062ac',
1157000: 'd5310ac671abdb658ea028db86c23fc729af965f91d67a37218c1412cf32a1f5',
1158000: '162d906a07e6c35e7c3ebf7069a200521605a97920f5b589d31b19bfd7766ee2',
1159000: '600bd8f5e1e62219e220f4dcb650db5812e79956f95ae8a50e83126932685ee0',
1160000: '91319398d1a805fac8582c8485e6d84e7490d6cfa6e44e2c630665b6bce0e6b8',
1161000: 'f7ad3cff6ee76e1e3df4abe70c600e4af66e1df55bf7b03aee12251d4455a1d4',
1162000: '85b9fbba669c2a4d3f85cdb5123f9538c05bd66172b7236d756703f99258454d',
1163000: '966085d767d1e5e2e8baf8eda8c11472ec5351181c418b503585284009aaea79',
1164000: '1c94e1b531215c019b12caf407296d8868481f49524b7180c7161b0363c1f789',
1165000: '803b6bf93735aeae2cf607824e2adf0d754b58da2516c2da1e485c697e472143',
1166000: '872561a82f7991633d0927d25cb659d096bbe556fe6dac7a0b6a679820733069',
1167000: '6bd7cdd605a3179b54c8af88d1638bf8133fab12cbf0a78d37cf21eddf4395a1',
1168000: '79946f5758c1817239cc642d27298bd710983551a8236e49832c6d818b097337',
1169000: 'b0994c60728e74de4aa361f37fa85e5296ce3188ae4e0b66d7b34fe86a239c9c',
1170000: 'a54188a5a64e0cf8da2406d16a0ac3983b087fc7d6231b6f8abf92cf11dc78cd',
1171000: 'ec2924d98e470cc6359821e6468df2c15d60301861d443188730342581230ef2',
1172000: 'b4ac11116aa73ce19428009a80e583e19dc9bcd380f7f7ce272a92921d5868d2',
1173000: '501d3551f762999dd5a799f3c5658fff2a7f3aff0511488272cd7693fefb8f9d',
1174000: '4660074ea48a78ae453cb14b694b2844cc0fb63ed9352ed20d11158bbb5c1f28',
1175000: '0727f6b1d9f8fe5677a9ffa0d475f53f5a419ef90b80896c22c2c95de22175de',
1176000: '150633d6a35496c24a93c9e19817e90f649c56b7e2558f99e97325bfd5df8b17',
1177000: '0849e19f22571b62dba8ff02f6b5a064a7ac36e7ed491321b3663567e8e17294',
1178000: '770dd463e7bad80f689f12934e4ae06e24378d1545dcf211fd143beaef49464e',
1179000: '059d383dcc60a49b658b674d92fc35cab07b06329c58d73818b6387cb0c06534',
1180000: 'e547cb3c636243ca9ae4cfb92c30a0f583eda84e329a5c1e5f64a26fc6fc791e',
1181000: '4521a4396ab02f73d45d7a3393ea1c602d255778d52c12079c88bfbad32aab43',
1182000: '051cfe993e4b0b34233403a9e8c397dd50e8b78a30fb07e9c260604ee9e624a9',
1183000: '44a69c99bb8b85e84ae279f2d8e5400d51cb3d5f0bcd178db49d55548cd66191',
1184000: '2a1d23c9bb3c71a533e0c9d25b03bfa7e9db8e014645f3e7fbede6d99fff0191',
1185000: 'bb90d6c6d77819163a9e909ee621d874707cdb21c91b1d9e861b204cf37d0ffa',
1186000: '4a92051b738ea0e28c64c64f1eb6f0405bc7c3427bef91ff20f4c43cf084d750',
1187000: 'f782ac330ca20fb5d8a094ee0f0f8c086a76e3f03ecc6a2c42f8fd07e52e0f41',
1188000: '94cb7b653dd3d838c186420158cf0e73db73ec28deaf67d9a2ca902caba4141a',
1189000: 'c8128e59b9ec948de890184578a113478ea63f7d57cb75c2c8d5c001a5a724c0',
1190000: '4da643bd35e5b98932ae21515a6bffb9c72f2cd8d514cd2d7eac1922af785c3f',
1191000: '0f922d86658ac3f53c5f9db360c68ab3f3253a925f23e1323820e3384214719a',
1192000: '4c3ab631cf5ba0c236f7c64af6f790fc24448319de6f75dbd28df4e2648d0b7d',
1193000: 'eda118d1fac3470a1f8f01f5c78108c8ecdcd6420be30f6d20f1d1831e7b6975',
1194000: '5723fff88abd9bb5088476fa5f4221a61c6f8a718703a92f13248ad350abeea2',
1195000: '1715846f82d011919e3446c6ce675a65fb80338bd791d4e735702c4767d9adc4',
1196000: 'b497667996aee2db61e88f442e728be15ab0b2b64cfd43198691fcf6cdafacc8',
1197000: '309a6170d837b8cb334fb888a64ed4e47e6592747e93c8e9d1bf7d608cfef87d',
1198000: '3ea918ef64a67dec20051519e6aefaeb7aca2d8583baca9ad5c5bd07073e513a',
1199000: '4ec7b7361b0243e5b2996a16e3b27acd662126b95fe542a487c7030e47ea3667',
1200000: 'b829c742686fcd642d0f9443336d7e2c4eab81667c90ce553df1350ed10b4233',
1201000: '44c022887f1e126fd281b1cae26b2017fa6415a64b105762c87643204ce165a5',
1202000: 'b11cc739eb28a14f4e47be125aa7e62d6d6f90c8f8014ee70044ed506d53d938',
1203000: '997a7c5fd7a98b39c9ca0790519924d73c3567656b605c97a6fdb7b406c3c64d',
1204000: '7d25d872e17195ee277243f7a5a39aa64d8750cec62e4777146acf61a8e76b04',
1205000: 'ce8486ae745a4645bee081ef3291d9505174bed05b0668d963b2998b7643dbb0',
1206000: '46a0bcea3c411c600dffe3e06e3d1dfbf5879a7ec4dcf3848e794cefcbf2bc0b',
1207000: '37e6297bf6e4e2bdd40401d4d7f95e3e3bdafd4a7f76b9c52865cefc6b82b20b',
1208000: 'd09e3982a9827b8cf56a5a2f4031dc6b082926c1fd57b63beaaa6cfd534eb902',
1209000: '54ae9010a9f146c83464e7ee60b30d9dbee36418561abc4e8d61bce9baa2d21d',
1210000: '5dcfd33f8e5ac21c9ba8553758b8cd8afae7961cad428530b5109c2db2ebf39f',
1211000: '91c952348bb2c3dfac0d6531a3dac770ea6dab571af257530e9c55493c96bdd9',
1212000: 'e62cc3fe044a7f5de4c04a8aed5619548f9d5c6fad9f989d3382cb96de1d780d',
1213000: '66b46ffdca8acf1dd04528dadb28b6ac4ce38807c1b84abd685d4ddb3dc59a34',
1214000: '2ce4091756ad23746bab4906f46545953cadaf61deae0d78e8a10d4eb51866b1',
1215000: '83ce3ca087799cdc4b4c5e7cfeb4a127708724a7ca76aa5f7f4ec1ed48b5fca6',
1216000: '7d07b739b7991fbd74926281bf51bba9d5721afab39598720f9ff5f7410a6721',
1217000: '76adf49491670d0e8379058eacf0228f330f3c18955dfea1ebe43bc11ee065f3',
1218000: '77f422e7301a81692dec69e5c6d35fa988a00a4d820ad0ebb1d595add36558cc',
1219000: '8ba9d944f8c468c81799294aeea8dc05ed1bb90bb26552fcd190bd88fedcddf2',
1220000: '00330367c255e0fe51b374597995c53353bc5700ad7d603cbd4197141933fe9c',
1221000: '3ba8b316b7964f31fdf628ed869a6fd023680cca6611257a31efe22e4d17e578',
1222000: '016e58d3fb6a29a3f9281789359460e776e9feb2f0db500482b6e231e1272aef',
1223000: 'fdfe767c29a3de7acd913b627d1e5fa887a1af9974f6a8a6474db822468c785c',
1224000: '92239f6207bff3689c554e92b24fe2e7be4a2203104ad8ef08b2c6bedd9aeccf',
1225000: '9a2f2dd9527b533d3d743efc55236e73e15192171bc8d0cd910918d1ab00aef7',
1226000: 'eb8269c75b8c5f66e6ea88ad70883dddcf8a75a45198ca7a46eb0ec606a791bb',
1227000: '5c82e624390cd57942dc9d64344eaa3d8991e0437e01802473053245b706290c',
1228000: '51e9a7d727f07fc01be7c03e3dd854eb666697f05bf89259baac628520d4402c',
1229000: 'c4bfdb651c9abdeda717fb9c8a4c8a6c9c0f78c13d3e6cae3f24f504d734c643',
1230000: '9f1ce781d16f2334567cbfb22fff42c14d2b9290cc2883746f435a1fb127021d',
1231000: '5c996634b377412ae0a3d8f541f3cc4a354aab72c198aa23a5cfc2678cbabf09',
1232000: '86702316a2d1730fbae01a08f36fffe5bf6d3ebb7d76b35a1617713766698b46',
1233000: 'fb16b63916c0287cb9b01d0c5aad626ced1b73c49a374c9009703aa90fd27a82',
1234000: '7c6f7904602ccd86bfb05cb8d6b5547c989c57cb2e214e93f1220fa4fe29bcb0',
1235000: '898b0f20811f52aa5a6bd0c35eff86fca3fbe3b066e423644fa77b2e269d9513',
1236000: '39128910ef624b6a8bbd390a311b5587c0991cda834eed996d814fe410cac352',
1237000: 'a0709afeedb64af4168ce8cf3dbda667a248df8e91da96acb2333686a2b89325',
1238000: 'e00075e7ba8c18cc277bfc5115ae6ff6b9678e6e99efd6e45f549ef8a3981a3d',
1239000: '3fba891600738f2d37e279209d52bbe6dc7ce005eeed62048247c96f370e7cd5',
1240000: 'def9bf1bec9325db90bb070f532972cfdd74e814c2b5e74a4d5a7c09a963a5f1',
1241000: '6a5d187e32bc189ac786959e1fe846031b97ae1ce202c22e1bdb1d2a963005fd',
1242000: 'a74d7c0b104eaf76c53a3a31ce51b75bbd8e05b5e84c31f593f505a13d83634c',
}
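The file above is pure data: a height-to-block-hash checkpoint table sampled every 1,000 blocks, which the ledger below binds to its checkpoints attribute (the two branches simply cover different height ranges). As a rough sketch of how such a table is typically consulted, and assuming the actual call site lives in the headers code that is not part of this diff, a header only needs to be compared against the table when its height has an entry:

def matches_checkpoint(checkpoints: dict, height: int, block_hash: str) -> bool:
    # Only reject when a checkpoint exists for this height and disagrees with it.
    expected = checkpoints.get(height)
    return expected is None or expected == block_hash

# Hypothetical usage against the HASHES table above:
# matches_checkpoint(HASHES, 882000, 'bc26f028e547ec44fc3864925bd1493211773b5cb9a9583ba4c1909b89fe0d33')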

View file

@ -141,7 +141,7 @@ class CoinSelector:
_) -> List[OutputEffectiveAmountEstimator]:
""" Accumulate UTXOs at random until there is enough to cover the target. """
target = self.target + self.cost_of_change
self.random.shuffle(txos, random=self.random.random) # pylint: disable=deprecated-argument
self.random.shuffle(txos, self.random.random)
selection = []
amount = 0
for coin in txos:

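Only the shuffle call in the coin selector differs between the branches: the second argument to random.shuffle has been deprecated since Python 3.9, so master passes it as a keyword and silences the lint warning, while v0.90.1 passes it positionally. The selection strategy itself, per the docstring, is plain random accumulation; a self-contained sketch with integer amounts standing in for the OutputEffectiveAmountEstimator objects:

import random
from typing import List, Optional

def select_at_random(amounts: List[int], target: int, cost_of_change: int,
                     rng: Optional[random.Random] = None) -> Optional[List[int]]:
    # Shuffle the candidates and keep adding coins until target + cost_of_change is covered.
    rng = rng or random.Random()
    coins = list(amounts)
    rng.shuffle(coins)
    needed = target + cost_of_change
    selection, gathered = [], 0
    for coin in coins:
        selection.append(coin)
        gathered += coin
        if gathered >= needed:
            return selection
    return None  # not enough funds to cover the target

# Deterministic example: cover 150 units plus a change cost of 10.
print(select_at_random([40, 60, 25, 80, 15], target=150, cost_of_change=10,
                       rng=random.Random(1)))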
View file

@ -2,7 +2,6 @@ NULL_HASH32 = b'\x00'*32
CENT = 1000000
COIN = 100*CENT
DUST = 1000
TIMEOUT = 30.0

View file

@ -9,17 +9,16 @@ from dataclasses import dataclass
from contextvars import ContextVar
from typing import Tuple, List, Union, Callable, Any, Awaitable, Iterable, Dict, Optional
from datetime import date
from prometheus_client import Gauge, Counter, Histogram
from lbry.utils import LockWithMetrics
from .bip32 import PublicKey
from .bip32 import PubKey
from .transaction import Transaction, Output, OutputScript, TXRefImmutable, Input
from .constants import TXO_TYPES, CLAIM_TYPES
from .util import date_to_julian_day
from concurrent.futures.thread import ThreadPoolExecutor # pylint: disable=wrong-import-order
if platform.system() == 'Windows' or ({'ANDROID_ARGUMENT', 'KIVY_BUILD'} & os.environ.keys()):
if platform.system() == 'Windows' or 'ANDROID_ARGUMENT' or 'KIVY_BUILD' in os.environ:
from concurrent.futures.thread import ThreadPoolExecutor as ReaderExecutorClass # pylint: disable=reimported
else:
from concurrent.futures.process import ProcessPoolExecutor as ReaderExecutorClass
@ -83,10 +82,10 @@ class AIOSQLite:
"read_count", "Number of database reads", namespace="daemon_database"
)
acquire_write_lock_metric = Histogram(
'write_lock_acquired', 'Time to acquire the write lock', namespace="daemon_database", buckets=HISTOGRAM_BUCKETS
f'write_lock_acquired', 'Time to acquire the write lock', namespace="daemon_database", buckets=HISTOGRAM_BUCKETS
)
held_write_lock_metric = Histogram(
'write_lock_held', 'Length of time the write lock is held for', namespace="daemon_database",
f'write_lock_held', 'Length of time the write lock is held for', namespace="daemon_database",
buckets=HISTOGRAM_BUCKETS
)
@ -447,10 +446,6 @@ class SQLiteMixin:
version = await self.db.execute_fetchone("SELECT version FROM version LIMIT 1;")
if version == (self.SCHEMA_VERSION,):
return
if version == ("1.5",) and self.SCHEMA_VERSION == "1.6":
await self.db.execute("ALTER TABLE txo ADD COLUMN has_source bool DEFAULT 1;")
await self.db.execute("UPDATE version SET version = ?", (self.SCHEMA_VERSION,))
return
await self.db.executescript('\n'.join(
f"DROP TABLE {table};" for table in tables
) + '\n' + 'PRAGMA WAL_CHECKPOINT(FULL);' + '\n' + 'VACUUM;')
@ -507,7 +502,7 @@ def _get_spendable_utxos(transaction: sqlite3.Connection, accounts: List, decode
amount_to_reserve: int, reserved_amount: int, floor: int, ceiling: int,
fee_per_byte: int) -> int:
accounts_fmt = ",".join(["?"] * len(accounts))
txo_query = """
txo_query = f"""
SELECT tx.txid, txo.txoid, tx.raw, tx.height, txo.position as nout, tx.is_verified, txo.amount FROM txo
INNER JOIN account_address USING (address)
LEFT JOIN txi USING (txoid)
@ -597,7 +592,7 @@ def get_and_reserve_spendable_utxos(transaction: sqlite3.Connection, accounts: L
class Database(SQLiteMixin):
SCHEMA_VERSION = "1.6"
SCHEMA_VERSION = "1.5"
PRAGMAS = """
pragma journal_mode=WAL;
@ -651,7 +646,6 @@ class Database(SQLiteMixin):
txo_type integer not null default 0,
claim_id text,
claim_name text,
has_source bool,
channel_id text,
reposted_claim_id text
@ -696,8 +690,7 @@ class Database(SQLiteMixin):
'address': txo.get_address(self.ledger),
'position': txo.position,
'amount': txo.amount,
'script': sqlite3.Binary(txo.script.source),
'has_source': False,
'script': sqlite3.Binary(txo.script.source)
}
if txo.is_claim:
if txo.can_decode_claim:
@ -705,11 +698,8 @@ class Database(SQLiteMixin):
row['txo_type'] = TXO_TYPES.get(claim.claim_type, TXO_TYPES['stream'])
if claim.is_repost:
row['reposted_claim_id'] = claim.repost.reference.claim_id
row['has_source'] = True
if claim.is_signed:
row['channel_id'] = claim.signing_channel_id
if claim.is_stream:
row['has_source'] = claim.stream.has_source
else:
row['txo_type'] = TXO_TYPES['stream']
elif txo.is_support:
@ -770,10 +760,9 @@ class Database(SQLiteMixin):
conn.execute(*self._insert_sql(
"txo", self.txo_to_row(tx, txo), ignore_duplicate=True
)).fetchall()
elif txo.script.is_pay_script_hash and is_my_input:
conn.execute(*self._insert_sql(
"txo", self.txo_to_row(tx, txo), ignore_duplicate=True
)).fetchall()
elif txo.script.is_pay_script_hash:
# TODO: implement script hash payments
log.warning('Database.save_transaction_io: pay script hash is not implemented!')
def save_transaction_io(self, tx: Transaction, address, txhash, history):
return self.save_transaction_io_batch([tx], address, txhash, history)
@ -976,9 +965,7 @@ class Database(SQLiteMixin):
sql.append("LEFT JOIN txi ON (txi.position=0 AND txi.txid=txo.txid)")
return await self.db.execute_fetchall(*query(' '.join(sql), **constraints), read_only=read_only)
async def get_txos(
self, wallet=None, no_tx=False, no_channel_info=False, read_only=False, **constraints
) -> List[Output]:
async def get_txos(self, wallet=None, no_tx=False, no_channel_info=False, read_only=False, **constraints):
include_is_spent = constraints.get('include_is_spent', False)
include_is_my_input = constraints.get('include_is_my_input', False)
include_is_my_output = constraints.pop('include_is_my_output', False)
@ -1028,7 +1015,7 @@ class Database(SQLiteMixin):
if 'order_by' not in constraints or constraints['order_by'] == 'height':
constraints['order_by'] = [
"tx.height in (0, -1) DESC", "tx.height DESC", "tx.position DESC", "txo.position"
"tx.height=0 DESC", "tx.height DESC", "tx.position DESC", "txo.position"
]
elif constraints.get('order_by', None) == 'none':
del constraints['order_by']
@ -1155,41 +1142,6 @@ class Database(SQLiteMixin):
)
return balance[0]['total'] or 0
async def get_detailed_balance(self, accounts, read_only=False, **constraints):
constraints['accounts'] = accounts
result = (await self.select_txos(
f"COALESCE(SUM(amount), 0) AS total,"
f"COALESCE(SUM("
f" CASE WHEN"
f" txo_type NOT IN ({TXO_TYPES['other']}, {TXO_TYPES['purchase']})"
f" THEN amount ELSE 0 END), 0) AS reserved,"
f"COALESCE(SUM("
f" CASE WHEN"
f" txo_type IN ({','.join(map(str, CLAIM_TYPES))})"
f" THEN amount ELSE 0 END), 0) AS claims,"
f"COALESCE(SUM(CASE WHEN txo_type = {TXO_TYPES['support']} THEN amount ELSE 0 END), 0) AS supports,"
f"COALESCE(SUM("
f" CASE WHEN"
f" txo_type = {TXO_TYPES['support']} AND"
f" TXI.address IS NOT NULL AND"
f" TXI.address IN (SELECT address FROM account_address WHERE account = :$account__in0)"
f" THEN amount ELSE 0 END), 0) AS my_supports",
is_spent=False,
include_is_my_input=True,
read_only=read_only,
**constraints
))[0]
return {
"total": result["total"],
"available": result["total"] - result["reserved"],
"reserved": result["reserved"],
"reserved_subtotals": {
"claims": result["claims"],
"supports": result["my_supports"],
"tips": result["supports"] - result["my_supports"]
}
}
async def select_addresses(self, cols, read_only=False, **constraints):
return await self.db.execute_fetchall(*query(
f"SELECT {cols} FROM pubkey_address JOIN account_address USING (address)",
@ -1204,14 +1156,13 @@ class Database(SQLiteMixin):
addresses = await self.select_addresses(', '.join(cols), read_only=read_only, **constraints)
if 'pubkey' in cols:
for address in addresses:
address['pubkey'] = PublicKey(
address['pubkey'] = PubKey(
self.ledger, address.pop('pubkey'), address.pop('chain_code'),
address.pop('n'), address.pop('depth')
)
return addresses
async def get_address_count(self, cols=None, read_only=False, **constraints):
self._clean_txo_constraints_for_aggregation(constraints)
count = await self.select_addresses('COUNT(*) as total', read_only=read_only, **constraints)
return count[0]['total'] or 0
@ -1245,18 +1196,6 @@ class Database(SQLiteMixin):
async def set_address_history(self, address, history):
await self._set_address_history(address, history)
async def is_channel_key_used(self, account, key: PublicKey):
channels = await self.get_txos(
accounts=[account], txo_type=TXO_TYPES['channel'],
no_tx=True, no_channel_info=True
)
other_key_bytes = key.pubkey_bytes
for channel in channels:
claim = channel.can_decode_claim
if claim and claim.channel.public_key_bytes == other_key_bytes:
return True
return False
@staticmethod
def constrain_purchases(constraints):
accounts = constraints.pop('accounts', None)

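The schema changes above go in one direction: master adds a has_source column to txo, bumps SCHEMA_VERSION from 1.5 to 1.6, and ships a targeted upgrade that adds the column in place when the stored version is exactly 1.5 instead of dropping and re-syncing every table. A minimal standalone sketch of that upgrade pattern using the stdlib sqlite3 module rather than the project's AIOSQLite wrapper; the table and version strings mirror the diff, everything else is illustrative.

import sqlite3

SCHEMA_VERSION = "1.6"

def ensure_schema(conn: sqlite3.Connection) -> None:
    row = conn.execute("SELECT version FROM version LIMIT 1;").fetchone()
    if row == (SCHEMA_VERSION,):
        return  # already up to date
    if row == ("1.5",) and SCHEMA_VERSION == "1.6":
        # One-step upgrade: add the new column and record the new version, keeping the data.
        conn.execute("ALTER TABLE txo ADD COLUMN has_source bool DEFAULT 1;")
        conn.execute("UPDATE version SET version = ?", (SCHEMA_VERSION,))
        conn.commit()
        return
    # Any other version falls through to the drop-and-rebuild path shown in the diff.
    raise RuntimeError(f"unsupported schema version: {row}")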
View file

@ -16,18 +16,18 @@ from lbry.crypto.hash import hash160, double_sha256, sha256
from lbry.crypto.base58 import Base58
from lbry.utils import LRUCacheWithMetrics
from lbry.wallet.tasks import TaskGroup
from lbry.wallet.database import Database
from lbry.wallet.stream import StreamController
from lbry.wallet.dewies import dewies_to_lbc
from lbry.wallet.account import Account, AddressManager, SingleKey
from lbry.wallet.network import Network
from lbry.wallet.transaction import Transaction, Output
from lbry.wallet.header import Headers, UnvalidatedHeaders
from lbry.wallet.checkpoints import HASHES
from lbry.wallet.constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
from lbry.wallet.bip32 import PublicKey, PrivateKey
from lbry.wallet.coinselection import CoinSelector
from .tasks import TaskGroup
from .database import Database
from .stream import StreamController
from .dewies import dewies_to_lbc
from .account import Account, AddressManager, SingleKey
from .network import Network
from .transaction import Transaction, Output
from .header import Headers, UnvalidatedHeaders
from .checkpoints import HASHES
from .constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
from .bip32 import PubKey, PrivateKey
from .coinselection import CoinSelector
log = logging.getLogger(__name__)
@ -106,7 +106,7 @@ class Ledger(metaclass=LedgerRegistry):
target_timespan = 150
default_fee_per_byte = 50
default_fee_per_name_char = 0
default_fee_per_name_char = 200000
checkpoints = HASHES
@ -178,25 +178,15 @@ class Ledger(metaclass=LedgerRegistry):
raw_address = cls.pubkey_address_prefix + h160
return Base58.encode(bytearray(raw_address + double_sha256(raw_address)[0:4]))
@classmethod
def hash160_to_script_address(cls, h160):
raw_address = cls.script_address_prefix + h160
return Base58.encode(bytearray(raw_address + double_sha256(raw_address)[0:4]))
@staticmethod
def address_to_hash160(address):
return Base58.decode(address)[1:21]
@classmethod
def is_pubkey_address(cls, address):
def is_valid_address(cls, address):
decoded = Base58.decode_check(address)
return decoded[0] == cls.pubkey_address_prefix[0]
@classmethod
def is_script_address(cls, address):
decoded = Base58.decode_check(address)
return decoded[0] == cls.script_address_prefix[0]
@classmethod
def public_key_to_address(cls, public_key):
return cls.hash160_to_address(hash160(public_key))
@ -226,7 +216,7 @@ class Ledger(metaclass=LedgerRegistry):
return account.get_private_key(address_info['chain'], address_info['pubkey'].n)
return None
async def get_public_key_for_address(self, wallet, address) -> Optional[PublicKey]:
async def get_public_key_for_address(self, wallet, address) -> Optional[PubKey]:
match = await self._get_account_and_address_info_for_address(wallet, address)
if match:
_, address_info = match
@ -329,10 +319,10 @@ class Ledger(metaclass=LedgerRegistry):
async def start(self):
if not os.path.exists(self.path):
os.mkdir(self.path)
await asyncio.wait(map(asyncio.create_task, [
await asyncio.wait([
self.db.open(),
self.headers.open()
]))
])
fully_synced = self.on_ready.first
asyncio.create_task(self.network.start())
await self.network.on_connected.first
@ -365,10 +355,6 @@ class Ledger(metaclass=LedgerRegistry):
await self.db.close()
await self.headers.close()
async def tasks_are_done(self):
await self._update_tasks.done.wait()
await self._other_tasks.done.wait()
@property
def local_height_including_downloaded_height(self):
return max(self.headers.height, self._download_height)
@ -466,15 +452,14 @@ class Ledger(metaclass=LedgerRegistry):
async def subscribe_accounts(self):
if self.network.is_connected and self.accounts:
log.info("Subscribe to %i accounts", len(self.accounts))
await asyncio.wait(map(asyncio.create_task, [
await asyncio.wait([
self.subscribe_account(a) for a in self.accounts
]))
])
async def subscribe_account(self, account: Account):
for address_manager in account.address_managers.values():
await self.subscribe_addresses(address_manager, await address_manager.get_addresses())
await account.ensure_address_gap()
await account.deterministic_channel_keys.ensure_cache_primed()
async def unsubscribe_account(self, account: Account):
for address in await account.get_addresses():
@ -553,16 +538,15 @@ class Ledger(metaclass=LedgerRegistry):
"request %i transactions, %i/%i for %s are already synced", len(to_request), len(already_synced),
len(remote_history), address
)
remote_history_txids = {txid for txid, _ in remote_history}
remote_history_txids = set(txid for txid, _ in remote_history)
async for tx in self.request_synced_transactions(to_request, remote_history_txids, address):
self.maybe_has_channel_key(tx)
pending_synced_history[tx_indexes[tx.id]] = f"{tx.id}:{tx.height}:"
if len(pending_synced_history) % 100 == 0:
log.info("Syncing address %s: %d/%d", address, len(pending_synced_history), len(to_request))
log.info("Sync finished for address %s: %d/%d", address, len(pending_synced_history), len(to_request))
assert len(pending_synced_history) == len(remote_history), \
f"{len(pending_synced_history)} vs {len(remote_history)} for {address}"
f"{len(pending_synced_history)} vs {len(remote_history)}"
synced_history = ""
for remote_i, i in zip(range(len(remote_history)), sorted(pending_synced_history.keys())):
assert i == remote_i, f"{i} vs {remote_i}"
@ -623,12 +607,6 @@ class Ledger(metaclass=LedgerRegistry):
tx.is_verified = merkle_root == header['merkle_root']
return tx
def maybe_has_channel_key(self, tx):
for txo in tx._outputs:
if txo.can_decode_claim and txo.claim.is_channel:
for account in self.accounts:
account.deterministic_channel_keys.maybe_generate_deterministic_key_for_channel(txo)
async def request_transactions(self, to_request: Tuple[Tuple[str, int], ...], cached=False):
batches = [[]]
remote_heights = {}
@ -722,15 +700,6 @@ class Ledger(metaclass=LedgerRegistry):
return account.address_managers[details['chain']]
return None
async def broadcast_or_release(self, tx, blocking=False):
try:
await self.broadcast(tx)
except:
await self.release_tx(tx)
raise
if blocking:
await self.wait(tx, timeout=None)
def broadcast(self, tx):
# broadcast can't be a retriable call yet
return self.network.broadcast(hexlify(tx.raw).decode())
@ -744,15 +713,13 @@ class Ledger(metaclass=LedgerRegistry):
self.hash160_to_address(txi.txo_ref.txo.pubkey_hash)
)
for txo in tx.outputs:
if txo.is_pubkey_hash:
if txo.has_address:
addresses.add(self.hash160_to_address(txo.pubkey_hash))
elif txo.is_script_hash:
addresses.add(self.hash160_to_script_address(txo.script_hash))
start = int(time.perf_counter())
while timeout and (int(time.perf_counter()) - start) <= timeout:
if await self._wait_round(tx, height, addresses):
return
raise asyncio.TimeoutError(f'Timed out waiting for transaction. {tx.id}')
raise asyncio.TimeoutError('Timed out waiting for transaction.')
async def _wait_round(self, tx: Transaction, height: int, addresses: Iterable[str]):
records = await self.db.get_addresses(address__in=addresses)
@ -771,7 +738,7 @@ class Ledger(metaclass=LedgerRegistry):
))[1] if record['history'] else []
for txid, local_height in local_history:
if txid == tx.id:
if local_height >= height or (local_height == 0 and height > local_height):
if local_height >= height:
return True
log.warning(
"local history has higher height than remote for %s (%i vs %i)", txid,
@ -791,7 +758,7 @@ class Ledger(metaclass=LedgerRegistry):
include_sent_tips=False,
include_received_tips=False) -> Tuple[List[Output], dict, int, int]:
encoded_outputs = await query
outputs = Outputs.from_base64(encoded_outputs or '') # TODO: why is the server returning None?
outputs = Outputs.from_base64(encoded_outputs or b'') # TODO: why is the server returning None?
txs: List[Transaction] = []
if len(outputs.txs) > 0:
async for tx in self.request_transactions(tuple(outputs.txs), cached=True):
@ -867,10 +834,13 @@ class Ledger(metaclass=LedgerRegistry):
txo.received_tips = tips
return txos, blocked, outputs.offset, outputs.total
async def resolve(self, accounts, urls, **kwargs):
async def resolve(self, accounts, urls, new_sdk_server=None, **kwargs):
txos = []
urls_copy = list(urls)
resolve = partial(self.network.retriable_call, self.network.resolve)
if new_sdk_server:
resolve = partial(self.network.new_resolve, new_sdk_server)
else:
resolve = partial(self.network.retriable_call, self.network.resolve)
while urls_copy:
batch, urls_copy = urls_copy[:100], urls_copy[100:]
txos.extend(
@ -895,31 +865,21 @@ class Ledger(metaclass=LedgerRegistry):
return await self.network.sum_supports(new_sdk_server, **kwargs)
async def claim_search(
self, accounts,
include_purchase_receipt=False,
include_is_my_output=False,
**kwargs) -> Tuple[List[Output], dict, int, int]:
self, accounts, include_purchase_receipt=False, include_is_my_output=False,
new_sdk_server=None, **kwargs) -> Tuple[List[Output], dict, int, int]:
if new_sdk_server:
claim_search = partial(self.network.new_claim_search, new_sdk_server)
else:
claim_search = self.network.claim_search
return await self._inflate_outputs(
self.network.claim_search(**kwargs), accounts,
include_purchase_receipt=include_purchase_receipt,
include_is_my_output=include_is_my_output
)
# async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
# return await self.network.get_claim_by_id(claim_id)
async def get_claim_by_claim_id(self, claim_id, accounts=None, include_purchase_receipt=False,
include_is_my_output=False):
accounts = accounts or []
# return await self.network.get_claim_by_id(claim_id)
inflated = await self._inflate_outputs(
self.network.get_claim_by_id(claim_id), accounts,
claim_search(**kwargs), accounts,
include_purchase_receipt=include_purchase_receipt,
include_is_my_output=include_is_my_output,
)
txos = inflated[0]
if txos:
return txos[0]
async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
for claim in (await self.claim_search(accounts, claim_id=claim_id, **kwargs))[0]:
return claim
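Both resolve and claim_search select their backend the same way: bind the alternative server with functools.partial up front, then call the chosen callable uniformly. A reduced sketch of that selection (the search wrapper itself is hypothetical; new_claim_search and claim_search mirror the network methods in the diff):

    from functools import partial

    async def search(network, new_sdk_server=None, **kwargs):
        if new_sdk_server:
            # route the query to the alternative hub endpoint
            do_search = partial(network.new_claim_search, new_sdk_server)
        else:
            # use the regular wallet-server RPC
            do_search = network.claim_search
        return await do_search(**kwargs)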
async def _report_state(self):
try:
@ -938,7 +898,9 @@ class Ledger(metaclass=LedgerRegistry):
"%d change addresses (gap: %d), %d channels, %d certificates and %d claims. ",
account.id, balance, total_receiving, account.receiving.gap, total_change,
account.change.gap, channel_count, len(account.channel_keys), claim_count)
except Exception:
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception(
'Failed to display wallet state, please file issue '
'for this bug along with the traceback you see below:')
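The isinstance(err, asyncio.CancelledError) re-raise above exists because on Python < 3.8 CancelledError still inherited from Exception, so a broad except Exception would otherwise swallow task cancellation. A minimal illustration of the pattern:

    import asyncio
    import logging

    log = logging.getLogger(__name__)

    async def run_safely(coro_fn):
        try:
            await coro_fn()
        except Exception as err:
            if isinstance(err, asyncio.CancelledError):  # unnecessary on Python >= 3.8
                raise
            log.exception("operation failed, continuing anyway")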
@ -961,7 +923,9 @@ class Ledger(metaclass=LedgerRegistry):
claim_ids = [p.purchased_claim_id for p in purchases]
try:
resolved, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
except Exception:
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception("Resolve failed while looking up purchased claim ids:")
resolved = []
lookup = {claim.claim_id: claim for claim in resolved}
@ -1041,7 +1005,9 @@ class Ledger(metaclass=LedgerRegistry):
claim_ids = collection.claim.collection.claims.ids[offset:page_size + offset]
try:
resolve_results, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
except Exception:
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception("Resolve failed while looking up collection claim ids:")
return []
claims = []
@ -1056,10 +1022,8 @@ class Ledger(metaclass=LedgerRegistry):
claims.append(None)
return claims
async def get_collections(self, resolve_claims=0, resolve=False, **constraints):
async def get_collections(self, resolve_claims=0, **constraints):
collections = await self.db.get_collections(**constraints)
if resolve:
collections = await self._resolve_for_local_results(constraints.get('accounts', []), collections)
if resolve_claims > 0:
for collection in collections:
collection.claims = await self.resolve_collection(collection, page_size=resolve_claims)
@ -1094,7 +1058,7 @@ class Ledger(metaclass=LedgerRegistry):
'abandon_info': [],
'purchase_info': []
}
is_my_inputs = all(txi.is_my_input for txi in tx.inputs)
is_my_inputs = all([txi.is_my_input for txi in tx.inputs])
if is_my_inputs:
# fees only matter if we are the ones paying them
item['value'] = dewies_to_lbc(tx.net_account_balance + tx.fee)
@ -1205,7 +1169,7 @@ class Ledger(metaclass=LedgerRegistry):
balance = self._balance_cache.get(account.id)
if not balance:
balance = self._balance_cache[account.id] = \
await account.get_detailed_balance(confirmations)
await account.get_detailed_balance(confirmations, reserved_subtotals=True)
for key, value in balance.items():
if key == 'reserved_subtotals':
for subkey, subvalue in value.items():
@ -1214,7 +1178,6 @@ class Ledger(metaclass=LedgerRegistry):
result[key] += value
return result
class TestNetLedger(Ledger):
network_name = 'testnet'
pubkey_address_prefix = bytes((111,))
@ -1223,7 +1186,6 @@ class TestNetLedger(Ledger):
extended_private_key_prefix = unhexlify('04358394')
checkpoints = {}
class RegTestLedger(Ledger):
network_name = 'regtest'
headers_class = UnvalidatedHeaders

View file

@ -3,21 +3,20 @@ import json
import typing
import logging
import asyncio
from binascii import unhexlify
from decimal import Decimal
from typing import List, Type, MutableSequence, MutableMapping, Optional
from lbry.error import KeyFeeAboveMaxAllowedError, WalletNotLoadedError
from lbry.conf import Config, NOT_SET
from lbry.error import KeyFeeAboveMaxAllowedError
from lbry.conf import Config
from lbry.wallet.dewies import dewies_to_lbc
from lbry.wallet.account import Account
from lbry.wallet.ledger import Ledger, LedgerRegistry
from lbry.wallet.transaction import Transaction, Output
from lbry.wallet.database import Database
from lbry.wallet.wallet import Wallet, WalletStorage, ENCRYPT_ON_DISK
from lbry.wallet.rpc.jsonrpc import CodeMessageError
from .dewies import dewies_to_lbc
from .account import Account
from .ledger import Ledger, LedgerRegistry
from .transaction import Transaction, Output
from .database import Database
from .wallet import Wallet, WalletStorage, ENCRYPT_ON_DISK
from .rpc.jsonrpc import CodeMessageError
if typing.TYPE_CHECKING:
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
@ -96,7 +95,7 @@ class WalletManager:
for wallet in self.wallets:
if wallet.id == wallet_id:
return wallet
raise WalletNotLoadedError(wallet_id)
raise ValueError(f"Couldn't find wallet: {wallet_id}.")
@staticmethod
def get_balance(wallet):
@ -183,17 +182,10 @@ class WalletManager:
ledger_config = {
'auto_connect': True,
'explicit_servers': [],
'hub_timeout': config.hub_timeout,
'default_servers': config.lbryum_servers,
'known_hubs': config.known_hubs,
'jurisdiction': config.jurisdiction,
'concurrent_hub_requests': config.concurrent_hub_requests,
'data_path': config.wallet_dir,
'tx_cache_size': config.transaction_cache_size
}
if 'LBRY_FEE_PER_NAME_CHAR' in os.environ:
ledger_config['fee_per_name_char'] = int(os.environ.get('LBRY_FEE_PER_NAME_CHAR'))
wallets_directory = os.path.join(config.wallet_dir, 'wallets')
if not os.path.exists(wallets_directory):
@ -203,10 +195,6 @@ class WalletManager:
os.path.join(wallets_directory, 'default_wallet')
)
if Config.lbryum_servers.is_set_to_default(config):
with config.update_config() as c:
c.lbryum_servers = NOT_SET
manager = cls.from_config({
'ledgers': {ledger_id: ledger_config},
'wallets': [
@ -237,16 +225,9 @@ class WalletManager:
async def reset(self):
self.ledger.config = {
'auto_connect': True,
'explicit_servers': [],
'default_servers': Config.lbryum_servers.default,
'known_hubs': self.config.known_hubs,
'jurisdiction': self.config.jurisdiction,
'hub_timeout': self.config.hub_timeout,
'concurrent_hub_requests': self.config.concurrent_hub_requests,
'default_servers': self.config.lbryum_servers,
'data_path': self.config.wallet_dir,
}
if Config.lbryum_servers.is_set(self.config):
self.ledger.config['explicit_servers'] = self.config.lbryum_servers
await self.ledger.stop()
await self.ledger.start()
@ -317,4 +298,10 @@ class WalletManager:
)
async def broadcast_or_release(self, tx, blocking=False):
await self.ledger.broadcast_or_release(tx, blocking=blocking)
try:
await self.ledger.broadcast(tx)
except:
await self.ledger.release_tx(tx)
raise
if blocking:
await self.ledger.wait(tx, timeout=None)

View file

@ -2,7 +2,6 @@ import logging
import asyncio
import json
import socket
import random
from time import perf_counter
from collections import defaultdict
from typing import Dict, Optional, Tuple
@ -13,14 +12,13 @@ from lbry.utils import resolve_host
from lbry.error import IncompatibleWalletServerError
from lbry.wallet.rpc import RPCSession as BaseClientSession, Connector, RPCError, ProtocolError
from lbry.wallet.stream import StreamController
from lbry.wallet.udp import SPVStatusClientProtocol, SPVPong
from lbry.conf import KnownHubsList
from lbry.wallet.server.udp import SPVStatusClientProtocol, SPVPong
log = logging.getLogger(__name__)
class ClientSession(BaseClientSession):
def __init__(self, *args, network: 'Network', server, timeout=30, concurrency=32, **kwargs):
def __init__(self, *args, network: 'Network', server, timeout=30, **kwargs):
self.network = network
self.server = server
super().__init__(*args, **kwargs)
@ -30,11 +28,7 @@ class ClientSession(BaseClientSession):
self.response_time: Optional[float] = None
self.connection_latency: Optional[float] = None
self._response_samples = 0
self._concurrency = asyncio.Semaphore(concurrency)
@property
def concurrency(self):
return self._concurrency._value
self.pending_amount = 0
@property
def available(self):
@ -60,9 +54,9 @@ class ClientSession(BaseClientSession):
return result
async def send_request(self, method, args=()):
self.pending_amount += 1
log.debug("send %s%s to %s:%i (%i timeout)", method, tuple(args), self.server[0], self.server[1], self.timeout)
try:
await self._concurrency.acquire()
if method == 'server.version':
return await self.send_timed_server_version_request(args, self.timeout)
request = asyncio.ensure_future(super().send_request(method, args))
@ -96,7 +90,7 @@ class ClientSession(BaseClientSession):
# self.synchronous_close()
raise
finally:
self._concurrency.release()
self.pending_amount -= 1
async def ensure_server_version(self, required=None, timeout=3):
required = required or self.network.PROTOCOL_VERSION
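One side of the diff caps in-flight requests per session with an asyncio.Semaphore acquired around send_request and released in the finally block. A self-contained sketch of that technique (LimitedSession and its send parameter are illustrative, not the real class):

    import asyncio

    class LimitedSession:
        """Cap how many RPC requests may be in flight on one connection."""

        def __init__(self, send, concurrency: int = 32):
            self._send = send                        # stand-in for the underlying request coroutine
            self._semaphore = asyncio.Semaphore(concurrency)

        async def send_request(self, method, args=()):
            async with self._semaphore:              # released even if the request raises
                return await self._send(method, args)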
@ -117,9 +111,9 @@ class ClientSession(BaseClientSession):
)
else:
await asyncio.sleep(max(0, max_idle - (now - self.last_send)))
except (Exception, asyncio.CancelledError) as err:
except Exception as err:
if isinstance(err, asyncio.CancelledError):
log.info("closing connection to %s:%i", *self.server)
log.warning("closing connection to %s:%i", *self.server)
else:
log.exception("lost connection to spv")
finally:
@ -137,7 +131,7 @@ class ClientSession(BaseClientSession):
controller.add(request.args)
def connection_lost(self, exc):
log.debug("Connection lost: %s:%d", *self.server)
log.warning("Connection lost: %s:%d", *self.server)
super().connection_lost(exc)
self.response_time = None
self.connection_latency = None
@ -159,6 +153,7 @@ class Network:
# self._switch_task: Optional[asyncio.Task] = None
self.running = False
self.remote_height: int = 0
self._concurrency = asyncio.Semaphore(16)
self._on_connected_controller = StreamController()
self.on_connected = self._on_connected_controller.stream
@ -169,13 +164,9 @@ class Network:
self._on_status_controller = StreamController(merge_repeated_events=True)
self.on_status = self._on_status_controller.stream
self._on_hub_controller = StreamController(merge_repeated_events=True)
self.on_hub = self._on_hub_controller.stream
self.subscription_controllers = {
'blockchain.headers.subscribe': self._on_header_controller,
'blockchain.address.subscribe': self._on_status_controller,
'blockchain.peers.subscribe': self._on_hub_controller,
}
self.aiohttp_session: Optional[aiohttp.ClientSession] = None
@ -187,16 +178,6 @@ class Network:
def config(self):
return self.ledger.config
@property
def known_hubs(self):
if 'known_hubs' not in self.config:
return KnownHubsList()
return self.config['known_hubs']
@property
def jurisdiction(self):
return self.config.get("jurisdiction")
def disconnect(self):
if self._keepalive_task and not self._keepalive_task.done():
self._keepalive_task.cancel()
@ -207,14 +188,13 @@ class Network:
self.running = True
self.aiohttp_session = aiohttp.ClientSession()
self.on_header.listen(self._update_remote_height)
self.on_hub.listen(self._update_hubs)
self._loop_task = asyncio.create_task(self.network_loop())
self._urgent_need_reconnect.set()
def loop_task_done_callback(f):
try:
f.result()
except (Exception, asyncio.CancelledError):
except Exception:
if self.running:
log.exception("wallet server connection loop crashed")
@ -235,25 +215,18 @@ class Network:
log.exception("error looking up dns for spv server %s:%i", server, port)
# accumulate the dns results
if self.config.get('explicit_servers', []):
hubs = self.config['explicit_servers']
elif self.known_hubs:
hubs = self.known_hubs
else:
hubs = self.config['default_servers']
await asyncio.gather(*(resolve_spv(server, port) for (server, port) in hubs))
await asyncio.gather(*(resolve_spv(server, port) for (server, port) in self.config['default_servers']))
return hostname_to_ip, ip_to_hostnames
async def get_n_fastest_spvs(self, timeout=3.0) -> Dict[Tuple[str, int], Optional[SPVPong]]:
async def get_n_fastest_spvs(self, n=5, timeout=3.0) -> Dict[Tuple[str, int], SPVPong]:
loop = asyncio.get_event_loop()
pong_responses = asyncio.Queue()
connection = SPVStatusClientProtocol(pong_responses)
sent_ping_timestamps = {}
_, ip_to_hostnames = await self.resolve_spv_dns()
n = len(ip_to_hostnames)
log.info("%i possible spv servers to try (%i urls in config)", n, len(self.config.get('explicit_servers', [])))
log.info("%i possible spv servers to try (%i urls in config)", len(ip_to_hostnames),
len(self.config['default_servers']))
pongs = {}
known_hubs = self.known_hubs
try:
await loop.create_datagram_endpoint(lambda: connection, ('0.0.0.0', 0))
# could raise OSError if it cant bind
@ -268,39 +241,28 @@ class Network:
'/'.join(ip_to_hostnames[remote]), remote[1], round(latency * 1000, 2),
pong.available, pong.height)
known_hubs.hubs.setdefault((ip_to_hostnames[remote][0], remote[1]), {}).update(
{"country": pong.country_name}
)
if pong.available:
pongs[(ip_to_hostnames[remote][0], remote[1])] = pong
pongs[remote] = pong
return pongs
except asyncio.TimeoutError:
if pongs:
log.info("%i/%i probed spv servers are accepting connections", len(pongs), len(ip_to_hostnames))
return pongs
else:
log.warning("%i spv status probes failed, retrying later. servers tried: %s",
len(sent_ping_timestamps),
', '.join('/'.join(hosts) + f' ({ip})' for ip, hosts in ip_to_hostnames.items()))
random_server = random.choice(list(ip_to_hostnames.keys()))
host, port = random_server
log.warning("trying fallback to randomly selected spv: %s:%i", host, port)
known_hubs.hubs.setdefault((host, port), {})
return {(host, port): None}
return pongs
finally:
connection.close()
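get_n_fastest_spvs above fans out UDP pings and then drains a queue of pong responses until an overall deadline passes. A reduced sketch of the collection loop, assuming responses is an asyncio.Queue of (address, pong) pairs fed by the real SPVStatusClientProtocol:

    import asyncio
    from time import perf_counter

    async def collect_pongs(responses: asyncio.Queue, expected: int, timeout: float = 3.0):
        """Drain (address, pong) pairs until every probed server replied or time runs out."""
        pongs = {}
        deadline = perf_counter() + timeout
        try:
            while len(pongs) < expected:
                remaining = deadline - perf_counter()
                remote, pong = await asyncio.wait_for(responses.get(), timeout=remaining)
                pongs[remote] = pong
        except asyncio.TimeoutError:
            pass  # return whatever arrived before the deadline
        return pongs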
async def connect_to_fastest(self) -> Optional[ClientSession]:
fastest_spvs = await self.get_n_fastest_spvs()
for (host, port), pong in fastest_spvs.items():
if (pong is not None and self.jurisdiction is not None) and \
(pong.country_name != self.jurisdiction):
continue
client = ClientSession(network=self, server=(host, port), timeout=self.config.get('hub_timeout', 30),
concurrency=self.config.get('concurrent_hub_requests', 30))
for (host, port) in fastest_spvs:
client = ClientSession(network=self, server=(host, port))
try:
await client.create_connection()
log.info("Connected to spv server %s:%i", host, port)
log.warning("Connected to spv server %s:%i", host, port)
await client.ensure_server_version()
return client
except (asyncio.TimeoutError, ConnectionError, OSError, IncompatibleWalletServerError, RPCError):
@ -312,8 +274,7 @@ class Network:
sleep_delay = 30
while self.running:
await asyncio.wait(
map(asyncio.create_task, [asyncio.sleep(30), self._urgent_need_reconnect.wait()]),
return_when=asyncio.FIRST_COMPLETED
[asyncio.sleep(30), self._urgent_need_reconnect.wait()], return_when=asyncio.FIRST_COMPLETED
)
if self._urgent_need_reconnect.is_set():
sleep_delay = 30
@ -328,8 +289,6 @@ class Network:
log.debug("get spv server features %s:%i", *client.server)
features = await client.send_request('server.features', [])
self.client, self.server_features = client, features
log.debug("discover other hubs %s:%i", *client.server)
await self._update_hubs(await client.send_request('server.peers.subscribe', []))
log.info("subscribe to headers %s:%i", *client.server)
self._update_remote_height((await self.subscribe_headers(),))
self._on_connected_controller.add(True)
@ -337,15 +296,13 @@ class Network:
log.info("maintaining connection to spv server %s", server_str)
self._keepalive_task = asyncio.create_task(self.client.keepalive_loop())
try:
if not self._urgent_need_reconnect.is_set():
await asyncio.wait(
[self._keepalive_task, asyncio.create_task(self._urgent_need_reconnect.wait())],
return_when=asyncio.FIRST_COMPLETED
)
else:
await self._keepalive_task
await asyncio.wait(
[self._keepalive_task, self._urgent_need_reconnect.wait()],
return_when=asyncio.FIRST_COMPLETED
)
if self._urgent_need_reconnect.is_set():
log.warning("urgent reconnect needed")
self._urgent_need_reconnect.clear()
if self._keepalive_task and not self._keepalive_task.done():
self._keepalive_task.cancel()
except asyncio.CancelledError:
@ -354,7 +311,7 @@ class Network:
self._keepalive_task = None
self.client = None
self.server_features = None
log.info("connection lost to %s", server_str)
log.warning("connection lost to %s", server_str)
log.info("network loop finished")
async def stop(self):
@ -380,30 +337,24 @@ class Network:
raise ConnectionError("Attempting to send rpc request when connection is not available.")
async def retriable_call(self, function, *args, **kwargs):
while self.running:
if not self.is_connected:
log.warning("Wallet server unavailable, waiting for it to come back and retry.")
self._urgent_need_reconnect.set()
await self.on_connected.first
try:
return await function(*args, **kwargs)
except asyncio.TimeoutError:
log.warning("Wallet server call timed out, retrying.")
except ConnectionError:
log.warning("connection error")
async with self._concurrency:
while self.running:
if not self.is_connected:
log.warning("Wallet server unavailable, waiting for it to come back and retry.")
self._urgent_need_reconnect.set()
await self.on_connected.first
try:
return await function(*args, **kwargs)
except asyncio.TimeoutError:
log.warning("Wallet server call timed out, retrying.")
except ConnectionError:
log.warning("connection error")
raise asyncio.CancelledError() # if we got here, we are shutting down
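retriable_call reads as: if disconnected, flag an urgent reconnect and wait for it; otherwise make the call and retry on timeouts or connection errors until the network stops running. A sketch that mirrors the diff, assuming network exposes running, is_connected, _urgent_need_reconnect and on_connected as shown above:

    import asyncio
    import logging

    log = logging.getLogger(__name__)

    async def retriable_call(network, function, *args, **kwargs):
        """Retry an RPC until it succeeds, reconnecting whenever the hub drops."""
        while network.running:
            if not network.is_connected:
                log.warning("wallet server unavailable, waiting for it to come back")
                network._urgent_need_reconnect.set()
                await network.on_connected.first
            try:
                return await function(*args, **kwargs)
            except asyncio.TimeoutError:
                log.warning("wallet server call timed out, retrying")
            except ConnectionError:
                log.warning("connection error, retrying")
        raise asyncio.CancelledError()  # only reached while shutting down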
def _update_remote_height(self, header_args):
self.remote_height = header_args[0]["height"]
async def _update_hubs(self, hubs):
if hubs and hubs != ['']:
try:
if self.known_hubs.add_hubs(hubs):
self.known_hubs.save()
except Exception:
log.exception("could not add hubs: %s", hubs)
def get_transaction(self, tx_hash, known_height=None):
# use any server if its old, otherwise restrict to who gave us the history
restricted = known_height in (None, -1, 0) or 0 > known_height > self.remote_height - 10
@ -461,11 +412,8 @@ class Network:
def get_server_features(self):
return self.rpc('server.features', (), restricted=True)
# def get_claims_by_ids(self, claim_ids):
# return self.rpc('blockchain.claimtrie.getclaimsbyids', claim_ids)
def get_claim_by_id(self, claim_id):
return self.rpc('blockchain.claimtrie.getclaimbyid', [claim_id])
def get_claims_by_ids(self, claim_ids):
return self.rpc('blockchain.claimtrie.getclaimsbyids', claim_ids)
def resolve(self, urls, session_override=None):
return self.rpc('blockchain.claimtrie.resolve', urls, False, session_override)
@ -473,6 +421,19 @@ class Network:
def claim_search(self, session_override=None, **kwargs):
return self.rpc('blockchain.claimtrie.search', kwargs, False, session_override)
async def new_resolve(self, server, urls):
message = {"method": "resolve", "params": {"urls": urls, "protobuf": True}}
async with self.aiohttp_session.post(server, json=message) as r:
result = await r.json()
return result['result']
async def new_claim_search(self, server, **kwargs):
kwargs['protobuf'] = True
message = {"method": "claim_search", "params": kwargs}
async with self.aiohttp_session.post(server, json=message) as r:
result = await r.json()
return result['result']
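new_resolve and new_claim_search are thin JSON-over-HTTP POSTs to the alternative hub. A self-contained sketch with aiohttp; the endpoint URL in the usage line is hypothetical, and protobuf=True matches the flag the diff sends:

    import aiohttp

    async def hub_post(server_url: str, method: str, **params):
        """POST a JSON message to a hub endpoint and return its 'result' field."""
        message = {"method": method, "params": {**params, "protobuf": True}}
        async with aiohttp.ClientSession() as session:
            async with session.post(server_url, json=message) as response:
                body = await response.json()
        return body['result']

    # usage (hypothetical endpoint):
    # outputs = await hub_post("http://localhost:50002/api", "resolve", urls=["lbry://@example"])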
async def sum_supports(self, server, **kwargs):
message = {"method": "support_sum", "params": kwargs}
async with self.aiohttp_session.post(server, json=message) as r:

View file

@ -1,2 +1,2 @@
from lbry.wallet.orchstr8.node import Conductor
from lbry.wallet.orchstr8.service import ConductorService
from .node import Conductor
from .service import ConductorService

View file

@ -5,9 +5,7 @@ import aiohttp
from lbry import wallet
from lbry.wallet.orchstr8.node import (
Conductor,
get_lbcd_node_from_ledger,
get_lbcwallet_node_from_ledger
Conductor, get_blockchain_node_from_ledger
)
from lbry.wallet.orchstr8.service import ConductorService
@ -18,11 +16,10 @@ def get_argument_parser():
)
subparsers = parser.add_subparsers(dest='command', help='sub-command help')
subparsers.add_parser("download", help="Download lbcd and lbcwallet node binaries.")
subparsers.add_parser("download", help="Download blockchain node binary.")
start = subparsers.add_parser("start", help="Start orchstr8 service.")
start.add_argument("--lbcd", help="Hostname to start lbcd node.")
start.add_argument("--lbcwallet", help="Hostname to start lbcwallet node.")
start.add_argument("--blockchain", help="Hostname to start blockchain node.")
start.add_argument("--spv", help="Hostname to start SPV server.")
start.add_argument("--wallet", help="Hostname to start wallet daemon.")
@ -50,8 +47,7 @@ def main():
if command == 'download':
logging.getLogger('blockchain').setLevel(logging.INFO)
get_lbcd_node_from_ledger(wallet).ensure()
get_lbcwallet_node_from_ledger(wallet).ensure()
get_blockchain_node_from_ledger(wallet).ensure()
elif command == 'generate':
loop.run_until_complete(run_remote_command(
@ -61,12 +57,9 @@ def main():
elif command == 'start':
conductor = Conductor()
if getattr(args, 'lbcd', False):
conductor.lbcd_node.hostname = args.lbcd
loop.run_until_complete(conductor.start_lbcd())
if getattr(args, 'lbcwallet', False):
conductor.lbcwallet_node.hostname = args.lbcwallet
loop.run_until_complete(conductor.start_lbcwallet())
if getattr(args, 'blockchain', False):
conductor.blockchain_node.hostname = args.blockchain
loop.run_until_complete(conductor.start_blockchain())
if getattr(args, 'spv', False):
conductor.spv_node.hostname = args.spv
loop.run_until_complete(conductor.start_spv())

View file

@ -1,4 +1,3 @@
# pylint: disable=import-error
import os
import json
import shutil
@ -8,44 +7,31 @@ import tarfile
import logging
import tempfile
import subprocess
import platform
import importlib
from binascii import hexlify
from typing import Type, Optional
import urllib.request
from uuid import uuid4
import lbry
from lbry.wallet.server.server import Server
from lbry.wallet.server.env import Env
from lbry.wallet import Wallet, Ledger, RegTestLedger, WalletManager, Account, BlockHeightEvent
from lbry.conf import KnownHubsList, Config
log = logging.getLogger(__name__)
try:
from hub.herald.env import ServerEnv
from hub.scribe.env import BlockchainEnv
from hub.elastic_sync.env import ElasticEnv
from hub.herald.service import HubServerService
from hub.elastic_sync.service import ElasticSyncService
from hub.scribe.service import BlockchainProcessorService
except ImportError:
pass
def get_spvserver_from_ledger(ledger_module):
spvserver_path, regtest_class_name = ledger_module.__spvserver__.rsplit('.', 1)
spvserver_module = importlib.import_module(spvserver_path)
return getattr(spvserver_module, regtest_class_name)
def get_lbcd_node_from_ledger(ledger_module):
return LBCDNode(
ledger_module.__lbcd_url__,
ledger_module.__lbcd__,
ledger_module.__lbcctl__
)
def get_lbcwallet_node_from_ledger(ledger_module):
return LBCWalletNode(
ledger_module.__lbcwallet_url__,
ledger_module.__lbcwallet__,
ledger_module.__lbcctl__
def get_blockchain_node_from_ledger(ledger_module):
return BlockchainNode(
ledger_module.__node_url__,
os.path.join(ledger_module.__node_bin__, ledger_module.__node_daemon__),
os.path.join(ledger_module.__node_bin__, ledger_module.__node_cli__)
)
@ -53,37 +39,40 @@ class Conductor:
def __init__(self, seed=None):
self.manager_module = WalletManager
self.lbcd_node = get_lbcd_node_from_ledger(lbry.wallet)
self.lbcwallet_node = get_lbcwallet_node_from_ledger(lbry.wallet)
self.spv_node = SPVNode()
self.spv_module = get_spvserver_from_ledger(lbry.wallet)
self.blockchain_node = get_blockchain_node_from_ledger(lbry.wallet)
self.spv_node = SPVNode(self.spv_module)
self.wallet_node = WalletNode(
self.manager_module, RegTestLedger, default_seed=seed
)
self.lbcd_started = False
self.lbcwallet_started = False
self.blockchain_started = False
self.spv_started = False
self.wallet_started = False
self.log = log.getChild('conductor')
async def start_lbcd(self):
if not self.lbcd_started:
await self.lbcd_node.start()
self.lbcd_started = True
async def start_blockchain(self):
if not self.blockchain_started:
asyncio.create_task(self.blockchain_node.start())
await self.blockchain_node.running.wait()
await self.blockchain_node.generate(200)
self.blockchain_started = True
async def stop_lbcd(self, cleanup=True):
if self.lbcd_started:
await self.lbcd_node.stop(cleanup)
self.lbcd_started = False
async def stop_blockchain(self):
if self.blockchain_started:
await self.blockchain_node.stop(cleanup=True)
self.blockchain_started = False
async def start_spv(self):
if not self.spv_started:
await self.spv_node.start(self.lbcwallet_node)
await self.spv_node.start(self.blockchain_node)
self.spv_started = True
async def stop_spv(self, cleanup=True):
async def stop_spv(self):
if self.spv_started:
await self.spv_node.stop(cleanup)
await self.spv_node.stop(cleanup=True)
self.spv_started = False
async def start_wallet(self):
@ -91,30 +80,13 @@ class Conductor:
await self.wallet_node.start(self.spv_node)
self.wallet_started = True
async def stop_wallet(self, cleanup=True):
async def stop_wallet(self):
if self.wallet_started:
await self.wallet_node.stop(cleanup)
await self.wallet_node.stop(cleanup=True)
self.wallet_started = False
async def start_lbcwallet(self, clean=True):
if not self.lbcwallet_started:
await self.lbcwallet_node.start()
if clean:
mining_addr = await self.lbcwallet_node.get_new_address()
self.lbcwallet_node.mining_addr = mining_addr
await self.lbcwallet_node.generate(200)
# unlock the wallet for the next 1 hour
await self.lbcwallet_node.wallet_passphrase("password", 3600)
self.lbcwallet_started = True
async def stop_lbcwallet(self, cleanup=True):
if self.lbcwallet_started:
await self.lbcwallet_node.stop(cleanup)
self.lbcwallet_started = False
async def start(self):
await self.start_lbcd()
await self.start_lbcwallet()
await self.start_blockchain()
await self.start_spv()
await self.start_wallet()
@ -122,8 +94,7 @@ class Conductor:
all_the_stops = [
self.stop_wallet,
self.stop_spv,
self.stop_lbcwallet,
self.stop_lbcd
self.stop_blockchain
]
for stop in all_the_stops:
try:
@ -131,18 +102,11 @@ class Conductor:
except Exception as e:
log.exception('Exception raised while stopping services:', exc_info=e)
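Stopping the stack is simply running each stop coroutine in reverse start order while making sure one failure cannot keep the rest from shutting down. A compact sketch of that loop:

    import logging

    log = logging.getLogger(__name__)

    async def stop_all(stops):
        """Await each async stop callable in order, logging failures instead of aborting."""
        for stop in stops:
            try:
                await stop()
            except Exception as e:
                log.exception('Exception raised while stopping services:', exc_info=e)

    # usage: await stop_all([stop_wallet, stop_spv, stop_lbcwallet, stop_lbcd])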
async def clear_mempool(self):
await self.stop_lbcwallet(cleanup=False)
await self.stop_lbcd(cleanup=False)
await self.start_lbcd()
await self.start_lbcwallet(clean=False)
class WalletNode:
def __init__(self, manager_class: Type[WalletManager], ledger_class: Type[Ledger],
verbose: bool = False, port: int = 5280, default_seed: str = None,
data_path: str = None) -> None:
verbose: bool = False, port: int = 5280, default_seed: str = None) -> None:
self.manager_class = manager_class
self.ledger_class = ledger_class
self.verbose = verbose
@ -150,34 +114,27 @@ class WalletNode:
self.ledger: Optional[Ledger] = None
self.wallet: Optional[Wallet] = None
self.account: Optional[Account] = None
self.data_path: str = data_path or tempfile.mkdtemp()
self.data_path: Optional[str] = None
self.port = port
self.default_seed = default_seed
self.known_hubs = KnownHubsList()
async def start(self, spv_node: 'SPVNode', seed=None, connect=True, config=None):
async def start(self, spv_node: 'SPVNode', seed=None, connect=True):
self.data_path = tempfile.mkdtemp()
wallets_dir = os.path.join(self.data_path, 'wallets')
os.mkdir(wallets_dir)
wallet_file_name = os.path.join(wallets_dir, 'my_wallet.json')
if not os.path.isdir(wallets_dir):
os.mkdir(wallets_dir)
with open(wallet_file_name, 'w') as wallet_file:
wallet_file.write('{"version": 1, "accounts": []}\n')
with open(wallet_file_name, 'w') as wallet_file:
wallet_file.write('{"version": 1, "accounts": []}\n')
self.manager = self.manager_class.from_config({
'ledgers': {
self.ledger_class.get_id(): {
'api_port': self.port,
'explicit_servers': [(spv_node.hostname, spv_node.port)],
'default_servers': Config.lbryum_servers.default,
'data_path': self.data_path,
'known_hubs': config.known_hubs if config else KnownHubsList(),
'hub_timeout': 30,
'concurrent_hub_requests': 32,
'fee_per_name_char': 200000
'default_servers': [(spv_node.hostname, spv_node.port)],
'data_path': self.data_path
}
},
'wallets': [wallet_file_name]
})
self.manager.config = config
self.ledger = self.manager.ledgers[self.ledger_class]
self.wallet = self.manager.default_wallet
if not self.wallet:
@ -203,83 +160,44 @@ class WalletNode:
class SPVNode:
def __init__(self, node_number=1):
self.node_number = node_number
def __init__(self, coin_class, node_number=1):
self.coin_class = coin_class
self.controller = None
self.data_path = None
self.server: Optional[HubServerService] = None
self.writer: Optional[BlockchainProcessorService] = None
self.es_writer: Optional[ElasticSyncService] = None
self.server = None
self.hostname = 'localhost'
self.port = 50001 + node_number # avoid conflict with default daemon
self.udp_port = self.port
self.elastic_notifier_port = 19080 + node_number
self.elastic_services = f'localhost:9200/localhost:{self.elastic_notifier_port}'
self.session_timeout = 600
self.stopped = True
self.index_name = uuid4().hex
self.rpc_port = '0' # disabled by default
async def start(self, lbcwallet_node: 'LBCWalletNode', extraconf=None):
if not self.stopped:
log.warning("spv node is already running")
return
self.stopped = False
try:
self.data_path = tempfile.mkdtemp()
conf = {
'description': '',
'payment_address': '',
'daily_fee': '0',
'db_dir': self.data_path,
'daemon_url': lbcwallet_node.rpc_url,
'reorg_limit': 100,
'host': self.hostname,
'tcp_port': self.port,
'udp_port': self.udp_port,
'elastic_services': self.elastic_services,
'session_timeout': self.session_timeout,
'max_query_workers': 0,
'es_index_prefix': self.index_name,
'chain': 'regtest',
'index_address_status': False
}
if extraconf:
conf.update(extraconf)
self.writer = BlockchainProcessorService(
BlockchainEnv(db_dir=self.data_path, daemon_url=lbcwallet_node.rpc_url,
reorg_limit=100, max_query_workers=0, chain='regtest', index_address_status=False)
)
self.server = HubServerService(ServerEnv(**conf))
self.es_writer = ElasticSyncService(
ElasticEnv(
db_dir=self.data_path, reorg_limit=100, max_query_workers=0, chain='regtest',
elastic_notifier_port=self.elastic_notifier_port,
es_index_prefix=self.index_name,
filtering_channel_ids=(extraconf or {}).get('filtering_channel_ids'),
blocking_channel_ids=(extraconf or {}).get('blocking_channel_ids')
)
)
await self.writer.start()
await self.es_writer.start()
await self.server.start()
except Exception as e:
self.stopped = True
log.exception("failed to start spv node")
raise e
async def start(self, blockchain_node: 'BlockchainNode', extraconf=None):
self.data_path = tempfile.mkdtemp()
conf = {
'DESCRIPTION': '',
'PAYMENT_ADDRESS': '',
'DAILY_FEE': '0',
'DB_DIRECTORY': self.data_path,
'DAEMON_URL': blockchain_node.rpc_url,
'REORG_LIMIT': '100',
'HOST': self.hostname,
'TCP_PORT': str(self.port),
'SESSION_TIMEOUT': str(self.session_timeout),
'MAX_QUERY_WORKERS': '0',
'INDIVIDUAL_TAG_INDEXES': '',
'RPC_PORT': self.rpc_port
}
if extraconf:
conf.update(extraconf)
# TODO: don't use os.environ
os.environ.update(conf)
self.server = Server(Env(self.coin_class))
self.server.mempool.refresh_secs = self.server.bp.prefetcher.polling_delay = 0.5
await self.server.start()
async def stop(self, cleanup=True):
if self.stopped:
log.warning("spv node is already stopped")
return
try:
await self.server.stop()
await self.es_writer.delete_index()
await self.es_writer.stop()
await self.writer.stop()
self.stopped = True
except Exception as e:
log.exception("failed to stop spv node")
raise e
finally:
cleanup and self.cleanup()
@ -287,19 +205,18 @@ class SPVNode:
shutil.rmtree(self.data_path, ignore_errors=True)
class LBCDProcess(asyncio.SubprocessProtocol):
class BlockchainProcess(asyncio.SubprocessProtocol):
IGNORE_OUTPUT = [
b'keypool keep',
b'keypool reserve',
b'keypool return',
b'Block submitted',
]
def __init__(self):
self.ready = asyncio.Event()
self.stopped = asyncio.Event()
self.log = log.getChild('lbcd')
self.log = log.getChild('blockchain')
def pipe_data_received(self, fd, data):
if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):
@ -310,7 +227,7 @@ class LBCDProcess(asyncio.SubprocessProtocol):
if b'Error:' in data:
self.ready.set()
raise SystemError(data.decode())
if b'RPCS: RPC server listening on' in data:
if b'Done loading' in data:
self.ready.set()
def process_exited(self):
@ -318,57 +235,39 @@ class LBCDProcess(asyncio.SubprocessProtocol):
self.ready.set()
class WalletProcess(asyncio.SubprocessProtocol):
class BlockchainNode:
IGNORE_OUTPUT = [
]
P2SH_SEGWIT_ADDRESS = "p2sh-segwit"
BECH32_ADDRESS = "bech32"
def __init__(self):
self.ready = asyncio.Event()
self.stopped = asyncio.Event()
self.log = log.getChild('lbcwallet')
self.transport: Optional[asyncio.transports.SubprocessTransport] = None
def pipe_data_received(self, fd, data):
if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):
if b'Error:' in data:
self.log.error(data.decode())
else:
self.log.info(data.decode())
if b'Error:' in data:
self.ready.set()
raise SystemError(data.decode())
if b'WLLT: Finished rescan' in data:
self.ready.set()
def process_exited(self):
self.stopped.set()
self.ready.set()
class LBCDNode:
def __init__(self, url, daemon, cli):
self.latest_release_url = url
self.project_dir = os.path.dirname(os.path.dirname(__file__))
self.bin_dir = os.path.join(self.project_dir, 'bin')
self.daemon_bin = os.path.join(self.bin_dir, daemon)
self.cli_bin = os.path.join(self.bin_dir, cli)
self.log = log.getChild('lbcd')
self.data_path = tempfile.mkdtemp()
self.log = log.getChild('blockchain')
self.data_path = None
self.protocol = None
self.transport = None
self.block_expected = 0
self.hostname = 'localhost'
self.peerport = 29246
self.rpcport = 29245
self.peerport = 9246 + 2 # avoid conflict with default peer port
self.rpcport = 9245 + 2 # avoid conflict with default rpc port
self.rpcuser = 'rpcuser'
self.rpcpassword = 'rpcpassword'
self.stopped = True
self.stopped = False
self.restart_ready = asyncio.Event()
self.restart_ready.set()
self.running = asyncio.Event()
@property
def rpc_url(self):
return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.rpcport}/'
def is_expected_block(self, e: BlockHeightEvent):
return self.block_expected == e.height
@property
def exists(self):
return (
@ -377,12 +276,6 @@ class LBCDNode:
)
def download(self):
uname = platform.uname()
target_os = str.lower(uname.system)
target_arch = str.replace(uname.machine, 'x86_64', 'amd64')
target_platform = target_os + '_' + target_arch
self.latest_release_url = str.replace(self.latest_release_url, 'TARGET_PLATFORM', target_platform)
downloaded_file = os.path.join(
self.bin_dir,
self.latest_release_url[self.latest_release_url.rfind('/')+1:]
@ -415,207 +308,73 @@ class LBCDNode:
def ensure(self):
return self.exists or self.download()
async def start(self):
if not self.stopped:
return
self.stopped = False
try:
assert self.ensure()
loop = asyncio.get_event_loop()
asyncio.get_child_watcher().attach_loop(loop)
command = [
self.daemon_bin,
'--notls',
f'--datadir={self.data_path}',
'--regtest', f'--listen=127.0.0.1:{self.peerport}', f'--rpclisten=127.0.0.1:{self.rpcport}',
'--txindex', f'--rpcuser={self.rpcuser}', f'--rpcpass={self.rpcpassword}'
]
self.log.info(' '.join(command))
self.transport, self.protocol = await loop.subprocess_exec(
LBCDProcess, *command
)
await self.protocol.ready.wait()
assert not self.protocol.stopped.is_set()
self.running.set()
except asyncio.CancelledError:
self.running.clear()
self.stopped = True
raise
except Exception as e:
self.running.clear()
self.stopped = True
log.exception('failed to start lbcd', exc_info=e)
raise
async def stop(self, cleanup=True):
if self.stopped:
return
try:
if self.transport:
self.transport.terminate()
await self.protocol.stopped.wait()
self.transport.close()
except Exception as e:
log.exception('failed to stop lbcd', exc_info=e)
raise
finally:
self.log.info("Done shutting down " + self.daemon_bin)
self.stopped = True
if cleanup:
self.cleanup()
self.running.clear()
def cleanup(self):
assert self.stopped
shutil.rmtree(self.data_path, ignore_errors=True)
class LBCWalletNode:
P2SH_SEGWIT_ADDRESS = "p2sh-segwit"
BECH32_ADDRESS = "bech32"
def __init__(self, url, lbcwallet, cli):
self.latest_release_url = url
self.project_dir = os.path.dirname(os.path.dirname(__file__))
self.bin_dir = os.path.join(self.project_dir, 'bin')
self.lbcwallet_bin = os.path.join(self.bin_dir, lbcwallet)
self.cli_bin = os.path.join(self.bin_dir, cli)
self.log = log.getChild('lbcwallet')
self.protocol = None
self.transport = None
self.hostname = 'localhost'
self.lbcd_rpcport = 29245
self.lbcwallet_rpcport = 29244
self.rpcuser = 'rpcuser'
self.rpcpassword = 'rpcpassword'
self.data_path = tempfile.mkdtemp()
self.stopped = True
self.running = asyncio.Event()
self.block_expected = 0
self.mining_addr = ''
@property
def rpc_url(self):
# FIXME: somehow the hub/sdk doesn't learn the blocks through the wallet RPC port, why?
# return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.lbcwallet_rpcport}/'
return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.lbcd_rpcport}/'
def is_expected_block(self, e: BlockHeightEvent):
return self.block_expected == e.height
@property
def exists(self):
return (
os.path.exists(self.lbcwallet_bin)
)
def download(self):
uname = platform.uname()
target_os = str.lower(uname.system)
target_arch = str.replace(uname.machine, 'x86_64', 'amd64')
target_platform = target_os + '_' + target_arch
self.latest_release_url = str.replace(self.latest_release_url, 'TARGET_PLATFORM', target_platform)
downloaded_file = os.path.join(
self.bin_dir,
self.latest_release_url[self.latest_release_url.rfind('/')+1:]
)
if not os.path.exists(self.bin_dir):
os.mkdir(self.bin_dir)
if not os.path.exists(downloaded_file):
self.log.info('Downloading: %s', self.latest_release_url)
with urllib.request.urlopen(self.latest_release_url) as response:
with open(downloaded_file, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
self.log.info('Extracting: %s', downloaded_file)
if downloaded_file.endswith('.zip'):
with zipfile.ZipFile(downloaded_file) as dotzip:
dotzip.extractall(self.bin_dir)
# zipfile bug https://bugs.python.org/issue15795
os.chmod(self.lbcwallet_bin, 0o755)
elif downloaded_file.endswith('.tar.gz'):
with tarfile.open(downloaded_file) as tar:
tar.extractall(self.bin_dir)
return self.exists
def ensure(self):
return self.exists or self.download()
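Both node helpers build their release URL by substituting the current OS and architecture into a TARGET_PLATFORM placeholder before downloading and unpacking the archive. A minimal sketch of the substitution step; the example URL is hypothetical:

    import platform

    def resolve_release_url(template: str) -> str:
        """Fill a TARGET_PLATFORM placeholder with e.g. 'linux_amd64' or 'darwin_arm64'."""
        uname = platform.uname()
        target_os = uname.system.lower()
        target_arch = uname.machine.replace('x86_64', 'amd64')
        return template.replace('TARGET_PLATFORM', f'{target_os}_{target_arch}')

    # usage (hypothetical URL):
    # url = resolve_release_url('https://example.com/lbcwallet_TARGET_PLATFORM.tar.gz')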
async def start(self):
assert self.ensure()
self.data_path = tempfile.mkdtemp()
loop = asyncio.get_event_loop()
asyncio.get_child_watcher().attach_loop(loop)
command = [
self.lbcwallet_bin,
'--noservertls', '--noclienttls',
'--regtest',
f'--rpcconnect=127.0.0.1:{self.lbcd_rpcport}', f'--rpclisten=127.0.0.1:{self.lbcwallet_rpcport}',
'--createtemp', f'--appdata={self.data_path}',
f'--username={self.rpcuser}', f'--password={self.rpcpassword}'
self.daemon_bin,
f'-datadir={self.data_path}', '-printtoconsole', '-regtest', '-server', '-txindex',
f'-rpcuser={self.rpcuser}', f'-rpcpassword={self.rpcpassword}', f'-rpcport={self.rpcport}',
f'-port={self.peerport}'
]
self.log.info(' '.join(command))
try:
self.transport, self.protocol = await loop.subprocess_exec(
WalletProcess, *command
)
self.protocol.transport = self.transport
await self.protocol.ready.wait()
assert not self.protocol.stopped.is_set()
self.running.set()
self.stopped = False
except asyncio.CancelledError:
self.running.clear()
raise
except Exception as e:
self.running.clear()
log.exception('failed to start lbcwallet', exc_info=e)
def cleanup(self):
assert self.stopped
shutil.rmtree(self.data_path, ignore_errors=True)
while not self.stopped:
if self.running.is_set():
await asyncio.sleep(1)
continue
await self.restart_ready.wait()
try:
self.transport, self.protocol = await loop.subprocess_exec(
BlockchainProcess, *command
)
await self.protocol.ready.wait()
assert not self.protocol.stopped.is_set()
self.running.set()
except asyncio.CancelledError:
self.running.clear()
raise
except Exception as e:
self.running.clear()
log.exception('failed to start lbrycrdd', exc_info=e)
async def stop(self, cleanup=True):
if self.stopped:
return
self.stopped = True
try:
self.transport.terminate()
await self.protocol.stopped.wait()
self.transport.close()
except Exception as e:
log.exception('failed to stop lbcwallet', exc_info=e)
raise
finally:
self.log.info("Done shutting down " + self.lbcwallet_bin)
self.stopped = True
if cleanup:
self.cleanup()
self.running.clear()
async def clear_mempool(self):
self.restart_ready.clear()
self.transport.terminate()
await self.protocol.stopped.wait()
self.transport.close()
self.running.clear()
os.remove(os.path.join(self.data_path, 'regtest', 'mempool.dat'))
self.restart_ready.set()
await self.running.wait()
def cleanup(self):
shutil.rmtree(self.data_path, ignore_errors=True)
async def _cli_cmnd(self, *args):
cmnd_args = [
self.cli_bin,
f'--rpcuser={self.rpcuser}', f'--rpcpass={self.rpcpassword}', '--notls', '--regtest', '--wallet'
self.cli_bin, f'-datadir={self.data_path}', '-regtest',
f'-rpcuser={self.rpcuser}', f'-rpcpassword={self.rpcpassword}', f'-rpcport={self.rpcport}'
] + list(args)
self.log.info(' '.join(cmnd_args))
loop = asyncio.get_event_loop()
asyncio.get_child_watcher().attach_loop(loop)
process = await asyncio.create_subprocess_exec(
*cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
*cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
out, err = await process.communicate()
out, _ = await process.communicate()
result = out.decode().strip()
err = err.decode().strip()
if len(result) <= 0 and err.startswith('-'):
raise Exception(err)
if err and 'creating a default config file' not in err:
log.warning(err)
self.log.info(result)
if result.startswith('error code'):
raise Exception(result)
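_cli_cmnd shells out to the node's command-line client, captures its output, and treats certain strings as errors. A trimmed, generic sketch of that pattern; the 'error code' and leading '-' conventions are specific to these CLIs:

    import asyncio
    import subprocess

    async def run_cli(*cmd: str) -> str:
        """Run a CLI command, return its stdout, and raise if it reports an error."""
        process = await asyncio.create_subprocess_exec(
            *cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        out, err = await process.communicate()
        result, error = out.decode().strip(), err.decode().strip()
        if not result and error.startswith('-'):
            raise Exception(error)
        if result.startswith('error code'):
            raise Exception(result)
        return result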
@ -623,14 +382,7 @@ class LBCWalletNode:
def generate(self, blocks):
self.block_expected += blocks
return self._cli_cmnd('generatetoaddress', str(blocks), self.mining_addr)
def generate_to_address(self, blocks, addr):
self.block_expected += blocks
return self._cli_cmnd('generatetoaddress', str(blocks), addr)
def wallet_passphrase(self, passphrase, timeout):
return self._cli_cmnd('walletpassphrase', passphrase, str(timeout))
return self._cli_cmnd('generate', str(blocks))
def invalidate_block(self, blockhash):
return self._cli_cmnd('invalidateblock', blockhash)
@ -647,11 +399,11 @@ class LBCWalletNode:
def get_raw_change_address(self):
return self._cli_cmnd('getrawchangeaddress')
def get_new_address(self, address_type='legacy'):
def get_new_address(self, address_type):
return self._cli_cmnd('getnewaddress', "", address_type)
async def get_balance(self):
return await self._cli_cmnd('getbalance')
return float(await self._cli_cmnd('getbalance'))
def send_to_address(self, address, amount):
return self._cli_cmnd('sendtoaddress', address, str(amount))
@ -663,10 +415,7 @@ class LBCWalletNode:
return self._cli_cmnd('createrawtransaction', json.dumps(inputs), json.dumps(outputs))
async def sign_raw_transaction_with_wallet(self, tx):
# the "withwallet" portion should only come into play if we are doing segwit.
# and "withwallet" doesn't exist on lbcd yet.
result = await self._cli_cmnd('signrawtransaction', tx)
return json.loads(result)['hex'].encode()
return json.loads(await self._cli_cmnd('signrawtransactionwithwallet', tx))['hex'].encode()
def decode_raw_transaction(self, tx):
return self._cli_cmnd('decoderawtransaction', hexlify(tx.raw).decode())

View file

@ -61,10 +61,8 @@ class ConductorService:
#set_logging(
# self.stack.ledger_module, logging.DEBUG, WebSocketLogHandler(self.send_message)
#)
self.stack.lbcd_started or await self.stack.start_lbcd()
self.send_message({'type': 'service', 'name': 'lbcd', 'port': self.stack.lbcd_node.port})
self.stack.lbcwallet_started or await self.stack.start_lbcwallet()
self.send_message({'type': 'service', 'name': 'lbcwallet', 'port': self.stack.lbcwallet_node.port})
self.stack.blockchain_started or await self.stack.start_blockchain()
self.send_message({'type': 'service', 'name': 'blockchain', 'port': self.stack.blockchain_node.port})
self.stack.spv_started or await self.stack.start_spv()
self.send_message({'type': 'service', 'name': 'spv', 'port': self.stack.spv_node.port})
self.stack.wallet_started or await self.stack.start_wallet()
@ -76,7 +74,7 @@ class ConductorService:
async def generate(self, request):
data = await request.post()
blocks = data.get('blocks', 1)
await self.stack.lbcwallet_node.generate(int(blocks))
await self.stack.blockchain_node.generate(int(blocks))
return json_response({'blocks': blocks})
async def transfer(self, request):
@ -87,14 +85,11 @@ class ConductorService:
if not address:
raise ValueError("No address was provided.")
amount = data.get('amount', 1)
txid = await self.stack.blockchain_node.send_to_address(address, amount)
if self.stack.wallet_started:
watcher = self.stack.wallet_node.ledger.on_transaction.where(
lambda e: e.address == address # and e.tx.id == txid -- might stall; see send_to_address_and_wait
await self.stack.wallet_node.ledger.on_transaction.where(
lambda e: e.tx.id == txid and e.address == address
)
txid = await self.stack.lbcwallet_node.send_to_address(address, amount)
await watcher
else:
txid = await self.stack.lbcwallet_node.send_to_address(address, amount)
return json_response({
'address': address,
'amount': amount,
@ -103,7 +98,7 @@ class ConductorService:
async def balance(self, _):
return json_response({
'balance': await self.stack.lbcwallet_node.get_balance()
'balance': await self.stack.blockchain_node.get_balance()
})
async def log(self, request):
@ -134,7 +129,7 @@ class ConductorService:
'type': 'status',
'height': self.stack.wallet_node.ledger.headers.height,
'balance': satoshis_to_coins(await self.stack.wallet_node.account.get_balance()),
'miner': await self.stack.lbcwallet_node.get_balance()
'miner': await self.stack.blockchain_node.get_balance()
})
def send_message(self, msg):

View file

@ -108,6 +108,9 @@ class Response:
class CodeMessageError(Exception):
def __init__(self, code, message):
super().__init__(code, message)
@property
def code(self):
return self.args[0]

View file

@ -395,8 +395,8 @@ class RPCSession(SessionBase):
namespace=NAMESPACE, labelnames=("version",)
)
def __init__(self, *, framer=None, connection=None):
super().__init__(framer=framer)
def __init__(self, *, framer=None, loop=None, connection=None):
super().__init__(framer=framer, loop=loop)
self.connection = connection or self.default_connection()
self.client_version = 'unknown'
@ -436,8 +436,7 @@ class RPCSession(SessionBase):
except CancelledError:
raise
except Exception:
reqstr = str(request)
self.logger.exception(f'exception handling {reqstr[:16_000]}')
self.logger.exception(f'exception handling {request}')
result = RPCError(JSONRPC.INTERNAL_ERROR,
'internal server error')
if isinstance(request, Request):
@ -496,17 +495,6 @@ class RPCSession(SessionBase):
self.abort()
return False
async def send_notifications(self, notifications) -> bool:
"""Send an RPC notification over the network."""
message, _ = self.connection.send_batch(notifications)
try:
await self._send_message(message)
return True
except asyncio.TimeoutError:
self.logger.info("timeout sending address notification to %s", self.peer_address_str(for_log=True))
self.abort()
return False
def send_batch(self, raise_errors=False):
"""Return a BatchRequest. Intended to be used like so:

Some files were not shown because too many files have changed in this diff.