forked from LBRYCommunity/lbry-sdk
Compare commits
276 commits
master...fwss_fix_l
Author | SHA1 | Date
---|---|---
| 151337fa36 |
| 7ebb9d06df |
| b99de6b872 |
| 889464e51d |
| 35c3ff1e30 |
| aa75b9bb25 |
| f995ceae8b |
| c1803434aa |
| 4dfbdcc2d7 |
| 272940b6d6 |
| baf384d6e0 |
| a012c04974 |
| 1fa117a104 |
| 33a157959d |
| 747eace4ab |
| 8fa2d746e7 |
| a77b1f9997 |
| c914f24089 |
| e41a71a64e |
| e59e238fd5 |
| 4c331d00e7 |
| f56229bcce |
| abf1247f61 |
| 4b230a97f9 |
| bf7ac1562f |
| 2000d75c7a |
| 1259002b51 |
| 70d9f4cf79 |
| 23723f8041 |
| 51a0f7ddc8 |
| 1228700487 |
| 67c5c192f3 |
| 6b9cf5b48c |
| 0a91bd35c5 |
| 84639cfb2e |
| 0b245aab31 |
| ee3db31541 |
| 8a3b960a85 |
| 2952609972 |
| 9ab8a7dd81 |
| a7555932a9 |
| 9411b26fd3 |
| 85db7d3ce7 |
| 87f1d5b0ae |
| 64b8caeb5c |
| 3315175d1c |
| a802d1f686 |
| 684e389283 |
| 895719a13d |
| 8f2cce7f61 |
| 9114a9794d |
| a1f3254261 |
| dbc0da2817 |
| 412ace1c6f |
| 8100efb48c |
| 8d164dfed3 |
| ffea76cdd5 |
| 72ddb0c195 |
| c57080711e |
| 96aea579ac |
| e2aae23575 |
| 2ae700feb3 |
| d1ac066c6d |
| b61424979d |
| ca10874006 |
| a4680878c4 |
| 1c29ae7204 |
| 86069b10ca |
| 9c5e2a8c8d |
| 622a3b77ef |
| 0dff82c31c |
| 8e683c9cd0 |
| 69c45d43d3 |
| fab7b5579c |
| 9e87394fca |
| 8c6633de17 |
| 3af71a2674 |
| b792b134a2 |
| f50196d395 |
| 248e04089b |
| 8fd92cb649 |
| af4138ff51 |
| 462daf4dc4 |
| e63151a370 |
| 09a2b2fa46 |
| a3d91329fe |
| 7bf96fd637 |
| 5157b2535b |
| 0151ce8040 |
| 5328ed105e |
| 7f01b1cb84 |
| 862c51946a |
| 1790ee3018 |
| 7a4e5dcb05 |
| 24a88db595 |
| 915233c96c |
| aa9365f218 |
| 15b8891fce |
| f8a8a75ae9 |
| d18ed6c19b |
| 4aa44d3b5a |
| 34a9dff141 |
| 7d9bf03574 |
| 3fe1981657 |
| 192c79c49c |
| 5883c9bc6c |
| 4b50d1e329 |
| 39d8a20fd5 |
| 9ccf00f56b |
| 46662b55c7 |
| b45a222f98 |
| 434c1bc6b3 |
| 2495df8859 |
| 635aebfeeb |
| 81926a42f9 |
| f2ff4410dc |
| 564018c937 |
| 4d1eafc0a4 |
| 211f8b2e59 |
| 9500be26fd |
| 017ef5b41a |
| 4b19861a74 |
| 5a0a987f0c |
| 4cb4659489 |
| e64b108404 |
| 7870abaef4 |
| 71e14c8e63 |
| b3b6361429 |
| 0d5441f3bf |
| 9757c69189 |
| d8fb31aedd |
| 54a0bf9290 |
| a3ef8d7411 |
| 4810ff5f94 |
| 2306edebf7 |
| db5a33dc3f |
| 597bebb5be |
| 73ff1d3b3a |
| 9198877098 |
| 46da2584ca |
| 53b7d0a58b |
| d1a243247d |
| 18dc5fbc9f |
| 410212c17a |
| a39f87b3c5 |
| 147b9d5ad1 |
| 1f210c0b0b |
| 86df4bdd11 |
| 096f74d79b |
| ae8bc59c65 |
| 01dbbb4c3a |
| 96d1926da4 |
| 2c10e71774 |
| ea6be53071 |
| cb5250f630 |
| 54e83daa59 |
| df44d6ef56 |
| aa4ef94e15 |
| fad144cb96 |
| 1fe444bca2 |
| 55196ccb6b |
| b3cb50aff0 |
| ebf36f513c |
| 47d207ff77 |
| d99e4221f2 |
| 56ff1342c4 |
| a8c8614948 |
| 0e9184048c |
| c01dceebcd |
| e3cc6ea224 |
| 44bbd9578d |
| 7855f9c93f |
| 76e21f65df |
| 53c8876b5e |
| 2927875830 |
| b9d954a394 |
| a0fb3424aa |
| 269c0f714e |
| 99d2a3f42b |
| a1aa578bc0 |
| 82062b5601 |
| db1f984558 |
| e66445b46e |
| 82b69109bd |
| ec4e36446c |
| 3ef83febc0 |
| ffecd02fbc |
| ebddb1f0f5 |
| 9d0b9805b2 |
| 8653839c16 |
| 29b1f93699 |
| 97c285f22b |
| c12e07de11 |
| 726bae97b0 |
| 5efb36ffd2 |
| b4d6b14599 |
| 9a40381f5a |
| 47f6d542c5 |
| 8799caa0e4 |
| a042377a7b |
| 596ed08395 |
| 2af29b892b |
| 7ffb169376 |
| d11f4f9bed |
| f2e844c476 |
| 83f7eab0e7 |
| 4a9f9906a0 |
| b7ff6569e4 |
| 12915143b8 |
| 06d93e667a |
| b341187b14 |
| a996e65eff |
| 0886a7946e |
| be6ebf0047 |
| 7c4f943bcb |
| 5b5c45ea76 |
| 6986211c1e |
| 8ac78990d8 |
| 4f879bbbae |
| e3f080a7ad |
| d232eeaf81 |
| 5d6388b366 |
| 955e44631d |
| 8dc5150dbe |
| d488bfd9d4 |
| 391b95fd12 |
| 4b172e4180 |
| 29ef4425b0 |
| 558b1aeadf |
| 41ce3e4ad8 |
| 8c91777e5d |
| db89607e4e |
| d476f08d13 |
| 2a0c653c37 |
| 219c7cf37d |
| 2f575a393f |
| 713c665588 |
| 9554b66a37 |
| 533f31cc89 |
| fef09c1773 |
| 6a33d86bfe |
| ccd32eae70 |
| c61c9726b0 |
| fd5be69d55 |
| 1f72751a88 |
| 362ab67186 |
| ffe7fcf124 |
| 1f5dbc3eb8 |
| 4dd85a169b |
| fe547f1b0e |
| ba154c799e |
| e2ffd24d51 |
| 8545ab880b |
| 4e85f34353 |
| 777c6342f8 |
| 10c262a095 |
| 74e3471bd9 |
| 4048cfb3e8 |
| 8b7b284c0d |
| 2d4d51388b |
| 06b75d07dc |
| 87a44fd41c |
| 70693f4d1a |
| 42224dadb6 |
| 53fc94d688 |
| 0b6d01fecc |
| ae9d4af8c0 |
| 2309d6354c |
| fd2f9846e9 |
| 66666e1167 |
| 4c0fbb84d6 |
| fd3448ffb8 |
| 6d93f97b51 |
| 2ee13ce39f |
| bad4320ddf |
| a220736dea |
250 changed files with 30336 additions and 37982 deletions
200  .github/workflows/main.yml  vendored
|
@ -1,5 +1,5 @@
|
|||
name: ci
|
||||
on: pull_request
|
||||
on: push
|
||||
jobs:
|
||||
|
||||
lint:
|
||||
|
@ -10,45 +10,19 @@ jobs:
|
|||
- uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: '3.7'
|
||||
- run: make install tools
|
||||
- name: extract pip cache
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ~/.cache/pip
|
||||
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
|
||||
restore-keys: ${{ runner.os }}-pip-
|
||||
- run: |
|
||||
pip install --user --upgrade pip wheel
|
||||
pip install -e .[lint]
|
||||
- run: make lint
|
||||
|
||||
tests-unit:
|
||||
name: "tests / unit"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: '3.7'
|
||||
- run: make install tools
|
||||
- working-directory: lbry
|
||||
env:
|
||||
HOME: /tmp
|
||||
run: coverage run -p --source=lbry -m unittest discover -vv tests.unit
|
||||
|
||||
tests-integration:
|
||||
name: "tests / integration"
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
test:
|
||||
- datanetwork
|
||||
- blockchain
|
||||
- other
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: '3.7'
|
||||
- if: matrix.test == 'other'
|
||||
run: sudo apt install -y --no-install-recommends ffmpeg
|
||||
- run: pip install tox-travis
|
||||
- run: tox -e ${{ matrix.test }}
|
||||
|
||||
build:
|
||||
needs: ["lint", "tests-unit", "tests-integration"]
|
||||
name: "build"
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
|
@ -61,19 +35,163 @@ jobs:
|
|||
- uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: '3.7'
|
||||
- name: set pip cache dir
|
||||
id: pip-cache
|
||||
run: echo "::set-output name=dir::$(pip cache dir)"
|
||||
- name: extract pip cache
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ${{ steps.pip-cache.outputs.dir }}
|
||||
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
|
||||
restore-keys: ${{ runner.os }}-pip-
|
||||
- run: |
|
||||
pip install --user --upgrade pip wheel
|
||||
pip install -e .[test]
|
||||
- env:
|
||||
HOME: /tmp
|
||||
run: coverage run -m unittest discover -v tests.unit
|
||||
- env:
|
||||
COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
COVERALLS_PARALLEL: true
|
||||
name: Submit to coveralls
|
||||
run: |
|
||||
pip install https://github.com/bboe/coveralls-python/archive/github_actions.zip
|
||||
coveralls
|
||||
|
||||
tests-integration:
|
||||
name: "tests / integration"
|
||||
runs-on: ubuntu-20.04
|
||||
strategy:
|
||||
matrix:
|
||||
test:
|
||||
# - datanetwork
|
||||
- blockchain
|
||||
# - other
|
||||
db:
|
||||
- sqlite
|
||||
- postgres
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:12
|
||||
env:
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_PASSWORD: postgres
|
||||
POSTGRES_DB: postgres
|
||||
ports:
|
||||
- 5432:5432
|
||||
options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: '3.7'
|
||||
- if: matrix.test == 'other'
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y --no-install-recommends ffmpeg
|
||||
- name: extract pip cache
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ./.tox
|
||||
key: tox-integration-${{ matrix.test }}-${{ matrix.db }}-${{ hashFiles('setup.py') }}
|
||||
restore-keys: txo-integration-${{ matrix.test }}-${{ matrix.db }}-
|
||||
- run: pip install tox
|
||||
- env:
|
||||
TEST_DB: ${{ matrix.db }}
|
||||
run: tox -e ${{ matrix.test }}
|
||||
- env:
|
||||
COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
COVERALLS_PARALLEL: true
|
||||
name: Submit to coveralls
|
||||
run: |
|
||||
pip install https://github.com/bboe/coveralls-python/archive/github_actions.zip
|
||||
coverage combine tests
|
||||
coveralls
|
||||
|
||||
coveralls-finished:
|
||||
needs: ["tests-unit", "tests-integration"]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Coveralls Finished
|
||||
uses: coverallsapp/github-action@57daa114
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
parallel-finished: true
|
||||
|
||||
build:
|
||||
needs: ["lint", "tests-unit", "tests-integration"]
|
||||
name: "build"
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-16.04
|
||||
- macos-latest
|
||||
- windows-latest
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: '3.7'
|
||||
- name: set pip cache dir
|
||||
id: pip-cache
|
||||
run: echo "::set-output name=dir::$(pip cache dir)"
|
||||
- name: extract pip cache
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: ${{ steps.pip-cache.outputs.dir }}
|
||||
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
|
||||
restore-keys: ${{ runner.os }}-pip-
|
||||
- name: Setup
|
||||
run: |
|
||||
pip install pyinstaller
|
||||
pip install --user --upgrade pip wheel
|
||||
pip install sqlalchemy@git+https://github.com/eukreign/pyinstaller.git@sqlalchemy
|
||||
- if: startsWith(runner.os, 'linux')
|
||||
run: |
|
||||
sudo apt-get install libzmq3-dev
|
||||
pip install -e .[postgres]
|
||||
- if: startsWith(runner.os, 'mac')
|
||||
run: |
|
||||
brew install zeromq
|
||||
pip install -e .
|
||||
- if: startsWith(matrix.os, 'windows') == false
|
||||
- if: startsWith(runner.os, 'linux') || startsWith(runner.os, 'mac')
|
||||
name: Build & Run (Unix)
|
||||
run: |
|
||||
pyinstaller --onefile --name lbrynet lbry/extras/cli.py
|
||||
pyinstaller --onefile --name lbrynet lbry/cli.py
|
||||
chmod +x dist/lbrynet
|
||||
dist/lbrynet --version
|
||||
- if: startsWith(matrix.os, 'windows')
|
||||
- if: startsWith(runner.os, 'windows')
|
||||
name: Build & Run (Windows)
|
||||
run: |
|
||||
pip install pywin32
|
||||
pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py
|
||||
pip install -e .
|
||||
pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/cli.py
|
||||
dist/lbrynet.exe --version
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: lbrynet-${{ matrix.os }}
|
||||
path: dist/
|
||||
|
||||
docker:
|
||||
needs: ["build"]
|
||||
name: "build (docker)"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: fetch lbrynet binary
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: lbrynet-ubuntu-16.04
|
||||
- run: |
|
||||
chmod +x lbrynet
|
||||
mv lbrynet docker
|
||||
- name: build and push docker image
|
||||
uses: docker/build-push-action@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
repository: lbry/lbrynet
|
||||
path: docker
|
||||
tag_with_ref: true
|
||||
tag_with_sha: true
|
||||
add_git_labels: true
|
||||
|
|
3  .gitignore  vendored
|
@@ -14,4 +14,5 @@ _trial_temp/
/tests/integration/blockchain/files
/tests/.coverage.*

/lbry/wallet/bin
/lbry/blockchain/bin
/lbry/wallet/bin
21  Makefile
|
@@ -1,15 +1,4 @@
.PHONY: install tools lint test idea

install:
	CFLAGS="-DSQLITE_MAX_VARIABLE_NUMBER=2500000" pip install -U https://github.com/rogerbinns/apsw/releases/download/3.30.1-r1/apsw-3.30.1-r1.zip \
		--global-option=fetch \
		--global-option=--version --global-option=3.30.1 --global-option=--all \
		--global-option=build --global-option=--enable --global-option=fts5
	pip install -e .

tools:
	pip install mypy==0.701
	pip install coverage astroid pylint
.PHONY: tools lint test idea

lint:
	pylint --rcfile=setup.cfg lbry

@@ -21,3 +10,11 @@ test:
idea:
	mkdir -p .idea
	cp -r scripts/idea/* .idea

start:
	dropdb lbry
	createdb lbry
	lbrynet start --full-node \
		--db-url=postgresql:///lbry --workers=28 --console=advanced --no-spv-address-filters \
		--lbrycrd-rpc-user=lbry --lbrycrd-rpc-pass=somethingelse \
		--lbrycrd-dir=${HOME}/.lbrycrd --data-dir=/tmp/tmp-lbrynet
5  docker/Dockerfile  Normal file
|
@@ -0,0 +1,5 @@
FROM ubuntu:20.04
COPY lbrynet /bin
RUN lbrynet --version
ENTRYPOINT ["lbrynet"]
CMD ["start", "--full-node"]
8  docker/Dockerfile.lbrycrd  Normal file
|
@@ -0,0 +1,8 @@
FROM ubuntu:20.04
RUN apt-get update && \
    apt-get install -y wget unzip && \
    wget -nv https://build.lbry.io/lbrycrd/block_info_fix_try2/lbrycrd-linux.zip && \
    unzip -d /bin lbrycrd-linux.zip && \
    rm -rf lbrycrd-linux.zip /var/lib/apt/lists/*
RUN lbrycrdd --version
ENTRYPOINT ["lbrycrdd"]
|
@ -1,52 +0,0 @@
|
|||
FROM ubuntu:19.10
|
||||
|
||||
ARG user=lbry
|
||||
ARG db_dir=/database
|
||||
ARG projects_dir=/home/$user
|
||||
|
||||
ARG DOCKER_TAG
|
||||
ARG DOCKER_COMMIT=docker
|
||||
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get -y --no-install-recommends install \
|
||||
wget \
|
||||
tar unzip \
|
||||
build-essential \
|
||||
python3 \
|
||||
python3-dev \
|
||||
python3-pip \
|
||||
python3-wheel \
|
||||
python3-setuptools && \
|
||||
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
|
||||
RUN mkdir -p $db_dir
|
||||
RUN chown -R $user:$user $db_dir
|
||||
|
||||
COPY . $projects_dir
|
||||
RUN chown -R $user:$user $projects_dir
|
||||
|
||||
USER $user
|
||||
WORKDIR $projects_dir
|
||||
|
||||
RUN pip install uvloop
|
||||
RUN make install
|
||||
RUN python3 docker/set_build.py
|
||||
RUN rm ~/.cache -rf
|
||||
|
||||
# entry point
|
||||
ARG host=0.0.0.0
|
||||
ARG tcp_port=50001
|
||||
ARG daemon_url=http://lbry:lbry@localhost:9245/
|
||||
VOLUME $db_dir
|
||||
ENV TCP_PORT=$tcp_port
|
||||
ENV HOST=$host
|
||||
ENV DAEMON_URL=$daemon_url
|
||||
ENV DB_DIRECTORY=$db_dir
|
||||
ENV MAX_SESSIONS=1000000000
|
||||
ENV MAX_SEND=1000000000000000000
|
||||
ENV EVENT_LOOP_POLICY=uvloop
|
||||
COPY ./docker/wallet_server_entrypoint.sh /entrypoint.sh
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
|
@ -1,34 +0,0 @@
|
|||
version: "3"
|
||||
|
||||
volumes:
|
||||
lbrycrd:
|
||||
wallet_server:
|
||||
|
||||
services:
|
||||
lbrycrd:
|
||||
image: lbry/lbrycrd:${LBRYCRD_TAG:-latest-release}
|
||||
restart: always
|
||||
ports: # accessible from host
|
||||
- "9246:9246" # rpc port
|
||||
expose: # internal to docker network. also this doesn't do anything. its for documentation only.
|
||||
- "9245" # node-to-node comms port
|
||||
volumes:
|
||||
- "lbrycrd:/data/.lbrycrd"
|
||||
environment:
|
||||
- RUN_MODE=default
|
||||
- SNAPSHOT_URL=${LBRYCRD_SNAPSHOT_URL-https://lbry.com/snapshot/blockchain}
|
||||
- RPC_ALLOW_IP=0.0.0.0/0
|
||||
wallet_server:
|
||||
image: lbry/wallet-server:${WALLET_SERVER_TAG:-latest-release}
|
||||
depends_on:
|
||||
- lbrycrd
|
||||
restart: always
|
||||
ports:
|
||||
- "50001:50001" # rpc port
|
||||
- "50005:50005" # websocket port
|
||||
#- "2112:2112" # uncomment to enable prometheus
|
||||
volumes:
|
||||
- "wallet_server:/database"
|
||||
environment:
|
||||
- SNAPSHOT_URL=${WALLET_SERVER_SNAPSHOT_URL-https://lbry.com/snapshot/wallet}
|
||||
- DAEMON_URL=http://lbry:lbry@lbrycrd:9245
|
41  docker/docker-compose.yml  Normal file
|
@@ -0,0 +1,41 @@
version: "3.8"
volumes:
  lbrycrd-data:
services:
  postgres:
    image: postgres:12
    environment:
      POSTGRES_USER: lbry
      POSTGRES_PASSWORD: lbry
  lbrycrd:
    image: lbry/lbrycrd
    build:
      context: .
      dockerfile: Dockerfile.lbrycrd
    volumes:
      - lbrycrd-data:/root/.lbrycrd
    command: >
      -rpcbind=lbrycrd
      -rpcallowip=0.0.0.0/0
      -rpcuser=lbryuser
      -rpcpassword=lbrypass
      -zmqpubhashblock=tcp://lbrycrd:29000
  lbrynet:
    image: lbry/lbrynet:fast_wallet_server_sync
    depends_on:
      - postgres
      - lbrycrd
    volumes:
      - lbrycrd-data:/lbrycrd
    command: >
      start
      --full-node
      --api=0.0.0.0:5279
      --db-url=postgresql://lbry:lbry@postgres:5432/lbry
      --workers=12
      --console=basic
      --no-spv-address-filters
      --lbrycrd-rpc-host=lbrycrd
      --lbrycrd-rpc-user=lbryuser
      --lbrycrd-rpc-pass=lbrypass
      --lbrycrd-dir=/lbrycrd
@@ -1,7 +0,0 @@
#!/bin/bash

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "$DIR/../.." ## make sure we're in the right place. Docker Hub screws this up sometimes
echo "docker build dir: $(pwd)"

docker build --build-arg DOCKER_TAG=$DOCKER_TAG --build-arg DOCKER_COMMIT=$SOURCE_COMMIT -f $DOCKERFILE_PATH -t $IMAGE_NAME .
@@ -1,11 +0,0 @@
# requires powershell and .NET 4+. see https://chocolatey.org/install for more info.

$chocoVersion = powershell choco -v
if(-not($chocoVersion)){
    Write-Output "Chocolatey is not installed, installing now"
    Write-Output "IF YOU KEEP GETTING THIS MESSAGE ON EVERY BUILD, TRY RESTARTING THE GITLAB RUNNER SO IT GETS CHOCO INTO IT'S ENV"
    Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
}
else{
    Write-Output "Chocolatey version $chocoVersion is already installed"
}
@@ -1,23 +0,0 @@
#!/bin/bash

# entrypoint for wallet server Docker image

set -euo pipefail

SNAPSHOT_URL="${SNAPSHOT_URL:-}" #off by default. latest snapshot at https://lbry.com/snapshot/wallet

if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/claims.db ]]; then
    files="$(ls)"
    echo "Downloading wallet snapshot from $SNAPSHOT_URL"
    wget --no-verbose --trust-server-names --content-disposition "$SNAPSHOT_URL"
    echo "Extracting snapshot..."
    filename="$(grep -vf <(echo "$files") <(ls))" # finds the file that was not there before
    case "$filename" in
        *.tgz|*.tar.gz|*.tar.bz2 ) tar xvf "$filename" --directory /database ;;
        *.zip ) unzip "$filename" -d /database ;;
        * ) echo "Don't know how to extract ${filename}. SNAPSHOT COULD NOT BE LOADED" && exit 1 ;;
    esac
    rm "$filename"
fi

/home/lbry/.local/bin/torba-server "$@"
350  docs/api.json
File diff suppressed because one or more lines are too long
|
@@ -1,7 +0,0 @@
.git
.tox
__pycache__
dist
lbry.egg-info
docs
tests
@@ -1,2 +1,8 @@
__version__ = "0.68.0"
version = tuple(map(int, __version__.split('.'))) # pylint: disable=invalid-name
__version__ = "1.0.0"
from lbry.wallet import Account, Wallet, WalletManager
from lbry.blockchain import Ledger, RegTestLedger, TestNetLedger
from lbry.blockchain import Transaction, Output, Input
from lbry.blockchain import dewies_to_lbc, lbc_to_dewies, dict_values_to_lbc
from lbry.service import API, Daemon, FullNode, LightClient
from lbry.db.database import Database
from lbry.conf import Config
@@ -10,7 +10,11 @@ from lbry.connection_manager import ConnectionManager
if typing.TYPE_CHECKING:
    from lbry.conf import Config
    from lbry.dht.protocol.data_store import DictDataStore
    from lbry.extras.daemon.storage import SQLiteStorage


class SQLiteStorage:
    pass


log = logging.getLogger(__name__)
4  lbry/blockchain/__init__.py  Normal file
|
@@ -0,0 +1,4 @@
from .ledger import Ledger, RegTestLedger, TestNetLedger
from .transaction import Transaction, Output, Input
from .bcd_data_stream import BCDataStream
from .dewies import dewies_to_lbc, lbc_to_dewies, dict_values_to_lbc
@@ -4,8 +4,11 @@ from io import BytesIO

class BCDataStream:

    def __init__(self, data=None):
        self.data = BytesIO(data)
    def __init__(self, data=None, fp=None):
        self.data = fp or BytesIO(data)

    def tell(self):
        return self.data.tell()

    def reset(self):
        self.data.seek(0)
61  lbry/blockchain/block.py  Normal file
|
@ -0,0 +1,61 @@
|
|||
import struct
|
||||
from typing import Set
|
||||
from typing import NamedTuple, List
|
||||
|
||||
from chiabip158 import PyBIP158 # pylint: disable=no-name-in-module
|
||||
|
||||
from lbry.crypto.hash import double_sha256
|
||||
from lbry.blockchain.transaction import Transaction
|
||||
from lbry.blockchain.bcd_data_stream import BCDataStream
|
||||
|
||||
|
||||
ZERO_BLOCK = bytes((0,)*32)
|
||||
|
||||
|
||||
def create_block_filter(address_hashes: Set[bytes]) -> bytes:
|
||||
return bytes(PyBIP158([bytearray(a) for a in address_hashes]).GetEncoded())
|
||||
|
||||
|
||||
def get_block_filter(block_filter: bytes) -> PyBIP158:
|
||||
return PyBIP158(bytearray(block_filter))
|
||||
|
||||
|
||||
class Block(NamedTuple):
|
||||
height: int
|
||||
version: int
|
||||
file_number: int
|
||||
block_hash: bytes
|
||||
prev_block_hash: bytes
|
||||
merkle_root: bytes
|
||||
claim_trie_root: bytes
|
||||
timestamp: int
|
||||
bits: int
|
||||
nonce: int
|
||||
txs: List[Transaction]
|
||||
|
||||
@staticmethod
|
||||
def from_data_stream(stream: BCDataStream, height: int, file_number: int):
|
||||
header = stream.data.read(112)
|
||||
version, = struct.unpack('<I', header[:4])
|
||||
timestamp, bits, nonce = struct.unpack('<III', header[100:112])
|
||||
tx_count = stream.read_compact_size()
|
||||
return Block(
|
||||
height=height,
|
||||
version=version,
|
||||
file_number=file_number,
|
||||
block_hash=double_sha256(header),
|
||||
prev_block_hash=header[4:36],
|
||||
merkle_root=header[36:68],
|
||||
claim_trie_root=header[68:100][::-1],
|
||||
timestamp=timestamp,
|
||||
bits=bits,
|
||||
nonce=nonce,
|
||||
txs=[
|
||||
Transaction(height=height, position=i, timestamp=timestamp).deserialize(stream)
|
||||
for i in range(tx_count)
|
||||
]
|
||||
)
|
||||
|
||||
@property
|
||||
def is_first_block(self):
|
||||
return self.prev_block_hash == ZERO_BLOCK
|
242  lbry/blockchain/database.py  Normal file
|
@ -0,0 +1,242 @@
|
|||
import os.path
|
||||
import asyncio
|
||||
import sqlite3
|
||||
from typing import List, Optional
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from lbry.schema.url import normalize_name
|
||||
|
||||
from .bcd_data_stream import BCDataStream
|
||||
|
||||
|
||||
FILES = [
|
||||
'claims',
|
||||
'block_index',
|
||||
]
|
||||
|
||||
|
||||
def make_short_url(r):
|
||||
try:
|
||||
return f'{normalize_name(r["name"].decode())}#{r["shortestID"] or r["claimID"][::-1].hex()[0]}'
|
||||
except UnicodeDecodeError:
|
||||
# print(f'failed making short url due to name parse error for claim_id: {r["claimID"][::-1].hex()}')
|
||||
return "INVALID NAME"
|
||||
|
||||
|
||||
class FindShortestID:
|
||||
__slots__ = 'short_id', 'new_id'
|
||||
|
||||
def __init__(self):
|
||||
self.short_id = ''
|
||||
self.new_id = None
|
||||
|
||||
def step(self, other_id, new_id):
|
||||
other_id = other_id[::-1].hex()
|
||||
if self.new_id is None:
|
||||
self.new_id = new_id[::-1].hex()
|
||||
for i in range(len(self.new_id)):
|
||||
if other_id[i] != self.new_id[i]:
|
||||
if i > len(self.short_id)-1:
|
||||
self.short_id = self.new_id[:i+1]
|
||||
break
|
||||
|
||||
def finalize(self):
|
||||
return self.short_id
|
||||
|
||||
|
||||
class BlockchainDB:
|
||||
|
||||
def __init__(self, directory: str):
|
||||
self.directory = directory
|
||||
self.connection: Optional[sqlite3.Connection] = None
|
||||
self.executor: Optional[ThreadPoolExecutor] = None
|
||||
|
||||
async def run_in_executor(self, *args):
|
||||
return await asyncio.get_running_loop().run_in_executor(self.executor, *args)
|
||||
|
||||
def sync_open(self):
|
||||
self.connection = sqlite3.connect(
|
||||
os.path.join(self.directory, FILES[0]+'.sqlite'),
|
||||
timeout=60.0 * 5
|
||||
)
|
||||
for file in FILES[1:]:
|
||||
self.connection.execute(
|
||||
f"ATTACH DATABASE '{os.path.join(self.directory, file+'.sqlite')}' AS {file}"
|
||||
)
|
||||
self.connection.create_aggregate("find_shortest_id", 2, FindShortestID)
|
||||
self.connection.execute("CREATE INDEX IF NOT EXISTS claim_originalheight ON claim (originalheight);")
|
||||
self.connection.execute("CREATE INDEX IF NOT EXISTS claim_updateheight ON claim (updateheight);")
|
||||
self.connection.execute("create index IF NOT EXISTS support_blockheight on support (blockheight);")
|
||||
self.connection.row_factory = sqlite3.Row
|
||||
|
||||
async def open(self):
|
||||
assert self.executor is None, "Database is already open."
|
||||
self.executor = ThreadPoolExecutor(max_workers=1)
|
||||
return await self.run_in_executor(self.sync_open)
|
||||
|
||||
def sync_close(self):
|
||||
self.connection.close()
|
||||
self.connection = None
|
||||
|
||||
async def close(self):
|
||||
if self.executor is not None:
|
||||
if self.connection is not None:
|
||||
await self.run_in_executor(self.sync_close)
|
||||
self.executor.shutdown()
|
||||
self.executor = None
|
||||
|
||||
async def commit(self):
|
||||
await self.run_in_executor(self.connection.commit)
|
||||
|
||||
def sync_execute(self, sql: str, *args):
|
||||
return self.connection.execute(sql, *args)
|
||||
|
||||
async def execute(self, sql: str, *args):
|
||||
return await self.run_in_executor(self.sync_execute, sql, *args)
|
||||
|
||||
def sync_execute_fetchall(self, sql: str, *args) -> List[dict]:
|
||||
return self.connection.execute(sql, *args).fetchall()
|
||||
|
||||
async def execute_fetchall(self, sql: str, *args) -> List[dict]:
|
||||
return await self.run_in_executor(self.sync_execute_fetchall, sql, *args)
|
||||
|
||||
def sync_get_best_height(self) -> int:
|
||||
sql = "SELECT MAX(height) FROM block_info"
|
||||
return self.connection.execute(sql).fetchone()[0]
|
||||
|
||||
async def get_best_height(self) -> int:
|
||||
return await self.run_in_executor(self.sync_get_best_height)
|
||||
|
||||
def sync_get_block_files(self, file_number: int = None, start_height: int = None) -> List[dict]:
|
||||
sql = """
|
||||
SELECT
|
||||
file as file_number,
|
||||
COUNT(hash) as blocks,
|
||||
SUM(txcount) as txs,
|
||||
MAX(height) as best_height,
|
||||
MIN(height) as start_height
|
||||
FROM block_info
|
||||
WHERE status&1 AND status&4
|
||||
"""
|
||||
args = ()
|
||||
if file_number is not None and start_height is not None:
|
||||
sql += "AND file = ? AND height >= ?"
|
||||
args = (file_number, start_height)
|
||||
return [dict(r) for r in self.sync_execute_fetchall(sql + " GROUP BY file ORDER BY file ASC;", args)]
|
||||
|
||||
async def get_block_files(self, file_number: int = None, start_height: int = None) -> List[dict]:
|
||||
return await self.run_in_executor(
|
||||
self.sync_get_block_files, file_number, start_height
|
||||
)
|
||||
|
||||
def sync_get_blocks_in_file(self, block_file: int, start_height=0) -> List[dict]:
|
||||
return [dict(r) for r in self.sync_execute_fetchall(
|
||||
"""
|
||||
SELECT datapos as data_offset, height, hash as block_hash, txCount as txs
|
||||
FROM block_info
|
||||
WHERE file = ? AND height >= ? AND status&1 AND status&4
|
||||
ORDER BY datapos ASC;
|
||||
""", (block_file, start_height)
|
||||
)]
|
||||
|
||||
async def get_blocks_in_file(self, block_file: int, start_height=0) -> List[dict]:
|
||||
return await self.run_in_executor(self.sync_get_blocks_in_file, block_file, start_height)
|
||||
|
||||
def sync_get_claim_support_txo_hashes(self, at_height: int) -> set:
|
||||
return {
|
||||
r['txID'] + BCDataStream.uint32.pack(r['txN'])
|
||||
for r in self.connection.execute(
|
||||
"""
|
||||
SELECT txID, txN FROM claim WHERE updateHeight = ?
|
||||
UNION
|
||||
SELECT txID, txN FROM support WHERE blockHeight = ?
|
||||
""", (at_height, at_height)
|
||||
).fetchall()
|
||||
}
|
||||
|
||||
def sync_get_takeover_count(self, start_height: int, end_height: int) -> int:
|
||||
sql = """
|
||||
SELECT COUNT(*) FROM claim WHERE name IN (
|
||||
SELECT name FROM takeover
|
||||
WHERE name IS NOT NULL AND height BETWEEN ? AND ?
|
||||
)
|
||||
""", (start_height, end_height)
|
||||
return self.connection.execute(*sql).fetchone()[0]
|
||||
|
||||
async def get_takeover_count(self, start_height: int, end_height: int) -> int:
|
||||
return await self.run_in_executor(self.sync_get_takeover_count, start_height, end_height)
|
||||
|
||||
def sync_get_takeovers(self, start_height: int, end_height: int) -> List[dict]:
|
||||
sql = """
|
||||
SELECT name, claimID, MAX(height) AS height FROM takeover
|
||||
WHERE name IS NOT NULL AND height BETWEEN ? AND ?
|
||||
GROUP BY name
|
||||
""", (start_height, end_height)
|
||||
return [{
|
||||
'normalized': normalize_name(r['name'].decode()),
|
||||
'claim_hash': r['claimID'],
|
||||
'height': r['height']
|
||||
} for r in self.sync_execute_fetchall(*sql)]
|
||||
|
||||
async def get_takeovers(self, start_height: int, end_height: int) -> List[dict]:
|
||||
return await self.run_in_executor(self.sync_get_takeovers, start_height, end_height)
|
||||
|
||||
def sync_get_claim_metadata_count(self, start_height: int, end_height: int) -> int:
|
||||
sql = "SELECT COUNT(*) FROM claim WHERE originalHeight BETWEEN ? AND ?"
|
||||
return self.connection.execute(sql, (start_height, end_height)).fetchone()[0]
|
||||
|
||||
async def get_claim_metadata_count(self, start_height: int, end_height: int) -> int:
|
||||
return await self.run_in_executor(self.sync_get_claim_metadata_count, start_height, end_height)
|
||||
|
||||
def sync_get_claim_metadata(self, claim_hashes) -> List[dict]:
|
||||
sql = f"""
|
||||
SELECT
|
||||
name, claimID, activationHeight, expirationHeight, originalHeight,
|
||||
(SELECT
|
||||
CASE WHEN takeover.claimID = claim.claimID THEN takeover.height END
|
||||
FROM takeover WHERE takeover.name = claim.name
|
||||
ORDER BY height DESC LIMIT 1
|
||||
) AS takeoverHeight,
|
||||
(SELECT find_shortest_id(c.claimid, claim.claimid) FROM claim AS c
|
||||
WHERE
|
||||
c.nodename = claim.nodename AND
|
||||
c.originalheight <= claim.originalheight AND
|
||||
c.claimid != claim.claimid
|
||||
) AS shortestID
|
||||
FROM claim
|
||||
WHERE claimID IN ({','.join(['?' for _ in claim_hashes])})
|
||||
ORDER BY claimID
|
||||
""", claim_hashes
|
||||
return [{
|
||||
"name": r["name"],
|
||||
"claim_hash": r["claimID"],
|
||||
"activation_height": r["activationHeight"],
|
||||
"expiration_height": r["expirationHeight"],
|
||||
"takeover_height": r["takeoverHeight"],
|
||||
"creation_height": r["originalHeight"],
|
||||
"short_url": make_short_url(r),
|
||||
} for r in self.sync_execute_fetchall(*sql)]
|
||||
|
||||
async def get_claim_metadata(self, start_height: int, end_height: int) -> List[dict]:
|
||||
return await self.run_in_executor(self.sync_get_claim_metadata, start_height, end_height)
|
||||
|
||||
def sync_get_support_metadata_count(self, start_height: int, end_height: int) -> int:
|
||||
sql = "SELECT COUNT(*) FROM support WHERE blockHeight BETWEEN ? AND ?"
|
||||
return self.connection.execute(sql, (start_height, end_height)).fetchone()[0]
|
||||
|
||||
async def get_support_metadata_count(self, start_height: int, end_height: int) -> int:
|
||||
return await self.run_in_executor(self.sync_get_support_metadata_count, start_height, end_height)
|
||||
|
||||
def sync_get_support_metadata(self, start_height: int, end_height: int) -> List[dict]:
|
||||
sql = """
|
||||
SELECT name, txid, txn, activationHeight, expirationHeight
|
||||
FROM support WHERE blockHeight BETWEEN ? AND ?
|
||||
""", (start_height, end_height)
|
||||
return [{
|
||||
"name": r['name'],
|
||||
"txo_hash_pk": r['txID'] + BCDataStream.uint32.pack(r['txN']),
|
||||
"activation_height": r['activationHeight'],
|
||||
"expiration_height": r['expirationHeight'],
|
||||
} for r in self.sync_execute_fetchall(*sql)]
|
||||
|
||||
async def get_support_metadata(self, start_height: int, end_height: int) -> List[dict]:
|
||||
return await self.run_in_executor(self.sync_get_support_metadata, start_height, end_height)
|
|
@@ -1,10 +1,19 @@
import re
import textwrap
from .util import coins_to_satoshis, satoshis_to_coins
from decimal import Decimal

from lbry.constants import COIN


def lbc_to_dewies(lbc: str) -> int:
    try:
        return coins_to_satoshis(lbc)
        if not isinstance(lbc, str):
            raise ValueError("{coins} must be a string")
        result = re.search(r'^(\d{1,10})\.(\d{1,8})$', lbc)
        if result is not None:
            whole, fractional = result.groups()
            return int(whole + fractional.ljust(8, "0"))
        raise ValueError(f"'{lbc}' is not a valid coin decimal")
    except ValueError:
        raise ValueError(textwrap.dedent(
            f"""

@@ -30,13 +39,17 @@ def lbc_to_dewies(lbc: str) -> int:


def dewies_to_lbc(dewies) -> str:
    return satoshis_to_coins(dewies)
    coins = '{:.8f}'.format(dewies / COIN).rstrip('0')
    if coins.endswith('.'):
        return coins+'0'
    else:
        return coins


def dict_values_to_lbc(d):
    lbc_dict = {}
    for key, value in d.items():
        if isinstance(value, int):
        if isinstance(value, (int, Decimal)):
            lbc_dict[key] = dewies_to_lbc(value)
        elif isinstance(value, dict):
            lbc_dict[key] = dict_values_to_lbc(value)
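The new parsing above replaces the old `coins_to_satoshis`/`satoshis_to_coins` helpers. A worked example of the arithmetic, assuming `lbry.constants.COIN` is 10**8 (matching the eight decimal places formatted by `dewies_to_lbc`):

```python
import re

COIN = 100_000_000  # assumed value of lbry.constants.COIN: 1 LBC = 10**8 dewies


def lbc_to_dewies(lbc: str) -> int:
    # same parse as the new implementation above: whole part + zero-padded fraction
    match = re.search(r'^(\d{1,10})\.(\d{1,8})$', lbc)
    if match is None:
        raise ValueError(f"'{lbc}' is not a valid coin decimal")
    whole, fractional = match.groups()
    return int(whole + fractional.ljust(8, '0'))


def dewies_to_lbc(dewies: int) -> str:
    coins = '{:.8f}'.format(dewies / COIN).rstrip('0')
    return coins + '0' if coins.endswith('.') else coins


assert lbc_to_dewies('1.5') == 150_000_000
assert dewies_to_lbc(150_000_000) == '1.5'
assert dewies_to_lbc(100_000_000) == '1.0'
```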
@ -1,5 +1,5 @@
|
|||
from binascii import hexlify, unhexlify
|
||||
from .constants import NULL_HASH32
|
||||
from lbry.constants import NULL_HASH32
|
||||
|
||||
|
||||
class TXRef:
|
||||
|
@ -29,28 +29,35 @@ class TXRef:
|
|||
|
||||
class TXRefImmutable(TXRef):
|
||||
|
||||
__slots__ = ('_height',)
|
||||
__slots__ = ('_height', '_timestamp')
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self._height = -1
|
||||
self._timestamp = -1
|
||||
|
||||
@classmethod
|
||||
def from_hash(cls, tx_hash: bytes, height: int) -> 'TXRefImmutable':
|
||||
def from_hash(cls, tx_hash: bytes, height: int, timestamp: int) -> 'TXRefImmutable':
|
||||
ref = cls()
|
||||
ref._hash = tx_hash
|
||||
ref._id = hexlify(tx_hash[::-1]).decode()
|
||||
ref._height = height
|
||||
ref._timestamp = timestamp
|
||||
return ref
|
||||
|
||||
@classmethod
|
||||
def from_id(cls, tx_id: str, height: int) -> 'TXRefImmutable':
|
||||
def from_id(cls, tx_id: str, height: int, timestamp: int) -> 'TXRefImmutable':
|
||||
ref = cls()
|
||||
ref._id = tx_id
|
||||
ref._hash = unhexlify(tx_id)[::-1]
|
||||
ref._height = height
|
||||
ref._timestamp = timestamp
|
||||
return ref
|
||||
|
||||
@property
|
||||
def height(self):
|
||||
return self._height
|
||||
|
||||
@property
|
||||
def timestamp(self):
|
||||
return self._timestamp
|
|
@ -12,7 +12,7 @@ from typing import Optional, Iterator, Tuple, Callable
|
|||
from binascii import hexlify, unhexlify
|
||||
|
||||
from lbry.crypto.hash import sha512, double_sha256, ripemd160
|
||||
from lbry.wallet.util import ArithUint256, date_to_julian_day
|
||||
from lbry.blockchain.util import ArithUint256
|
||||
from .checkpoints import HASHES
|
||||
|
||||
|
||||
|
@ -140,8 +140,8 @@ class Headers:
|
|||
return
|
||||
return int(self.first_block_timestamp + (height * self.timestamp_average_offset))
|
||||
|
||||
def estimated_julian_day(self, height):
|
||||
return date_to_julian_day(date.fromtimestamp(self.estimated_timestamp(height)))
|
||||
def estimated_date(self, height):
|
||||
return date.fromtimestamp(self.estimated_timestamp(height))
|
||||
|
||||
async def get_raw_header(self, height) -> bytes:
|
||||
if self.chunk_getter:
|
310  lbry/blockchain/lbrycrd.py  Normal file
|
@ -0,0 +1,310 @@
|
|||
import os
|
||||
import struct
|
||||
import shutil
|
||||
import asyncio
|
||||
import logging
|
||||
import zipfile
|
||||
import tempfile
|
||||
import urllib.request
|
||||
from typing import Optional
|
||||
from binascii import hexlify
|
||||
|
||||
import aiohttp
|
||||
import zmq
|
||||
import zmq.asyncio
|
||||
|
||||
from lbry.conf import Config
|
||||
from lbry.event import EventController
|
||||
from lbry.error import LbrycrdEventSubscriptionError, LbrycrdUnauthorizedError
|
||||
|
||||
from .database import BlockchainDB
|
||||
from .ledger import Ledger, RegTestLedger
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
DOWNLOAD_URL = (
|
||||
'https://github.com/lbryio/lbrycrd/releases/download/v0.17.4.5/lbrycrd-linux-1745.zip'
|
||||
)
|
||||
|
||||
|
||||
class Process(asyncio.SubprocessProtocol):
|
||||
|
||||
IGNORE_OUTPUT = [
|
||||
b'keypool keep',
|
||||
b'keypool reserve',
|
||||
b'keypool return',
|
||||
]
|
||||
|
||||
def __init__(self):
|
||||
self.ready = asyncio.Event()
|
||||
self.stopped = asyncio.Event()
|
||||
|
||||
def pipe_data_received(self, fd, data):
|
||||
if not any(ignore in data for ignore in self.IGNORE_OUTPUT):
|
||||
if b'Error:' in data:
|
||||
log.error(data.decode())
|
||||
else:
|
||||
for line in data.decode().splitlines():
|
||||
log.debug(line.rstrip())
|
||||
if b'Error:' in data:
|
||||
self.ready.set()
|
||||
raise SystemError(data.decode())
|
||||
if b'Done loading' in data:
|
||||
self.ready.set()
|
||||
|
||||
def process_exited(self):
|
||||
self.stopped.set()
|
||||
self.ready.set()
|
||||
|
||||
|
||||
ZMQ_BLOCK_EVENT = 'pubhashblock'
|
||||
|
||||
|
||||
class Lbrycrd:
|
||||
|
||||
def __init__(self, ledger: Ledger):
|
||||
self.ledger, self.conf = ledger, ledger.conf
|
||||
self.data_dir = self.actual_data_dir = ledger.conf.lbrycrd_dir
|
||||
if self.is_regtest:
|
||||
self.actual_data_dir = os.path.join(self.data_dir, 'regtest')
|
||||
self.blocks_dir = os.path.join(self.actual_data_dir, 'blocks')
|
||||
self.bin_dir = os.path.join(os.path.dirname(__file__), 'bin')
|
||||
self.daemon_bin = os.path.join(self.bin_dir, 'lbrycrdd')
|
||||
self.cli_bin = os.path.join(self.bin_dir, 'lbrycrd-cli')
|
||||
self.protocol = None
|
||||
self.transport = None
|
||||
self.subscribed = False
|
||||
self.subscription: Optional[asyncio.Task] = None
|
||||
self.default_generate_address = None
|
||||
self._on_block_controller = EventController()
|
||||
self.on_block = self._on_block_controller.stream
|
||||
self.on_block.listen(lambda e: log.info('%s %s', hexlify(e['hash']), e['msg']))
|
||||
|
||||
self.db = BlockchainDB(self.actual_data_dir)
|
||||
self.session: Optional[aiohttp.ClientSession] = None
|
||||
|
||||
@classmethod
|
||||
def temp_regtest(cls):
|
||||
return cls(RegTestLedger(
|
||||
Config.with_same_dir(tempfile.mkdtemp()).set(
|
||||
lbrycrd_rpc_port=9245 + 2, # avoid conflict with default rpc port
|
||||
lbrycrd_peer_port=9246 + 2, # avoid conflict with default peer port
|
||||
lbrycrd_zmq_blocks="tcp://127.0.0.1:29000"
|
||||
)
|
||||
))
|
||||
|
||||
@staticmethod
|
||||
def get_block_file_name(block_file_number):
|
||||
return f'blk{block_file_number:05}.dat'
|
||||
|
||||
def get_block_file_path(self, block_file_number):
|
||||
return os.path.join(
|
||||
self.actual_data_dir, 'blocks',
|
||||
self.get_block_file_name(block_file_number)
|
||||
)
|
||||
|
||||
@property
|
||||
def is_regtest(self):
|
||||
return isinstance(self.ledger, RegTestLedger)
|
||||
|
||||
@property
|
||||
def rpc_url(self):
|
||||
return (
|
||||
f'http://{self.conf.lbrycrd_rpc_user}:{self.conf.lbrycrd_rpc_pass}'
|
||||
f'@{self.conf.lbrycrd_rpc_host}:{self.conf.lbrycrd_rpc_port}/'
|
||||
)
|
||||
|
||||
@property
|
||||
def exists(self):
|
||||
return (
|
||||
os.path.exists(self.cli_bin) and
|
||||
os.path.exists(self.daemon_bin)
|
||||
)
|
||||
|
||||
async def download(self):
|
||||
downloaded_file = os.path.join(
|
||||
self.bin_dir, DOWNLOAD_URL[DOWNLOAD_URL.rfind('/')+1:]
|
||||
)
|
||||
|
||||
if not os.path.exists(self.bin_dir):
|
||||
os.mkdir(self.bin_dir)
|
||||
|
||||
if not os.path.exists(downloaded_file):
|
||||
log.info('Downloading: %s', DOWNLOAD_URL)
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(DOWNLOAD_URL) as response:
|
||||
with open(downloaded_file, 'wb') as out_file:
|
||||
while True:
|
||||
chunk = await response.content.read(4096)
|
||||
if not chunk:
|
||||
break
|
||||
out_file.write(chunk)
|
||||
with urllib.request.urlopen(DOWNLOAD_URL) as response:
|
||||
with open(downloaded_file, 'wb') as out_file:
|
||||
shutil.copyfileobj(response, out_file)
|
||||
|
||||
log.info('Extracting: %s', downloaded_file)
|
||||
|
||||
with zipfile.ZipFile(downloaded_file) as dotzip:
|
||||
dotzip.extractall(self.bin_dir)
|
||||
# zipfile bug https://bugs.python.org/issue15795
|
||||
os.chmod(self.cli_bin, 0o755)
|
||||
os.chmod(self.daemon_bin, 0o755)
|
||||
|
||||
return self.exists
|
||||
|
||||
async def ensure(self):
|
||||
return self.exists or await self.download()
|
||||
|
||||
def get_start_command(self, *args):
|
||||
if self.is_regtest:
|
||||
args += ('-regtest',)
|
||||
if self.conf.lbrycrd_zmq_blocks:
|
||||
args += (f'-zmqpubhashblock={self.conf.lbrycrd_zmq_blocks}',)
|
||||
return (
|
||||
self.daemon_bin,
|
||||
f'-datadir={self.data_dir}',
|
||||
f'-port={self.conf.lbrycrd_peer_port}',
|
||||
f'-rpcport={self.conf.lbrycrd_rpc_port}',
|
||||
f'-rpcuser={self.conf.lbrycrd_rpc_user}',
|
||||
f'-rpcpassword={self.conf.lbrycrd_rpc_pass}',
|
||||
'-server', '-printtoconsole',
|
||||
*args
|
||||
)
|
||||
|
||||
async def open(self):
|
||||
self.session = aiohttp.ClientSession()
|
||||
await self.db.open()
|
||||
|
||||
async def close(self):
|
||||
await self.db.close()
|
||||
if self.session is not None:
|
||||
await self.session.close()
|
||||
|
||||
async def start(self, *args):
|
||||
loop = asyncio.get_running_loop()
|
||||
command = self.get_start_command(*args)
|
||||
log.info(' '.join(command))
|
||||
self.transport, self.protocol = await loop.subprocess_exec(Process, *command)
|
||||
await self.protocol.ready.wait()
|
||||
assert not self.protocol.stopped.is_set()
|
||||
await self.open()
|
||||
|
||||
async def stop(self, cleanup=True):
|
||||
try:
|
||||
await self.close()
|
||||
self.transport.terminate()
|
||||
await self.protocol.stopped.wait()
|
||||
assert self.transport.get_returncode() == 0, "lbrycrd daemon exit with error"
|
||||
self.transport.close()
|
||||
finally:
|
||||
if cleanup:
|
||||
await self.cleanup()
|
||||
|
||||
async def cleanup(self):
|
||||
await asyncio.get_running_loop().run_in_executor(
|
||||
None, shutil.rmtree, self.data_dir, True
|
||||
)
|
||||
|
||||
async def ensure_subscribable(self):
|
||||
zmq_notifications = await self.get_zmq_notifications()
|
||||
subs = {e['type']: e['address'] for e in zmq_notifications}
|
||||
if ZMQ_BLOCK_EVENT not in subs:
|
||||
raise LbrycrdEventSubscriptionError(ZMQ_BLOCK_EVENT)
|
||||
if not self.conf.lbrycrd_zmq_blocks:
|
||||
self.conf.lbrycrd_zmq_blocks = subs[ZMQ_BLOCK_EVENT]
|
||||
|
||||
async def subscribe(self):
|
||||
if not self.subscribed:
|
||||
self.subscribed = True
|
||||
ctx = zmq.asyncio.Context.instance()
|
||||
sock = ctx.socket(zmq.SUB) # pylint: disable=no-member
|
||||
sock.connect(self.conf.lbrycrd_zmq_blocks)
|
||||
sock.subscribe("hashblock")
|
||||
self.subscription = asyncio.create_task(self.subscription_handler(sock))
|
||||
|
||||
async def subscription_handler(self, sock):
|
||||
try:
|
||||
while self.subscribed:
|
||||
msg = await sock.recv_multipart()
|
||||
await self._on_block_controller.add({
|
||||
'hash': msg[1],
|
||||
'msg': struct.unpack('<I', msg[2])[0]
|
||||
})
|
||||
except asyncio.CancelledError:
|
||||
sock.close()
|
||||
raise
|
||||
|
||||
def unsubscribe(self):
|
||||
if self.subscribed:
|
||||
self.subscribed = False
|
||||
self.subscription.cancel()
|
||||
self.subscription = None
|
||||
|
||||
async def rpc(self, method, params=None):
|
||||
message = {
|
||||
"jsonrpc": "1.0",
|
||||
"id": "1",
|
||||
"method": method,
|
||||
"params": params or []
|
||||
}
|
||||
async with self.session.post(self.rpc_url, json=message) as resp:
|
||||
if resp.status == 401:
|
||||
raise LbrycrdUnauthorizedError()
|
||||
try:
|
||||
result = await resp.json()
|
||||
except aiohttp.ContentTypeError as e:
|
||||
raise Exception(await resp.text()) from e
|
||||
if not result['error']:
|
||||
return result['result']
|
||||
else:
|
||||
result['error'].update(method=method, params=params)
|
||||
raise Exception(result['error'])
|
||||
|
||||
async def get_zmq_notifications(self):
|
||||
return await self.rpc("getzmqnotifications")
|
||||
|
||||
async def generate(self, blocks):
|
||||
if self.default_generate_address is None:
|
||||
self.default_generate_address = await self.get_new_address()
|
||||
return await self.generate_to_address(blocks, self.default_generate_address)
|
||||
|
||||
async def get_new_address(self):
|
||||
return await self.rpc("getnewaddress")
|
||||
|
||||
async def generate_to_address(self, blocks, address):
|
||||
return await self.rpc("generatetoaddress", [blocks, address])
|
||||
|
||||
async def send_to_address(self, address, amount):
|
||||
return await self.rpc("sendtoaddress", [address, amount])
|
||||
|
||||
async def get_block(self, block_hash):
|
||||
return await self.rpc("getblock", [block_hash])
|
||||
|
||||
async def get_raw_transaction(self, txid):
|
||||
return await self.rpc("getrawtransaction", [txid])
|
||||
|
||||
async def fund_raw_transaction(self, tx):
|
||||
return await self.rpc("fundrawtransaction", [tx])
|
||||
|
||||
async def sign_raw_transaction_with_wallet(self, tx):
|
||||
return await self.rpc("signrawtransactionwithwallet", [tx])
|
||||
|
||||
async def send_raw_transaction(self, tx):
|
||||
return await self.rpc("sendrawtransaction", [tx])
|
||||
|
||||
async def claim_name(self, name, data, amount):
|
||||
return await self.rpc("claimname", [name, data, amount])
|
||||
|
||||
async def update_claim(self, txid, data, amount):
|
||||
return await self.rpc("updateclaim", [txid, data, amount])
|
||||
|
||||
async def abandon_claim(self, txid, address):
|
||||
return await self.rpc("abandonclaim", [txid, address])
|
||||
|
||||
async def support_claim(self, name, claim_id, amount, value="", istip=False):
|
||||
return await self.rpc("supportclaim", [name, claim_id, amount, value, istip])
|
||||
|
||||
async def abandon_support(self, txid, address):
|
||||
return await self.rpc("abandonsupport", [txid, address])
|
166  lbry/blockchain/ledger.py  Normal file
|
@ -0,0 +1,166 @@
|
|||
import typing
|
||||
from binascii import unhexlify
|
||||
from string import hexdigits
|
||||
|
||||
from lbry.crypto.hash import hash160, double_sha256
|
||||
from lbry.crypto.base58 import Base58
|
||||
from lbry.schema.url import URL
|
||||
from .header import Headers, UnvalidatedHeaders
|
||||
from .checkpoints import HASHES
|
||||
from .dewies import lbc_to_dewies
|
||||
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.conf import Config
|
||||
|
||||
|
||||
class Ledger:
|
||||
name = 'LBRY Credits'
|
||||
symbol = 'LBC'
|
||||
network_name = 'mainnet'
|
||||
|
||||
headers_class = Headers
|
||||
|
||||
secret_prefix = bytes((0x1c,))
|
||||
pubkey_address_prefix = bytes((0x55,))
|
||||
script_address_prefix = bytes((0x7a,))
|
||||
extended_public_key_prefix = unhexlify('0488b21e')
|
||||
extended_private_key_prefix = unhexlify('0488ade4')
|
||||
|
||||
max_target = 0x0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
|
||||
genesis_hash = '9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463'
|
||||
genesis_bits = 0x1f00ffff
|
||||
target_timespan = 150
|
||||
|
||||
fee_per_byte = 50
|
||||
fee_per_name_char = 200000
|
||||
|
||||
checkpoints = HASHES
|
||||
|
||||
def __init__(self, conf: 'Config'):
|
||||
self.conf = conf
|
||||
self.coin_selection_strategy = None
|
||||
|
||||
@classmethod
|
||||
def get_id(cls):
|
||||
return '{}_{}'.format(cls.symbol.lower(), cls.network_name.lower())
|
||||
|
||||
@classmethod
|
||||
def hash160_to_address(cls, h160):
|
||||
raw_address = cls.pubkey_address_prefix + h160
|
||||
return Base58.encode(bytearray(raw_address + double_sha256(raw_address)[0:4]))
|
||||
|
||||
@staticmethod
|
||||
def address_to_hash160(address):
|
||||
return Base58.decode(address)[1:21]
|
||||
|
||||
@classmethod
|
||||
def is_valid_address(cls, address):
|
||||
decoded = Base58.decode_check(address)
|
||||
return decoded[0] == cls.pubkey_address_prefix[0]
|
||||
|
||||
@classmethod
|
||||
def valid_address_or_error(cls, address):
|
||||
try:
|
||||
assert cls.is_valid_address(address)
|
||||
except:
|
||||
raise Exception(f"'{address}' is not a valid address")
|
||||
|
||||
@staticmethod
|
||||
def valid_claim_id(claim_id: str):
|
||||
if not len(claim_id) == 40:
|
||||
raise Exception(f"Incorrect claimid length: {len(claim_id)}")
|
||||
if set(claim_id).difference(hexdigits):
|
||||
raise Exception("Claim id is not hex encoded")
|
||||
|
||||
@staticmethod
|
||||
def valid_channel_name_or_error(name: str):
|
||||
try:
|
||||
if not name:
|
||||
raise Exception("Channel name cannot be blank.")
|
||||
parsed = URL.parse(name)
|
||||
if not parsed.has_channel:
|
||||
raise Exception("Channel names must start with '@' symbol.")
|
||||
if parsed.channel.name != name:
|
||||
raise Exception("Channel name has invalid character")
|
||||
except (TypeError, ValueError):
|
||||
raise Exception("Invalid channel name.")
|
||||
|
||||
@staticmethod
|
||||
def valid_stream_name_or_error(name: str):
|
||||
try:
|
||||
if not name:
|
||||
raise Exception('Stream name cannot be blank.')
|
||||
parsed = URL.parse(name)
|
||||
if parsed.has_channel:
|
||||
raise Exception(
|
||||
"Stream names cannot start with '@' symbol. This is reserved for channels claims."
|
||||
)
|
||||
if not parsed.has_stream or parsed.stream.name != name:
|
||||
raise Exception('Stream name has invalid characters.')
|
||||
except (TypeError, ValueError):
|
||||
raise Exception("Invalid stream name.")
|
||||
|
||||
@staticmethod
|
||||
def valid_collection_name_or_error(name: str):
|
||||
try:
|
||||
if not name:
|
||||
raise Exception('Collection name cannot be blank.')
|
||||
parsed = URL.parse(name)
|
||||
if parsed.has_channel:
|
||||
raise Exception(
|
||||
"Collection names cannot start with '@' symbol. This is reserved for channels claims."
|
||||
)
|
||||
if not parsed.has_stream or parsed.stream.name != name:
|
||||
raise Exception('Collection name has invalid characters.')
|
||||
except (TypeError, ValueError):
|
||||
raise Exception("Invalid collection name.")
|
||||
|
||||
@staticmethod
|
||||
def get_dewies_or_error(argument: str, lbc: str, positive_value=False):
|
||||
try:
|
||||
dewies = lbc_to_dewies(lbc)
|
||||
if positive_value and dewies <= 0:
|
||||
raise ValueError(f"'{argument}' value must be greater than 0.0")
|
||||
return dewies
|
||||
except ValueError as e:
|
||||
raise ValueError(f"Invalid value for '{argument}': {e.args[0]}")
|
||||
|
||||
def get_fee_address(self, kwargs: dict, claim_address: str) -> str:
|
||||
if 'fee_address' in kwargs:
|
||||
self.valid_address_or_error(kwargs['fee_address'])
|
||||
return kwargs['fee_address']
|
||||
if 'fee_currency' in kwargs or 'fee_amount' in kwargs:
|
||||
return claim_address
|
||||
|
||||
@classmethod
|
||||
def public_key_to_address(cls, public_key):
|
||||
return cls.hash160_to_address(hash160(public_key))
|
||||
|
||||
@staticmethod
|
||||
def private_key_to_wif(private_key):
|
||||
return b'\x1c' + private_key + b'\x01'
|
||||
|
||||
|
||||
class TestNetLedger(Ledger):
|
||||
network_name = 'testnet'
|
||||
pubkey_address_prefix = bytes((111,))
|
||||
script_address_prefix = bytes((196,))
|
||||
extended_public_key_prefix = unhexlify('043587cf')
|
||||
extended_private_key_prefix = unhexlify('04358394')
|
||||
checkpoints = {}
|
||||
|
||||
|
||||
class RegTestLedger(Ledger):
|
||||
network_name = 'regtest'
|
||||
headers_class = UnvalidatedHeaders
|
||||
pubkey_address_prefix = bytes((111,))
|
||||
script_address_prefix = bytes((196,))
|
||||
extended_public_key_prefix = unhexlify('043587cf')
|
||||
extended_private_key_prefix = unhexlify('04358394')
|
||||
|
||||
max_target = 0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
|
||||
genesis_hash = '6e3fcf1299d4ec5d79c3a4c91d624a4acf9e2e173d95a1a0504f677669687556'
|
||||
genesis_bits = 0x207fffff
|
||||
target_timespan = 1
|
||||
checkpoints = {}
|
|
@ -294,20 +294,25 @@ class Template:
|
|||
|
||||
class Script:
|
||||
|
||||
__slots__ = 'source', '_template', '_values', '_template_hint'
|
||||
__slots__ = 'source', 'offset', '_template', '_values', '_template_hint'
|
||||
|
||||
templates: List[Template] = []
|
||||
|
||||
NO_SCRIPT = Template('no_script', None) # special case
|
||||
|
||||
def __init__(self, source=None, template=None, values=None, template_hint=None):
|
||||
def __init__(self, source=None, template=None, values=None, template_hint=None, offset=None):
|
||||
self.source = source
|
||||
self.offset = offset
|
||||
self._template = template
|
||||
self._values = values
|
||||
self._template_hint = template_hint
|
||||
if source is None and template and values:
|
||||
self.generate()
|
||||
|
||||
@property
|
||||
def length(self):
|
||||
return len(self.source)
|
||||
|
||||
@property
|
||||
def template(self):
|
||||
if self._template is None:
|
||||
|
@ -438,6 +443,17 @@ class OutputScript(Script):
|
|||
SUPPORT_CLAIM_OPCODES + PAY_SCRIPT_HASH.opcodes
|
||||
))
|
||||
|
||||
SUPPORT_CLAIM_DATA_OPCODES = (
|
||||
OP_SUPPORT_CLAIM, PUSH_SINGLE('claim_name'), PUSH_SINGLE('claim_id'), PUSH_SINGLE('support'),
|
||||
OP_2DROP, OP_2DROP
|
||||
)
|
||||
SUPPORT_CLAIM_DATA_PUBKEY = Template('support_claim+data+pay_pubkey_hash', (
|
||||
SUPPORT_CLAIM_DATA_OPCODES + PAY_PUBKEY_HASH.opcodes
|
||||
))
|
||||
SUPPORT_CLAIM_DATA_SCRIPT = Template('support_claim+data+pay_script_hash', (
|
||||
SUPPORT_CLAIM_DATA_OPCODES + PAY_SCRIPT_HASH.opcodes
|
||||
))
|
||||
|
||||
UPDATE_CLAIM_OPCODES = (
|
||||
OP_UPDATE_CLAIM, PUSH_SINGLE('claim_name'), PUSH_SINGLE('claim_id'), PUSH_SINGLE('claim'),
|
||||
OP_2DROP, OP_2DROP
|
||||
|
@ -474,6 +490,8 @@ class OutputScript(Script):
|
|||
CLAIM_NAME_SCRIPT,
|
||||
SUPPORT_CLAIM_PUBKEY,
|
||||
SUPPORT_CLAIM_SCRIPT,
|
||||
SUPPORT_CLAIM_DATA_PUBKEY,
|
||||
SUPPORT_CLAIM_DATA_SCRIPT,
|
||||
UPDATE_CLAIM_PUBKEY,
|
||||
UPDATE_CLAIM_SCRIPT,
|
||||
SELL_CLAIM, SELL_SCRIPT,
|
||||
|
@ -527,6 +545,16 @@ class OutputScript(Script):
|
|||
'pubkey_hash': pubkey_hash
|
||||
})
|
||||
|
||||
@classmethod
|
||||
def pay_support_data_pubkey_hash(
|
||||
cls, claim_name: bytes, claim_id: bytes, support, pubkey_hash: bytes):
|
||||
return cls(template=cls.SUPPORT_CLAIM_DATA_PUBKEY, values={
|
||||
'claim_name': claim_name,
|
||||
'claim_id': claim_id,
|
||||
'support': support,
|
||||
'pubkey_hash': pubkey_hash
|
||||
})
|
||||
|
||||
@classmethod
|
||||
def sell_script(cls, price):
|
||||
return cls(template=cls.SELL_SCRIPT, values={
|
||||
|
@ -575,6 +603,10 @@ class OutputScript(Script):
|
|||
def is_support_claim(self):
|
||||
return self.template.name.startswith('support_claim+')
|
||||
|
||||
@property
|
||||
def is_support_claim_data(self):
|
||||
return self.template.name.startswith('support_claim+data+')
|
||||
|
||||
@property
|
||||
def is_sell_claim(self):
|
||||
return self.template.name.startswith('sell_claim+')
|
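A rough usage sketch for the support-with-data templates added above; the import path and every byte value are assumptions for illustration, and at the OutputScript level 'support' is just an opaque payload pushed by PUSH_SINGLE('support'):

from lbry.blockchain.script import OutputScript  # path assumed from this branch's layout

script = OutputScript.pay_support_data_pubkey_hash(
    b'example',              # claim_name being supported
    b'\x00' * 20,            # claim_id placeholder (20 bytes, reversed on the wire)
    b'serialized-support',   # opaque support payload bytes
    b'\x00' * 20,            # pubkey_hash placeholder (hash160)
)
assert script.is_support_claim and script.is_support_claim_data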
lbry/blockchain/sync/__init__.py (new file, 1 line)
|
@ -0,0 +1 @@
|
|||
from .synchronizer import BlockchainSync
|
lbry/blockchain/sync/blocks.py (new file, 211 lines)
|
@ -0,0 +1,211 @@
|
|||
import logging
|
||||
|
||||
from sqlalchemy import table, bindparam, text, func, union
|
||||
from sqlalchemy.future import select
|
||||
from sqlalchemy.schema import CreateTable
|
||||
|
||||
from lbry.db.tables import Block as BlockTable, TX, TXO, TXI, Claim, Tag, Support
|
||||
from lbry.db.tables import (
|
||||
pg_add_tx_constraints_and_indexes,
|
||||
pg_add_txo_constraints_and_indexes,
|
||||
pg_add_txi_constraints_and_indexes,
|
||||
)
|
||||
from lbry.db.query_context import ProgressContext, event_emitter, context
|
||||
from lbry.db.sync import set_input_addresses, update_spent_outputs
|
||||
from lbry.blockchain.block import Block, create_block_filter
|
||||
from lbry.blockchain.bcd_data_stream import BCDataStream
|
||||
|
||||
from .context import get_or_initialize_lbrycrd
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_best_block_height_for_file(file_number):
|
||||
return context().fetchone(
|
||||
select(func.coalesce(func.max(BlockTable.c.height), -1).label('height'))
|
||||
.where(BlockTable.c.file_number == file_number)
|
||||
)['height']
|
||||
|
||||
|
||||
@event_emitter("blockchain.sync.blocks.file", "blocks", "txs", throttle=100)
|
||||
def sync_block_file(
|
||||
file_number: int, start_height: int, txs: int, flush_size: int, p: ProgressContext
|
||||
):
|
||||
chain = get_or_initialize_lbrycrd(p.ctx)
|
||||
new_blocks = chain.db.sync_get_blocks_in_file(file_number, start_height)
|
||||
if not new_blocks:
|
||||
return -1
|
||||
file_name = chain.get_block_file_name(file_number)
|
||||
p.start(len(new_blocks), txs, progress_id=file_number, label=file_name)
|
||||
block_file_path = chain.get_block_file_path(file_number)
|
||||
done_blocks = done_txs = 0
|
||||
last_block_processed, loader = -1, p.ctx.get_bulk_loader()
|
||||
with open(block_file_path, "rb") as fp:
|
||||
stream = BCDataStream(fp=fp)
|
||||
for done_blocks, block_info in enumerate(new_blocks, start=1):
|
||||
block_height = block_info["height"]
|
||||
fp.seek(block_info["data_offset"])
|
||||
block = Block.from_data_stream(stream, block_height, file_number)
|
||||
loader.add_block(block)
|
||||
if len(loader.txs) >= flush_size:
|
||||
done_txs += loader.flush(TX)
|
||||
p.step(done_blocks, done_txs)
|
||||
last_block_processed = block_height
|
||||
if p.ctx.stop_event.is_set():
|
||||
return last_block_processed
|
||||
if loader.txs:
|
||||
done_txs += loader.flush(TX)
|
||||
p.step(done_blocks, done_txs)
|
||||
return last_block_processed
|
||||
|
||||
|
||||
@event_emitter("blockchain.sync.spends.main", "steps")
|
||||
def sync_spends(initial_sync: bool, p: ProgressContext):
|
||||
if initial_sync:
|
||||
p.start(
|
||||
7 +
|
||||
len(pg_add_tx_constraints_and_indexes) +
|
||||
len(pg_add_txi_constraints_and_indexes) +
|
||||
len(pg_add_txo_constraints_and_indexes)
|
||||
)
|
||||
# 1. vacuum the tx table and add its constraints and indexes
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute_notx(text("VACUUM ANALYZE tx;"))
|
||||
p.step()
|
||||
for constraint in pg_add_tx_constraints_and_indexes:
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute(text(constraint))
|
||||
p.step()
|
||||
# A. Update TXIs to have the address of TXO they are spending.
|
||||
# 2. txi table reshuffling
|
||||
p.ctx.execute(text("ALTER TABLE txi RENAME TO old_txi;"))
|
||||
p.ctx.execute(CreateTable(TXI, include_foreign_key_constraints=[]))
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute(text("ALTER TABLE txi DROP CONSTRAINT txi_pkey;"))
|
||||
p.step()
|
||||
# 3. insert
|
||||
old_txi = table("old_txi", *(c.copy() for c in TXI.columns)) # pylint: disable=not-an-iterable
|
||||
columns = [c for c in old_txi.columns if c.name != "address"] + [TXO.c.address]
|
||||
join_txi_on_txo = old_txi.join(TXO, old_txi.c.txo_hash == TXO.c.txo_hash)
|
||||
select_txis = select(*columns).select_from(join_txi_on_txo)
|
||||
insert_txis = TXI.insert().from_select(columns, select_txis)
|
||||
p.ctx.execute(insert_txis)
|
||||
p.step()
|
||||
# 4. drop old txi and vacuum
|
||||
p.ctx.execute(text("DROP TABLE old_txi;"))
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute_notx(text("VACUUM ANALYZE txi;"))
|
||||
p.step()
|
||||
for constraint in pg_add_txi_constraints_and_indexes:
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute(text(constraint))
|
||||
p.step()
|
||||
# B. Update TXOs to have the height at which they were spent (if they were).
|
||||
# 5. txo table reshuffling
|
||||
p.ctx.execute(text("ALTER TABLE txo RENAME TO old_txo;"))
|
||||
p.ctx.execute(CreateTable(TXO, include_foreign_key_constraints=[]))
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute(text("ALTER TABLE txo DROP CONSTRAINT txo_pkey;"))
|
||||
p.step()
|
||||
# 6. insert
|
||||
old_txo = table("old_txo", *(c.copy() for c in TXO.columns)) # pylint: disable=not-an-iterable
|
||||
columns = [c for c in old_txo.columns if c.name != "spent_height"]
|
||||
insert_columns = columns + [TXO.c.spent_height]
|
||||
select_columns = columns + [func.coalesce(TXI.c.height, 0).label("spent_height")]
|
||||
join_txo_on_txi = old_txo.join(TXI, old_txo.c.txo_hash == TXI.c.txo_hash, isouter=True)
|
||||
select_txos = select(*select_columns).select_from(join_txo_on_txi)
|
||||
insert_txos = TXO.insert().from_select(insert_columns, select_txos)
|
||||
p.ctx.execute(insert_txos)
|
||||
p.step()
|
||||
# 7. drop old txo
|
||||
p.ctx.execute(text("DROP TABLE old_txo;"))
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute_notx(text("VACUUM ANALYZE txo;"))
|
||||
p.step()
|
||||
for constraint in pg_add_txo_constraints_and_indexes:
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute(text(constraint))
|
||||
p.step()
|
||||
else:
|
||||
p.start(5)
|
||||
# 1. Update spent TXOs setting spent_height
|
||||
update_spent_outputs(p.ctx)
|
||||
p.step()
|
||||
# 2. Update TXIs to have the address of TXO they are spending.
|
||||
set_input_addresses(p.ctx)
|
||||
p.step()
|
||||
# 3. Update tx visibility map, which speeds up index-only scans.
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute_notx(text("VACUUM tx;"))
|
||||
p.step()
|
||||
# 4. Update txi visibility map, which speeds up index-only scans.
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute_notx(text("VACUUM txi;"))
|
||||
p.step()
|
||||
# 5. Update txo visibility map, which speeds up index-only scans.
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute_notx(text("VACUUM txo;"))
|
||||
p.step()
|
||||
|
||||
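The initial-sync branch above avoids slow row-by-row UPDATEs by swapping whole tables: rename the populated table aside, recreate it empty, refill it with a single INSERT ... SELECT that joins in the new column, then drop the old copy and re-add constraints. A self-contained sketch of that swap with SQLAlchemy Core on in-memory SQLite (toy two-column tables, not the real schema):

from sqlalchemy import MetaData, Table, Column, Text, create_engine, select, text

engine = create_engine("sqlite://")
metadata = MetaData()
txo = Table("txo", metadata, Column("txo_hash", Text), Column("address", Text))
txi = Table("txi", metadata, Column("txo_hash", Text), Column("address", Text))
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(txo.insert(), [{"txo_hash": "aa", "address": "bPayToAddr"}])
    conn.execute(txi.insert(), [{"txo_hash": "aa", "address": None}])

    # 1. move the populated table aside and recreate an empty copy
    conn.execute(text("ALTER TABLE txi RENAME TO old_txi"))
    txi.create(conn)

    # 2. refill in one INSERT ... SELECT, pulling the address from the joined txo row
    old_txi = Table("old_txi", MetaData(), Column("txo_hash", Text), Column("address", Text))
    rows = select(old_txi.c.txo_hash, txo.c.address).select_from(
        old_txi.join(txo, old_txi.c.txo_hash == txo.c.txo_hash)
    )
    conn.execute(txi.insert().from_select(["txo_hash", "address"], rows))

    # 3. drop the old copy
    conn.execute(text("DROP TABLE old_txi"))

    assert conn.execute(select(txi.c.address)).scalar_one() == "bPayToAddr"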
|
||||
@event_emitter("blockchain.sync.filter.generate", "blocks")
|
||||
def sync_block_filters(p: ProgressContext):
|
||||
blocks = []
|
||||
all_filters = []
|
||||
all_addresses = []
|
||||
for block in get_blocks_without_filters():
|
||||
addresses = {
|
||||
p.ctx.ledger.address_to_hash160(r["address"])
|
||||
for r in get_block_tx_addresses(block_hash=block["block_hash"])
|
||||
}
|
||||
all_addresses.extend(addresses)
|
||||
block_filter = create_block_filter(addresses)
|
||||
all_filters.append(block_filter)
|
||||
blocks.append({"pk": block["block_hash"], "block_filter": block_filter})
|
||||
p.ctx.execute(
|
||||
BlockTable.update().where(BlockTable.c.block_hash == bindparam("pk")), blocks
|
||||
)
|
||||
|
||||
|
||||
def get_blocks_without_filters():
|
||||
return context().fetchall(
|
||||
select(BlockTable.c.block_hash)
|
||||
.where(BlockTable.c.block_filter.is_(None))
|
||||
)
|
||||
|
||||
|
||||
def get_block_tx_addresses(block_hash=None, tx_hash=None):
|
||||
if block_hash is not None:
|
||||
constraint = (TX.c.block_hash == block_hash)
|
||||
elif tx_hash is not None:
|
||||
constraint = (TX.c.tx_hash == tx_hash)
|
||||
else:
|
||||
raise ValueError('block_hash or tx_hash must be provided.')
|
||||
return context().fetchall(
|
||||
union(
|
||||
select(TXO.c.address).select_from(TXO.join(TX))
|
||||
.where((TXO.c.address.isnot(None)) & constraint),
|
||||
select(TXI.c.address).select_from(TXI.join(TX))
|
||||
.where((TXI.c.address.isnot(None)) & constraint),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@event_emitter("blockchain.sync.rewind.main", "steps")
|
||||
def rewind(height: int, p: ProgressContext):
|
||||
deletes = [
|
||||
BlockTable.delete().where(BlockTable.c.height >= height),
|
||||
TXI.delete().where(TXI.c.height >= height),
|
||||
TXO.delete().where(TXO.c.height >= height),
|
||||
TX.delete().where(TX.c.height >= height),
|
||||
Tag.delete().where(
|
||||
Tag.c.claim_hash.in_(
|
||||
select(Claim.c.claim_hash).where(Claim.c.height >= height)
|
||||
)
|
||||
),
|
||||
Claim.delete().where(Claim.c.height >= height),
|
||||
Support.delete().where(Support.c.height >= height),
|
||||
]
|
||||
for delete in p.iter(deletes):
|
||||
p.ctx.execute(delete)
|
lbry/blockchain/sync/claims.py (new file, 257 lines)
|
@ -0,0 +1,257 @@
|
|||
import logging
|
||||
from typing import Tuple
|
||||
|
||||
from sqlalchemy import case, func, desc, text
|
||||
from sqlalchemy.future import select
|
||||
|
||||
from lbry.db.queries.txio import (
|
||||
minimum_txo_columns, row_to_txo,
|
||||
where_unspent_txos, where_claims_with_changed_supports,
|
||||
count_unspent_txos, where_channels_with_changed_content,
|
||||
where_abandoned_claims, count_channels_with_changed_content
|
||||
)
|
||||
from lbry.db.query_context import ProgressContext, event_emitter
|
||||
from lbry.db.tables import TX, TXO, Claim, Support, pg_add_claim_and_tag_constraints_and_indexes
|
||||
from lbry.db.utils import least
|
||||
from lbry.db.constants import TXO_TYPES, CLAIM_TYPE_CODES
|
||||
from lbry.blockchain.transaction import Output
|
||||
|
||||
from .context import get_or_initialize_lbrycrd
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def channel_content_count_calc(signable):
|
||||
return (
|
||||
select(func.count(signable.c.claim_hash))
|
||||
.where((signable.c.channel_hash == Claim.c.claim_hash) & signable.c.is_signature_valid)
|
||||
.scalar_subquery()
|
||||
)
|
||||
|
||||
|
||||
support = TXO.alias('support')
|
||||
|
||||
|
||||
def staked_support_aggregation(aggregate):
|
||||
return (
|
||||
select(aggregate).where(
|
||||
(support.c.txo_type == TXO_TYPES['support']) &
|
||||
(support.c.spent_height == 0)
|
||||
).scalar_subquery()
|
||||
)
|
||||
|
||||
|
||||
def staked_support_amount_calc(other):
|
||||
return (
|
||||
staked_support_aggregation(func.coalesce(func.sum(support.c.amount), 0))
|
||||
.where(support.c.claim_hash == other.c.claim_hash)
|
||||
)
|
||||
|
||||
|
||||
def staked_support_count_calc(other):
|
||||
return (
|
||||
staked_support_aggregation(func.coalesce(func.count('*'), 0))
|
||||
.where(support.c.claim_hash == other.c.claim_hash)
|
||||
)
|
||||
|
||||
|
||||
def make_label(action, blocks):
|
||||
if blocks[0] == blocks[-1]:
|
||||
return f"{action} {blocks[0]:>6}"
|
||||
else:
|
||||
return f"{action} {blocks[0]:>6}-{blocks[-1]:>6}"
|
||||
|
||||
|
||||
def select_claims_for_saving(
|
||||
blocks: Tuple[int, int],
|
||||
missing_in_claims_table=False,
|
||||
missing_or_stale_in_claims_table=False,
|
||||
):
|
||||
channel_txo = TXO.alias('channel_txo')
|
||||
return select(
|
||||
*minimum_txo_columns, TXO.c.claim_hash,
|
||||
staked_support_amount_calc(TXO).label('staked_support_amount'),
|
||||
staked_support_count_calc(TXO).label('staked_support_count'),
|
||||
TXO.c.signature, TXO.c.signature_digest,
|
||||
case([(
|
||||
TXO.c.channel_hash.isnot(None),
|
||||
select(channel_txo.c.public_key).select_from(channel_txo).where(
|
||||
(channel_txo.c.txo_type == TXO_TYPES['channel']) &
|
||||
(channel_txo.c.claim_hash == TXO.c.channel_hash) &
|
||||
(channel_txo.c.height <= TXO.c.height)
|
||||
).order_by(desc(channel_txo.c.height)).limit(1).scalar_subquery()
|
||||
)]).label('channel_public_key')
|
||||
).where(
|
||||
where_unspent_txos(
|
||||
CLAIM_TYPE_CODES, blocks,
|
||||
missing_in_claims_table=missing_in_claims_table,
|
||||
missing_or_stale_in_claims_table=missing_or_stale_in_claims_table,
|
||||
)
|
||||
).select_from(TXO.join(TX))
|
||||
|
||||
|
||||
def row_to_claim_for_saving(row) -> Tuple[Output, dict]:
|
||||
return row_to_txo(row), {
|
||||
'staked_support_amount': int(row.staked_support_amount),
|
||||
'staked_support_count': int(row.staked_support_count),
|
||||
'signature': row.signature,
|
||||
'signature_digest': row.signature_digest,
|
||||
'channel_public_key': row.channel_public_key
|
||||
}
|
||||
|
||||
|
||||
@event_emitter("blockchain.sync.claims.insert", "claims")
|
||||
def claims_insert(
|
||||
blocks: Tuple[int, int],
|
||||
missing_in_claims_table: bool,
|
||||
flush_size: int,
|
||||
p: ProgressContext
|
||||
):
|
||||
chain = get_or_initialize_lbrycrd(p.ctx)
|
||||
|
||||
p.start(
|
||||
count_unspent_txos(
|
||||
CLAIM_TYPE_CODES, blocks,
|
||||
missing_in_claims_table=missing_in_claims_table,
|
||||
), progress_id=blocks[0], label=make_label("add claims", blocks)
|
||||
)
|
||||
|
||||
with p.ctx.connect_streaming() as c:
|
||||
loader = p.ctx.get_bulk_loader()
|
||||
cursor = c.execute(select_claims_for_saving(
|
||||
blocks, missing_in_claims_table=missing_in_claims_table
|
||||
).order_by(TXO.c.claim_hash))
|
||||
for rows in cursor.partitions(900):
|
||||
claim_metadata = chain.db.sync_get_claim_metadata(
|
||||
claim_hashes=[row['claim_hash'] for row in rows]
|
||||
)
|
||||
i = 0
|
||||
for row in rows:
|
||||
metadata = claim_metadata[i] if i < len(claim_metadata) else {}
|
||||
if metadata and metadata['claim_hash'] == row.claim_hash:
|
||||
i += 1
|
||||
txo, extra = row_to_claim_for_saving(row)
|
||||
extra.update({
|
||||
'short_url': metadata.get('short_url'),
|
||||
'creation_height': metadata.get('creation_height'),
|
||||
'activation_height': metadata.get('activation_height'),
|
||||
'expiration_height': metadata.get('expiration_height'),
|
||||
'takeover_height': metadata.get('takeover_height'),
|
||||
})
|
||||
loader.add_claim(txo, **extra)
|
||||
if len(loader.claims) >= flush_size:
|
||||
p.add(loader.flush(Claim))
|
||||
p.add(loader.flush(Claim))
|
||||
|
||||
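claims_insert above merges two streams that are both ordered by claim_hash: the 900-row cursor partitions and the claim_metadata list fetched from lbrycrd, advancing a single index only when the hashes line up, so a claim lbrycrd returned no metadata for does not consume a metadata entry. A stripped-down sketch of just that index-advance-on-match step (hypothetical hashes; handling of unmatched rows is elided):

rows = [b'aa', b'bb', b'cc', b'dd']                      # claim hashes from the DB, in order
claim_metadata = [                                       # lbrycrd metadata, same order, b'cc' absent
    {'claim_hash': b'aa', 'short_url': 'a#1'},
    {'claim_hash': b'bb', 'short_url': 'b#1'},
    {'claim_hash': b'dd', 'short_url': 'd#1'},
]

i, matched = 0, {}
for claim_hash in rows:
    metadata = claim_metadata[i] if i < len(claim_metadata) else {}
    if metadata and metadata['claim_hash'] == claim_hash:
        matched[claim_hash] = metadata['short_url']
        i += 1                                           # consume this metadata entry

assert matched == {b'aa': 'a#1', b'bb': 'b#1', b'dd': 'd#1'}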
|
||||
@event_emitter("blockchain.sync.claims.indexes", "steps")
|
||||
def claims_constraints_and_indexes(p: ProgressContext):
|
||||
p.start(2 + len(pg_add_claim_and_tag_constraints_and_indexes))
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute_notx(text("VACUUM ANALYZE claim;"))
|
||||
p.step()
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute_notx(text("VACUUM ANALYZE tag;"))
|
||||
p.step()
|
||||
for constraint in pg_add_claim_and_tag_constraints_and_indexes:
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute(text(constraint))
|
||||
p.step()
|
||||
|
||||
|
||||
@event_emitter("blockchain.sync.claims.vacuum", "steps")
|
||||
def claims_vacuum(p: ProgressContext):
|
||||
p.start(2)
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute_notx(text("VACUUM claim;"))
|
||||
p.step()
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute_notx(text("VACUUM tag;"))
|
||||
p.step()
|
||||
|
||||
|
||||
@event_emitter("blockchain.sync.claims.update", "claims")
|
||||
def claims_update(blocks: Tuple[int, int], p: ProgressContext):
|
||||
p.start(
|
||||
count_unspent_txos(CLAIM_TYPE_CODES, blocks, missing_or_stale_in_claims_table=True),
|
||||
progress_id=blocks[0], label=make_label("mod claims", blocks)
|
||||
)
|
||||
with p.ctx.connect_streaming() as c:
|
||||
loader = p.ctx.get_bulk_loader()
|
||||
cursor = c.execute(select_claims_for_saving(
|
||||
blocks, missing_or_stale_in_claims_table=True
|
||||
))
|
||||
for row in cursor:
|
||||
txo, extra = row_to_claim_for_saving(row)
|
||||
loader.update_claim(txo, **extra)
|
||||
if len(loader.update_claims) >= 25:
|
||||
p.add(loader.flush(Claim))
|
||||
p.add(loader.flush(Claim))
|
||||
|
||||
|
||||
@event_emitter("blockchain.sync.claims.delete", "claims")
|
||||
def claims_delete(claims, p: ProgressContext):
|
||||
p.start(claims, label="del claims")
|
||||
deleted = p.ctx.execute(Claim.delete().where(where_abandoned_claims()))
|
||||
p.step(deleted.rowcount)
|
||||
|
||||
|
||||
@event_emitter("blockchain.sync.claims.takeovers", "claims")
|
||||
def update_takeovers(blocks: Tuple[int, int], takeovers, p: ProgressContext):
|
||||
p.start(takeovers, label=make_label("mod winner", blocks))
|
||||
chain = get_or_initialize_lbrycrd(p.ctx)
|
||||
with p.ctx.engine.begin() as c:
|
||||
for takeover in chain.db.sync_get_takeovers(start_height=blocks[0], end_height=blocks[-1]):
|
||||
update_claims = (
|
||||
Claim.update()
|
||||
.where(Claim.c.normalized == takeover['normalized'])
|
||||
.values(
|
||||
is_controlling=case(
|
||||
[(Claim.c.claim_hash == takeover['claim_hash'], True)],
|
||||
else_=False
|
||||
),
|
||||
takeover_height=case(
|
||||
[(Claim.c.claim_hash == takeover['claim_hash'], takeover['height'])],
|
||||
else_=None
|
||||
),
|
||||
activation_height=least(Claim.c.activation_height, takeover['height']),
|
||||
)
|
||||
)
|
||||
result = c.execute(update_claims)
|
||||
p.add(result.rowcount)
|
||||
|
||||
|
||||
@event_emitter("blockchain.sync.claims.stakes", "claims")
|
||||
def update_stakes(blocks: Tuple[int, int], claims: int, p: ProgressContext):
|
||||
p.start(claims)
|
||||
sql = (
|
||||
Claim.update()
|
||||
.where(where_claims_with_changed_supports(blocks))
|
||||
.values(
|
||||
staked_support_amount=staked_support_amount_calc(Claim),
|
||||
staked_support_count=staked_support_count_calc(Claim),
|
||||
)
|
||||
)
|
||||
result = p.ctx.execute(sql)
|
||||
p.step(result.rowcount)
|
||||
|
||||
|
||||
@event_emitter("blockchain.sync.claims.channels", "channels")
|
||||
def update_channel_stats(blocks: Tuple[int, int], initial_sync: int, p: ProgressContext):
|
||||
update_sql = Claim.update().values(
|
||||
signed_claim_count=channel_content_count_calc(Claim.alias('content')),
|
||||
signed_support_count=channel_content_count_calc(Support),
|
||||
)
|
||||
if initial_sync:
|
||||
p.start(p.ctx.fetchtotal(Claim.c.claim_type == TXO_TYPES['channel']), label="channel stats")
|
||||
update_sql = update_sql.where(Claim.c.claim_type == TXO_TYPES['channel'])
|
||||
elif blocks:
|
||||
p.start(count_channels_with_changed_content(blocks), label="channel stats")
|
||||
update_sql = update_sql.where(where_channels_with_changed_content(blocks))
|
||||
else:
|
||||
return
|
||||
result = p.ctx.execute(update_sql)
|
||||
if result.rowcount and p.ctx.is_postgres:
|
||||
p.ctx.execute_notx(text("VACUUM claim;"))
|
||||
p.step(result.rowcount)
|
lbry/blockchain/sync/context.py (new file, 17 lines)
|
@ -0,0 +1,17 @@
|
|||
from contextvars import ContextVar
|
||||
from lbry.db import query_context
|
||||
|
||||
from lbry.blockchain.lbrycrd import Lbrycrd
|
||||
|
||||
|
||||
_chain: ContextVar[Lbrycrd] = ContextVar('chain')
|
||||
|
||||
|
||||
def get_or_initialize_lbrycrd(ctx=None) -> Lbrycrd:
|
||||
chain = _chain.get(None)
|
||||
if chain is not None:
|
||||
return chain
|
||||
chain = Lbrycrd((ctx or query_context.context()).ledger)
|
||||
chain.db.sync_open()
|
||||
_chain.set(chain)
|
||||
return chain
|
lbry/blockchain/sync/supports.py (new file, 95 lines)
|
@ -0,0 +1,95 @@
|
|||
import logging
|
||||
from typing import Tuple
|
||||
|
||||
from sqlalchemy import case, desc, text
|
||||
from sqlalchemy.future import select
|
||||
|
||||
from lbry.db.tables import TX, TXO, Support, pg_add_support_constraints_and_indexes
|
||||
from lbry.db.query_context import ProgressContext, event_emitter
|
||||
from lbry.db.queries import row_to_txo
|
||||
from lbry.db.constants import TXO_TYPES
|
||||
from lbry.db.queries.txio import (
|
||||
minimum_txo_columns,
|
||||
where_unspent_txos, where_abandoned_supports,
|
||||
count_unspent_txos,
|
||||
)
|
||||
|
||||
from .claims import make_label
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@event_emitter("blockchain.sync.supports.insert", "supports")
|
||||
def supports_insert(
|
||||
blocks: Tuple[int, int],
|
||||
missing_in_supports_table: bool,
|
||||
flush_size: int,
|
||||
p: ProgressContext
|
||||
):
|
||||
p.start(
|
||||
count_unspent_txos(
|
||||
TXO_TYPES['support'], blocks,
|
||||
missing_in_supports_table=missing_in_supports_table,
|
||||
), progress_id=blocks[0], label=make_label("add supprt", blocks)
|
||||
)
|
||||
channel_txo = TXO.alias('channel_txo')
|
||||
select_supports = select(
|
||||
*minimum_txo_columns, TXO.c.claim_hash,
|
||||
TXO.c.signature, TXO.c.signature_digest,
|
||||
case([(
|
||||
TXO.c.channel_hash.isnot(None),
|
||||
select(channel_txo.c.public_key).select_from(channel_txo).where(
|
||||
(channel_txo.c.txo_type == TXO_TYPES['channel']) &
|
||||
(channel_txo.c.claim_hash == TXO.c.channel_hash) &
|
||||
(channel_txo.c.height <= TXO.c.height)
|
||||
).order_by(desc(channel_txo.c.height)).limit(1).scalar_subquery()
|
||||
)]).label('channel_public_key'),
|
||||
).select_from(
|
||||
TXO.join(TX)
|
||||
).where(
|
||||
where_unspent_txos(
|
||||
TXO_TYPES['support'], blocks,
|
||||
missing_in_supports_table=missing_in_supports_table,
|
||||
)
|
||||
)
|
||||
with p.ctx.connect_streaming() as c:
|
||||
loader = p.ctx.get_bulk_loader()
|
||||
for row in c.execute(select_supports):
|
||||
txo = row_to_txo(row)
|
||||
loader.add_support(
|
||||
txo,
|
||||
signature=row.signature,
|
||||
signature_digest=row.signature_digest,
|
||||
channel_public_key=row.channel_public_key
|
||||
)
|
||||
if len(loader.supports) >= flush_size:
|
||||
p.add(loader.flush(Support))
|
||||
p.add(loader.flush(Support))
|
||||
|
||||
|
||||
@event_emitter("blockchain.sync.supports.delete", "supports")
|
||||
def supports_delete(supports, p: ProgressContext):
|
||||
p.start(supports, label="del supprt")
|
||||
deleted = p.ctx.execute(Support.delete().where(where_abandoned_supports()))
|
||||
p.step(deleted.rowcount)
|
||||
|
||||
|
||||
@event_emitter("blockchain.sync.supports.indexes", "steps")
|
||||
def supports_constraints_and_indexes(p: ProgressContext):
|
||||
p.start(1 + len(pg_add_support_constraints_and_indexes))
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute_notx(text("VACUUM ANALYZE support;"))
|
||||
p.step()
|
||||
for constraint in pg_add_support_constraints_and_indexes:
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute(text(constraint))
|
||||
p.step()
|
||||
|
||||
|
||||
@event_emitter("blockchain.sync.supports.vacuum", "steps")
|
||||
def supports_vacuum(p: ProgressContext):
|
||||
p.start(1)
|
||||
if p.ctx.is_postgres:
|
||||
p.ctx.execute_notx(text("VACUUM support;"))
|
||||
p.step()
|
lbry/blockchain/sync/synchronizer.py (new file, 309 lines)
|
@ -0,0 +1,309 @@
|
|||
import os
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Optional, Tuple, Set, List, Coroutine
|
||||
|
||||
from lbry.db import Database
|
||||
from lbry.db import queries as q
|
||||
from lbry.db.constants import TXO_TYPES, CLAIM_TYPE_CODES
|
||||
from lbry.db.query_context import Event, Progress
|
||||
from lbry.event import BroadcastSubscription
|
||||
from lbry.service.base import Sync, BlockEvent
|
||||
from lbry.blockchain.lbrycrd import Lbrycrd
|
||||
|
||||
from . import blocks as block_phase, claims as claim_phase, supports as support_phase
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
BLOCKS_INIT_EVENT = Event.add("blockchain.sync.blocks.init", "steps")
|
||||
BLOCKS_MAIN_EVENT = Event.add("blockchain.sync.blocks.main", "blocks", "txs")
|
||||
FILTER_INIT_EVENT = Event.add("blockchain.sync.filter.init", "steps")
|
||||
FILTER_MAIN_EVENT = Event.add("blockchain.sync.filter.main", "blocks")
|
||||
CLAIMS_INIT_EVENT = Event.add("blockchain.sync.claims.init", "steps")
|
||||
CLAIMS_MAIN_EVENT = Event.add("blockchain.sync.claims.main", "claims")
|
||||
TRENDS_INIT_EVENT = Event.add("blockchain.sync.trends.init", "steps")
|
||||
TRENDS_MAIN_EVENT = Event.add("blockchain.sync.trends.main", "blocks")
|
||||
SUPPORTS_INIT_EVENT = Event.add("blockchain.sync.supports.init", "steps")
|
||||
SUPPORTS_MAIN_EVENT = Event.add("blockchain.sync.supports.main", "supports")
|
||||
|
||||
|
||||
class BlockchainSync(Sync):
|
||||
|
||||
TX_FLUSH_SIZE = 25_000 # flush to db after processing this many TXs and update progress
|
||||
CLAIM_FLUSH_SIZE = 25_000 # flush to db after processing this many claims and update progress
|
||||
SUPPORT_FLUSH_SIZE = 25_000 # flush to db after processing this many supports and update progress
|
||||
FILTER_FLUSH_SIZE = 10_000 # flush to db after processing this many filters and update progress
|
||||
|
||||
def __init__(self, chain: Lbrycrd, db: Database):
|
||||
super().__init__(chain.ledger, db)
|
||||
self.chain = chain
|
||||
self.pid = os.getpid()
|
||||
self.on_block_subscription: Optional[BroadcastSubscription] = None
|
||||
self.advance_loop_task: Optional[asyncio.Task] = None
|
||||
self.advance_loop_event = asyncio.Event()
|
||||
|
||||
async def start(self):
|
||||
self.db.stop_event.clear()
|
||||
await self.chain.ensure_subscribable()
|
||||
self.advance_loop_task = asyncio.create_task(self.advance())
|
||||
await self.advance_loop_task
|
||||
await self.chain.subscribe()
|
||||
self.advance_loop_task = asyncio.create_task(self.advance_loop())
|
||||
self.on_block_subscription = self.chain.on_block.listen(
|
||||
lambda e: self.advance_loop_event.set()
|
||||
)
|
||||
|
||||
async def stop(self):
|
||||
self.chain.unsubscribe()
|
||||
if self.on_block_subscription is not None:
|
||||
self.on_block_subscription.cancel()
|
||||
self.db.stop_event.set()
|
||||
if self.advance_loop_task is not None:
|
||||
self.advance_loop_task.cancel()
|
||||
|
||||
async def run_tasks(self, tasks: List[Coroutine]) -> Optional[Set[asyncio.Future]]:
|
||||
done, pending = await asyncio.wait(
|
||||
tasks, return_when=asyncio.FIRST_EXCEPTION
|
||||
)
|
||||
if pending:
|
||||
self.db.stop_event.set()
|
||||
for future in pending:
|
||||
future.cancel()
|
||||
for future in done:
|
||||
future.result()
|
||||
return
|
||||
return done
|
||||
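run_tasks above relies on asyncio.wait(..., return_when=FIRST_EXCEPTION): pending is only non-empty when some task raised, so it flags the stop event, cancels the stragglers, and calls result() on the finished set to surface the error. The same pattern in a self-contained toy (no lbry imports):

import asyncio

async def ok(n):
    await asyncio.sleep(n)
    return n

async def boom():
    await asyncio.sleep(0.01)
    raise RuntimeError("sync task failed")

async def main():
    tasks = [asyncio.create_task(ok(1)), asyncio.create_task(boom())]
    done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_EXCEPTION)
    if pending:                      # only non-empty when one of the tasks raised
        for future in pending:
            future.cancel()          # mirrors the stop_event + cancel step above
        for future in done:
            future.result()          # re-raises the RuntimeError from the failed task
        return None
    return done

try:
    asyncio.run(main())
except RuntimeError as e:
    print("aborted:", e)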
|
||||
async def get_best_block_height_for_file(self, file_number) -> int:
|
||||
return await self.db.run(
|
||||
block_phase.get_best_block_height_for_file, file_number
|
||||
)
|
||||
|
||||
async def sync_blocks(self) -> Optional[Tuple[int, int]]:
|
||||
tasks = []
|
||||
starting_height = None
|
||||
tx_count = block_count = 0
|
||||
with Progress(self.db.message_queue, BLOCKS_INIT_EVENT) as p:
|
||||
ending_height = await self.chain.db.get_best_height()
|
||||
for chain_file in p.iter(await self.chain.db.get_block_files()):
|
||||
# block files may be read and saved out of order, need to check
|
||||
# each file individually to see if we have missing blocks
|
||||
our_best_file_height = await self.get_best_block_height_for_file(
|
||||
chain_file['file_number']
|
||||
)
|
||||
if our_best_file_height == chain_file['best_height']:
|
||||
# we have all blocks in this file, skipping
|
||||
continue
|
||||
if -1 < our_best_file_height < chain_file['best_height']:
|
||||
# we have some blocks, need to figure out what we're missing
|
||||
# call get_block_files again limited to this file and current_height
|
||||
chain_file = (await self.chain.db.get_block_files(
|
||||
file_number=chain_file['file_number'], start_height=our_best_file_height+1,
|
||||
))[0]
|
||||
tx_count += chain_file['txs']
|
||||
block_count += chain_file['blocks']
|
||||
file_start_height = chain_file['start_height']
|
||||
starting_height = min(
|
||||
file_start_height if starting_height is None else starting_height,
|
||||
file_start_height
|
||||
)
|
||||
tasks.append(self.db.run(
|
||||
block_phase.sync_block_file, chain_file['file_number'], file_start_height,
|
||||
chain_file['txs'], self.TX_FLUSH_SIZE
|
||||
))
|
||||
with Progress(self.db.message_queue, BLOCKS_MAIN_EVENT) as p:
|
||||
p.start(block_count, tx_count, extra={
|
||||
"starting_height": starting_height,
|
||||
"ending_height": ending_height,
|
||||
"files": len(tasks),
|
||||
"claims": await self.chain.db.get_claim_metadata_count(starting_height, ending_height),
|
||||
"supports": await self.chain.db.get_support_metadata_count(starting_height, ending_height),
|
||||
})
|
||||
completed = await self.run_tasks(tasks)
|
||||
if completed:
|
||||
best_height_processed = max(f.result() for f in completed)
|
||||
return starting_height, best_height_processed
|
||||
|
||||
async def sync_filters(self):
|
||||
if not self.conf.spv_address_filters:
|
||||
return
|
||||
with Progress(self.db.message_queue, FILTER_MAIN_EVENT) as p:
|
||||
blocks = 0
|
||||
tasks = []
|
||||
# for chunk in range(select min(height), max(height) from block where filter is null):
|
||||
# tasks.append(self.db.run(block_phase.sync_filters, chunk, self.FILTER_FLUSH_SIZE))
|
||||
p.start(blocks)
|
||||
await self.run_tasks(tasks)
|
||||
|
||||
async def sync_spends(self, blocks_added):
|
||||
if blocks_added:
|
||||
await self.db.run(block_phase.sync_spends, blocks_added[0] == 0)
|
||||
|
||||
async def count_unspent_txos(
|
||||
self,
|
||||
txo_types: Tuple[int, ...],
|
||||
blocks: Tuple[int, int] = None,
|
||||
missing_in_supports_table: bool = False,
|
||||
missing_in_claims_table: bool = False,
|
||||
missing_or_stale_in_claims_table: bool = False,
|
||||
) -> int:
|
||||
return await self.db.run(
|
||||
q.count_unspent_txos, txo_types, blocks,
|
||||
missing_in_supports_table,
|
||||
missing_in_claims_table,
|
||||
missing_or_stale_in_claims_table,
|
||||
)
|
||||
|
||||
async def distribute_unspent_txos(
|
||||
self,
|
||||
txo_types: Tuple[int, ...],
|
||||
blocks: Tuple[int, int] = None,
|
||||
missing_in_supports_table: bool = False,
|
||||
missing_in_claims_table: bool = False,
|
||||
missing_or_stale_in_claims_table: bool = False,
|
||||
) -> int:
|
||||
return await self.db.run(
|
||||
q.distribute_unspent_txos, txo_types, blocks,
|
||||
missing_in_supports_table,
|
||||
missing_in_claims_table,
|
||||
missing_or_stale_in_claims_table,
|
||||
self.db.workers
|
||||
)
|
||||
|
||||
async def count_abandoned_supports(self) -> int:
|
||||
return await self.db.run(q.count_abandoned_supports)
|
||||
|
||||
async def count_abandoned_claims(self) -> int:
|
||||
return await self.db.run(q.count_abandoned_claims)
|
||||
|
||||
async def count_claims_with_changed_supports(self, blocks) -> int:
|
||||
return await self.db.run(q.count_claims_with_changed_supports, blocks)
|
||||
|
||||
async def count_channels_with_changed_content(self, blocks) -> int:
|
||||
return await self.db.run(q.count_channels_with_changed_content, blocks)
|
||||
|
||||
async def count_takeovers(self, blocks) -> int:
|
||||
return await self.chain.db.get_takeover_count(
|
||||
start_height=blocks[0], end_height=blocks[-1]
|
||||
)
|
||||
|
||||
async def sync_claims(self, blocks) -> bool:
|
||||
delete_claims = takeovers = claims_with_changed_supports = 0
|
||||
initial_sync = not await self.db.has_claims()
|
||||
with Progress(self.db.message_queue, CLAIMS_INIT_EVENT) as p:
|
||||
if initial_sync:
|
||||
total, batches = await self.distribute_unspent_txos(CLAIM_TYPE_CODES)
|
||||
elif blocks:
|
||||
p.start(4)
|
||||
# 1. content claims to be inserted or updated
|
||||
total = await self.count_unspent_txos(
|
||||
CLAIM_TYPE_CODES, blocks, missing_or_stale_in_claims_table=True
|
||||
)
|
||||
batches = [blocks] if total else []
|
||||
p.step()
|
||||
# 2. claims to be deleted
|
||||
delete_claims = await self.count_abandoned_claims()
|
||||
total += delete_claims
|
||||
p.step()
|
||||
# 3. claims to be updated with new support totals
|
||||
claims_with_changed_supports = await self.count_claims_with_changed_supports(blocks)
|
||||
total += claims_with_changed_supports
|
||||
p.step()
|
||||
# 4. claims to be updated due to name takeovers
|
||||
takeovers = await self.count_takeovers(blocks)
|
||||
total += takeovers
|
||||
p.step()
|
||||
else:
|
||||
return initial_sync
|
||||
with Progress(self.db.message_queue, CLAIMS_MAIN_EVENT) as p:
|
||||
p.start(total)
|
||||
if batches:
|
||||
await self.run_tasks([
|
||||
self.db.run(claim_phase.claims_insert, batch, not initial_sync, self.CLAIM_FLUSH_SIZE)
|
||||
for batch in batches
|
||||
])
|
||||
if not initial_sync:
|
||||
await self.run_tasks([
|
||||
self.db.run(claim_phase.claims_update, batch) for batch in batches
|
||||
])
|
||||
if delete_claims:
|
||||
await self.db.run(claim_phase.claims_delete, delete_claims)
|
||||
if takeovers:
|
||||
await self.db.run(claim_phase.update_takeovers, blocks, takeovers)
|
||||
if claims_with_changed_supports:
|
||||
await self.db.run(claim_phase.update_stakes, blocks, claims_with_changed_supports)
|
||||
if initial_sync:
|
||||
await self.db.run(claim_phase.claims_constraints_and_indexes)
|
||||
else:
|
||||
await self.db.run(claim_phase.claims_vacuum)
|
||||
return initial_sync
|
||||
|
||||
async def sync_supports(self, blocks):
|
||||
delete_supports = 0
|
||||
initial_sync = not await self.db.has_supports()
|
||||
with Progress(self.db.message_queue, SUPPORTS_INIT_EVENT) as p:
|
||||
if initial_sync:
|
||||
total, support_batches = await self.distribute_unspent_txos(TXO_TYPES['support'])
|
||||
elif blocks:
|
||||
p.start(2)
|
||||
# 1. supports to be inserted
|
||||
total = await self.count_unspent_txos(
|
||||
TXO_TYPES['support'], blocks, missing_in_supports_table=True
|
||||
)
|
||||
support_batches = [blocks] if total else []
|
||||
p.step()
|
||||
# 2. supports to be deleted
|
||||
delete_supports = await self.count_abandoned_supports()
|
||||
total += delete_supports
|
||||
p.step()
|
||||
else:
|
||||
return
|
||||
with Progress(self.db.message_queue, SUPPORTS_MAIN_EVENT) as p:
|
||||
p.start(total)
|
||||
if support_batches:
|
||||
await self.run_tasks([
|
||||
self.db.run(
|
||||
support_phase.supports_insert, batch, not initial_sync, self.SUPPORT_FLUSH_SIZE
|
||||
) for batch in support_batches
|
||||
])
|
||||
if delete_supports:
|
||||
await self.db.run(support_phase.supports_delete, delete_supports)
|
||||
if initial_sync:
|
||||
await self.db.run(support_phase.supports_constraints_and_indexes)
|
||||
else:
|
||||
await self.db.run(support_phase.supports_vacuum)
|
||||
|
||||
async def sync_channel_stats(self, blocks, initial_sync):
|
||||
await self.db.run(claim_phase.update_channel_stats, blocks, initial_sync)
|
||||
|
||||
async def sync_trends(self):
|
||||
pass
|
||||
|
||||
async def advance(self):
|
||||
blocks_added = await self.sync_blocks()
|
||||
sync_filters_task = asyncio.create_task(self.sync_filters())
|
||||
sync_trends_task = asyncio.create_task(self.sync_trends())
|
||||
await self.sync_spends(blocks_added)
|
||||
initial_claim_sync = await self.sync_claims(blocks_added)
|
||||
await self.sync_supports(blocks_added)
|
||||
await self.sync_channel_stats(blocks_added, initial_claim_sync)
|
||||
await sync_trends_task
|
||||
await sync_filters_task
|
||||
if blocks_added:
|
||||
await self._on_block_controller.add(BlockEvent(blocks_added[-1]))
|
||||
|
||||
async def advance_loop(self):
|
||||
while True:
|
||||
await self.advance_loop_event.wait()
|
||||
self.advance_loop_event.clear()
|
||||
try:
|
||||
await self.advance()
|
||||
except asyncio.CancelledError:
|
||||
return
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
await self.stop()
|
||||
|
||||
async def rewind(self, height):
|
||||
await self.db.run(block_phase.rewind, height)
|
|
lbry/blockchain/transaction.py
@ -1,9 +1,10 @@
|
|||
import struct
|
||||
import hashlib
|
||||
import logging
|
||||
import typing
|
||||
import asyncio
|
||||
from datetime import date
|
||||
from binascii import hexlify, unhexlify
|
||||
from typing import List, Iterable, Optional, Tuple
|
||||
from typing import List, Iterable, Optional, Union
|
||||
|
||||
import ecdsa
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
|
@ -13,24 +14,19 @@ from cryptography.hazmat.primitives.asymmetric import ec
|
|||
from cryptography.hazmat.primitives.asymmetric.utils import Prehashed
|
||||
from cryptography.exceptions import InvalidSignature
|
||||
|
||||
from lbry.error import InsufficientFundsError
|
||||
from lbry.crypto.hash import hash160, sha256
|
||||
from lbry.crypto.base58 import Base58
|
||||
from lbry.schema.url import normalize_name
|
||||
from lbry.schema.claim import Claim
|
||||
from lbry.schema.base import Signable
|
||||
from lbry.schema.purchase import Purchase
|
||||
from lbry.schema.support import Support
|
||||
|
||||
from .script import InputScript, OutputScript
|
||||
from .constants import COIN, NULL_HASH32
|
||||
from .bcd_data_stream import BCDataStream
|
||||
from .hash import TXRef, TXRefImmutable
|
||||
from .util import ReadOnlyList
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.wallet.account import Account
|
||||
from lbry.wallet.ledger import Ledger
|
||||
from lbry.wallet.wallet import Wallet
|
||||
|
||||
log = logging.getLogger()
|
||||
|
||||
|
||||
|
@ -58,6 +54,10 @@ class TXRefMutable(TXRef):
|
|||
def height(self):
|
||||
return self.tx.height
|
||||
|
||||
@property
|
||||
def timestamp(self):
|
||||
return self.tx.timestamp
|
||||
|
||||
def reset(self):
|
||||
self._id = None
|
||||
self._hash = None
|
||||
|
@ -107,7 +107,7 @@ class InputOutput:
|
|||
|
||||
__slots__ = 'tx_ref', 'position'
|
||||
|
||||
def __init__(self, tx_ref: TXRef = None, position: int = None) -> None:
|
||||
def __init__(self, tx_ref: Union[TXRef, TXRefImmutable] = None, position: int = None) -> None:
|
||||
self.tx_ref = tx_ref
|
||||
self.position = position
|
||||
|
||||
|
@ -129,6 +129,7 @@ class Input(InputOutput):
|
|||
|
||||
NULL_SIGNATURE = b'\x00'*72
|
||||
NULL_PUBLIC_KEY = b'\x00'*33
|
||||
NULL_HASH32 = b'\x00'*32
|
||||
|
||||
__slots__ = 'txo_ref', 'sequence', 'coinbase', 'script'
|
||||
|
||||
|
@ -151,6 +152,12 @@ class Input(InputOutput):
|
|||
script = InputScript.redeem_pubkey_hash(cls.NULL_SIGNATURE, cls.NULL_PUBLIC_KEY)
|
||||
return cls(txo.ref, script)
|
||||
|
||||
@classmethod
|
||||
def create_coinbase(cls) -> 'Input':
|
||||
tx_ref = TXRefImmutable.from_hash(cls.NULL_HASH32, 0, 0)
|
||||
txo_ref = TXORef(tx_ref, 0)
|
||||
return cls(txo_ref, b'beef')
|
||||
|
||||
@property
|
||||
def amount(self) -> int:
|
||||
""" Amount this input adds to the transaction. """
|
||||
|
@ -167,7 +174,7 @@ class Input(InputOutput):
|
|||
|
||||
@classmethod
|
||||
def deserialize_from(cls, stream):
|
||||
tx_ref = TXRefImmutable.from_hash(stream.read(32), -1)
|
||||
tx_ref = TXRefImmutable.from_hash(stream.read(32), -1, -1)
|
||||
position = stream.read_uint32()
|
||||
script = stream.read_string()
|
||||
sequence = stream.read_uint32()
|
||||
|
@ -190,32 +197,18 @@ class Input(InputOutput):
|
|||
stream.write_uint32(self.sequence)
|
||||
|
||||
|
||||
class OutputEffectiveAmountEstimator:
|
||||
|
||||
__slots__ = 'txo', 'txi', 'fee', 'effective_amount'
|
||||
|
||||
def __init__(self, ledger: 'Ledger', txo: 'Output') -> None:
|
||||
self.txo = txo
|
||||
self.txi = Input.spend(txo)
|
||||
self.fee: int = self.txi.get_fee(ledger)
|
||||
self.effective_amount: int = txo.amount - self.fee
|
||||
|
||||
def __lt__(self, other):
|
||||
return self.effective_amount < other.effective_amount
|
||||
|
||||
|
||||
class Output(InputOutput):
|
||||
|
||||
__slots__ = (
|
||||
'amount', 'script', 'is_internal_transfer', 'is_spent', 'is_my_output', 'is_my_input',
|
||||
'amount', 'script', 'is_internal_transfer', 'spent_height', 'is_my_output', 'is_my_input',
|
||||
'channel', 'private_key', 'meta', 'sent_supports', 'sent_tips', 'received_tips',
|
||||
'purchase', 'purchased_claim', 'purchase_receipt',
|
||||
'reposted_claim', 'claims',
|
||||
'reposted_claim', 'claims', '_signable'
|
||||
)
|
||||
|
||||
def __init__(self, amount: int, script: OutputScript,
|
||||
tx_ref: TXRef = None, position: int = None,
|
||||
is_internal_transfer: Optional[bool] = None, is_spent: Optional[bool] = None,
|
||||
is_internal_transfer: Optional[bool] = None, spent_height: Optional[bool] = None,
|
||||
is_my_output: Optional[bool] = None, is_my_input: Optional[bool] = None,
|
||||
sent_supports: Optional[int] = None, sent_tips: Optional[int] = None,
|
||||
received_tips: Optional[int] = None,
|
||||
|
@ -225,7 +218,7 @@ class Output(InputOutput):
|
|||
self.amount = amount
|
||||
self.script = script
|
||||
self.is_internal_transfer = is_internal_transfer
|
||||
self.is_spent = is_spent
|
||||
self.spent_height = spent_height
|
||||
self.is_my_output = is_my_output
|
||||
self.is_my_input = is_my_input
|
||||
self.sent_supports = sent_supports
|
||||
|
@ -238,12 +231,13 @@ class Output(InputOutput):
|
|||
self.purchase_receipt: 'Output' = None # txo representing purchase receipt for this claim
|
||||
self.reposted_claim: 'Output' = None # txo representing claim being reposted
|
||||
self.claims: List['Output'] = None # resolved claims for collection
|
||||
self._signable: Optional[Signable] = None
|
||||
self.meta = {}
|
||||
|
||||
def update_annotations(self, annotated: 'Output'):
|
||||
if annotated is None:
|
||||
self.is_internal_transfer = None
|
||||
self.is_spent = None
|
||||
self.spent_height = None
|
||||
self.is_my_output = None
|
||||
self.is_my_input = None
|
||||
self.sent_supports = None
|
||||
|
@ -251,7 +245,7 @@ class Output(InputOutput):
|
|||
self.received_tips = None
|
||||
else:
|
||||
self.is_internal_transfer = annotated.is_internal_transfer
|
||||
self.is_spent = annotated.is_spent
|
||||
self.spent_height = annotated.spent_height
|
||||
self.is_my_output = annotated.is_my_output
|
||||
self.is_my_input = annotated.is_my_input
|
||||
self.sent_supports = annotated.sent_supports
|
||||
|
@ -268,6 +262,15 @@ class Output(InputOutput):
|
|||
def id(self):
|
||||
return self.ref.id
|
||||
|
||||
@property
|
||||
def hash(self):
|
||||
return self.ref.hash
|
||||
|
||||
@property
|
||||
def is_spent(self):
|
||||
if self.spent_height is not None:
|
||||
return self.spent_height > 0
|
||||
|
||||
@property
|
||||
def pubkey_hash(self):
|
||||
return self.script.values['pubkey_hash']
|
||||
|
@ -279,19 +282,17 @@ class Output(InputOutput):
|
|||
def get_address(self, ledger):
|
||||
return ledger.hash160_to_address(self.pubkey_hash)
|
||||
|
||||
def get_estimator(self, ledger):
|
||||
return OutputEffectiveAmountEstimator(ledger, self)
|
||||
|
||||
@classmethod
|
||||
def pay_pubkey_hash(cls, amount, pubkey_hash):
|
||||
return cls(amount, OutputScript.pay_pubkey_hash(pubkey_hash))
|
||||
|
||||
@classmethod
|
||||
def deserialize_from(cls, stream):
|
||||
return cls(
|
||||
amount=stream.read_uint64(),
|
||||
script=OutputScript(stream.read_string())
|
||||
)
|
||||
def deserialize_from(cls, stream, transaction_offset: int = 0):
|
||||
amount = stream.read_uint64()
|
||||
length = stream.read_compact_size()
|
||||
offset = stream.tell()-transaction_offset
|
||||
script = OutputScript(stream.read(length), offset=offset)
|
||||
return cls(amount=amount, script=script)
|
||||
|
||||
def serialize_to(self, stream, alternate_script=None):
|
||||
stream.write_uint64(self.amount)
|
||||
|
@ -311,6 +312,10 @@ class Output(InputOutput):
|
|||
def is_support(self) -> bool:
|
||||
return self.script.is_support_claim
|
||||
|
||||
@property
|
||||
def is_support_data(self) -> bool:
|
||||
return self.script.is_support_claim_data
|
||||
|
||||
@property
|
||||
def claim_hash(self) -> bytes:
|
||||
if self.script.is_claim_name:
|
||||
|
@ -346,7 +351,38 @@ class Output(InputOutput):
|
|||
def can_decode_claim(self):
|
||||
try:
|
||||
return self.claim
|
||||
except: # pylint: disable=bare-except
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
@property
|
||||
def support(self) -> Support:
|
||||
if self.is_support_data:
|
||||
if not isinstance(self.script.values['support'], Support):
|
||||
self.script.values['support'] = Support.from_bytes(self.script.values['support'])
|
||||
return self.script.values['support']
|
||||
raise ValueError('Only supports with data can be represented as Supports.')
|
||||
|
||||
@property
|
||||
def can_decode_support(self):
|
||||
try:
|
||||
return self.support
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
@property
|
||||
def signable(self) -> Signable:
|
||||
if self._signable is None:
|
||||
if self.is_claim:
|
||||
self._signable = self.claim
|
||||
elif self.is_support_data:
|
||||
self._signable = self.support
|
||||
return self._signable
|
||||
|
||||
@property
|
||||
def can_decode_signable(self) -> Signable:
|
||||
try:
|
||||
return self.signable
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
@property
|
||||
|
@ -360,22 +396,22 @@ class Output(InputOutput):
|
|||
return self.private_key is not None
|
||||
|
||||
def get_signature_digest(self, ledger):
|
||||
if self.claim.unsigned_payload:
|
||||
if self.signable.unsigned_payload:
|
||||
pieces = [
|
||||
Base58.decode(self.get_address(ledger)),
|
||||
self.claim.unsigned_payload,
|
||||
self.claim.signing_channel_hash[::-1]
|
||||
self.signable.unsigned_payload,
|
||||
self.signable.signing_channel_hash[::-1]
|
||||
]
|
||||
else:
|
||||
pieces = [
|
||||
self.tx_ref.tx.inputs[0].txo_ref.hash,
|
||||
self.claim.signing_channel_hash,
|
||||
self.claim.to_message_bytes()
|
||||
self.signable.signing_channel_hash,
|
||||
self.signable.to_message_bytes()
|
||||
]
|
||||
return sha256(b''.join(pieces))
|
||||
|
||||
def get_encoded_signature(self):
|
||||
signature = hexlify(self.claim.signature)
|
||||
signature = hexlify(self.signable.signature)
|
||||
r = int(signature[:int(len(signature)/2)], 16)
|
||||
s = int(signature[int(len(signature)/2):], 16)
|
||||
return ecdsa.util.sigencode_der(r, s, len(signature)*4)
|
||||
|
@ -384,7 +420,10 @@ class Output(InputOutput):
|
|||
def is_signature_valid(encoded_signature, signature_digest, public_key_bytes):
|
||||
try:
|
||||
public_key = load_der_public_key(public_key_bytes, default_backend())
|
||||
public_key.verify(encoded_signature, signature_digest, ec.ECDSA(Prehashed(hashes.SHA256())))
|
||||
public_key.verify( # pylint: disable=no-value-for-parameter
|
||||
encoded_signature, signature_digest,
|
||||
ec.ECDSA(Prehashed(hashes.SHA256()))
|
||||
)
|
||||
return True
|
||||
except (ValueError, InvalidSignature):
|
||||
pass
|
||||
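is_signature_valid above verifies a DER-encoded ECDSA signature against an already-computed SHA-256 digest, which is why the digest is wrapped in Prehashed. A self-contained round-trip with the cryptography package (throwaway key, not a real claim signature):

import hashlib
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import Prehashed

digest = hashlib.sha256(b"signed payload").digest()        # pre-hashed message
private_key = ec.generate_private_key(ec.SECP256K1(), default_backend())
der_signature = private_key.sign(digest, ec.ECDSA(Prehashed(hashes.SHA256())))

public_key = private_key.public_key()
# raises InvalidSignature if the DER signature does not match the digest
public_key.verify(der_signature, digest, ec.ECDSA(Prehashed(hashes.SHA256())))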
|
@ -399,22 +438,31 @@ class Output(InputOutput):
|
|||
|
||||
def sign(self, channel: 'Output', first_input_id=None):
|
||||
self.channel = channel
|
||||
self.claim.signing_channel_hash = channel.claim_hash
|
||||
self.signable.signing_channel_hash = channel.claim_hash
|
||||
digest = sha256(b''.join([
|
||||
first_input_id or self.tx_ref.tx.inputs[0].txo_ref.hash,
|
||||
self.claim.signing_channel_hash,
|
||||
self.claim.to_message_bytes()
|
||||
self.signable.signing_channel_hash,
|
||||
self.signable.to_message_bytes()
|
||||
]))
|
||||
self.claim.signature = channel.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
|
||||
self.signable.signature = channel.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
|
||||
self.script.generate()
|
||||
|
||||
def clear_signature(self):
|
||||
self.channel = None
|
||||
self.claim.clear_signature()
|
||||
|
||||
def generate_channel_private_key(self):
|
||||
self.private_key = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
|
||||
self.claim.channel.public_key_bytes = self.private_key.get_verifying_key().to_der()
|
||||
@staticmethod
|
||||
def _sync_generate_channel_private_key():
|
||||
private_key = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
|
||||
public_key_bytes = private_key.get_verifying_key().to_der()
|
||||
return private_key, public_key_bytes
|
||||
|
||||
async def generate_channel_private_key(self):
|
||||
private_key, public_key_bytes = await asyncio.get_running_loop().run_in_executor(
|
||||
None, Output._sync_generate_channel_private_key
|
||||
)
|
||||
self.private_key = private_key
|
||||
self.claim.channel.public_key_bytes = public_key_bytes
|
||||
self.script.generate()
|
||||
return self.private_key
|
||||
|
||||
|
@ -443,6 +491,14 @@ class Output(InputOutput):
|
|||
)
|
||||
return cls(amount, script)
|
||||
|
||||
@classmethod
|
||||
def pay_support_data_pubkey_hash(
|
||||
cls, amount: int, claim_name: str, claim_id: str, support: Support, pubkey_hash: bytes) -> 'Output':
|
||||
script = OutputScript.pay_support_data_pubkey_hash(
|
||||
claim_name.encode(), unhexlify(claim_id)[::-1], support, pubkey_hash
|
||||
)
|
||||
return cls(amount, script)
|
||||
|
||||
@classmethod
|
||||
def add_purchase_data(cls, purchase: Purchase) -> 'Output':
|
||||
script = OutputScript.return_data(purchase)
|
||||
|
@ -477,6 +533,13 @@ class Output(InputOutput):
|
|||
if self.purchased_claim is not None:
|
||||
return self.purchased_claim.claim_id
|
||||
|
||||
@property
|
||||
def purchased_claim_hash(self):
|
||||
if self.purchase is not None:
|
||||
return self.purchase.purchase_data.claim_hash
|
||||
if self.purchased_claim is not None:
|
||||
return self.purchased_claim.claim_hash
|
||||
|
||||
@property
|
||||
def has_price(self):
|
||||
if self.can_decode_claim:
|
||||
|
@ -494,7 +557,7 @@ class Output(InputOutput):
|
|||
class Transaction:
|
||||
|
||||
def __init__(self, raw=None, version: int = 1, locktime: int = 0, is_verified: bool = False,
|
||||
height: int = -2, position: int = -1, julian_day: int = None) -> None:
|
||||
height: int = -2, position: int = -1, timestamp: int = 0) -> None:
|
||||
self._raw = raw
|
||||
self._raw_sans_segwit = None
|
||||
self.is_segwit_flag = 0
|
||||
|
@ -512,9 +575,13 @@ class Transaction:
|
|||
# +num: confirmed in a specific block (height)
|
||||
self.height = height
|
||||
self.position = position
|
||||
self._day = julian_day
|
||||
self.timestamp = timestamp
|
||||
self._day: Optional[int] = None
|
||||
if raw is not None:
|
||||
self._deserialize()
|
||||
self.deserialize()
|
||||
|
||||
def __repr__(self):
|
||||
return f"TX({self.id[:10]}...{self.id[-10:]})"
|
||||
|
||||
@property
|
||||
def is_broadcast(self):
|
||||
|
@ -536,9 +603,10 @@ class Transaction:
|
|||
def hash(self):
|
||||
return self.ref.hash
|
||||
|
||||
def get_julian_day(self, ledger):
|
||||
if self._day is None and self.height > 0:
|
||||
self._day = ledger.headers.estimated_julian_day(self.height)
|
||||
@property
|
||||
def day(self):
|
||||
if self._day is None and self.timestamp > 0:
|
||||
self._day = date.fromtimestamp(self.timestamp).toordinal()
|
||||
return self._day
|
||||
|
||||
@property
|
||||
|
@ -674,9 +742,10 @@ class Transaction:
|
|||
stream.write_uint32(self.signature_hash_type(1)) # signature hash type: SIGHASH_ALL
|
||||
return stream.get_bytes()
|
||||
|
||||
def _deserialize(self):
|
||||
if self._raw is not None:
|
||||
stream = BCDataStream(self._raw)
|
||||
def deserialize(self, stream=None):
|
||||
if self._raw is not None or stream is not None:
|
||||
stream = stream or BCDataStream(self._raw)
|
||||
start = stream.tell()
|
||||
self.version = stream.read_uint32()
|
||||
input_count = stream.read_compact_size()
|
||||
if input_count == 0:
|
||||
|
@ -687,7 +756,7 @@ class Transaction:
|
|||
])
|
||||
output_count = stream.read_compact_size()
|
||||
self._add(self._outputs, [
|
||||
Output.deserialize_from(stream) for _ in range(output_count)
|
||||
Output.deserialize_from(stream, start) for _ in range(output_count)
|
||||
])
|
||||
if self.is_segwit_flag:
|
||||
# drain witness portion of transaction
|
||||
|
@ -697,181 +766,12 @@ class Transaction:
|
|||
for _ in range(stream.read_compact_size()):
|
||||
self.witnesses.append(stream.read(stream.read_compact_size()))
|
||||
self.locktime = stream.read_uint32()
|
||||
|
||||
@classmethod
|
||||
def ensure_all_have_same_ledger_and_wallet(
|
||||
cls, funding_accounts: Iterable['Account'],
|
||||
change_account: 'Account' = None) -> Tuple['Ledger', 'Wallet']:
|
||||
ledger = wallet = None
|
||||
for account in funding_accounts:
|
||||
if ledger is None:
|
||||
ledger = account.ledger
|
||||
wallet = account.wallet
|
||||
if ledger != account.ledger:
|
||||
raise ValueError(
|
||||
'All funding accounts used to create a transaction must be on the same ledger.'
|
||||
)
|
||||
if wallet != account.wallet:
|
||||
raise ValueError(
|
||||
'All funding accounts used to create a transaction must be from the same wallet.'
|
||||
)
|
||||
if change_account is not None:
|
||||
if change_account.ledger != ledger:
|
||||
raise ValueError('Change account must use same ledger as funding accounts.')
|
||||
if change_account.wallet != wallet:
|
||||
raise ValueError('Change account must use same wallet as funding accounts.')
|
||||
if ledger is None:
|
||||
raise ValueError('No ledger found.')
|
||||
if wallet is None:
|
||||
raise ValueError('No wallet found.')
|
||||
return ledger, wallet
|
||||
|
||||
@classmethod
|
||||
async def create(cls, inputs: Iterable[Input], outputs: Iterable[Output],
|
||||
funding_accounts: Iterable['Account'], change_account: 'Account',
|
||||
sign: bool = True):
|
||||
""" Find optimal set of inputs when only outputs are provided; add change
|
||||
outputs if only inputs are provided or if inputs are greater than outputs. """
|
||||
|
||||
tx = cls() \
|
||||
.add_inputs(inputs) \
|
||||
.add_outputs(outputs)
|
||||
|
||||
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
|
||||
|
||||
# value of the outputs plus associated fees
|
||||
cost = (
|
||||
tx.get_base_fee(ledger) +
|
||||
tx.get_total_output_sum(ledger)
|
||||
)
|
||||
# value of the inputs less the cost to spend those inputs
|
||||
payment = tx.get_effective_input_sum(ledger)
|
||||
|
||||
try:
|
||||
|
||||
for _ in range(5):
|
||||
|
||||
if payment < cost:
|
||||
deficit = cost - payment
|
||||
spendables = await ledger.get_spendable_utxos(deficit, funding_accounts)
|
||||
if not spendables:
|
||||
raise InsufficientFundsError()
|
||||
payment += sum(s.effective_amount for s in spendables)
|
||||
tx.add_inputs(s.txi for s in spendables)
|
||||
|
||||
cost_of_change = (
|
||||
tx.get_base_fee(ledger) +
|
||||
Output.pay_pubkey_hash(COIN, NULL_HASH32).get_fee(ledger)
|
||||
)
|
||||
if payment > cost:
|
||||
change = payment - cost
|
||||
if change > cost_of_change:
|
||||
change_address = await change_account.change.get_or_create_usable_address()
|
||||
change_hash160 = change_account.ledger.address_to_hash160(change_address)
|
||||
change_amount = change - cost_of_change
|
||||
change_output = Output.pay_pubkey_hash(change_amount, change_hash160)
|
||||
change_output.is_internal_transfer = True
|
||||
tx.add_outputs([Output.pay_pubkey_hash(change_amount, change_hash160)])
|
||||
|
||||
if tx._outputs:
|
||||
break
|
||||
# this condition and the outer range(5) loop cover an edge case
|
||||
# whereby a single input is just enough to cover the fee and
|
||||
# has some change left over, but the change left over is less
|
||||
# than the cost_of_change: thus the input is completely
|
||||
# consumed and no output is added, which is an invalid tx.
|
||||
# to be able to spend this input we must increase the cost
|
||||
# of the TX and run through the balance algorithm a second time
|
||||
# adding an extra input and change output, making tx valid.
|
||||
# we do this 5 times in case the other UTXOs added are also
|
||||
# less than the fee, after 5 attempts we give up and go home
|
||||
cost += cost_of_change + 1
|
||||
|
||||
if sign:
|
||||
await tx.sign(funding_accounts)
|
||||
|
||||
except Exception as e:
|
||||
log.exception('Failed to create transaction:')
|
||||
await ledger.release_tx(tx)
|
||||
raise e
|
||||
|
||||
return tx
|
||||
return self
|
||||
|
||||
@staticmethod
|
||||
def signature_hash_type(hash_type):
|
||||
return hash_type
|
||||
|
||||
async def sign(self, funding_accounts: Iterable['Account']):
|
||||
ledger, wallet = self.ensure_all_have_same_ledger_and_wallet(funding_accounts)
|
||||
for i, txi in enumerate(self._inputs):
|
||||
assert txi.script is not None
|
||||
assert txi.txo_ref.txo is not None
|
||||
txo_script = txi.txo_ref.txo.script
|
||||
if txo_script.is_pay_pubkey_hash:
|
||||
address = ledger.hash160_to_address(txo_script.values['pubkey_hash'])
|
||||
private_key = await ledger.get_private_key_for_address(wallet, address)
|
||||
assert private_key is not None, 'Cannot find private key for signing output.'
|
||||
tx = self._serialize_for_signature(i)
|
||||
txi.script.values['signature'] = \
|
||||
private_key.sign(tx) + bytes((self.signature_hash_type(1),))
|
||||
txi.script.values['pubkey'] = private_key.public_key.pubkey_bytes
|
||||
txi.script.generate()
|
||||
else:
|
||||
raise NotImplementedError("Don't know how to spend this output.")
|
||||
self._reset()
|
||||
|
||||
@classmethod
|
||||
def pay(cls, amount: int, address: bytes, funding_accounts: List['Account'], change_account: 'Account'):
|
||||
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
|
||||
output = Output.pay_pubkey_hash(amount, ledger.address_to_hash160(address))
|
||||
return cls.create([], [output], funding_accounts, change_account)
|
||||
|
||||
@classmethod
|
||||
def claim_create(
|
||||
cls, name: str, claim: Claim, amount: int, holding_address: str,
|
||||
funding_accounts: List['Account'], change_account: 'Account', signing_channel: Output = None):
|
||||
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
|
||||
claim_output = Output.pay_claim_name_pubkey_hash(
|
||||
amount, name, claim, ledger.address_to_hash160(holding_address)
|
||||
)
|
||||
if signing_channel is not None:
|
||||
claim_output.sign(signing_channel, b'placeholder txid:nout')
|
||||
return cls.create([], [claim_output], funding_accounts, change_account, sign=False)
|
||||
|
||||
@classmethod
|
||||
def claim_update(
|
||||
cls, previous_claim: Output, claim: Claim, amount: int, holding_address: str,
|
||||
funding_accounts: List['Account'], change_account: 'Account', signing_channel: Output = None):
|
||||
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
|
||||
updated_claim = Output.pay_update_claim_pubkey_hash(
|
||||
amount, previous_claim.claim_name, previous_claim.claim_id,
|
||||
claim, ledger.address_to_hash160(holding_address)
|
||||
)
|
||||
if signing_channel is not None:
|
||||
updated_claim.sign(signing_channel, b'placeholder txid:nout')
|
||||
else:
|
||||
updated_claim.clear_signature()
|
||||
return cls.create(
|
||||
[Input.spend(previous_claim)], [updated_claim], funding_accounts, change_account, sign=False
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def support(cls, claim_name: str, claim_id: str, amount: int, holding_address: str,
|
||||
funding_accounts: List['Account'], change_account: 'Account'):
|
||||
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
|
||||
support_output = Output.pay_support_pubkey_hash(
|
||||
amount, claim_name, claim_id, ledger.address_to_hash160(holding_address)
|
||||
)
|
||||
return cls.create([], [support_output], funding_accounts, change_account)
|
||||
|
||||
@classmethod
|
||||
def purchase(cls, claim_id: str, amount: int, merchant_address: bytes,
|
||||
funding_accounts: List['Account'], change_account: 'Account'):
|
||||
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
|
||||
payment = Output.pay_pubkey_hash(amount, ledger.address_to_hash160(merchant_address))
|
||||
data = Output.add_purchase_data(Purchase(claim_id))
|
||||
return cls.create([], [payment, data], funding_accounts, change_account)
|
||||
|
||||
@property
|
||||
def my_inputs(self):
|
||||
for txi in self.inputs:
|
|
@ -1,28 +1,4 @@
|
|||
import re
|
||||
from typing import TypeVar, Sequence, Optional
|
||||
from .constants import COIN
|
||||
|
||||
|
||||
def date_to_julian_day(d):
|
||||
return d.toordinal() + 1721424.5
|
||||
|
||||
|
||||
def coins_to_satoshis(coins):
|
||||
if not isinstance(coins, str):
|
||||
raise ValueError("{coins} must be a string")
|
||||
result = re.search(r'^(\d{1,10})\.(\d{1,8})$', coins)
|
||||
if result is not None:
|
||||
whole, fractional = result.groups()
|
||||
return int(whole+fractional.ljust(8, "0"))
|
||||
raise ValueError("'{lbc}' is not a valid coin decimal")
|
||||
|
||||
|
||||
def satoshis_to_coins(satoshis):
|
||||
coins = '{:.8f}'.format(satoshis / COIN).rstrip('0')
|
||||
if coins.endswith('.'):
|
||||
return coins+'0'
|
||||
else:
|
||||
return coins
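A quick illustrative round trip through the two helpers above (with COIN equal to 100,000,000 satoshis, per the constants module):

assert coins_to_satoshis("1.5") == 150_000_000
assert satoshis_to_coins(150_000_000) == "1.5"
assert satoshis_to_coins(100_000_000) == "1.0"  # a bare trailing '.' is padded back to '.0'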
|
||||
|
||||
|
||||
T = TypeVar('T')
|
||||
|
@ -44,18 +20,6 @@ def subclass_tuple(name, base):
|
|||
return type(name, (base,), {'__slots__': ()})
|
||||
|
||||
|
||||
class cachedproperty:
|
||||
|
||||
def __init__(self, f):
|
||||
self.f = f
|
||||
|
||||
def __get__(self, obj, objtype):
|
||||
obj = obj or objtype
|
||||
value = self.f(obj)
|
||||
setattr(obj, self.f.__name__, value)
|
||||
return value
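Illustrative use of cachedproperty above: the first access calls the wrapped function, then setattr() shadows the descriptor on the instance so later accesses are plain attribute lookups.

class Example:
    @cachedproperty
    def answer(self):
        print("computing")  # printed only on the first access
        return 42

e = Example()
assert e.answer == 42   # prints "computing"
assert e.answer == 42   # cached, no recomputation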
|
||||
|
||||
|
||||
class ArithUint256:
|
||||
# https://github.com/bitcoin/bitcoin/blob/master/src/arith_uint256.cpp
|
||||
|
|
@ -1,79 +1,20 @@
|
|||
import os
|
||||
import sys
|
||||
import shutil
|
||||
import signal
|
||||
import pathlib
|
||||
import json
|
||||
import asyncio
|
||||
import pathlib
|
||||
import argparse
|
||||
import logging
|
||||
import logging.handlers
|
||||
import textwrap
|
||||
import subprocess
|
||||
|
||||
import aiohttp
|
||||
from aiohttp.web import GracefulExit
|
||||
from docopt import docopt
|
||||
|
||||
from lbry import __version__ as lbrynet_version
|
||||
from lbry.extras.daemon.loggly_handler import get_loggly_handler
|
||||
from lbry.extras.daemon.daemon import Daemon
|
||||
from lbry import __version__
|
||||
from lbry.conf import Config, CLIConfig
|
||||
|
||||
log = logging.getLogger('lbry')
|
||||
|
||||
|
||||
def display(data):
|
||||
print(json.dumps(data, indent=2))
|
||||
|
||||
|
||||
async def execute_command(conf, method, params, callback=display):
|
||||
async with aiohttp.ClientSession() as session:
|
||||
try:
|
||||
message = {'method': method, 'params': params}
|
||||
async with session.get(conf.api_connection_url, json=message) as resp:
|
||||
try:
|
||||
data = await resp.json()
|
||||
if 'result' in data:
|
||||
return callback(data['result'])
|
||||
elif 'error' in data:
|
||||
return callback(data['error'])
|
||||
except Exception as e:
|
||||
log.exception('Could not process response from server:', exc_info=e)
|
||||
except aiohttp.ClientConnectionError:
|
||||
print("Could not connect to daemon. Are you sure it's running?")
|
||||
|
||||
|
||||
def normalize_value(x, key=None):
|
||||
if not isinstance(x, str):
|
||||
return x
|
||||
if key in ('uri', 'channel_name', 'name', 'file_name', 'claim_name', 'download_directory'):
|
||||
return x
|
||||
if x.lower() == 'true':
|
||||
return True
|
||||
if x.lower() == 'false':
|
||||
return False
|
||||
if x.isdigit():
|
||||
return int(x)
|
||||
return x
|
||||
|
||||
|
||||
def remove_brackets(key):
|
||||
if key.startswith("<") and key.endswith(">"):
|
||||
return str(key[1:-1])
|
||||
return key
|
||||
|
||||
|
||||
def set_kwargs(parsed_args):
|
||||
kwargs = {}
|
||||
for key, arg in parsed_args.items():
|
||||
if arg is None:
|
||||
continue
|
||||
k = None
|
||||
if key.startswith("--") and remove_brackets(key[2:]) not in kwargs:
|
||||
k = remove_brackets(key[2:])
|
||||
elif remove_brackets(key) not in kwargs:
|
||||
k = remove_brackets(key)
|
||||
kwargs[k] = normalize_value(arg, k)
|
||||
return kwargs
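Illustrative input and output for set_kwargs(), assuming docopt-style keys; note that keys listed in normalize_value() (uri, claim_name, ...) are deliberately left as strings:

parsed = {'--page': '2', '--resolve': 'true', '--uri': 'lbry://what', '<name>': None}
assert set_kwargs(parsed) == {'page': 2, 'resolve': True, 'uri': 'lbry://what'}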
|
||||
from lbry.service import Daemon, Client
|
||||
from lbry.service.metadata import interface
|
||||
from lbry.service.full_node import FullNode
|
||||
from lbry.blockchain.ledger import Ledger
|
||||
from lbry.console import Advanced as AdvancedConsole, Basic as BasicConsole
|
||||
|
||||
|
||||
def split_subparser_argument(parent, original, name, condition):
|
||||
|
@ -102,7 +43,7 @@ class ArgumentParser(argparse.ArgumentParser):
|
|||
self._optionals.title = 'Options'
|
||||
if group_name is None:
|
||||
self.epilog = (
|
||||
f"Run 'lbrynet COMMAND --help' for more information on a command or group."
|
||||
"Run 'lbrynet COMMAND --help' for more information on a command or group."
|
||||
)
|
||||
else:
|
||||
self.epilog = (
|
||||
|
@ -154,17 +95,10 @@ class HelpFormatter(argparse.HelpFormatter):
|
|||
)
|
||||
|
||||
|
||||
def add_command_parser(parent, command):
|
||||
subcommand = parent.add_parser(
|
||||
command['name'],
|
||||
help=command['doc'].strip().splitlines()[0]
|
||||
)
|
||||
subcommand.set_defaults(
|
||||
api_method_name=command['api_method_name'],
|
||||
command=command['name'],
|
||||
doc=command['doc'],
|
||||
replaced_by=command.get('replaced_by', None)
|
||||
)
|
||||
def add_command_parser(parent, method_name, command):
|
||||
short = command['desc']['text'][0] if command['desc'] else ''
|
||||
subcommand = parent.add_parser(command['name'], help=short)
|
||||
subcommand.set_defaults(api_method_name=method_name, command=command['name'], doc=command['help'])
|
||||
|
||||
|
||||
def get_argument_parser():
|
||||
|
@ -183,6 +117,10 @@ def get_argument_parser():
|
|||
usage='lbrynet start [--config FILE] [--data-dir DIR] [--wallet-dir DIR] [--download-dir DIR] ...',
|
||||
help='Start LBRY Network interface.'
|
||||
)
|
||||
start.add_argument(
|
||||
'--full-node', dest='full_node', action="store_true",
|
||||
help='Start a full node with local blockchain data, requires lbrycrd.'
|
||||
)
|
||||
start.add_argument(
|
||||
'--quiet', dest='quiet', action="store_true",
|
||||
help='Disable all console output.'
|
||||
|
@ -200,26 +138,32 @@ def get_argument_parser():
|
|||
'--initial-headers', dest='initial_headers',
|
||||
help='Specify path to initial blockchain headers, faster than downloading them on first run.'
|
||||
)
|
||||
install = sub.add_parser("install", help="Install lbrynet with various system services.")
|
||||
install.add_argument("system", choices=["systemd"])
|
||||
install.add_argument(
|
||||
"--global", dest="install_global", action="store_true",
|
||||
help="Install system wide (requires running as root), default is for current user only."
|
||||
)
|
||||
Config.contribute_to_argparse(start)
|
||||
start.set_defaults(command='start', start_parser=start, doc=start.format_help())
|
||||
install.set_defaults(command='install', install_parser=install, doc=install.format_help())
|
||||
|
||||
api = Daemon.get_api_definitions()
|
||||
groups = {}
|
||||
for group_name in sorted(api['groups']):
|
||||
group_parser = sub.add_parser(group_name, group_name=group_name, help=api['groups'][group_name])
|
||||
for group_name in sorted(interface['groups']):
|
||||
group_parser = sub.add_parser(group_name, group_name=group_name, help=interface['groups'][group_name])
|
||||
groups[group_name] = group_parser.add_subparsers(metavar='COMMAND')
|
||||
|
||||
nicer_order = ['stop', 'get', 'publish', 'resolve']
|
||||
for command_name in sorted(api['commands']):
|
||||
for command_name in sorted(interface['commands']):
|
||||
if command_name not in nicer_order:
|
||||
nicer_order.append(command_name)
|
||||
|
||||
for command_name in nicer_order:
|
||||
command = api['commands'][command_name]
|
||||
if command['group'] is None:
|
||||
add_command_parser(sub, command)
|
||||
command = interface['commands'][command_name]
|
||||
if command.get('group') is None:
|
||||
add_command_parser(sub, command_name, command)
|
||||
else:
|
||||
add_command_parser(groups[command['group']], command)
|
||||
add_command_parser(groups[command['group']], command_name, command)
|
||||
|
||||
return root
|
||||
|
||||
|
@ -229,65 +173,62 @@ def ensure_directory_exists(path: str):
|
|||
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
LOG_MODULES = 'lbry', 'aioupnp'
|
||||
async def execute_command(conf, method, params):
|
||||
client = Client(f"http://{conf.api}/ws")
|
||||
await client.connect()
|
||||
resp = await client.send(method, **params)
|
||||
print(await resp.first)
|
||||
await client.disconnect()
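A hedged usage sketch of the websocket-based execute_command() above; conf is assumed to be an already initialized Config pointing at a running daemon:

import asyncio

async def show_status(conf):
    # roughly equivalent to `lbrynet status` on the command line
    await execute_command(conf, 'status', {})

# asyncio.run(show_status(conf))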
|
||||
|
||||
|
||||
def setup_logging(logger: logging.Logger, args: argparse.Namespace, conf: Config):
|
||||
default_formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(name)s:%(lineno)d: %(message)s")
|
||||
file_handler = logging.handlers.RotatingFileHandler(conf.log_file_path, maxBytes=2097152, backupCount=5)
|
||||
file_handler.setFormatter(default_formatter)
|
||||
for module_name in LOG_MODULES:
|
||||
logger.getChild(module_name).addHandler(file_handler)
|
||||
if not args.quiet:
|
||||
handler = logging.StreamHandler()
|
||||
handler.setFormatter(default_formatter)
|
||||
for module_name in LOG_MODULES:
|
||||
logger.getChild(module_name).addHandler(handler)
|
||||
|
||||
logger.getChild('lbry').setLevel(logging.INFO)
|
||||
logger.getChild('aioupnp').setLevel(logging.WARNING)
|
||||
logger.getChild('aiohttp').setLevel(logging.CRITICAL)
|
||||
|
||||
if args.verbose is not None:
|
||||
if len(args.verbose) > 0:
|
||||
for module in args.verbose:
|
||||
logger.getChild(module).setLevel(logging.DEBUG)
|
||||
else:
|
||||
logger.getChild('lbry').setLevel(logging.DEBUG)
|
||||
|
||||
loggly_handler = get_loggly_handler(conf)
|
||||
loggly_handler.setLevel(logging.ERROR)
|
||||
logger.getChild('lbry').addHandler(loggly_handler)
|
||||
def normalize_value(x, key=None):
|
||||
if not isinstance(x, str):
|
||||
return x
|
||||
if key in ('uri', 'channel_name', 'name', 'file_name', 'claim_name', 'download_directory'):
|
||||
return x
|
||||
if x.lower() == 'true':
|
||||
return True
|
||||
if x.lower() == 'false':
|
||||
return False
|
||||
if x.isdigit():
|
||||
return int(x)
|
||||
return x
|
||||
|
||||
|
||||
def run_daemon(args: argparse.Namespace, conf: Config):
|
||||
loop = asyncio.get_event_loop()
|
||||
if args.verbose is not None:
|
||||
loop.set_debug(True)
|
||||
if not args.no_logging:
|
||||
setup_logging(logging.getLogger(), args, conf)
|
||||
daemon = Daemon(conf)
|
||||
def remove_brackets(key):
|
||||
if key.startswith("<") and key.endswith(">"):
|
||||
return str(key[1:-1])
|
||||
return key
|
||||
|
||||
def __exit():
|
||||
raise GracefulExit()
|
||||
|
||||
try:
|
||||
loop.add_signal_handler(signal.SIGINT, __exit)
|
||||
loop.add_signal_handler(signal.SIGTERM, __exit)
|
||||
except NotImplementedError:
|
||||
pass # Not implemented on Windows
|
||||
def set_kwargs(parsed_args):
|
||||
kwargs = {}
|
||||
for key, arg in parsed_args.items():
|
||||
if arg is None:
|
||||
continue
|
||||
k = None
|
||||
if key.startswith("--") and remove_brackets(key[2:]) not in kwargs:
|
||||
k = remove_brackets(key[2:])
|
||||
elif remove_brackets(key) not in kwargs:
|
||||
k = remove_brackets(key)
|
||||
kwargs[k] = normalize_value(arg, k)
|
||||
return kwargs
|
||||
|
||||
try:
|
||||
loop.run_until_complete(daemon.start())
|
||||
loop.run_forever()
|
||||
except (GracefulExit, KeyboardInterrupt, asyncio.CancelledError):
|
||||
pass
|
||||
finally:
|
||||
loop.run_until_complete(daemon.stop())
|
||||
logging.shutdown()
|
||||
|
||||
if hasattr(loop, 'shutdown_asyncgens'):
|
||||
loop.run_until_complete(loop.shutdown_asyncgens())
|
||||
def install_systemd_service():
|
||||
systemd_service = textwrap.dedent(f"""\
|
||||
[Unit]
|
||||
Description=LBRYnet
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart={sys.argv[0]} start --full-node
|
||||
""")
|
||||
subprocess.run(
|
||||
["systemctl", "edit", "--user", "--full", "--force", "lbrynet.service"],
|
||||
input=systemd_service, text=True, check=True,
|
||||
env=dict(os.environ, SYSTEMD_EDITOR="cp /dev/stdin"),
|
||||
)
|
||||
|
||||
|
||||
def main(argv=None):
|
||||
|
@ -295,34 +236,36 @@ def main(argv=None):
|
|||
parser = get_argument_parser()
|
||||
args, command_args = parser.parse_known_args(argv)
|
||||
|
||||
conf = Config.create_from_arguments(args)
|
||||
conf = Config()
|
||||
conf.set_arguments(args)
|
||||
conf.set_environment()
|
||||
conf.set_default_paths()
|
||||
conf.set_persisted()
|
||||
for directory in (conf.data_dir, conf.download_dir, conf.wallet_dir):
|
||||
ensure_directory_exists(directory)
|
||||
|
||||
if args.cli_version:
|
||||
print(f"lbrynet {lbrynet_version}")
|
||||
print(f"lbrynet {__version__}")
|
||||
elif args.command == 'start':
|
||||
if args.help:
|
||||
args.start_parser.print_help()
|
||||
elif args.full_node:
|
||||
service = FullNode(Ledger(conf))
|
||||
if conf.console == "advanced":
|
||||
console = AdvancedConsole(service)
|
||||
else:
|
||||
console = BasicConsole(service)
|
||||
return Daemon(service, console).run()
|
||||
else:
|
||||
if args.initial_headers:
|
||||
ledger_path = os.path.join(conf.wallet_dir, 'lbc_mainnet')
|
||||
ensure_directory_exists(ledger_path)
|
||||
current_size = 0
|
||||
headers_path = os.path.join(ledger_path, 'headers')
|
||||
if os.path.exists(headers_path):
|
||||
current_size = os.stat(headers_path).st_size
|
||||
if os.stat(args.initial_headers).st_size > current_size:
|
||||
log.info('Copying header from %s to %s', args.initial_headers, headers_path)
|
||||
shutil.copy(args.initial_headers, headers_path)
|
||||
run_daemon(args, conf)
|
||||
print('Only `start --full-node` is currently supported.')
|
||||
elif args.command == 'install':
|
||||
if args.help:
|
||||
args.install_parser.print_help()
|
||||
elif args.system == 'systemd':
|
||||
install_systemd_service()
|
||||
elif args.command is not None:
|
||||
doc = args.doc
|
||||
api_method_name = args.api_method_name
|
||||
if args.replaced_by:
|
||||
print(f"{args.api_method_name} is deprecated, using {args.replaced_by['api_method_name']}.")
|
||||
doc = args.replaced_by['doc']
|
||||
api_method_name = args.replaced_by['api_method_name']
|
||||
if args.help:
|
||||
print(doc)
|
||||
else:
|
lbry/conf.py (190 changed lines)
|
@ -1,16 +1,16 @@
|
|||
import os
|
||||
import re
|
||||
import sys
|
||||
import typing
|
||||
import logging
|
||||
from argparse import ArgumentParser
|
||||
from contextlib import contextmanager
|
||||
from typing import Tuple
|
||||
|
||||
import yaml
|
||||
from appdirs import user_data_dir, user_config_dir
|
||||
from lbry.utils.dirs import user_data_dir, user_download_dir
|
||||
from lbry.error import InvalidCurrencyError
|
||||
from lbry.dht import constants
|
||||
from lbry.wallet.coinselection import STRATEGIES
|
||||
from lbry.wallet.coinselection import COIN_SELECTION_STRATEGIES
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
@ -192,7 +192,7 @@ class MaxKeyFee(Setting[dict]):
|
|||
)
|
||||
parser.add_argument(
|
||||
self.no_cli_name,
|
||||
help=f"Disable maximum key fee check.",
|
||||
help="Disable maximum key fee check.",
|
||||
dest=self.name,
|
||||
const=None,
|
||||
action="store_const",
|
||||
|
@ -325,7 +325,7 @@ class ConfigFileAccess:
|
|||
cls = type(self.configuration)
|
||||
with open(self.path, 'r') as config_file:
|
||||
raw = config_file.read()
|
||||
serialized = yaml.load(raw) or {}
|
||||
serialized = yaml.full_load(raw) or {}
|
||||
for key, value in serialized.items():
|
||||
attr = getattr(cls, key, None)
|
||||
if attr is None:
|
||||
|
@ -382,8 +382,12 @@ class BaseConfig:
|
|||
self.environment = {} # from environment variables
|
||||
self.persisted = {} # from config file
|
||||
self._updating_config = False
|
||||
self.set(**kwargs)
|
||||
|
||||
def set(self, **kwargs):
|
||||
for key, value in kwargs.items():
|
||||
setattr(self, key, value)
|
||||
return self
|
||||
|
||||
@contextmanager
|
||||
def update_config(self):
|
||||
|
@ -500,19 +504,29 @@ class CLIConfig(TranscodeConfig):
|
|||
|
||||
|
||||
class Config(CLIConfig):
|
||||
db_url = String("Database connection URL, uses a local file based SQLite by default.")
|
||||
workers = Integer(
|
||||
"Multiprocessing, specify number of worker processes lbrynet can start (including main process)."
|
||||
" (-1: threads only, 0: equal to number of CPUs, >1: specific number of processes)", -1
|
||||
)
|
||||
console = StringChoice(
|
||||
"Basic text console output or advanced colored output with progress bars.",
|
||||
["basic", "advanced"], "advanced"
|
||||
)
|
||||
|
||||
# directories
|
||||
data_dir = Path("Directory path to store blobs.", metavar='DIR')
|
||||
download_dir = Path(
|
||||
"Directory path to place assembled files downloaded from LBRY.",
|
||||
previous_names=['download_directory'], metavar='DIR'
|
||||
)
|
||||
wallet_dir = Path(
|
||||
"Directory containing a 'wallets' subdirectory with 'default_wallet' file.",
|
||||
previous_names=['lbryum_wallet_dir'], metavar='DIR'
|
||||
)
|
||||
download_dir = Path("Directory to store downloaded files.", metavar='DIR')
|
||||
data_dir = Path("Main directory containing blobs, wallets and blockchain data.", metavar='DIR')
|
||||
blob_dir = Path("Directory to store blobs (default: 'data_dir'/blobs).", metavar='DIR')
|
||||
wallet_dir = Path("Directory to store wallets (default: 'data_dir'/wallets).", metavar='DIR')
|
||||
wallets = Strings(
|
||||
"Wallet files in 'wallet_dir' to load at startup.",
|
||||
['default_wallet']
|
||||
"Wallet files in 'wallet_dir' to load at startup.", ['default_wallet']
|
||||
)
|
||||
create_default_wallet = Toggle(
|
||||
"Create an initial wallet if it does not exist on startup.", True
|
||||
)
|
||||
create_default_account = Toggle(
|
||||
"Create an initial account if it does not exist in the default wallet.", True
|
||||
)
|
||||
|
||||
# network
|
||||
|
@ -601,7 +615,19 @@ class Config(CLIConfig):
|
|||
comment_server = String("Comment server API URL", "https://comments.lbry.com/api")
|
||||
|
||||
# blockchain
|
||||
lbrycrd_rpc_user = String("Username for connecting to lbrycrd.", "rpcuser")
|
||||
lbrycrd_rpc_pass = String("Password for connecting to lbrycrd.", "rpcpassword")
|
||||
lbrycrd_rpc_host = String("Hostname for connecting to lbrycrd.", "localhost")
|
||||
lbrycrd_rpc_port = Integer("Port for connecting to lbrycrd.", 9245)
|
||||
lbrycrd_peer_port = Integer("Port for connecting to lbrycrd.", 9246)
|
||||
lbrycrd_zmq_blocks = String("ZMQ block events address.")
|
||||
lbrycrd_dir = Path("Directory containing lbrycrd data.", metavar='DIR')
|
||||
blockchain_name = String("Blockchain name - lbrycrd_main, lbrycrd_regtest, or lbrycrd_testnet", 'lbrycrd_main')
|
||||
spv_address_filters = Toggle(
|
||||
"Generate Golomb-Rice coding filters for blocks and transactions. Enables "
|
||||
"light client to synchronize with a full node.",
|
||||
True
|
||||
)
|
||||
|
||||
# daemon
|
||||
save_files = Toggle("Save downloaded files when calling `get` by default", True)
|
||||
|
@ -620,7 +646,7 @@ class Config(CLIConfig):
|
|||
|
||||
coin_selection_strategy = StringChoice(
|
||||
"Strategy to use when selecting UTXOs for a transaction",
|
||||
STRATEGIES, "standard")
|
||||
COIN_SELECTION_STRATEGIES, "standard")
|
||||
|
||||
save_resolved_claims = Toggle(
|
||||
"Save content claims to the database when they are resolved to keep file_list up to date, "
|
||||
|
@ -635,9 +661,18 @@ class Config(CLIConfig):
|
|||
def streaming_port(self):
|
||||
return int(self.streaming_server.split(':')[1])
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super().__init__(**kwargs)
|
||||
self.set_default_paths()
|
||||
@classmethod
|
||||
def with_null_dir(cls):
|
||||
return cls.with_same_dir('/dev/null')
|
||||
|
||||
@classmethod
|
||||
def with_same_dir(cls, same_dir):
|
||||
return cls(
|
||||
data_dir=same_dir,
|
||||
download_dir=same_dir,
|
||||
wallet_dir=same_dir,
|
||||
lbrycrd_dir=same_dir,
|
||||
)
|
||||
|
||||
def set_default_paths(self):
|
||||
if 'darwin' in sys.platform.lower():
|
||||
|
@ -649,61 +684,76 @@ class Config(CLIConfig):
|
|||
else:
|
||||
return
|
||||
cls = type(self)
|
||||
cls.data_dir.default, cls.wallet_dir.default, cls.download_dir.default = get_directories()
|
||||
cls.config.default = os.path.join(
|
||||
self.data_dir, 'daemon_settings.yml'
|
||||
)
|
||||
cls.data_dir.default, cls.wallet_dir.default,\
|
||||
cls.blob_dir.default, cls.download_dir.default = get_directories()
|
||||
old_settings_file = os.path.join(self.data_dir, 'daemon_settings.yml')
|
||||
if os.path.exists(old_settings_file):
|
||||
cls.config.default = old_settings_file
|
||||
else:
|
||||
cls.config.default = os.path.join(self.data_dir, 'settings.yml')
|
||||
if self.data_dir != cls.data_dir.default:
|
||||
cls.blob_dir.default = os.path.join(self.data_dir, 'blobs')
|
||||
cls.wallet_dir.default = os.path.join(self.data_dir, 'wallets')
|
||||
|
||||
@property
|
||||
def log_file_path(self):
|
||||
return os.path.join(self.data_dir, 'lbrynet.log')
|
||||
return os.path.join(self.data_dir, 'daemon.log')
|
||||
|
||||
@property
|
||||
def db_url_or_default(self):
|
||||
if self.db_url:
|
||||
return self.db_url
|
||||
return 'sqlite:///'+os.path.join(self.data_dir, f'{self.blockchain_name}.db')
|
||||
|
||||
|
||||
def get_windows_directories() -> typing.Tuple[str, str, str]:
|
||||
from lbry.winpaths import get_path, FOLDERID, UserHandle, \
|
||||
PathNotFoundException # pylint: disable=import-outside-toplevel
|
||||
|
||||
try:
|
||||
download_dir = get_path(FOLDERID.Downloads, UserHandle.current)
|
||||
except PathNotFoundException:
|
||||
download_dir = os.getcwd()
|
||||
|
||||
def get_windows_directories() -> Tuple[str, str, str, str]:
|
||||
# very old
|
||||
data_dir = user_data_dir('lbrynet', roaming=True)
|
||||
blob_dir = os.path.join(data_dir, 'blobfiles')
|
||||
wallet_dir = os.path.join(user_data_dir('lbryum', roaming=True), 'wallets')
|
||||
if os.path.isdir(blob_dir) or os.path.isdir(wallet_dir):
|
||||
return data_dir, wallet_dir, blob_dir, user_download_dir()
|
||||
# old
|
||||
appdata = get_path(FOLDERID.RoamingAppData, UserHandle.current)
|
||||
data_dir = os.path.join(appdata, 'lbrynet')
|
||||
lbryum_dir = os.path.join(appdata, 'lbryum')
|
||||
if os.path.isdir(data_dir) or os.path.isdir(lbryum_dir):
|
||||
return data_dir, lbryum_dir, download_dir
|
||||
|
||||
# new
|
||||
data_dir = user_data_dir('lbrynet', 'lbry')
|
||||
lbryum_dir = user_data_dir('lbryum', 'lbry')
|
||||
return data_dir, lbryum_dir, download_dir
|
||||
|
||||
|
||||
def get_darwin_directories() -> typing.Tuple[str, str, str]:
|
||||
data_dir = user_data_dir('LBRY')
|
||||
lbryum_dir = os.path.expanduser('~/.lbryum')
|
||||
download_dir = os.path.expanduser('~/Downloads')
|
||||
return data_dir, lbryum_dir, download_dir
|
||||
|
||||
|
||||
def get_linux_directories() -> typing.Tuple[str, str, str]:
|
||||
try:
|
||||
with open(os.path.join(user_config_dir(), 'user-dirs.dirs'), 'r') as xdg:
|
||||
down_dir = re.search(r'XDG_DOWNLOAD_DIR=(.+)', xdg.read()).group(1)
|
||||
down_dir = re.sub(r'\$HOME', os.getenv('HOME') or os.path.expanduser("~/"), down_dir)
|
||||
download_dir = re.sub('\"', '', down_dir)
|
||||
except OSError:
|
||||
download_dir = os.getenv('XDG_DOWNLOAD_DIR')
|
||||
if not download_dir:
|
||||
download_dir = os.path.expanduser('~/Downloads')
|
||||
|
||||
# old
|
||||
data_dir = os.path.expanduser('~/.lbrynet')
|
||||
lbryum_dir = os.path.expanduser('~/.lbryum')
|
||||
if os.path.isdir(data_dir) or os.path.isdir(lbryum_dir):
|
||||
return data_dir, lbryum_dir, download_dir
|
||||
|
||||
blob_dir = os.path.join(data_dir, 'blobfiles')
|
||||
wallet_dir = os.path.join(user_data_dir('lbryum', 'lbry'), 'wallets')
|
||||
if os.path.isdir(blob_dir) and os.path.isdir(wallet_dir):
|
||||
return data_dir, wallet_dir, blob_dir, user_download_dir()
|
||||
# new
|
||||
return user_data_dir('lbry/lbrynet'), user_data_dir('lbry/lbryum'), download_dir
|
||||
return get_universal_directories()
|
||||
|
||||
|
||||
def get_darwin_directories() -> Tuple[str, str, str, str]:
|
||||
data_dir = user_data_dir('LBRY')
|
||||
blob_dir = os.path.join(data_dir, 'blobfiles')
|
||||
wallet_dir = os.path.expanduser('~/.lbryum/wallets')
|
||||
if os.path.isdir(blob_dir) or os.path.isdir(wallet_dir):
|
||||
return data_dir, wallet_dir, blob_dir, user_download_dir()
|
||||
return get_universal_directories()
|
||||
|
||||
|
||||
def get_linux_directories() -> Tuple[str, str, str, str]:
|
||||
# very old
|
||||
data_dir = os.path.expanduser('~/.lbrynet')
|
||||
blob_dir = os.path.join(data_dir, 'blobfiles')
|
||||
wallet_dir = os.path.join(os.path.expanduser('~/.lbryum'), 'wallets')
|
||||
if os.path.isdir(blob_dir) or os.path.isdir(wallet_dir):
|
||||
return data_dir, wallet_dir, blob_dir, user_download_dir()
|
||||
# old
|
||||
data_dir = user_data_dir('lbry/lbrynet')
|
||||
blob_dir = os.path.join(data_dir, 'blobfiles')
|
||||
wallet_dir = user_data_dir('lbry/lbryum/wallets')
|
||||
if os.path.isdir(blob_dir) or os.path.isdir(wallet_dir):
|
||||
return data_dir, wallet_dir, blob_dir, user_download_dir()
|
||||
# new
|
||||
return get_universal_directories()
|
||||
|
||||
|
||||
def get_universal_directories() -> Tuple[str, str, str, str]:
|
||||
lbrynet_dir = user_data_dir('lbrynet', 'LBRY')
|
||||
return (
|
||||
lbrynet_dir,
|
||||
os.path.join(lbrynet_dir, 'wallets'),
|
||||
os.path.join(lbrynet_dir, 'blobs'),
|
||||
user_download_dir()
|
||||
)
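The four directories above always come back in (data, wallets, blobs, downloads) order, which is how Config.set_default_paths() unpacks them into the data_dir, wallet_dir, blob_dir and download_dir defaults; a minimal sketch:

data_dir, wallet_dir, blob_dir, download_dir = get_universal_directories()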
|
||||
|
|
lbry/console.py (new file, 492 lines)
|
@ -0,0 +1,492 @@
|
|||
import os
|
||||
import sys
|
||||
import time
|
||||
import itertools
|
||||
import logging
|
||||
from typing import Dict, Any
|
||||
from tempfile import TemporaryFile
|
||||
|
||||
from tqdm.std import tqdm, Bar
|
||||
from tqdm.utils import FormatReplace, _unicode, disp_len, disp_trim, _is_ascii
|
||||
|
||||
from lbry import __version__
|
||||
from lbry.service.base import Service
|
||||
from lbry.service.full_node import FullNode
|
||||
from lbry.service.light_client import LightClient
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RedirectOutput:
|
||||
|
||||
silence_lines = [
|
||||
b'libprotobuf ERROR google/protobuf/wire_format_lite.cc:626',
|
||||
]
|
||||
|
||||
def __init__(self, stream_type: str):
|
||||
assert stream_type in ('stderr', 'stdout')
|
||||
self.stream_type = stream_type
|
||||
self.stream_no = getattr(sys, stream_type).fileno()
|
||||
self.last_flush = time.time()
|
||||
self.last_read = 0
|
||||
self.backup = None
|
||||
self.file = None
|
||||
|
||||
def __enter__(self):
|
||||
self.backup = os.dup(self.stream_no)
|
||||
setattr(sys, self.stream_type, os.fdopen(self.backup, 'w'))
|
||||
self.file = TemporaryFile()
|
||||
self.backup = os.dup2(self.file.fileno(), self.stream_no)
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.file.close()
|
||||
os.dup2(self.backup, self.stream_no)
|
||||
os.close(self.backup)
|
||||
setattr(sys, self.stream_type, os.fdopen(self.stream_no, 'w'))
|
||||
|
||||
def capture(self):
|
||||
self.__enter__()
|
||||
|
||||
def release(self):
|
||||
self.__exit__(None, None, None)
|
||||
|
||||
def flush(self, writer, force=False):
|
||||
if not force and (time.time() - self.last_flush) < 5:
|
||||
return
|
||||
self.file.seek(self.last_read)
|
||||
for line in self.file.readlines():
|
||||
silence = False
|
||||
for bad_line in self.silence_lines:
|
||||
if bad_line in line:
|
||||
silence = True
|
||||
break
|
||||
if not silence:
|
||||
writer(line.decode().rstrip())
|
||||
self.last_read = self.file.tell()
|
||||
self.last_flush = time.time()
|
||||
|
||||
|
||||
class Console:
|
||||
|
||||
def __init__(self, service: Service):
|
||||
self.service = service
|
||||
|
||||
def starting(self):
|
||||
pass
|
||||
|
||||
def stopping(self):
|
||||
pass
|
||||
|
||||
|
||||
class Basic(Console):
|
||||
|
||||
def __init__(self, service: Service):
|
||||
super().__init__(service)
|
||||
self.service.sync.on_progress.listen(self.on_sync_progress)
|
||||
self.tasks = {}
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
def starting(self):
|
||||
conf = self.service.conf
|
||||
s = [f'LBRY v{__version__}']
|
||||
if isinstance(self.service, FullNode):
|
||||
s.append('Full Node')
|
||||
elif isinstance(self.service, LightClient):
|
||||
s.append('Light Client')
|
||||
if conf.workers == -1:
|
||||
s.append('Threads Only')
|
||||
else:
|
||||
workers = os.cpu_count() if conf.workers == 0 else conf.workers
|
||||
s.append(f'{workers} Worker' if workers == 1 else f'{workers} Workers')
|
||||
s.append(f'({os.cpu_count()} CPUs available)')
|
||||
log.info(' '.join(s))
|
||||
|
||||
def stopping(self):
|
||||
log.info('exiting')
|
||||
|
||||
@staticmethod
|
||||
def maybe_log_progress(event, done, total, last):
|
||||
if done == 0:
|
||||
log.info("%s 0%%", event)
|
||||
return 0
|
||||
elif done == total:
|
||||
log.info("%s 100%%", event)
|
||||
return 1
|
||||
else:
|
||||
percent = done/total
|
||||
if percent >= 0.25 > last:
|
||||
log.info("%s 25%%", event)
|
||||
return 0.25
|
||||
elif percent >= 0.50 > last:
|
||||
log.info("%s 50%%", event)
|
||||
return 0.50
|
||||
elif percent >= 0.75 > last:
|
||||
log.info("%s 75%%", event)
|
||||
return 0.75
|
||||
return last
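Illustrative call sequence for maybe_log_progress() above: it only logs at the 0/25/50/75/100% thresholds and returns the last threshold reached, which the caller feeds back in on the next call.

last = 0
for done in (0, 10, 30, 60, 80, 100):
    last = Basic.maybe_log_progress("blocks", done, 100, last)
# logs: 0%, 25%, 50%, 75%, 100%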
|
||||
|
||||
def on_sync_progress(self, event):
|
||||
e, data = event["event"], event["data"]
|
||||
name, current, total, last = e, data['done'][0], 0, 0
|
||||
if not e.endswith("init") and not e.endswith("main") and not e.endswith("indexes"):
|
||||
name = f"{e}#{data['id']}"
|
||||
if "total" in data:
|
||||
total, last = self.tasks[name] = (data["total"][0], last)
|
||||
elif name in self.tasks:
|
||||
total, last = self.tasks[name]
|
||||
elif total == 0:
|
||||
return
|
||||
progress_status = (total, self.maybe_log_progress(name, current, total, last))
|
||||
if progress_status[1] == 1:
|
||||
del self.tasks[name]
|
||||
else:
|
||||
self.tasks[name] = progress_status
|
||||
|
||||
|
||||
class Bar2(Bar):
|
||||
|
||||
def __init__(self, frac, default_len=10, charset=None):
|
||||
super().__init__(frac[0], default_len, charset)
|
||||
self.frac2 = frac[1]
|
||||
|
||||
def __format__(self, format_spec):
|
||||
width = self.default_len
|
||||
row1 = (1,)*int(self.frac2 * width * 2)
|
||||
row2 = (2,)*int(self.frac * width * 2)
|
||||
fill = []
|
||||
for one, two, _ in itertools.zip_longest(row1, row2, range(width*2)):
|
||||
fill.append((one or 0)+(two or 0))
|
||||
bar = []
|
||||
for i in range(0, width*2, 2):
|
||||
if fill[i] == 1:
|
||||
if fill[i+1] == 1:
|
||||
bar.append('▀')
|
||||
else:
|
||||
bar.append('▘')
|
||||
elif fill[i] == 2:
|
||||
if fill[i+1] == 2:
|
||||
bar.append('▄')
|
||||
else:
|
||||
bar.append('▖')
|
||||
elif fill[i] == 3:
|
||||
if fill[i+1] == 1:
|
||||
bar.append('▛')
|
||||
elif fill[i+1] == 2:
|
||||
bar.append('▙')
|
||||
elif fill[i+1] == 3:
|
||||
bar.append('█')
|
||||
else:
|
||||
bar.append('▌')
|
||||
else:
|
||||
bar.append(' ')
|
||||
return ''.join(bar)
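Illustrative rendering of Bar2 above: it packs two fractions into one row of half-block characters, with frac drawn in the bottom half and frac2 in the top half (exact glyphs depend on the terminal font):

bar = Bar2((0.5, 0.25), default_len=10)
print(f"{bar}")  # roughly '██▙▄▄     ': top row at 25%, bottom row at 50%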
|
||||
|
||||
|
||||
class tqdm2(tqdm): # pylint: disable=invalid-name
|
||||
|
||||
def __init__(self, initial=(0, 0), unit=('it', 'it'), total=(None, None), **kwargs):
|
||||
self.n2 = self.last_print_n2 = initial[1] # pylint: disable=invalid-name
|
||||
self.unit2 = unit[1]
|
||||
self.total2 = total[1]
|
||||
super().__init__(initial=initial[0], unit=unit[0], total=total[0], **kwargs)
|
||||
|
||||
@property
|
||||
def format_dict(self):
|
||||
d = super().format_dict
|
||||
d.update({
|
||||
'n2': self.n2,
|
||||
'unit2': self.unit2,
|
||||
'total2': self.total2,
|
||||
})
|
||||
return d
|
||||
|
||||
def update(self, n=(1, 1)):
|
||||
if self.disable:
|
||||
return
|
||||
last_last_print_t = self.last_print_t
|
||||
self.n2 += n[1]
|
||||
super().update(n[0])
|
||||
if last_last_print_t != self.last_print_t:
|
||||
self.last_print_n2 = self.n2
|
||||
|
||||
@staticmethod
|
||||
def format_meter(
|
||||
n, total, elapsed, ncols=None, prefix='', ascii=False, # pylint: disable=redefined-builtin
|
||||
unit='it', unit_scale=False, rate=None, bar_format=None,
|
||||
postfix=None, unit_divisor=1000, **extra_kwargs
|
||||
):
|
||||
|
||||
# sanity check: total
|
||||
if total and n >= (total + 0.5): # allow float imprecision (#849)
|
||||
total = None
|
||||
|
||||
# apply custom scale if necessary
|
||||
if unit_scale and unit_scale not in (True, 1):
|
||||
if total:
|
||||
total *= unit_scale
|
||||
n *= unit_scale
|
||||
if rate:
|
||||
rate *= unit_scale # by default rate = 1 / self.avg_time
|
||||
unit_scale = False
|
||||
|
||||
elapsed_str = tqdm.format_interval(elapsed)
|
||||
|
||||
# if unspecified, attempt to use rate = average speed
|
||||
# (we allow manual override since predicting time is an arcane art)
|
||||
if rate is None and elapsed:
|
||||
rate = n / elapsed
|
||||
inv_rate = 1 / rate if rate else None
|
||||
format_sizeof = tqdm.format_sizeof
|
||||
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
|
||||
'{0:5.2f}'.format(rate))
|
||||
if rate else '?') + unit + '/s'
|
||||
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
|
||||
'{0:5.2f}'.format(inv_rate))
|
||||
if inv_rate else '?') + 's/' + unit
|
||||
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
|
||||
|
||||
if unit_scale:
|
||||
n_fmt = format_sizeof(n, divisor=unit_divisor)
|
||||
total_fmt = format_sizeof(total, divisor=unit_divisor) \
|
||||
if total is not None else '?'
|
||||
else:
|
||||
n_fmt = str(n)
|
||||
total_fmt = str(total) if total is not None else '?'
|
||||
|
||||
try:
|
||||
postfix = ', ' + postfix if postfix else ''
|
||||
except TypeError:
|
||||
pass
|
||||
|
||||
remaining = (total - n) / rate if rate and total else 0
|
||||
remaining_str = tqdm.format_interval(remaining) if rate else '?'
|
||||
|
||||
# format the stats displayed to the left and right sides of the bar
|
||||
if prefix:
|
||||
# old prefix setup work around
|
||||
bool_prefix_colon_already = (prefix[-2:] == ": ")
|
||||
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
|
||||
else:
|
||||
l_bar = ''
|
||||
|
||||
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
|
||||
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
|
||||
|
||||
# Custom bar formatting
|
||||
# Populate a dict with all available progress indicators
|
||||
format_dict = dict(
|
||||
# slight extension of self.format_dict
|
||||
n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
|
||||
elapsed=elapsed_str, elapsed_s=elapsed,
|
||||
ncols=ncols, desc=prefix or '', unit=unit,
|
||||
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
|
||||
rate_fmt=rate_fmt, rate_noinv=rate,
|
||||
rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
|
||||
rate_inv_fmt=rate_inv_fmt,
|
||||
postfix=postfix, unit_divisor=unit_divisor,
|
||||
# plus more useful definitions
|
||||
remaining=remaining_str, remaining_s=remaining,
|
||||
l_bar=l_bar, r_bar=r_bar,
|
||||
**extra_kwargs)
|
||||
|
||||
# total is known: we can predict some stats
|
||||
if total:
|
||||
n2, total2 = extra_kwargs['n2'], extra_kwargs['total2'] # pylint: disable=invalid-name
|
||||
|
||||
# fractional and percentage progress
|
||||
frac = n / total
|
||||
frac2 = n2 / total2
|
||||
percentage = frac * 100
|
||||
|
||||
l_bar += '{0:3.0f}%|'.format(percentage)
|
||||
|
||||
if ncols == 0:
|
||||
return l_bar[:-1] + r_bar[1:]
|
||||
|
||||
format_dict.update(l_bar=l_bar)
|
||||
if bar_format:
|
||||
format_dict.update(percentage=percentage)
|
||||
|
||||
# auto-remove colon for empty `desc`
|
||||
if not prefix:
|
||||
bar_format = bar_format.replace("{desc}: ", '')
|
||||
else:
|
||||
bar_format = "{l_bar}{bar}{r_bar}"
|
||||
|
||||
full_bar = FormatReplace()
|
||||
try:
|
||||
nobar = bar_format.format(bar=full_bar, **format_dict)
|
||||
except UnicodeEncodeError:
|
||||
bar_format = _unicode(bar_format)
|
||||
nobar = bar_format.format(bar=full_bar, **format_dict)
|
||||
if not full_bar.format_called:
|
||||
# no {bar}, we can just format and return
|
||||
return nobar
|
||||
|
||||
# Formatting progress bar space available for bar's display
|
||||
full_bar = Bar2(
|
||||
(frac, frac2),
|
||||
max(1, ncols - disp_len(nobar))
|
||||
if ncols else 10,
|
||||
charset=Bar2.ASCII if ascii is True else ascii or Bar2.UTF)
|
||||
if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
|
||||
bar_format = _unicode(bar_format)
|
||||
res = bar_format.format(bar=full_bar, **format_dict)
|
||||
return disp_trim(res, ncols) if ncols else res
|
||||
|
||||
elif bar_format:
|
||||
# user-specified bar_format but no total
|
||||
l_bar += '|'
|
||||
format_dict.update(l_bar=l_bar, percentage=0)
|
||||
full_bar = FormatReplace()
|
||||
nobar = bar_format.format(bar=full_bar, **format_dict)
|
||||
if not full_bar.format_called:
|
||||
return nobar
|
||||
full_bar = Bar2(
|
||||
(0, 0),
|
||||
max(1, ncols - disp_len(nobar))
|
||||
if ncols else 10,
|
||||
charset=Bar2.BLANK)
|
||||
res = bar_format.format(bar=full_bar, **format_dict)
|
||||
return disp_trim(res, ncols) if ncols else res
|
||||
else:
|
||||
# no total: no progressbar, ETA, just progress stats
|
||||
return ((prefix + ": ") if prefix else '') + \
|
||||
'{0}{1} [{2}, {3}{4}]'.format(
|
||||
n_fmt, unit, elapsed_str, rate_fmt, postfix)
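A hedged usage sketch of tqdm2 above, which tracks a second counter (n2/total2) alongside the usual tqdm one and feeds both fractions into Bar2; exact rendering depends on the installed tqdm version:

bar = tqdm2(desc='blocks', unit=('txs', 'claims'), total=(100, 40))
bar.update((10, 4))   # advance both counters at once
bar.close()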
|
||||
|
||||
|
||||
class Advanced(Basic):
|
||||
|
||||
FORMAT = '{l_bar}{bar}| {n_fmt:>8}/{total_fmt:>8} [{elapsed:>7}<{remaining:>8}, {rate_fmt:>17}]'
|
||||
|
||||
def __init__(self, service: Service):
|
||||
super().__init__(service)
|
||||
self.bars: Dict[Any, tqdm] = {}
|
||||
self.stderr = RedirectOutput('stderr')
|
||||
|
||||
def starting(self):
|
||||
self.stderr.capture()
|
||||
super().starting()
|
||||
|
||||
def stopping(self):
|
||||
for bar in self.bars.values():
|
||||
bar.close()
|
||||
super().stopping()
|
||||
#self.stderr.flush(self.bars['read'].write, True)
|
||||
#self.stderr.release()
|
||||
|
||||
def get_or_create_bar(self, name, desc, units, totals, leave=False, bar_format=None, postfix=None, position=None):
|
||||
bar = self.bars.get(name)
|
||||
if bar is None:
|
||||
if len(units) == 2:
|
||||
bar = self.bars[name] = tqdm2(
|
||||
desc=desc, unit=units, total=totals,
|
||||
bar_format=bar_format or self.FORMAT, leave=leave,
|
||||
postfix=postfix, position=position
|
||||
)
|
||||
else:
|
||||
bar = self.bars[name] = tqdm(
|
||||
desc=desc, unit=units[0], total=totals[0],
|
||||
bar_format=bar_format or self.FORMAT, leave=leave,
|
||||
postfix=postfix, position=position
|
||||
)
|
||||
return bar
|
||||
|
||||
def sync_init(self, name, d):
|
||||
bar_name = f"{name}#{d['id']}"
|
||||
bar = self.bars.get(bar_name)
|
||||
if bar is None:
|
||||
label = d.get('label', name[-11:])
|
||||
self.get_or_create_bar(bar_name, label, d['units'], d['total'], True)
|
||||
else:
|
||||
if d['done'][0] != -1:
|
||||
bar.update(d['done'][0] - bar.n)
|
||||
if d['done'][0] == -1 or d['done'][0] == bar.total:
|
||||
bar.close()
|
||||
self.bars.pop(bar_name)
|
||||
|
||||
def sync_main(self, name, d):
|
||||
bar = self.bars.get(name)
|
||||
if bar is None:
|
||||
label = d.get('label', name[-11:])
|
||||
self.get_or_create_bar(name, label, d['units'], d['total'], True)
|
||||
#self.last_stats = f"{d['txs']:,d} txs, {d['claims']:,d} claims and {d['supports']:,d} supports"
|
||||
#self.get_or_create_bar("read", "├─ blocks read", "blocks", d['blocks'], True)
|
||||
#self.get_or_create_bar("save", "└─┬ txs saved", "txs", d['txs'], True)
|
||||
else:
|
||||
if d['done'] == (-1,)*len(d['done']):
|
||||
base_name = name[:name.rindex('.')]
|
||||
for child_name, child_bar in self.bars.items():
|
||||
if child_name.startswith(base_name):
|
||||
child_bar.close()
|
||||
bar.close()
|
||||
self.bars.pop(name)
|
||||
else:
|
||||
if len(d['done']) == 2:
|
||||
bar.update((d['done'][0]-bar.n, d['done'][1]-bar.n2))
|
||||
else:
|
||||
bar.update(d['done'][0]-bar.n)
|
||||
|
||||
def sync_task(self, name, d):
|
||||
bar_name = f"{name}#{d['id']}"
|
||||
bar = self.bars.get(bar_name)
|
||||
if bar is None:
|
||||
#assert d['done'][0] == 0
|
||||
label = d.get('label', name[-11:])
|
||||
self.get_or_create_bar(
|
||||
f"{name}#{d['id']}", label, d['units'], d['total'],
|
||||
name.split('.')[-1] not in ('insert', 'update', 'file')
|
||||
)
|
||||
else:
|
||||
if d['done'][0] != -1:
|
||||
main_bar_name = f"{name[:name.rindex('.')]}.main"
|
||||
if len(d['done']) > 1:
|
||||
diff = tuple(a-b for a, b in zip(d['done'], (bar.n, bar.n2)))
|
||||
else:
|
||||
diff = d['done'][0] - bar.n
|
||||
if main_bar_name != name:
|
||||
main_bar = self.bars.get(main_bar_name)
|
||||
if main_bar and main_bar.unit == bar.unit:
|
||||
main_bar.update(diff)
|
||||
bar.update(diff)
|
||||
if d['done'][0] == -1 or d['done'][0] == bar.total:
|
||||
bar.close()
|
||||
self.bars.pop(bar_name)
|
||||
|
||||
def update_other_bars(self, e, d):
|
||||
if d['total'] == 0:
|
||||
return
|
||||
bar = self.bars.get(e)
|
||||
if not bar:
|
||||
name = (
|
||||
' '.join(e.split('.')[-2:])
|
||||
.replace('support', 'suprt')
|
||||
.replace('channels', 'chanls')
|
||||
.replace('signatures', 'sigs')
|
||||
)
|
||||
bar = self.get_or_create_bar(e, f"├─ {name:>12}", d['unit'], d['total'], True)
|
||||
diff = d['step']-bar.n
|
||||
bar.update(diff)
|
||||
#if d['step'] == d['total']:
|
||||
#bar.close()
|
||||
|
||||
def on_sync_progress(self, event):
|
||||
e, d = event['event'], event.get('data', {})
|
||||
if e.endswith(".init"):
|
||||
self.sync_init(e, d)
|
||||
elif e.endswith(".main"):
|
||||
self.sync_main(e, d)
|
||||
else:
|
||||
self.sync_task(e, d)
|
||||
|
||||
# if e.endswith("sync.start"):
|
||||
# self.sync_start(d)
|
||||
# self.stderr.flush(self.bars['read'].write)
|
||||
# elif e.endswith("sync.complete"):
|
||||
# self.stderr.flush(self.bars['read'].write, True)
|
||||
# self.sync_complete()
|
||||
# else:
|
||||
# self.stderr.flush(self.bars['read'].write)
|
||||
# self.update_progress(e, d)
|
|
@ -1,2 +1,4 @@
|
|||
NULL_HASH32 = b'\x00'*32
|
||||
|
||||
CENT = 1000000
|
||||
COIN = 100*CENT
|
||||
|
|
|
@ -2,7 +2,7 @@ from coincurve import PublicKey, PrivateKey as _PrivateKey
|
|||
|
||||
from lbry.crypto.hash import hmac_sha512, hash160, double_sha256
|
||||
from lbry.crypto.base58 import Base58
|
||||
from .util import cachedproperty
|
||||
from lbry.utils import cachedproperty
|
||||
|
||||
|
||||
class DerivationError(Exception):
|
lbry/db/__init__.py (new file, 5 lines)
|
@ -0,0 +1,5 @@
|
|||
from .database import Database, Result
|
||||
from .constants import (
|
||||
TXO_TYPES, SPENDABLE_TYPE_CODES,
|
||||
CLAIM_TYPE_CODES, CLAIM_TYPE_NAMES
|
||||
)
|
lbry/db/constants.py (new file, 74 lines)
|
@ -0,0 +1,74 @@
|
|||
MAX_QUERY_VARIABLES = 900
|
||||
|
||||
TXO_TYPES = {
|
||||
"other": 0,
|
||||
"stream": 1,
|
||||
"channel": 2,
|
||||
"support": 3,
|
||||
"purchase": 4,
|
||||
"collection": 5,
|
||||
"repost": 6,
|
||||
}
|
||||
|
||||
CLAIM_TYPE_NAMES = [
|
||||
'stream',
|
||||
'channel',
|
||||
'collection',
|
||||
'repost',
|
||||
]
|
||||
|
||||
CONTENT_TYPE_NAMES = [
|
||||
name for name in CLAIM_TYPE_NAMES if name != "channel"
|
||||
]
|
||||
|
||||
CLAIM_TYPE_CODES = [
|
||||
TXO_TYPES[name] for name in CLAIM_TYPE_NAMES
|
||||
]
|
||||
|
||||
CONTENT_TYPE_CODES = [
|
||||
TXO_TYPES[name] for name in CONTENT_TYPE_NAMES
|
||||
]
|
||||
|
||||
SPENDABLE_TYPE_CODES = [
|
||||
TXO_TYPES['other'],
|
||||
TXO_TYPES['purchase']
|
||||
]
|
||||
|
||||
STREAM_TYPES = {
|
||||
'video': 1,
|
||||
'audio': 2,
|
||||
'image': 3,
|
||||
'document': 4,
|
||||
'binary': 5,
|
||||
'model': 6
|
||||
}
|
||||
|
||||
MATURE_TAGS = (
|
||||
'nsfw', 'porn', 'xxx', 'mature', 'adult', 'sex'
|
||||
)
|
||||
|
||||
ATTRIBUTE_ARRAY_MAX_LENGTH = 100
|
||||
|
||||
SEARCH_INTEGER_PARAMS = {
|
||||
'height', 'creation_height', 'activation_height', 'expiration_height',
|
||||
'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount',
|
||||
'tx_position', 'channel_join', 'reposted',
|
||||
'amount', 'effective_amount', 'support_amount',
|
||||
'trending_group', 'trending_mixed',
|
||||
'trending_local', 'trending_global',
|
||||
}
|
||||
|
||||
SEARCH_PARAMS = {
|
||||
'name', 'text', 'claim_id', 'claim_ids', 'txid', 'nout', 'channel', 'channel_ids', 'not_channel_ids',
|
||||
'public_key_id', 'claim_type', 'stream_types', 'media_types', 'fee_currency',
|
||||
'has_channel_signature', 'signature_valid',
|
||||
'any_tags', 'all_tags', 'not_tags', 'reposted_claim_id',
|
||||
'any_locations', 'all_locations', 'not_locations',
|
||||
'any_languages', 'all_languages', 'not_languages',
|
||||
'is_controlling', 'limit', 'offset', 'order_by',
|
||||
'no_totals',
|
||||
} | SEARCH_INTEGER_PARAMS
|
||||
|
||||
SEARCH_ORDER_FIELDS = {
|
||||
'name', 'claim_hash', 'claim_id'
|
||||
} | SEARCH_INTEGER_PARAMS
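An illustrative validation helper built on the constant above, mirroring the commented-out assertion in Database.search_claims() further down (a sketch, not part of the upstream change):

def check_search_constraints(**constraints):
    unknown = set(constraints) - SEARCH_PARAMS
    if unknown:
        raise ValueError(f"invalid search arguments: {unknown}")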
|
lbry/db/database.py (new file, 327 lines)
|
@ -0,0 +1,327 @@
|
|||
import os
|
||||
import asyncio
|
||||
import tempfile
|
||||
import multiprocessing as mp
|
||||
from typing import List, Optional, Iterable, Iterator, TypeVar, Generic, TYPE_CHECKING, Dict
|
||||
from concurrent.futures import Executor, ThreadPoolExecutor, ProcessPoolExecutor
|
||||
from functools import partial
|
||||
|
||||
from sqlalchemy import create_engine, text
|
||||
|
||||
from lbry.event import EventController
|
||||
from lbry.crypto.bip32 import PubKey
|
||||
from lbry.blockchain.transaction import Transaction, Output
|
||||
from .constants import TXO_TYPES, CLAIM_TYPE_CODES
|
||||
from .query_context import initialize, uninitialize, ProgressPublisher
|
||||
from . import queries as q
|
||||
from . import sync
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from lbry.blockchain.ledger import Ledger
|
||||
|
||||
|
||||
def clean_wallet_account_ids(constraints):
|
||||
wallet = constraints.pop('wallet', None)
|
||||
account = constraints.pop('account', None)
|
||||
accounts = constraints.pop('accounts', [])
|
||||
if account and not accounts:
|
||||
accounts = [account]
|
||||
if wallet:
|
||||
constraints['wallet_account_ids'] = [account.id for account in wallet.accounts]
|
||||
if not accounts:
|
||||
accounts = wallet.accounts
|
||||
if accounts:
|
||||
constraints['account_ids'] = [account.id for account in accounts]
|
||||
|
||||
|
||||
async def add_channel_keys_to_txo_results(accounts: List, txos: Iterable[Output]):
|
||||
sub_channels = set()
|
||||
for txo in txos:
|
||||
if txo.claim.is_channel:
|
||||
for account in accounts:
|
||||
private_key = await account.get_channel_private_key(
|
||||
txo.claim.channel.public_key_bytes
|
||||
)
|
||||
if private_key:
|
||||
txo.private_key = private_key
|
||||
break
|
||||
if txo.channel is not None:
|
||||
sub_channels.add(txo.channel)
|
||||
if sub_channels:
|
||||
await add_channel_keys_to_txo_results(accounts, sub_channels)
|
||||
|
||||
ResultType = TypeVar('ResultType')
|
||||
|
||||
|
||||
class Result(Generic[ResultType]):
|
||||
|
||||
__slots__ = 'rows', 'total', 'censor'
|
||||
|
||||
def __init__(self, rows: List[ResultType], total, censor=None):
|
||||
self.rows = rows
|
||||
self.total = total
|
||||
self.censor = censor
|
||||
|
||||
def __getitem__(self, item: int) -> ResultType:
|
||||
return self.rows[item]
|
||||
|
||||
def __iter__(self) -> Iterator[ResultType]:
|
||||
return iter(self.rows)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.rows)
|
||||
|
||||
def __repr__(self):
|
||||
return repr(self.rows)
|
||||
|
||||
|
||||
class Database:
|
||||
|
||||
def __init__(self, ledger: 'Ledger'):
|
||||
self.url = ledger.conf.db_url_or_default
|
||||
self.ledger = ledger
|
||||
self.workers = self._normalize_worker_processes(ledger.conf.workers)
|
||||
self.executor: Optional[Executor] = None
|
||||
self.message_queue = mp.Queue()
|
||||
self.stop_event = mp.Event()
|
||||
self._on_progress_controller = EventController()
|
||||
self.on_progress = self._on_progress_controller.stream
|
||||
self.progress_publisher = ProgressPublisher(
|
||||
self.message_queue, self._on_progress_controller
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _normalize_worker_processes(workers):
|
||||
if workers == 0:
|
||||
return os.cpu_count()
|
||||
elif workers > 0:
|
||||
return workers
|
||||
return 1
|
||||
|
||||
@classmethod
|
||||
def temp_from_url_regtest(cls, db_url, lbrycrd_dir=None):
|
||||
from lbry import Config, RegTestLedger # pylint: disable=import-outside-toplevel
|
||||
directory = tempfile.mkdtemp()
|
||||
conf = Config.with_same_dir(directory).set(db_url=db_url)
|
||||
if lbrycrd_dir is not None:
|
||||
conf.lbrycrd_dir = lbrycrd_dir
|
||||
ledger = RegTestLedger(conf)
|
||||
return cls(ledger)
|
||||
|
||||
@classmethod
|
||||
def temp_sqlite_regtest(cls, lbrycrd_dir=None):
|
||||
from lbry import Config, RegTestLedger # pylint: disable=import-outside-toplevel
|
||||
directory = tempfile.mkdtemp()
|
||||
conf = Config.with_same_dir(directory)
|
||||
if lbrycrd_dir is not None:
|
||||
conf.lbrycrd_dir = lbrycrd_dir
|
||||
ledger = RegTestLedger(conf)
|
||||
return cls(ledger)
|
||||
|
||||
@classmethod
|
||||
def temp_sqlite(cls):
|
||||
from lbry import Config, Ledger # pylint: disable=import-outside-toplevel
|
||||
conf = Config.with_same_dir(tempfile.mkdtemp())
|
||||
return cls(Ledger(conf))
|
||||
|
||||
@classmethod
|
||||
def from_url(cls, db_url):
|
||||
from lbry import Config, Ledger # pylint: disable=import-outside-toplevel
|
||||
return cls(Ledger(Config.with_null_dir().set(db_url=db_url)))
|
||||
|
||||
@classmethod
|
||||
def in_memory(cls):
|
||||
return cls.from_url('sqlite:///:memory:')
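A minimal asyncio lifecycle sketch for the constructors above (assuming the in-memory SQLite configuration works in the target environment): open the executor pool, run one query, then shut everything down.

async def example():
    db = Database.in_memory()
    await db.open()   # creates tables and starts the worker executor
    try:
        return await db.get_best_block_height()
    finally:
        await db.close()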
|
||||
|
||||
def sync_create(self, name):
|
||||
engine = create_engine(self.url)
|
||||
db = engine.connect()
|
||||
db.execute(text("COMMIT"))
|
||||
db.execute(text(f"CREATE DATABASE {name}"))
|
||||
|
||||
async def create(self, name):
|
||||
return await asyncio.get_running_loop().run_in_executor(None, self.sync_create, name)
|
||||
|
||||
def sync_drop(self, name):
|
||||
engine = create_engine(self.url)
|
||||
db = engine.connect()
|
||||
db.execute(text("COMMIT"))
|
||||
db.execute(text(f"DROP DATABASE IF EXISTS {name}"))
|
||||
|
||||
async def drop(self, name):
|
||||
return await asyncio.get_running_loop().run_in_executor(None, self.sync_drop, name)
|
||||
|
||||
async def open(self):
|
||||
assert self.executor is None, "Database already open."
|
||||
self.progress_publisher.start()
|
||||
kwargs = {
|
||||
"initializer": initialize,
|
||||
"initargs": (
|
||||
self.ledger,
|
||||
self.message_queue, self.stop_event
|
||||
)
|
||||
}
|
||||
if self.workers > 1:
|
||||
self.executor = ProcessPoolExecutor(max_workers=self.workers, **kwargs)
|
||||
else:
|
||||
self.executor = ThreadPoolExecutor(max_workers=1, **kwargs)
|
||||
return await self.run(q.check_version_and_create_tables)
|
||||
|
||||
async def close(self):
|
||||
self.progress_publisher.stop()
|
||||
if self.executor is not None:
|
||||
if isinstance(self.executor, ThreadPoolExecutor):
|
||||
await self.run(uninitialize)
|
||||
self.executor.shutdown()
|
||||
self.executor = None
|
||||
# fixes "OSError: handle is closed"
|
||||
# seems to only happen when running in PyCharm
|
||||
# https://github.com/python/cpython/pull/6084#issuecomment-564585446
|
||||
# TODO: delete this in Python 3.8/3.9?
|
||||
from concurrent.futures.process import _threads_wakeups # pylint: disable=import-outside-toplevel
|
||||
_threads_wakeups.clear()
|
||||
|
||||
async def run(self, func, *args, **kwargs):
|
||||
if kwargs:
|
||||
clean_wallet_account_ids(kwargs)
|
||||
return await asyncio.get_running_loop().run_in_executor(
|
||||
self.executor, partial(func, *args, **kwargs)
|
||||
)
|
||||
|
||||
async def fetch_result(self, func, *args, **kwargs) -> Result:
|
||||
rows, total = await self.run(func, *args, **kwargs)
|
||||
return Result(rows, total)
|
||||
|
||||
async def execute(self, sql):
|
||||
return await self.run(q.execute, sql)
|
||||
|
||||
async def execute_fetchall(self, sql):
|
||||
return await self.run(q.execute_fetchall, sql)
|
||||
|
||||
async def has_claims(self):
|
||||
return await self.run(q.has_claims)
|
||||
|
||||
async def has_supports(self):
|
||||
return await self.run(q.has_supports)
|
||||
|
||||
async def get_best_block_height(self) -> int:
|
||||
return await self.run(q.get_best_block_height)
|
||||
|
||||
async def process_all_things_after_sync(self):
|
||||
return await self.run(sync.process_all_things_after_sync)
|
||||
|
||||
async def insert_block(self, block):
|
||||
return await self.run(q.insert_block, block)
|
||||
|
||||
async def insert_transaction(self, block_hash, tx):
|
||||
return await self.run(q.insert_transaction, block_hash, tx)
|
||||
|
||||
async def update_address_used_times(self, addresses):
|
||||
return await self.run(q.update_address_used_times, addresses)
|
||||
|
||||
async def reserve_outputs(self, txos, is_reserved=True):
|
||||
txo_hashes = [txo.hash for txo in txos]
|
||||
if txo_hashes:
|
||||
return await self.run(
|
||||
q.reserve_outputs, txo_hashes, is_reserved
|
||||
)
|
||||
|
||||
async def release_outputs(self, txos):
|
||||
return await self.reserve_outputs(txos, is_reserved=False)
|
||||
|
||||
async def release_tx(self, tx):
|
||||
return await self.release_outputs([txi.txo_ref.txo for txi in tx.inputs])
|
||||
|
||||
async def release_all_outputs(self, account):
|
||||
return await self.run(q.release_all_outputs, account.id)
|
||||
|
||||
async def get_balance(self, **constraints):
|
||||
return await self.run(q.get_balance, **constraints)
|
||||
|
||||
async def get_report(self, accounts):
|
||||
return await self.run(q.get_report, accounts=accounts)
|
||||
|
||||
async def get_addresses(self, **constraints) -> Result[dict]:
|
||||
addresses = await self.fetch_result(q.get_addresses, **constraints)
|
||||
if addresses and 'pubkey' in addresses[0]:
|
||||
for address in addresses:
|
||||
address['pubkey'] = PubKey(
|
||||
self.ledger, bytes(address.pop('pubkey')), bytes(address.pop('chain_code')),
|
||||
address.pop('n'), address.pop('depth')
|
||||
)
|
||||
return addresses
|
||||
|
||||
async def get_all_addresses(self):
|
||||
return await self.run(q.get_all_addresses)
|
||||
|
||||
async def get_address(self, **constraints):
|
||||
for address in await self.get_addresses(limit=1, **constraints):
|
||||
return address
|
||||
|
||||
async def add_keys(self, account, chain, pubkeys):
|
||||
return await self.run(q.add_keys, account, chain, pubkeys)
|
||||
|
||||
async def get_transactions(self, **constraints) -> Result[Transaction]:
|
||||
return await self.fetch_result(q.get_transactions, **constraints)
|
||||
|
||||
async def get_transaction(self, **constraints) -> Optional[Transaction]:
|
||||
txs = await self.get_transactions(limit=1, **constraints)
|
||||
if txs:
|
||||
return txs[0]
|
||||
|
||||
async def get_purchases(self, **constraints) -> Result[Output]:
|
||||
return await self.fetch_result(q.get_purchases, **constraints)
|
||||
|
||||
async def search_claims(self, **constraints) -> Result[Output]:
|
||||
#assert set(constraints).issubset(SEARCH_PARAMS), \
|
||||
# f"Search query contains invalid arguments: {set(constraints).difference(SEARCH_PARAMS)}"
|
||||
claims, total, censor = await self.run(q.search_claims, **constraints)
|
||||
return Result(claims, total, censor)
|
||||
|
||||
async def protobuf_search_claims(self, **constraints) -> str:
|
||||
return await self.run(q.protobuf_search_claims, **constraints)
|
||||
|
||||
async def search_supports(self, **constraints) -> Result[Output]:
|
||||
return await self.fetch_result(q.search_supports, **constraints)
|
||||
|
||||
async def resolve(self, urls, **kwargs) -> Dict[str, Output]:
|
||||
return await self.run(q.resolve, urls, **kwargs)
|
||||
|
||||
async def protobuf_resolve(self, urls, **kwargs) -> str:
|
||||
return await self.run(q.protobuf_resolve, urls, **kwargs)
|
||||
|
||||
async def get_txo_sum(self, **constraints) -> int:
|
||||
return await self.run(q.get_txo_sum, **constraints)
|
||||
|
||||
async def get_txo_plot(self, **constraints) -> List[dict]:
|
||||
return await self.run(q.get_txo_plot, **constraints)
|
||||
|
||||
async def get_txos(self, **constraints) -> Result[Output]:
|
||||
txos = await self.fetch_result(q.get_txos, **constraints)
|
||||
if 'wallet' in constraints:
|
||||
await add_channel_keys_to_txo_results(constraints['wallet'].accounts, txos)
|
||||
return txos
|
||||
|
||||
async def get_utxos(self, **constraints) -> Result[Output]:
|
||||
return await self.get_txos(spent_height=0, **constraints)
|
||||
|
||||
async def get_supports(self, **constraints) -> Result[Output]:
|
||||
return await self.get_utxos(txo_type=TXO_TYPES['support'], **constraints)
|
||||
|
||||
async def get_claims(self, **constraints) -> Result[Output]:
|
||||
if 'txo_type' not in constraints:
|
||||
constraints['txo_type__in'] = CLAIM_TYPE_CODES
|
||||
txos = await self.fetch_result(q.get_txos, **constraints)
|
||||
if 'wallet' in constraints:
|
||||
await add_channel_keys_to_txo_results(constraints['wallet'].accounts, txos)
|
||||
return txos
|
||||
|
||||
async def get_streams(self, **constraints) -> Result[Output]:
|
||||
return await self.get_claims(txo_type=TXO_TYPES['stream'], **constraints)
|
||||
|
||||
async def get_channels(self, **constraints) -> Result[Output]:
|
||||
return await self.get_claims(txo_type=TXO_TYPES['channel'], **constraints)
|
||||
|
||||
async def get_collections(self, **constraints) -> Result[Output]:
|
||||
return await self.get_claims(txo_type=TXO_TYPES['collection'], **constraints)
|
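# Illustrative usage sketch (not part of this diff): driving the async Database
# wrapper defined above. Assumes `db` is a Database instance constructed elsewhere
# with a configured Ledger; the names below are examples only.
async def example_database_usage(db):
    await db.open()                                # starts the progress publisher and executor, ensures tables exist
    try:
        claims = await db.get_claims(limit=5)      # Result[Output]
        height = await db.get_best_block_height()  # int
        print(len(claims), "claims at height", height)
    finally:
        await db.close()                           # stops the executor and the progress publisher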
5
lbry/db/queries/__init__.py
Normal file
|
@@ -0,0 +1,5 @@
|
|||
from .base import *
|
||||
from .txio import *
|
||||
from .search import *
|
||||
from .resolve import *
|
||||
from .address import *
|
78
lbry/db/queries/address.py
Normal file
|
@@ -0,0 +1,78 @@
|
|||
import logging
|
||||
from typing import Tuple, List, Optional
|
||||
|
||||
from sqlalchemy import func
|
||||
from sqlalchemy.future import select
|
||||
|
||||
from ..utils import query
|
||||
from ..query_context import context
|
||||
from ..tables import TXO, PubkeyAddress, AccountAddress
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def update_address_used_times(addresses):
    context().execute(
        PubkeyAddress.update()
        .values(used_times=(
            select(func.count(TXO.c.address))
            .where(TXO.c.address == PubkeyAddress.c.address)
            .scalar_subquery()
        ))
        .where(PubkeyAddress.c.address.in_(addresses))
    )
|
||||
|
||||
|
||||
def select_addresses(cols, **constraints):
|
||||
return context().fetchall(query(
|
||||
[AccountAddress, PubkeyAddress],
|
||||
select(*cols).select_from(PubkeyAddress.join(AccountAddress)),
|
||||
**constraints
|
||||
))
|
||||
|
||||
|
||||
def get_addresses(cols=None, include_total=False, **constraints) -> Tuple[List[dict], Optional[int]]:
|
||||
if cols is None:
|
||||
cols = (
|
||||
PubkeyAddress.c.address,
|
||||
PubkeyAddress.c.used_times,
|
||||
AccountAddress.c.account,
|
||||
AccountAddress.c.chain,
|
||||
AccountAddress.c.pubkey,
|
||||
AccountAddress.c.chain_code,
|
||||
AccountAddress.c.n,
|
||||
AccountAddress.c.depth
|
||||
)
|
||||
return (
|
||||
select_addresses(cols, **constraints),
|
||||
get_address_count(**constraints) if include_total else None
|
||||
)
|
||||
|
||||
|
||||
def get_address_count(**constraints):
|
||||
count = select_addresses([func.count().label('total')], **constraints)
|
||||
return count[0]['total'] or 0
|
||||
|
||||
|
||||
def get_all_addresses():
|
||||
return context().execute(select(PubkeyAddress.c.address))
|
||||
|
||||
|
||||
def add_keys(account, chain, pubkeys):
|
||||
c = context()
|
||||
c.execute(
|
||||
c.insert_or_ignore(PubkeyAddress)
|
||||
.values([{'address': k.address} for k in pubkeys])
|
||||
)
|
||||
c.execute(
|
||||
c.insert_or_ignore(AccountAddress)
|
||||
.values([{
|
||||
'account': account.id,
|
||||
'address': k.address,
|
||||
'chain': chain,
|
||||
'pubkey': k.pubkey_bytes,
|
||||
'chain_code': k.chain_code,
|
||||
'n': k.n,
|
||||
'depth': k.depth
|
||||
} for k in pubkeys])
|
||||
)
|
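# Illustrative sketch (not part of this diff): once initialize() has set up the query
# context in a worker process, the helpers above can be called directly. The 'account'
# constraint is an assumption that the query() helper maps it onto
# AccountAddress.c.account, as it does for other column-name constraints.
def example_address_queries(account_id):
    rows, total = get_addresses(account=account_id, include_total=True, limit=20)
    count = get_address_count(account=account_id)
    return rows, total, count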
58
lbry/db/queries/base.py
Normal file
|
@@ -0,0 +1,58 @@
|
|||
from sqlalchemy import text
|
||||
from sqlalchemy.future import select
|
||||
|
||||
from ..query_context import context
|
||||
from ..tables import SCHEMA_VERSION, metadata, Version, Claim, Support, Block, TX
|
||||
|
||||
|
||||
def execute(sql):
|
||||
return context().execute(text(sql))
|
||||
|
||||
|
||||
def execute_fetchall(sql):
|
||||
return context().fetchall(text(sql))
|
||||
|
||||
|
||||
def has_claims():
|
||||
return context().has_records(Claim)
|
||||
|
||||
|
||||
def has_supports():
|
||||
return context().has_records(Support)
|
||||
|
||||
|
||||
def get_best_block_height():
|
||||
return context().fetchmax(Block.c.height, -1)
|
||||
|
||||
|
||||
def insert_block(block):
|
||||
context().get_bulk_loader().add_block(block).flush()
|
||||
|
||||
|
||||
def insert_transaction(block_hash, tx):
|
||||
context().get_bulk_loader().add_transaction(block_hash, tx).flush(TX)
|
||||
|
||||
|
||||
def check_version_and_create_tables():
|
||||
with context("db.connecting") as ctx:
|
||||
if ctx.has_table('version'):
|
||||
version = ctx.fetchone(select(Version.c.version).limit(1))
|
||||
if version and version['version'] == SCHEMA_VERSION:
|
||||
return
|
||||
metadata.drop_all(ctx.engine)
|
||||
metadata.create_all(ctx.engine)
|
||||
ctx.execute(Version.insert().values(version=SCHEMA_VERSION))
|
||||
for table in metadata.sorted_tables:
|
||||
disable_trigger_and_constraints(table.name)
|
||||
|
||||
|
||||
def disable_trigger_and_constraints(table_name):
|
||||
ctx = context()
|
||||
if ctx.is_postgres:
|
||||
ctx.execute(text(f"ALTER TABLE {table_name} DISABLE TRIGGER ALL;"))
|
||||
if table_name in ('tag', 'stake'):
|
||||
return
|
||||
if ctx.is_postgres:
|
||||
ctx.execute(text(
|
||||
f"ALTER TABLE {table_name} DROP CONSTRAINT {table_name}_pkey CASCADE;"
|
||||
))
|
95
lbry/db/queries/resolve.py
Normal file
|
@@ -0,0 +1,95 @@
|
|||
import logging
|
||||
import itertools
|
||||
from operator import itemgetter
|
||||
from typing import List, Dict
|
||||
|
||||
from lbry.schema.url import URL
|
||||
from lbry.schema.result import Outputs as ResultOutput
|
||||
from lbry.error import ResolveCensoredError
|
||||
from lbry.blockchain.transaction import Output
|
||||
|
||||
from ..query_context import context
|
||||
from .search import search_claims
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _get_referenced_rows(txo_rows: List[dict], censor_channels: List[bytes]):
|
||||
# censor = context().get_resolve_censor()
|
||||
repost_hashes = set(filter(None, map(itemgetter('reposted_claim_hash'), txo_rows)))
|
||||
channel_hashes = set(itertools.chain(
|
||||
filter(None, map(itemgetter('channel_hash'), txo_rows)),
|
||||
censor_channels
|
||||
))
|
||||
|
||||
reposted_txos = []
|
||||
if repost_hashes:
|
||||
reposted_txos = search_claims(**{'claim.claim_hash__in': repost_hashes})
|
||||
channel_hashes |= set(filter(None, map(itemgetter('channel_hash'), reposted_txos)))
|
||||
|
||||
channel_txos = []
|
||||
if channel_hashes:
|
||||
channel_txos = search_claims(**{'claim.claim_hash__in': channel_hashes})
|
||||
|
||||
# channels must come first for client side inflation to work properly
|
||||
return channel_txos + reposted_txos
|
||||
|
||||
|
||||
def protobuf_resolve(urls, **kwargs) -> str:
|
||||
return ResultOutput.to_base64([resolve_url(raw_url) for raw_url in urls], [])
|
||||
|
||||
|
||||
def resolve(urls, **kwargs) -> Dict[str, Output]:
|
||||
return {url: resolve_url(url) for url in urls}
|
||||
#txo_rows = [resolve_url(raw_url) for raw_url in urls]
|
||||
#extra_txo_rows = _get_referenced_rows(
|
||||
# [txo for txo in txo_rows if isinstance(txo, dict)],
|
||||
# [txo.censor_hash for txo in txo_rows if isinstance(txo, ResolveCensoredError)]
|
||||
#)
|
||||
#return txo_rows, extra_txo_rows
|
||||
|
||||
|
||||
def resolve_url(raw_url):
|
||||
censor = context().get_resolve_censor()
|
||||
|
||||
try:
|
||||
url = URL.parse(raw_url)
|
||||
except ValueError as e:
|
||||
return e
|
||||
|
||||
channel = None
|
||||
|
||||
if url.has_channel:
|
||||
q = url.channel.to_dict()
|
||||
if set(q) == {'name'}:
|
||||
q['is_controlling'] = True
|
||||
else:
|
||||
q['order_by'] = ['^creation_height']
|
||||
#matches = search_claims(censor, **q, limit=1)
|
||||
matches = search_claims(**q, limit=1)[0]
|
||||
if matches:
|
||||
channel = matches[0]
|
||||
elif censor.censored:
|
||||
return ResolveCensoredError(raw_url, next(iter(censor.censored)))
|
||||
else:
|
||||
return LookupError(f'Could not find channel in "{raw_url}".')
|
||||
|
||||
if url.has_stream:
|
||||
q = url.stream.to_dict()
|
||||
if channel is not None:
|
||||
q['order_by'] = ['^creation_height']
|
||||
q['channel_hash'] = channel.claim_hash
|
||||
q['is_signature_valid'] = True
|
||||
elif set(q) == {'name'}:
|
||||
q['is_controlling'] = True
|
||||
# matches = search_claims(censor, **q, limit=1)
|
||||
matches = search_claims(**q, limit=1)[0]
|
||||
if matches:
|
||||
return matches[0]
|
||||
elif censor.censored:
|
||||
return ResolveCensoredError(raw_url, next(iter(censor.censored)))
|
||||
else:
|
||||
return LookupError(f'Could not find claim at "{raw_url}".')
|
||||
|
||||
return channel
|
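# Illustrative sketch (not part of this diff): resolve() maps each URL to either an
# Output, a LookupError, or a ResolveCensoredError, so callers are expected to branch
# on the value type. The handling below is hypothetical.
def example_handle_resolve_results(results):
    for url, value in results.items():
        if isinstance(value, ResolveCensoredError):
            print(url, "is censored:", value)
        elif isinstance(value, Exception):
            print(url, "did not resolve:", value)
        else:
            print(url, "resolved to", value.claim_name)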
371
lbry/db/queries/search.py
Normal file
|
@@ -0,0 +1,371 @@
|
|||
import struct
|
||||
import logging
|
||||
from decimal import Decimal
|
||||
from binascii import unhexlify
|
||||
from typing import Tuple, List, Optional
|
||||
|
||||
from sqlalchemy import func, case
|
||||
from sqlalchemy.future import select, Select
|
||||
|
||||
from lbry.schema.tags import clean_tags
|
||||
from lbry.schema.result import Censor, Outputs as ResultOutput
|
||||
from lbry.schema.url import normalize_name
|
||||
from lbry.blockchain.transaction import Output
|
||||
|
||||
from ..utils import query
|
||||
from ..query_context import context
|
||||
from ..tables import TX, TXO, Claim, Support
|
||||
from ..constants import (
|
||||
TXO_TYPES, STREAM_TYPES, ATTRIBUTE_ARRAY_MAX_LENGTH,
|
||||
SEARCH_INTEGER_PARAMS, SEARCH_ORDER_FIELDS
|
||||
)
|
||||
|
||||
from .txio import BASE_SELECT_TXO_COLUMNS, rows_to_txos
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
BASE_SELECT_SUPPORT_COLUMNS = BASE_SELECT_TXO_COLUMNS + [
|
||||
Support.c.channel_hash,
|
||||
Support.c.is_signature_valid,
|
||||
]
|
||||
|
||||
|
||||
def select_supports(cols: List = None, **constraints) -> Select:
|
||||
if cols is None:
|
||||
cols = BASE_SELECT_SUPPORT_COLUMNS
|
||||
joins = Support.join(TXO).join(TX)
|
||||
return query([Support], select(*cols).select_from(joins), **constraints)
|
||||
|
||||
|
||||
def search_supports(**constraints) -> Tuple[List[Output], Optional[int]]:
|
||||
total = None
|
||||
if constraints.pop('include_total', False):
|
||||
total = search_support_count(**constraints)
|
||||
if 'claim_id' in constraints:
|
||||
constraints['claim_hash'] = unhexlify(constraints.pop('claim_id'))[::-1]
|
||||
rows = context().fetchall(select_supports(**constraints))
|
||||
txos = rows_to_txos(rows, include_tx=False)
|
||||
return txos, total
|
||||
|
||||
|
||||
def search_support_count(**constraints) -> int:
|
||||
constraints.pop('offset', None)
|
||||
constraints.pop('limit', None)
|
||||
constraints.pop('order_by', None)
|
||||
count = context().fetchall(select_supports([func.count().label('total')], **constraints))
|
||||
return count[0]['total'] or 0
|
||||
|
||||
|
||||
channel_claim = Claim.alias('channel')
|
||||
BASE_SELECT_CLAIM_COLUMNS = BASE_SELECT_TXO_COLUMNS + [
|
||||
Claim.c.activation_height,
|
||||
Claim.c.takeover_height,
|
||||
Claim.c.creation_height,
|
||||
Claim.c.is_controlling,
|
||||
Claim.c.channel_hash,
|
||||
Claim.c.reposted_count,
|
||||
Claim.c.reposted_claim_hash,
|
||||
Claim.c.short_url,
|
||||
Claim.c.signed_claim_count,
|
||||
Claim.c.signed_support_count,
|
||||
(Claim.c.amount + Claim.c.staked_support_amount).label('staked_amount'),
|
||||
Claim.c.staked_support_amount,
|
||||
Claim.c.staked_support_count,
|
||||
Claim.c.is_signature_valid,
|
||||
case([(
|
||||
channel_claim.c.short_url.isnot(None),
|
||||
channel_claim.c.short_url + '/' + Claim.c.short_url
|
||||
)]).label('canonical_url'),
|
||||
]
|
||||
|
||||
|
||||
def select_claims(cols: List = None, for_count=False, **constraints) -> Select:
|
||||
if cols is None:
|
||||
cols = BASE_SELECT_CLAIM_COLUMNS
|
||||
if 'order_by' in constraints:
|
||||
order_by_parts = constraints['order_by']
|
||||
if isinstance(order_by_parts, str):
|
||||
order_by_parts = [order_by_parts]
|
||||
sql_order_by = []
|
||||
for order_by in order_by_parts:
|
||||
is_asc = order_by.startswith('^')
|
||||
column = order_by[1:] if is_asc else order_by
|
||||
if column not in SEARCH_ORDER_FIELDS:
|
||||
raise NameError(f'{column} is not a valid order_by field')
|
||||
if column == 'name':
|
||||
column = 'claim_name'
|
||||
nulls_last = ''
|
||||
if column == 'release_time':
|
||||
nulls_last = ' NULLS LAST'
|
||||
sql_order_by.append(
|
||||
f"claim.{column} ASC{nulls_last}" if is_asc else f"claim.{column} DESC{nulls_last}"
|
||||
)
|
||||
constraints['order_by'] = sql_order_by
|
||||
|
||||
ops = {'<=': '__lte', '>=': '__gte', '<': '__lt', '>': '__gt'}
|
||||
for constraint in SEARCH_INTEGER_PARAMS:
|
||||
if constraint in constraints:
|
||||
value = constraints.pop(constraint)
|
||||
postfix = ''
|
||||
if isinstance(value, str):
|
||||
if len(value) >= 2 and value[:2] in ops:
|
||||
postfix, value = ops[value[:2]], value[2:]
|
||||
elif len(value) >= 1 and value[0] in ops:
|
||||
postfix, value = ops[value[0]], value[1:]
|
||||
if constraint == 'fee_amount':
|
||||
value = Decimal(value)*1000
|
||||
constraints[f'{constraint}{postfix}'] = int(value)
|
||||
|
||||
if 'sequence' in constraints:
|
||||
constraints['order_by'] = 'activation_height ASC'
|
||||
constraints['offset'] = int(constraints.pop('sequence')) - 1
|
||||
constraints['limit'] = 1
|
||||
if 'amount_order' in constraints:
|
||||
constraints['order_by'] = 'effective_amount DESC'
|
||||
constraints['offset'] = int(constraints.pop('amount_order')) - 1
|
||||
constraints['limit'] = 1
|
||||
|
||||
if 'claim_id' in constraints:
|
||||
claim_id = constraints.pop('claim_id')
|
||||
if len(claim_id) == 40:
|
||||
constraints['claim_id'] = claim_id
|
||||
else:
|
||||
constraints['claim_id__like'] = f'{claim_id[:40]}%'
|
||||
elif 'claim_ids' in constraints:
|
||||
constraints['claim_id__in'] = set(constraints.pop('claim_ids'))
|
||||
|
||||
if 'reposted_claim_id' in constraints:
|
||||
constraints['reposted_claim_hash'] = unhexlify(constraints.pop('reposted_claim_id'))[::-1]
|
||||
|
||||
if 'name' in constraints:
|
||||
constraints['normalized'] = normalize_name(constraints.pop('name'))
|
||||
|
||||
if 'public_key_id' in constraints:
|
||||
constraints['public_key_hash'] = (
|
||||
context().ledger.address_to_hash160(constraints.pop('public_key_id')))
|
||||
if 'channel_hash' in constraints:
|
||||
constraints['channel_hash'] = constraints.pop('channel_hash')
|
||||
if 'channel_ids' in constraints:
|
||||
channel_ids = constraints.pop('channel_ids')
|
||||
if channel_ids:
|
||||
constraints['channel_hash__in'] = {
|
||||
unhexlify(cid)[::-1] for cid in channel_ids
|
||||
}
|
||||
if 'not_channel_ids' in constraints:
|
||||
not_channel_ids = constraints.pop('not_channel_ids')
|
||||
if not_channel_ids:
|
||||
not_channel_ids_binary = {
|
||||
unhexlify(ncid)[::-1] for ncid in not_channel_ids
|
||||
}
|
||||
constraints['claim_hash__not_in#not_channel_ids'] = not_channel_ids_binary
|
||||
if constraints.get('has_channel_signature', False):
|
||||
constraints['channel_hash__not_in'] = not_channel_ids_binary
|
||||
else:
|
||||
constraints['null_or_not_channel__or'] = {
|
||||
'signature_valid__is_null': True,
|
||||
'channel_hash__not_in': not_channel_ids_binary
|
||||
}
|
||||
if 'signature_valid' in constraints:
|
||||
has_channel_signature = constraints.pop('has_channel_signature', False)
|
||||
if has_channel_signature:
|
||||
constraints['signature_valid'] = constraints.pop('signature_valid')
|
||||
else:
|
||||
constraints['null_or_signature__or'] = {
|
||||
'signature_valid__is_null': True,
|
||||
'signature_valid': constraints.pop('signature_valid')
|
||||
}
|
||||
elif constraints.pop('has_channel_signature', False):
|
||||
constraints['signature_valid__is_not_null'] = True
|
||||
|
||||
if 'txid' in constraints:
|
||||
tx_hash = unhexlify(constraints.pop('txid'))[::-1]
|
||||
nout = constraints.pop('nout', 0)
|
||||
constraints['txo_hash'] = tx_hash + struct.pack('<I', nout)
|
||||
|
||||
if 'claim_type' in constraints:
|
||||
claim_types = constraints.pop('claim_type')
|
||||
if isinstance(claim_types, str):
|
||||
claim_types = [claim_types]
|
||||
if claim_types:
|
||||
constraints['claim_type__in'] = {
|
||||
TXO_TYPES[claim_type] for claim_type in claim_types
|
||||
}
|
||||
if 'stream_types' in constraints:
|
||||
stream_types = constraints.pop('stream_types')
|
||||
if stream_types:
|
||||
constraints['stream_type__in'] = {
|
||||
STREAM_TYPES[stream_type] for stream_type in stream_types
|
||||
}
|
||||
if 'media_types' in constraints:
|
||||
media_types = constraints.pop('media_types')
|
||||
if media_types:
|
||||
constraints['media_type__in'] = set(media_types)
|
||||
|
||||
if 'fee_currency' in constraints:
|
||||
constraints['fee_currency'] = constraints.pop('fee_currency').lower()
|
||||
|
||||
_apply_constraints_for_array_attributes(constraints, 'tag', clean_tags, for_count)
|
||||
_apply_constraints_for_array_attributes(constraints, 'language', lambda _: _, for_count)
|
||||
_apply_constraints_for_array_attributes(constraints, 'location', lambda _: _, for_count)
|
||||
|
||||
if 'text' in constraints:
|
||||
# TODO: fix
|
||||
constraints["search"] = constraints.pop("text")
|
||||
|
||||
return query(
|
||||
[Claim],
|
||||
select(*cols)
|
||||
.select_from(
|
||||
Claim.join(TXO).join(TX)
|
||||
.join(channel_claim, Claim.c.channel_hash == channel_claim.c.claim_hash, isouter=True)
|
||||
), **constraints
|
||||
)
|
||||
|
||||
|
||||
def protobuf_search_claims(**constraints) -> str:
|
||||
txos, _, censor = search_claims(**constraints)
|
||||
return ResultOutput.to_base64(txos, [], blocked=censor)
|
||||
|
||||
|
||||
def search_claims(**constraints) -> Tuple[List[Output], Optional[int], Optional[Censor]]:
|
||||
total = None
|
||||
if constraints.pop('include_total', False):
|
||||
total = search_claim_count(**constraints)
|
||||
constraints['offset'] = abs(constraints.get('offset', 0))
|
||||
constraints['limit'] = min(abs(constraints.get('limit', 10)), 50)
|
||||
ctx = context()
|
||||
search_censor = ctx.get_search_censor()
|
||||
rows = context().fetchall(select_claims(**constraints))
|
||||
txos = rows_to_txos(rows, include_tx=False)
|
||||
return txos, total, search_censor
|
||||
|
||||
|
||||
def search_claim_count(**constraints) -> int:
|
||||
constraints.pop('offset', None)
|
||||
constraints.pop('limit', None)
|
||||
constraints.pop('order_by', None)
|
||||
count = context().fetchall(select_claims([func.count().label('total')], **constraints))
|
||||
return count[0]['total'] or 0
|
||||
|
||||
|
||||
CLAIM_HASH_OR_REPOST_HASH_SQL = f"""
|
||||
CASE WHEN claim.claim_type = {TXO_TYPES['repost']}
|
||||
THEN claim.reposted_claim_hash
|
||||
ELSE claim.claim_hash
|
||||
END
|
||||
"""
|
||||
|
||||
|
||||
def _apply_constraints_for_array_attributes(constraints, attr, cleaner, for_count=False):
|
||||
any_items = set(cleaner(constraints.pop(f'any_{attr}s', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH])
|
||||
all_items = set(cleaner(constraints.pop(f'all_{attr}s', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH])
|
||||
not_items = set(cleaner(constraints.pop(f'not_{attr}s', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH])
|
||||
|
||||
all_items = {item for item in all_items if item not in not_items}
|
||||
any_items = {item for item in any_items if item not in not_items}
|
||||
|
||||
any_queries = {}
|
||||
|
||||
# if attr == 'tag':
|
||||
# common_tags = any_items & COMMON_TAGS.keys()
|
||||
# if common_tags:
|
||||
# any_items -= common_tags
|
||||
# if len(common_tags) < 5:
|
||||
# for item in common_tags:
|
||||
# index_name = COMMON_TAGS[item]
|
||||
# any_queries[f'#_common_tag_{index_name}'] = f"""
|
||||
# EXISTS(
|
||||
# SELECT 1 FROM tag INDEXED BY tag_{index_name}_idx
|
||||
# WHERE {CLAIM_HASH_OR_REPOST_HASH_SQL}=tag.claim_hash
|
||||
# AND tag = '{item}'
|
||||
# )
|
||||
# """
|
||||
# elif len(common_tags) >= 5:
|
||||
# constraints.update({
|
||||
# f'$any_common_tag{i}': item for i, item in enumerate(common_tags)
|
||||
# })
|
||||
# values = ', '.join(
|
||||
# f':$any_common_tag{i}' for i in range(len(common_tags))
|
||||
# )
|
||||
# any_queries[f'#_any_common_tags'] = f"""
|
||||
# EXISTS(
|
||||
# SELECT 1 FROM tag WHERE {CLAIM_HASH_OR_REPOST_HASH_SQL}=tag.claim_hash
|
||||
# AND tag IN ({values})
|
||||
# )
|
||||
# """
|
||||
|
||||
if any_items:
|
||||
|
||||
constraints.update({
|
||||
f'$any_{attr}{i}': item for i, item in enumerate(any_items)
|
||||
})
|
||||
values = ', '.join(
|
||||
f':$any_{attr}{i}' for i in range(len(any_items))
|
||||
)
|
||||
if for_count or attr == 'tag':
|
||||
any_queries[f'#_any_{attr}'] = f"""
|
||||
{CLAIM_HASH_OR_REPOST_HASH_SQL} IN (
|
||||
SELECT claim_hash FROM {attr} WHERE {attr} IN ({values})
|
||||
)
|
||||
"""
|
||||
else:
|
||||
any_queries[f'#_any_{attr}'] = f"""
|
||||
EXISTS(
|
||||
SELECT 1 FROM {attr} WHERE
|
||||
{CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash
|
||||
AND {attr} IN ({values})
|
||||
)
|
||||
"""
|
||||
|
||||
if len(any_queries) == 1:
|
||||
constraints.update(any_queries)
|
||||
elif len(any_queries) > 1:
|
||||
constraints[f'ORed_{attr}_queries__any'] = any_queries
|
||||
|
||||
if all_items:
|
||||
constraints[f'$all_{attr}_count'] = len(all_items)
|
||||
constraints.update({
|
||||
f'$all_{attr}{i}': item for i, item in enumerate(all_items)
|
||||
})
|
||||
values = ', '.join(
|
||||
f':$all_{attr}{i}' for i in range(len(all_items))
|
||||
)
|
||||
if for_count:
|
||||
constraints[f'#_all_{attr}'] = f"""
|
||||
{CLAIM_HASH_OR_REPOST_HASH_SQL} IN (
|
||||
SELECT claim_hash FROM {attr} WHERE {attr} IN ({values})
|
||||
GROUP BY claim_hash HAVING COUNT({attr}) = :$all_{attr}_count
|
||||
)
|
||||
"""
|
||||
else:
|
||||
constraints[f'#_all_{attr}'] = f"""
|
||||
{len(all_items)}=(
|
||||
SELECT count(*) FROM {attr} WHERE
|
||||
{CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash
|
||||
AND {attr} IN ({values})
|
||||
)
|
||||
"""
|
||||
|
||||
if not_items:
|
||||
constraints.update({
|
||||
f'$not_{attr}{i}': item for i, item in enumerate(not_items)
|
||||
})
|
||||
values = ', '.join(
|
||||
f':$not_{attr}{i}' for i in range(len(not_items))
|
||||
)
|
||||
if for_count:
|
||||
constraints[f'#_not_{attr}'] = f"""
|
||||
{CLAIM_HASH_OR_REPOST_HASH_SQL} NOT IN (
|
||||
SELECT claim_hash FROM {attr} WHERE {attr} IN ({values})
|
||||
)
|
||||
"""
|
||||
else:
|
||||
constraints[f'#_not_{attr}'] = f"""
|
||||
NOT EXISTS(
|
||||
SELECT 1 FROM {attr} WHERE
|
||||
{CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash
|
||||
AND {attr} IN ({values})
|
||||
)
|
||||
"""
|
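# Illustrative sketch (not part of this diff): how user-facing tag constraints are
# expanded by the helper above into bind parameters plus named SQL fragments.
def example_tag_constraint_expansion():
    constraints = {'any_tags': ['music', 'art'], 'not_tags': ['nsfw']}
    _apply_constraints_for_array_attributes(constraints, 'tag', clean_tags, for_count=False)
    # constraints now holds '$any_tag0'/'$any_tag1' and '$not_tag0' bind values, plus
    # '#_any_tag' and '#_not_tag' fragments that match CLAIM_HASH_OR_REPOST_HASH_SQL
    # against subqueries over the tag table.
    return constraints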
578
lbry/db/queries/txio.py
Normal file
|
@@ -0,0 +1,578 @@
|
|||
import logging
|
||||
from datetime import date
|
||||
from typing import Tuple, List, Optional, Union
|
||||
|
||||
from sqlalchemy import union, func, text, between, distinct
|
||||
from sqlalchemy.future import select, Select
|
||||
|
||||
from ...blockchain.transaction import (
|
||||
Transaction, Output, OutputScript, TXRefImmutable
|
||||
)
|
||||
from ..tables import (
|
||||
TX, TXO, TXI, txi_join_account, txo_join_account,
|
||||
Claim, Support, AccountAddress
|
||||
)
|
||||
from ..utils import query, in_account_ids
|
||||
from ..query_context import context
|
||||
from ..constants import TXO_TYPES, CLAIM_TYPE_CODES, MAX_QUERY_VARIABLES
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
minimum_txo_columns = (
|
||||
TXO.c.amount, TXO.c.position.label('txo_position'),
|
||||
TX.c.tx_hash, TX.c.height, TX.c.timestamp,
|
||||
func.substr(TX.c.raw, TXO.c.script_offset + 1, TXO.c.script_length).label('src'),
|
||||
)
|
||||
|
||||
|
||||
def row_to_txo(row):
|
||||
return Output(
|
||||
amount=row.amount,
|
||||
script=OutputScript(row.src),
|
||||
tx_ref=TXRefImmutable.from_hash(row.tx_hash, row.height, row.timestamp),
|
||||
position=row.txo_position,
|
||||
)
|
||||
|
||||
|
||||
def where_txo_type_in(txo_type: Optional[Union[tuple, int]] = None):
|
||||
if txo_type is not None:
|
||||
if isinstance(txo_type, int):
|
||||
return TXO.c.txo_type == txo_type
|
||||
assert len(txo_type) > 0
|
||||
if len(txo_type) == 1:
|
||||
return TXO.c.txo_type == txo_type[0]
|
||||
else:
|
||||
return TXO.c.txo_type.in_(txo_type)
|
||||
return TXO.c.txo_type.in_(CLAIM_TYPE_CODES)
|
||||
|
||||
|
||||
def where_unspent_txos(
|
||||
txo_types: Tuple[int, ...],
|
||||
blocks: Tuple[int, int] = None,
|
||||
missing_in_supports_table: bool = False,
|
||||
missing_in_claims_table: bool = False,
|
||||
missing_or_stale_in_claims_table: bool = False,
|
||||
):
|
||||
condition = where_txo_type_in(txo_types) & (TXO.c.spent_height == 0)
|
||||
if blocks is not None:
|
||||
condition &= between(TXO.c.height, *blocks)
|
||||
if missing_in_supports_table:
|
||||
condition &= TXO.c.txo_hash.notin_(select(Support.c.txo_hash))
|
||||
elif missing_or_stale_in_claims_table:
|
||||
condition &= TXO.c.txo_hash.notin_(select(Claim.c.txo_hash))
|
||||
elif missing_in_claims_table:
|
||||
condition &= TXO.c.claim_hash.notin_(select(Claim.c.claim_hash))
|
||||
return condition
|
||||
|
||||
|
||||
def where_abandoned_claims():
|
||||
return Claim.c.claim_hash.notin_(
|
||||
select(TXO.c.claim_hash).where(where_unspent_txos(CLAIM_TYPE_CODES))
|
||||
)
|
||||
|
||||
|
||||
def count_abandoned_claims():
|
||||
return context().fetchtotal(where_abandoned_claims())
|
||||
|
||||
|
||||
def where_abandoned_supports():
|
||||
return Support.c.txo_hash.notin_(
|
||||
select(TXO.c.txo_hash).where(where_unspent_txos(TXO_TYPES['support']))
|
||||
)
|
||||
|
||||
|
||||
def count_abandoned_supports():
|
||||
return context().fetchtotal(where_abandoned_supports())
|
||||
|
||||
|
||||
def count_unspent_txos(
|
||||
txo_types: Tuple[int, ...],
|
||||
blocks: Tuple[int, int] = None,
|
||||
missing_in_supports_table: bool = False,
|
||||
missing_in_claims_table: bool = False,
|
||||
missing_or_stale_in_claims_table: bool = False,
|
||||
):
|
||||
return context().fetchtotal(
|
||||
where_unspent_txos(
|
||||
txo_types, blocks,
|
||||
missing_in_supports_table,
|
||||
missing_in_claims_table,
|
||||
missing_or_stale_in_claims_table,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def distribute_unspent_txos(
|
||||
txo_types: Tuple[int, ...],
|
||||
blocks: Tuple[int, int] = None,
|
||||
missing_in_supports_table: bool = False,
|
||||
missing_in_claims_table: bool = False,
|
||||
missing_or_stale_in_claims_table: bool = False,
|
||||
number_of_buckets: int = 10
|
||||
) -> Tuple[int, List[Tuple[int, int]]]:
|
||||
chunks = (
|
||||
select(func.ntile(number_of_buckets).over(order_by=TXO.c.height).label('chunk'), TXO.c.height)
|
||||
.where(
|
||||
where_unspent_txos(
|
||||
txo_types, blocks,
|
||||
missing_in_supports_table,
|
||||
missing_in_claims_table,
|
||||
missing_or_stale_in_claims_table,
|
||||
)
|
||||
).cte('chunks')
|
||||
)
|
||||
sql = (
|
||||
select(
|
||||
func.count('*').label('items'),
|
||||
func.min(chunks.c.height).label('start_height'),
|
||||
func.max(chunks.c.height).label('end_height'),
|
||||
).group_by(chunks.c.chunk).order_by(chunks.c.chunk)
|
||||
)
|
||||
total = 0
|
||||
buckets = []
|
||||
for bucket in context().fetchall(sql):
|
||||
total += bucket['items']
|
||||
if len(buckets) > 0:
|
||||
if buckets[-1][-1] == bucket['start_height']:
|
||||
if bucket['start_height'] == bucket['end_height']:
|
||||
continue
|
||||
bucket['start_height'] += 1
|
||||
buckets.append((bucket['start_height'], bucket['end_height']))
|
||||
return total, buckets
|
||||
|
||||
|
||||
def where_changed_support_txos(blocks: Optional[Tuple[int, int]]):
|
||||
return (
|
||||
(TXO.c.txo_type == TXO_TYPES['support']) & (
|
||||
between(TXO.c.height, blocks[0], blocks[-1]) |
|
||||
between(TXO.c.spent_height, blocks[0], blocks[-1])
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def where_claims_with_changed_supports(blocks: Optional[Tuple[int, int]]):
|
||||
return Claim.c.claim_hash.in_(
|
||||
select(TXO.c.claim_hash).where(
|
||||
where_changed_support_txos(blocks)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def count_claims_with_changed_supports(blocks: Optional[Tuple[int, int]]) -> int:
|
||||
sql = (
|
||||
select(func.count(distinct(TXO.c.claim_hash)).label('total'))
|
||||
.where(where_changed_support_txos(blocks))
|
||||
)
|
||||
return context().fetchone(sql)['total']
|
||||
|
||||
|
||||
def where_changed_content_txos(blocks: Optional[Tuple[int, int]]):
|
||||
return (
|
||||
(TXO.c.channel_hash.isnot(None)) & (
|
||||
between(TXO.c.height, blocks[0], blocks[-1]) |
|
||||
between(TXO.c.spent_height, blocks[0], blocks[-1])
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def where_channels_with_changed_content(blocks: Optional[Tuple[int, int]]):
|
||||
return Claim.c.claim_hash.in_(
|
||||
select(TXO.c.channel_hash).where(
|
||||
where_changed_content_txos(blocks)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def count_channels_with_changed_content(blocks: Optional[Tuple[int, int]]):
|
||||
sql = (
|
||||
select(func.count(distinct(TXO.c.channel_hash)).label('total'))
|
||||
.where(where_changed_content_txos(blocks))
|
||||
)
|
||||
return context().fetchone(sql)['total']
|
||||
|
||||
|
||||
def select_transactions(cols, account_ids=None, **constraints):
|
||||
s: Select = select(*cols).select_from(TX)
|
||||
if not {'tx_hash', 'tx_hash__in'}.intersection(constraints):
|
||||
assert account_ids, (
|
||||
"'accounts' argument required when "
|
||||
"no 'tx_hash' constraint is present"
|
||||
)
|
||||
where = in_account_ids(account_ids)
|
||||
tx_hashes = union(
|
||||
select(TXO.c.tx_hash).select_from(txo_join_account).where(where),
|
||||
select(TXI.c.tx_hash).select_from(txi_join_account).where(where)
|
||||
)
|
||||
s = s.where(TX.c.tx_hash.in_(tx_hashes))
|
||||
return context().fetchall(query([TX], s, **constraints))
|
||||
|
||||
|
||||
TXO_NOT_MINE = Output(None, None, is_my_output=False)
|
||||
|
||||
|
||||
def get_raw_transactions(tx_hashes):
|
||||
return context().fetchall(
|
||||
select(TX.c.tx_hash, TX.c.raw).where(TX.c.tx_hash.in_(tx_hashes))
|
||||
)
|
||||
|
||||
|
||||
def get_transactions(**constraints) -> Tuple[List[Transaction], Optional[int]]:
|
||||
txs = []
|
||||
sql = select(TX.c.raw, TX.c.height, TX.c.position).select_from(TX)
|
||||
rows = context().fetchall(query([TX], sql, **constraints))
|
||||
for row in rows:
|
||||
txs.append(Transaction(row['raw'], height=row['height'], position=row['position']))
|
||||
return txs, 0
|
||||
|
||||
|
||||
def _get_transactions(
|
||||
wallet=None, include_total=False, **constraints
|
||||
) -> Tuple[List[Transaction], Optional[int]]:
|
||||
include_is_my_input = constraints.pop('include_is_my_input', False)
|
||||
include_is_my_output = constraints.pop('include_is_my_output', False)
|
||||
|
||||
tx_rows = select_transactions(
|
||||
[TX.c.tx_hash, TX.c.raw, TX.c.height, TX.c.position, TX.c.is_verified],
|
||||
order_by=constraints.pop('order_by', ["height=0 DESC", "height DESC", "position DESC"]),
|
||||
**constraints
|
||||
)
|
||||
|
||||
txids, txs, txi_txoids = [], [], []
|
||||
for row in tx_rows:
|
||||
txids.append(row['tx_hash'])
|
||||
txs.append(Transaction(
|
||||
raw=row['raw'], height=row['height'], position=row['position'],
|
||||
is_verified=bool(row['is_verified'])
|
||||
))
|
||||
for txi in txs[-1].inputs:
|
||||
txi_txoids.append(txi.txo_ref.hash)
|
||||
|
||||
annotated_txos = {}
|
||||
for offset in range(0, len(txids), MAX_QUERY_VARIABLES):
|
||||
annotated_txos.update({
|
||||
txo.id: txo for txo in
|
||||
get_txos(
|
||||
wallet=wallet,
|
||||
tx_hash__in=txids[offset:offset + MAX_QUERY_VARIABLES], order_by='txo.tx_hash',
|
||||
include_is_my_input=include_is_my_input,
|
||||
include_is_my_output=include_is_my_output,
|
||||
)[0]
|
||||
})
|
||||
|
||||
referenced_txos = {}
|
||||
for offset in range(0, len(txi_txoids), MAX_QUERY_VARIABLES):
|
||||
referenced_txos.update({
|
||||
txo.id: txo for txo in
|
||||
get_txos(
|
||||
wallet=wallet,
|
||||
txo_hash__in=txi_txoids[offset:offset + MAX_QUERY_VARIABLES], order_by='txo.txo_hash',
|
||||
include_is_my_output=include_is_my_output,
|
||||
)[0]
|
||||
})
|
||||
|
||||
for tx in txs:
|
||||
for txi in tx.inputs:
|
||||
txo = referenced_txos.get(txi.txo_ref.id)
|
||||
if txo:
|
||||
txi.txo_ref = txo.ref
|
||||
for txo in tx.outputs:
|
||||
_txo = annotated_txos.get(txo.id)
|
||||
if _txo:
|
||||
txo.update_annotations(_txo)
|
||||
else:
|
||||
txo.update_annotations(TXO_NOT_MINE)
|
||||
|
||||
for tx in txs:
|
||||
txos = tx.outputs
|
||||
if len(txos) >= 2 and txos[1].can_decode_purchase_data:
|
||||
txos[0].purchase = txos[1]
|
||||
|
||||
return txs, get_transaction_count(**constraints) if include_total else None
|
||||
|
||||
|
||||
def get_transaction_count(**constraints):
|
||||
constraints.pop('wallet', None)
|
||||
constraints.pop('offset', None)
|
||||
constraints.pop('limit', None)
|
||||
constraints.pop('order_by', None)
|
||||
count = select_transactions([func.count().label('total')], **constraints)
|
||||
return count[0]['total'] or 0
|
||||
|
||||
|
||||
BASE_SELECT_TXO_COLUMNS = [
|
||||
TX.c.tx_hash, TX.c.raw, TX.c.height, TX.c.position.label('tx_position'),
|
||||
TX.c.is_verified, TX.c.timestamp,
|
||||
TXO.c.txo_type, TXO.c.position.label('txo_position'), TXO.c.amount, TXO.c.spent_height,
|
||||
TXO.c.script_offset, TXO.c.script_length,
|
||||
]
|
||||
|
||||
|
||||
def select_txos(
|
||||
cols=None, account_ids=None, is_my_input=None,
|
||||
is_my_output=True, is_my_input_or_output=None, exclude_internal_transfers=False,
|
||||
include_is_my_input=False, claim_id_not_in_claim_table=None,
|
||||
txo_id_not_in_claim_table=None, txo_id_not_in_support_table=None,
|
||||
**constraints
|
||||
) -> Select:
|
||||
if cols is None:
|
||||
cols = BASE_SELECT_TXO_COLUMNS
|
||||
s: Select = select(*cols)
|
||||
if account_ids:
|
||||
my_addresses = select(AccountAddress.c.address).where(in_account_ids(account_ids))
|
||||
if is_my_input_or_output:
|
||||
include_is_my_input = True
|
||||
s = s.where(
|
||||
TXO.c.address.in_(my_addresses) | (
|
||||
(TXI.c.address.isnot(None)) &
|
||||
(TXI.c.address.in_(my_addresses))
|
||||
)
|
||||
)
|
||||
else:
|
||||
if is_my_output:
|
||||
s = s.where(TXO.c.address.in_(my_addresses))
|
||||
elif is_my_output is False:
|
||||
s = s.where(TXO.c.address.notin_(my_addresses))
|
||||
if is_my_input:
|
||||
include_is_my_input = True
|
||||
s = s.where(
|
||||
(TXI.c.address.isnot(None)) &
|
||||
(TXI.c.address.in_(my_addresses))
|
||||
)
|
||||
elif is_my_input is False:
|
||||
include_is_my_input = True
|
||||
s = s.where(
|
||||
(TXI.c.address.is_(None)) |
|
||||
(TXI.c.address.notin_(my_addresses))
|
||||
)
|
||||
if exclude_internal_transfers:
|
||||
include_is_my_input = True
|
||||
s = s.where(
|
||||
(TXO.c.txo_type != TXO_TYPES['other']) |
|
||||
(TXO.c.address.notin_(my_addresses)) |
|
||||
(TXI.c.address.is_(None)) |
|
||||
(TXI.c.address.notin_(my_addresses))
|
||||
)
|
||||
joins = TXO.join(TX)
|
||||
#if constraints.get('is_spent', None) is False:
|
||||
# s = s.where((TXO.c.is_spent == False) & (TXO.c.is_reserved == False))
|
||||
if include_is_my_input:
|
||||
joins = joins.join(TXI, (TXI.c.position == 0) & (TXI.c.tx_hash == TXO.c.tx_hash), isouter=True)
|
||||
if claim_id_not_in_claim_table:
|
||||
s = s.where(TXO.c.claim_hash.notin_(select(Claim.c.claim_hash)))
|
||||
elif txo_id_not_in_claim_table:
|
||||
s = s.where(TXO.c.txo_hash.notin_(select(Claim.c.txo_hash)))
|
||||
elif txo_id_not_in_support_table:
|
||||
s = s.where(TXO.c.txo_hash.notin_(select(Support.c.txo_hash)))
|
||||
return query([TXO, TX], s.select_from(joins), **constraints)
|
||||
|
||||
|
||||
META_ATTRS = (
|
||||
'activation_height', 'takeover_height', 'creation_height', 'staked_amount',
|
||||
'short_url', 'canonical_url', 'staked_support_amount', 'staked_support_count',
|
||||
'signed_claim_count', 'signed_support_count', 'is_signature_valid',
|
||||
'reposted_count',
|
||||
)
|
||||
|
||||
|
||||
def rows_to_txos(rows: List[dict], include_tx=True) -> List[Output]:
|
||||
txos = []
|
||||
tx_cache = {}
|
||||
for row in rows:
|
||||
if include_tx:
|
||||
if row['tx_hash'] not in tx_cache:
|
||||
tx_cache[row['tx_hash']] = Transaction(
|
||||
row['raw'], height=row['height'], position=row['tx_position'],
|
||||
timestamp=row['timestamp'],
|
||||
is_verified=bool(row['is_verified']),
|
||||
)
|
||||
txo = tx_cache[row['tx_hash']].outputs[row['txo_position']]
|
||||
else:
|
||||
source = row['raw'][row['script_offset']:row['script_offset']+row['script_length']]
|
||||
txo = Output(
|
||||
amount=row['amount'],
|
||||
script=OutputScript(source),
|
||||
tx_ref=TXRefImmutable.from_hash(row['tx_hash'], row['height'], row['timestamp']),
|
||||
position=row['txo_position'],
|
||||
)
|
||||
txo.spent_height = bool(row['spent_height'])
|
||||
if 'is_my_input' in row:
|
||||
txo.is_my_input = bool(row['is_my_input'])
|
||||
if 'is_my_output' in row:
|
||||
txo.is_my_output = bool(row['is_my_output'])
|
||||
if 'is_my_input' in row and 'is_my_output' in row:
|
||||
if txo.is_my_input and txo.is_my_output and row['txo_type'] == TXO_TYPES['other']:
|
||||
txo.is_internal_transfer = True
|
||||
else:
|
||||
txo.is_internal_transfer = False
|
||||
if 'received_tips' in row:
|
||||
txo.received_tips = row['received_tips']
|
||||
for attr in META_ATTRS:
|
||||
if attr in row:
|
||||
txo.meta[attr] = row[attr]
|
||||
txos.append(txo)
|
||||
return txos
|
||||
|
||||
|
||||
def get_txos(no_tx=False, include_total=False, **constraints) -> Tuple[List[Output], Optional[int]]:
|
||||
wallet_account_ids = constraints.pop('wallet_account_ids', [])
|
||||
include_is_my_input = constraints.get('include_is_my_input', False)
|
||||
include_is_my_output = constraints.pop('include_is_my_output', False)
|
||||
include_received_tips = constraints.pop('include_received_tips', False)
|
||||
|
||||
select_columns = BASE_SELECT_TXO_COLUMNS + [
|
||||
TXO.c.claim_name
|
||||
]
|
||||
|
||||
my_accounts = None
|
||||
if wallet_account_ids:
|
||||
my_accounts = select(AccountAddress.c.address).where(in_account_ids(wallet_account_ids))
|
||||
|
||||
if include_is_my_output and my_accounts is not None:
|
||||
if constraints.get('is_my_output', None) in (True, False):
|
||||
select_columns.append(text(f"{1 if constraints['is_my_output'] else 0} AS is_my_output"))
|
||||
else:
|
||||
select_columns.append(TXO.c.address.in_(my_accounts).label('is_my_output'))
|
||||
|
||||
if include_is_my_input and my_accounts is not None:
|
||||
if constraints.get('is_my_input', None) in (True, False):
|
||||
select_columns.append(text(f"{1 if constraints['is_my_input'] else 0} AS is_my_input"))
|
||||
else:
|
||||
select_columns.append((
|
||||
(TXI.c.address.isnot(None)) &
|
||||
(TXI.c.address.in_(my_accounts))
|
||||
).label('is_my_input'))
|
||||
|
||||
if include_received_tips:
|
||||
support = TXO.alias('support')
|
||||
select_columns.append(
|
||||
select(func.coalesce(func.sum(support.c.amount), 0))
|
||||
.select_from(support).where(
|
||||
(support.c.claim_hash == TXO.c.claim_hash) &
|
||||
(support.c.txo_type == TXO_TYPES['support']) &
|
||||
(support.c.address.in_(my_accounts)) &
|
||||
(support.c.txo_hash.notin_(select(TXI.c.txo_hash)))
|
||||
).label('received_tips')
|
||||
)
|
||||
|
||||
if 'order_by' not in constraints or constraints['order_by'] == 'height':
|
||||
constraints['order_by'] = [
|
||||
"tx.height=0 DESC", "tx.height DESC", "tx.position DESC", "txo.position"
|
||||
]
|
||||
elif constraints.get('order_by', None) == 'none':
|
||||
del constraints['order_by']
|
||||
|
||||
rows = context().fetchall(select_txos(select_columns, **constraints))
|
||||
txos = rows_to_txos(rows, not no_tx)
|
||||
|
||||
channel_hashes = set()
|
||||
for txo in txos:
|
||||
if txo.is_claim and txo.can_decode_claim:
|
||||
if txo.claim.is_signed:
|
||||
channel_hashes.add(txo.claim.signing_channel_hash)
|
||||
|
||||
if channel_hashes:
|
||||
channels = {
|
||||
txo.claim_hash: txo for txo in
|
||||
get_txos(
|
||||
txo_type=TXO_TYPES['channel'], spent_height=0,
|
||||
wallet_account_ids=wallet_account_ids, claim_hash__in=channel_hashes
|
||||
)[0]
|
||||
}
|
||||
for txo in txos:
|
||||
if txo.is_claim and txo.can_decode_claim:
|
||||
txo.channel = channels.get(txo.claim.signing_channel_hash, None)
|
||||
|
||||
return txos, get_txo_count(**constraints) if include_total else None
|
||||
|
||||
|
||||
def _clean_txo_constraints_for_aggregation(constraints):
|
||||
constraints.pop('include_is_my_input', None)
|
||||
constraints.pop('include_is_my_output', None)
|
||||
constraints.pop('include_received_tips', None)
|
||||
constraints.pop('wallet_account_ids', None)
|
||||
constraints.pop('offset', None)
|
||||
constraints.pop('limit', None)
|
||||
constraints.pop('order_by', None)
|
||||
|
||||
|
||||
def get_txo_count(**constraints):
|
||||
_clean_txo_constraints_for_aggregation(constraints)
|
||||
count = context().fetchall(select_txos([func.count().label('total')], **constraints))
|
||||
return count[0]['total'] or 0
|
||||
|
||||
|
||||
def get_txo_sum(**constraints):
|
||||
_clean_txo_constraints_for_aggregation(constraints)
|
||||
result = context().fetchall(select_txos([func.sum(TXO.c.amount).label('total')], **constraints))
|
||||
return result[0]['total'] or 0
|
||||
|
||||
|
||||
def get_balance(**constraints):
|
||||
return get_txo_sum(spent_height=0, **constraints)
|
||||
|
||||
|
||||
def get_report(accounts=None):
|
||||
return
|
||||
|
||||
|
||||
def get_txo_plot(start_day=None, days_back=0, end_day=None, days_after=None, **constraints):
|
||||
_clean_txo_constraints_for_aggregation(constraints)
|
||||
if start_day is None:
|
||||
# TODO: Fix
|
||||
current_ordinal = 0 # self.ledger.headers.estimated_date(self.ledger.headers.height).toordinal()
|
||||
constraints['day__gte'] = current_ordinal - days_back
|
||||
else:
|
||||
constraints['day__gte'] = date.fromisoformat(start_day).toordinal()
|
||||
if end_day is not None:
|
||||
constraints['day__lte'] = date.fromisoformat(end_day).toordinal()
|
||||
elif days_after is not None:
|
||||
constraints['day__lte'] = constraints['day__gte'] + days_after
|
||||
plot = context().fetchall(select_txos(
|
||||
[TX.c.day, func.sum(TXO.c.amount).label('total')],
|
||||
group_by='day', order_by='day', **constraints
|
||||
))
|
||||
for row in plot:
|
||||
row['day'] = date.fromordinal(row['day'])
|
||||
return plot
|
||||
|
||||
|
||||
def get_purchases(**constraints) -> Tuple[List[Output], Optional[int]]:
|
||||
accounts = constraints.pop('accounts', None)
|
||||
assert accounts, "'accounts' argument required to find purchases"
|
||||
if not {'purchased_claim_hash', 'purchased_claim_hash__in'}.intersection(constraints):
|
||||
constraints['purchased_claim_hash__is_not_null'] = True
|
||||
constraints['tx_hash__in'] = (
|
||||
select(TXI.c.tx_hash).select_from(txi_join_account).where(in_account_ids(accounts))
|
||||
)
|
||||
txs, count = get_transactions(**constraints)
|
||||
return [tx.outputs[0] for tx in txs], count
|
||||
|
||||
|
||||
def get_supports_summary(**constraints):
|
||||
return get_txos(
|
||||
txo_type=TXO_TYPES['support'],
|
||||
spent_height=0, is_my_output=True,
|
||||
include_is_my_input=True,
|
||||
no_tx=True,
|
||||
**constraints
|
||||
)
|
||||
|
||||
|
||||
def reserve_outputs(txo_hashes, is_reserved=True):
|
||||
context().execute(
|
||||
TXO.update()
|
||||
.values(is_reserved=is_reserved)
|
||||
.where(TXO.c.txo_hash.in_(txo_hashes))
|
||||
)
|
||||
|
||||
|
||||
def release_all_outputs(account_id):
|
||||
context().execute(
|
||||
TXO.update().values(is_reserved=False).where(
|
||||
TXO.c.is_reserved & TXO.c.address.in_(
|
||||
select(AccountAddress.c.address).where(in_account_ids(account_id))
|
||||
)
|
||||
)
|
||||
)
|
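# Illustrative sketch (not part of this diff): fetching a wallet's unspent claim
# outputs with ownership annotations, assuming the query context for this process
# has already been initialized.
def example_wallet_claims(account_ids):
    txos, total = get_txos(
        txo_type__in=CLAIM_TYPE_CODES, spent_height=0,
        wallet_account_ids=account_ids,
        include_is_my_output=True, include_total=True,
        limit=20,
    )
    return [txo for txo in txos if txo.is_my_output], total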
694
lbry/db/query_context.py
Normal file
|
@@ -0,0 +1,694 @@
|
|||
import os
|
||||
import time
|
||||
import traceback
|
||||
import functools
|
||||
from io import BytesIO
|
||||
import multiprocessing as mp
|
||||
from decimal import Decimal
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
from dataclasses import dataclass, field
|
||||
from contextvars import ContextVar
|
||||
|
||||
from sqlalchemy import create_engine, inspect, bindparam, func, exists, event as sqlalchemy_event
|
||||
from sqlalchemy.future import select
|
||||
from sqlalchemy.engine import Engine
|
||||
from sqlalchemy.sql import Insert
|
||||
try:
|
||||
from pgcopy import CopyManager
|
||||
except ImportError:
|
||||
CopyManager = None
|
||||
|
||||
from lbry.event import EventQueuePublisher
|
||||
from lbry.blockchain.ledger import Ledger
|
||||
from lbry.blockchain.transaction import Transaction, Output, Input
|
||||
from lbry.schema.tags import clean_tags
|
||||
from lbry.schema.result import Censor
|
||||
from lbry.schema.mime_types import guess_stream_type
|
||||
|
||||
from .utils import pg_insert
|
||||
from .tables import Block, TX, TXO, TXI, Claim, Tag, Support
|
||||
from .constants import TXO_TYPES, STREAM_TYPES
|
||||
|
||||
|
||||
_context: ContextVar['QueryContext'] = ContextVar('_context')
|
||||
|
||||
|
||||
@dataclass
|
||||
class QueryContext:
|
||||
engine: Engine
|
||||
ledger: Ledger
|
||||
message_queue: mp.Queue
|
||||
stop_event: mp.Event
|
||||
stack: List[List]
|
||||
metrics: Dict
|
||||
is_tracking_metrics: bool
|
||||
blocked_streams: Dict
|
||||
blocked_channels: Dict
|
||||
filtered_streams: Dict
|
||||
filtered_channels: Dict
|
||||
pid: int
|
||||
|
||||
# QueryContext __enter__/__exit__ state
|
||||
current_timer_name: Optional[str] = None
|
||||
current_timer_time: float = 0
|
||||
current_progress: Optional['ProgressContext'] = None
|
||||
|
||||
copy_managers: Dict[str, CopyManager] = field(default_factory=dict)
|
||||
|
||||
@property
|
||||
def is_postgres(self):
|
||||
return self.engine.dialect.name == 'postgresql'
|
||||
|
||||
@property
|
||||
def is_sqlite(self):
|
||||
return self.engine.dialect.name == 'sqlite'
|
||||
|
||||
def raise_unsupported_dialect(self):
|
||||
raise RuntimeError(f'Unsupported database dialect: {self.engine.dialect.name}.')
|
||||
|
||||
def get_resolve_censor(self) -> Censor:
|
||||
return Censor(self.blocked_streams, self.blocked_channels)
|
||||
|
||||
def get_search_censor(self) -> Censor:
|
||||
return Censor(self.filtered_streams, self.filtered_channels)
|
||||
|
||||
def pg_copy(self, table, rows):
|
||||
with self.engine.begin() as c:
|
||||
copy_manager = self.copy_managers.get(table.name)
|
||||
if copy_manager is None:
|
||||
self.copy_managers[table.name] = copy_manager = CopyManager(
|
||||
c.connection, table.name, rows[0].keys()
|
||||
)
|
||||
copy_manager.conn = c.connection
|
||||
copy_manager.copy(map(dict.values, rows), BytesIO)
|
||||
copy_manager.conn = None
|
||||
|
||||
def connect_without_transaction(self):
|
||||
return self.engine.connect().execution_options(isolation_level="AUTOCOMMIT")
|
||||
|
||||
def connect_streaming(self):
|
||||
return self.engine.connect().execution_options(stream_results=True)
|
||||
|
||||
def execute_notx(self, sql, *args):
|
||||
with self.connect_without_transaction() as c:
|
||||
return c.execute(sql, *args)
|
||||
|
||||
def execute(self, sql, *args):
|
||||
with self.engine.begin() as c:
|
||||
return c.execute(sql, *args)
|
||||
|
||||
def fetchone(self, sql, *args):
|
||||
with self.engine.begin() as c:
|
||||
row = c.execute(sql, *args).fetchone()
|
||||
return dict(row._mapping) if row else row
|
||||
|
||||
def fetchall(self, sql, *args):
|
||||
with self.engine.begin() as c:
|
||||
rows = c.execute(sql, *args).fetchall()
|
||||
return [dict(row._mapping) for row in rows]
|
||||
|
||||
def fetchtotal(self, condition) -> int:
|
||||
sql = select(func.count('*').label('total')).where(condition)
|
||||
return self.fetchone(sql)['total']
|
||||
|
||||
def fetchmax(self, column, default: int) -> int:
|
||||
sql = select(func.coalesce(func.max(column), default).label('max_result'))
|
||||
return self.fetchone(sql)['max_result']
|
||||
|
||||
def has_records(self, table) -> bool:
|
||||
sql = select(exists([1], from_obj=table).label('result'))
|
||||
return bool(self.fetchone(sql)['result'])
|
||||
|
||||
def insert_or_ignore(self, table):
|
||||
if self.is_sqlite:
|
||||
return table.insert().prefix_with("OR IGNORE")
|
||||
elif self.is_postgres:
|
||||
return pg_insert(table).on_conflict_do_nothing()
|
||||
else:
|
||||
self.raise_unsupported_dialect()
|
||||
|
||||
def insert_or_replace(self, table, replace):
|
||||
if self.is_sqlite:
|
||||
return table.insert().prefix_with("OR REPLACE")
|
||||
elif self.is_postgres:
|
||||
insert = pg_insert(table)
|
||||
return insert.on_conflict_do_update(
|
||||
table.primary_key, set_={col: getattr(insert.excluded, col) for col in replace}
|
||||
)
|
||||
else:
|
||||
self.raise_unsupported_dialect()
|
||||
|
||||
def has_table(self, table):
|
||||
return inspect(self.engine).has_table(table)
|
||||
|
||||
def get_bulk_loader(self) -> 'BulkLoader':
|
||||
return BulkLoader(self)
|
||||
|
||||
def reset_metrics(self):
|
||||
self.stack = []
|
||||
self.metrics = {}
|
||||
|
||||
def with_timer(self, timer_name: str) -> 'QueryContext':
|
||||
self.current_timer_name = timer_name
|
||||
return self
|
||||
|
||||
@property
|
||||
def elapsed(self):
|
||||
return time.perf_counter() - self.current_timer_time
|
||||
|
||||
def __enter__(self) -> 'QueryContext':
|
||||
self.current_timer_time = time.perf_counter()
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.current_timer_name = None
|
||||
self.current_timer_time = 0
|
||||
self.current_progress = None
|
||||
|
||||
|
||||
def context(with_timer: str = None) -> 'QueryContext':
|
||||
if isinstance(with_timer, str):
|
||||
return _context.get().with_timer(with_timer)
|
||||
return _context.get()
|
||||
|
||||
|
||||
def set_postgres_settings(connection, _):
|
||||
cursor = connection.cursor()
|
||||
cursor.execute('SET work_mem="500MB";')
|
||||
cursor.execute('COMMIT;')
|
||||
cursor.close()
|
||||
|
||||
|
||||
def set_sqlite_settings(connection, _):
|
||||
connection.isolation_level = None
|
||||
cursor = connection.cursor()
|
||||
cursor.execute('PRAGMA journal_mode=WAL;')
|
||||
cursor.close()
|
||||
|
||||
|
||||
def do_sqlite_begin(connection):
|
||||
# see: https://bit.ly/3j4vvXm
|
||||
connection.exec_driver_sql("BEGIN")
|
||||
|
||||
|
||||
def initialize(
|
||||
ledger: Ledger, message_queue: mp.Queue, stop_event: mp.Event,
|
||||
track_metrics=False, block_and_filter=None):
|
||||
url = ledger.conf.db_url_or_default
|
||||
engine = create_engine(url)
|
||||
if engine.name == "postgresql":
|
||||
sqlalchemy_event.listen(engine, "connect", set_postgres_settings)
|
||||
elif engine.name == "sqlite":
|
||||
sqlalchemy_event.listen(engine, "connect", set_sqlite_settings)
|
||||
sqlalchemy_event.listen(engine, "begin", do_sqlite_begin)
|
||||
if block_and_filter is not None:
|
||||
blocked_streams, blocked_channels, filtered_streams, filtered_channels = block_and_filter
|
||||
else:
|
||||
blocked_streams = blocked_channels = filtered_streams = filtered_channels = {}
|
||||
_context.set(
|
||||
QueryContext(
|
||||
pid=os.getpid(), engine=engine,
|
||||
ledger=ledger, message_queue=message_queue, stop_event=stop_event,
|
||||
stack=[], metrics={}, is_tracking_metrics=track_metrics,
|
||||
blocked_streams=blocked_streams, blocked_channels=blocked_channels,
|
||||
filtered_streams=filtered_streams, filtered_channels=filtered_channels,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def uninitialize():
|
||||
ctx = _context.get(None)
|
||||
if ctx is not None:
|
||||
ctx.engine.dispose()
|
||||
_context.set(None)
|
||||
|
||||
|
||||
class Event:
|
||||
_events: List['Event'] = []
|
||||
__slots__ = 'id', 'name', 'units'
|
||||
|
||||
def __init__(self, name: str, units: Tuple[str]):
|
||||
self.id = None
|
||||
self.name = name
|
||||
self.units = units
|
||||
|
||||
@classmethod
|
||||
def get_by_id(cls, event_id) -> 'Event':
|
||||
return cls._events[event_id]
|
||||
|
||||
@classmethod
|
||||
def get_by_name(cls, name) -> 'Event':
|
||||
for event in cls._events:
|
||||
if event.name == name:
|
||||
return event
|
||||
|
||||
@classmethod
|
||||
def add(cls, name: str, *units: str) -> 'Event':
|
||||
assert cls.get_by_name(name) is None, f"Event {name} already exists."
|
||||
assert name.count('.') == 3, f"Event {name} does not follow pattern of: [module].sync.[phase].[task]"
|
||||
event = cls(name, units)
|
||||
cls._events.append(event)
|
||||
event.id = cls._events.index(event)
|
||||
return event
|
||||
|
||||
|
||||
def event_emitter(name: str, *units: str, throttle=1):
|
||||
event = Event.add(name, *units)
|
||||
|
||||
def wrapper(f):
|
||||
@functools.wraps(f)
|
||||
def with_progress(*args, **kwargs):
|
||||
with progress(event, throttle=throttle) as p:
|
||||
try:
|
||||
return f(*args, **kwargs, p=p)
|
||||
except BreakProgress:
|
||||
raise
|
||||
except:
|
||||
traceback.print_exc()
|
||||
raise
|
||||
return with_progress
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
class ProgressPublisher(EventQueuePublisher):

    def message_to_event(self, message):
        total, extra = None, None
        if len(message) == 3:
            event_id, progress_id, done = message
        elif len(message) == 5:
            event_id, progress_id, done, total, extra = message
        else:
            raise TypeError("progress message must be tuple of 3 or 5 values.")
        event = Event.get_by_id(event_id)
        d = {
            "event": event.name,
            "data": {"id": progress_id, "done": done}
        }
        if total is not None:
            d['data']['total'] = total
            d['data']['units'] = event.units
        if isinstance(extra, dict):
            d['data'].update(extra)
        return d


class BreakProgress(Exception):
    """Break out of progress when total is 0."""

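# --- Illustrative aside (not part of the original file): the two message shapes this publisher
# accepts, using a hypothetical event. A 3-tuple carries only the done counts; a 5-tuple also
# carries the totals plus an extra dict merged into the "data" payload.
_publish_event = Event.add("other.sync.demo.publish", "units")
_short_message = (_publish_event.id, 0, (5,))                           # -> {"event": ..., "data": {"id": 0, "done": (5,)}}
_long_message = (_publish_event.id, 0, (5,), (10,), {"label": "demo"})  # also adds "total", "units" and the extra keys
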
class Progress:

    def __init__(self, message_queue: mp.Queue, event: Event, throttle=1):
        self.message_queue = message_queue
        self.event = event
        self.progress_id = 0
        self.throttle = throttle
        self.last_done = (0,)*len(event.units)
        self.last_done_queued = (0,)*len(event.units)
        self.totals = (0,)*len(event.units)

    def __enter__(self) -> 'Progress':
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.last_done != self.last_done_queued:
            self.message_queue.put((self.event.id, self.progress_id, self.last_done))
            self.last_done_queued = self.last_done
        if exc_type == BreakProgress:
            return True
        if self.last_done != self.totals:  # or exc_type is not None:
            # TODO: add exception info into closing message if there is any
            self.message_queue.put((
                self.event.id, self.progress_id, (-1,)*len(self.event.units)
            ))

    def start(self, *totals: int, progress_id=0, label=None, extra=None):
        assert len(totals) == len(self.event.units), \
            f"Totals {totals} do not match up with units {self.event.units}."
        if not any(totals):
            raise BreakProgress
        self.totals = totals
        self.progress_id = progress_id
        extra = {} if extra is None else extra.copy()
        if label is not None:
            extra['label'] = label
        self.step(*((0,)*len(totals)), force=True, extra=extra)

    def step(self, *done: int, force=False, extra=None):
        if done == ():
            assert len(self.totals) == 1, "Incrementing step() only works with one unit progress."
            done = (self.last_done[0]+1,)
        assert len(done) == len(self.totals), \
            f"Done elements {done} don't match total elements {self.totals}."
        self.last_done = done
        send_condition = force or extra is not None or (
            # throttle rate of events being generated (only throttles first unit value)
            (self.throttle == 1 or done[0] % self.throttle == 0) and
            # deduplicate finish event by not sending a step where done == total
            any(i < j for i, j in zip(done, self.totals)) and
            # deduplicate same event
            done != self.last_done_queued
        )
        if send_condition:
            if extra is not None:
                self.message_queue.put_nowait(
                    (self.event.id, self.progress_id, done, self.totals, extra)
                )
            else:
                self.message_queue.put_nowait(
                    (self.event.id, self.progress_id, done)
                )
            self.last_done_queued = done

    def add(self, *done: int, force=False, extra=None):
        assert len(done) == len(self.last_done), \
            f"Done elements {done} don't match total elements {self.last_done}."
        self.step(
            *(i+j for i, j in zip(self.last_done, done)),
            force=force, extra=extra
        )

    def iter(self, items: List):
        self.start(len(items))
        for item in items:
            yield item
            self.step()

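# --- Illustrative aside (not part of the original file): driving Progress directly with a
# multiprocessing queue (the event name "other.sync.demo.copy" is hypothetical). iter() starts
# the progress, yields each item and emits throttled step messages onto the queue.
_copy_queue = mp.Queue()
_copy_event = Event.add("other.sync.demo.copy", "files")
with Progress(_copy_queue, _copy_event, throttle=5) as _p:
    for _item in _p.iter(list(range(20))):
        pass  # copy one file here
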
class ProgressContext(Progress):

    def __init__(self, ctx: QueryContext, event: Event, throttle=1):
        super().__init__(ctx.message_queue, event, throttle)
        self.ctx = ctx

    def __enter__(self) -> 'ProgressContext':
        self.ctx.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return any((
            self.ctx.__exit__(exc_type, exc_val, exc_tb),
            super().__exit__(exc_type, exc_val, exc_tb)
        ))


def progress(e: Event, throttle=1) -> ProgressContext:
    ctx = context(e.name)
    ctx.current_progress = ProgressContext(ctx, e, throttle=throttle)
    return ctx.current_progress

class BulkLoader:
|
||||
|
||||
def __init__(self, ctx: QueryContext):
|
||||
self.ctx = ctx
|
||||
self.ledger = ctx.ledger
|
||||
self.blocks = []
|
||||
self.txs = []
|
||||
self.txos = []
|
||||
self.txis = []
|
||||
self.supports = []
|
||||
self.claims = []
|
||||
self.tags = []
|
||||
self.update_claims = []
|
||||
self.delete_tags = []
|
||||
|
||||
@staticmethod
|
||||
def block_to_row(block: Block) -> dict:
|
||||
return {
|
||||
'block_hash': block.block_hash,
|
||||
'previous_hash': block.prev_block_hash,
|
||||
'file_number': block.file_number,
|
||||
'height': 0 if block.is_first_block else block.height,
|
||||
'timestamp': block.timestamp,
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def tx_to_row(block_hash: bytes, tx: Transaction) -> dict:
|
||||
row = {
|
||||
'tx_hash': tx.hash,
|
||||
'block_hash': block_hash,
|
||||
'raw': tx.raw,
|
||||
'height': tx.height,
|
||||
'position': tx.position,
|
||||
'is_verified': tx.is_verified,
|
||||
'timestamp': tx.timestamp,
|
||||
'day': tx.day,
|
||||
'purchased_claim_hash': None,
|
||||
}
|
||||
txos = tx.outputs
|
||||
if len(txos) >= 2 and txos[1].can_decode_purchase_data:
|
||||
txos[0].purchase = txos[1]
|
||||
row['purchased_claim_hash'] = txos[1].purchase_data.claim_hash
|
||||
return row
|
||||
|
||||
@staticmethod
|
||||
def txi_to_row(tx: Transaction, txi: Input) -> dict:
|
||||
return {
|
||||
'tx_hash': tx.hash,
|
||||
'txo_hash': txi.txo_ref.hash,
|
||||
'position': txi.position,
|
||||
'height': tx.height,
|
||||
}
|
||||
|
||||
def txo_to_row(self, tx: Transaction, txo: Output) -> dict:
|
||||
row = {
|
||||
'tx_hash': tx.hash,
|
||||
'txo_hash': txo.hash,
|
||||
'address': txo.get_address(self.ledger) if txo.has_address else None,
|
||||
'position': txo.position,
|
||||
'amount': txo.amount,
|
||||
'height': tx.height,
|
||||
'script_offset': txo.script.offset,
|
||||
'script_length': txo.script.length,
|
||||
'txo_type': 0,
|
||||
'claim_id': None,
|
||||
'claim_hash': None,
|
||||
'claim_name': None,
|
||||
'channel_hash': None,
|
||||
'signature': None,
|
||||
'signature_digest': None,
|
||||
'public_key': None,
|
||||
'public_key_hash': None
|
||||
}
|
||||
if txo.is_claim:
|
||||
if txo.can_decode_claim:
|
||||
claim = txo.claim
|
||||
row['txo_type'] = TXO_TYPES.get(claim.claim_type, TXO_TYPES['stream'])
|
||||
if claim.is_channel:
|
||||
row['public_key'] = claim.channel.public_key_bytes
|
||||
row['public_key_hash'] = self.ledger.address_to_hash160(
|
||||
self.ledger.public_key_to_address(claim.channel.public_key_bytes)
|
||||
)
|
||||
else:
|
||||
row['txo_type'] = TXO_TYPES['stream']
|
||||
elif txo.is_support:
|
||||
row['txo_type'] = TXO_TYPES['support']
|
||||
elif txo.purchase is not None:
|
||||
row['txo_type'] = TXO_TYPES['purchase']
|
||||
row['claim_id'] = txo.purchased_claim_id
|
||||
row['claim_hash'] = txo.purchased_claim_hash
|
||||
if txo.script.is_claim_involved:
|
||||
signable = txo.can_decode_signable
|
||||
if signable and signable.is_signed:
|
||||
row['channel_hash'] = signable.signing_channel_hash
|
||||
row['signature'] = txo.get_encoded_signature()
|
||||
row['signature_digest'] = txo.get_signature_digest(self.ledger)
|
||||
row['claim_id'] = txo.claim_id
|
||||
row['claim_hash'] = txo.claim_hash
|
||||
try:
|
||||
row['claim_name'] = txo.claim_name.replace('\x00', '')
|
||||
except UnicodeDecodeError:
|
||||
pass
|
||||
return row
|
||||
|
||||
def claim_to_rows(
|
||||
self, txo: Output, staked_support_amount: int, staked_support_count: int,
|
||||
signature: bytes = None, signature_digest: bytes = None, channel_public_key: bytes = None,
|
||||
) -> Tuple[dict, List]:
|
||||
|
||||
tx = txo.tx_ref
|
||||
d = {
|
||||
'claim_type': None,
|
||||
'address': txo.get_address(self.ledger),
|
||||
'txo_hash': txo.hash,
|
||||
'amount': txo.amount,
|
||||
'height': tx.height,
|
||||
'timestamp': tx.timestamp,
|
||||
# support
|
||||
'staked_amount': txo.amount + staked_support_amount,
|
||||
'staked_support_amount': staked_support_amount,
|
||||
'staked_support_count': staked_support_count,
|
||||
# basic metadata
|
||||
'title': None,
|
||||
'description': None,
|
||||
'author': None,
|
||||
# streams
|
||||
'stream_type': None,
|
||||
'media_type': None,
|
||||
'duration': None,
|
||||
'release_time': None,
|
||||
'fee_amount': 0,
|
||||
'fee_currency': None,
|
||||
# reposts
|
||||
'reposted_claim_hash': None,
|
||||
# signed claims
|
||||
'channel_hash': None,
|
||||
'is_signature_valid': None,
|
||||
}
|
||||
|
||||
claim = txo.can_decode_claim
|
||||
if not claim:
|
||||
return d, []
|
||||
|
||||
if claim.is_stream:
d['claim_type'] = TXO_TYPES['stream']
d['media_type'] = claim.stream.source.media_type
d['stream_type'] = STREAM_TYPES[guess_stream_type(d['media_type'])]
|
||||
d['title'] = claim.stream.title.replace('\x00', '')
|
||||
d['description'] = claim.stream.description.replace('\x00', '')
|
||||
d['author'] = claim.stream.author.replace('\x00', '')
|
||||
if claim.stream.video and claim.stream.video.duration:
|
||||
d['duration'] = claim.stream.video.duration
|
||||
if claim.stream.audio and claim.stream.audio.duration:
|
||||
d['duration'] = claim.stream.audio.duration
|
||||
if claim.stream.release_time:
|
||||
d['release_time'] = claim.stream.release_time
|
||||
if claim.stream.has_fee:
|
||||
fee = claim.stream.fee
|
||||
if isinstance(fee.amount, Decimal):
|
||||
d['fee_amount'] = int(fee.amount*1000)
|
||||
if isinstance(fee.currency, str):
|
||||
d['fee_currency'] = fee.currency.lower()
|
||||
elif claim.is_repost:
|
||||
d['claim_type'] = TXO_TYPES['repost']
|
||||
d['reposted_claim_hash'] = claim.repost.reference.claim_hash
|
||||
elif claim.is_channel:
|
||||
d['claim_type'] = TXO_TYPES['channel']
|
||||
if claim.is_signed:
|
||||
d['channel_hash'] = claim.signing_channel_hash
|
||||
d['is_signature_valid'] = (
|
||||
all((signature, signature_digest, channel_public_key)) and
|
||||
Output.is_signature_valid(
|
||||
signature, signature_digest, channel_public_key
|
||||
)
|
||||
)
|
||||
|
||||
tags = []
|
||||
if claim.message.tags:
|
||||
claim_hash = txo.claim_hash
|
||||
tags = [
|
||||
{'claim_hash': claim_hash, 'tag': tag}
|
||||
for tag in clean_tags(claim.message.tags)
|
||||
]
|
||||
|
||||
return d, tags
|
||||
|
||||
def support_to_row(
|
||||
self, txo: Output, channel_public_key: bytes = None,
|
||||
signature: bytes = None, signature_digest: bytes = None
|
||||
):
|
||||
tx = txo.tx_ref
|
||||
d = {
|
||||
'txo_hash': txo.ref.hash,
|
||||
'claim_hash': txo.claim_hash,
|
||||
'address': txo.get_address(self.ledger),
|
||||
'amount': txo.amount,
|
||||
'height': tx.height,
|
||||
'timestamp': tx.timestamp,
|
||||
'emoji': None,
|
||||
'channel_hash': None,
|
||||
'is_signature_valid': None,
|
||||
}
|
||||
support = txo.can_decode_support
|
||||
if support:
|
||||
d['emoji'] = support.emoji
|
||||
if support.is_signed:
|
||||
d['channel_hash'] = support.signing_channel_hash
|
||||
d['is_signature_valid'] = (
|
||||
all((signature, signature_digest, channel_public_key)) and
|
||||
Output.is_signature_valid(
|
||||
signature, signature_digest, channel_public_key
|
||||
)
|
||||
)
|
||||
return d
|
||||
|
||||
def add_block(self, block: Block):
|
||||
self.blocks.append(self.block_to_row(block))
|
||||
for tx in block.txs:
|
||||
self.add_transaction(block.block_hash, tx)
|
||||
return self
|
||||
|
||||
def add_transaction(self, block_hash: bytes, tx: Transaction):
|
||||
self.txs.append(self.tx_to_row(block_hash, tx))
|
||||
for txi in tx.inputs:
|
||||
if txi.coinbase is None:
|
||||
self.txis.append(self.txi_to_row(tx, txi))
|
||||
for txo in tx.outputs:
|
||||
self.txos.append(self.txo_to_row(tx, txo))
|
||||
return self
|
||||
|
||||
def add_support(self, txo: Output, **extra):
|
||||
self.supports.append(self.support_to_row(txo, **extra))
|
||||
|
||||
def add_claim(
|
||||
self, txo: Output, short_url: str,
|
||||
creation_height: int, activation_height: int, expiration_height: int,
|
||||
takeover_height: int = None, **extra
|
||||
):
|
||||
try:
|
||||
claim_name = txo.claim_name.replace('\x00', '')
|
||||
normalized_name = txo.normalized_name
|
||||
except UnicodeDecodeError:
|
||||
claim_name = normalized_name = ''
|
||||
d, tags = self.claim_to_rows(txo, **extra)
|
||||
d['claim_hash'] = txo.claim_hash
|
||||
d['claim_id'] = txo.claim_id
|
||||
d['claim_name'] = claim_name
|
||||
d['normalized'] = normalized_name
|
||||
d['short_url'] = short_url
|
||||
d['creation_height'] = creation_height
|
||||
d['activation_height'] = activation_height
|
||||
d['expiration_height'] = expiration_height
|
||||
d['takeover_height'] = takeover_height
|
||||
d['is_controlling'] = takeover_height is not None
|
||||
self.claims.append(d)
|
||||
self.tags.extend(tags)
|
||||
return self
|
||||
|
||||
def update_claim(self, txo: Output, **extra):
|
||||
d, tags = self.claim_to_rows(txo, **extra)
|
||||
d['pk'] = txo.claim_hash
|
||||
self.update_claims.append(d)
|
||||
self.delete_tags.append({'pk': txo.claim_hash})
|
||||
self.tags.extend(tags)
|
||||
return self
|
||||
|
||||
def get_queries(self):
|
||||
return (
|
||||
(Block.insert(), self.blocks),
|
||||
(TX.insert(), self.txs),
|
||||
(TXO.insert(), self.txos),
|
||||
(TXI.insert(), self.txis),
|
||||
(Claim.insert(), self.claims),
|
||||
(Tag.delete().where(Tag.c.claim_hash == bindparam('pk')), self.delete_tags),
|
||||
(Claim.update().where(Claim.c.claim_hash == bindparam('pk')), self.update_claims),
|
||||
(Tag.insert(), self.tags),
|
||||
(Support.insert(), self.supports),
|
||||
)
|
||||
|
||||
def flush(self, return_row_count_for_table) -> int:
|
||||
done = 0
|
||||
for sql, rows in self.get_queries():
|
||||
if not rows:
|
||||
continue
|
||||
if self.ctx.is_postgres and isinstance(sql, Insert):
|
||||
self.ctx.pg_copy(sql.table, rows)
|
||||
else:
|
||||
self.ctx.execute(sql, rows)
|
||||
if sql.table == return_row_count_for_table:
|
||||
done += len(rows)
|
||||
rows.clear()
|
||||
return done
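# --- Illustrative aside (not part of the original file): the intended BulkLoader flow, mirroring
# how lbry/db/sync.py (below) uses it. `unspent_support_txos` is a hypothetical iterable of Output
# objects; rows accumulate in memory and are written in a single flush, which returns the number
# of rows added to the requested table.
#
#     loader = p.ctx.get_bulk_loader()
#     for txo in unspent_support_txos:
#         loader.add_support(txo)
#     inserted = loader.flush(Support)   # batched INSERTs, or COPY on PostgreSQL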

lbry/db/sync.py  (new file, 103 lines)
@@ -0,0 +1,103 @@
from sqlalchemy.future import select

from lbry.db.query_context import progress, Event
from lbry.db.tables import TX, TXI, TXO, Claim, Support
from .constants import TXO_TYPES, CLAIM_TYPE_CODES
from .queries import (
    BASE_SELECT_TXO_COLUMNS,
    rows_to_txos, where_unspent_txos,
    where_abandoned_supports,
    where_abandoned_claims
)


SPENDS_UPDATE_EVENT = Event.add("client.sync.spends.update", "steps")
CLAIMS_INSERT_EVENT = Event.add("client.sync.claims.insert", "claims")
CLAIMS_UPDATE_EVENT = Event.add("client.sync.claims.update", "claims")
CLAIMS_DELETE_EVENT = Event.add("client.sync.claims.delete", "claims")
SUPPORT_INSERT_EVENT = Event.add("client.sync.supports.insert", "supports")
SUPPORT_UPDATE_EVENT = Event.add("client.sync.supports.update", "supports")
SUPPORT_DELETE_EVENT = Event.add("client.sync.supports.delete", "supports")


def process_all_things_after_sync():
    with progress(SPENDS_UPDATE_EVENT) as p:
        p.start(2)
        update_spent_outputs(p.ctx)
        p.step(1)
        set_input_addresses(p.ctx)
        p.step(2)
    with progress(SUPPORT_DELETE_EVENT) as p:
        p.start(1)
        sql = Support.delete().where(where_abandoned_supports())
        p.ctx.execute(sql)
    with progress(SUPPORT_INSERT_EVENT) as p:
        loader = p.ctx.get_bulk_loader()
        sql = (
            select(*BASE_SELECT_TXO_COLUMNS)
            .where(where_unspent_txos(TXO_TYPES['support'], missing_in_supports_table=True))
            .select_from(TXO.join(TX))
        )
        for support in rows_to_txos(p.ctx.fetchall(sql)):
            loader.add_support(support)
        loader.flush(Support)
    with progress(CLAIMS_DELETE_EVENT) as p:
        p.start(1)
        sql = Claim.delete().where(where_abandoned_claims())
        p.ctx.execute(sql)
    with progress(CLAIMS_INSERT_EVENT) as p:
        loader = p.ctx.get_bulk_loader()
        sql = (
            select(*BASE_SELECT_TXO_COLUMNS)
            .where(where_unspent_txos(CLAIM_TYPE_CODES, missing_in_claims_table=True))
            .select_from(TXO.join(TX))
        )
        for claim in rows_to_txos(p.ctx.fetchall(sql)):
            loader.add_claim(claim, '', 0, 0, 0, 0, staked_support_amount=0, staked_support_count=0)
        loader.flush(Claim)
    with progress(CLAIMS_UPDATE_EVENT) as p:
        loader = p.ctx.get_bulk_loader()
        sql = (
            select(*BASE_SELECT_TXO_COLUMNS)
            .where(where_unspent_txos(CLAIM_TYPE_CODES, missing_or_stale_in_claims_table=True))
            .select_from(TXO.join(TX))
        )
        for claim in rows_to_txos(p.ctx.fetchall(sql)):
            loader.update_claim(claim)
        loader.flush(Claim)


def set_input_addresses(ctx):
    # Update TXIs to have the address of TXO they are spending.
    if ctx.is_sqlite:
        address_query = select(TXO.c.address).where(TXI.c.txo_hash == TXO.c.txo_hash)
        set_addresses = (
            TXI.update()
            .values(address=address_query.scalar_subquery())
            .where(TXI.c.address.is_(None))
        )
    else:
        set_addresses = (
            TXI.update()
            .values({TXI.c.address: TXO.c.address})
            .where((TXI.c.address.is_(None)) & (TXI.c.txo_hash == TXO.c.txo_hash))
        )
    ctx.execute(set_addresses)


def update_spent_outputs(ctx):
    # Update spent TXOs setting spent_height
    set_spent_height = (
        TXO.update()
        .values({
            TXO.c.spent_height: (
                select(TXI.c.height)
                .where(TXI.c.txo_hash == TXO.c.txo_hash)
                .scalar_subquery()
            )
        }).where(
            (TXO.c.spent_height == 0) &
            (TXO.c.txo_hash.in_(select(TXI.c.txo_hash).where(TXI.c.address.is_(None))))
        )
    )
    ctx.execute(set_spent_height)
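# --- Illustrative aside (not part of the original file): a quick way to inspect the SQL that the
# correlated UPDATE above produces for a given dialect (the choice of the PostgreSQL dialect here
# is illustrative).
from sqlalchemy.dialects import postgresql as _pg

_spent_height_sql = (
    TXO.update()
    .values({TXO.c.spent_height: select(TXI.c.height)
             .where(TXI.c.txo_hash == TXO.c.txo_hash).scalar_subquery()})
    .where(TXO.c.spent_height == 0)
)
print(_spent_height_sql.compile(dialect=_pg.dialect()))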

lbry/db/tables.py  (new file, 262 lines)
@@ -0,0 +1,262 @@
# pylint: skip-file
|
||||
|
||||
from sqlalchemy import (
|
||||
MetaData, Table, Column, ForeignKey,
|
||||
LargeBinary, Text, SmallInteger, Integer, BigInteger, Boolean,
|
||||
)
|
||||
from .constants import TXO_TYPES, CLAIM_TYPE_CODES
|
||||
|
||||
|
||||
SCHEMA_VERSION = '1.4'
|
||||
|
||||
|
||||
metadata = MetaData()
|
||||
|
||||
|
||||
Version = Table(
|
||||
'version', metadata,
|
||||
Column('version', Text, primary_key=True),
|
||||
)
|
||||
|
||||
|
||||
PubkeyAddress = Table(
|
||||
'pubkey_address', metadata,
|
||||
Column('address', Text, primary_key=True),
|
||||
Column('used_times', Integer, server_default='0'),
|
||||
)
|
||||
|
||||
|
||||
AccountAddress = Table(
|
||||
'account_address', metadata,
|
||||
Column('account', Text, primary_key=True),
|
||||
Column('address', Text, ForeignKey(PubkeyAddress.columns.address), primary_key=True),
|
||||
Column('chain', SmallInteger),
|
||||
Column('pubkey', LargeBinary),
|
||||
Column('chain_code', LargeBinary),
|
||||
Column('n', Integer),
|
||||
Column('depth', SmallInteger),
|
||||
)
|
||||
|
||||
|
||||
Block = Table(
|
||||
'block', metadata,
|
||||
Column('block_hash', LargeBinary, primary_key=True),
|
||||
Column('previous_hash', LargeBinary),
|
||||
Column('file_number', SmallInteger),
|
||||
Column('height', Integer),
|
||||
Column('timestamp', Integer),
|
||||
Column('block_filter', LargeBinary, nullable=True)
|
||||
)
|
||||
|
||||
|
||||
TX = Table(
|
||||
'tx', metadata,
|
||||
Column('block_hash', LargeBinary, nullable=True),
|
||||
Column('tx_hash', LargeBinary, primary_key=True),
|
||||
Column('raw', LargeBinary),
|
||||
Column('height', Integer),
|
||||
Column('position', SmallInteger),
|
||||
Column('timestamp', Integer, nullable=True),
|
||||
Column('day', Integer, nullable=True),
|
||||
Column('is_verified', Boolean, server_default='FALSE'),
|
||||
Column('purchased_claim_hash', LargeBinary, nullable=True),
|
||||
Column('tx_filter', LargeBinary, nullable=True)
|
||||
)
|
||||
|
||||
|
||||
pg_add_tx_constraints_and_indexes = [
|
||||
"ALTER TABLE tx ADD PRIMARY KEY (tx_hash);",
|
||||
]
|
||||
|
||||
|
||||
TXO = Table(
|
||||
'txo', metadata,
|
||||
Column('tx_hash', LargeBinary, ForeignKey(TX.columns.tx_hash)),
|
||||
Column('txo_hash', LargeBinary, primary_key=True),
|
||||
Column('address', Text),
|
||||
Column('position', SmallInteger),
|
||||
Column('amount', BigInteger),
|
||||
Column('height', Integer),
|
||||
Column('spent_height', Integer, server_default='0'),
|
||||
Column('script_offset', Integer),
|
||||
Column('script_length', Integer),
|
||||
Column('is_reserved', Boolean, server_default='0'),
|
||||
|
||||
# claims
|
||||
Column('txo_type', SmallInteger, server_default='0'),
|
||||
Column('claim_id', Text, nullable=True),
|
||||
Column('claim_hash', LargeBinary, nullable=True),
|
||||
Column('claim_name', Text, nullable=True),
|
||||
Column('channel_hash', LargeBinary, nullable=True), # claims in channel
|
||||
Column('signature', LargeBinary, nullable=True),
|
||||
Column('signature_digest', LargeBinary, nullable=True),
|
||||
|
||||
# channels
|
||||
Column('public_key', LargeBinary, nullable=True),
|
||||
Column('public_key_hash', LargeBinary, nullable=True),
|
||||
)
|
||||
|
||||
txo_join_account = TXO.join(AccountAddress, TXO.columns.address == AccountAddress.columns.address)
|
||||
|
||||
|
||||
pg_add_txo_constraints_and_indexes = [
|
||||
"ALTER TABLE txo ADD PRIMARY KEY (txo_hash);",
|
||||
# find appropriate channel public key for signing a content claim
|
||||
f"CREATE INDEX txo_channel_hash_by_height_desc_w_pub_key "
|
||||
f"ON txo (claim_hash, height desc) INCLUDE (public_key) "
|
||||
f"WHERE txo_type={TXO_TYPES['channel']};",
|
||||
# for calculating supports on a claim
|
||||
f"CREATE INDEX txo_unspent_supports ON txo (claim_hash) INCLUDE (amount) "
|
||||
f"WHERE spent_height = 0 AND txo_type={TXO_TYPES['support']};",
|
||||
# for finding modified claims in a block range
|
||||
f"CREATE INDEX txo_claim_changes "
|
||||
f"ON txo (height DESC) INCLUDE (claim_hash, txo_hash) "
|
||||
f"WHERE spent_height = 0 AND txo_type IN {tuple(CLAIM_TYPE_CODES)};",
|
||||
# for finding claims which need support totals re-calculated in a block range
|
||||
f"CREATE INDEX txo_added_supports_by_height ON txo (height DESC) "
|
||||
f"INCLUDE (claim_hash) WHERE txo_type={TXO_TYPES['support']};",
|
||||
f"CREATE INDEX txo_spent_supports_by_height ON txo (spent_height DESC) "
|
||||
f"INCLUDE (claim_hash) WHERE txo_type={TXO_TYPES['support']};",
|
||||
]
|
||||
|
||||
|
||||
TXI = Table(
|
||||
'txi', metadata,
|
||||
Column('tx_hash', LargeBinary, ForeignKey(TX.columns.tx_hash)),
|
||||
Column('txo_hash', LargeBinary, ForeignKey(TXO.columns.txo_hash), primary_key=True),
|
||||
Column('address', Text, nullable=True),
|
||||
Column('position', SmallInteger),
|
||||
Column('height', Integer),
|
||||
)
|
||||
|
||||
txi_join_account = TXI.join(AccountAddress, TXI.columns.address == AccountAddress.columns.address)
|
||||
|
||||
|
||||
pg_add_txi_constraints_and_indexes = [
|
||||
"ALTER TABLE txi ADD PRIMARY KEY (txo_hash);",
|
||||
]
|
||||
|
||||
|
||||
Claim = Table(
|
||||
'claim', metadata,
|
||||
Column('claim_hash', LargeBinary, primary_key=True),
|
||||
Column('claim_id', Text),
|
||||
Column('claim_name', Text),
|
||||
Column('normalized', Text),
|
||||
Column('address', Text),
|
||||
Column('txo_hash', LargeBinary, ForeignKey(TXO.columns.txo_hash)),
|
||||
Column('amount', BigInteger),
|
||||
Column('staked_amount', BigInteger),
|
||||
Column('timestamp', Integer), # last updated timestamp
|
||||
Column('creation_timestamp', Integer),
|
||||
Column('release_time', Integer, nullable=True),
|
||||
Column('height', Integer), # last updated height
|
||||
Column('creation_height', Integer),
|
||||
Column('activation_height', Integer),
|
||||
Column('expiration_height', Integer),
|
||||
Column('takeover_height', Integer, nullable=True),
|
||||
Column('is_controlling', Boolean),
|
||||
|
||||
# short_url: normalized#shortest-unique-claim_id
|
||||
Column('short_url', Text),
|
||||
# canonical_url: channel's-short_url/normalized#shortest-unique-claim_id-within-channel
|
||||
# canonical_url is computed dynamically
|
||||
|
||||
Column('title', Text, nullable=True),
|
||||
Column('author', Text, nullable=True),
|
||||
Column('description', Text, nullable=True),
|
||||
|
||||
Column('claim_type', SmallInteger),
|
||||
Column('claim_reposted_count', Integer, server_default='0'),
|
||||
Column('staked_support_count', Integer, server_default='0'),
|
||||
Column('staked_support_amount', BigInteger, server_default='0'),
|
||||
|
||||
# streams
|
||||
Column('stream_type', SmallInteger, nullable=True),
|
||||
Column('media_type', Text, nullable=True),
|
||||
Column('fee_amount', BigInteger, server_default='0'),
|
||||
Column('fee_currency', Text, nullable=True),
|
||||
Column('duration', Integer, nullable=True),
|
||||
|
||||
# reposts
|
||||
Column('reposted_claim_hash', LargeBinary, nullable=True),
|
||||
Column('reposted_count', Integer, server_default='0'),
|
||||
|
||||
# claims which are channels
|
||||
Column('signed_claim_count', Integer, server_default='0'),
|
||||
Column('signed_support_count', Integer, server_default='0'),
|
||||
|
||||
# claims which are inside channels
|
||||
Column('channel_hash', LargeBinary, nullable=True),
|
||||
Column('is_signature_valid', Boolean, nullable=True),
|
||||
|
||||
Column('trending_group', BigInteger, server_default='0'),
|
||||
Column('trending_mixed', BigInteger, server_default='0'),
|
||||
Column('trending_local', BigInteger, server_default='0'),
|
||||
Column('trending_global', BigInteger, server_default='0'),
|
||||
)
|
||||
|
||||
|
||||
Tag = Table(
|
||||
'tag', metadata,
|
||||
Column('claim_hash', LargeBinary),
|
||||
Column('tag', Text),
|
||||
)
|
||||
|
||||
|
||||
pg_add_claim_and_tag_constraints_and_indexes = [
|
||||
"ALTER TABLE claim ADD PRIMARY KEY (claim_hash);",
|
||||
# for checking if claim is up-to-date
|
||||
"CREATE UNIQUE INDEX claim_txo_hash ON claim (txo_hash);",
|
||||
# used by takeover process to reset winning claims
|
||||
"CREATE INDEX claim_normalized ON claim (normalized);",
|
||||
# ordering and search by release_time
|
||||
"CREATE INDEX claim_release_time ON claim (release_time DESC NULLs LAST);",
|
||||
# used to count()/sum() claims signed by channel
|
||||
"CREATE INDEX signed_content ON claim (channel_hash) "
|
||||
"INCLUDE (amount) WHERE is_signature_valid;",
|
||||
# basic tag indexes
|
||||
"ALTER TABLE tag ADD PRIMARY KEY (claim_hash, tag);",
|
||||
"CREATE INDEX tags ON tag (tag) INCLUDE (claim_hash);",
|
||||
]
|
||||
|
||||
|
||||
Support = Table(
|
||||
'support', metadata,
|
||||
|
||||
Column('txo_hash', LargeBinary, ForeignKey(TXO.columns.txo_hash), primary_key=True),
|
||||
Column('claim_hash', LargeBinary),
|
||||
Column('address', Text),
|
||||
Column('amount', BigInteger),
|
||||
Column('height', Integer),
|
||||
Column('timestamp', Integer),
|
||||
|
||||
# support metadata
|
||||
Column('emoji', Text),
|
||||
|
||||
# signed supports
|
||||
Column('channel_hash', LargeBinary, nullable=True),
|
||||
Column('signature', LargeBinary, nullable=True),
|
||||
Column('signature_digest', LargeBinary, nullable=True),
|
||||
Column('is_signature_valid', Boolean, nullable=True),
|
||||
)
|
||||
|
||||
|
||||
pg_add_support_constraints_and_indexes = [
|
||||
"ALTER TABLE support ADD PRIMARY KEY (txo_hash);",
|
||||
# used to count()/sum() supports signed by channel
|
||||
"CREATE INDEX signed_support ON support (channel_hash) "
|
||||
"INCLUDE (amount) WHERE is_signature_valid;",
|
||||
]
|
||||
|
||||
|
||||
Stake = Table(
|
||||
'stake', metadata,
|
||||
Column('claim_hash', LargeBinary),
|
||||
Column('height', Integer),
|
||||
Column('stake_min', BigInteger),
|
||||
Column('stake_max', BigInteger),
|
||||
Column('stake_sum', BigInteger),
|
||||
Column('stake_count', Integer),
|
||||
Column('stake_unique', Integer),
|
||||
)
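
# --- Illustrative aside (not part of the original file): creating this schema with SQLAlchemy and
# applying the PostgreSQL-only constraint/index statements. That those raw statements are executed
# separately, after the initial bulk load, is an assumption; the in-memory engine is hypothetical.
from sqlalchemy import create_engine as _demo_create_engine, text as _demo_text

_demo_engine = _demo_create_engine("sqlite:///:memory:")
metadata.create_all(_demo_engine)  # creates version, block, tx, txo, txi, claim, tag, support, stake
if _demo_engine.name == "postgresql":  # the ALTER/CREATE INDEX strings only apply on PostgreSQL
    with _demo_engine.begin() as _conn:
        for _statement in pg_add_txo_constraints_and_indexes:
            _conn.execute(_demo_text(_statement))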

lbry/db/utils.py  (new file, 173 lines)
@@ -0,0 +1,173 @@
from itertools import islice
|
||||
from typing import List, Union
|
||||
|
||||
from sqlalchemy import text, and_
|
||||
from sqlalchemy.sql.expression import Select, FunctionElement
|
||||
from sqlalchemy.types import Numeric
|
||||
from sqlalchemy.ext.compiler import compiles
|
||||
try:
|
||||
from sqlalchemy.dialects.postgresql import insert as pg_insert # pylint: disable=unused-import
|
||||
except ImportError:
|
||||
pg_insert = None
|
||||
|
||||
from .tables import AccountAddress
|
||||
|
||||
|
||||
class greatest(FunctionElement): # pylint: disable=invalid-name
|
||||
type = Numeric()
|
||||
name = 'greatest'
|
||||
|
||||
|
||||
@compiles(greatest)
|
||||
def default_greatest(element, compiler, **kw):
|
||||
return "greatest(%s)" % compiler.process(element.clauses, **kw)
|
||||
|
||||
|
||||
@compiles(greatest, 'sqlite')
|
||||
def sqlite_greatest(element, compiler, **kw):
|
||||
return "max(%s)" % compiler.process(element.clauses, **kw)
|
||||
|
||||
|
||||
class least(FunctionElement): # pylint: disable=invalid-name
|
||||
type = Numeric()
|
||||
name = 'least'
|
||||
|
||||
|
||||
@compiles(least)
|
||||
def default_least(element, compiler, **kw):
|
||||
return "least(%s)" % compiler.process(element.clauses, **kw)
|
||||
|
||||
|
||||
@compiles(least, 'sqlite')
|
||||
def sqlite_least(element, compiler, **kw):
|
||||
return "min(%s)" % compiler.process(element.clauses, **kw)
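
# --- Illustrative aside (not part of the original file): using the dialect-aware helpers in a
# query; the column choices here are illustrative. The expression compiles to greatest()/least()
# on PostgreSQL and to max()/min() on SQLite.
from sqlalchemy import select as _demo_select

_greatest_stmt = _demo_select(greatest(AccountAddress.c.n, AccountAddress.c.depth))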
|
||||
|
||||
|
||||
def chunk(rows, step):
|
||||
it, total = iter(rows), len(rows)
|
||||
for _ in range(0, total, step):
|
||||
yield list(islice(it, step))
|
||||
total -= step
|
||||
|
||||
|
||||
def constrain_single_or_list(constraints, column, value, convert=lambda x: x):
|
||||
if value is not None:
|
||||
if isinstance(value, list):
|
||||
value = [convert(v) for v in value]
|
||||
if len(value) == 1:
|
||||
constraints[column] = value[0]
|
||||
elif len(value) > 1:
|
||||
constraints[f"{column}__in"] = value
|
||||
else:
|
||||
constraints[column] = convert(value)
|
||||
return constraints
|
||||
|
||||
|
||||
def in_account_ids(account_ids: Union[List[str], str]):
|
||||
if isinstance(account_ids, list):
|
||||
if len(account_ids) > 1:
|
||||
return AccountAddress.c.account.in_(account_ids)
|
||||
account_ids = account_ids[0]
|
||||
return AccountAddress.c.account == account_ids
|
||||
|
||||
|
||||
def query(table, s: Select, **constraints) -> Select:
|
||||
limit = constraints.pop('limit', None)
|
||||
if limit is not None:
|
||||
s = s.limit(limit)
|
||||
|
||||
offset = constraints.pop('offset', None)
|
||||
if offset is not None:
|
||||
s = s.offset(offset)
|
||||
|
||||
order_by = constraints.pop('order_by', None)
|
||||
if order_by:
|
||||
if isinstance(order_by, str):
|
||||
s = s.order_by(text(order_by))
|
||||
elif isinstance(order_by, list):
|
||||
s = s.order_by(text(', '.join(order_by)))
|
||||
else:
|
||||
raise ValueError("order_by must be string or list")
|
||||
|
||||
group_by = constraints.pop('group_by', None)
|
||||
if group_by is not None:
|
||||
s = s.group_by(text(group_by))
|
||||
|
||||
account_ids = constraints.pop('account_ids', [])
|
||||
if account_ids:
|
||||
s = s.where(in_account_ids(account_ids))
|
||||
|
||||
if constraints:
|
||||
s = s.where(
|
||||
constraints_to_clause(table, constraints)
|
||||
)
|
||||
|
||||
return s
|
||||
|
||||
|
||||
def constraints_to_clause(tables, constraints):
|
||||
clause = []
|
||||
for key, constraint in constraints.items():
|
||||
if key.endswith('__not'):
|
||||
col, op = key[:-len('__not')], '__ne__'
|
||||
elif key.endswith('__is_null'):
|
||||
col = key[:-len('__is_null')]
|
||||
op = '__eq__'
|
||||
constraint = None
|
||||
elif key.endswith('__is_not_null'):
|
||||
col = key[:-len('__is_not_null')]
|
||||
op = '__ne__'
|
||||
constraint = None
|
||||
elif key.endswith('__lt'):
|
||||
col, op = key[:-len('__lt')], '__lt__'
|
||||
elif key.endswith('__lte'):
|
||||
col, op = key[:-len('__lte')], '__le__'
|
||||
elif key.endswith('__gt'):
|
||||
col, op = key[:-len('__gt')], '__gt__'
|
||||
elif key.endswith('__gte'):
|
||||
col, op = key[:-len('__gte')], '__ge__'
|
||||
elif key.endswith('__like'):
|
||||
col, op = key[:-len('__like')], 'like'
|
||||
elif key.endswith('__not_like'):
|
||||
col, op = key[:-len('__not_like')], 'notlike'
|
||||
elif key.endswith('__in') or key.endswith('__not_in'):
|
||||
if key.endswith('__in'):
|
||||
col, op, one_val_op = key[:-len('__in')], 'in_', '__eq__'
|
||||
else:
|
||||
col, op, one_val_op = key[:-len('__not_in')], 'notin_', '__ne__'
|
||||
if isinstance(constraint, Select):
|
||||
pass
|
||||
elif constraint:
|
||||
if isinstance(constraint, (list, set, tuple)):
|
||||
if len(constraint) == 1:
|
||||
op = one_val_op
|
||||
constraint = next(iter(constraint))
|
||||
elif isinstance(constraint, str):
|
||||
constraint = text(constraint)
|
||||
else:
|
||||
raise ValueError(f"{col} requires a list, set or string as constraint value.")
|
||||
else:
|
||||
continue
|
||||
else:
|
||||
col, op = key, '__eq__'
|
||||
attr = None
|
||||
if '.' in col:
|
||||
table_name, col = col.split('.')
|
||||
_table = None
|
||||
for table in tables:
|
||||
if table.name == table_name.lower():
|
||||
_table = table
|
||||
break
|
||||
if _table is not None:
|
||||
attr = getattr(_table.c, col)
|
||||
else:
|
||||
raise ValueError(f"Table '{table_name}' not available: {', '.join([t.name for t in tables])}.")
|
||||
else:
|
||||
for table in tables:
|
||||
attr = getattr(table.c, col, None)
|
||||
if attr is not None:
|
||||
break
|
||||
if attr is None:
|
||||
raise ValueError(f"Attribute '{col}' not found on tables: {', '.join([t.name for t in tables])}.")
|
||||
clause.append(getattr(attr, op)(constraint))
|
||||
return and_(*clause)
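
# --- Illustrative aside (not part of the original file): how the suffix-based constraint syntax
# above is typically used; the column values and limits are illustrative.
from sqlalchemy.future import select as _demo_select2

_query_stmt = query(
    [AccountAddress],
    _demo_select2(AccountAddress.c.address),
    chain=0,                 # plain equality
    n__gte=10,               # numeric comparison via the "__gte" suffix
    account__in=['a', 'b'],  # membership via the "__in" suffix
    order_by='address',
    limit=20,
)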
|
|
@ -3,11 +3,15 @@ import typing
|
|||
import logging
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.dht.node import Node
|
||||
from lbry.extras.daemon.storage import SQLiteStorage
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SQLiteStorage:
|
||||
pass
|
||||
|
||||
|
||||
class BlobAnnouncer:
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, node: 'Node', storage: 'SQLiteStorage'):
|
||||
self.loop = loop
|
||||
|
|
|
@@ -171,7 +171,7 @@ def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDatagram]:
 def make_compact_ip(address: str) -> bytearray:
     compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), address.split('.'), bytearray())
     if len(compact_ip) != 4:
-        raise ValueError(f"invalid IPv4 length")
+        raise ValueError("invalid IPv4 length")
     return compact_ip


@@ -180,7 +180,7 @@ def make_compact_address(node_id: bytes, address: str, port: int) -> bytearray:
     if not 0 < port < 65536:
         raise ValueError(f'Invalid port: {port}')
     if len(node_id) != constants.HASH_BITS // 8:
-        raise ValueError(f"invalid node node_id length")
+        raise ValueError("invalid node node_id length")
     return compact_ip + port.to_bytes(2, 'big') + node_id


@@ -191,5 +191,5 @@ def decode_compact_address(compact_address: bytes) -> typing.Tuple[bytes, str, int]:
     if not 0 < port < 65536:
         raise ValueError(f'Invalid port: {port}')
     if len(node_id) != constants.HASH_BITS // 8:
-        raise ValueError(f"invalid node node_id length")
+        raise ValueError("invalid node node_id length")
     return node_id, address, port

@@ -81,3 +81,6 @@ Code | Name | Message
 701 | InvalidExchangeRateResponse | Failed to get exchange rate from {source}: {reason}
 702 | CurrencyConversion | {message}
 703 | InvalidCurrency | Invalid currency: {currency} is not a supported currency.
+**8xx** | Lbrycrd | **Lbrycrd**
+801 | LbrycrdUnauthorized | Failed to authenticate with lbrycrd. Perhaps wrong username or password?
+811 | LbrycrdEventSubscription | Lbrycrd is not publishing '{event}' events.

@@ -398,3 +398,22 @@ class InvalidCurrencyError(CurrencyExchangeError):
     def __init__(self, currency):
         self.currency = currency
         super().__init__(f"Invalid currency: {currency} is not a supported currency.")
+
+
+class LbrycrdError(BaseError):
+    """
+    **Lbrycrd**
+    """
+
+
+class LbrycrdUnauthorizedError(LbrycrdError):
+
+    def __init__(self):
+        super().__init__("Failed to authenticate with lbrycrd. Perhaps wrong username or password?")
+
+
+class LbrycrdEventSubscriptionError(LbrycrdError):
+
+    def __init__(self, event):
+        self.event = event
+        super().__init__(f"Lbrycrd is not publishing '{event}' events.")

@@ -63,7 +63,7 @@ class ErrorClass:
     @staticmethod
     def get_fields(args):
         if len(args) > 1:
-            return f''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:])
+            return ''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:])
         return ''

     @staticmethod

@ -1,9 +1,17 @@
|
|||
import time
|
||||
import asyncio
|
||||
import threading
|
||||
import logging
|
||||
from queue import Empty
|
||||
from multiprocessing import Queue
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BroadcastSubscription:
|
||||
|
||||
def __init__(self, controller, on_data, on_error, on_done):
|
||||
def __init__(self, controller: 'EventController', on_data, on_error, on_done):
|
||||
self._controller = controller
|
||||
self._previous = self._next = None
|
||||
self._on_data = on_data
|
||||
|
@ -43,10 +51,10 @@ class BroadcastSubscription:
|
|||
self.is_closed = True
|
||||
|
||||
|
||||
class StreamController:
|
||||
class EventController:
|
||||
|
||||
def __init__(self, merge_repeated_events=False):
|
||||
self.stream = Stream(self)
|
||||
self.stream = EventStream(self)
|
||||
self._first_subscription = None
|
||||
self._last_subscription = None
|
||||
self._last_event = None
|
||||
|
@ -61,37 +69,36 @@ class StreamController:
|
|||
next_sub = self._first_subscription
|
||||
while next_sub is not None:
|
||||
subscription = next_sub
|
||||
next_sub = next_sub._next
|
||||
yield subscription
|
||||
next_sub = next_sub._next
|
||||
|
||||
def _notify_and_ensure_future(self, notify):
|
||||
tasks = []
|
||||
for subscription in self._iterate_subscriptions:
|
||||
maybe_coroutine = notify(subscription)
|
||||
if asyncio.iscoroutine(maybe_coroutine):
|
||||
tasks.append(maybe_coroutine)
|
||||
if tasks:
|
||||
return asyncio.ensure_future(asyncio.wait(tasks))
|
||||
else:
|
||||
f = asyncio.get_event_loop().create_future()
|
||||
f.set_result(None)
|
||||
return f
|
||||
async def _notify(self, notify, *args):
|
||||
try:
|
||||
maybe_coroutine = notify(*args)
|
||||
if maybe_coroutine is not None and asyncio.iscoroutine(maybe_coroutine):
|
||||
await maybe_coroutine
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
raise
|
||||
|
||||
def add(self, event):
|
||||
skip = self._merge_repeated and event == self._last_event
|
||||
async def add(self, event):
|
||||
if self._merge_repeated and event == self._last_event:
|
||||
return
|
||||
self._last_event = event
|
||||
return self._notify_and_ensure_future(
|
||||
lambda subscription: None if skip else subscription._add(event)
|
||||
)
|
||||
|
||||
def add_error(self, exception):
|
||||
return self._notify_and_ensure_future(
|
||||
lambda subscription: subscription._add_error(exception)
|
||||
)
|
||||
|
||||
def close(self):
|
||||
for subscription in self._iterate_subscriptions:
|
||||
subscription._close()
|
||||
await self._notify(subscription._add, event)
|
||||
|
||||
async def add_all(self, events):
|
||||
for event in events:
|
||||
await self.add(event)
|
||||
|
||||
async def add_error(self, exception):
|
||||
for subscription in self._iterate_subscriptions:
|
||||
await self._notify(subscription._add_error, exception)
|
||||
|
||||
async def close(self):
|
||||
for subscription in self._iterate_subscriptions:
|
||||
await self._notify(subscription._close)
|
||||
|
||||
def _cancel(self, subscription):
|
||||
previous = subscription._previous
|
||||
|
@ -104,7 +111,6 @@ class StreamController:
|
|||
self._last_subscription = previous
|
||||
else:
|
||||
next_sub._previous = previous
|
||||
subscription._next = subscription._previous = subscription
|
||||
|
||||
def _listen(self, on_data, on_error, on_done):
|
||||
subscription = BroadcastSubscription(self, on_data, on_error, on_done)
|
||||
|
@ -119,16 +125,16 @@ class StreamController:
|
|||
return subscription
|
||||
|
||||
|
||||
class Stream:
|
||||
class EventStream:
|
||||
|
||||
def __init__(self, controller):
|
||||
def __init__(self, controller: EventController):
|
||||
self._controller = controller
|
||||
|
||||
def listen(self, on_data, on_error=None, on_done=None):
|
||||
def listen(self, on_data, on_error=None, on_done=None) -> BroadcastSubscription:
|
||||
return self._controller._listen(on_data, on_error, on_done)
|
||||
|
||||
def where(self, condition) -> asyncio.Future:
|
||||
future = asyncio.get_event_loop().create_future()
|
||||
future = asyncio.get_running_loop().create_future()
|
||||
|
||||
def where_test(value):
|
||||
if condition(value):
|
||||
|
@ -142,14 +148,31 @@ class Stream:
|
|||
return future
|
||||
|
||||
@property
|
||||
def first(self):
|
||||
future = asyncio.get_event_loop().create_future()
|
||||
def first(self) -> asyncio.Future:
|
||||
future = asyncio.get_running_loop().create_future()
|
||||
subscription = self.listen(
|
||||
lambda value: not future.done() and self._cancel_and_callback(subscription, future, value),
|
||||
lambda exception: not future.done() and self._cancel_and_error(subscription, future, exception)
|
||||
)
|
||||
return future
|
||||
|
||||
@property
|
||||
def last(self) -> asyncio.Future:
|
||||
future = asyncio.get_running_loop().create_future()
|
||||
value = None
|
||||
|
||||
def update_value(_value):
|
||||
nonlocal value
|
||||
value = _value
|
||||
|
||||
subscription = self.listen(
|
||||
update_value,
|
||||
lambda exception: not future.done() and self._cancel_and_error(subscription, future, exception),
|
||||
lambda: not future.done() and self._cancel_and_callback(subscription, future, value),
|
||||
)
|
||||
|
||||
return future
|
||||
|
||||
@staticmethod
|
||||
def _cancel_and_callback(subscription: BroadcastSubscription, future: asyncio.Future, value):
|
||||
subscription.cancel()
|
||||
|
@ -159,3 +182,66 @@ class Stream:
|
|||
def _cancel_and_error(subscription: BroadcastSubscription, future: asyncio.Future, exception):
|
||||
subscription.cancel()
|
||||
future.set_exception(exception)
|
||||
|
||||
|
||||
class EventQueuePublisher(threading.Thread):
|
||||
|
||||
STOP = 'STOP'
|
||||
|
||||
def __init__(self, queue: Queue, event_controller: EventController):
|
||||
super().__init__()
|
||||
self.queue = queue
|
||||
self.event_controller = event_controller
|
||||
self.loop = None
|
||||
|
||||
@staticmethod
|
||||
def message_to_event(message):
|
||||
return message
|
||||
|
||||
def start(self):
|
||||
self.loop = asyncio.get_running_loop()
|
||||
super().start()
|
||||
|
||||
def run(self):
|
||||
queue_get_timeout = 0.2
|
||||
buffer_drain_size = 100
|
||||
buffer_drain_timeout = 0.1
|
||||
|
||||
buffer = []
|
||||
last_drained_ms_ago = time.perf_counter()
|
||||
while True:
|
||||
|
||||
try:
|
||||
msg = self.queue.get(timeout=queue_get_timeout)
|
||||
if msg != self.STOP:
|
||||
buffer.append(msg)
|
||||
except Empty:
|
||||
msg = None
|
||||
|
||||
drain = any((
|
||||
len(buffer) >= buffer_drain_size,
|
||||
(time.perf_counter() - last_drained_ms_ago) >= buffer_drain_timeout,
|
||||
msg == self.STOP
|
||||
))
|
||||
if drain and buffer:
|
||||
asyncio.run_coroutine_threadsafe(
|
||||
self.event_controller.add_all([
|
||||
self.message_to_event(msg) for msg in buffer
|
||||
]), self.loop
|
||||
)
|
||||
buffer.clear()
|
||||
last_drained_ms_ago = time.perf_counter()
|
||||
|
||||
if msg == self.STOP:
|
||||
return
|
||||
|
||||
def stop(self):
|
||||
self.queue.put(self.STOP)
|
||||
if self.is_alive():
|
||||
self.join()
|
||||
|
||||
def __enter__(self):
|
||||
self.start()
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.stop()
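
# --- Illustrative aside (not part of the original file): wiring an EventQueuePublisher to an
# EventController; the subscriber callback and event payload are hypothetical, and start() must be
# called from within a running asyncio event loop. Messages put on the multiprocessing queue are
# drained by the background thread and re-emitted on the event stream via run_coroutine_threadsafe.
#
#     queue = multiprocessing.Queue()
#     controller = EventController()
#     controller.stream.listen(lambda e: print("event:", e))
#     with EventQueuePublisher(queue, controller):
#         queue.put({"event": "demo", "data": {}})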
|
|
@ -1,233 +0,0 @@
|
|||
import asyncio
|
||||
import collections
|
||||
import logging
|
||||
import typing
|
||||
import aiohttp
|
||||
from lbry import utils
|
||||
from lbry.conf import Config
|
||||
from lbry.extras import system_info
|
||||
|
||||
ANALYTICS_ENDPOINT = 'https://api.segment.io/v1'
|
||||
ANALYTICS_TOKEN = 'Ax5LZzR1o3q3Z3WjATASDwR5rKyHH0qOIRIbLmMXn2H='
|
||||
|
||||
# Things We Track
|
||||
SERVER_STARTUP = 'Server Startup'
|
||||
SERVER_STARTUP_SUCCESS = 'Server Startup Success'
|
||||
SERVER_STARTUP_ERROR = 'Server Startup Error'
|
||||
DOWNLOAD_STARTED = 'Download Started'
|
||||
DOWNLOAD_ERRORED = 'Download Errored'
|
||||
DOWNLOAD_FINISHED = 'Download Finished'
|
||||
HEARTBEAT = 'Heartbeat'
|
||||
CLAIM_ACTION = 'Claim Action' # publish/create/update/abandon
|
||||
NEW_CHANNEL = 'New Channel'
|
||||
CREDITS_SENT = 'Credits Sent'
|
||||
UPNP_SETUP = "UPnP Setup"
|
||||
|
||||
BLOB_BYTES_UPLOADED = 'Blob Bytes Uploaded'
|
||||
|
||||
|
||||
TIME_TO_FIRST_BYTES = "Time To First Bytes"
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _event_properties(installation_id: str, session_id: str,
|
||||
event_properties: typing.Optional[typing.Dict]) -> typing.Dict:
|
||||
properties = {
|
||||
'lbry_id': installation_id,
|
||||
'session_id': session_id,
|
||||
}
|
||||
properties.update(event_properties or {})
|
||||
return properties
|
||||
|
||||
|
||||
def _download_properties(conf: Config, external_ip: str, resolve_duration: float,
|
||||
total_duration: typing.Optional[float], download_id: str, name: str,
|
||||
outpoint: str, active_peer_count: typing.Optional[int],
|
||||
tried_peers_count: typing.Optional[int], connection_failures_count: typing.Optional[int],
|
||||
added_fixed_peers: bool, fixed_peer_delay: float, sd_hash: str,
|
||||
sd_download_duration: typing.Optional[float] = None,
|
||||
head_blob_hash: typing.Optional[str] = None,
|
||||
head_blob_length: typing.Optional[int] = None,
|
||||
head_blob_download_duration: typing.Optional[float] = None,
|
||||
error: typing.Optional[str] = None, error_msg: typing.Optional[str] = None,
|
||||
wallet_server: typing.Optional[str] = None) -> typing.Dict:
|
||||
return {
|
||||
"external_ip": external_ip,
|
||||
"download_id": download_id,
|
||||
"total_duration": round(total_duration, 4),
|
||||
"resolve_duration": None if not resolve_duration else round(resolve_duration, 4),
|
||||
"error": error,
|
||||
"error_message": error_msg,
|
||||
'name': name,
|
||||
"outpoint": outpoint,
|
||||
|
||||
"node_rpc_timeout": conf.node_rpc_timeout,
|
||||
"peer_connect_timeout": conf.peer_connect_timeout,
|
||||
"blob_download_timeout": conf.blob_download_timeout,
|
||||
"use_fixed_peers": len(conf.reflector_servers) > 0,
|
||||
"fixed_peer_delay": fixed_peer_delay,
|
||||
"added_fixed_peers": added_fixed_peers,
|
||||
"active_peer_count": active_peer_count,
|
||||
"tried_peers_count": tried_peers_count,
|
||||
|
||||
"sd_blob_hash": sd_hash,
|
||||
"sd_blob_duration": None if not sd_download_duration else round(sd_download_duration, 4),
|
||||
|
||||
"head_blob_hash": head_blob_hash,
|
||||
"head_blob_length": head_blob_length,
|
||||
"head_blob_duration": None if not head_blob_download_duration else round(head_blob_download_duration, 4),
|
||||
|
||||
"connection_failures_count": connection_failures_count,
|
||||
"wallet_server": wallet_server
|
||||
}
|
||||
|
||||
|
||||
def _make_context(platform):
|
||||
# see https://segment.com/docs/spec/common/#context
|
||||
# they say they'll ignore fields outside the spec, but evidently they don't
|
||||
context = {
|
||||
'app': {
|
||||
'version': platform['lbrynet_version'],
|
||||
'build': platform['build'],
|
||||
},
|
||||
# TODO: expand os info to give linux/osx specific info
|
||||
'os': {
|
||||
'name': platform['os_system'],
|
||||
'version': platform['os_release']
|
||||
},
|
||||
}
|
||||
if 'desktop' in platform and 'distro' in platform:
|
||||
context['os']['desktop'] = platform['desktop']
|
||||
context['os']['distro'] = platform['distro']
|
||||
return context
|
||||
|
||||
|
||||
class AnalyticsManager:
|
||||
def __init__(self, conf: Config, installation_id: str, session_id: str):
|
||||
self.conf = conf
|
||||
self.cookies = {}
|
||||
self.url = ANALYTICS_ENDPOINT
|
||||
self._write_key = utils.deobfuscate(ANALYTICS_TOKEN)
|
||||
self._tracked_data = collections.defaultdict(list)
|
||||
self.context = _make_context(system_info.get_platform())
|
||||
self.installation_id = installation_id
|
||||
self.session_id = session_id
|
||||
self.task: typing.Optional[asyncio.Task] = None
|
||||
self.external_ip: typing.Optional[str] = None
|
||||
|
||||
@property
|
||||
def enabled(self):
|
||||
return self.conf.share_usage_data
|
||||
|
||||
@property
|
||||
def is_started(self):
|
||||
return self.task is not None
|
||||
|
||||
async def start(self):
|
||||
if self.task is None:
|
||||
self.task = asyncio.create_task(self.run())
|
||||
|
||||
async def run(self):
|
||||
while True:
|
||||
if self.enabled:
|
||||
self.external_ip = await utils.get_external_ip()
|
||||
await self._send_heartbeat()
|
||||
await asyncio.sleep(1800)
|
||||
|
||||
def stop(self):
|
||||
if self.task is not None and not self.task.done():
|
||||
self.task.cancel()
|
||||
|
||||
async def _post(self, data: typing.Dict):
|
||||
request_kwargs = {
|
||||
'method': 'POST',
|
||||
'url': self.url + '/track',
|
||||
'headers': {'Connection': 'Close'},
|
||||
'auth': aiohttp.BasicAuth(self._write_key, ''),
|
||||
'json': data,
|
||||
'cookies': self.cookies
|
||||
}
|
||||
try:
|
||||
async with utils.aiohttp_request(**request_kwargs) as response:
|
||||
self.cookies.update(response.cookies)
|
||||
except Exception as e:
|
||||
log.debug('Encountered an exception while POSTing to %s: ', self.url + '/track', exc_info=e)
|
||||
|
||||
async def track(self, event: typing.Dict):
|
||||
"""Send a single tracking event"""
|
||||
if self.enabled:
|
||||
log.debug('Sending track event: %s', event)
|
||||
await self._post(event)
|
||||
|
||||
async def send_upnp_setup_success_fail(self, success, status):
|
||||
await self.track(
|
||||
self._event(UPNP_SETUP, {
|
||||
'success': success,
|
||||
'status': status,
|
||||
})
|
||||
)
|
||||
|
||||
async def send_server_startup(self):
|
||||
await self.track(self._event(SERVER_STARTUP))
|
||||
|
||||
async def send_server_startup_success(self):
|
||||
await self.track(self._event(SERVER_STARTUP_SUCCESS))
|
||||
|
||||
async def send_server_startup_error(self, message):
|
||||
await self.track(self._event(SERVER_STARTUP_ERROR, {'message': message}))
|
||||
|
||||
async def send_time_to_first_bytes(self, resolve_duration: typing.Optional[float],
|
||||
total_duration: typing.Optional[float], download_id: str,
|
||||
name: str, outpoint: typing.Optional[str],
|
||||
found_peers_count: typing.Optional[int],
|
||||
tried_peers_count: typing.Optional[int],
|
||||
connection_failures_count: typing.Optional[int],
|
||||
added_fixed_peers: bool,
|
||||
fixed_peers_delay: float, sd_hash: str,
|
||||
sd_download_duration: typing.Optional[float] = None,
|
||||
head_blob_hash: typing.Optional[str] = None,
|
||||
head_blob_length: typing.Optional[int] = None,
|
||||
head_blob_duration: typing.Optional[int] = None,
|
||||
error: typing.Optional[str] = None,
|
||||
error_msg: typing.Optional[str] = None,
|
||||
wallet_server: typing.Optional[str] = None):
|
||||
await self.track(self._event(TIME_TO_FIRST_BYTES, _download_properties(
|
||||
self.conf, self.external_ip, resolve_duration, total_duration, download_id, name, outpoint,
|
||||
found_peers_count, tried_peers_count, connection_failures_count, added_fixed_peers, fixed_peers_delay,
|
||||
sd_hash, sd_download_duration, head_blob_hash, head_blob_length, head_blob_duration, error, error_msg,
|
||||
wallet_server
|
||||
)))
|
||||
|
||||
async def send_download_finished(self, download_id, name, sd_hash):
|
||||
await self.track(
|
||||
self._event(
|
||||
DOWNLOAD_FINISHED, {
|
||||
'download_id': download_id,
|
||||
'name': name,
|
||||
'stream_info': sd_hash
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
async def send_claim_action(self, action):
|
||||
await self.track(self._event(CLAIM_ACTION, {'action': action}))
|
||||
|
||||
async def send_new_channel(self):
|
||||
await self.track(self._event(NEW_CHANNEL))
|
||||
|
||||
async def send_credits_sent(self):
|
||||
await self.track(self._event(CREDITS_SENT))
|
||||
|
||||
async def _send_heartbeat(self):
|
||||
await self.track(self._event(HEARTBEAT))
|
||||
|
||||
def _event(self, event, properties: typing.Optional[typing.Dict] = None):
|
||||
return {
|
||||
'userId': 'lbry',
|
||||
'event': event,
|
||||
'properties': _event_properties(self.installation_id, self.session_id, properties),
|
||||
'context': self.context,
|
||||
'timestamp': utils.isonow()
|
||||
}
|
|
@ -1,6 +0,0 @@
|
|||
from lbry.conf import Config
|
||||
from lbry.extras.cli import execute_command
|
||||
|
||||
|
||||
def daemon_rpc(conf: Config, method: str, **kwargs):
|
||||
return execute_command(conf, method, kwargs, callback=lambda data: data)
|
|
@ -1,66 +0,0 @@
|
|||
import logging
|
||||
import time
|
||||
import hashlib
|
||||
import binascii
|
||||
|
||||
import ecdsa
|
||||
from lbry import utils
|
||||
from lbry.crypto.hash import sha256
|
||||
from lbry.wallet.transaction import Output
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_encoded_signature(signature):
|
||||
signature = signature.encode() if isinstance(signature, str) else signature
|
||||
r = int(signature[:int(len(signature) / 2)], 16)
|
||||
s = int(signature[int(len(signature) / 2):], 16)
|
||||
return ecdsa.util.sigencode_der(r, s, len(signature) * 4)
|
||||
|
||||
|
||||
def cid2hash(claim_id: str) -> bytes:
|
||||
return binascii.unhexlify(claim_id.encode())[::-1]
|
||||
|
||||
|
||||
def is_comment_signed_by_channel(comment: dict, channel: Output, abandon=False):
|
||||
if isinstance(channel, Output):
|
||||
try:
|
||||
signing_field = comment['comment_id'] if abandon else comment['comment']
|
||||
pieces = [
|
||||
comment['signing_ts'].encode(),
|
||||
cid2hash(comment['channel_id']),
|
||||
signing_field.encode()
|
||||
]
|
||||
return Output.is_signature_valid(
|
||||
get_encoded_signature(comment['signature']),
|
||||
sha256(b''.join(pieces)),
|
||||
channel.claim.channel.public_key_bytes
|
||||
)
|
||||
except KeyError:
|
||||
pass
|
||||
return False
|
||||
|
||||
|
||||
def sign_comment(comment: dict, channel: Output, abandon=False):
|
||||
timestamp = str(int(time.time()))
|
||||
signing_field = comment['comment_id'] if abandon else comment['comment']
|
||||
pieces = [timestamp.encode(), channel.claim_hash, signing_field.encode()]
|
||||
digest = sha256(b''.join(pieces))
|
||||
signature = channel.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
|
||||
comment.update({
|
||||
'signature': binascii.hexlify(signature).decode(),
|
||||
'signing_ts': timestamp
|
||||
})
|
||||
|
||||
|
||||
async def jsonrpc_post(url: str, method: str, params: dict = None, **kwargs) -> any:
|
||||
params = params or {}
|
||||
params.update(kwargs)
|
||||
json_body = {'jsonrpc': '2.0', 'id': None, 'method': method, 'params': params}
|
||||
async with utils.aiohttp_request('POST', url, json=json_body) as response:
|
||||
try:
|
||||
result = await response.json()
|
||||
return result['result'] if 'result' in result else result
|
||||
except Exception as cte:
|
||||
log.exception('Unable to decode response from server: %s', cte)
|
||||
return await response.text()
|
|
@ -1,75 +0,0 @@
|
|||
import asyncio
|
||||
import logging
|
||||
from lbry.conf import Config
|
||||
from lbry.extras.daemon.componentmanager import ComponentManager
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ComponentType(type):
|
||||
def __new__(mcs, name, bases, newattrs):
|
||||
klass = type.__new__(mcs, name, bases, newattrs)
|
||||
if name != "Component" and newattrs['__module__'] != 'lbry.testcase':
|
||||
ComponentManager.default_component_classes[klass.component_name] = klass
|
||||
return klass
|
||||
|
||||
|
||||
class Component(metaclass=ComponentType):
|
||||
"""
|
||||
lbry-daemon component helper
|
||||
|
||||
Inheriting classes will be automatically registered with the ComponentManager and must implement setup and stop
|
||||
methods
|
||||
"""
|
||||
|
||||
depends_on = []
|
||||
component_name = None
|
||||
|
||||
def __init__(self, component_manager):
|
||||
self.conf: Config = component_manager.conf
|
||||
self.component_manager = component_manager
|
||||
self._running = False
|
||||
|
||||
def __lt__(self, other):
|
||||
return self.component_name < other.component_name
|
||||
|
||||
@property
|
||||
def running(self):
|
||||
return self._running
|
||||
|
||||
async def get_status(self):
|
||||
return
|
||||
|
||||
async def start(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
async def stop(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
@property
|
||||
def component(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
async def _setup(self):
|
||||
try:
|
||||
result = await self.start()
|
||||
self._running = True
|
||||
return result
|
||||
except asyncio.CancelledError:
|
||||
log.info("Cancelled setup of %s component", self.__class__.__name__)
|
||||
raise
|
||||
except Exception as err:
|
||||
log.exception("Error setting up %s", self.component_name or self.__class__.__name__)
|
||||
raise err
|
||||
|
||||
async def _stop(self):
|
||||
try:
|
||||
result = await self.stop()
|
||||
self._running = False
|
||||
return result
|
||||
except asyncio.CancelledError:
|
||||
log.info("Cancelled stop of %s component", self.__class__.__name__)
|
||||
raise
|
||||
except Exception as err:
|
||||
log.exception("Error stopping %s", self.__class__.__name__)
|
||||
raise err
|
|
@ -1,171 +0,0 @@
|
|||
import logging
|
||||
import asyncio
|
||||
from lbry.conf import Config
|
||||
from lbry.error import ComponentStartConditionNotMetError
|
||||
from lbry.dht.peer import PeerManager
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RegisteredConditions:
|
||||
conditions = {}
|
||||
|
||||
|
||||
class RequiredConditionType(type):
|
||||
def __new__(mcs, name, bases, newattrs):
|
||||
klass = type.__new__(mcs, name, bases, newattrs)
|
||||
if name != "RequiredCondition":
|
||||
if klass.name in RegisteredConditions.conditions:
|
||||
raise SyntaxError("already have a component registered for \"%s\"" % klass.name)
|
||||
RegisteredConditions.conditions[klass.name] = klass
|
||||
return klass
|
||||
|
||||
|
||||
class RequiredCondition(metaclass=RequiredConditionType):
|
||||
name = ""
|
||||
component = ""
|
||||
message = ""
|
||||
|
||||
@staticmethod
|
||||
def evaluate(component):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class ComponentManager:
|
||||
default_component_classes = {}
|
||||
|
||||
def __init__(self, conf: Config, analytics_manager=None, skip_components=None,
|
||||
peer_manager=None, **override_components):
|
||||
self.conf = conf
|
||||
self.skip_components = skip_components or []
|
||||
self.loop = asyncio.get_event_loop()
|
||||
self.analytics_manager = analytics_manager
|
||||
self.component_classes = {}
|
||||
self.components = set()
|
||||
self.started = asyncio.Event(loop=self.loop)
|
||||
self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop())
|
||||
|
||||
for component_name, component_class in self.default_component_classes.items():
|
||||
if component_name in override_components:
|
||||
component_class = override_components.pop(component_name)
|
||||
if component_name not in self.skip_components:
|
||||
self.component_classes[component_name] = component_class
|
||||
|
||||
if override_components:
|
||||
raise SyntaxError("unexpected components: %s" % override_components)
|
||||
|
||||
for component_class in self.component_classes.values():
|
||||
self.components.add(component_class(self))
|
||||
|
||||
def evaluate_condition(self, condition_name):
|
||||
if condition_name not in RegisteredConditions.conditions:
|
||||
raise NameError(condition_name)
|
||||
condition = RegisteredConditions.conditions[condition_name]
|
||||
try:
|
||||
component = self.get_component(condition.component)
|
||||
result = condition.evaluate(component)
|
||||
except Exception:
|
||||
log.exception('failed to evaluate condition:')
|
||||
result = False
|
||||
return result, "" if result else condition.message
|
||||
|
||||
def sort_components(self, reverse=False):
|
||||
"""
|
||||
Sort components by requirements
|
||||
"""
|
||||
steps = []
|
||||
staged = set()
|
||||
components = set(self.components)
|
||||
|
||||
# components with no requirements
|
||||
step = []
|
||||
for component in set(components):
|
||||
if not component.depends_on:
|
||||
step.append(component)
|
||||
staged.add(component.component_name)
|
||||
components.remove(component)
|
||||
|
||||
if step:
|
||||
step.sort()
|
||||
steps.append(step)
|
||||
|
||||
while components:
|
||||
step = []
|
||||
to_stage = set()
|
||||
for component in set(components):
|
||||
reqs_met = 0
|
||||
for needed in component.depends_on:
|
||||
if needed in staged:
|
||||
reqs_met += 1
|
||||
if reqs_met == len(component.depends_on):
|
||||
step.append(component)
|
||||
to_stage.add(component.component_name)
|
||||
components.remove(component)
|
||||
if step:
|
||||
step.sort()
|
||||
staged.update(to_stage)
|
||||
steps.append(step)
|
||||
elif components:
|
||||
raise ComponentStartConditionNotMetError(components)
|
||||
if reverse:
|
||||
steps.reverse()
|
||||
return steps
|
||||
|
||||
async def start(self):
|
||||
""" Start Components in sequence sorted by requirements """
|
||||
for stage in self.sort_components():
|
||||
needing_start = [
|
||||
component._setup() for component in stage if not component.running
|
||||
]
|
||||
if needing_start:
|
||||
await asyncio.wait(needing_start)
|
||||
self.started.set()
|
||||
|
||||
async def stop(self):
|
||||
"""
|
||||
Stop Components in reversed startup order
|
||||
"""
|
||||
stages = self.sort_components(reverse=True)
|
||||
for stage in stages:
|
||||
needing_stop = [
|
||||
component._stop() for component in stage if component.running
|
||||
]
|
||||
if needing_stop:
|
||||
await asyncio.wait(needing_stop)
|
||||
|
||||
def all_components_running(self, *component_names):
|
||||
"""
|
||||
Check if components are running
|
||||
|
||||
:return: (bool) True if all specified components are running
|
||||
"""
|
||||
components = {component.component_name: component for component in self.components}
|
||||
for component in component_names:
|
||||
if component not in components:
|
||||
raise NameError("%s is not a known Component" % component)
|
||||
if not components[component].running:
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_components_status(self):
|
||||
"""
|
||||
List status of all the components, whether they are running or not
|
||||
|
||||
:return: (dict) {(str) component_name: (bool) True is running else False}
|
||||
"""
|
||||
return {
|
||||
component.component_name: component.running
|
||||
for component in self.components
|
||||
}
|
||||
|
||||
def get_actual_component(self, component_name):
|
||||
for component in self.components:
|
||||
if component.component_name == component_name:
|
||||
return component
|
||||
raise NameError(component_name)
|
||||
|
||||
def get_component(self, component_name):
|
||||
return self.get_actual_component(component_name).component
|
||||
|
||||
def has_component(self, component_name):
|
||||
return any(component for component in self.components if component_name == component.component_name)
|
|
@ -1,553 +0,0 @@
|
|||
import math
|
||||
import os
|
||||
import asyncio
|
||||
import logging
|
||||
import binascii
|
||||
import typing
|
||||
import base58
|
||||
|
||||
from aioupnp import __version__ as aioupnp_version
|
||||
from aioupnp.upnp import UPnP
|
||||
from aioupnp.fault import UPnPError
|
||||
|
||||
from lbry import utils
|
||||
from lbry.dht.node import Node
|
||||
from lbry.dht.peer import is_valid_public_ipv4
|
||||
from lbry.dht.blob_announcer import BlobAnnouncer
|
||||
from lbry.blob.blob_manager import BlobManager
|
||||
from lbry.blob_exchange.server import BlobServer
|
||||
from lbry.stream.stream_manager import StreamManager
|
||||
from lbry.extras.daemon.component import Component
|
||||
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
|
||||
from lbry.extras.daemon.storage import SQLiteStorage
|
||||
from lbry.wallet import WalletManager
|
||||
from lbry.wallet.usage_payment import WalletServerPayer
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# settings must be initialized before this file is imported
|
||||
|
||||
DATABASE_COMPONENT = "database"
|
||||
BLOB_COMPONENT = "blob_manager"
|
||||
WALLET_COMPONENT = "wallet"
|
||||
WALLET_SERVER_PAYMENTS_COMPONENT = "wallet_server_payments"
|
||||
DHT_COMPONENT = "dht"
|
||||
HASH_ANNOUNCER_COMPONENT = "hash_announcer"
|
||||
STREAM_MANAGER_COMPONENT = "stream_manager"
|
||||
PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
|
||||
UPNP_COMPONENT = "upnp"
|
||||
EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
|
||||
|
||||
|
||||
class DatabaseComponent(Component):
|
||||
component_name = DATABASE_COMPONENT
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.storage = None
|
||||
|
||||
@property
|
||||
def component(self):
|
||||
return self.storage
|
||||
|
||||
@staticmethod
|
||||
def get_current_db_revision():
|
||||
return 14
|
||||
|
||||
@property
|
||||
def revision_filename(self):
|
||||
return os.path.join(self.conf.data_dir, 'db_revision')
|
||||
|
||||
def _write_db_revision_file(self, version_num):
|
||||
with open(self.revision_filename, mode='w') as db_revision:
|
||||
db_revision.write(str(version_num))
|
||||
|
||||
async def start(self):
|
||||
# check directories exist, create them if they don't
|
||||
log.info("Loading databases")
|
||||
|
||||
if not os.path.exists(self.revision_filename):
|
||||
log.info("db_revision file not found. Creating it")
|
||||
self._write_db_revision_file(self.get_current_db_revision())
|
||||
|
||||
# check the db migration and run any needed migrations
|
||||
with open(self.revision_filename, "r") as revision_read_handle:
|
||||
old_revision = int(revision_read_handle.read().strip())
|
||||
|
||||
if old_revision > self.get_current_db_revision():
|
||||
raise Exception('This version of lbrynet is not compatible with the database\n'
|
||||
'Your database is revision %i, expected %i' %
|
||||
(old_revision, self.get_current_db_revision()))
|
||||
if old_revision < self.get_current_db_revision():
|
||||
from lbry.extras.daemon.migrator import dbmigrator # pylint: disable=import-outside-toplevel
|
||||
log.info("Upgrading your databases (revision %i to %i)", old_revision, self.get_current_db_revision())
|
||||
await asyncio.get_event_loop().run_in_executor(
|
||||
None, dbmigrator.migrate_db, self.conf, old_revision, self.get_current_db_revision()
|
||||
)
|
||||
self._write_db_revision_file(self.get_current_db_revision())
|
||||
log.info("Finished upgrading the databases.")
|
||||
|
||||
self.storage = SQLiteStorage(
|
||||
self.conf, os.path.join(self.conf.data_dir, "lbrynet.sqlite")
|
||||
)
|
||||
await self.storage.open()
|
||||
|
||||
async def stop(self):
|
||||
await self.storage.close()
|
||||
self.storage = None
|
||||
|
||||
|
||||
class WalletComponent(Component):
|
||||
component_name = WALLET_COMPONENT
|
||||
depends_on = [DATABASE_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.wallet_manager = None
|
||||
|
||||
@property
|
||||
def component(self):
|
||||
return self.wallet_manager
|
||||
|
||||
async def get_status(self):
|
||||
if self.wallet_manager is None:
|
||||
return
|
||||
session_pool = self.wallet_manager.ledger.network.session_pool
|
||||
sessions = session_pool.sessions
|
||||
connected = None
|
||||
if self.wallet_manager.ledger.network.client:
|
||||
addr_and_port = self.wallet_manager.ledger.network.client.server_address_and_port
|
||||
if addr_and_port:
|
||||
connected = f"{addr_and_port[0]}:{addr_and_port[1]}"
|
||||
result = {
|
||||
'connected': connected,
|
||||
'connected_features': self.wallet_manager.ledger.network.server_features,
|
||||
'servers': [
|
||||
{
|
||||
'host': session.server[0],
|
||||
'port': session.server[1],
|
||||
'latency': session.connection_latency,
|
||||
'availability': session.available,
|
||||
} for session in sessions
|
||||
],
|
||||
'known_servers': len(sessions),
|
||||
'available_servers': len(list(session_pool.available_sessions))
|
||||
}
|
||||
|
||||
if self.wallet_manager.ledger.network.remote_height:
|
||||
local_height = self.wallet_manager.ledger.local_height_including_downloaded_height
|
||||
disk_height = len(self.wallet_manager.ledger.headers)
|
||||
remote_height = self.wallet_manager.ledger.network.remote_height
|
||||
download_height, target_height = local_height - disk_height, remote_height - disk_height
|
||||
if target_height > 0:
|
||||
progress = min(max(math.ceil(float(download_height) / float(target_height) * 100), 0), 100)
|
||||
else:
|
||||
progress = 100
|
||||
best_hash = await self.wallet_manager.get_best_blockhash()
|
||||
result.update({
|
||||
'headers_synchronization_progress': progress,
|
||||
'blocks': max(local_height, 0),
|
||||
'blocks_behind': max(remote_height - local_height, 0),
|
||||
'best_blockhash': best_hash,
|
||||
})
|
||||
|
||||
return result
|
||||
|
||||
async def start(self):
|
||||
log.info("Starting wallet")
|
||||
self.wallet_manager = await WalletManager.from_lbrynet_config(self.conf)
|
||||
await self.wallet_manager.start()
|
||||
|
||||
async def stop(self):
|
||||
await self.wallet_manager.stop()
|
||||
self.wallet_manager = None
|
||||
|
||||
|
||||
class WalletServerPaymentsComponent(Component):
|
||||
component_name = WALLET_SERVER_PAYMENTS_COMPONENT
|
||||
depends_on = [WALLET_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.usage_payment_service = WalletServerPayer(
|
||||
max_fee=self.conf.max_wallet_server_fee, analytics_manager=self.component_manager.analytics_manager,
|
||||
)
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[WalletServerPayer]:
|
||||
return self.usage_payment_service
|
||||
|
||||
async def start(self):
|
||||
wallet_manager = self.component_manager.get_component(WALLET_COMPONENT)
|
||||
await self.usage_payment_service.start(wallet_manager.ledger, wallet_manager.default_wallet)
|
||||
|
||||
async def stop(self):
|
||||
await self.usage_payment_service.stop()
|
||||
|
||||
async def get_status(self):
|
||||
return {
|
||||
'max_fee': self.usage_payment_service.max_fee,
|
||||
'running': self.usage_payment_service.running
|
||||
}
|
||||
|
||||
|
||||
class BlobComponent(Component):
|
||||
component_name = BLOB_COMPONENT
|
||||
depends_on = [DATABASE_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.blob_manager: typing.Optional[BlobManager] = None
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[BlobManager]:
|
||||
return self.blob_manager
|
||||
|
||||
async def start(self):
|
||||
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
||||
data_store = None
|
||||
if DHT_COMPONENT not in self.component_manager.skip_components:
|
||||
dht_node: Node = self.component_manager.get_component(DHT_COMPONENT)
|
||||
if dht_node:
|
||||
data_store = dht_node.protocol.data_store
|
||||
blob_dir = os.path.join(self.conf.data_dir, 'blobfiles')
|
||||
if not os.path.isdir(blob_dir):
|
||||
os.mkdir(blob_dir)
|
||||
self.blob_manager = BlobManager(self.component_manager.loop, blob_dir, storage, self.conf, data_store)
|
||||
return await self.blob_manager.setup()
|
||||
|
||||
async def stop(self):
|
||||
self.blob_manager.stop()
|
||||
|
||||
async def get_status(self):
|
||||
count = 0
|
||||
if self.blob_manager:
|
||||
count = len(self.blob_manager.completed_blob_hashes)
|
||||
return {
|
||||
'finished_blobs': count,
|
||||
'connections': {} if not self.blob_manager else self.blob_manager.connection_manager.status
|
||||
}
|
||||
|
||||
|
||||
class DHTComponent(Component):
|
||||
component_name = DHT_COMPONENT
|
||||
depends_on = [UPNP_COMPONENT, DATABASE_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.dht_node: typing.Optional[Node] = None
|
||||
self.external_udp_port = None
|
||||
self.external_peer_port = None
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[Node]:
|
||||
return self.dht_node
|
||||
|
||||
async def get_status(self):
|
||||
return {
|
||||
'node_id': None if not self.dht_node else binascii.hexlify(self.dht_node.protocol.node_id),
|
||||
'peers_in_routing_table': 0 if not self.dht_node else len(self.dht_node.protocol.routing_table.get_peers())
|
||||
}
|
||||
|
||||
def get_node_id(self):
|
||||
node_id_filename = os.path.join(self.conf.data_dir, "node_id")
|
||||
if os.path.isfile(node_id_filename):
|
||||
with open(node_id_filename, "r") as node_id_file:
|
||||
return base58.b58decode(str(node_id_file.read()).strip())
|
||||
node_id = utils.generate_id()
|
||||
with open(node_id_filename, "w") as node_id_file:
|
||||
node_id_file.write(base58.b58encode(node_id).decode())
|
||||
return node_id
|
||||
|
||||
async def start(self):
|
||||
log.info("start the dht")
|
||||
upnp_component = self.component_manager.get_component(UPNP_COMPONENT)
|
||||
self.external_peer_port = upnp_component.upnp_redirects.get("TCP", self.conf.tcp_port)
|
||||
self.external_udp_port = upnp_component.upnp_redirects.get("UDP", self.conf.udp_port)
|
||||
external_ip = upnp_component.external_ip
|
||||
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
||||
if not external_ip:
|
||||
external_ip = await utils.get_external_ip()
|
||||
if not external_ip:
|
||||
log.warning("failed to get external ip")
|
||||
|
||||
self.dht_node = Node(
|
||||
self.component_manager.loop,
|
||||
self.component_manager.peer_manager,
|
||||
node_id=self.get_node_id(),
|
||||
internal_udp_port=self.conf.udp_port,
|
||||
udp_port=self.external_udp_port,
|
||||
external_ip=external_ip,
|
||||
peer_port=self.external_peer_port,
|
||||
rpc_timeout=self.conf.node_rpc_timeout,
|
||||
split_buckets_under_index=self.conf.split_buckets_under_index,
|
||||
storage=storage
|
||||
)
|
||||
self.dht_node.start(self.conf.network_interface, self.conf.known_dht_nodes)
|
||||
log.info("Started the dht")
|
||||
|
||||
async def stop(self):
|
||||
self.dht_node.stop()
|
||||
|
||||
|
||||
class HashAnnouncerComponent(Component):
|
||||
component_name = HASH_ANNOUNCER_COMPONENT
|
||||
depends_on = [DHT_COMPONENT, DATABASE_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.hash_announcer: typing.Optional[BlobAnnouncer] = None
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[BlobAnnouncer]:
|
||||
return self.hash_announcer
|
||||
|
||||
async def start(self):
|
||||
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
||||
dht_node = self.component_manager.get_component(DHT_COMPONENT)
|
||||
self.hash_announcer = BlobAnnouncer(self.component_manager.loop, dht_node, storage)
|
||||
self.hash_announcer.start(self.conf.concurrent_blob_announcers)
|
||||
log.info("Started blob announcer")
|
||||
|
||||
async def stop(self):
|
||||
self.hash_announcer.stop()
|
||||
log.info("Stopped blob announcer")
|
||||
|
||||
async def get_status(self):
|
||||
return {
|
||||
'announce_queue_size': 0 if not self.hash_announcer else len(self.hash_announcer.announce_queue)
|
||||
}
|
||||
|
||||
|
||||
class StreamManagerComponent(Component):
|
||||
component_name = STREAM_MANAGER_COMPONENT
|
||||
depends_on = [BLOB_COMPONENT, DATABASE_COMPONENT, WALLET_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.stream_manager: typing.Optional[StreamManager] = None
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[StreamManager]:
|
||||
return self.stream_manager
|
||||
|
||||
async def get_status(self):
|
||||
if not self.stream_manager:
|
||||
return
|
||||
return {
|
||||
'managed_files': len(self.stream_manager.streams),
|
||||
}
|
||||
|
||||
async def start(self):
|
||||
blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
|
||||
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
||||
wallet = self.component_manager.get_component(WALLET_COMPONENT)
|
||||
node = self.component_manager.get_component(DHT_COMPONENT) \
|
||||
if self.component_manager.has_component(DHT_COMPONENT) else None
|
||||
log.info('Starting the file manager')
|
||||
loop = asyncio.get_event_loop()
|
||||
self.stream_manager = StreamManager(
|
||||
loop, self.conf, blob_manager, wallet, storage, node, self.component_manager.analytics_manager
|
||||
)
|
||||
await self.stream_manager.start()
|
||||
log.info('Done setting up file manager')
|
||||
|
||||
async def stop(self):
|
||||
self.stream_manager.stop()
|
||||
|
||||
|
||||
class PeerProtocolServerComponent(Component):
|
||||
component_name = PEER_PROTOCOL_SERVER_COMPONENT
|
||||
depends_on = [UPNP_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.blob_server: typing.Optional[BlobServer] = None
|
||||
|
||||
@property
|
||||
def component(self) -> typing.Optional[BlobServer]:
|
||||
return self.blob_server
|
||||
|
||||
async def start(self):
|
||||
log.info("start blob server")
|
||||
blob_manager: BlobManager = self.component_manager.get_component(BLOB_COMPONENT)
|
||||
wallet: WalletManager = self.component_manager.get_component(WALLET_COMPONENT)
|
||||
peer_port = self.conf.tcp_port
|
||||
address = await wallet.get_unused_address()
|
||||
self.blob_server = BlobServer(asyncio.get_event_loop(), blob_manager, address)
|
||||
self.blob_server.start_server(peer_port, interface=self.conf.network_interface)
|
||||
await self.blob_server.started_listening.wait()
|
||||
|
||||
async def stop(self):
|
||||
if self.blob_server:
|
||||
self.blob_server.stop_server()
|
||||
|
||||
|
||||
class UPnPComponent(Component):
|
||||
component_name = UPNP_COMPONENT
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self._int_peer_port = self.conf.tcp_port
|
||||
self._int_dht_node_port = self.conf.udp_port
|
||||
self.use_upnp = self.conf.use_upnp
|
||||
self.upnp: typing.Optional[UPnP] = None
|
||||
self.upnp_redirects = {}
|
||||
self.external_ip: typing.Optional[str] = None
|
||||
self._maintain_redirects_task = None
|
||||
|
||||
@property
|
||||
def component(self) -> 'UPnPComponent':
|
||||
return self
|
||||
|
||||
async def _repeatedly_maintain_redirects(self, now=True):
|
||||
while True:
|
||||
if now:
|
||||
await self._maintain_redirects()
|
||||
await asyncio.sleep(360, loop=self.component_manager.loop)
|
||||
|
||||
async def _maintain_redirects(self):
|
||||
# setup the gateway if necessary
|
||||
if not self.upnp:
|
||||
try:
|
||||
self.upnp = await UPnP.discover(loop=self.component_manager.loop)
|
||||
log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
|
||||
except Exception as err:
|
||||
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
|
||||
raise
|
||||
log.warning("upnp discovery failed: %s", err)
|
||||
self.upnp = None
|
||||
|
||||
# update the external ip
|
||||
external_ip = None
|
||||
if self.upnp:
|
||||
try:
|
||||
external_ip = await self.upnp.get_external_ip()
|
||||
if external_ip != "0.0.0.0" and not self.external_ip:
|
||||
log.info("got external ip from UPnP: %s", external_ip)
|
||||
except (asyncio.TimeoutError, UPnPError, NotImplementedError):
|
||||
pass
|
||||
if external_ip and not is_valid_public_ipv4(external_ip):
|
||||
log.warning("UPnP returned a private/reserved ip - %s, checking lbry.com fallback", external_ip)
|
||||
external_ip = await utils.get_external_ip()
|
||||
if self.external_ip and self.external_ip != external_ip:
|
||||
log.info("external ip changed from %s to %s", self.external_ip, external_ip)
|
||||
if external_ip:
|
||||
self.external_ip = external_ip
|
||||
# assert self.external_ip is not None # TODO: handle going/starting offline
|
||||
|
||||
if not self.upnp_redirects and self.upnp: # setup missing redirects
|
||||
log.info("add UPnP port mappings")
|
||||
upnp_redirects = {}
|
||||
if PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components:
|
||||
try:
|
||||
upnp_redirects["TCP"] = await self.upnp.get_next_mapping(
|
||||
self._int_peer_port, "TCP", "LBRY peer port", self._int_peer_port
|
||||
)
|
||||
except (UPnPError, asyncio.TimeoutError, NotImplementedError):
|
||||
pass
|
||||
if DHT_COMPONENT not in self.component_manager.skip_components:
|
||||
try:
|
||||
upnp_redirects["UDP"] = await self.upnp.get_next_mapping(
|
||||
self._int_dht_node_port, "UDP", "LBRY DHT port", self._int_dht_node_port
|
||||
)
|
||||
except (UPnPError, asyncio.TimeoutError, NotImplementedError):
|
||||
pass
|
||||
if upnp_redirects:
|
||||
log.info("set up redirects: %s", upnp_redirects)
|
||||
self.upnp_redirects.update(upnp_redirects)
|
||||
elif self.upnp: # check existing redirects are still active
|
||||
found = set()
|
||||
mappings = await self.upnp.get_redirects()
|
||||
for mapping in mappings:
|
||||
proto = mapping.protocol
|
||||
if proto in self.upnp_redirects and mapping.external_port == self.upnp_redirects[proto]:
|
||||
if mapping.lan_address == self.upnp.lan_address:
|
||||
found.add(proto)
|
||||
if 'UDP' not in found and DHT_COMPONENT not in self.component_manager.skip_components:
|
||||
try:
|
||||
udp_port = await self.upnp.get_next_mapping(self._int_dht_node_port, "UDP", "LBRY DHT port")
|
||||
self.upnp_redirects['UDP'] = udp_port
|
||||
log.info("refreshed upnp redirect for dht port: %i", udp_port)
|
||||
except (asyncio.TimeoutError, UPnPError, NotImplementedError):
|
||||
del self.upnp_redirects['UDP']
|
||||
if 'TCP' not in found and PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components:
|
||||
try:
|
||||
tcp_port = await self.upnp.get_next_mapping(self._int_peer_port, "TCP", "LBRY peer port")
|
||||
self.upnp_redirects['TCP'] = tcp_port
|
||||
log.info("refreshed upnp redirect for peer port: %i", tcp_port)
|
||||
except (asyncio.TimeoutError, UPnPError, NotImplementedError):
|
||||
del self.upnp_redirects['TCP']
|
||||
if ('TCP' in self.upnp_redirects and
|
||||
PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components) and \
|
||||
('UDP' in self.upnp_redirects and DHT_COMPONENT not in self.component_manager.skip_components):
|
||||
if self.upnp_redirects:
|
||||
log.debug("upnp redirects are still active")
|
||||
|
||||
async def start(self):
|
||||
log.info("detecting external ip")
|
||||
if not self.use_upnp:
|
||||
self.external_ip = await utils.get_external_ip()
|
||||
return
|
||||
success = False
|
||||
await self._maintain_redirects()
|
||||
if self.upnp:
|
||||
if not self.upnp_redirects and not all([x in self.component_manager.skip_components for x in
|
||||
(DHT_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT)]):
|
||||
log.error("failed to setup upnp")
|
||||
else:
|
||||
success = True
|
||||
if self.upnp_redirects:
|
||||
log.debug("set up upnp port redirects for gateway: %s", self.upnp.gateway.manufacturer_string)
|
||||
else:
|
||||
log.error("failed to setup upnp")
|
||||
if not self.external_ip:
|
||||
self.external_ip = await utils.get_external_ip()
|
||||
if self.external_ip:
|
||||
log.info("detected external ip using lbry.com fallback")
|
||||
if self.component_manager.analytics_manager:
|
||||
self.component_manager.loop.create_task(
|
||||
self.component_manager.analytics_manager.send_upnp_setup_success_fail(
|
||||
success, await self.get_status()
|
||||
)
|
||||
)
|
||||
self._maintain_redirects_task = self.component_manager.loop.create_task(
|
||||
self._repeatedly_maintain_redirects(now=False)
|
||||
)
|
||||
|
||||
async def stop(self):
|
||||
if self.upnp_redirects:
|
||||
log.info("Removing upnp redirects: %s", self.upnp_redirects)
|
||||
await asyncio.wait([
|
||||
self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
|
||||
], loop=self.component_manager.loop)
|
||||
if self._maintain_redirects_task and not self._maintain_redirects_task.done():
|
||||
self._maintain_redirects_task.cancel()
|
||||
|
||||
async def get_status(self):
|
||||
return {
|
||||
'aioupnp_version': aioupnp_version,
|
||||
'redirects': self.upnp_redirects,
|
||||
'gateway': 'No gateway found' if not self.upnp else self.upnp.gateway.manufacturer_string,
|
||||
'dht_redirect_set': 'UDP' in self.upnp_redirects,
|
||||
'peer_redirect_set': 'TCP' in self.upnp_redirects,
|
||||
'external_ip': self.external_ip
|
||||
}
|
||||
|
||||
|
||||
class ExchangeRateManagerComponent(Component):
|
||||
component_name = EXCHANGE_RATE_MANAGER_COMPONENT
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.exchange_rate_manager = ExchangeRateManager()
|
||||
|
||||
@property
|
||||
def component(self) -> ExchangeRateManager:
|
||||
return self.exchange_rate_manager
|
||||
|
||||
async def start(self):
|
||||
self.exchange_rate_manager.start()
|
||||
|
||||
async def stop(self):
|
||||
self.exchange_rate_manager.stop()
|
File diff suppressed because it is too large
Load diff
|
@ -1,95 +0,0 @@
|
|||
import asyncio
|
||||
import json
|
||||
import logging.handlers
|
||||
import traceback
|
||||
|
||||
import typing
|
||||
from aiohttp.client_exceptions import ClientError
|
||||
import aiohttp
|
||||
from lbry import utils, __version__
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.conf import Config
|
||||
|
||||
LOGGLY_TOKEN = 'BQEzZmMzLJHgAGxkBF00LGD0YGuyATVgAmqxAQEuAQZ2BQH4'
|
||||
|
||||
|
||||
class JsonFormatter(logging.Formatter):
|
||||
"""Format log records using json serialization"""
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super().__init__()
|
||||
self.attributes = kwargs
|
||||
|
||||
def format(self, record):
|
||||
data = {
|
||||
'loggerName': record.name,
|
||||
'asciTime': self.formatTime(record),
|
||||
'fileName': record.filename,
|
||||
'functionName': record.funcName,
|
||||
'levelNo': record.levelno,
|
||||
'lineNo': record.lineno,
|
||||
'levelName': record.levelname,
|
||||
'message': record.getMessage(),
|
||||
}
|
||||
data.update(self.attributes)
|
||||
if record.exc_info:
|
||||
data['exc_info'] = self.formatException(record.exc_info)
|
||||
return json.dumps(data)
|
||||
|
||||
|
||||
class HTTPSLogglyHandler(logging.Handler):
|
||||
def __init__(self, loggly_token: str, config: 'Config'):
|
||||
super().__init__()
|
||||
self.cookies = {}
|
||||
self.url = "https://logs-01.loggly.com/inputs/{token}/tag/{tag}".format(
|
||||
token=utils.deobfuscate(loggly_token), tag='lbrynet-' + __version__
|
||||
)
|
||||
self._loop = asyncio.get_event_loop()
|
||||
self._session = aiohttp.ClientSession()
|
||||
self._config = config
|
||||
|
||||
@property
|
||||
def enabled(self):
|
||||
return self._config.share_usage_data
|
||||
|
||||
@staticmethod
|
||||
def get_full_message(record):
|
||||
if record.exc_info:
|
||||
return '\n'.join(traceback.format_exception(*record.exc_info))
|
||||
else:
|
||||
return record.getMessage()
|
||||
|
||||
async def _emit(self, record, retry=True):
|
||||
data = self.format(record).encode()
|
||||
try:
|
||||
async with self._session.post(self.url, data=data,
|
||||
cookies=self.cookies) as response:
|
||||
self.cookies.update(response.cookies)
|
||||
except ClientError:
|
||||
if self._loop.is_running() and retry and self.enabled:
|
||||
await self._session.close()
|
||||
self._session = aiohttp.ClientSession()
|
||||
return await self._emit(record, retry=False)
|
||||
|
||||
def emit(self, record):
|
||||
if not self.enabled:
|
||||
return
|
||||
try:
|
||||
asyncio.ensure_future(self._emit(record), loop=self._loop)
|
||||
except RuntimeError: # TODO: use a second loop
|
||||
print(f"\nfailed to send traceback to loggly, please file an issue with the following traceback:\n"
|
||||
f"{self.format(record)}")
|
||||
|
||||
def close(self):
|
||||
super().close()
|
||||
try:
|
||||
loop = asyncio.get_event_loop()
|
||||
loop.run_until_complete(self._session.close())
|
||||
except RuntimeError:
|
||||
pass
|
||||
|
||||
|
||||
def get_loggly_handler(config):
|
||||
handler = HTTPSLogglyHandler(LOGGLY_TOKEN, config=config)
|
||||
handler.setFormatter(JsonFormatter())
|
||||
return handler
|
|
@ -1,70 +0,0 @@
|
|||
# pylint: skip-file
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def migrate_db(conf, start, end):
|
||||
current = start
|
||||
while current < end:
|
||||
if current == 1:
|
||||
from .migrate1to2 import do_migration
|
||||
elif current == 2:
|
||||
from .migrate2to3 import do_migration
|
||||
elif current == 3:
|
||||
from .migrate3to4 import do_migration
|
||||
elif current == 4:
|
||||
from .migrate4to5 import do_migration
|
||||
elif current == 5:
|
||||
from .migrate5to6 import do_migration
|
||||
elif current == 6:
|
||||
from .migrate6to7 import do_migration
|
||||
elif current == 7:
|
||||
from .migrate7to8 import do_migration
|
||||
elif current == 8:
|
||||
from .migrate8to9 import do_migration
|
||||
elif current == 9:
|
||||
from .migrate9to10 import do_migration
|
||||
elif current == 10:
|
||||
from .migrate10to11 import do_migration
|
||||
elif current == 11:
|
||||
from .migrate11to12 import do_migration
|
||||
elif current == 12:
|
||||
from .migrate12to13 import do_migration
|
||||
elif current == 13:
|
||||
from .migrate13to14 import do_migration
|
||||
else:
|
||||
raise Exception(f"DB migration of version {current} to {current+1} is not available")
|
||||
try:
|
||||
do_migration(conf)
|
||||
except Exception:
|
||||
log.exception("failed to migrate database")
|
||||
if os.path.exists(os.path.join(conf.data_dir, "lbrynet.sqlite")):
|
||||
backup_name = f"rev_{current}_unmigrated_database"
|
||||
count = 0
|
||||
while os.path.exists(os.path.join(conf.data_dir, backup_name + ".sqlite")):
|
||||
count += 1
|
||||
backup_name = f"rev_{current}_unmigrated_database_{count}"
|
||||
backup_path = os.path.join(conf.data_dir, backup_name + ".sqlite")
|
||||
os.rename(os.path.join(conf.data_dir, "lbrynet.sqlite"), backup_path)
|
||||
log.info("made a backup of the unmigrated database: %s", backup_path)
|
||||
if os.path.isfile(os.path.join(conf.data_dir, "db_revision")):
|
||||
os.remove(os.path.join(conf.data_dir, "db_revision"))
|
||||
return None
|
||||
current += 1
|
||||
log.info("successfully migrated the database from revision %i to %i", current - 1, current)
|
||||
return None
|
||||
|
||||
|
||||
def run_migration_script():
|
||||
log_format = "(%(asctime)s)[%(filename)s:%(lineno)s] %(funcName)s(): %(message)s"
|
||||
logging.basicConfig(level=logging.DEBUG, format=log_format, filename="migrator.log")
|
||||
sys.stdout = open("migrator.out.log", 'w')
|
||||
sys.stderr = open("migrator.err.log", 'w')
|
||||
migrate_db(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
run_migration_script()
|
|
@ -1,54 +0,0 @@
|
|||
import sqlite3
|
||||
import os
|
||||
import binascii
|
||||
|
||||
|
||||
def do_migration(conf):
|
||||
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
||||
connection = sqlite3.connect(db_path)
|
||||
cursor = connection.cursor()
|
||||
|
||||
current_columns = []
|
||||
for col_info in cursor.execute("pragma table_info('file');").fetchall():
|
||||
current_columns.append(col_info[1])
|
||||
if 'content_fee' in current_columns or 'saved_file' in current_columns:
|
||||
connection.close()
|
||||
print("already migrated")
|
||||
return
|
||||
|
||||
cursor.execute(
|
||||
"pragma foreign_keys=off;"
|
||||
)
|
||||
|
||||
cursor.execute("""
|
||||
create table if not exists new_file (
|
||||
stream_hash text primary key not null references stream,
|
||||
file_name text,
|
||||
download_directory text,
|
||||
blob_data_rate real not null,
|
||||
status text not null,
|
||||
saved_file integer not null,
|
||||
content_fee text
|
||||
);
|
||||
""")
|
||||
for (stream_hash, file_name, download_dir, data_rate, status) in cursor.execute("select * from file").fetchall():
|
||||
saved_file = 0
|
||||
if download_dir != '{stream}' and file_name != '{stream}':
|
||||
try:
|
||||
if os.path.isfile(os.path.join(binascii.unhexlify(download_dir).decode(),
|
||||
binascii.unhexlify(file_name).decode())):
|
||||
saved_file = 1
|
||||
else:
|
||||
download_dir, file_name = None, None
|
||||
except Exception:
|
||||
download_dir, file_name = None, None
|
||||
else:
|
||||
download_dir, file_name = None, None
|
||||
cursor.execute(
|
||||
"insert into new_file values (?, ?, ?, ?, ?, ?, NULL)",
|
||||
(stream_hash, file_name, download_dir, data_rate, status, saved_file)
|
||||
)
|
||||
cursor.execute("drop table file")
|
||||
cursor.execute("alter table new_file rename to file")
|
||||
connection.commit()
|
||||
connection.close()
|
|
@ -1,69 +0,0 @@
|
|||
import sqlite3
|
||||
import os
|
||||
import time
|
||||
|
||||
|
||||
def do_migration(conf):
|
||||
db_path = os.path.join(conf.data_dir, 'lbrynet.sqlite')
|
||||
connection = sqlite3.connect(db_path)
|
||||
connection.row_factory = sqlite3.Row
|
||||
cursor = connection.cursor()
|
||||
|
||||
current_columns = []
|
||||
for col_info in cursor.execute("pragma table_info('file');").fetchall():
|
||||
current_columns.append(col_info[1])
|
||||
|
||||
if 'added_on' in current_columns:
|
||||
connection.close()
|
||||
print('already migrated')
|
||||
return
|
||||
|
||||
# follow 12 step schema change procedure
|
||||
cursor.execute("pragma foreign_keys=off")
|
||||
|
||||
# we don't have any indexes, views or triggers, so step 3 is skipped.
|
||||
cursor.execute("drop table if exists new_file")
|
||||
cursor.execute("""
|
||||
create table if not exists new_file (
|
||||
stream_hash text not null primary key references stream,
|
||||
file_name text,
|
||||
download_directory text,
|
||||
blob_data_rate text not null,
|
||||
status text not null,
|
||||
saved_file integer not null,
|
||||
content_fee text,
|
||||
added_on integer not null
|
||||
);
|
||||
|
||||
|
||||
""")
|
||||
|
||||
# step 5: transfer content from old to new
|
||||
select = "select * from file"
|
||||
for (stream_hash, file_name, download_dir, blob_rate, status, saved_file, fee) \
|
||||
in cursor.execute(select).fetchall():
|
||||
added_on = int(time.time())
|
||||
cursor.execute(
|
||||
"insert into new_file values (?, ?, ?, ?, ?, ?, ?, ?)",
|
||||
(stream_hash, file_name, download_dir, blob_rate, status, saved_file, fee, added_on)
|
||||
)
|
||||
|
||||
# step 6: drop old table
|
||||
cursor.execute("drop table file")
|
||||
|
||||
# step 7: rename new table to old table
|
||||
cursor.execute("alter table new_file rename to file")
|
||||
|
||||
# step 8: we aren't using indexes, views or triggers so skip
|
||||
# step 9: no views so skip
|
||||
# step 10: foreign key check
|
||||
cursor.execute("pragma foreign_key_check;")
|
||||
|
||||
# step 11: commit transaction
|
||||
connection.commit()
|
||||
|
||||
# step 12: re-enable foreign keys
|
||||
connection.execute("pragma foreign_keys=on;")
|
||||
|
||||
# done :)
|
||||
connection.close()
|
|
@ -1,80 +0,0 @@
|
|||
import os
|
||||
import sqlite3
|
||||
|
||||
|
||||
def do_migration(conf):
|
||||
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
||||
connection = sqlite3.connect(db_path)
|
||||
cursor = connection.cursor()
|
||||
|
||||
current_columns = []
|
||||
for col_info in cursor.execute("pragma table_info('file');").fetchall():
|
||||
current_columns.append(col_info[1])
|
||||
if 'bt_infohash' in current_columns:
|
||||
connection.close()
|
||||
print("already migrated")
|
||||
return
|
||||
|
||||
cursor.executescript("""
|
||||
pragma foreign_keys=off;
|
||||
|
||||
create table if not exists torrent (
|
||||
bt_infohash char(20) not null primary key,
|
||||
tracker text,
|
||||
length integer not null,
|
||||
name text not null
|
||||
);
|
||||
|
||||
create table if not exists torrent_node ( -- BEP-0005
|
||||
bt_infohash char(20) not null references torrent,
|
||||
host text not null,
|
||||
port integer not null
|
||||
);
|
||||
|
||||
create table if not exists torrent_tracker ( -- BEP-0012
|
||||
bt_infohash char(20) not null references torrent,
|
||||
tracker text not null
|
||||
);
|
||||
|
||||
create table if not exists torrent_http_seed ( -- BEP-0017
|
||||
bt_infohash char(20) not null references torrent,
|
||||
http_seed text not null
|
||||
);
|
||||
|
||||
create table if not exists new_file (
|
||||
stream_hash char(96) references stream,
|
||||
bt_infohash char(20) references torrent,
|
||||
file_name text,
|
||||
download_directory text,
|
||||
blob_data_rate real not null,
|
||||
status text not null,
|
||||
saved_file integer not null,
|
||||
content_fee text,
|
||||
added_on integer not null
|
||||
);
|
||||
|
||||
create table if not exists new_content_claim (
|
||||
stream_hash char(96) references stream,
|
||||
bt_infohash char(20) references torrent,
|
||||
claim_outpoint text unique not null references claim
|
||||
);
|
||||
|
||||
insert into new_file (stream_hash, bt_infohash, file_name, download_directory, blob_data_rate, status,
|
||||
saved_file, content_fee, added_on) select
|
||||
stream_hash, NULL, file_name, download_directory, blob_data_rate, status, saved_file, content_fee,
|
||||
added_on
|
||||
from file;
|
||||
|
||||
insert or ignore into new_content_claim (stream_hash, bt_infohash, claim_outpoint)
|
||||
select stream_hash, NULL, claim_outpoint from content_claim;
|
||||
|
||||
drop table file;
|
||||
drop table content_claim;
|
||||
alter table new_file rename to file;
|
||||
alter table new_content_claim rename to content_claim;
|
||||
|
||||
pragma foreign_keys=on;
|
||||
""")
|
||||
|
||||
connection.commit()
|
||||
connection.close()
|
|
@ -1,21 +0,0 @@
|
|||
import os
|
||||
import sqlite3
|
||||
|
||||
|
||||
def do_migration(conf):
|
||||
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
||||
connection = sqlite3.connect(db_path)
|
||||
cursor = connection.cursor()
|
||||
|
||||
cursor.executescript("""
|
||||
create table if not exists peer (
|
||||
node_id char(96) not null primary key,
|
||||
address text not null,
|
||||
udp_port integer not null,
|
||||
tcp_port integer,
|
||||
unique (address, udp_port)
|
||||
);
|
||||
""")
|
||||
|
||||
connection.commit()
|
||||
connection.close()
|
|
@ -1,77 +0,0 @@
|
|||
import sqlite3
|
||||
import os
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
UNSET_NOUT = -1
|
||||
|
||||
def do_migration(conf):
|
||||
log.info("Doing the migration")
|
||||
migrate_blockchainname_db(conf.data_dir)
|
||||
log.info("Migration succeeded")
|
||||
|
||||
|
||||
def migrate_blockchainname_db(db_dir):
|
||||
blockchainname_db = os.path.join(db_dir, "blockchainname.db")
|
||||
# skip migration on fresh installs
|
||||
if not os.path.isfile(blockchainname_db):
|
||||
return
|
||||
temp_db = sqlite3.connect(":memory:")
|
||||
db_file = sqlite3.connect(blockchainname_db)
|
||||
file_cursor = db_file.cursor()
|
||||
mem_cursor = temp_db.cursor()
|
||||
|
||||
mem_cursor.execute("create table if not exists name_metadata ("
|
||||
" name text, "
|
||||
" txid text, "
|
||||
" n integer, "
|
||||
" sd_hash text)")
|
||||
mem_cursor.execute("create table if not exists claim_ids ("
|
||||
" claimId text, "
|
||||
" name text, "
|
||||
" txid text, "
|
||||
" n integer)")
|
||||
temp_db.commit()
|
||||
|
||||
name_metadata = file_cursor.execute("select * from name_metadata").fetchall()
|
||||
claim_metadata = file_cursor.execute("select * from claim_ids").fetchall()
|
||||
|
||||
# fill n as V1_UNSET_NOUT, Wallet.py will be responsible for filling in correct n
|
||||
for name, txid, sd_hash in name_metadata:
|
||||
mem_cursor.execute(
|
||||
"insert into name_metadata values (?, ?, ?, ?) ",
|
||||
(name, txid, UNSET_NOUT, sd_hash))
|
||||
|
||||
for claim_id, name, txid in claim_metadata:
|
||||
mem_cursor.execute(
|
||||
"insert into claim_ids values (?, ?, ?, ?)",
|
||||
(claim_id, name, txid, UNSET_NOUT))
|
||||
temp_db.commit()
|
||||
|
||||
new_name_metadata = mem_cursor.execute("select * from name_metadata").fetchall()
|
||||
new_claim_metadata = mem_cursor.execute("select * from claim_ids").fetchall()
|
||||
|
||||
file_cursor.execute("drop table name_metadata")
|
||||
file_cursor.execute("create table name_metadata ("
|
||||
" name text, "
|
||||
" txid text, "
|
||||
" n integer, "
|
||||
" sd_hash text)")
|
||||
|
||||
for name, txid, n, sd_hash in new_name_metadata:
|
||||
file_cursor.execute(
|
||||
"insert into name_metadata values (?, ?, ?, ?) ", (name, txid, n, sd_hash))
|
||||
|
||||
file_cursor.execute("drop table claim_ids")
|
||||
file_cursor.execute("create table claim_ids ("
|
||||
" claimId text, "
|
||||
" name text, "
|
||||
" txid text, "
|
||||
" n integer)")
|
||||
|
||||
for claim_id, name, txid, n in new_claim_metadata:
|
||||
file_cursor.execute("insert into claim_ids values (?, ?, ?, ?)", (claim_id, name, txid, n))
|
||||
|
||||
db_file.commit()
|
||||
db_file.close()
|
||||
temp_db.close()
|
|
@ -1,42 +0,0 @@
|
|||
import sqlite3
|
||||
import os
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def do_migration(conf):
|
||||
log.info("Doing the migration")
|
||||
migrate_blockchainname_db(conf.data_dir)
|
||||
log.info("Migration succeeded")
|
||||
|
||||
|
||||
def migrate_blockchainname_db(db_dir):
|
||||
blockchainname_db = os.path.join(db_dir, "blockchainname.db")
|
||||
# skip migration on fresh installs
|
||||
if not os.path.isfile(blockchainname_db):
|
||||
return
|
||||
|
||||
db_file = sqlite3.connect(blockchainname_db)
|
||||
file_cursor = db_file.cursor()
|
||||
|
||||
tables = file_cursor.execute("SELECT tbl_name FROM sqlite_master "
|
||||
"WHERE type='table'").fetchall()
|
||||
|
||||
if 'tmp_name_metadata_table' in tables and 'name_metadata' not in tables:
|
||||
file_cursor.execute("ALTER TABLE tmp_name_metadata_table RENAME TO name_metadata")
|
||||
else:
|
||||
file_cursor.executescript(
|
||||
"CREATE TABLE IF NOT EXISTS tmp_name_metadata_table "
|
||||
" (name TEXT UNIQUE NOT NULL, "
|
||||
" txid TEXT NOT NULL, "
|
||||
" n INTEGER NOT NULL, "
|
||||
" sd_hash TEXT NOT NULL); "
|
||||
"INSERT OR IGNORE INTO tmp_name_metadata_table "
|
||||
" (name, txid, n, sd_hash) "
|
||||
" SELECT name, txid, n, sd_hash FROM name_metadata; "
|
||||
"DROP TABLE name_metadata; "
|
||||
"ALTER TABLE tmp_name_metadata_table RENAME TO name_metadata;"
|
||||
)
|
||||
db_file.commit()
|
||||
db_file.close()
|
|
@ -1,85 +0,0 @@
|
|||
import sqlite3
|
||||
import os
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def do_migration(conf):
|
||||
log.info("Doing the migration")
|
||||
migrate_blobs_db(conf.data_dir)
|
||||
log.info("Migration succeeded")
|
||||
|
||||
|
||||
def migrate_blobs_db(db_dir):
|
||||
"""
|
||||
We migrate the blobs.db used in BlobManager to have a "should_announce" column,
|
||||
and set this to True for blobs that are sd_hash's or head blobs (first blob in stream)
|
||||
"""
|
||||
|
||||
blobs_db = os.path.join(db_dir, "blobs.db")
|
||||
lbryfile_info_db = os.path.join(db_dir, 'lbryfile_info.db')
|
||||
|
||||
# skip migration on fresh installs
|
||||
if not os.path.isfile(blobs_db) and not os.path.isfile(lbryfile_info_db):
|
||||
return
|
||||
|
||||
# if blobs.db doesn't exist, skip migration
|
||||
if not os.path.isfile(blobs_db):
|
||||
log.info("blobs.db was not found but lbryfile_info.db was found, skipping migration")
|
||||
return
|
||||
|
||||
blobs_db_file = sqlite3.connect(blobs_db)
|
||||
blobs_db_cursor = blobs_db_file.cursor()
|
||||
|
||||
# check if new columns exist (it shouldn't) and create it
|
||||
try:
|
||||
blobs_db_cursor.execute("SELECT should_announce FROM blobs")
|
||||
except sqlite3.OperationalError:
|
||||
blobs_db_cursor.execute(
|
||||
"ALTER TABLE blobs ADD COLUMN should_announce integer NOT NULL DEFAULT 0")
|
||||
else:
|
||||
log.warning("should_announce already exists somehow, proceeding anyways")
|
||||
|
||||
# if lbryfile_info.db doesn't exist, skip marking blobs as should_announce = True
|
||||
if not os.path.isfile(lbryfile_info_db):
|
||||
log.error("lbryfile_info.db was not found, skipping check for should_announce")
|
||||
return
|
||||
|
||||
lbryfile_info_file = sqlite3.connect(lbryfile_info_db)
|
||||
lbryfile_info_cursor = lbryfile_info_file.cursor()
|
||||
|
||||
# find blobs that are stream descriptors
|
||||
lbryfile_info_cursor.execute('SELECT * FROM lbry_file_descriptors')
|
||||
descriptors = lbryfile_info_cursor.fetchall()
|
||||
should_announce_blob_hashes = []
|
||||
for d in descriptors:
|
||||
sd_blob_hash = (d[0],)
|
||||
should_announce_blob_hashes.append(sd_blob_hash)
|
||||
|
||||
# find blobs that are the first blob in a stream
|
||||
lbryfile_info_cursor.execute('SELECT * FROM lbry_file_blobs WHERE position = 0')
|
||||
blobs = lbryfile_info_cursor.fetchall()
|
||||
head_blob_hashes = []
|
||||
for b in blobs:
|
||||
blob_hash = (b[0],)
|
||||
should_announce_blob_hashes.append(blob_hash)
|
||||
|
||||
# now mark them as should_announce = True
|
||||
blobs_db_cursor.executemany('UPDATE blobs SET should_announce=1 WHERE blob_hash=?',
|
||||
should_announce_blob_hashes)
|
||||
|
||||
# Now run some final checks here to make sure migration succeeded
|
||||
try:
|
||||
blobs_db_cursor.execute("SELECT should_announce FROM blobs")
|
||||
except sqlite3.OperationalError:
|
||||
raise Exception('Migration failed, cannot find should_announce')
|
||||
|
||||
blobs_db_cursor.execute("SELECT * FROM blobs WHERE should_announce=1")
|
||||
blobs = blobs_db_cursor.fetchall()
|
||||
if len(blobs) != len(should_announce_blob_hashes):
|
||||
log.error("Some how not all blobs were marked as announceable")
|
||||
|
||||
blobs_db_file.commit()
|
||||
blobs_db_file.close()
|
||||
lbryfile_info_file.close()
|
|
@ -1,62 +0,0 @@
|
|||
import sqlite3
|
||||
import os
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def do_migration(conf):
|
||||
log.info("Doing the migration")
|
||||
add_lbry_file_metadata(conf.data_dir)
|
||||
log.info("Migration succeeded")
|
||||
|
||||
|
||||
def add_lbry_file_metadata(db_dir):
|
||||
"""
|
||||
We migrate the blobs.db used in BlobManager to have a "should_announce" column,
|
||||
and set this to True for blobs that are sd_hash's or head blobs (first blob in stream)
|
||||
"""
|
||||
|
||||
name_metadata = os.path.join(db_dir, "blockchainname.db")
|
||||
lbryfile_info_db = os.path.join(db_dir, 'lbryfile_info.db')
|
||||
|
||||
if not os.path.isfile(name_metadata) and not os.path.isfile(lbryfile_info_db):
|
||||
return
|
||||
|
||||
if not os.path.isfile(lbryfile_info_db):
|
||||
log.info("blockchainname.db was not found but lbryfile_info.db was found, skipping migration")
|
||||
return
|
||||
|
||||
name_metadata_db = sqlite3.connect(name_metadata)
|
||||
lbryfile_db = sqlite3.connect(lbryfile_info_db)
|
||||
name_metadata_cursor = name_metadata_db.cursor()
|
||||
lbryfile_cursor = lbryfile_db.cursor()
|
||||
|
||||
lbryfile_db.executescript(
|
||||
"create table if not exists lbry_file_metadata (" +
|
||||
" lbry_file integer primary key, " +
|
||||
" txid text, " +
|
||||
" n integer, " +
|
||||
" foreign key(lbry_file) references lbry_files(rowid)"
|
||||
")")
|
||||
|
||||
_files = lbryfile_cursor.execute("select rowid, stream_hash from lbry_files").fetchall()
|
||||
|
||||
lbry_files = {x[1]: x[0] for x in _files}
|
||||
for (sd_hash, stream_hash) in lbryfile_cursor.execute("select * "
|
||||
"from lbry_file_descriptors").fetchall():
|
||||
lbry_file_id = lbry_files[stream_hash]
|
||||
outpoint = name_metadata_cursor.execute("select txid, n from name_metadata "
|
||||
"where sd_hash=?",
|
||||
(sd_hash,)).fetchall()
|
||||
if outpoint:
|
||||
txid, nout = outpoint[0]
|
||||
lbryfile_cursor.execute("insert into lbry_file_metadata values (?, ?, ?)",
|
||||
(lbry_file_id, txid, nout))
|
||||
else:
|
||||
lbryfile_cursor.execute("insert into lbry_file_metadata values (?, ?, ?)",
|
||||
(lbry_file_id, None, None))
|
||||
lbryfile_db.commit()
|
||||
|
||||
lbryfile_db.close()
|
||||
name_metadata_db.close()
|
|
@ -1,326 +0,0 @@
|
|||
import sqlite3
|
||||
import os
|
||||
import json
|
||||
import logging
|
||||
from binascii import hexlify
|
||||
from lbry.schema.claim import Claim
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
CREATE_TABLES_QUERY = """
|
||||
pragma foreign_keys=on;
|
||||
pragma journal_mode=WAL;
|
||||
|
||||
create table if not exists blob (
|
||||
blob_hash char(96) primary key not null,
|
||||
blob_length integer not null,
|
||||
next_announce_time integer not null,
|
||||
should_announce integer not null default 0,
|
||||
status text not null
|
||||
);
|
||||
|
||||
create table if not exists stream (
|
||||
stream_hash char(96) not null primary key,
|
||||
sd_hash char(96) not null references blob,
|
||||
stream_key text not null,
|
||||
stream_name text not null,
|
||||
suggested_filename text not null
|
||||
);
|
||||
|
||||
create table if not exists stream_blob (
|
||||
stream_hash char(96) not null references stream,
|
||||
blob_hash char(96) references blob,
|
||||
position integer not null,
|
||||
iv char(32) not null,
|
||||
primary key (stream_hash, blob_hash)
|
||||
);
|
||||
|
||||
create table if not exists claim (
|
||||
claim_outpoint text not null primary key,
|
||||
claim_id char(40) not null,
|
||||
claim_name text not null,
|
||||
amount integer not null,
|
||||
height integer not null,
|
||||
serialized_metadata blob not null,
|
||||
channel_claim_id text,
|
||||
address text not null,
|
||||
claim_sequence integer not null
|
||||
);
|
||||
|
||||
create table if not exists file (
|
||||
stream_hash text primary key not null references stream,
|
||||
file_name text not null,
|
||||
download_directory text not null,
|
||||
blob_data_rate real not null,
|
||||
status text not null
|
||||
);
|
||||
|
||||
create table if not exists content_claim (
|
||||
stream_hash text unique not null references file,
|
||||
claim_outpoint text not null references claim,
|
||||
primary key (stream_hash, claim_outpoint)
|
||||
);
|
||||
|
||||
create table if not exists support (
|
||||
support_outpoint text not null primary key,
|
||||
claim_id text not null,
|
||||
amount integer not null,
|
||||
address text not null
|
||||
);
|
||||
"""
|
||||
|
||||
|
||||
def run_operation(db):
|
||||
def _decorate(fn):
|
||||
def _wrapper(*args):
|
||||
cursor = db.cursor()
|
||||
try:
|
||||
result = fn(cursor, *args)
|
||||
db.commit()
|
||||
return result
|
||||
except sqlite3.IntegrityError:
|
||||
db.rollback()
|
||||
raise
|
||||
return _wrapper
|
||||
return _decorate
|
||||
|
||||
|
||||
def verify_sd_blob(sd_hash, blob_dir):
|
||||
with open(os.path.join(blob_dir, sd_hash), "r") as sd_file:
|
||||
data = sd_file.read()
|
||||
sd_length = len(data)
|
||||
decoded = json.loads(data)
|
||||
assert set(decoded.keys()) == {
|
||||
'stream_name', 'blobs', 'stream_type', 'key', 'suggested_file_name', 'stream_hash'
|
||||
}, "invalid sd blob"
|
||||
for blob in sorted(decoded['blobs'], key=lambda x: int(x['blob_num']), reverse=True):
|
||||
if blob['blob_num'] == len(decoded['blobs']) - 1:
|
||||
assert {'length', 'blob_num', 'iv'} == set(blob.keys()), 'invalid stream terminator'
|
||||
assert blob['length'] == 0, 'non zero length stream terminator'
|
||||
else:
|
||||
assert {'blob_hash', 'length', 'blob_num', 'iv'} == set(blob.keys()), 'invalid stream blob'
|
||||
assert blob['length'] > 0, 'zero length stream blob'
|
||||
return decoded, sd_length
|
||||
|
||||
|
||||
def do_migration(conf):
|
||||
new_db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
||||
connection = sqlite3.connect(new_db_path)
|
||||
|
||||
metadata_db = sqlite3.connect(os.path.join(conf.data_dir, "blockchainname.db"))
|
||||
lbryfile_db = sqlite3.connect(os.path.join(conf.data_dir, 'lbryfile_info.db'))
|
||||
blobs_db = sqlite3.connect(os.path.join(conf.data_dir, 'blobs.db'))
|
||||
|
||||
name_metadata_cursor = metadata_db.cursor()
|
||||
lbryfile_cursor = lbryfile_db.cursor()
|
||||
blobs_db_cursor = blobs_db.cursor()
|
||||
|
||||
old_rowid_to_outpoint = {
|
||||
rowid: (txid, nout) for (rowid, txid, nout) in
|
||||
lbryfile_cursor.execute("select * from lbry_file_metadata").fetchall()
|
||||
}
|
||||
|
||||
old_sd_hash_to_outpoint = {
|
||||
sd_hash: (txid, nout) for (txid, nout, sd_hash) in
|
||||
name_metadata_cursor.execute("select txid, n, sd_hash from name_metadata").fetchall()
|
||||
}
|
||||
|
||||
sd_hash_to_stream_hash = dict(
|
||||
lbryfile_cursor.execute("select sd_blob_hash, stream_hash from lbry_file_descriptors").fetchall()
|
||||
)
|
||||
|
||||
stream_hash_to_stream_blobs = {}
|
||||
|
||||
for (blob_hash, stream_hash, position, iv, length) in lbryfile_db.execute(
|
||||
"select * from lbry_file_blobs").fetchall():
|
||||
stream_blobs = stream_hash_to_stream_blobs.get(stream_hash, [])
|
||||
stream_blobs.append((blob_hash, length, position, iv))
|
||||
stream_hash_to_stream_blobs[stream_hash] = stream_blobs
|
||||
|
||||
claim_outpoint_queries = {}
|
||||
|
||||
for claim_query in metadata_db.execute(
|
||||
"select distinct c.txid, c.n, c.claimId, c.name, claim_cache.claim_sequence, claim_cache.claim_address, "
|
||||
"claim_cache.height, claim_cache.amount, claim_cache.claim_pb "
|
||||
"from claim_cache inner join claim_ids c on claim_cache.claim_id=c.claimId"):
|
||||
txid, nout = claim_query[0], claim_query[1]
|
||||
if (txid, nout) in claim_outpoint_queries:
|
||||
continue
|
||||
claim_outpoint_queries[(txid, nout)] = claim_query
|
||||
|
||||
@run_operation(connection)
|
||||
def _populate_blobs(transaction, blob_infos):
|
||||
transaction.executemany(
|
||||
"insert into blob values (?, ?, ?, ?, ?)",
|
||||
[(blob_hash, blob_length, int(next_announce_time), should_announce, "finished")
|
||||
for (blob_hash, blob_length, _, next_announce_time, should_announce) in blob_infos]
|
||||
)
|
||||
|
||||
@run_operation(connection)
|
||||
def _import_file(transaction, sd_hash, stream_hash, key, stream_name, suggested_file_name, data_rate,
|
||||
status, stream_blobs):
|
||||
try:
|
||||
transaction.execute(
|
||||
"insert or ignore into stream values (?, ?, ?, ?, ?)",
|
||||
(stream_hash, sd_hash, key, stream_name, suggested_file_name)
|
||||
)
|
||||
except sqlite3.IntegrityError:
|
||||
# failed because the sd isn't a known blob, we'll try to read the blob file and recover it
|
||||
return sd_hash
|
||||
|
||||
# insert any stream blobs that were missing from the blobs table
|
||||
transaction.executemany(
|
||||
"insert or ignore into blob values (?, ?, ?, ?, ?)",
|
||||
[
|
||||
(blob_hash, length, 0, 0, "pending")
|
||||
for (blob_hash, length, position, iv) in stream_blobs
|
||||
]
|
||||
)
|
||||
|
||||
# insert the stream blobs
|
||||
for blob_hash, length, position, iv in stream_blobs:
|
||||
transaction.execute(
|
||||
"insert or ignore into stream_blob values (?, ?, ?, ?)",
|
||||
(stream_hash, blob_hash, position, iv)
|
||||
)
|
||||
|
||||
download_dir = conf.download_dir
|
||||
if not isinstance(download_dir, bytes):
|
||||
download_dir = download_dir.encode()
|
||||
|
||||
# insert the file
|
||||
transaction.execute(
|
||||
"insert or ignore into file values (?, ?, ?, ?, ?)",
|
||||
(stream_hash, stream_name, hexlify(download_dir),
|
||||
data_rate, status)
|
||||
)
|
||||
|
||||
@run_operation(connection)
|
||||
def _add_recovered_blobs(transaction, blob_infos, sd_hash, sd_length):
|
||||
transaction.execute(
|
||||
"insert or replace into blob values (?, ?, ?, ?, ?)", (sd_hash, sd_length, 0, 1, "finished")
|
||||
)
|
||||
for blob in sorted(blob_infos, key=lambda x: x['blob_num'], reverse=True):
|
||||
if blob['blob_num'] < len(blob_infos) - 1:
|
||||
transaction.execute(
|
||||
"insert or ignore into blob values (?, ?, ?, ?, ?)",
|
||||
(blob['blob_hash'], blob['length'], 0, 0, "pending")
|
||||
)
|
||||
|
||||
@run_operation(connection)
|
||||
def _make_db(new_db):
|
||||
# create the new tables
|
||||
new_db.executescript(CREATE_TABLES_QUERY)
|
||||
|
||||
# first migrate the blobs
|
||||
blobs = blobs_db_cursor.execute("select * from blobs").fetchall()
|
||||
_populate_blobs(blobs) # pylint: disable=no-value-for-parameter
|
||||
log.info("migrated %i blobs", new_db.execute("select count(*) from blob").fetchone()[0])
|
||||
|
||||
# used to store the query arguments if we need to try re-importing the lbry file later
|
||||
file_args = {} # <sd_hash>: args tuple
|
||||
|
||||
file_outpoints = {} # <outpoint tuple>: sd_hash
|
||||
|
||||
# get the file and stream queries ready
|
||||
for (rowid, sd_hash, stream_hash, key, stream_name, suggested_file_name, data_rate, status) in \
|
||||
lbryfile_db.execute(
|
||||
"select distinct lbry_files.rowid, d.sd_blob_hash, lbry_files.*, o.blob_data_rate, o.status "
|
||||
"from lbry_files "
|
||||
"inner join lbry_file_descriptors d on lbry_files.stream_hash=d.stream_hash "
|
||||
"inner join lbry_file_options o on lbry_files.stream_hash=o.stream_hash"):
|
||||
|
||||
# this is try to link the file to a content claim after we've imported all the files
|
||||
if rowid in old_rowid_to_outpoint:
|
||||
file_outpoints[old_rowid_to_outpoint[rowid]] = sd_hash
|
||||
elif sd_hash in old_sd_hash_to_outpoint:
|
||||
file_outpoints[old_sd_hash_to_outpoint[sd_hash]] = sd_hash
|
||||
|
||||
sd_hash_to_stream_hash[sd_hash] = stream_hash
|
||||
if stream_hash in stream_hash_to_stream_blobs:
|
||||
file_args[sd_hash] = (
|
||||
sd_hash, stream_hash, key, stream_name,
|
||||
suggested_file_name, data_rate or 0.0,
|
||||
status, stream_hash_to_stream_blobs.pop(stream_hash)
|
||||
)
|
||||
|
||||
# used to store the query arguments if we need to try re-importing the claim
|
||||
claim_queries = {} # <sd_hash>: claim query tuple
|
||||
|
||||
# get the claim queries ready, only keep those with associated files
|
||||
for outpoint, sd_hash in file_outpoints.items():
|
||||
if outpoint in claim_outpoint_queries:
|
||||
claim_queries[sd_hash] = claim_outpoint_queries[outpoint]
|
||||
|
||||
# insert the claims
|
||||
new_db.executemany(
|
||||
"insert or ignore into claim values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
|
||||
[
|
||||
(
|
||||
"%s:%i" % (claim_arg_tup[0], claim_arg_tup[1]), claim_arg_tup[2], claim_arg_tup[3],
|
||||
claim_arg_tup[7], claim_arg_tup[6], claim_arg_tup[8],
|
||||
Claim.from_bytes(claim_arg_tup[8]).signing_channel_id, claim_arg_tup[5], claim_arg_tup[4]
|
||||
)
|
||||
for sd_hash, claim_arg_tup in claim_queries.items() if claim_arg_tup
|
||||
] # sd_hash, (txid, nout, claim_id, name, sequence, address, height, amount, serialized)
|
||||
)
|
||||
|
||||
log.info("migrated %i claims", new_db.execute("select count(*) from claim").fetchone()[0])
|
||||
|
||||
damaged_stream_sds = []
|
||||
# import the files and get sd hashes of streams to attempt recovering
|
||||
for sd_hash, file_query in file_args.items():
|
||||
failed_sd = _import_file(*file_query)
|
||||
if failed_sd:
|
||||
damaged_stream_sds.append(failed_sd)
|
||||
|
||||
# recover damaged streams
|
||||
if damaged_stream_sds:
|
||||
blob_dir = os.path.join(conf.data_dir, "blobfiles")
|
||||
damaged_sds_on_disk = [] if not os.path.isdir(blob_dir) else list({p for p in os.listdir(blob_dir)
|
||||
if p in damaged_stream_sds})
|
||||
for damaged_sd in damaged_sds_on_disk:
|
||||
try:
|
||||
decoded, sd_length = verify_sd_blob(damaged_sd, blob_dir)
|
||||
blobs = decoded['blobs']
|
||||
_add_recovered_blobs(blobs, damaged_sd, sd_length) # pylint: disable=no-value-for-parameter
|
||||
_import_file(*file_args[damaged_sd])
|
||||
damaged_stream_sds.remove(damaged_sd)
|
||||
except (OSError, ValueError, TypeError, AssertionError, sqlite3.IntegrityError):
|
||||
continue
|
||||
|
||||
log.info("migrated %i files", new_db.execute("select count(*) from file").fetchone()[0])
|
||||
|
||||
# associate the content claims to their respective files
|
||||
for claim_arg_tup in claim_queries.values():
|
||||
if claim_arg_tup and (claim_arg_tup[0], claim_arg_tup[1]) in file_outpoints \
|
||||
and file_outpoints[(claim_arg_tup[0], claim_arg_tup[1])] in sd_hash_to_stream_hash:
|
||||
try:
|
||||
new_db.execute(
|
||||
"insert or ignore into content_claim values (?, ?)",
|
||||
(
|
||||
sd_hash_to_stream_hash.get(file_outpoints.get((claim_arg_tup[0], claim_arg_tup[1]))),
|
||||
"%s:%i" % (claim_arg_tup[0], claim_arg_tup[1])
|
||||
)
|
||||
)
|
||||
except sqlite3.IntegrityError:
|
||||
continue
|
||||
|
||||
log.info("migrated %i content claims", new_db.execute("select count(*) from content_claim").fetchone()[0])
|
||||
try:
_make_db()  # pylint: disable=no-value-for-parameter
except sqlite3.OperationalError as err:
# sqlite3 exceptions have no .message attribute on python 3; compare against str(err)
if str(err) == "table blob has 7 columns but 5 values were supplied":
log.warning("detected a failed previous migration to revision 6, repairing it")
connection.close()
os.remove(new_db_path)
return do_migration(conf)
raise err
|
||||
|
||||
connection.close()
|
||||
blobs_db.close()
|
||||
lbryfile_db.close()
|
||||
metadata_db.close()
|
||||
# os.remove(os.path.join(db_dir, "blockchainname.db"))
|
||||
# os.remove(os.path.join(db_dir, 'lbryfile_info.db'))
|
||||
# os.remove(os.path.join(db_dir, 'blobs.db'))
|

@ -1,13 +0,0 @@
import sqlite3
import os


def do_migration(conf):
    db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
    connection = sqlite3.connect(db_path)
    cursor = connection.cursor()
    cursor.executescript("alter table blob add last_announced_time integer;")
    cursor.executescript("alter table blob add single_announce integer;")
    cursor.execute("update blob set next_announce_time=0")
    connection.commit()
    connection.close()
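
# Hedged aside (not part of the original migration): "alter table ... add column" raises if the
# column already exists, so re-running the script above would fail. A defensive variant could
# consult pragma table_info first. The helper below is a hypothetical illustration, not lbrynet code.
def _add_column_if_missing(cursor, table, column, column_type):
    existing = {row[1] for row in cursor.execute(f"pragma table_info({table})").fetchall()}
    if column not in existing:
        cursor.execute(f"alter table {table} add column {column} {column_type}")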

@ -1,21 +0,0 @@
import sqlite3
import os


def do_migration(conf):
    db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
    connection = sqlite3.connect(db_path)
    cursor = connection.cursor()

    cursor.executescript(
        """
        create table reflected_stream (
            sd_hash text not null,
            reflector_address text not null,
            timestamp integer,
            primary key (sd_hash, reflector_address)
        );
        """
    )
    connection.commit()
    connection.close()
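
# Illustrative sketch (not part of the original migration): how a row lands in the new
# reflected_stream table once a stream has been reflected. The statement mirrors the
# "insert or replace into reflected_stream values (?, ?, ?)" used by SQLiteStorage further below.
def _demo_record_reflection(cursor, sd_hash, reflector_address):
    import time
    cursor.execute(
        "insert or replace into reflected_stream values (?, ?, ?)",
        (sd_hash, reflector_address, int(time.time()))
    )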
@ -1,47 +0,0 @@
|
|||
import sqlite3
|
||||
import logging
|
||||
import os
|
||||
from lbry.blob.blob_info import BlobInfo
|
||||
from lbry.stream.descriptor import StreamDescriptor
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def do_migration(conf):
|
||||
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
||||
blob_dir = os.path.join(conf.data_dir, "blobfiles")
|
||||
connection = sqlite3.connect(db_path)
|
||||
cursor = connection.cursor()
|
||||
|
||||
query = "select stream_name, stream_key, suggested_filename, sd_hash, stream_hash from stream"
|
||||
streams = cursor.execute(query).fetchall()
|
||||
|
||||
blobs = cursor.execute("select s.stream_hash, s.position, s.iv, b.blob_hash, b.blob_length from stream_blob s "
|
||||
"left outer join blob b ON b.blob_hash=s.blob_hash order by s.position").fetchall()
|
||||
blobs_by_stream = {}
|
||||
for stream_hash, position, iv, blob_hash, blob_length in blobs:
|
||||
blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, blob_hash))
|
||||
|
||||
for stream_name, stream_key, suggested_filename, sd_hash, stream_hash in streams:
|
||||
sd = StreamDescriptor(None, blob_dir, stream_name, stream_key, suggested_filename,
|
||||
blobs_by_stream[stream_hash], stream_hash, sd_hash)
|
||||
if sd_hash != sd.calculate_sd_hash():
|
||||
log.info("Stream for descriptor %s is invalid, cleaning it up", sd_hash)
|
||||
blob_hashes = [blob.blob_hash for blob in blobs_by_stream[stream_hash]]
|
||||
delete_stream(cursor, stream_hash, sd_hash, blob_hashes, blob_dir)
|
||||
|
||||
connection.commit()
|
||||
connection.close()
|
||||
|
||||
|
||||
def delete_stream(transaction, stream_hash, sd_hash, blob_hashes, blob_dir):
|
||||
transaction.execute("delete from content_claim where stream_hash=? ", (stream_hash,))
|
||||
transaction.execute("delete from file where stream_hash=? ", (stream_hash, ))
|
||||
transaction.execute("delete from stream_blob where stream_hash=?", (stream_hash, ))
|
||||
transaction.execute("delete from stream where stream_hash=? ", (stream_hash, ))
|
||||
transaction.execute("delete from blob where blob_hash=?", (sd_hash, ))
|
||||
for blob_hash in blob_hashes:
|
||||
transaction.execute("delete from blob where blob_hash=?", (blob_hash, ))
|
||||
file_path = os.path.join(blob_dir, blob_hash)
|
||||
if os.path.isfile(file_path):
|
||||
os.unlink(file_path)
|
|
@ -1,20 +0,0 @@
import sqlite3
import os


def do_migration(conf):
    db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
    connection = sqlite3.connect(db_path)
    cursor = connection.cursor()

    query = "select stream_hash, sd_hash from main.stream"
    for stream_hash, sd_hash in cursor.execute(query).fetchall():
        head_blob_hash = cursor.execute(
            "select blob_hash from stream_blob where position = 0 and stream_hash = ?",
            (stream_hash,)
        ).fetchone()
        if not head_blob_hash:
            continue
        cursor.execute("update blob set should_announce=1 where blob_hash in (?, ?)", (sd_hash, head_blob_hash[0],))
    connection.commit()
    connection.close()
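
# Illustrative sketch (not part of the original migration): the update above marks exactly two
# blobs per stream for announcement -- the sd blob and the blob at position 0 (the head blob).
# A pure-python restatement of that selection, with hypothetical input shapes:
def _blobs_to_announce(streams, head_blobs):
    # streams: iterable of (stream_hash, sd_hash); head_blobs: {stream_hash: head_blob_hash}
    to_announce = set()
    for stream_hash, sd_hash in streams:
        if stream_hash in head_blobs:
            to_announce.add(sd_hash)
            to_announce.add(head_blobs[stream_hash])
    return to_announce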
@ -1,835 +0,0 @@
|
|||
import os
|
||||
import logging
|
||||
import sqlite3
|
||||
import typing
|
||||
import asyncio
|
||||
import binascii
|
||||
import time
|
||||
from typing import Optional
|
||||
from lbry.wallet import SQLiteMixin
|
||||
from lbry.conf import Config
|
||||
from lbry.wallet.dewies import dewies_to_lbc, lbc_to_dewies
|
||||
from lbry.wallet.transaction import Transaction
|
||||
from lbry.schema.claim import Claim
|
||||
from lbry.dht.constants import DATA_EXPIRATION
|
||||
from lbry.blob.blob_info import BlobInfo
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.blob.blob_file import BlobFile
|
||||
from lbry.stream.descriptor import StreamDescriptor
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def calculate_effective_amount(amount: str, supports: typing.Optional[typing.List[typing.Dict]] = None) -> str:
    # guard the default of None so a claim with no supports does not raise a TypeError
    supports = supports or []
    return dewies_to_lbc(
        lbc_to_dewies(amount) + sum(lbc_to_dewies(support['amount']) for support in supports)
    )
|
||||
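# Hedged worked example (not part of the original file), assuming dewies are the integer
# subdivision of LBC converted by lbc_to_dewies/dewies_to_lbc: a 1.0 LBC claim with supports of
# 0.5 and 0.25 LBC has an effective amount of roughly 1.75 LBC (exact string formatting depends
# on dewies_to_lbc):
#
#   calculate_effective_amount("1.0", [{'amount': "0.5"}, {'amount': "0.25"}])  # -> "1.75"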
|
||||
|
||||
class StoredContentClaim:
|
||||
def __init__(self, outpoint: Optional[str] = None, claim_id: Optional[str] = None, name: Optional[str] = None,
|
||||
amount: Optional[int] = None, height: Optional[int] = None, serialized: Optional[str] = None,
|
||||
channel_claim_id: Optional[str] = None, address: Optional[str] = None,
|
||||
claim_sequence: Optional[int] = None, channel_name: Optional[str] = None):
|
||||
self.claim_id = claim_id
|
||||
self.outpoint = outpoint
|
||||
self.claim_name = name
|
||||
self.amount = amount
|
||||
self.height = height
|
||||
self.claim: typing.Optional[Claim] = None if not serialized else Claim.from_bytes(
|
||||
binascii.unhexlify(serialized)
|
||||
)
|
||||
self.claim_address = address
|
||||
self.claim_sequence = claim_sequence
|
||||
self.channel_claim_id = channel_claim_id
|
||||
self.channel_name = channel_name
|
||||
|
||||
@property
|
||||
def txid(self) -> typing.Optional[str]:
|
||||
return None if not self.outpoint else self.outpoint.split(":")[0]
|
||||
|
||||
@property
|
||||
def nout(self) -> typing.Optional[int]:
|
||||
return None if not self.outpoint else int(self.outpoint.split(":")[1])
|
||||
|
||||
def as_dict(self) -> typing.Dict:
|
||||
return {
|
||||
"name": self.claim_name,
|
||||
"claim_id": self.claim_id,
|
||||
"address": self.claim_address,
|
||||
"claim_sequence": self.claim_sequence,
|
||||
"value": self.claim,
|
||||
"height": self.height,
|
||||
"amount": dewies_to_lbc(self.amount),
|
||||
"nout": self.nout,
|
||||
"txid": self.txid,
|
||||
"channel_claim_id": self.channel_claim_id,
|
||||
"channel_name": self.channel_name
|
||||
}
|
||||
|
||||
|
||||
def _get_content_claims(transaction: sqlite3.Connection, query: str,
|
||||
source_hashes: typing.List[str]) -> typing.Dict[str, StoredContentClaim]:
|
||||
claims = {}
|
||||
for claim_info in _batched_select(transaction, query, source_hashes):
|
||||
claims[claim_info[0]] = StoredContentClaim(*claim_info[1:])
|
||||
return claims
|
||||
|
||||
|
||||
def get_claims_from_stream_hashes(transaction: sqlite3.Connection,
|
||||
stream_hashes: typing.List[str]) -> typing.Dict[str, StoredContentClaim]:
|
||||
query = (
|
||||
"select content_claim.stream_hash, c.*, case when c.channel_claim_id is not null then "
|
||||
" (select claim_name from claim where claim_id==c.channel_claim_id) "
|
||||
" else null end as channel_name "
|
||||
" from content_claim "
|
||||
" inner join claim c on c.claim_outpoint=content_claim.claim_outpoint and content_claim.stream_hash in {}"
|
||||
" order by c.rowid desc"
|
||||
)
|
||||
return _get_content_claims(transaction, query, stream_hashes)
|
||||
|
||||
|
||||
def get_claims_from_torrent_info_hashes(transaction: sqlite3.Connection,
|
||||
info_hashes: typing.List[str]) -> typing.Dict[str, StoredContentClaim]:
|
||||
query = (
|
||||
"select content_claim.bt_infohash, c.*, case when c.channel_claim_id is not null then "
|
||||
" (select claim_name from claim where claim_id==c.channel_claim_id) "
|
||||
" else null end as channel_name "
|
||||
" from content_claim "
|
||||
" inner join claim c on c.claim_outpoint=content_claim.claim_outpoint and content_claim.bt_infohash in {}"
|
||||
" order by c.rowid desc"
|
||||
)
|
||||
return _get_content_claims(transaction, query, info_hashes)
|
||||
|
||||
|
||||
def _batched_select(transaction, query, parameters, batch_size=900):
|
||||
for start_index in range(0, len(parameters), batch_size):
|
||||
current_batch = parameters[start_index:start_index+batch_size]
|
||||
bind = "({})".format(','.join(['?'] * len(current_batch)))
|
||||
yield from transaction.execute(query.format(bind), current_batch)
|
||||
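# Hedged aside (not part of the original file): _batched_select above chunks the bound parameters
# because sqlite builds with the historical default SQLITE_MAX_VARIABLE_NUMBER accept at most 999
# host parameters per statement; 900 leaves headroom for any extra placeholders in the query
# itself. A hypothetical usage, selecting claims for an arbitrarily long list of ids:
def _demo_batched_claims(transaction, claim_ids):
    # collects every matching row no matter how many ids were passed in
    return list(_batched_select(transaction, "select * from claim where claim_id in {}", claim_ids))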
|
||||
|
||||
def _get_lbry_file_stream_dict(rowid, added_on, stream_hash, file_name, download_dir, data_rate, status,
|
||||
sd_hash, stream_key, stream_name, suggested_file_name, claim, saved_file,
|
||||
raw_content_fee, fully_reflected):
|
||||
return {
|
||||
"rowid": rowid,
|
||||
"added_on": added_on,
|
||||
"stream_hash": stream_hash,
|
||||
"file_name": file_name, # hex
|
||||
"download_directory": download_dir, # hex
|
||||
"blob_data_rate": data_rate,
|
||||
"status": status,
|
||||
"sd_hash": sd_hash,
|
||||
"key": stream_key,
|
||||
"stream_name": stream_name, # hex
|
||||
"suggested_file_name": suggested_file_name, # hex
|
||||
"claim": claim,
|
||||
"saved_file": bool(saved_file),
|
||||
"content_fee": None if not raw_content_fee else Transaction(
|
||||
binascii.unhexlify(raw_content_fee)
|
||||
),
|
||||
"fully_reflected": fully_reflected
|
||||
}
|
||||
|
||||
|
||||
def get_all_lbry_files(transaction: sqlite3.Connection) -> typing.List[typing.Dict]:
|
||||
files = []
|
||||
signed_claims = {}
|
||||
for (rowid, stream_hash, _, file_name, download_dir, data_rate, status, saved_file, raw_content_fee,
|
||||
added_on, _, sd_hash, stream_key, stream_name, suggested_file_name, *claim_args) in transaction.execute(
|
||||
"select file.rowid, file.*, stream.*, c.*, "
|
||||
" case when (SELECT 1 FROM reflected_stream r WHERE r.sd_hash=stream.sd_hash) "
|
||||
" is null then 0 else 1 end as fully_reflected "
|
||||
"from file inner join stream on file.stream_hash=stream.stream_hash "
|
||||
"inner join content_claim cc on file.stream_hash=cc.stream_hash "
|
||||
"inner join claim c on cc.claim_outpoint=c.claim_outpoint "
|
||||
"order by c.rowid desc").fetchall():
|
||||
claim_args, fully_reflected = tuple(claim_args[:-1]), claim_args[-1]
|
||||
claim = StoredContentClaim(*claim_args)
|
||||
if claim.channel_claim_id:
|
||||
if claim.channel_claim_id not in signed_claims:
|
||||
signed_claims[claim.channel_claim_id] = []
|
||||
signed_claims[claim.channel_claim_id].append(claim)
|
||||
files.append(
|
||||
_get_lbry_file_stream_dict(
|
||||
rowid, added_on, stream_hash, file_name, download_dir, data_rate, status,
|
||||
sd_hash, stream_key, stream_name, suggested_file_name, claim, saved_file,
|
||||
raw_content_fee, fully_reflected
|
||||
)
|
||||
)
|
||||
for claim_name, claim_id in _batched_select(
|
||||
transaction, "select c.claim_name, c.claim_id from claim c where c.claim_id in {}",
|
||||
tuple(signed_claims.keys())):
|
||||
for claim in signed_claims[claim_id]:
|
||||
claim.channel_name = claim_name
|
||||
return files
|
||||
|
||||
|
||||
def store_stream(transaction: sqlite3.Connection, sd_blob: 'BlobFile', descriptor: 'StreamDescriptor'):
|
||||
# add all blobs, except the last one, which is empty
|
||||
transaction.executemany(
|
||||
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)",
|
||||
((blob.blob_hash, blob.length, 0, 0, "pending", 0, 0)
|
||||
for blob in (descriptor.blobs[:-1] if len(descriptor.blobs) > 1 else descriptor.blobs) + [sd_blob])
|
||||
).fetchall()
|
||||
# associate the blobs to the stream
|
||||
transaction.execute("insert or ignore into stream values (?, ?, ?, ?, ?)",
|
||||
(descriptor.stream_hash, sd_blob.blob_hash, descriptor.key,
|
||||
binascii.hexlify(descriptor.stream_name.encode()).decode(),
|
||||
binascii.hexlify(descriptor.suggested_file_name.encode()).decode())).fetchall()
|
||||
# add the stream
|
||||
transaction.executemany(
|
||||
"insert or ignore into stream_blob values (?, ?, ?, ?)",
|
||||
((descriptor.stream_hash, blob.blob_hash, blob.blob_num, blob.iv)
|
||||
for blob in descriptor.blobs)
|
||||
).fetchall()
|
||||
# ensure should_announce is set regardless if insert was ignored
|
||||
transaction.execute(
|
||||
"update blob set should_announce=1 where blob_hash in (?, ?)",
|
||||
(sd_blob.blob_hash, descriptor.blobs[0].blob_hash,)
|
||||
).fetchall()
|
||||
|
||||
|
||||
def delete_stream(transaction: sqlite3.Connection, descriptor: 'StreamDescriptor'):
|
||||
blob_hashes = [(blob.blob_hash, ) for blob in descriptor.blobs[:-1]]
|
||||
blob_hashes.append((descriptor.sd_hash, ))
|
||||
transaction.execute("delete from content_claim where stream_hash=? ", (descriptor.stream_hash,)).fetchall()
|
||||
transaction.execute("delete from file where stream_hash=? ", (descriptor.stream_hash,)).fetchall()
|
||||
transaction.execute("delete from stream_blob where stream_hash=?", (descriptor.stream_hash,)).fetchall()
|
||||
transaction.execute("delete from stream where stream_hash=? ", (descriptor.stream_hash,)).fetchall()
|
||||
transaction.executemany("delete from blob where blob_hash=?", blob_hashes).fetchall()
|
||||
|
||||
|
||||
def delete_torrent(transaction: sqlite3.Connection, bt_infohash: str):
|
||||
transaction.execute("delete from content_claim where bt_infohash=?", (bt_infohash, )).fetchall()
|
||||
transaction.execute("delete from torrent_tracker where bt_infohash=?", (bt_infohash,)).fetchall()
|
||||
transaction.execute("delete from torrent_node where bt_infohash=?", (bt_infohash,)).fetchall()
|
||||
transaction.execute("delete from torrent_http_seed where bt_infohash=?", (bt_infohash,)).fetchall()
|
||||
transaction.execute("delete from file where bt_infohash=?", (bt_infohash,)).fetchall()
|
||||
transaction.execute("delete from torrent where bt_infohash=?", (bt_infohash,)).fetchall()
|
||||
|
||||
|
||||
def store_file(transaction: sqlite3.Connection, stream_hash: str, file_name: typing.Optional[str],
|
||||
download_directory: typing.Optional[str], data_payment_rate: float, status: str,
|
||||
content_fee: typing.Optional[Transaction], added_on: typing.Optional[int] = None) -> int:
|
||||
if not file_name and not download_directory:
|
||||
encoded_file_name, encoded_download_dir = None, None
|
||||
else:
|
||||
encoded_file_name = binascii.hexlify(file_name.encode()).decode()
|
||||
encoded_download_dir = binascii.hexlify(download_directory.encode()).decode()
|
||||
time_added = added_on or int(time.time())
|
||||
transaction.execute(
|
||||
"insert or replace into file values (?, NULL, ?, ?, ?, ?, ?, ?, ?)",
|
||||
(stream_hash, encoded_file_name, encoded_download_dir, data_payment_rate, status,
|
||||
1 if (file_name and download_directory and os.path.isfile(os.path.join(download_directory, file_name))) else 0,
|
||||
None if not content_fee else binascii.hexlify(content_fee.raw).decode(), time_added)
|
||||
).fetchall()
|
||||
|
||||
return transaction.execute("select rowid from file where stream_hash=?", (stream_hash, )).fetchone()[0]
|
||||
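# Illustrative sketch (not part of the original file): file_name and download_directory are stored
# hex-encoded by store_file above, so reading a row back requires the reverse transform.
def _demo_decode_file_row(encoded_file_name, encoded_download_dir):
    import binascii
    file_name = binascii.unhexlify(encoded_file_name).decode() if encoded_file_name else None
    download_dir = binascii.unhexlify(encoded_download_dir).decode() if encoded_download_dir else None
    return file_name, download_dir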
|
||||
|
||||
class SQLiteStorage(SQLiteMixin):
|
||||
CREATE_TABLES_QUERY = """
|
||||
pragma foreign_keys=on;
|
||||
pragma journal_mode=WAL;
|
||||
|
||||
create table if not exists blob (
|
||||
blob_hash char(96) primary key not null,
|
||||
blob_length integer not null,
|
||||
next_announce_time integer not null,
|
||||
should_announce integer not null default 0,
|
||||
status text not null,
|
||||
last_announced_time integer,
|
||||
single_announce integer
|
||||
);
|
||||
|
||||
create table if not exists stream (
|
||||
stream_hash char(96) not null primary key,
|
||||
sd_hash char(96) not null references blob,
|
||||
stream_key text not null,
|
||||
stream_name text not null,
|
||||
suggested_filename text not null
|
||||
);
|
||||
|
||||
create table if not exists stream_blob (
|
||||
stream_hash char(96) not null references stream,
|
||||
blob_hash char(96) references blob,
|
||||
position integer not null,
|
||||
iv char(32) not null,
|
||||
primary key (stream_hash, blob_hash)
|
||||
);
|
||||
|
||||
create table if not exists claim (
|
||||
claim_outpoint text not null primary key,
|
||||
claim_id char(40) not null,
|
||||
claim_name text not null,
|
||||
amount integer not null,
|
||||
height integer not null,
|
||||
serialized_metadata blob not null,
|
||||
channel_claim_id text,
|
||||
address text not null,
|
||||
claim_sequence integer not null
|
||||
);
|
||||
|
||||
create table if not exists torrent (
|
||||
bt_infohash char(20) not null primary key,
|
||||
tracker text,
|
||||
length integer not null,
|
||||
name text not null
|
||||
);
|
||||
|
||||
create table if not exists torrent_node ( -- BEP-0005
|
||||
bt_infohash char(20) not null references torrent,
|
||||
host text not null,
|
||||
port integer not null
|
||||
);
|
||||
|
||||
create table if not exists torrent_tracker ( -- BEP-0012
|
||||
bt_infohash char(20) not null references torrent,
|
||||
tracker text not null
|
||||
);
|
||||
|
||||
create table if not exists torrent_http_seed ( -- BEP-0017
|
||||
bt_infohash char(20) not null references torrent,
|
||||
http_seed text not null
|
||||
);
|
||||
|
||||
create table if not exists file (
|
||||
stream_hash char(96) references stream,
|
||||
bt_infohash char(20) references torrent,
|
||||
file_name text,
|
||||
download_directory text,
|
||||
blob_data_rate real not null,
|
||||
status text not null,
|
||||
saved_file integer not null,
|
||||
content_fee text,
|
||||
added_on integer not null
|
||||
);
|
||||
|
||||
create table if not exists content_claim (
|
||||
stream_hash char(96) references stream,
|
||||
bt_infohash char(20) references torrent,
|
||||
claim_outpoint text unique not null references claim
|
||||
);
|
||||
|
||||
create table if not exists support (
|
||||
support_outpoint text not null primary key,
|
||||
claim_id text not null,
|
||||
amount integer not null,
|
||||
address text not null
|
||||
);
|
||||
|
||||
create table if not exists reflected_stream (
|
||||
sd_hash text not null,
|
||||
reflector_address text not null,
|
||||
timestamp integer,
|
||||
primary key (sd_hash, reflector_address)
|
||||
);
|
||||
|
||||
create table if not exists peer (
|
||||
node_id char(96) not null primary key,
|
||||
address text not null,
|
||||
udp_port integer not null,
|
||||
tcp_port integer,
|
||||
unique (address, udp_port)
|
||||
);
|
||||
"""
|
||||
|
||||
def __init__(self, conf: Config, path, loop=None, time_getter: typing.Optional[typing.Callable[[], float]] = None):
|
||||
super().__init__(path)
|
||||
self.conf = conf
|
||||
self.content_claim_callbacks = {}
|
||||
self.loop = loop or asyncio.get_event_loop()
|
||||
self.time_getter = time_getter or time.time
|
||||
|
||||
async def run_and_return_one_or_none(self, query, *args):
|
||||
for row in await self.db.execute_fetchall(query, args):
|
||||
if len(row) == 1:
|
||||
return row[0]
|
||||
return row
|
||||
|
||||
async def run_and_return_list(self, query, *args):
|
||||
rows = list(await self.db.execute_fetchall(query, args))
|
||||
return [col[0] for col in rows] if rows else []
|
||||
|
||||
# # # # # # # # # blob functions # # # # # # # # #
|
||||
|
||||
async def add_blobs(self, *blob_hashes_and_lengths: typing.Tuple[str, int], finished=False):
|
||||
def _add_blobs(transaction: sqlite3.Connection):
|
||||
transaction.executemany(
|
||||
"insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)",
|
||||
(
|
||||
(blob_hash, length, 0, 0, "pending" if not finished else "finished", 0, 0)
|
||||
for blob_hash, length in blob_hashes_and_lengths
|
||||
)
|
||||
).fetchall()
|
||||
if finished:
|
||||
transaction.executemany(
|
||||
"update blob set status='finished' where blob.blob_hash=?", (
|
||||
(blob_hash, ) for blob_hash, _ in blob_hashes_and_lengths
|
||||
)
|
||||
).fetchall()
|
||||
return await self.db.run(_add_blobs)
|
||||
|
||||
def get_blob_status(self, blob_hash: str):
|
||||
return self.run_and_return_one_or_none(
|
||||
"select status from blob where blob_hash=?", blob_hash
|
||||
)
|
||||
|
||||
def update_last_announced_blobs(self, blob_hashes: typing.List[str]):
|
||||
def _update_last_announced_blobs(transaction: sqlite3.Connection):
|
||||
last_announced = self.time_getter()
|
||||
return transaction.executemany(
|
||||
"update blob set next_announce_time=?, last_announced_time=?, single_announce=0 "
|
||||
"where blob_hash=?",
|
||||
((int(last_announced + (DATA_EXPIRATION / 2)), int(last_announced), blob_hash)
|
||||
for blob_hash in blob_hashes)
|
||||
).fetchall()
|
||||
return self.db.run(_update_last_announced_blobs)
|
||||
|
||||
def should_single_announce_blobs(self, blob_hashes, immediate=False):
|
||||
def set_single_announce(transaction):
|
||||
now = int(self.time_getter())
|
||||
for blob_hash in blob_hashes:
|
||||
if immediate:
|
||||
transaction.execute(
|
||||
"update blob set single_announce=1, next_announce_time=? "
|
||||
"where blob_hash=? and status='finished'", (int(now), blob_hash)
|
||||
).fetchall()
|
||||
else:
|
||||
transaction.execute(
|
||||
"update blob set single_announce=1 where blob_hash=? and status='finished'", (blob_hash,)
|
||||
).fetchall()
|
||||
return self.db.run(set_single_announce)
|
||||
|
||||
def get_blobs_to_announce(self):
|
||||
def get_and_update(transaction):
|
||||
timestamp = int(self.time_getter())
|
||||
if self.conf.announce_head_and_sd_only:
|
||||
r = transaction.execute(
|
||||
"select blob_hash from blob "
|
||||
"where blob_hash is not null and "
|
||||
"(should_announce=1 or single_announce=1) and next_announce_time<? and status='finished' "
|
||||
"order by next_announce_time asc limit ?",
|
||||
(timestamp, int(self.conf.concurrent_blob_announcers * 10))
|
||||
).fetchall()
|
||||
else:
|
||||
r = transaction.execute(
|
||||
"select blob_hash from blob where blob_hash is not null "
|
||||
"and next_announce_time<? and status='finished' "
|
||||
"order by next_announce_time asc limit ?",
|
||||
(timestamp, int(self.conf.concurrent_blob_announcers * 10))
|
||||
).fetchall()
|
||||
return [b[0] for b in r]
|
||||
return self.db.run(get_and_update)
|
||||
|
||||
def delete_blobs_from_db(self, blob_hashes):
|
||||
def delete_blobs(transaction):
|
||||
transaction.executemany(
|
||||
"delete from blob where blob_hash=?;", ((blob_hash,) for blob_hash in blob_hashes)
|
||||
).fetchall()
|
||||
return self.db.run_with_foreign_keys_disabled(delete_blobs)
|
||||
|
||||
def get_all_blob_hashes(self):
|
||||
return self.run_and_return_list("select blob_hash from blob")
|
||||
|
||||
def sync_missing_blobs(self, blob_files: typing.Set[str]) -> typing.Awaitable[typing.Set[str]]:
|
||||
def _sync_blobs(transaction: sqlite3.Connection) -> typing.Set[str]:
|
||||
finished_blob_hashes = tuple(
|
||||
blob_hash for (blob_hash, ) in transaction.execute(
|
||||
"select blob_hash from blob where status='finished'"
|
||||
).fetchall()
|
||||
)
|
||||
finished_blobs_set = set(finished_blob_hashes)
|
||||
to_update_set = finished_blobs_set.difference(blob_files)
|
||||
transaction.executemany(
|
||||
"update blob set status='pending' where blob_hash=?",
|
||||
((blob_hash, ) for blob_hash in to_update_set)
|
||||
).fetchall()
|
||||
return blob_files.intersection(finished_blobs_set)
|
||||
return self.db.run(_sync_blobs)
|
||||
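# Hedged restatement (not part of the original file) of the set arithmetic in _sync_blobs above:
# blobs marked finished in the database but missing from the blob directory are flipped back to
# pending, and the awaitable resolves to the hashes that are both finished and still on disk.
def _demo_sync_sets(finished_in_db: set, files_on_disk: set):
    to_mark_pending = finished_in_db - files_on_disk
    still_available = files_on_disk & finished_in_db
    return to_mark_pending, still_available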
|
||||
# # # # # # # # # stream functions # # # # # # # # #
|
||||
|
||||
async def stream_exists(self, sd_hash: str) -> bool:
|
||||
streams = await self.run_and_return_one_or_none("select stream_hash from stream where sd_hash=?", sd_hash)
|
||||
return streams is not None
|
||||
|
||||
async def file_exists(self, sd_hash: str) -> bool:
|
||||
streams = await self.run_and_return_one_or_none("select f.stream_hash from file f "
|
||||
"inner join stream s on "
|
||||
"s.stream_hash=f.stream_hash and s.sd_hash=?", sd_hash)
|
||||
return streams is not None
|
||||
|
||||
def store_stream(self, sd_blob: 'BlobFile', descriptor: 'StreamDescriptor'):
|
||||
return self.db.run(store_stream, sd_blob, descriptor)
|
||||
|
||||
def get_blobs_for_stream(self, stream_hash, only_completed=False) -> typing.Awaitable[typing.List[BlobInfo]]:
|
||||
def _get_blobs_for_stream(transaction):
|
||||
crypt_blob_infos = []
|
||||
stream_blobs = transaction.execute(
|
||||
"select blob_hash, position, iv from stream_blob where stream_hash=? "
|
||||
"order by position asc", (stream_hash, )
|
||||
).fetchall()
|
||||
if only_completed:
|
||||
lengths = transaction.execute(
|
||||
"select b.blob_hash, b.blob_length from blob b "
|
||||
"inner join stream_blob s ON b.blob_hash=s.blob_hash and b.status='finished' and s.stream_hash=?",
|
||||
(stream_hash, )
|
||||
).fetchall()
|
||||
else:
|
||||
lengths = transaction.execute(
|
||||
"select b.blob_hash, b.blob_length from blob b "
|
||||
"inner join stream_blob s ON b.blob_hash=s.blob_hash and s.stream_hash=?",
|
||||
(stream_hash, )
|
||||
).fetchall()
|
||||
|
||||
blob_length_dict = {}
|
||||
for blob_hash, length in lengths:
|
||||
blob_length_dict[blob_hash] = length
|
||||
|
||||
for blob_hash, position, iv in stream_blobs:
|
||||
blob_length = blob_length_dict.get(blob_hash, 0)
|
||||
crypt_blob_infos.append(BlobInfo(position, blob_length, iv, blob_hash))
|
||||
if not blob_hash:
|
||||
break
|
||||
return crypt_blob_infos
|
||||
return self.db.run(_get_blobs_for_stream)
|
||||
|
||||
def get_sd_blob_hash_for_stream(self, stream_hash):
|
||||
return self.run_and_return_one_or_none(
|
||||
"select sd_hash from stream where stream_hash=?", stream_hash
|
||||
)
|
||||
|
||||
def get_stream_hash_for_sd_hash(self, sd_blob_hash):
|
||||
return self.run_and_return_one_or_none(
|
||||
"select stream_hash from stream where sd_hash = ?", sd_blob_hash
|
||||
)
|
||||
|
||||
def delete_stream(self, descriptor: 'StreamDescriptor'):
|
||||
return self.db.run_with_foreign_keys_disabled(delete_stream, descriptor)
|
||||
|
||||
async def delete_torrent(self, bt_infohash: str):
|
||||
return await self.db.run(delete_torrent, bt_infohash)
|
||||
|
||||
# # # # # # # # # file stuff # # # # # # # # #
|
||||
|
||||
def save_downloaded_file(self, stream_hash: str, file_name: typing.Optional[str],
|
||||
download_directory: typing.Optional[str], data_payment_rate: float,
|
||||
content_fee: typing.Optional[Transaction] = None,
|
||||
added_on: typing.Optional[int] = None) -> typing.Awaitable[int]:
|
||||
return self.save_published_file(
|
||||
stream_hash, file_name, download_directory, data_payment_rate, status="running",
|
||||
content_fee=content_fee, added_on=added_on
|
||||
)
|
||||
|
||||
def save_published_file(self, stream_hash: str, file_name: typing.Optional[str],
|
||||
download_directory: typing.Optional[str], data_payment_rate: float,
|
||||
status: str = "finished",
|
||||
content_fee: typing.Optional[Transaction] = None,
|
||||
added_on: typing.Optional[int] = None) -> typing.Awaitable[int]:
|
||||
return self.db.run(store_file, stream_hash, file_name, download_directory, data_payment_rate, status,
|
||||
content_fee, added_on)
|
||||
|
||||
async def update_manually_removed_files_since_last_run(self):
|
||||
"""
|
||||
Update files that have been removed from the downloads directory since the last run
|
||||
"""
|
||||
def update_manually_removed_files(transaction: sqlite3.Connection):
|
||||
files = {}
|
||||
query = "select stream_hash, download_directory, file_name from file where saved_file=1 " \
|
||||
"and stream_hash is not null"
|
||||
for (stream_hash, download_directory, file_name) in transaction.execute(query).fetchall():
|
||||
if download_directory and file_name:
|
||||
files[stream_hash] = download_directory, file_name
|
||||
return files
|
||||
|
||||
def detect_removed(files):
|
||||
return [
|
||||
stream_hash for stream_hash, (download_directory, file_name) in files.items()
|
||||
if not os.path.isfile(os.path.join(binascii.unhexlify(download_directory).decode(),
|
||||
binascii.unhexlify(file_name).decode()))
|
||||
]
|
||||
|
||||
def update_db_removed(transaction: sqlite3.Connection, removed):
|
||||
query = "update file set file_name=null, download_directory=null, saved_file=0 where stream_hash in {}"
|
||||
for cur in _batched_select(transaction, query, removed):
|
||||
cur.fetchall()
|
||||
|
||||
stream_and_file = await self.db.run(update_manually_removed_files)
|
||||
removed = await self.loop.run_in_executor(None, detect_removed, stream_and_file)
|
||||
if removed:
|
||||
await self.db.run(update_db_removed, removed)
|
||||
|
||||
def get_all_lbry_files(self) -> typing.Awaitable[typing.List[typing.Dict]]:
|
||||
return self.db.run(get_all_lbry_files)
|
||||
|
||||
def change_file_status(self, stream_hash: str, new_status: str):
|
||||
log.debug("update file status %s -> %s", stream_hash, new_status)
|
||||
return self.db.execute_fetchall("update file set status=? where stream_hash=?", (new_status, stream_hash))
|
||||
|
||||
async def change_file_download_dir_and_file_name(self, stream_hash: str, download_dir: typing.Optional[str],
|
||||
file_name: typing.Optional[str]):
|
||||
if not file_name or not download_dir:
|
||||
encoded_file_name, encoded_download_dir = None, None
|
||||
else:
|
||||
encoded_file_name = binascii.hexlify(file_name.encode()).decode()
|
||||
encoded_download_dir = binascii.hexlify(download_dir.encode()).decode()
|
||||
return await self.db.execute_fetchall("update file set download_directory=?, file_name=? where stream_hash=?", (
|
||||
encoded_download_dir, encoded_file_name, stream_hash,
|
||||
))
|
||||
|
||||
async def save_content_fee(self, stream_hash: str, content_fee: Transaction):
|
||||
return await self.db.execute_fetchall("update file set content_fee=? where stream_hash=?", (
|
||||
binascii.hexlify(content_fee.raw), stream_hash,
|
||||
))
|
||||
|
||||
async def set_saved_file(self, stream_hash: str):
|
||||
return await self.db.execute_fetchall("update file set saved_file=1 where stream_hash=?", (
|
||||
stream_hash,
|
||||
))
|
||||
|
||||
async def clear_saved_file(self, stream_hash: str):
|
||||
return await self.db.execute_fetchall("update file set saved_file=0 where stream_hash=?", (
|
||||
stream_hash,
|
||||
))
|
||||
|
||||
async def recover_streams(self, descriptors_and_sds: typing.List[typing.Tuple['StreamDescriptor', 'BlobFile',
|
||||
typing.Optional[Transaction]]],
|
||||
download_directory: str):
|
||||
def _recover(transaction: sqlite3.Connection):
|
||||
stream_hashes = [x[0].stream_hash for x in descriptors_and_sds]
|
||||
for descriptor, sd_blob, content_fee in descriptors_and_sds:
|
||||
content_claim = transaction.execute(
|
||||
"select * from content_claim where stream_hash=?", (descriptor.stream_hash, )
|
||||
).fetchone()
|
||||
delete_stream(transaction, descriptor) # this will also delete the content claim
|
||||
store_stream(transaction, sd_blob, descriptor)
|
||||
store_file(transaction, descriptor.stream_hash, os.path.basename(descriptor.suggested_file_name),
|
||||
download_directory, 0.0, 'stopped', content_fee=content_fee)
|
||||
if content_claim:
|
||||
transaction.execute("insert or ignore into content_claim values (?, ?, ?)", content_claim)
|
||||
transaction.executemany(
|
||||
"update file set status='stopped' where stream_hash=?",
|
||||
((stream_hash, ) for stream_hash in stream_hashes)
|
||||
).fetchall()
|
||||
download_dir = binascii.hexlify(self.conf.download_dir.encode()).decode()
|
||||
transaction.executemany(
|
||||
f"update file set download_directory=? where stream_hash=?",
|
||||
((download_dir, stream_hash) for stream_hash in stream_hashes)
|
||||
).fetchall()
|
||||
await self.db.run_with_foreign_keys_disabled(_recover)
|
||||
|
||||
def get_all_stream_hashes(self):
|
||||
return self.run_and_return_list("select stream_hash from stream")
|
||||
|
||||
# # # # # # # # # support functions # # # # # # # # #
|
||||
|
||||
def save_supports(self, claim_id_to_supports: dict):
|
||||
# TODO: add 'address' to support items returned for a claim from lbrycrdd and lbryum-server
|
||||
def _save_support(transaction):
|
||||
bind = "({})".format(','.join(['?'] * len(claim_id_to_supports)))
|
||||
transaction.execute(
|
||||
f"delete from support where claim_id in {bind}", tuple(claim_id_to_supports.keys())
|
||||
).fetchall()
|
||||
for claim_id, supports in claim_id_to_supports.items():
|
||||
for support in supports:
|
||||
transaction.execute(
|
||||
"insert into support values (?, ?, ?, ?)",
|
||||
("%s:%i" % (support['txid'], support['nout']), claim_id, lbc_to_dewies(support['amount']),
|
||||
support.get('address', ""))
|
||||
).fetchall()
|
||||
return self.db.run(_save_support)
|
||||
|
||||
def get_supports(self, *claim_ids):
|
||||
def _format_support(outpoint, supported_id, amount, address):
|
||||
return {
|
||||
"txid": outpoint.split(":")[0],
|
||||
"nout": int(outpoint.split(":")[1]),
|
||||
"claim_id": supported_id,
|
||||
"amount": dewies_to_lbc(amount),
|
||||
"address": address,
|
||||
}
|
||||
|
||||
def _get_supports(transaction):
|
||||
return [
|
||||
_format_support(*support_info)
|
||||
for support_info in _batched_select(
|
||||
transaction,
|
||||
"select * from support where claim_id in {}",
|
||||
claim_ids
|
||||
)
|
||||
]
|
||||
|
||||
return self.db.run(_get_supports)
|
||||
|
||||
# # # # # # # # # claim functions # # # # # # # # #
|
||||
|
||||
async def save_claims(self, claim_infos):
|
||||
claim_id_to_supports = {}
|
||||
update_file_callbacks = []
|
||||
|
||||
def _save_claims(transaction):
|
||||
content_claims_to_update = []
|
||||
for claim_info in claim_infos:
|
||||
outpoint = "%s:%i" % (claim_info['txid'], claim_info['nout'])
|
||||
claim_id = claim_info['claim_id']
|
||||
name = claim_info['name']
|
||||
amount = lbc_to_dewies(claim_info['amount'])
|
||||
height = claim_info['height']
|
||||
address = claim_info['address']
|
||||
sequence = claim_info['claim_sequence']
|
||||
certificate_id = claim_info['value'].signing_channel_id
|
||||
try:
|
||||
source_hash = claim_info['value'].stream.source.sd_hash
|
||||
except (AttributeError, ValueError):
|
||||
source_hash = None
|
||||
serialized = binascii.hexlify(claim_info['value'].to_bytes())
|
||||
transaction.execute(
|
||||
"insert or replace into claim values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
|
||||
(outpoint, claim_id, name, amount, height, serialized, certificate_id, address, sequence)
|
||||
).fetchall()
|
||||
# if this response doesn't have support info don't overwrite the existing
|
||||
# support info
|
||||
if 'supports' in claim_info:
|
||||
claim_id_to_supports[claim_id] = claim_info['supports']
|
||||
if not source_hash:
|
||||
continue
|
||||
stream_hash = transaction.execute(
|
||||
"select file.stream_hash from stream "
|
||||
"inner join file on file.stream_hash=stream.stream_hash where sd_hash=?", (source_hash,)
|
||||
).fetchone()
|
||||
if not stream_hash:
|
||||
continue
|
||||
stream_hash = stream_hash[0]
|
||||
known_outpoint = transaction.execute(
|
||||
"select claim_outpoint from content_claim where stream_hash=?", (stream_hash,)
|
||||
).fetchone()
|
||||
known_claim_id = transaction.execute(
|
||||
"select claim_id from claim "
|
||||
"inner join content_claim c3 ON claim.claim_outpoint=c3.claim_outpoint "
|
||||
"where c3.stream_hash=?", (stream_hash,)
|
||||
).fetchone()
|
||||
if not known_claim_id:
|
||||
content_claims_to_update.append((stream_hash, outpoint))
|
||||
elif known_outpoint != outpoint:
|
||||
content_claims_to_update.append((stream_hash, outpoint))
|
||||
for stream_hash, outpoint in content_claims_to_update:
|
||||
self._save_content_claim(transaction, outpoint, stream_hash)
|
||||
if stream_hash in self.content_claim_callbacks:
|
||||
update_file_callbacks.append(self.content_claim_callbacks[stream_hash]())
|
||||
|
||||
await self.db.run(_save_claims)
|
||||
if update_file_callbacks:
|
||||
await asyncio.wait(update_file_callbacks)
|
||||
if claim_id_to_supports:
|
||||
await self.save_supports(claim_id_to_supports)
|
||||
|
||||
def save_claims_for_resolve(self, claim_infos):
|
||||
to_save = {}
|
||||
for info in claim_infos:
|
||||
if 'value' in info:
|
||||
if info['value']:
|
||||
to_save[info['claim_id']] = info
|
||||
else:
|
||||
for key in ('certificate', 'claim'):
|
||||
if info.get(key, {}).get('value'):
|
||||
to_save[info[key]['claim_id']] = info[key]
|
||||
return self.save_claims(to_save.values())
|
||||
|
||||
@staticmethod
|
||||
def _save_content_claim(transaction, claim_outpoint, stream_hash):
|
||||
# get the claim id and serialized metadata
|
||||
claim_info = transaction.execute(
|
||||
"select claim_id, serialized_metadata from claim where claim_outpoint=?", (claim_outpoint,)
|
||||
).fetchone()
|
||||
if not claim_info:
|
||||
raise Exception("claim not found")
|
||||
new_claim_id, claim = claim_info[0], Claim.from_bytes(binascii.unhexlify(claim_info[1]))
|
||||
|
||||
# certificate claims should not be in the content_claim table
|
||||
if not claim.is_stream:
|
||||
raise Exception("claim does not contain a stream")
|
||||
|
||||
# get the known sd hash for this stream
|
||||
known_sd_hash = transaction.execute(
|
||||
"select sd_hash from stream where stream_hash=?", (stream_hash,)
|
||||
).fetchone()
|
||||
if not known_sd_hash:
|
||||
raise Exception("stream not found")
|
||||
# check the claim contains the same sd hash
|
||||
if known_sd_hash[0] != claim.stream.source.sd_hash:
|
||||
raise Exception("stream mismatch")
|
||||
|
||||
# if there is a current claim associated to the file, check that the new claim is an update to it
|
||||
current_associated_content = transaction.execute(
|
||||
"select claim_outpoint from content_claim where stream_hash=?", (stream_hash,)
|
||||
).fetchone()
|
||||
if current_associated_content:
|
||||
current_associated_claim_id = transaction.execute(
|
||||
"select claim_id from claim where claim_outpoint=?", current_associated_content
|
||||
).fetchone()[0]
|
||||
if current_associated_claim_id != new_claim_id:
|
||||
raise Exception(
|
||||
f"mismatching claim ids when updating stream {current_associated_claim_id} vs {new_claim_id}"
|
||||
)
|
||||
|
||||
# update the claim associated to the file
|
||||
transaction.execute("delete from content_claim where stream_hash=?", (stream_hash, )).fetchall()
|
||||
transaction.execute(
|
||||
"insert into content_claim values (?, NULL, ?)", (stream_hash, claim_outpoint)
|
||||
).fetchall()
|
||||
|
||||
async def save_content_claim(self, stream_hash, claim_outpoint):
|
||||
await self.db.run(self._save_content_claim, claim_outpoint, stream_hash)
|
||||
# update corresponding ManagedEncryptedFileDownloader object
|
||||
if stream_hash in self.content_claim_callbacks:
|
||||
await self.content_claim_callbacks[stream_hash]()
|
||||
|
||||
async def get_content_claim(self, stream_hash: str, include_supports: typing.Optional[bool] = True) -> typing.Dict:
|
||||
claims = await self.db.run(get_claims_from_stream_hashes, [stream_hash])
|
||||
claim = None
|
||||
if claims:
|
||||
claim = claims[stream_hash].as_dict()
|
||||
if include_supports:
|
||||
supports = await self.get_supports(claim['claim_id'])
|
||||
claim['supports'] = supports
|
||||
claim['effective_amount'] = calculate_effective_amount(claim['amount'], supports)
|
||||
return claim
|
||||
|
||||
# # # # # # # # # reflector functions # # # # # # # # #
|
||||
|
||||
def update_reflected_stream(self, sd_hash, reflector_address, success=True):
|
||||
if success:
|
||||
return self.db.execute_fetchall(
|
||||
"insert or replace into reflected_stream values (?, ?, ?)",
|
||||
(sd_hash, reflector_address, self.time_getter())
|
||||
)
|
||||
return self.db.execute_fetchall(
|
||||
"delete from reflected_stream where sd_hash=? and reflector_address=?",
|
||||
(sd_hash, reflector_address)
|
||||
)
|
||||
|
||||
def get_streams_to_re_reflect(self):
|
||||
return self.run_and_return_list(
|
||||
"select s.sd_hash from stream s "
|
||||
"left outer join reflected_stream r on s.sd_hash=r.sd_hash "
|
||||
"where r.timestamp is null or r.timestamp < ?",
|
||||
int(self.time_getter()) - 86400
|
||||
)
|
||||
|
||||
# # # # # # # # # # dht functions # # # # # # # # # # #
|
||||
async def get_persisted_kademlia_peers(self) -> typing.List[typing.Tuple[bytes, str, int, int]]:
|
||||
query = 'select node_id, address, udp_port, tcp_port from peer'
|
||||
return [(binascii.unhexlify(n), a, u, t) for n, a, u, t in await self.db.execute_fetchall(query)]
|
||||
|
||||
async def save_kademlia_peers(self, peers: typing.List['KademliaPeer']):
|
||||
def _save_kademlia_peers(transaction: sqlite3.Connection):
|
||||
transaction.execute('delete from peer').fetchall()
|
||||
transaction.executemany(
|
||||
'insert into peer(node_id, address, udp_port, tcp_port) values (?, ?, ?, ?)',
|
||||
tuple([(binascii.hexlify(p.node_id), p.address, p.udp_port, p.tcp_port) for p in peers])
|
||||
).fetchall()
|
||||
return await self.db.run(_save_kademlia_peers)
|
|
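# Hedged aside (not part of the original file): save_kademlia_peers above only touches four
# attributes of 'KademliaPeer' (node_id, address, udp_port, tcp_port), so any object exposing
# those fields round-trips through the peer table. A hypothetical stand-in for illustration:
#
#   import collections
#   DemoPeer = collections.namedtuple("DemoPeer", "node_id address udp_port tcp_port")
#   peer = DemoPeer(node_id=bytes(48), address="127.0.0.1", udp_port=4444, tcp_port=3333)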
@ -1,62 +0,0 @@
|
|||
# Copyright 2016-2017 Ionuț Arțăriși <ionut@artarisi.eu>
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# This came from https://github.com/mapleoin/undecorated
|
||||
|
||||
from inspect import isfunction, ismethod, isclass
|
||||
|
||||
__version__ = '0.3.0'
|
||||
|
||||
|
||||
def undecorated(o):
|
||||
"""Remove all decorators from a function, method or class"""
|
||||
# class decorator
|
||||
if isinstance(o, type):
|
||||
return o
|
||||
|
||||
try:
|
||||
# python2
|
||||
closure = o.func_closure
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
try:
|
||||
# python3
|
||||
closure = o.__closure__
|
||||
except AttributeError:
|
||||
return
|
||||
|
||||
if closure:
|
||||
for cell in closure:
|
||||
# avoid infinite recursion
|
||||
if cell.cell_contents is o:
|
||||
continue
|
||||
|
||||
# check if the contents looks like a decorator; in that case
|
||||
# we need to go one level down into the dream, otherwise it
|
||||
# might just be a different closed-over variable, which we
|
||||
# can ignore.
|
||||
|
||||
# Note: this favors supporting decorators defined without
|
||||
# @wraps to the detriment of function/method/class closures
|
||||
if looks_like_a_decorator(cell.cell_contents):
|
||||
undecd = undecorated(cell.cell_contents)
|
||||
if undecd:
|
||||
return undecd
|
||||
return o
|
||||
|
||||
|
||||
def looks_like_a_decorator(a):
|
||||
return isfunction(a) or ismethod(a) or isclass(a)
|
|
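# Illustrative usage (not part of the original module): undecorated() walks a decorated callable's
# closure looking for the innermost plain function, which helps when a decorator was applied
# without functools.wraps. The decorator and function names below are hypothetical.
#
#   def shouty(fn):
#       def wrapper(*args, **kwargs):
#           return fn(*args, **kwargs).upper()
#       return wrapper
#
#   @shouty
#   def greet(name):
#       return f"hello {name}"
#
#   undecorated(greet)("world")  # -> "hello world" (the un-wrapped function)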
@ -1,29 +0,0 @@
|
|||
import platform
|
||||
import os
|
||||
import logging.handlers
|
||||
|
||||
from lbry import build_info, __version__ as lbrynet_version
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_platform() -> dict:
|
||||
os_system = platform.system()
|
||||
if os.environ and 'ANDROID_ARGUMENT' in os.environ:
|
||||
os_system = 'android'
|
||||
d = {
|
||||
"processor": platform.processor(),
|
||||
"python_version": platform.python_version(),
|
||||
"platform": platform.platform(),
|
||||
"os_release": platform.release(),
|
||||
"os_system": os_system,
|
||||
"lbrynet_version": lbrynet_version,
|
||||
"version": lbrynet_version,
|
||||
"build": build_info.BUILD, # CI server sets this during build step
|
||||
}
|
||||
if d["os_system"] == "Linux":
|
||||
import distro # pylint: disable=import-outside-toplevel
|
||||
d["distro"] = distro.info()
|
||||
d["desktop"] = os.environ.get('XDG_CURRENT_DESKTOP', 'Unknown')
|
||||
|
||||
return d
|
|
@ -54,11 +54,11 @@ class VideoFileAnalyzer:
|
|||
# We work around that issue here by using run_in_executor. Check it again in Python 3.8.
|
||||
async def _execute_ffmpeg(self, arguments):
|
||||
arguments = self._which_ffmpeg + " " + arguments
|
||||
return await asyncio.get_event_loop().run_in_executor(None, self._execute, arguments, self._env_copy)
|
||||
return await asyncio.get_running_loop().run_in_executor(None, self._execute, arguments, self._env_copy)
|
||||
|
||||
async def _execute_ffprobe(self, arguments):
|
||||
arguments = self._which_ffprobe + " " + arguments
|
||||
return await asyncio.get_event_loop().run_in_executor(None, self._execute, arguments, self._env_copy)
|
||||
return await asyncio.get_running_loop().run_in_executor(None, self._execute, arguments, self._env_copy)
|
||||
|
||||
async def _verify_executables(self):
|
||||
try:
|
||||
|
@ -69,8 +69,8 @@ class VideoFileAnalyzer:
|
|||
version = str(e)
|
||||
if code != 0 or not version.startswith("ffmpeg"):
|
||||
log.warning("Unable to run ffmpeg, but it was requested. Code: %d; Message: %s", code, version)
|
||||
raise FileNotFoundError(f"Unable to locate or run ffmpeg or ffprobe. Please install FFmpeg "
|
||||
f"and ensure that it is callable via PATH or conf.ffmpeg_path")
|
||||
raise FileNotFoundError("Unable to locate or run ffmpeg or ffprobe. Please install FFmpeg "
|
||||
"and ensure that it is callable via PATH or conf.ffmpeg_path")
|
||||
log.debug("Using %s at %s", version.splitlines()[0].split(" Copyright")[0], self._which_ffmpeg)
|
||||
return version
|
||||
|
||||
|
|
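# Hedged aside (not part of the original diff): the hunk above swaps asyncio.get_event_loop() for
# asyncio.get_running_loop(), which only works inside a running coroutine and never creates a new
# loop implicitly. A minimal sketch of the same run-in-executor pattern, with an assumed blocking
# subprocess call standing in for ffmpeg:
#
#   import asyncio, subprocess
#
#   async def run_blocking(cmd: str) -> bytes:
#       loop = asyncio.get_running_loop()
#       return await loop.run_in_executor(None, lambda: subprocess.check_output(cmd, shell=True))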
|
@ -1 +0,0 @@
|
|||
from .claim import Claim
|
|
@ -30,14 +30,10 @@ class Claim(Signable):
|
|||
COLLECTION = 'collection'
|
||||
REPOST = 'repost'
|
||||
|
||||
__slots__ = 'version',
|
||||
__slots__ = ()
|
||||
|
||||
message_class = ClaimMessage
|
||||
|
||||
def __init__(self, message=None):
|
||||
super().__init__(message)
|
||||
self.version = 2
|
||||
|
||||
@property
|
||||
def claim_type(self) -> str:
|
||||
return self.message.WhichOneof('type')
|
||||
|
@ -139,7 +135,10 @@ class BaseClaim:
|
|||
field = getattr(self, l)
|
||||
if kwargs.pop(f'clear_{l}', False):
|
||||
del field[:]
|
||||
items = kwargs.pop(l, None)
|
||||
if l in ('tags', 'languages', 'locations'):
|
||||
items = kwargs.pop(l[:-1], None)
|
||||
else:
|
||||
items = kwargs.pop(l, None)
|
||||
if items is not None:
|
||||
if isinstance(items, str):
|
||||
field.append(items)
|
||||
|
@ -151,6 +150,8 @@ class BaseClaim:
|
|||
for key, value in kwargs.items():
|
||||
setattr(self, key, value)
|
||||
|
||||
return self
|
||||
|
||||
@property
|
||||
def title(self) -> str:
|
||||
return self.claim.message.title
|
||||
|
@ -267,6 +268,8 @@ class Stream(BaseClaim):
|
|||
|
||||
super().update(**kwargs)
|
||||
|
||||
return self
|
||||
|
||||
@property
|
||||
def author(self) -> str:
|
||||
return self.message.author
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
import json
|
||||
import warnings
|
||||
from decimal import Decimal
|
||||
|
||||
from google.protobuf.message import DecodeError
|
||||
|
@ -8,6 +9,9 @@ from lbry.schema.types.v1.certificate_pb2 import KeyType
|
|||
from lbry.schema.types.v1.fee_pb2 import Fee as FeeMessage
|
||||
|
||||
|
||||
warnings.filterwarnings("ignore", category=RuntimeWarning, module="lbry.schema.compat")
|
||||
|
||||
|
||||
def from_old_json_schema(claim, payload: bytes):
|
||||
try:
|
||||
value = json.loads(payload)
|
||||
|
|
|
@@ -148,7 +148,7 @@ class Outputs:
        for txo_message in chain(outputs.txos, outputs.extra_txos):
            if txo_message.WhichOneof('meta') == 'error':
                continue
-            txs.add((hexlify(txo_message.tx_hash[::-1]).decode(), txo_message.height))
+            txs.add((txo_message.tx_hash, txo_message.height))
        return cls(
            outputs.txos, outputs.extra_txos, txs,
            outputs.offset, outputs.total,

@@ -185,26 +185,26 @@ class Outputs:
            txo_message.error.code = ErrorMessage.BLOCKED
            set_reference(txo_message.error.blocked.channel, txo.censor_hash, extra_txo_rows)
            return
-        txo_message.tx_hash = txo['txo_hash'][:32]
-        txo_message.nout, = struct.unpack('<I', txo['txo_hash'][32:])
-        txo_message.height = txo['height']
-        txo_message.claim.short_url = txo['short_url']
-        txo_message.claim.reposted = txo['reposted']
-        if txo['canonical_url'] is not None:
-            txo_message.claim.canonical_url = txo['canonical_url']
-        txo_message.claim.is_controlling = bool(txo['is_controlling'])
-        if txo['last_take_over_height'] is not None:
-            txo_message.claim.take_over_height = txo['last_take_over_height']
-        txo_message.claim.creation_height = txo['creation_height']
-        txo_message.claim.activation_height = txo['activation_height']
-        txo_message.claim.expiration_height = txo['expiration_height']
-        if txo['claims_in_channel'] is not None:
-            txo_message.claim.claims_in_channel = txo['claims_in_channel']
-        txo_message.claim.effective_amount = txo['effective_amount']
-        txo_message.claim.support_amount = txo['support_amount']
-        txo_message.claim.trending_group = txo['trending_group']
-        txo_message.claim.trending_mixed = txo['trending_mixed']
-        txo_message.claim.trending_local = txo['trending_local']
-        txo_message.claim.trending_global = txo['trending_global']
-        set_reference(txo_message.claim.channel, txo['channel_hash'], extra_txo_rows)
-        set_reference(txo_message.claim.repost, txo['reposted_claim_hash'], extra_txo_rows)
+        txo_message.tx_hash = txo.tx_ref.hash
+        txo_message.nout = txo.position
+        txo_message.height = txo.tx_ref.height
+        txo_message.claim.short_url = txo.meta['short_url']
+        txo_message.claim.reposted = txo.meta['reposted_count']
+        if txo.meta['canonical_url'] is not None:
+            txo_message.claim.canonical_url = txo.meta['canonical_url']
+        txo_message.claim.is_controlling = bool(txo.meta['takeover_height'])
+        #if txo['last_take_over_height'] is not None:
+        #    txo_message.claim.take_over_height = txo['last_take_over_height']
+        txo_message.claim.creation_height = txo.meta['creation_height']
+        txo_message.claim.activation_height = txo.meta['activation_height']
+        #txo_message.claim.expiration_height = txo['expiration_height']
+        if txo.meta['signed_claim_count'] is not None:
+            txo_message.claim.claims_in_channel = txo.meta['signed_claim_count']
+        txo_message.claim.effective_amount = txo.meta['staked_amount']
+        txo_message.claim.support_amount = txo.meta['staked_support_amount']
+        #txo_message.claim.trending_group = txo['trending_group']
+        #txo_message.claim.trending_mixed = txo['trending_mixed']
+        #txo_message.claim.trending_local = txo['trending_local']
+        #txo_message.claim.trending_global = txo['trending_global']
+        #set_reference(txo_message.claim.channel, txo['channel_hash'], extra_txo_rows)
+        #set_reference(txo_message.claim.repost, txo['reposted_claim_hash'], extra_txo_rows)
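Context for the Outputs change: the old rows packed the transaction hash and output index into a single 36-byte txo_hash, which the removed lines sliced apart; the new code reads txo.tx_ref and txo.position instead. The old packing, as a small self-contained sketch:

import struct

def pack_txo_hash(tx_hash: bytes, nout: int) -> bytes:
    # 32-byte transaction hash followed by a 4-byte little-endian output index.
    return tx_hash + struct.pack('<I', nout)

def unpack_txo_hash(txo_hash: bytes):
    tx_hash = txo_hash[:32]
    nout, = struct.unpack('<I', txo_hash[32:])
    return tx_hash, nout

txo_hash = pack_txo_hash(b'\x00' * 32, 5)
assert unpack_txo_hash(txo_hash) == (b'\x00' * 32, 5)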
@@ -1,6 +1,19 @@
from lbry.schema.base import Signable
+from lbry.schema.types.v2.support_pb2 import Support as SupportMessage


class Support(Signable):
    __slots__ = ()
-    message_class = None # TODO: add support protobufs
+    message_class = SupportMessage
+
+    def __init__(self, emoji='👍', message=None):
+        super().__init__(message)
+        self.emoji = emoji
+
+    @property
+    def emoji(self) -> str:
+        return self.message.emoji
+
+    @emoji.setter
+    def emoji(self, emoji: str):
+        self.message.emoji = emoji
@@ -6,7 +6,11 @@ WEIRD_CHARS_RE = re.compile(r"[#!~]")


def normalize_tag(tag: str):
-    return MULTI_SPACE_RE.sub(' ', WEIRD_CHARS_RE.sub(' ', tag.lower().replace("'", ""))).strip()
+    return MULTI_SPACE_RE.sub(
+        ' ', WEIRD_CHARS_RE.sub(
+            ' ', tag.lower().replace("'", "").replace('\x00', '')
+        )
+    ).strip()


def clean_tags(tags: List[str]):
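Quick check of what the added null-byte stripping does to normalize_tag. WEIRD_CHARS_RE is taken from the hunk header; MULTI_SPACE_RE is assumed here to collapse runs of whitespace, since its definition falls outside this hunk:

import re

MULTI_SPACE_RE = re.compile(r"\s{2,}")   # assumption; not shown in the diff
WEIRD_CHARS_RE = re.compile(r"[#!~]")

def normalize_tag(tag: str):
    return MULTI_SPACE_RE.sub(
        ' ', WEIRD_CHARS_RE.sub(
            ' ', tag.lower().replace("'", "").replace('\x00', '')
        )
    ).strip()

print(normalize_tag("Rock'n'Roll!\x00"))  # rocknroll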
lbry/schema/types/v2/support_pb2.py (new file, 69 lines)
@@ -0,0 +1,69 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: support.proto

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()




DESCRIPTOR = _descriptor.FileDescriptor(
  name='support.proto',
  package='pb',
  syntax='proto3',
  serialized_pb=_b('\n\rsupport.proto\x12\x02pb\"\x18\n\x07Support\x12\r\n\x05\x65moji\x18\x01 \x01(\tb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)




_SUPPORT = _descriptor.Descriptor(
  name='Support',
  full_name='pb.Support',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='emoji', full_name='pb.Support.emoji', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=21,
  serialized_end=45,
)

DESCRIPTOR.message_types_by_name['Support'] = _SUPPORT

Support = _reflection.GeneratedProtocolMessageType('Support', (_message.Message,), dict(
  DESCRIPTOR = _SUPPORT,
  __module__ = 'support_pb2'
  # @@protoc_insertion_point(class_scope:pb.Support)
))
_sym_db.RegisterMessage(Support)


# @@protoc_insertion_point(module_scope)
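For reference, the generated message behaves like any protobuf message; a small round trip using only the standard protobuf API. The Support schema wrapper above exposes the same field through its emoji property:

from lbry.schema.types.v2.support_pb2 import Support as SupportMessage

msg = SupportMessage()
msg.emoji = '👍'
data = msg.SerializeToString()   # tag for field 1 followed by the UTF-8 bytes of the emoji

parsed = SupportMessage()
parsed.ParseFromString(data)
assert parsed.emoji == '👍'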
@@ -44,7 +44,7 @@ URL_REGEX = _create_url_regex()


def normalize_name(name):
-    return unicodedata.normalize('NFD', name).casefold()
+    return unicodedata.normalize('NFD', name).casefold().replace('\x00', '')


class PathSegment(NamedTuple):
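And the matching change for URL names; a couple of sample inputs through the new normalize_name:

import unicodedata

def normalize_name(name):
    return unicodedata.normalize('NFD', name).casefold().replace('\x00', '')

print(normalize_name('LBRY\x00'))  # lbry
print(normalize_name('Café'))      # café (NFD leaves a combining accent on the e)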
lbry/service/__init__.py (new file, 4 lines)
@@ -0,0 +1,4 @@
from .api import API, Client
from .daemon import Daemon, jsonrpc_dumps_pretty
from .full_node import FullNode
from .light_client import LightClient

lbry/service/api.py (new file, 3536 lines)
File diff suppressed because it is too large.
lbry/service/base.py (new file, 260 lines)
@@ -0,0 +1,260 @@
import os
import asyncio
import logging
from typing import List, Optional, Tuple, NamedTuple

from lbry.db import Database, Result
from lbry.db.constants import TXO_TYPES
from lbry.schema.result import Censor
from lbry.blockchain.transaction import Transaction, Output
from lbry.blockchain.ledger import Ledger
from lbry.wallet import WalletManager
from lbry.event import EventController

log = logging.getLogger(__name__)


class BlockEvent(NamedTuple):
    height: int


class Sync:

    def __init__(self, ledger: Ledger, db: Database):
        self.ledger = ledger
        self.conf = ledger.conf
        self.db = db

        self._on_block_controller = EventController()
        self.on_block = self._on_block_controller.stream

        self._on_progress_controller = db._on_progress_controller
        self.on_progress = db.on_progress

        self._on_ready_controller = EventController()
        self.on_ready = self._on_ready_controller.stream

    def on_bulk_started(self):
        return self.on_progress.where()  # filter for bulk started event

    def on_bulk_finished(self):
        return self.on_progress.where()  # filter for bulk finished event

    async def start(self):
        raise NotImplementedError

    async def stop(self):
        raise NotImplementedError


class Service:
    """
    Base class for light client and full node LBRY service implementations.
    """

    sync: Sync

    def __init__(self, ledger: Ledger):
        self.ledger, self.conf = ledger, ledger.conf
        self.db = Database(ledger)
        self.wallets = WalletManager(ledger, self.db)

        #self.on_address = sync.on_address
        #self.accounts = sync.accounts
        #self.on_header = sync.on_header
        #self.on_ready = sync.on_ready
        #self.on_transaction = sync.on_transaction

        # sync has established connection with a source from which it can synchronize
        # for full service this is lbrycrd (or sync service) and for light this is full node
        self._on_connected_controller = EventController()
        self.on_connected = self._on_connected_controller.stream

    async def start(self):
        await self.db.open()
        await self.wallets.ensure_path_exists()
        await self.wallets.load()
        await self.sync.start()

    async def stop(self):
        await self.sync.stop()
        await self.db.close()

    async def get_status(self):
        pass

    def get_version(self):
        pass

    async def find_ffmpeg(self):
        pass

    async def get(self, uri, **kwargs):
        pass

    def create_wallet(self, file_name):
        path = os.path.join(self.conf.wallet_dir, file_name)
        return self.wallets.add_from_path(path)

    async def get_addresses(self, **constraints):
        return await self.db.get_addresses(**constraints)

    def reserve_outputs(self, txos):
        return self.db.reserve_outputs(txos)

    def release_outputs(self, txos):
        return self.db.release_outputs(txos)

    def release_tx(self, tx):
        return self.release_outputs([txi.txo_ref.txo for txi in tx.inputs])

    def get_utxos(self, **constraints):
        self.constraint_spending_utxos(constraints)
        return self.db.get_utxos(**constraints)

    async def get_txos(self, resolve=False, **constraints) -> Result[Output]:
        txos = await self.db.get_txos(**constraints)
        if resolve:
            return await self._resolve_for_local_results(constraints.get('accounts', []), txos)
        return txos

    def get_txo_sum(self, **constraints):
        return self.db.get_txo_sum(**constraints)

    def get_txo_plot(self, **constraints):
        return self.db.get_txo_plot(**constraints)

    def get_transactions(self, **constraints):
        return self.db.get_transactions(**constraints)

    async def get_transaction(self, tx_hash: bytes):
        tx = await self.db.get_transaction(tx_hash=tx_hash)
        if tx:
            return tx
        # try:
        #     raw, merkle = await self.ledger.network.get_transaction_and_merkle(tx_hash)
        # except CodeMessageError as e:
        #     if 'No such mempool or blockchain transaction.' in e.message:
        #         return {'success': False, 'code': 404, 'message': 'transaction not found'}
        #     return {'success': False, 'code': e.code, 'message': e.message}
        # height = merkle.get('block_height')
        # tx = Transaction(unhexlify(raw), height=height)
        # if height and height > 0:
        #     await self.ledger.maybe_verify_transaction(tx, height, merkle)
        # return tx

    async def search_transactions(self, txids):
        raise NotImplementedError

    async def announce_addresses(self, address_manager, addresses: List[str]):
        await self.ledger.announce_addresses(address_manager, addresses)

    async def get_address_manager_for_address(self, address):
        details = await self.db.get_address(address=address)
        for wallet in self.wallets:
            for account in wallet.accounts:
                if account.id == details['account']:
                    return account.address_managers[details['chain']]
        return None

    async def reset(self):
        self.ledger.conf = {
            'auto_connect': True,
            'default_servers': self.conf.lbryum_servers,
            'data_path': self.conf.wallet_dir,
        }
        await self.ledger.stop()
        await self.ledger.start()

    async def get_best_blockhash(self):
        if len(self.ledger.headers) <= 0:
            return self.ledger.genesis_hash
        return (await self.ledger.headers.hash(self.ledger.headers.height)).decode()

    async def maybe_broadcast_or_release(self, tx, preview=False, no_wait=False):
        if preview:
            return await self.release_tx(tx)
        try:
            await self.broadcast(tx)
            if not no_wait:
                await self.wait(tx)
        except Exception:
            await self.release_tx(tx)
            raise

    async def broadcast(self, tx):
        raise NotImplementedError

    async def wait(self, tx: Transaction, height=-1, timeout=1):
        raise NotImplementedError

    async def resolve(self, urls, **kwargs):
        raise NotImplementedError

    async def search_claims(self, accounts, **kwargs) -> Tuple[List[Output], Optional[int], Censor]:
        raise NotImplementedError

    async def search_supports(self, accounts, **kwargs) -> Tuple[List[Output], Optional[int]]:
        raise NotImplementedError

    async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
        for claim in (await self.search_claims(accounts, claim_id=claim_id, **kwargs))[0]:
            return claim

    @staticmethod
    def constraint_spending_utxos(constraints):
        constraints['txo_type__in'] = (0, TXO_TYPES['purchase'])

    async def get_purchases(self, wallet, resolve=False, **constraints):
        purchases = await wallet.get_purchases(**constraints)
        if resolve:
            claim_ids = [p.purchased_claim_id for p in purchases]
            try:
                resolved, _, _ = await self.search_claims([], claim_ids=claim_ids)
            except Exception as err:
                if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
                    raise
                log.exception("Resolve failed while looking up purchased claim ids:")
                resolved = []
            lookup = {claim.claim_id: claim for claim in resolved}
            for purchase in purchases:
                purchase.purchased_claim = lookup.get(purchase.purchased_claim_id)
        return purchases

    async def _resolve_for_local_results(self, accounts, txos: Result) -> Result:
        results = []
        response = await self.resolve(
            [txo.permanent_url for txo in txos if txo.can_decode_claim], accounts=accounts
        )
        for txo in txos:
            resolved = response.get(txo.permanent_url) if txo.can_decode_claim else None
            if isinstance(resolved, Output):
                resolved.update_annotations(txo)
                results.append(resolved)
            else:
                if isinstance(resolved, dict) and 'error' in resolved:
                    txo.meta['error'] = resolved['error']
                results.append(txo)
        txos.rows = results
        return txos

    async def resolve_collection(self, collection, offset=0, page_size=1):
        claim_ids = collection.claim.collection.claims.ids[offset:page_size+offset]
        try:
            resolve_results, _, _ = await self.search_claims([], claim_ids=claim_ids)
        except Exception as err:
            if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
                raise
            log.exception("Resolve failed while looking up collection claim ids:")
            return []
        claims = []
        for claim_id in claim_ids:
            found = False
            for txo in resolve_results:
                if txo.claim_id == claim_id:
                    claims.append(txo)
                    found = True
                    break
            if not found:
                claims.append(None)
        return claims
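Both Sync and Service are abstract here; a hypothetical minimal pairing (the names NullSync and OfflineService are illustrative only) shows what a concrete implementation has to supply on top of the base class:

from lbry.blockchain.ledger import Ledger
from lbry.service.base import Service, Sync


class NullSync(Sync):
    """Hypothetical do-nothing sync, only to illustrate the Sync contract."""

    async def start(self):
        pass

    async def stop(self):
        pass


class OfflineService(Service):
    """Hypothetical Service that reads the local db but never broadcasts."""

    def __init__(self, ledger: Ledger):
        super().__init__(ledger)
        self.sync = NullSync(ledger, self.db)

    async def broadcast(self, tx):
        raise RuntimeError("offline service cannot broadcast")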
lbry/service/daemon.py (new file, 180 lines)
@@ -0,0 +1,180 @@
import json
import signal
import asyncio
import logging

from weakref import WeakSet
from asyncio.runners import _cancel_all_tasks

from aiohttp.web import Application, AppRunner, WebSocketResponse, TCPSite, Response
from aiohttp.http_websocket import WSMsgType, WSCloseCode

from lbry.service.json_encoder import JSONResponseEncoder
from lbry.service.base import Service
from lbry.service.api import API
from lbry.console import Console


log = logging.getLogger(__name__)


def jsonrpc_dumps_pretty(obj, **kwargs):
    #if not isinstance(obj, dict):
    #    data = {"jsonrpc": "2.0", "error": obj.to_dict()}
    #else:
    data = {"jsonrpc": "2.0", "result": obj}
    return json.dumps(data, cls=JSONResponseEncoder, sort_keys=True, indent=2, **kwargs) + "\n"


class WebSocketLogHandler(logging.Handler):

    def __init__(self, send_message):
        super().__init__()
        self.send_message = send_message

    def emit(self, record):
        try:
            self.send_message({
                'type': 'log',
                'name': record.name,
                'message': self.format(record)
            })
        except Exception:
            self.handleError(record)


class WebSocketManager(WebSocketResponse):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def subscribe(self, requested: list, subscriptions):
        for request in requested:
            if request == '*':
                for _, component in subscriptions.items():
                    for _, sockets in component.items():
                        sockets.add(self)
            elif '.' not in request:
                for _, sockets in subscriptions[request].items():
                    sockets.add(self)
            elif request.count('.') == 1:
                component, stream = request.split('.')
                subscriptions[component][stream].add(self)

    def unsubscribe(self, subscriptions):
        for _, component in subscriptions.items():
            for _, sockets in component.items():
                sockets.discard(self)


class Daemon:

    def __init__(self, service: Service, console: Console):
        self.service = service
        self.conf = service.conf
        self.console = console
        self.api = API(service)
        self.app = Application()
        self.app['websockets'] = WeakSet()
        self.app['subscriptions'] = {}
        self.components = {}
        #for component in components:
        #    streams = self.app['subscriptions'][component.name] = {}
        #    for event_name, event_stream in component.event_streams.items():
        #        streams[event_name] = WeakSet()
        #        event_stream.listen(partial(self.broadcast_event, component.name, event_name))
        self.app.router.add_get('/ws', self.on_connect)
        self.app.router.add_post('/api', self.on_rpc)
        self.app.on_shutdown.append(self.on_shutdown)
        self.runner = AppRunner(self.app)

    def run(self):
        loop = asyncio.new_event_loop()
        for sig in (signal.SIGHUP, signal.SIGTERM, signal.SIGINT):
            loop.add_signal_handler(sig, loop.stop)
        try:
            loop.run_until_complete(self.start())
            loop.run_forever()
        finally:
            try:
                loop.run_until_complete(self.stop())
            finally:
                try:
                    _cancel_all_tasks(loop)
                    loop.run_until_complete(loop.shutdown_asyncgens())
                finally:
                    loop.close()

    async def start(self):
        self.console.starting()
        await self.runner.setup()
        site = TCPSite(self.runner, self.conf.api_host, self.conf.api_port)
        await site.start()
        await self.service.start()

    async def stop(self):
        await self.service.stop()
        await self.runner.cleanup()

    async def on_rpc(self, request):
        data = await request.json()
        params = data.get('params', {})
        method = getattr(self.api, data['method'])
        result = await method(**params)
        encoded_result = jsonrpc_dumps_pretty(result, service=self.service)
        return Response(
            text=encoded_result,
            content_type='application/json'
        )

    async def on_connect(self, request):
        web_socket = WebSocketManager()
        await web_socket.prepare(request)
        self.app['websockets'].add(web_socket)
        try:
            async for msg in web_socket:
                if msg.type == WSMsgType.TEXT:
                    asyncio.create_task(self.on_message(web_socket, msg.json()))
                elif msg.type == WSMsgType.ERROR:
                    print('web socket connection closed with exception %s' %
                          web_socket.exception())
        finally:
            web_socket.unsubscribe(self.app['subscriptions'])
            self.app['websockets'].discard(web_socket)
        return web_socket

    async def on_message(self, web_socket: WebSocketManager, msg: dict):
        if msg['method'] == 'subscribe':
            streams = msg['streams']
            if isinstance(streams, str):
                streams = [streams]
            web_socket.subscribe(streams, self.app['subscriptions'])
        else:
            params = msg.get('params', {})
            method = getattr(self.api, msg['method'])
            result = await method(**params)
            encoded_result = jsonrpc_dumps_pretty(result, service=self.service)
            await web_socket.send_json({
                'id': msg.get('id', ''),
                'result': encoded_result
            })

    @staticmethod
    async def on_shutdown(app):
        for web_socket in set(app['websockets']):
            await web_socket.close(code=WSCloseCode.GOING_AWAY, message='Server shutdown')

    def broadcast_event(self, module, stream, payload):
        for web_socket in self.app['subscriptions'][module][stream]:
            asyncio.create_task(web_socket.send_json({
                'module': module,
                'stream': stream,
                'payload': payload
            }))

    def broadcast_message(self, msg):
        for web_socket in self.app['websockets']:
            asyncio.create_task(web_socket.send_json({
                'module': 'blockchain_sync',
                'payload': msg
            }))
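A minimal client sketch against the two endpoints registered above. The RPC method names come from lbry.service.api.API, whose diff is suppressed in this view, so 'version' is only a plausible example, and the host and port are assumed defaults standing in for conf.api_host and conf.api_port:

import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        # POST /api dispatches to getattr(api, data['method'])(**data.get('params', {}))
        async with session.post('http://localhost:5279/api',
                                json={'method': 'version', 'params': {}}) as resp:
            print(await resp.text())

        # /ws accepts {'method': 'subscribe', 'streams': ...} for event streams
        async with session.ws_connect('http://localhost:5279/ws') as ws:
            await ws.send_json({'method': 'subscribe', 'streams': '*'})

asyncio.run(main())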
@@ -7,7 +7,7 @@ from typing import Optional, Iterable, Type
from aiohttp.client_exceptions import ContentTypeError
from lbry.error import InvalidExchangeRateResponseError, CurrencyConversionError
from lbry.utils import aiohttp_request
-from lbry.wallet.dewies import lbc_to_dewies
+from lbry.blockchain.dewies import lbc_to_dewies

log = logging.getLogger(__name__)
lbry/service/full_node.py (new file, 67 lines)
@@ -0,0 +1,67 @@
import logging
from binascii import hexlify, unhexlify

from lbry.blockchain.lbrycrd import Lbrycrd
from lbry.blockchain.sync import BlockchainSync
from lbry.blockchain.ledger import Ledger
from lbry.blockchain.transaction import Transaction

from .base import Service


log = logging.getLogger(__name__)


class FullNode(Service):

    sync: BlockchainSync

    def __init__(self, ledger: Ledger, chain: Lbrycrd = None):
        super().__init__(ledger)
        self.chain = chain or Lbrycrd(ledger)
        self.sync = BlockchainSync(self.chain, self.db)

    async def start(self):
        await self.chain.open()
        await super().start()

    async def stop(self):
        await super().stop()
        await self.chain.close()

    async def get_status(self):
        return 'everything is wonderful'

    # async def get_block_address_filters(self):
    #     return {
    #         hexlify(f['block_hash']).decode(): hexlify(f['block_filter']).decode()
    #         for f in await self.db.get_block_address_filters()
    #     }

    async def search_transactions(self, txids):
        tx_hashes = [unhexlify(txid)[::-1] for txid in txids]
        return {
            hexlify(tx['tx_hash'][::-1]).decode(): hexlify(tx['raw']).decode()
            for tx in await self.db.get_transactions(tx_hashes=tx_hashes)
        }

    async def broadcast(self, tx):
        return await self.chain.send_raw_transaction(hexlify(tx.raw).decode())

    async def wait(self, tx: Transaction, height=-1, timeout=1):
        pass

    async def search_claims(self, accounts, **kwargs):
        return await self.db.search_claims(**kwargs)

    async def protobuf_search_claims(self, **kwargs):
        return await self.db.protobuf_search_claims(**kwargs)

    async def search_supports(self, accounts, **kwargs):
        return await self.db.search_supports(**kwargs)

    async def resolve(self, urls, **kwargs):
        return await self.db.resolve(urls, **kwargs)

    async def protobuf_resolve(self, urls, **kwargs):
        return await self.db.protobuf_resolve(urls, **kwargs)
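The txid handling in search_transactions relies on the usual byte reversal between display-order txids and raw hash bytes; the round trip looks like this:

from binascii import hexlify, unhexlify

def txid_to_tx_hash(txid: str) -> bytes:
    # txids are displayed in reversed (big-endian) order relative to the raw hash bytes
    return unhexlify(txid)[::-1]

def tx_hash_to_txid(tx_hash: bytes) -> str:
    return hexlify(tx_hash[::-1]).decode()

txid = "00" * 31 + "ff"
assert tx_hash_to_txid(txid_to_tx_hash(txid)) == txid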
Some files were not shown because too many files have changed in this diff.