Compare commits
408 commits
master...staked-amo
Commits (SHA1):
4e9977ac23 784343878b 350eec35ee 76d0e31544 cd3f189cf6 240f0a9a05 94d76cb13d 4592e21424
6df67f225f c484a8abf5 f4a8be6c19 8968893216 09aa3039e9 c1c9c2883a 9c269c90bb e4449d2ec7
4e0728572d d3ec200e46 90e7821283 a5eccb9a92 f83289f876 6bb8a69efc 76b3bfe975 f3710fa153
2b62330980 036e16d1fc ea691e78b8 435a80d4c3 ec89baa831 85cf19bb2d 1971539369 5d4e61d089
2ee65752b2 1d31a96c9b e6a9417988 1e4613fd8a d459d6a26b d8f5b11158 6764d52634 4d8cc494e1
22a04fab24 9f9fdd2d1a 2ec91dbca7 1a4a2db1b1 cc2837c021 5cdfbcc88e 6ed2fa20ec 4356d23cc1
2f4b5d2ffe 0a06810f36 9ce8910b42 fa85558d71 999e4209fa 7d82a4573f c1aa9b8150 daed032bb7
abebc0d878 4bb0344e05 0215ce6b79 b9cf130344 85b8877933 bba9aec4f2 2853bb437d 2d7929f113
24386c74f4 0a2c161ace 18b25f5146 1a5292aaf4 39ba2a8b7f 25b63c988f fdac6416a1 4783e6da1f
9c1d593e54 fa34ff88bc 6690e63ea4 c7eb60619f cb98fbc619 4fcfa0b193 8cde120928 052ef73226
082a91dc15 d10a88c79b 62a3022a3d 4e6bdb64a8 0b34c4ea86 5b9e40c0c0 30dff02674 20c7fe1e82
23f186e6a1 14436b3955 a96ceba6f5 f26aadbd44 144eb248e3 a554c8838c 2a0089a4dd d6bcbd631f
39a4c4e590 cb60cd99f4 a5c117b542 47a8c005d9 6e50e83e28 50c8fc269b 336a0f6ae1 e1c33dccab
2f56f7f0e0 28413742cc 6484894b36 c5fd9643f1 aa6d78b515 33e266a0f4 b4ee07162d 0f63103db5
c3884352db 8c525b6dfc d89b074615 f775b0ed55 60c333c6e2 42b7f8ff71 c981c767b9 10ad4ed8d1
e436ae7edd c498619cca 382facf264 7304d24dfd b330c35282 6dfa78afa3 1c79daaafc e1b55f017b
6bbfb45de7 9b15799c72 bc678f2146 7c5211d420 70e50780c3 7ebb9d06df b99de6b872 889464e51d
35c3ff1e30 aa75b9bb25 f995ceae8b c1803434aa 4dfbdcc2d7 272940b6d6 baf384d6e0 a012c04974
1fa117a104 33a157959d 747eace4ab 8fa2d746e7 a77b1f9997 c914f24089 e41a71a64e e59e238fd5
4c331d00e7 f56229bcce abf1247f61 4b230a97f9 bf7ac1562f 2000d75c7a 1259002b51 70d9f4cf79
23723f8041 51a0f7ddc8 1228700487 67c5c192f3 6b9cf5b48c 0a91bd35c5 84639cfb2e 0b245aab31
ee3db31541 8a3b960a85 2952609972 9ab8a7dd81 a7555932a9 9411b26fd3 85db7d3ce7 87f1d5b0ae
64b8caeb5c 3315175d1c a802d1f686 684e389283 895719a13d 8f2cce7f61 9114a9794d a1f3254261
dbc0da2817 412ace1c6f 8100efb48c 8d164dfed3 ffea76cdd5 72ddb0c195 c57080711e 96aea579ac
e2aae23575 2ae700feb3 d1ac066c6d b61424979d ca10874006 a4680878c4 1c29ae7204 86069b10ca
9c5e2a8c8d 622a3b77ef 0dff82c31c 8e683c9cd0 69c45d43d3 fab7b5579c 9e87394fca 8c6633de17
3af71a2674 b792b134a2 f50196d395 248e04089b 8fd92cb649 af4138ff51 462daf4dc4 e63151a370
09a2b2fa46 a3d91329fe 7bf96fd637 5157b2535b 0151ce8040 5328ed105e 7f01b1cb84 862c51946a
1790ee3018 7a4e5dcb05 24a88db595 915233c96c aa9365f218 15b8891fce f8a8a75ae9 d18ed6c19b
4aa44d3b5a 34a9dff141 7d9bf03574 3fe1981657 192c79c49c 5883c9bc6c 4b50d1e329 39d8a20fd5
9ccf00f56b 46662b55c7 b45a222f98 434c1bc6b3 2495df8859 635aebfeeb 81926a42f9 f2ff4410dc
564018c937 4d1eafc0a4 211f8b2e59 9500be26fd 017ef5b41a 4b19861a74 5a0a987f0c 4cb4659489
e64b108404 7870abaef4 71e14c8e63 b3b6361429 0d5441f3bf 9757c69189 d8fb31aedd 54a0bf9290
a3ef8d7411 4810ff5f94 2306edebf7 db5a33dc3f 597bebb5be 73ff1d3b3a 9198877098 46da2584ca
53b7d0a58b d1a243247d 18dc5fbc9f 410212c17a a39f87b3c5 147b9d5ad1 1f210c0b0b 86df4bdd11
096f74d79b ae8bc59c65 01dbbb4c3a 96d1926da4 2c10e71774 ea6be53071 cb5250f630 54e83daa59
df44d6ef56 aa4ef94e15 fad144cb96 1fe444bca2 55196ccb6b b3cb50aff0 ebf36f513c 47d207ff77
d99e4221f2 56ff1342c4 a8c8614948 0e9184048c c01dceebcd e3cc6ea224 44bbd9578d 7855f9c93f
76e21f65df 53c8876b5e 2927875830 b9d954a394 a0fb3424aa 269c0f714e 99d2a3f42b a1aa578bc0
82062b5601 db1f984558 e66445b46e 82b69109bd ec4e36446c 3ef83febc0 ffecd02fbc ebddb1f0f5
9d0b9805b2 8653839c16 29b1f93699 97c285f22b c12e07de11 726bae97b0 5efb36ffd2 b4d6b14599
9a40381f5a 47f6d542c5 8799caa0e4 a042377a7b 596ed08395 2af29b892b 7ffb169376 d11f4f9bed
f2e844c476 83f7eab0e7 4a9f9906a0 b7ff6569e4 12915143b8 06d93e667a b341187b14 a996e65eff
0886a7946e be6ebf0047 7c4f943bcb 5b5c45ea76 6986211c1e 8ac78990d8 4f879bbbae e3f080a7ad
d232eeaf81 5d6388b366 955e44631d 8dc5150dbe d488bfd9d4 391b95fd12 4b172e4180 29ef4425b0
558b1aeadf 41ce3e4ad8 8c91777e5d db89607e4e d476f08d13 2a0c653c37 219c7cf37d 2f575a393f
713c665588 9554b66a37 533f31cc89 fef09c1773 6a33d86bfe ccd32eae70 c61c9726b0 fd5be69d55
1f72751a88 362ab67186 ffe7fcf124 1f5dbc3eb8 4dd85a169b fe547f1b0e ba154c799e e2ffd24d51
8545ab880b 4e85f34353 777c6342f8 10c262a095 74e3471bd9 4048cfb3e8 8b7b284c0d 2d4d51388b
06b75d07dc 87a44fd41c 70693f4d1a 42224dadb6 53fc94d688 0b6d01fecc ae9d4af8c0 2309d6354c
fd2f9846e9 66666e1167 4c0fbb84d6 fd3448ffb8 6d93f97b51 2ee13ce39f bad4320ddf a220736dea
255 changed files with 32824 additions and 39807 deletions
.github/workflows/main.yml (vendored, 200 changes)

@@ -1,5 +1,5 @@
 name: ci
-on: pull_request
+on: push
 jobs:

   lint:
@@ -10,45 +10,19 @@ jobs:
       - uses: actions/setup-python@v1
         with:
           python-version: '3.7'
-      - run: make install tools
+      - name: extract pip cache
+        uses: actions/cache@v2
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
+          restore-keys: ${{ runner.os }}-pip-
+      - run: |
+          pip install --user --upgrade pip wheel
+          pip install -e .[lint]
       - run: make lint

   tests-unit:
     name: "tests / unit"
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v1
-      - uses: actions/setup-python@v1
-        with:
-          python-version: '3.7'
-      - run: make install tools
-      - working-directory: lbry
-        env:
-          HOME: /tmp
-        run: coverage run -p --source=lbry -m unittest discover -vv tests.unit
-
-  tests-integration:
-    name: "tests / integration"
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        test:
-          - datanetwork
-          - blockchain
-          - other
-    steps:
-      - uses: actions/checkout@v1
-      - uses: actions/setup-python@v1
-        with:
-          python-version: '3.7'
-      - if: matrix.test == 'other'
-        run: sudo apt install -y --no-install-recommends ffmpeg
-      - run: pip install tox-travis
-      - run: tox -e ${{ matrix.test }}
-
-  build:
-    needs: ["lint", "tests-unit", "tests-integration"]
-    name: "build"
     strategy:
       matrix:
         os:
@@ -61,19 +35,163 @@ jobs:
       - uses: actions/setup-python@v1
         with:
           python-version: '3.7'
+      - name: set pip cache dir
+        id: pip-cache
+        run: echo "::set-output name=dir::$(pip cache dir)"
+      - name: extract pip cache
+        uses: actions/cache@v2
+        with:
+          path: ${{ steps.pip-cache.outputs.dir }}
+          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
+          restore-keys: ${{ runner.os }}-pip-
+      - run: |
+          pip install --user --upgrade pip wheel
+          pip install -e .[test]
+      - env:
+          HOME: /tmp
+        run: coverage run -m unittest discover -v tests.unit
+      - env:
+          COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          COVERALLS_PARALLEL: true
+        name: Submit to coveralls
+        run: |
+          pip install https://github.com/bboe/coveralls-python/archive/github_actions.zip
+          coveralls
+
+  tests-integration:
+    name: "tests / integration"
+    runs-on: ubuntu-20.04
+    strategy:
+      matrix:
+        test:
+          # - datanetwork
+          - blockchain
+          # - other
+        db:
+          - sqlite
+          - postgres
+    services:
+      postgres:
+        image: postgres:12
+        env:
+          POSTGRES_USER: postgres
+          POSTGRES_PASSWORD: postgres
+          POSTGRES_DB: postgres
+        ports:
+          - 5432:5432
+        options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5
+    steps:
+      - uses: actions/checkout@v1
+      - uses: actions/setup-python@v1
+        with:
+          python-version: '3.7'
+      - if: matrix.test == 'other'
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y --no-install-recommends ffmpeg
+      - name: extract pip cache
+        uses: actions/cache@v2
+        with:
+          path: ./.tox
+          key: tox-integration-${{ matrix.test }}-${{ matrix.db }}-${{ hashFiles('setup.py') }}
+          restore-keys: txo-integration-${{ matrix.test }}-${{ matrix.db }}-
+      - run: pip install tox
+      - env:
+          TEST_DB: ${{ matrix.db }}
+        run: tox -e ${{ matrix.test }}
+      - env:
+          COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          COVERALLS_PARALLEL: true
+        name: Submit to coveralls
+        run: |
+          pip install https://github.com/bboe/coveralls-python/archive/github_actions.zip
+          coverage combine tests
+          coveralls
+
+  coveralls-finished:
+    needs: ["tests-unit", "tests-integration"]
+    runs-on: ubuntu-latest
+    steps:
+      - name: Coveralls Finished
+        uses: coverallsapp/github-action@57daa114
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          parallel-finished: true
+
+  build:
+    needs: ["lint", "tests-unit", "tests-integration"]
+    name: "build"
+    strategy:
+      matrix:
+        os:
+          - ubuntu-16.04
+          - macos-latest
+          - windows-latest
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v1
+      - uses: actions/setup-python@v1
+        with:
+          python-version: '3.7'
+      - name: set pip cache dir
+        id: pip-cache
+        run: echo "::set-output name=dir::$(pip cache dir)"
+      - name: extract pip cache
+        uses: actions/cache@v2
+        with:
+          path: ${{ steps.pip-cache.outputs.dir }}
+          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
+          restore-keys: ${{ runner.os }}-pip-
       - name: Setup
         run: |
-          pip install pyinstaller
+          pip install --user --upgrade pip wheel
+          pip install sqlalchemy@git+https://github.com/eukreign/pyinstaller.git@sqlalchemy
+      - if: startsWith(runner.os, 'linux')
+        run: |
+          sudo apt-get install libzmq3-dev
+          pip install -e .[postgres]
+      - if: startsWith(runner.os, 'mac')
+        run: |
+          brew install zeromq
           pip install -e .
-      - if: startsWith(matrix.os, 'windows') == false
+      - if: startsWith(runner.os, 'linux') || startsWith(runner.os, 'mac')
        name: Build & Run (Unix)
        run: |
-          pyinstaller --onefile --name lbrynet lbry/extras/cli.py
+          pyinstaller --onefile --name lbrynet lbry/cli.py
          chmod +x dist/lbrynet
          dist/lbrynet --version
-      - if: startsWith(matrix.os, 'windows')
+      - if: startsWith(runner.os, 'windows')
        name: Build & Run (Windows)
        run: |
          pip install pywin32
-          pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py
+          pip install -e .
+          pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/cli.py
          dist/lbrynet.exe --version
+      - uses: actions/upload-artifact@v2
+        with:
+          name: lbrynet-${{ matrix.os }}
+          path: dist/
+
+  docker:
+    needs: ["build"]
+    name: "build (docker)"
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v1
+      - name: fetch lbrynet binary
+        uses: actions/download-artifact@v2
+        with:
+          name: lbrynet-ubuntu-16.04
+      - run: |
+          chmod +x lbrynet
+          mv lbrynet docker
+      - name: build and push docker image
+        uses: docker/build-push-action@v1
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+          repository: lbry/lbrynet
+          path: docker
+          tag_with_ref: true
+          tag_with_sha: true
+          add_git_labels: true
.gitignore (vendored, 3 changes)

@@ -11,7 +11,8 @@ lbry.egg-info
 __pycache__
 _trial_temp/

-/tests/integration/blockchain/files
+/tests/integration/commands/files
 /tests/.coverage.*

+/lbry/blockchain/bin
 /lbry/wallet/bin
Makefile (21 changes)

@@ -1,15 +1,4 @@
-.PHONY: install tools lint test idea
+.PHONY: tools lint test idea

-install:
-	CFLAGS="-DSQLITE_MAX_VARIABLE_NUMBER=2500000" pip install -U https://github.com/rogerbinns/apsw/releases/download/3.30.1-r1/apsw-3.30.1-r1.zip \
-		--global-option=fetch \
-		--global-option=--version --global-option=3.30.1 --global-option=--all \
-		--global-option=build --global-option=--enable --global-option=fts5
-	pip install -e .
-
-tools:
-	pip install mypy==0.701
-	pip install coverage astroid pylint
-
 lint:
 	pylint --rcfile=setup.cfg lbry
@@ -21,3 +10,11 @@ test:
 idea:
 	mkdir -p .idea
 	cp -r scripts/idea/* .idea
+
+start:
+	dropdb lbry --if-exists
+	createdb lbry
+	lbrynet start node \
+		--db-url=postgresql:///lbry --workers=0 --console=advanced --no-spv-address-filters \
+		--lbrycrd-rpc-user=lbry --lbrycrd-rpc-pass=somethingelse \
+		--lbrycrd-dir=${HOME}/.lbrycrd --data-dir=/tmp/tmp-lbrynet
@@ -27,6 +27,10 @@ With the daemon running, `lbrynet commands` will show you a list of commands.

 The full API is documented [here](https://lbry.tech/api/sdk).

+## Recommended hardware
+
+The minimum hardware for a full node is 16cpus, 92gb of RAM, and 160gb of NVMe storage. The recommended hardware is 32cpus, 128gb of RAM, and 160gb of NVMe storage.
+
 ## Running from source

 Installing from source is also relatively painless. Full instructions are in [INSTALL.md](INSTALL.md)
docker/Dockerfile (new file, 5 additions)

FROM ubuntu:20.04
COPY lbrynet /bin
RUN lbrynet --version
ENTRYPOINT ["lbrynet"]
CMD ["start", "node"]
docker/Dockerfile.lbrycrd (new file, 8 additions)

FROM ubuntu:20.04
RUN apt-get update && \
    apt-get install -y wget unzip && \
    wget -nv https://build.lbry.io/lbrycrd/block_info_fix_try2/lbrycrd-linux.zip && \
    unzip -d /bin lbrycrd-linux.zip && \
    rm -rf lbrycrd-linux.zip /var/lib/apt/lists/*
RUN lbrycrdd --version
ENTRYPOINT ["lbrycrdd"]
Deleted file (wallet server Dockerfile, @@ -1,52 +0,0 @@):

FROM ubuntu:19.10

ARG user=lbry
ARG db_dir=/database
ARG projects_dir=/home/$user

ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT

RUN apt-get update && \
    apt-get -y --no-install-recommends install \
      wget \
      tar unzip \
      build-essential \
      python3 \
      python3-dev \
      python3-pip \
      python3-wheel \
      python3-setuptools && \
    update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
    rm -rf /var/lib/apt/lists/*

RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
RUN mkdir -p $db_dir
RUN chown -R $user:$user $db_dir

COPY . $projects_dir
RUN chown -R $user:$user $projects_dir

USER $user
WORKDIR $projects_dir

RUN pip install uvloop
RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf

# entry point
ARG host=0.0.0.0
ARG tcp_port=50001
ARG daemon_url=http://lbry:lbry@localhost:9245/
VOLUME $db_dir
ENV TCP_PORT=$tcp_port
ENV HOST=$host
ENV DAEMON_URL=$daemon_url
ENV DB_DIRECTORY=$db_dir
ENV MAX_SESSIONS=1000000000
ENV MAX_SEND=1000000000000000000
ENV EVENT_LOOP_POLICY=uvloop
COPY ./docker/wallet_server_entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
Deleted file (wallet server docker-compose, @@ -1,34 +0,0 @@):

version: "3"

volumes:
  lbrycrd:
  wallet_server:

services:
  lbrycrd:
    image: lbry/lbrycrd:${LBRYCRD_TAG:-latest-release}
    restart: always
    ports: # accessible from host
      - "9246:9246" # rpc port
    expose: # internal to docker network. also this doesn't do anything. its for documentation only.
      - "9245" # node-to-node comms port
    volumes:
      - "lbrycrd:/data/.lbrycrd"
    environment:
      - RUN_MODE=default
      - SNAPSHOT_URL=${LBRYCRD_SNAPSHOT_URL-https://lbry.com/snapshot/blockchain}
      - RPC_ALLOW_IP=0.0.0.0/0
  wallet_server:
    image: lbry/wallet-server:${WALLET_SERVER_TAG:-latest-release}
    depends_on:
      - lbrycrd
    restart: always
    ports:
      - "50001:50001" # rpc port
      - "50005:50005" # websocket port
      #- "2112:2112" # uncomment to enable prometheus
    volumes:
      - "wallet_server:/database"
    environment:
      - SNAPSHOT_URL=${WALLET_SERVER_SNAPSHOT_URL-https://lbry.com/snapshot/wallet}
      - DAEMON_URL=http://lbry:lbry@lbrycrd:9245
docker/docker-compose.yml (new file, 41 additions)

version: "3.8"
volumes:
  lbrycrd-data:
services:
  postgres:
    image: postgres:12
    environment:
      POSTGRES_USER: lbry
      POSTGRES_PASSWORD: lbry
  lbrycrd:
    image: lbry/lbrycrd
    build:
      context: .
      dockerfile: Dockerfile.lbrycrd
    volumes:
      - lbrycrd-data:/root/.lbrycrd
    command: >
      -rpcbind=lbrycrd
      -rpcallowip=0.0.0.0/0
      -rpcuser=lbryuser
      -rpcpassword=lbrypass
      -zmqpubhashblock=tcp://lbrycrd:29000
  lbrynet:
    image: lbry/lbrynet:fast_wallet_server_sync
    depends_on:
      - postgres
      - lbrycrd
    volumes:
      - lbrycrd-data:/lbrycrd
    command: >
      start
      --full-node
      --api=0.0.0.0:5279
      --db-url=postgresql://lbry:lbry@postgres:5432/lbry
      --workers=12
      --console=basic
      --no-spv-address-filters
      --lbrycrd-rpc-host=lbrycrd
      --lbrycrd-rpc-user=lbryuser
      --lbrycrd-rpc-pass=lbrypass
      --lbrycrd-dir=/lbrycrd
Deleted file (Docker Hub build hook, @@ -1,7 +0,0 @@):

#!/bin/bash

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "$DIR/../.." ## make sure we're in the right place. Docker Hub screws this up sometimes
echo "docker build dir: $(pwd)"

docker build --build-arg DOCKER_TAG=$DOCKER_TAG --build-arg DOCKER_COMMIT=$SOURCE_COMMIT -f $DOCKERFILE_PATH -t $IMAGE_NAME .
Deleted file (Chocolatey install script, @@ -1,11 +0,0 @@):

# requires powershell and .NET 4+. see https://chocolatey.org/install for more info.

$chocoVersion = powershell choco -v
if(-not($chocoVersion)){
    Write-Output "Chocolatey is not installed, installing now"
    Write-Output "IF YOU KEEP GETTING THIS MESSAGE ON EVERY BUILD, TRY RESTARTING THE GITLAB RUNNER SO IT GETS CHOCO INTO IT'S ENV"
    Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
}
else{
    Write-Output "Chocolatey version $chocoVersion is already installed"
}
Deleted file (wallet_server_entrypoint.sh, @@ -1,23 +0,0 @@):

#!/bin/bash

# entrypoint for wallet server Docker image

set -euo pipefail

SNAPSHOT_URL="${SNAPSHOT_URL:-}" #off by default. latest snapshot at https://lbry.com/snapshot/wallet

if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/claims.db ]]; then
  files="$(ls)"
  echo "Downloading wallet snapshot from $SNAPSHOT_URL"
  wget --no-verbose --trust-server-names --content-disposition "$SNAPSHOT_URL"
  echo "Extracting snapshot..."
  filename="$(grep -vf <(echo "$files") <(ls))" # finds the file that was not there before
  case "$filename" in
    *.tgz|*.tar.gz|*.tar.bz2 ) tar xvf "$filename" --directory /database ;;
    *.zip ) unzip "$filename" -d /database ;;
    * ) echo "Don't know how to extract ${filename}. SNAPSHOT COULD NOT BE LOADED" && exit 1 ;;
  esac
  rm "$filename"
fi

/home/lbry/.local/bin/torba-server "$@"
docs/api.json (350 changes): file diff suppressed because one or more lines are too long.
Deleted file (docker ignore list, @@ -1,7 +0,0 @@):

.git
.tox
__pycache__
dist
lbry.egg-info
docs
tests
@@ -1,2 +1,8 @@
-__version__ = "0.68.0"
-version = tuple(map(int, __version__.split('.'))) # pylint: disable=invalid-name
+__version__ = "1.0.0"
+from lbry.wallet import Account, Wallet, WalletManager
+from lbry.blockchain import Ledger, RegTestLedger, TestNetLedger
+from lbry.blockchain import Transaction, Output, Input
+from lbry.blockchain import dewies_to_lbc, lbc_to_dewies, dict_values_to_lbc
+from lbry.service import API, Daemon, FullNode, LightClient
+from lbry.db.database import Database
+from lbry.conf import Config
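
The expanded package root above re-exports the main pieces of the new stack, so callers can import everything from `lbry` directly. A minimal sketch, assuming this branch of lbry-sdk is installed (only names re-exported by the new `__init__.py` are used):

    # sketch: top-level imports mirroring the new lbry/__init__.py exports
    from lbry import (
        Account, Wallet, WalletManager,
        Ledger, RegTestLedger, TestNetLedger,
        Transaction, Output, Input,
        API, Daemon, FullNode, LightClient,
        Database, Config,
        dewies_to_lbc, lbc_to_dewies,
    )

    print(dewies_to_lbc(100000000))  # "1.0", since dewies are 1e-8 LBC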
@@ -10,7 +10,11 @@ from lbry.connection_manager import ConnectionManager
 if typing.TYPE_CHECKING:
     from lbry.conf import Config
     from lbry.dht.protocol.data_store import DictDataStore
-    from lbry.extras.daemon.storage import SQLiteStorage
+
+
+class SQLiteStorage:
+    pass
+

 log = logging.getLogger(__name__)

lbry/blockchain/__init__.py (new file, 4 additions)

from .ledger import Ledger, RegTestLedger, TestNetLedger, ledger_class_from_name
from .transaction import Transaction, Output, Input
from .bcd_data_stream import BCDataStream
from .dewies import dewies_to_lbc, lbc_to_dewies, dict_values_to_lbc
@@ -4,8 +4,11 @@ from io import BytesIO

 class BCDataStream:

-    def __init__(self, data=None):
-        self.data = BytesIO(data)
+    def __init__(self, data=None, fp=None):
+        self.data = fp or BytesIO(data)
+
+    def tell(self):
+        return self.data.tell()

     def reset(self):
         self.data.seek(0)
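
A rough sketch of what the new `fp` argument and `tell()` helper allow: the stream can wrap an already-open block file instead of copying bytes into a `BytesIO` first. The file path below is hypothetical; only the constructor and `tell()` shown above are assumed.

    from lbry.blockchain.bcd_data_stream import BCDataStream

    # old style: buffer the bytes in memory
    in_memory = BCDataStream(b'\x01\x02\x03\x04')

    # new style: hand the stream an open file object directly
    with open('blk00000.dat', 'rb') as block_file:   # hypothetical block file path
        stream = BCDataStream(fp=block_file)
        start = stream.tell()            # current offset in the underlying file
        header = stream.data.read(112)   # read a raw block header
        stream.data.seek(start)          # rewind to where this block began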
lbry/blockchain/block.py (new file, 60 additions)

import struct
from typing import NamedTuple, List

from chiabip158 import PyBIP158  # pylint: disable=no-name-in-module

from lbry.crypto.hash import double_sha256
from lbry.blockchain.transaction import Transaction
from lbry.blockchain.bcd_data_stream import BCDataStream


ZERO_BLOCK = bytes((0,)*32)


def create_address_filter(address_hashes: List[bytes]) -> bytes:
    return bytes(PyBIP158([bytearray(a) for a in address_hashes]).GetEncoded())


def get_address_filter(address_filter: bytes) -> PyBIP158:
    return PyBIP158(bytearray(address_filter))


class Block(NamedTuple):
    height: int
    version: int
    file_number: int
    block_hash: bytes
    prev_block_hash: bytes
    merkle_root: bytes
    claim_trie_root: bytes
    timestamp: int
    bits: int
    nonce: int
    txs: List[Transaction]

    @staticmethod
    def from_data_stream(stream: BCDataStream, height: int, file_number: int):
        header = stream.data.read(112)
        version, = struct.unpack('<I', header[:4])
        timestamp, bits, nonce = struct.unpack('<III', header[100:112])
        tx_count = stream.read_compact_size()
        return Block(
            height=height,
            version=version,
            file_number=file_number,
            block_hash=double_sha256(header),
            prev_block_hash=header[4:36],
            merkle_root=header[36:68],
            claim_trie_root=header[68:100][::-1],
            timestamp=timestamp,
            bits=bits,
            nonce=nonce,
            txs=[
                Transaction(height=height, position=i, timestamp=timestamp).deserialize(stream)
                for i in range(tx_count)
            ]
        )

    @property
    def is_first_block(self):
        return self.prev_block_hash == ZERO_BLOCK
lbry/blockchain/database.py (new file, 245 additions)
|
@ -0,0 +1,245 @@
|
||||||
|
import os.path
|
||||||
|
import asyncio
|
||||||
|
import sqlite3
|
||||||
|
from typing import List, Optional
|
||||||
|
from concurrent.futures import ThreadPoolExecutor
|
||||||
|
from lbry.schema.url import normalize_name
|
||||||
|
|
||||||
|
from .bcd_data_stream import BCDataStream
|
||||||
|
|
||||||
|
|
||||||
|
FILES = [
|
||||||
|
'claims',
|
||||||
|
'block_index',
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def make_short_url(r):
|
||||||
|
try:
|
||||||
|
# TODO: we describe it as normalized but the old SDK didnt do that
|
||||||
|
name = r["name"].decode().replace("\x00", "")
|
||||||
|
return f'{name}#{r["shortestID"] or r["claimID"][::-1].hex()[0]}'
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
# print(f'failed making short url due to name parse error for claim_id: {r["claimID"][::-1].hex()}')
|
||||||
|
return "INVALID NAME"
|
||||||
|
|
||||||
|
|
||||||
|
class FindShortestID:
|
||||||
|
__slots__ = 'short_id', 'new_id'
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.short_id = ''
|
||||||
|
self.new_id = None
|
||||||
|
|
||||||
|
def step(self, other_id, new_id):
|
||||||
|
other_id = other_id[::-1].hex()
|
||||||
|
if self.new_id is None:
|
||||||
|
self.new_id = new_id[::-1].hex()
|
||||||
|
for i in range(len(self.new_id)):
|
||||||
|
if other_id[i] != self.new_id[i]:
|
||||||
|
if i > len(self.short_id)-1:
|
||||||
|
self.short_id = self.new_id[:i+1]
|
||||||
|
break
|
||||||
|
|
||||||
|
def finalize(self):
|
||||||
|
return self.short_id
|
||||||
|
|
||||||
|
|
||||||
|
class BlockchainDB:
|
||||||
|
|
||||||
|
def __init__(self, directory: str):
|
||||||
|
self.directory = directory
|
||||||
|
self.connection: Optional[sqlite3.Connection] = None
|
||||||
|
self.executor: Optional[ThreadPoolExecutor] = None
|
||||||
|
|
||||||
|
async def run_in_executor(self, *args):
|
||||||
|
return await asyncio.get_running_loop().run_in_executor(self.executor, *args)
|
||||||
|
|
||||||
|
def sync_open(self):
|
||||||
|
self.connection = sqlite3.connect(
|
||||||
|
os.path.join(self.directory, FILES[0]+'.sqlite'),
|
||||||
|
timeout=60.0 * 5
|
||||||
|
)
|
||||||
|
for file in FILES[1:]:
|
||||||
|
self.connection.execute(
|
||||||
|
f"ATTACH DATABASE '{os.path.join(self.directory, file+'.sqlite')}' AS {file}"
|
||||||
|
)
|
||||||
|
self.connection.create_aggregate("find_shortest_id", 2, FindShortestID)
|
||||||
|
self.connection.execute("CREATE INDEX IF NOT EXISTS claim_originalheight ON claim (originalheight);")
|
||||||
|
self.connection.execute("CREATE INDEX IF NOT EXISTS claim_updateheight ON claim (updateheight);")
|
||||||
|
self.connection.execute("create index IF NOT EXISTS support_blockheight on support (blockheight);")
|
||||||
|
self.connection.row_factory = sqlite3.Row
|
||||||
|
|
||||||
|
async def open(self):
|
||||||
|
assert self.executor is None, "Database is already open."
|
||||||
|
self.executor = ThreadPoolExecutor(max_workers=1)
|
||||||
|
return await self.run_in_executor(self.sync_open)
|
||||||
|
|
||||||
|
def sync_close(self):
|
||||||
|
self.connection.close()
|
||||||
|
self.connection = None
|
||||||
|
|
||||||
|
async def close(self):
|
||||||
|
if self.executor is not None:
|
||||||
|
if self.connection is not None:
|
||||||
|
await self.run_in_executor(self.sync_close)
|
||||||
|
self.executor.shutdown()
|
||||||
|
self.executor = None
|
||||||
|
|
||||||
|
async def commit(self):
|
||||||
|
await self.run_in_executor(self.connection.commit)
|
||||||
|
|
||||||
|
def sync_execute(self, sql: str, *args):
|
||||||
|
return self.connection.execute(sql, *args)
|
||||||
|
|
||||||
|
async def execute(self, sql: str, *args):
|
||||||
|
return await self.run_in_executor(self.sync_execute, sql, *args)
|
||||||
|
|
||||||
|
def sync_execute_fetchall(self, sql: str, *args) -> List[dict]:
|
||||||
|
return self.connection.execute(sql, *args).fetchall()
|
||||||
|
|
||||||
|
async def execute_fetchall(self, sql: str, *args) -> List[dict]:
|
||||||
|
return await self.run_in_executor(self.sync_execute_fetchall, sql, *args)
|
||||||
|
|
||||||
|
def sync_get_best_height(self) -> int:
|
||||||
|
sql = "SELECT MAX(height) FROM block_info"
|
||||||
|
return self.connection.execute(sql).fetchone()[0]
|
||||||
|
|
||||||
|
async def get_best_height(self) -> int:
|
||||||
|
return await self.run_in_executor(self.sync_get_best_height)
|
||||||
|
|
||||||
|
def sync_get_block_files(self, file_number: int = None, start_height: int = None) -> List[dict]:
|
||||||
|
sql = """
|
||||||
|
SELECT
|
||||||
|
file as file_number,
|
||||||
|
COUNT(hash) as blocks,
|
||||||
|
SUM(txcount) as txs,
|
||||||
|
MAX(height) as best_height,
|
||||||
|
MIN(height) as start_height
|
||||||
|
FROM block_info
|
||||||
|
WHERE status&1 AND status&4 AND NOT status&32 AND NOT status&64
|
||||||
|
"""
|
||||||
|
args = ()
|
||||||
|
if file_number is not None and start_height is not None:
|
||||||
|
sql += "AND file = ? AND height >= ?"
|
||||||
|
args = (file_number, start_height)
|
||||||
|
return [dict(r) for r in self.sync_execute_fetchall(sql + " GROUP BY file ORDER BY file ASC;", args)]
|
||||||
|
|
||||||
|
async def get_block_files(self, file_number: int = None, start_height: int = None) -> List[dict]:
|
||||||
|
return await self.run_in_executor(
|
||||||
|
self.sync_get_block_files, file_number, start_height
|
||||||
|
)
|
||||||
|
|
||||||
|
def sync_get_blocks_in_file(self, block_file: int, start_height=0) -> List[dict]:
|
||||||
|
return [dict(r) for r in self.sync_execute_fetchall(
|
||||||
|
"""
|
||||||
|
SELECT datapos as data_offset, height, hash as block_hash, txCount as txs
|
||||||
|
FROM block_info
|
||||||
|
WHERE file = ? AND height >= ?
|
||||||
|
AND status&1 AND status&4 AND NOT status&32 AND NOT status&64
|
||||||
|
ORDER BY datapos ASC;
|
||||||
|
""", (block_file, start_height)
|
||||||
|
)]
|
||||||
|
|
||||||
|
async def get_blocks_in_file(self, block_file: int, start_height=0) -> List[dict]:
|
||||||
|
return await self.run_in_executor(self.sync_get_blocks_in_file, block_file, start_height)
|
||||||
|
|
||||||
|
def sync_get_claim_support_txo_hashes(self, at_height: int) -> set:
|
||||||
|
return {
|
||||||
|
r['txID'] + BCDataStream.uint32.pack(r['txN'])
|
||||||
|
for r in self.connection.execute(
|
||||||
|
"""
|
||||||
|
SELECT txID, txN FROM claim WHERE updateHeight = ?
|
||||||
|
UNION
|
||||||
|
SELECT txID, txN FROM support WHERE blockHeight = ?
|
||||||
|
""", (at_height, at_height)
|
||||||
|
).fetchall()
|
||||||
|
}
|
||||||
|
|
||||||
|
def sync_get_takeover_count(self, start_height: int, end_height: int) -> int:
|
||||||
|
sql = """
|
||||||
|
SELECT COUNT(*) FROM claim WHERE name IN (
|
||||||
|
SELECT name FROM takeover
|
||||||
|
WHERE name IS NOT NULL AND height BETWEEN ? AND ?
|
||||||
|
)
|
||||||
|
""", (start_height, end_height)
|
||||||
|
return self.connection.execute(*sql).fetchone()[0]
|
||||||
|
|
||||||
|
async def get_takeover_count(self, start_height: int, end_height: int) -> int:
|
||||||
|
return await self.run_in_executor(self.sync_get_takeover_count, start_height, end_height)
|
||||||
|
|
||||||
|
def sync_get_takeovers(self, start_height: int, end_height: int) -> List[dict]:
|
||||||
|
sql = """
|
||||||
|
SELECT name, claimID, MAX(height) AS height FROM takeover
|
||||||
|
WHERE name IS NOT NULL AND height BETWEEN ? AND ?
|
||||||
|
GROUP BY name
|
||||||
|
""", (start_height, end_height)
|
||||||
|
return [{
|
||||||
|
'normalized': normalize_name(r['name'].decode()),
|
||||||
|
'claim_hash': r['claimID'],
|
||||||
|
'height': r['height']
|
||||||
|
} for r in self.sync_execute_fetchall(*sql)]
|
||||||
|
|
||||||
|
async def get_takeovers(self, start_height: int, end_height: int) -> List[dict]:
|
||||||
|
return await self.run_in_executor(self.sync_get_takeovers, start_height, end_height)
|
||||||
|
|
||||||
|
def sync_get_claim_metadata_count(self, start_height: int, end_height: int) -> int:
|
||||||
|
sql = "SELECT COUNT(*) FROM claim WHERE originalHeight BETWEEN ? AND ?"
|
||||||
|
return self.connection.execute(sql, (start_height, end_height)).fetchone()[0]
|
||||||
|
|
||||||
|
async def get_claim_metadata_count(self, start_height: int, end_height: int) -> int:
|
||||||
|
return await self.run_in_executor(self.sync_get_claim_metadata_count, start_height, end_height)
|
||||||
|
|
||||||
|
def sync_get_claim_metadata(self, claim_hashes) -> List[dict]:
|
||||||
|
sql = f"""
|
||||||
|
SELECT
|
||||||
|
name, claimID, activationHeight, expirationHeight, originalHeight,
|
||||||
|
(SELECT
|
||||||
|
CASE WHEN takeover.claimID = claim.claimID THEN takeover.height END
|
||||||
|
FROM takeover WHERE takeover.name = claim.nodename
|
||||||
|
ORDER BY height DESC LIMIT 1
|
||||||
|
) AS takeoverHeight,
|
||||||
|
(SELECT find_shortest_id(c.claimid, claim.claimid) FROM claim AS c
|
||||||
|
WHERE
|
||||||
|
c.nodename = claim.nodename AND
|
||||||
|
c.originalheight <= claim.originalheight AND
|
||||||
|
c.claimid != claim.claimid
|
||||||
|
) AS shortestID
|
||||||
|
FROM claim
|
||||||
|
WHERE claimID IN ({','.join(['?' for _ in claim_hashes])})
|
||||||
|
ORDER BY claimID
|
||||||
|
""", claim_hashes
|
||||||
|
return [{
|
||||||
|
"name": r["name"],
|
||||||
|
"claim_hash": r["claimID"],
|
||||||
|
"activation_height": r["activationHeight"],
|
||||||
|
"expiration_height": r["expirationHeight"],
|
||||||
|
"takeover_height": r["takeoverHeight"],
|
||||||
|
"creation_height": r["originalHeight"],
|
||||||
|
"short_url": make_short_url(r),
|
||||||
|
} for r in self.sync_execute_fetchall(*sql)]
|
||||||
|
|
||||||
|
async def get_claim_metadata(self, start_height: int, end_height: int) -> List[dict]:
|
||||||
|
return await self.run_in_executor(self.sync_get_claim_metadata, start_height, end_height)
|
||||||
|
|
||||||
|
def sync_get_support_metadata_count(self, start_height: int, end_height: int) -> int:
|
||||||
|
sql = "SELECT COUNT(*) FROM support WHERE blockHeight BETWEEN ? AND ?"
|
||||||
|
return self.connection.execute(sql, (start_height, end_height)).fetchone()[0]
|
||||||
|
|
||||||
|
async def get_support_metadata_count(self, start_height: int, end_height: int) -> int:
|
||||||
|
return await self.run_in_executor(self.sync_get_support_metadata_count, start_height, end_height)
|
||||||
|
|
||||||
|
def sync_get_support_metadata(self, start_height: int, end_height: int) -> List[dict]:
|
||||||
|
sql = """
|
||||||
|
SELECT name, txid, txn, activationHeight, expirationHeight
|
||||||
|
FROM support WHERE blockHeight BETWEEN ? AND ?
|
||||||
|
""", (start_height, end_height)
|
||||||
|
return [{
|
||||||
|
"name": r['name'],
|
||||||
|
"txo_hash_pk": r['txID'] + BCDataStream.uint32.pack(r['txN']),
|
||||||
|
"activation_height": r['activationHeight'],
|
||||||
|
"expiration_height": r['expirationHeight'],
|
||||||
|
} for r in self.sync_execute_fetchall(*sql)]
|
||||||
|
|
||||||
|
async def get_support_metadata(self, start_height: int, end_height: int) -> List[dict]:
|
||||||
|
return await self.run_in_executor(self.sync_get_support_metadata, start_height, end_height)
|
|
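
`BlockchainDB.sync_open()` above registers `FindShortestID` as a SQLite aggregate via `connection.create_aggregate("find_shortest_id", 2, FindShortestID)`, which is how the `shortestID` column in `sync_get_claim_metadata` is computed. A self-contained sketch of the same standard-library mechanism, using a throwaway in-memory table rather than lbrycrd's claims database (the claim ids are made up):

    import sqlite3
    from lbry.blockchain.database import FindShortestID

    # throwaway database standing in for lbrycrd's claims.sqlite
    conn = sqlite3.connect(':memory:')
    conn.create_aggregate("find_shortest_id", 2, FindShortestID)
    conn.execute("CREATE TABLE claim (claimid BLOB)")
    conn.executemany(
        "INSERT INTO claim VALUES (?)",
        [(bytes.fromhex('ab01')[::-1],), (bytes.fromhex('ac02')[::-1],)]
    )

    # for each existing claim id, compare against the "new" id and keep the
    # shortest unambiguous prefix, the same trick sync_get_claim_metadata uses
    new_id = bytes.fromhex('ad03')[::-1]
    short, = conn.execute(
        "SELECT find_shortest_id(claimid, ?) FROM claim", (new_id,)
    ).fetchone()
    print(short)  # 'ad', since the new id already differs at the second hex digit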
@@ -1,10 +1,19 @@
+import re
 import textwrap
-from .util import coins_to_satoshis, satoshis_to_coins
+from decimal import Decimal
+
+from lbry.constants import COIN


 def lbc_to_dewies(lbc: str) -> int:
     try:
-        return coins_to_satoshis(lbc)
+        if not isinstance(lbc, str):
+            raise ValueError("{coins} must be a string")
+        result = re.search(r'^(\d{1,10})\.(\d{1,8})$', lbc)
+        if result is not None:
+            whole, fractional = result.groups()
+            return int(whole + fractional.ljust(8, "0"))
+        raise ValueError(f"'{lbc}' is not a valid coin decimal")
     except ValueError:
         raise ValueError(textwrap.dedent(
             f"""
@@ -30,13 +39,17 @@ def lbc_to_dewies(lbc: str) -> int:


 def dewies_to_lbc(dewies) -> str:
-    return satoshis_to_coins(dewies)
+    coins = '{:.8f}'.format(dewies / COIN).rstrip('0')
+    if coins.endswith('.'):
+        return coins+'0'
+    else:
+        return coins


 def dict_values_to_lbc(d):
     lbc_dict = {}
     for key, value in d.items():
-        if isinstance(value, int):
+        if isinstance(value, (int, Decimal)):
             lbc_dict[key] = dewies_to_lbc(value)
         elif isinstance(value, dict):
             lbc_dict[key] = dict_values_to_lbc(value)
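
A worked example of the new string-based conversion, using only the two functions above and the fact that `COIN` is 10^8 dewies per LBC (imports go through the new top-level re-exports):

    from lbry import lbc_to_dewies, dewies_to_lbc

    assert lbc_to_dewies('1.5') == 150_000_000      # '1' + '5'.ljust(8, '0')
    assert lbc_to_dewies('0.00000001') == 1         # smallest representable amount
    assert dewies_to_lbc(150_000_000) == '1.5'      # trailing zeros stripped
    assert dewies_to_lbc(100_000_000) == '1.0'      # but never a bare '1.'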
@@ -1,5 +1,5 @@
 from binascii import hexlify, unhexlify
-from .constants import NULL_HASH32
+from lbry.constants import NULL_HASH32


 class TXRef:
@@ -29,28 +29,35 @@ class TXRef:

 class TXRefImmutable(TXRef):

-    __slots__ = ('_height',)
+    __slots__ = ('_height', '_timestamp')

     def __init__(self):
         super().__init__()
         self._height = -1
+        self._timestamp = -1

     @classmethod
-    def from_hash(cls, tx_hash: bytes, height: int) -> 'TXRefImmutable':
+    def from_hash(cls, tx_hash: bytes, height: int, timestamp: int) -> 'TXRefImmutable':
         ref = cls()
         ref._hash = tx_hash
         ref._id = hexlify(tx_hash[::-1]).decode()
         ref._height = height
+        ref._timestamp = timestamp
         return ref

     @classmethod
-    def from_id(cls, tx_id: str, height: int) -> 'TXRefImmutable':
+    def from_id(cls, tx_id: str, height: int, timestamp: int) -> 'TXRefImmutable':
         ref = cls()
         ref._id = tx_id
         ref._hash = unhexlify(tx_id)[::-1]
         ref._height = height
+        ref._timestamp = timestamp
         return ref

     @property
     def height(self):
         return self._height
+
+    @property
+    def timestamp(self):
+        return self._timestamp
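
With the extra `timestamp` argument, callers building a `TXRefImmutable` now carry the block time alongside the height. A minimal sketch (the txid is made up, and the module path is assumed to be the transaction module this class lives in):

    from lbry.blockchain.transaction import TXRefImmutable  # assumed module path

    ref = TXRefImmutable.from_id(
        'deadbeef' * 8,          # made-up 32-byte txid in hex
        height=650_000,
        timestamp=1_600_000_000,
    )
    assert ref.height == 650_000
    assert ref.timestamp == 1_600_000_000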
@@ -12,7 +12,7 @@ from typing import Optional, Iterator, Tuple, Callable
 from binascii import hexlify, unhexlify

 from lbry.crypto.hash import sha512, double_sha256, ripemd160
-from lbry.wallet.util import ArithUint256, date_to_julian_day
+from lbry.blockchain.util import ArithUint256
 from .checkpoints import HASHES


@@ -140,8 +140,8 @@ class Headers:
             return
         return int(self.first_block_timestamp + (height * self.timestamp_average_offset))

-    def estimated_julian_day(self, height):
-        return date_to_julian_day(date.fromtimestamp(self.estimated_timestamp(height)))
+    def estimated_date(self, height):
+        return date.fromtimestamp(self.estimated_timestamp(height))

     async def get_raw_header(self, height) -> bytes:
         if self.chunk_getter:
lbry/blockchain/lbrycrd.py
Normal file
346
lbry/blockchain/lbrycrd.py
Normal file
|
@ -0,0 +1,346 @@
|
||||||
|
import os
|
||||||
|
import struct
|
||||||
|
import shutil
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
import zipfile
|
||||||
|
import tempfile
|
||||||
|
import urllib.request
|
||||||
|
from typing import Optional
|
||||||
|
from binascii import hexlify
|
||||||
|
|
||||||
|
import aiohttp
|
||||||
|
import zmq
|
||||||
|
import zmq.asyncio
|
||||||
|
|
||||||
|
from lbry.conf import Config
|
||||||
|
from lbry.event import EventController
|
||||||
|
from lbry.error import LbrycrdEventSubscriptionError, LbrycrdUnauthorizedError
|
||||||
|
|
||||||
|
from .database import BlockchainDB
|
||||||
|
from .ledger import Ledger, RegTestLedger
|
||||||
|
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
DOWNLOAD_URL = (
|
||||||
|
'https://github.com/lbryio/lbrycrd/releases/download/v0.17.4.6/lbrycrd-linux-1746.zip'
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class Process(asyncio.SubprocessProtocol):
|
||||||
|
|
||||||
|
IGNORE_OUTPUT = [
|
||||||
|
b'keypool keep',
|
||||||
|
b'keypool reserve',
|
||||||
|
b'keypool return',
|
||||||
|
]
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.ready = asyncio.Event()
|
||||||
|
self.stopped = asyncio.Event()
|
||||||
|
|
||||||
|
def pipe_data_received(self, fd, data):
|
||||||
|
if not any(ignore in data for ignore in self.IGNORE_OUTPUT):
|
||||||
|
if b'Error:' in data:
|
||||||
|
log.error(data.decode())
|
||||||
|
else:
|
||||||
|
for line in data.decode().splitlines():
|
||||||
|
log.debug(line.rstrip())
|
||||||
|
if b'Error:' in data:
|
||||||
|
self.ready.set()
|
||||||
|
raise SystemError(data.decode())
|
||||||
|
if b'Done loading' in data:
|
||||||
|
self.ready.set()
|
||||||
|
|
||||||
|
def process_exited(self):
|
||||||
|
self.stopped.set()
|
||||||
|
self.ready.set()
|
||||||
|
|
||||||
|
|
||||||
|
ZMQ_BLOCK_EVENT = 'pubhashblock'
|
||||||
|
|
||||||
|
|
||||||
|
class Lbrycrd:
|
||||||
|
|
||||||
|
def __init__(self, ledger: Ledger):
|
||||||
|
self.ledger, self.conf = ledger, ledger.conf
|
||||||
|
self.data_dir = self.actual_data_dir = ledger.conf.lbrycrd_dir
|
||||||
|
if self.is_regtest:
|
||||||
|
self.actual_data_dir = os.path.join(self.data_dir, 'regtest')
|
||||||
|
self.blocks_dir = os.path.join(self.actual_data_dir, 'blocks')
|
||||||
|
self.bin_dir = os.path.join(os.path.dirname(__file__), 'bin')
|
||||||
|
self.daemon_bin = os.path.join(self.bin_dir, 'lbrycrdd')
|
||||||
|
self.cli_bin = os.path.join(self.bin_dir, 'lbrycrd-cli')
|
||||||
|
self.protocol = None
|
||||||
|
self.transport = None
|
||||||
|
self.subscribed = False
|
||||||
|
self.subscription: Optional[asyncio.Task] = None
|
||||||
|
self.default_generate_address = None
|
||||||
|
self._on_block_hash_controller = EventController()
|
||||||
|
self.on_block_hash = self._on_block_hash_controller.stream
|
||||||
|
self.on_block_hash.listen(lambda e: log.info('%s %s', hexlify(e['hash']), e['msg']))
|
||||||
|
self._on_tx_hash_controller = EventController()
|
||||||
|
self.on_tx_hash = self._on_tx_hash_controller.stream
|
||||||
|
|
||||||
|
self.db = BlockchainDB(self.actual_data_dir)
|
||||||
|
self._session: Optional[aiohttp.ClientSession] = None
|
||||||
|
self._loop: Optional[asyncio.AbstractEventLoop] = None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def session(self) -> aiohttp.ClientSession:
|
||||||
|
if self._session is None:
|
||||||
|
self._session = aiohttp.ClientSession()
|
||||||
|
return self._session
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def temp_regtest(cls):
|
||||||
|
return cls(RegTestLedger(
|
||||||
|
Config.with_same_dir(tempfile.mkdtemp()).set(
|
||||||
|
blockchain="regtest",
|
||||||
|
lbrycrd_rpc_port=9245 + 2, # avoid conflict with default rpc port
|
||||||
|
lbrycrd_peer_port=9246 + 2, # avoid conflict with default peer port
|
||||||
|
lbrycrd_zmq="tcp://127.0.0.1:29002"
|
||||||
|
)
|
||||||
|
))
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_block_file_name(block_file_number):
|
||||||
|
return f'blk{block_file_number:05}.dat'
|
||||||
|
|
||||||
|
def get_block_file_path(self, block_file_number):
|
||||||
|
return os.path.join(
|
||||||
|
self.actual_data_dir, 'blocks',
|
||||||
|
self.get_block_file_name(block_file_number)
|
||||||
|
)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def is_regtest(self):
|
||||||
|
return isinstance(self.ledger, RegTestLedger)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def rpc_url(self):
|
||||||
|
return (
|
||||||
|
f'http://{self.conf.lbrycrd_rpc_user}:{self.conf.lbrycrd_rpc_pass}'
|
||||||
|
f'@{self.conf.lbrycrd_rpc_host}:{self.conf.lbrycrd_rpc_port}/'
|
||||||
|
)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def exists(self):
|
||||||
|
return (
|
||||||
|
os.path.exists(self.cli_bin) and
|
||||||
|
os.path.exists(self.daemon_bin)
|
||||||
|
)
|
||||||
|
|
||||||
|
async def download(self):
|
||||||
|
downloaded_file = os.path.join(
|
||||||
|
self.bin_dir, DOWNLOAD_URL[DOWNLOAD_URL.rfind('/')+1:]
|
||||||
|
)
|
||||||
|
|
||||||
|
if not os.path.exists(self.bin_dir):
|
||||||
|
os.mkdir(self.bin_dir)
|
||||||
|
|
||||||
|
if not os.path.exists(downloaded_file):
|
||||||
|
log.info('Downloading: %s', DOWNLOAD_URL)
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
async with session.get(DOWNLOAD_URL) as response:
|
||||||
|
with open(downloaded_file, 'wb') as out_file:
|
||||||
|
while True:
|
||||||
|
chunk = await response.content.read(4096)
|
||||||
|
if not chunk:
|
||||||
|
break
|
||||||
|
out_file.write(chunk)
|
||||||
|
with urllib.request.urlopen(DOWNLOAD_URL) as response:
|
||||||
|
with open(downloaded_file, 'wb') as out_file:
|
||||||
|
shutil.copyfileobj(response, out_file)
|
||||||
|
|
||||||
|
log.info('Extracting: %s', downloaded_file)
|
||||||
|
|
||||||
|
with zipfile.ZipFile(downloaded_file) as dotzip:
|
||||||
|
dotzip.extractall(self.bin_dir)
|
||||||
|
# zipfile bug https://bugs.python.org/issue15795
|
||||||
|
os.chmod(self.cli_bin, 0o755)
|
||||||
|
os.chmod(self.daemon_bin, 0o755)
|
||||||
|
|
||||||
|
return self.exists
|
||||||
|
|
||||||
|
async def ensure(self):
|
||||||
|
return self.exists or await self.download()
|
||||||
|
|
||||||
|
def get_start_command(self, *args):
|
||||||
|
if self.is_regtest:
|
||||||
|
args += ('-regtest',)
|
||||||
|
if self.conf.lbrycrd_zmq:
|
||||||
|
args += (
|
||||||
|
f'-zmqpubhashblock={self.conf.lbrycrd_zmq}',
|
||||||
|
f'-zmqpubhashtx={self.conf.lbrycrd_zmq}',
|
||||||
|
)
|
||||||
|
return (
|
||||||
|
self.daemon_bin,
|
||||||
|
f'-datadir={self.data_dir}',
|
||||||
|
f'-port={self.conf.lbrycrd_peer_port}',
|
||||||
|
f'-rpcport={self.conf.lbrycrd_rpc_port}',
|
||||||
|
f'-rpcuser={self.conf.lbrycrd_rpc_user}',
|
||||||
|
f'-rpcpassword={self.conf.lbrycrd_rpc_pass}',
|
||||||
|
'-server', '-printtoconsole',
|
||||||
|
*args
|
||||||
|
)
|
||||||
|
|
||||||
|
async def open(self):
|
||||||
|
await self.db.open()
|
||||||
|
|
||||||
|
async def close(self):
|
||||||
|
await self.db.close()
|
||||||
|
await self.close_session()
|
||||||
|
|
||||||
|
async def close_session(self):
|
||||||
|
if self._session is not None:
|
||||||
|
await self._session.close()
|
||||||
|
self._session = None
|
||||||
|
|
||||||
|
async def start(self, *args):
|
||||||
|
loop = asyncio.get_running_loop()
|
||||||
|
command = self.get_start_command(*args)
|
||||||
|
log.info(' '.join(command))
|
||||||
|
self.transport, self.protocol = await loop.subprocess_exec(Process, *command)
|
||||||
|
await self.protocol.ready.wait()
|
||||||
|
assert not self.protocol.stopped.is_set()
|
||||||
|
await self.open()
|
||||||
|
|
||||||
|
async def stop(self, cleanup=True):
|
||||||
|
try:
|
||||||
|
await self.close()
|
||||||
|
self.transport.terminate()
|
||||||
|
await self.protocol.stopped.wait()
|
||||||
|
assert self.transport.get_returncode() == 0, "lbrycrd daemon exit with error"
|
||||||
|
self.transport.close()
|
||||||
|
finally:
|
||||||
|
if cleanup:
|
||||||
|
await self.cleanup()
|
||||||
|
|
||||||
|
async def cleanup(self):
|
||||||
|
await asyncio.get_running_loop().run_in_executor(
|
||||||
|
None, shutil.rmtree, self.data_dir, True
|
||||||
|
)
|
||||||
|
|
||||||
|
async def ensure_subscribable(self):
|
||||||
|
zmq_notifications = await self.get_zmq_notifications()
|
||||||
|
subs = {e['type']: e['address'] for e in zmq_notifications}
|
||||||
|
if ZMQ_BLOCK_EVENT not in subs:
|
||||||
|
raise LbrycrdEventSubscriptionError(ZMQ_BLOCK_EVENT)
|
||||||
|
if not self.conf.lbrycrd_zmq:
|
||||||
|
self.conf.lbrycrd_zmq = subs[ZMQ_BLOCK_EVENT]
|
||||||
|
|
||||||
|
async def subscribe(self):
|
||||||
|
if not self.subscribed:
|
||||||
|
self.subscribed = True
|
||||||
|
ctx = zmq.asyncio.Context.instance()
|
||||||
|
sock = ctx.socket(zmq.SUB) # pylint: disable=no-member
|
||||||
|
sock.connect(self.conf.lbrycrd_zmq)
|
||||||
|
sock.subscribe("hashblock")
|
||||||
|
sock.subscribe("hashtx")
|
||||||
|
self.subscription = asyncio.create_task(self.subscription_handler(sock))
|
||||||
|
|
||||||
|
async def subscription_handler(self, sock):
|
||||||
|
try:
|
||||||
|
while self.subscribed:
|
||||||
|
msg = await sock.recv_multipart()
|
||||||
|
if msg[0] == b'hashtx':
|
||||||
|
await self._on_tx_hash_controller.add({
|
||||||
|
'hash': msg[1],
|
||||||
|
'msg': struct.unpack('<I', msg[2])[0]
|
||||||
|
})
|
||||||
|
elif msg[0] == b'hashblock':
|
||||||
|
await self._on_block_hash_controller.add({
|
||||||
|
'hash': msg[1],
|
||||||
|
'msg': struct.unpack('<I', msg[2])[0]
|
||||||
|
})
|
||||||
|
except asyncio.CancelledError:
|
||||||
|
sock.close()
|
||||||
|
raise
|
||||||
|
|
||||||
|
def unsubscribe(self):
|
||||||
|
if self.subscribed:
|
||||||
|
self.subscribed = False
|
||||||
|
self.subscription.cancel()
|
||||||
|
self.subscription = None
|
||||||
|
|
||||||
|
def sync_run(self, coro):
|
||||||
|
if self._loop is None:
|
||||||
|
try:
|
||||||
|
self._loop = asyncio.get_event_loop()
|
||||||
|
except RuntimeError:
|
||||||
|
self._loop = asyncio.new_event_loop()
|
||||||
|
return self._loop.run_until_complete(coro)
|
||||||
|
|
||||||
|
async def rpc(self, method, params=None):
|
||||||
|
if self._session is not None and self._session.closed:
|
||||||
|
raise Exception("session is closed! RPC attempted during shutting down.")
|
||||||
|
message = {
|
||||||
|
"jsonrpc": "1.0",
|
||||||
|
"id": "1",
|
||||||
|
"method": method,
|
||||||
|
"params": params or []
|
||||||
|
}
|
||||||
|
async with self.session.post(self.rpc_url, json=message) as resp:
|
||||||
|
if resp.status == 401:
|
||||||
|
raise LbrycrdUnauthorizedError()
|
||||||
|
try:
|
||||||
|
result = await resp.json()
|
||||||
|
except aiohttp.ContentTypeError as e:
|
||||||
|
raise Exception(await resp.text()) from e
|
||||||
|
if not result['error']:
|
||||||
|
return result['result']
|
||||||
|
else:
|
||||||
|
result['error'].update(method=method, params=params)
|
||||||
|
raise Exception(result['error'])
|
||||||
|
|
||||||
|
async def get_zmq_notifications(self):
|
||||||
|
return await self.rpc("getzmqnotifications")
|
||||||
|
|
||||||
|
async def generate(self, blocks):
|
||||||
|
if self.default_generate_address is None:
|
||||||
|
self.default_generate_address = await self.get_new_address()
|
||||||
|
return await self.generate_to_address(blocks, self.default_generate_address)
|
||||||
|
|
||||||
|
async def get_new_address(self):
|
||||||
|
return await self.rpc("getnewaddress")
|
||||||
|
|
||||||
|
async def generate_to_address(self, blocks, address):
|
||||||
|
return await self.rpc("generatetoaddress", [blocks, address])
|
||||||
|
|
||||||
|
async def send_to_address(self, address, amount):
|
||||||
|
return await self.rpc("sendtoaddress", [address, amount])
|
||||||
|
|
||||||
|
async def get_block(self, block_hash):
|
||||||
|
return await self.rpc("getblock", [block_hash])
|
||||||
|
|
||||||
|
async def get_raw_mempool(self):
|
||||||
|
return await self.rpc("getrawmempool")
|
||||||
|
|
||||||
|
async def get_raw_transaction(self, txid):
|
||||||
|
return await self.rpc("getrawtransaction", [txid])
|
||||||
|
|
||||||
|
async def fund_raw_transaction(self, tx):
|
||||||
|
return await self.rpc("fundrawtransaction", [tx])
|
||||||
|
|
||||||
|
async def sign_raw_transaction_with_wallet(self, tx):
|
||||||
|
return await self.rpc("signrawtransactionwithwallet", [tx])
|
||||||
|
|
||||||
|
async def send_raw_transaction(self, tx):
|
||||||
|
return await self.rpc("sendrawtransaction", [tx])
|
||||||
|
|
||||||
|
async def claim_name(self, name, data, amount):
|
||||||
|
return await self.rpc("claimname", [name, data, amount])
|
||||||
|
|
||||||
|
async def update_claim(self, txid, data, amount):
|
||||||
|
return await self.rpc("updateclaim", [txid, data, amount])
|
||||||
|
|
||||||
|
async def abandon_claim(self, txid, address):
|
||||||
|
return await self.rpc("abandonclaim", [txid, address])
|
||||||
|
|
||||||
|
async def support_claim(self, name, claim_id, amount, value="", istip=False):
|
||||||
|
return await self.rpc("supportclaim", [name, claim_id, amount, value, istip])
|
||||||
|
|
||||||
|
async def abandon_support(self, txid, address):
|
||||||
|
return await self.rpc("abandonsupport", [txid, address])
|
lbry/blockchain/ledger.py (new file, 179 lines)
|
@@ -0,0 +1,179 @@
|
||||||
|
from binascii import unhexlify
|
||||||
|
from string import hexdigits
|
||||||
|
from typing import TYPE_CHECKING, Type
|
||||||
|
|
||||||
|
from lbry.crypto.hash import hash160, double_sha256
|
||||||
|
from lbry.crypto.base58 import Base58
|
||||||
|
from lbry.schema.url import URL
|
||||||
|
from .header import Headers, UnvalidatedHeaders
|
||||||
|
from .checkpoints import HASHES
|
||||||
|
from .dewies import lbc_to_dewies
|
||||||
|
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from lbry.conf import Config
|
||||||
|
|
||||||
|
|
||||||
|
class Ledger:
|
||||||
|
name = 'LBRY Credits'
|
||||||
|
symbol = 'LBC'
|
||||||
|
network_name = 'mainnet'
|
||||||
|
|
||||||
|
headers_class = Headers
|
||||||
|
|
||||||
|
secret_prefix = bytes((0x1c,))
|
||||||
|
pubkey_address_prefix = bytes((0x55,))
|
||||||
|
script_address_prefix = bytes((0x7a,))
|
||||||
|
extended_public_key_prefix = unhexlify('0488b21e')
|
||||||
|
extended_private_key_prefix = unhexlify('0488ade4')
|
||||||
|
|
||||||
|
max_target = 0x0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
|
||||||
|
genesis_hash = '9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463'
|
||||||
|
genesis_bits = 0x1f00ffff
|
||||||
|
target_timespan = 150
|
||||||
|
|
||||||
|
fee_per_byte = 50
|
||||||
|
fee_per_name_char = 200000
|
||||||
|
|
||||||
|
checkpoints = HASHES
|
||||||
|
|
||||||
|
def __init__(self, conf: 'Config'):
|
||||||
|
self.conf = conf
|
||||||
|
self.coin_selection_strategy = None
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_id(cls):
|
||||||
|
return '{}_{}'.format(cls.symbol.lower(), cls.network_name.lower())
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def address_to_hash160(address) -> bytes:
|
||||||
|
return Base58.decode(address)[1:21]
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def pubkey_hash_to_address(cls, h160):
|
||||||
|
raw_address = cls.pubkey_address_prefix + h160
|
||||||
|
return Base58.encode(bytearray(raw_address + double_sha256(raw_address)[0:4]))
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def public_key_to_address(cls, public_key):
|
||||||
|
return cls.pubkey_hash_to_address(hash160(public_key))
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def script_hash_to_address(cls, h160):
|
||||||
|
raw_address = cls.script_address_prefix + h160
|
||||||
|
return Base58.encode(bytearray(raw_address + double_sha256(raw_address)[0:4]))
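Aside: both address helpers implement base58check: prefix byte + hash160, followed by the first four bytes of a double SHA-256 checksum. A self-contained sketch using only hashlib (the alphabet below is the standard bitcoin-style one; this is not the project's Base58 class):

import hashlib

B58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def base58_encode(data: bytes) -> str:
    n = int.from_bytes(data, 'big')
    out = ''
    while n > 0:
        n, r = divmod(n, 58)
        out = B58_ALPHABET[r] + out
    pad = len(data) - len(data.lstrip(b'\x00'))  # leading zero bytes become '1'
    return '1' * pad + out

def hash_to_address(h160: bytes, prefix: bytes = bytes((0x55,))) -> str:
    # same scheme as pubkey_hash_to_address / script_hash_to_address above
    raw = prefix + h160
    checksum = hashlib.sha256(hashlib.sha256(raw).digest()).digest()[:4]
    return base58_encode(raw + checksum)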
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def private_key_to_wif(private_key):
|
||||||
|
return b'\x1c' + private_key + b'\x01'
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def is_valid_address(cls, address):
|
||||||
|
decoded = Base58.decode_check(address)
|
||||||
|
return decoded[0] == cls.pubkey_address_prefix[0]
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def valid_address_or_error(cls, address):
|
||||||
|
try:
|
||||||
|
assert cls.is_valid_address(address)
|
||||||
|
except Exception:
|
||||||
|
raise Exception(f"'{address}' is not a valid address")
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def valid_claim_id(claim_id: str):
|
||||||
|
if not len(claim_id) == 40:
|
||||||
|
raise Exception(f"Incorrect claimid length: {len(claim_id)}")
|
||||||
|
if set(claim_id).difference(hexdigits):
|
||||||
|
raise Exception("Claim id is not hex encoded")
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def valid_channel_name_or_error(name: str):
|
||||||
|
try:
|
||||||
|
if not name:
|
||||||
|
raise Exception("Channel name cannot be blank.")
|
||||||
|
parsed = URL.parse(name)
|
||||||
|
if not parsed.has_channel:
|
||||||
|
raise Exception("Channel names must start with '@' symbol.")
|
||||||
|
if parsed.channel.name != name:
|
||||||
|
raise Exception("Channel name has invalid character")
|
||||||
|
except (TypeError, ValueError):
|
||||||
|
raise Exception("Invalid channel name.")
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def valid_stream_name_or_error(name: str):
|
||||||
|
try:
|
||||||
|
if not name:
|
||||||
|
raise Exception('Stream name cannot be blank.')
|
||||||
|
parsed = URL.parse(name)
|
||||||
|
if parsed.has_channel:
|
||||||
|
raise Exception(
|
||||||
|
"Stream names cannot start with '@' symbol. This is reserved for channels claims."
|
||||||
|
)
|
||||||
|
if not parsed.has_stream or parsed.stream.name != name:
|
||||||
|
raise Exception('Stream name has invalid characters.')
|
||||||
|
except (TypeError, ValueError):
|
||||||
|
raise Exception("Invalid stream name.")
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def valid_collection_name_or_error(name: str):
|
||||||
|
try:
|
||||||
|
if not name:
|
||||||
|
raise Exception('Collection name cannot be blank.')
|
||||||
|
parsed = URL.parse(name)
|
||||||
|
if parsed.has_channel:
|
||||||
|
raise Exception(
|
||||||
|
"Collection names cannot start with '@' symbol. This is reserved for channels claims."
|
||||||
|
)
|
||||||
|
if not parsed.has_stream or parsed.stream.name != name:
|
||||||
|
raise Exception('Collection name has invalid characters.')
|
||||||
|
except (TypeError, ValueError):
|
||||||
|
raise Exception("Invalid collection name.")
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_dewies_or_error(argument: str, lbc: str, positive_value=False):
|
||||||
|
try:
|
||||||
|
dewies = lbc_to_dewies(lbc)
|
||||||
|
if positive_value and dewies <= 0:
|
||||||
|
raise ValueError(f"'{argument}' value must be greater than 0.0")
|
||||||
|
return dewies
|
||||||
|
except ValueError as e:
|
||||||
|
raise ValueError(f"Invalid value for '{argument}': {e.args[0]}")
|
||||||
|
|
||||||
|
def get_fee_address(self, kwargs: dict, claim_address: str) -> str:
|
||||||
|
if 'fee_address' in kwargs:
|
||||||
|
self.valid_address_or_error(kwargs['fee_address'])
|
||||||
|
return kwargs['fee_address']
|
||||||
|
if 'fee_currency' in kwargs or 'fee_amount' in kwargs:
|
||||||
|
return claim_address
|
||||||
|
|
||||||
|
|
||||||
|
class TestNetLedger(Ledger):
|
||||||
|
network_name = 'testnet'
|
||||||
|
pubkey_address_prefix = bytes((111,))
|
||||||
|
script_address_prefix = bytes((196,))
|
||||||
|
extended_public_key_prefix = unhexlify('043587cf')
|
||||||
|
extended_private_key_prefix = unhexlify('04358394')
|
||||||
|
checkpoints = {}
|
||||||
|
|
||||||
|
|
||||||
|
class RegTestLedger(Ledger):
|
||||||
|
network_name = 'regtest'
|
||||||
|
headers_class = UnvalidatedHeaders
|
||||||
|
pubkey_address_prefix = bytes((111,))
|
||||||
|
script_address_prefix = bytes((196,))
|
||||||
|
extended_public_key_prefix = unhexlify('043587cf')
|
||||||
|
extended_private_key_prefix = unhexlify('04358394')
|
||||||
|
|
||||||
|
max_target = 0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
|
||||||
|
genesis_hash = '6e3fcf1299d4ec5d79c3a4c91d624a4acf9e2e173d95a1a0504f677669687556'
|
||||||
|
genesis_bits = 0x207fffff
|
||||||
|
target_timespan = 1
|
||||||
|
checkpoints = {}
|
||||||
|
|
||||||
|
|
||||||
|
def ledger_class_from_name(name) -> Type[Ledger]:
|
||||||
|
return {
|
||||||
|
Ledger.network_name: Ledger,
|
||||||
|
TestNetLedger.network_name: TestNetLedger,
|
||||||
|
RegTestLedger.network_name: RegTestLedger
|
||||||
|
}[name]
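Usage note: the factory keys the lookup by network_name, so, with the classes defined above:

assert ledger_class_from_name('mainnet') is Ledger
assert ledger_class_from_name('testnet') is TestNetLedger
assert ledger_class_from_name('regtest') is RegTestLedger
# any other name raises KeyError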
|
|
@@ -4,7 +4,10 @@ from binascii import hexlify
 from collections import namedtuple
 
 from .bcd_data_stream import BCDataStream
-from .util import subclass_tuple
+
+
+def subclass_tuple(name, base):
+    return type(name, (base,), {'__slots__': ()})
 
 
 # bitcoin opcodes
@@ -294,20 +297,25 @@ class Template:
 
 class Script:
 
-    __slots__ = 'source', '_template', '_values', '_template_hint'
+    __slots__ = 'source', 'offset', '_template', '_values', '_template_hint'
 
     templates: List[Template] = []
 
     NO_SCRIPT = Template('no_script', None)  # special case
 
-    def __init__(self, source=None, template=None, values=None, template_hint=None):
+    def __init__(self, source=None, template=None, values=None, template_hint=None, offset=None):
         self.source = source
+        self.offset = offset
         self._template = template
         self._values = values
         self._template_hint = template_hint
         if source is None and template and values:
             self.generate()
 
+    @property
+    def length(self):
+        return len(self.source)
+
     @property
     def template(self):
         if self._template is None:
@@ -438,6 +446,17 @@ class OutputScript(Script):
         SUPPORT_CLAIM_OPCODES + PAY_SCRIPT_HASH.opcodes
     ))
 
+    SUPPORT_CLAIM_DATA_OPCODES = (
+        OP_SUPPORT_CLAIM, PUSH_SINGLE('claim_name'), PUSH_SINGLE('claim_id'), PUSH_SINGLE('support'),
+        OP_2DROP, OP_2DROP
+    )
+    SUPPORT_CLAIM_DATA_PUBKEY = Template('support_claim+data+pay_pubkey_hash', (
+        SUPPORT_CLAIM_DATA_OPCODES + PAY_PUBKEY_HASH.opcodes
+    ))
+    SUPPORT_CLAIM_DATA_SCRIPT = Template('support_claim+data+pay_script_hash', (
+        SUPPORT_CLAIM_DATA_OPCODES + PAY_SCRIPT_HASH.opcodes
+    ))
+
     UPDATE_CLAIM_OPCODES = (
         OP_UPDATE_CLAIM, PUSH_SINGLE('claim_name'), PUSH_SINGLE('claim_id'), PUSH_SINGLE('claim'),
         OP_2DROP, OP_2DROP
@@ -474,6 +493,8 @@ class OutputScript(Script):
         CLAIM_NAME_SCRIPT,
         SUPPORT_CLAIM_PUBKEY,
         SUPPORT_CLAIM_SCRIPT,
+        SUPPORT_CLAIM_DATA_PUBKEY,
+        SUPPORT_CLAIM_DATA_SCRIPT,
         UPDATE_CLAIM_PUBKEY,
         UPDATE_CLAIM_SCRIPT,
         SELL_CLAIM, SELL_SCRIPT,
@@ -527,6 +548,16 @@ class OutputScript(Script):
             'pubkey_hash': pubkey_hash
         })
 
+    @classmethod
+    def pay_support_data_pubkey_hash(
+            cls, claim_name: bytes, claim_id: bytes, support, pubkey_hash: bytes):
+        return cls(template=cls.SUPPORT_CLAIM_DATA_PUBKEY, values={
+            'claim_name': claim_name,
+            'claim_id': claim_id,
+            'support': support,
+            'pubkey_hash': pubkey_hash
+        })
+
     @classmethod
     def sell_script(cls, price):
         return cls(template=cls.SELL_SCRIPT, values={
@@ -575,6 +606,10 @@ class OutputScript(Script):
     def is_support_claim(self):
         return self.template.name.startswith('support_claim+')
 
+    @property
+    def is_support_claim_data(self):
+        return self.template.name.startswith('support_claim+data+')
+
     @property
     def is_sell_claim(self):
         return self.template.name.startswith('sell_claim+')
lbry/blockchain/sync/__init__.py (new file, 1 line)
|
@ -0,0 +1 @@
|
||||||
|
from .synchronizer import BlockchainSync
|
lbry/blockchain/sync/blocks.py (new file, 336 lines)
|
@ -0,0 +1,336 @@
|
||||||
|
import logging
|
||||||
|
from binascii import hexlify, unhexlify
|
||||||
|
from typing import Tuple, List
|
||||||
|
|
||||||
|
from sqlalchemy import table, text, func, union, between
|
||||||
|
from sqlalchemy.future import select
|
||||||
|
from sqlalchemy.schema import CreateTable
|
||||||
|
|
||||||
|
from lbry.db.tables import (
|
||||||
|
Block as BlockTable, BlockFilter, BlockGroupFilter,
|
||||||
|
TX, TXFilter, MempoolFilter, TXO, TXI, Claim, Tag, Support
|
||||||
|
)
|
||||||
|
from lbry.db.tables import (
|
||||||
|
pg_add_block_constraints_and_indexes,
|
||||||
|
pg_add_block_filter_constraints_and_indexes,
|
||||||
|
pg_add_tx_constraints_and_indexes,
|
||||||
|
pg_add_tx_filter_constraints_and_indexes,
|
||||||
|
pg_add_txo_constraints_and_indexes,
|
||||||
|
pg_add_txi_constraints_and_indexes,
|
||||||
|
)
|
||||||
|
from lbry.db.query_context import ProgressContext, event_emitter, context
|
||||||
|
from lbry.db.sync import set_input_addresses, update_spent_outputs
|
||||||
|
from lbry.blockchain.transaction import Transaction
|
||||||
|
from lbry.blockchain.block import Block, create_address_filter
|
||||||
|
from lbry.blockchain.bcd_data_stream import BCDataStream
|
||||||
|
|
||||||
|
from .context import get_or_initialize_lbrycrd
|
||||||
|
from .filter_builder import FilterBuilder
|
||||||
|
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def get_best_block_height_for_file(file_number):
|
||||||
|
return context().fetchone(
|
||||||
|
select(func.coalesce(func.max(BlockTable.c.height), -1).label('height'))
|
||||||
|
.where(BlockTable.c.file_number == file_number)
|
||||||
|
)['height']
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.blocks.file", "blocks", "txs", throttle=100)
|
||||||
|
def sync_block_file(
|
||||||
|
file_number: int, start_height: int, txs: int, flush_size: int, p: ProgressContext
|
||||||
|
):
|
||||||
|
chain = get_or_initialize_lbrycrd(p.ctx)
|
||||||
|
new_blocks = chain.db.sync_get_blocks_in_file(file_number, start_height)
|
||||||
|
if not new_blocks:
|
||||||
|
return -1
|
||||||
|
file_name = chain.get_block_file_name(file_number)
|
||||||
|
p.start(len(new_blocks), txs, progress_id=file_number, label=file_name)
|
||||||
|
block_file_path = chain.get_block_file_path(file_number)
|
||||||
|
done_blocks = done_txs = 0
|
||||||
|
last_block_processed, loader = -1, p.ctx.get_bulk_loader()
|
||||||
|
with open(block_file_path, "rb") as fp:
|
||||||
|
stream = BCDataStream(fp=fp)
|
||||||
|
for done_blocks, block_info in enumerate(new_blocks, start=1):
|
||||||
|
block_height = block_info["height"]
|
||||||
|
fp.seek(block_info["data_offset"])
|
||||||
|
block = Block.from_data_stream(stream, block_height, file_number)
|
||||||
|
loader.add_block(block)
|
||||||
|
if len(loader.txs) >= flush_size:
|
||||||
|
done_txs += loader.flush(TX)
|
||||||
|
p.step(done_blocks, done_txs)
|
||||||
|
last_block_processed = block_height
|
||||||
|
if p.ctx.stop_event.is_set():
|
||||||
|
return last_block_processed
|
||||||
|
if loader.txs:
|
||||||
|
done_txs += loader.flush(TX)
|
||||||
|
p.step(done_blocks, done_txs)
|
||||||
|
return last_block_processed
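Aside: the loop above interleaves parsing with a flush every flush_size transactions so memory stays bounded. The batching pattern in isolation, with an illustrative stand-in for the bulk loader rather than the project's API:

def flush_in_batches(items, flush, flush_size=25_000):
    # `items` is any iterable and `flush` any callable taking a list; both are hypothetical names.
    buffer = []
    for item in items:
        buffer.append(item)
        if len(buffer) >= flush_size:
            flush(buffer)
            buffer.clear()
    if buffer:  # flush whatever is left at the end
        flush(buffer)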
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.blocks.indexes", "steps")
|
||||||
|
def blocks_constraints_and_indexes(p: ProgressContext):
|
||||||
|
p.start(1 + len(pg_add_block_constraints_and_indexes))
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM ANALYZE block;"))
|
||||||
|
p.step()
|
||||||
|
for constraint in pg_add_block_constraints_and_indexes:
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute(text(constraint))
|
||||||
|
p.step()
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.blocks.vacuum", "steps")
|
||||||
|
def blocks_vacuum(p: ProgressContext):
|
||||||
|
p.start(1)
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM block;"))
|
||||||
|
p.step()
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.spends.main", "steps")
|
||||||
|
def sync_spends(initial_sync: bool, p: ProgressContext):
|
||||||
|
if initial_sync:
|
||||||
|
p.start(
|
||||||
|
7 +
|
||||||
|
len(pg_add_tx_constraints_and_indexes) +
|
||||||
|
len(pg_add_txi_constraints_and_indexes) +
|
||||||
|
len(pg_add_txo_constraints_and_indexes)
|
||||||
|
)
|
||||||
|
# 1. tx table stuff
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM ANALYZE tx;"))
|
||||||
|
p.step()
|
||||||
|
for constraint in pg_add_tx_constraints_and_indexes:
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute(text(constraint))
|
||||||
|
p.step()
|
||||||
|
# A. Update TXIs to have the address of TXO they are spending.
|
||||||
|
# 2. txi table reshuffling
|
||||||
|
p.ctx.execute(text("ALTER TABLE txi RENAME TO old_txi;"))
|
||||||
|
p.ctx.execute(CreateTable(TXI, include_foreign_key_constraints=[]))
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute(text("ALTER TABLE txi DROP CONSTRAINT txi_pkey;"))
|
||||||
|
p.step()
|
||||||
|
# 3. insert
|
||||||
|
old_txi = table("old_txi", *(c.copy() for c in TXI.columns)) # pylint: disable=not-an-iterable
|
||||||
|
columns = [c for c in old_txi.columns if c.name != "address"] + [TXO.c.address]
|
||||||
|
join_txi_on_txo = old_txi.join(TXO, old_txi.c.txo_hash == TXO.c.txo_hash)
|
||||||
|
select_txis = select(*columns).select_from(join_txi_on_txo)
|
||||||
|
insert_txis = TXI.insert().from_select(columns, select_txis)
|
||||||
|
p.ctx.execute(insert_txis)
|
||||||
|
p.step()
|
||||||
|
# 4. drop old txi and vacuum
|
||||||
|
p.ctx.execute(text("DROP TABLE old_txi;"))
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM ANALYZE txi;"))
|
||||||
|
p.step()
|
||||||
|
for constraint in pg_add_txi_constraints_and_indexes:
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute(text(constraint))
|
||||||
|
p.step()
|
||||||
|
# B. Update TXOs to have the height at which they were spent (if they were).
|
||||||
|
# 5. txo table reshuffling
|
||||||
|
p.ctx.execute(text("ALTER TABLE txo RENAME TO old_txo;"))
|
||||||
|
p.ctx.execute(CreateTable(TXO, include_foreign_key_constraints=[]))
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute(text("ALTER TABLE txo DROP CONSTRAINT txo_pkey;"))
|
||||||
|
p.step()
|
||||||
|
# 6. insert
|
||||||
|
old_txo = table("old_txo", *(c.copy() for c in TXO.columns)) # pylint: disable=not-an-iterable
|
||||||
|
columns = [c for c in old_txo.columns if c.name != "spent_height"]
|
||||||
|
insert_columns = columns + [TXO.c.spent_height]
|
||||||
|
select_columns = columns + [func.coalesce(TXI.c.height, 0).label("spent_height")]
|
||||||
|
join_txo_on_txi = old_txo.join(TXI, old_txo.c.txo_hash == TXI.c.txo_hash, isouter=True)
|
||||||
|
select_txos = select(*select_columns).select_from(join_txo_on_txi)
|
||||||
|
insert_txos = TXO.insert().from_select(insert_columns, select_txos)
|
||||||
|
p.ctx.execute(insert_txos)
|
||||||
|
p.step()
|
||||||
|
# 7. drop old txo
|
||||||
|
p.ctx.execute(text("DROP TABLE old_txo;"))
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM ANALYZE txo;"))
|
||||||
|
p.step()
|
||||||
|
for constraint in pg_add_txo_constraints_and_indexes:
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute(text(constraint))
|
||||||
|
p.step()
|
||||||
|
else:
|
||||||
|
p.start(5)
|
||||||
|
# 1. Update spent TXOs setting spent_height
|
||||||
|
update_spent_outputs(p.ctx)
|
||||||
|
p.step()
|
||||||
|
# 2. Update TXIs to have the address of TXO they are spending.
|
||||||
|
set_input_addresses(p.ctx)
|
||||||
|
p.step()
|
||||||
|
# 3. Update tx visibility map, which speeds up index-only scans.
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM tx;"))
|
||||||
|
p.step()
|
||||||
|
# 4. Update txi visibility map, which speeds up index-only scans.
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM txi;"))
|
||||||
|
p.step()
|
||||||
|
# 4. Update txo visibility map, which speeds up index-only scans.
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM txo;"))
|
||||||
|
p.step()
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.mempool.clear", "txs")
|
||||||
|
def clear_mempool(p: ProgressContext):
|
||||||
|
delete_all_the_things(-1, p)
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.mempool.main", "txs")
|
||||||
|
def sync_mempool(p: ProgressContext) -> List[str]:
|
||||||
|
chain = get_or_initialize_lbrycrd(p.ctx)
|
||||||
|
mempool = chain.sync_run(chain.get_raw_mempool())
|
||||||
|
current = [hexlify(r['tx_hash'][::-1]).decode() for r in p.ctx.fetchall(
|
||||||
|
select(TX.c.tx_hash).where(TX.c.height < 0)
|
||||||
|
)]
|
||||||
|
loader = p.ctx.get_bulk_loader()
|
||||||
|
added = []
|
||||||
|
for txid in mempool:
|
||||||
|
if txid not in current:
|
||||||
|
raw_tx = chain.sync_run(chain.get_raw_transaction(txid))
|
||||||
|
loader.add_transaction(
|
||||||
|
None, Transaction(unhexlify(raw_tx), height=-1)
|
||||||
|
)
|
||||||
|
added.append(txid)
|
||||||
|
if p.ctx.stop_event.is_set():
|
||||||
|
return
|
||||||
|
loader.flush(TX)
|
||||||
|
return added
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.filters.generate", "blocks", throttle=100)
|
||||||
|
def sync_filters(start, end, p: ProgressContext):
|
||||||
|
fp = FilterBuilder(start, end)
|
||||||
|
p.start((end-start)+1, progress_id=start, label=f"generate filters {start}-{end}")
|
||||||
|
with p.ctx.connect_streaming() as c:
|
||||||
|
loader = p.ctx.get_bulk_loader()
|
||||||
|
|
||||||
|
tx_hash, height, addresses, last_added = None, None, set(), None
|
||||||
|
address_to_hash = p.ctx.ledger.address_to_hash160
|
||||||
|
for row in c.execute(get_block_tx_addresses_sql(*fp.query_heights)):
|
||||||
|
if tx_hash != row.tx_hash:
|
||||||
|
if tx_hash is not None:
|
||||||
|
last_added = tx_hash
|
||||||
|
fp.add(tx_hash, height, addresses)
|
||||||
|
tx_hash, height, addresses = row.tx_hash, row.height, set()
|
||||||
|
addresses.add(address_to_hash(row.address))
|
||||||
|
if all([last_added, tx_hash]) and last_added != tx_hash: # pickup last tx
|
||||||
|
fp.add(tx_hash, height, addresses)
|
||||||
|
|
||||||
|
for tx_hash, height, addresses in fp.tx_filters:
|
||||||
|
loader.add_transaction_filter(
|
||||||
|
tx_hash, height, create_address_filter(list(addresses))
|
||||||
|
)
|
||||||
|
|
||||||
|
for height, addresses in fp.block_filters.items():
|
||||||
|
loader.add_block_filter(
|
||||||
|
height, create_address_filter(list(addresses))
|
||||||
|
)
|
||||||
|
|
||||||
|
for group_filter in fp.group_filters:
|
||||||
|
for height, addresses in group_filter.groups.items():
|
||||||
|
loader.add_group_filter(
|
||||||
|
height, group_filter.factor, create_address_filter(list(addresses))
|
||||||
|
)
|
||||||
|
|
||||||
|
p.add(loader.flush(BlockFilter))
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.filters.indexes", "steps")
|
||||||
|
def filters_constraints_and_indexes(p: ProgressContext):
|
||||||
|
constraints = (
|
||||||
|
pg_add_tx_filter_constraints_and_indexes +
|
||||||
|
pg_add_block_filter_constraints_and_indexes
|
||||||
|
)
|
||||||
|
p.start(2 + len(constraints))
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM ANALYZE block_filter;"))
|
||||||
|
p.step()
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM ANALYZE tx_filter;"))
|
||||||
|
p.step()
|
||||||
|
for constraint in constraints:
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute(text(constraint))
|
||||||
|
p.step()
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.filters.vacuum", "steps")
|
||||||
|
def filters_vacuum(p: ProgressContext):
|
||||||
|
p.start(2)
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM block_filter;"))
|
||||||
|
p.step()
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM tx_filter;"))
|
||||||
|
p.step()
|
||||||
|
|
||||||
|
|
||||||
|
def get_block_range_without_filters() -> Tuple[int, int]:
|
||||||
|
sql = (
|
||||||
|
select(
|
||||||
|
func.coalesce(func.min(BlockTable.c.height), -1).label('start_height'),
|
||||||
|
func.coalesce(func.max(BlockTable.c.height), -1).label('end_height'),
|
||||||
|
)
|
||||||
|
.select_from(
|
||||||
|
BlockTable.join(BlockFilter, BlockTable.c.height == BlockFilter.c.height, isouter=True)
|
||||||
|
)
|
||||||
|
.where(BlockFilter.c.height.is_(None))
|
||||||
|
)
|
||||||
|
result = context().fetchone(sql)
|
||||||
|
return result['start_height'], result['end_height']
|
||||||
|
|
||||||
|
|
||||||
|
def get_block_tx_addresses_sql(start_height, end_height):
|
||||||
|
return union(
|
||||||
|
select(TXO.c.tx_hash, TXO.c.height, TXO.c.address).where(
|
||||||
|
(TXO.c.address.isnot(None)) & between(TXO.c.height, start_height, end_height)
|
||||||
|
),
|
||||||
|
select(TXI.c.tx_hash, TXI.c.height, TXI.c.address).where(
|
||||||
|
(TXI.c.address.isnot(None)) & between(TXI.c.height, start_height, end_height)
|
||||||
|
),
|
||||||
|
).order_by('height', 'tx_hash')
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.rewind.main", "steps")
|
||||||
|
def rewind(height: int, p: ProgressContext):
|
||||||
|
delete_all_the_things(height, p)
|
||||||
|
|
||||||
|
|
||||||
|
def delete_all_the_things(height: int, p: ProgressContext):
|
||||||
|
def constrain(col):
|
||||||
|
if height == -1:
|
||||||
|
return col == -1
|
||||||
|
return col >= height
|
||||||
|
|
||||||
|
deletes = [
|
||||||
|
BlockTable.delete().where(constrain(BlockTable.c.height)),
|
||||||
|
TXI.delete().where(constrain(TXI.c.height)),
|
||||||
|
TXO.delete().where(constrain(TXO.c.height)),
|
||||||
|
TX.delete().where(constrain(TX.c.height)),
|
||||||
|
Tag.delete().where(
|
||||||
|
Tag.c.claim_hash.in_(
|
||||||
|
select(Claim.c.claim_hash).where(constrain(Claim.c.height))
|
||||||
|
)
|
||||||
|
),
|
||||||
|
Claim.delete().where(constrain(Claim.c.height)),
|
||||||
|
Support.delete().where(constrain(Support.c.height)),
|
||||||
|
MempoolFilter.delete(),
|
||||||
|
]
|
||||||
|
if height > 0:
|
||||||
|
deletes.extend([
|
||||||
|
BlockFilter.delete().where(BlockFilter.c.height >= height),
|
||||||
|
# TODO: group and tx filters need where() clauses (below actually breaks things)
|
||||||
|
BlockGroupFilter.delete(),
|
||||||
|
TXFilter.delete(),
|
||||||
|
])
|
||||||
|
for delete in p.iter(deletes):
|
||||||
|
p.ctx.execute(delete)
|
lbry/blockchain/sync/claims.py (new file, 338 lines)
|
@@ -0,0 +1,338 @@
|
||||||
|
import logging
|
||||||
|
from typing import Tuple
|
||||||
|
|
||||||
|
from sqlalchemy import case, func, desc, text
|
||||||
|
from sqlalchemy.future import select
|
||||||
|
|
||||||
|
from lbry.db.queries.txio import (
|
||||||
|
minimum_txo_columns, row_to_txo,
|
||||||
|
where_unspent_txos, where_claims_with_changed_supports,
|
||||||
|
count_unspent_txos, where_channels_with_changed_content,
|
||||||
|
where_abandoned_claims, count_channels_with_changed_content,
|
||||||
|
where_claims_with_changed_reposts,
|
||||||
|
)
|
||||||
|
from lbry.db.query_context import ProgressContext, event_emitter
|
||||||
|
from lbry.db.tables import (
|
||||||
|
TX, TXO, Claim, Support, CensoredClaim,
|
||||||
|
pg_add_claim_and_tag_constraints_and_indexes
|
||||||
|
)
|
||||||
|
from lbry.db.utils import least
|
||||||
|
from lbry.db.constants import TXO_TYPES, CLAIM_TYPE_CODES
|
||||||
|
from lbry.schema.result import Censor
|
||||||
|
from lbry.blockchain.transaction import Output
|
||||||
|
|
||||||
|
from .context import get_or_initialize_lbrycrd
|
||||||
|
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def channel_content_count_calc(signable):
|
||||||
|
return (
|
||||||
|
select(func.count(signable.c.claim_hash))
|
||||||
|
.where((signable.c.channel_hash == Claim.c.claim_hash) & signable.c.is_signature_valid)
|
||||||
|
.scalar_subquery()
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
support = TXO.alias('support')
|
||||||
|
|
||||||
|
|
||||||
|
def staked_support_subquery(claim_hash_column, aggregate):
|
||||||
|
"""Return a query that selects unspent supports"""
|
||||||
|
content = Claim.alias("content")
|
||||||
|
return (
|
||||||
|
select(
|
||||||
|
aggregate
|
||||||
|
).select_from(
|
||||||
|
support
|
||||||
|
.join(content, support.c.claim_hash == content.c.claim_hash)
|
||||||
|
).where(
|
||||||
|
((content.c.claim_hash == claim_hash_column) | (content.c.channel_hash == claim_hash_column)) &
|
||||||
|
(support.c.txo_type == TXO_TYPES["support"]) &
|
||||||
|
(support.c.spent_height == 0)
|
||||||
|
)
|
||||||
|
.scalar_subquery()
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def staked_support_amount_calc(claim_hash):
|
||||||
|
"""Return a query that sums unspent supports for a claim"""
|
||||||
|
return staked_support_subquery(claim_hash, func.coalesce(func.sum(support.c.amount), 0))
|
||||||
|
|
||||||
|
|
||||||
|
def staked_support_count_calc(claim_hash):
|
||||||
|
"""Return a query that counts unspent supports for a claim"""
|
||||||
|
return staked_support_subquery(claim_hash, func.coalesce(func.count('*'), 0))
|
||||||
|
|
||||||
|
|
||||||
|
def claims_in_channel_amount_calc(claim_hash):
|
||||||
|
"""Return a query that sums the amount of all the claims in a channel"""
|
||||||
|
content = Claim.alias("content")
|
||||||
|
return (
|
||||||
|
select(
|
||||||
|
func.coalesce(func.sum(content.c.amount), 0)
|
||||||
|
).select_from(
|
||||||
|
content
|
||||||
|
).where(
|
||||||
|
content.c.channel_hash == claim_hash
|
||||||
|
)
|
||||||
|
.scalar_subquery()
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def reposted_claim_count_calc(other):
|
||||||
|
repost = TXO.alias('repost')
|
||||||
|
return (
|
||||||
|
select(func.coalesce(func.count(repost.c.reposted_claim_hash), 0))
|
||||||
|
.where(
|
||||||
|
(repost.c.reposted_claim_hash == other.c.claim_hash) &
|
||||||
|
(repost.c.spent_height == 0)
|
||||||
|
).scalar_subquery()
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def make_label(action, blocks):
|
||||||
|
if blocks[0] == blocks[-1]:
|
||||||
|
return f"{action} {blocks[0]:>6}"
|
||||||
|
else:
|
||||||
|
return f"{action} {blocks[0]:>6}-{blocks[-1]:>6}"
|
||||||
|
|
||||||
|
|
||||||
|
def select_claims_for_saving(
|
||||||
|
blocks: Tuple[int, int],
|
||||||
|
missing_in_claims_table=False,
|
||||||
|
missing_or_stale_in_claims_table=False,
|
||||||
|
):
|
||||||
|
channel_txo = TXO.alias('channel_txo')
|
||||||
|
return select(
|
||||||
|
*minimum_txo_columns, TXO.c.claim_hash,
|
||||||
|
claims_in_channel_amount_calc(TXO.c.claim_hash).label('claims_in_channel_amount'),
|
||||||
|
staked_support_amount_calc(TXO.c.claim_hash).label('staked_support_amount'),
|
||||||
|
staked_support_count_calc(TXO.c.claim_hash).label('staked_support_count'),
|
||||||
|
reposted_claim_count_calc(TXO).label('reposted_count'),
|
||||||
|
TXO.c.signature, TXO.c.signature_digest,
|
||||||
|
case([(
|
||||||
|
TXO.c.channel_hash.isnot(None),
|
||||||
|
select(channel_txo.c.public_key).select_from(channel_txo).where(
|
||||||
|
(channel_txo.c.txo_type == TXO_TYPES['channel']) &
|
||||||
|
(channel_txo.c.claim_hash == TXO.c.channel_hash) &
|
||||||
|
(channel_txo.c.height <= TXO.c.height)
|
||||||
|
).order_by(desc(channel_txo.c.height)).limit(1).scalar_subquery()
|
||||||
|
)]).label('channel_public_key')
|
||||||
|
).where(
|
||||||
|
where_unspent_txos(
|
||||||
|
CLAIM_TYPE_CODES, blocks,
|
||||||
|
missing_in_claims_table=missing_in_claims_table,
|
||||||
|
missing_or_stale_in_claims_table=missing_or_stale_in_claims_table,
|
||||||
|
)
|
||||||
|
).select_from(TXO.join(TX))
|
||||||
|
|
||||||
|
|
||||||
|
def row_to_claim_for_saving(row) -> Tuple[Output, dict]:
|
||||||
|
return row_to_txo(row), {
|
||||||
|
'claims_in_channel_amount': int(row.claims_in_channel_amount),
|
||||||
|
'staked_support_amount': int(row.staked_support_amount),
|
||||||
|
'staked_support_count': int(row.staked_support_count),
|
||||||
|
'reposted_count': int(row.reposted_count),
|
||||||
|
'signature': row.signature,
|
||||||
|
'signature_digest': row.signature_digest,
|
||||||
|
'channel_public_key': row.channel_public_key
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.claims.insert", "claims")
|
||||||
|
def claims_insert(
|
||||||
|
blocks: Tuple[int, int],
|
||||||
|
missing_in_claims_table: bool,
|
||||||
|
flush_size: int,
|
||||||
|
p: ProgressContext
|
||||||
|
):
|
||||||
|
chain = get_or_initialize_lbrycrd(p.ctx)
|
||||||
|
|
||||||
|
p.start(
|
||||||
|
count_unspent_txos(
|
||||||
|
CLAIM_TYPE_CODES, blocks,
|
||||||
|
missing_in_claims_table=missing_in_claims_table,
|
||||||
|
), progress_id=blocks[0], label=make_label("add claims", blocks)
|
||||||
|
)
|
||||||
|
|
||||||
|
with p.ctx.connect_streaming() as c:
|
||||||
|
loader = p.ctx.get_bulk_loader()
|
||||||
|
cursor = c.execute(select_claims_for_saving(
|
||||||
|
blocks, missing_in_claims_table=missing_in_claims_table
|
||||||
|
).order_by(TXO.c.claim_hash))
|
||||||
|
for rows in cursor.partitions(900):
|
||||||
|
claim_metadata = chain.db.sync_get_claim_metadata(
|
||||||
|
claim_hashes=[row['claim_hash'] for row in rows]
|
||||||
|
)
|
||||||
|
i = 0
|
||||||
|
for row in rows:
|
||||||
|
metadata = claim_metadata[i] if i < len(claim_metadata) else {}
|
||||||
|
if metadata and metadata['claim_hash'] == row.claim_hash:
|
||||||
|
i += 1
|
||||||
|
txo, extra = row_to_claim_for_saving(row)
|
||||||
|
extra.update({
|
||||||
|
'short_url': metadata.get('short_url'),
|
||||||
|
'creation_height': metadata.get('creation_height'),
|
||||||
|
'activation_height': metadata.get('activation_height'),
|
||||||
|
'expiration_height': metadata.get('expiration_height'),
|
||||||
|
'takeover_height': metadata.get('takeover_height'),
|
||||||
|
})
|
||||||
|
loader.add_claim(txo, **extra)
|
||||||
|
if len(loader.claims) >= flush_size:
|
||||||
|
p.add(loader.flush(Claim))
|
||||||
|
p.add(loader.flush(Claim))
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.claims.indexes", "steps")
|
||||||
|
def claims_constraints_and_indexes(p: ProgressContext):
|
||||||
|
p.start(2 + len(pg_add_claim_and_tag_constraints_and_indexes))
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM ANALYZE claim;"))
|
||||||
|
p.step()
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM ANALYZE tag;"))
|
||||||
|
p.step()
|
||||||
|
for constraint in pg_add_claim_and_tag_constraints_and_indexes:
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute(text(constraint))
|
||||||
|
p.step()
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.claims.vacuum", "steps")
|
||||||
|
def claims_vacuum(p: ProgressContext):
|
||||||
|
p.start(2)
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM claim;"))
|
||||||
|
p.step()
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM tag;"))
|
||||||
|
p.step()
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.claims.update", "claims")
|
||||||
|
def claims_update(blocks: Tuple[int, int], p: ProgressContext):
|
||||||
|
p.start(
|
||||||
|
count_unspent_txos(CLAIM_TYPE_CODES, blocks, missing_or_stale_in_claims_table=True),
|
||||||
|
progress_id=blocks[0], label=make_label("mod claims", blocks)
|
||||||
|
)
|
||||||
|
with p.ctx.connect_streaming() as c:
|
||||||
|
loader = p.ctx.get_bulk_loader()
|
||||||
|
cursor = c.execute(select_claims_for_saving(
|
||||||
|
blocks, missing_or_stale_in_claims_table=True
|
||||||
|
))
|
||||||
|
for row in cursor:
|
||||||
|
txo, extra = row_to_claim_for_saving(row)
|
||||||
|
loader.update_claim(txo, **extra)
|
||||||
|
if len(loader.update_claims) >= 25:
|
||||||
|
p.add(loader.flush(Claim))
|
||||||
|
p.add(loader.flush(Claim))
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.claims.delete", "claims")
|
||||||
|
def claims_delete(claims, p: ProgressContext):
|
||||||
|
p.start(claims, label="del claims")
|
||||||
|
deleted = p.ctx.execute(Claim.delete().where(where_abandoned_claims()))
|
||||||
|
p.step(deleted.rowcount)
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.claims.takeovers", "claims")
|
||||||
|
def update_takeovers(blocks: Tuple[int, int], takeovers, p: ProgressContext):
|
||||||
|
p.start(takeovers, label=make_label("mod winner", blocks))
|
||||||
|
chain = get_or_initialize_lbrycrd(p.ctx)
|
||||||
|
with p.ctx.engine.begin() as c:
|
||||||
|
for takeover in chain.db.sync_get_takeovers(start_height=blocks[0], end_height=blocks[-1]):
|
||||||
|
update_claims = (
|
||||||
|
Claim.update()
|
||||||
|
.where(Claim.c.normalized == takeover['normalized'])
|
||||||
|
.values(
|
||||||
|
is_controlling=case(
|
||||||
|
[(Claim.c.claim_hash == takeover['claim_hash'], True)],
|
||||||
|
else_=False
|
||||||
|
),
|
||||||
|
takeover_height=case(
|
||||||
|
[(Claim.c.claim_hash == takeover['claim_hash'], takeover['height'])],
|
||||||
|
else_=None
|
||||||
|
),
|
||||||
|
activation_height=least(Claim.c.activation_height, takeover['height']),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
result = c.execute(update_claims)
|
||||||
|
p.add(result.rowcount)
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.claims.stakes", "claims")
|
||||||
|
def update_stakes(blocks: Tuple[int, int], claims: int, p: ProgressContext):
|
||||||
|
p.start(claims)
|
||||||
|
sql = (
|
||||||
|
Claim.update()
|
||||||
|
.where(where_claims_with_changed_supports(blocks))
|
||||||
|
.values(
|
||||||
|
staked_amount=(
|
||||||
|
Claim.c.amount +
|
||||||
|
claims_in_channel_amount_calc(Claim.c.claim_hash) +
|
||||||
|
staked_support_amount_calc(Claim.c.claim_hash)
|
||||||
|
),
|
||||||
|
staked_support_amount=staked_support_amount_calc(Claim.c.claim_hash),
|
||||||
|
staked_support_count=staked_support_count_calc(Claim.c.claim_hash),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
result = p.ctx.execute(sql)
|
||||||
|
p.step(result.rowcount)
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.claims.reposts", "claims")
|
||||||
|
def update_reposts(blocks: Tuple[int, int], claims: int, p: ProgressContext):
|
||||||
|
p.start(claims)
|
||||||
|
sql = (
|
||||||
|
Claim.update()
|
||||||
|
.where(where_claims_with_changed_reposts(blocks))
|
||||||
|
.values(reposted_count=reposted_claim_count_calc(Claim))
|
||||||
|
)
|
||||||
|
result = p.ctx.execute(sql)
|
||||||
|
p.step(result.rowcount)
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.claims.channels", "channels")
|
||||||
|
def update_channel_stats(blocks: Tuple[int, int], initial_sync: int, p: ProgressContext):
|
||||||
|
update_sql = Claim.update().values(
|
||||||
|
signed_claim_count=channel_content_count_calc(Claim.alias('content')),
|
||||||
|
signed_support_count=channel_content_count_calc(Support),
|
||||||
|
)
|
||||||
|
if initial_sync:
|
||||||
|
p.start(p.ctx.fetchtotal(Claim.c.claim_type == TXO_TYPES['channel']), label="channel stats")
|
||||||
|
update_sql = update_sql.where(Claim.c.claim_type == TXO_TYPES['channel'])
|
||||||
|
elif blocks:
|
||||||
|
p.start(count_channels_with_changed_content(blocks), label="channel stats")
|
||||||
|
update_sql = update_sql.where(where_channels_with_changed_content(blocks))
|
||||||
|
else:
|
||||||
|
return
|
||||||
|
result = p.ctx.execute(update_sql)
|
||||||
|
if result.rowcount and p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM claim;"))
|
||||||
|
p.step(result.rowcount)
|
||||||
|
|
||||||
|
|
||||||
|
def select_reposts(channel_hashes, filter_type):
|
||||||
|
return (
|
||||||
|
select(Claim.c.reposted_claim_hash, filter_type, Claim.c.channel_hash).where(
|
||||||
|
(Claim.c.channel_hash.in_(channel_hashes)) &
|
||||||
|
(Claim.c.reposted_claim_hash.isnot(None))
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.claims.filters", "claim_filters")
|
||||||
|
def update_claim_filters(resolve_censor_channel_hashes, search_censor_channel_hashes, p: ProgressContext):
|
||||||
|
p.ctx.execute(CensoredClaim.delete())
|
||||||
|
# order matters: first we insert the resolve filters; then the search ones.
|
||||||
|
# a claim that's censored in resolve is automatically also censored in search results.
|
||||||
|
p.ctx.execute(CensoredClaim.insert().from_select(
|
||||||
|
['claim_hash', 'censor_type', 'censoring_channel_hash'],
|
||||||
|
select_reposts(resolve_censor_channel_hashes, Censor.RESOLVE)
|
||||||
|
))
|
||||||
|
p.ctx.execute(p.ctx.insert_or_ignore(CensoredClaim).from_select(
|
||||||
|
['claim_hash', 'censor_type', 'censoring_channel_hash'],
|
||||||
|
select_reposts(search_censor_channel_hashes, Censor.SEARCH)
|
||||||
|
))
|
lbry/blockchain/sync/context.py (new file, 25 lines)
|
@ -0,0 +1,25 @@
|
||||||
|
from contextvars import ContextVar
|
||||||
|
from lbry.db import query_context
|
||||||
|
|
||||||
|
from lbry.blockchain.lbrycrd import Lbrycrd
|
||||||
|
|
||||||
|
|
||||||
|
_chain: ContextVar[Lbrycrd] = ContextVar('chain')
|
||||||
|
|
||||||
|
|
||||||
|
def get_or_initialize_lbrycrd(ctx=None) -> Lbrycrd:
|
||||||
|
chain = _chain.get(None)
|
||||||
|
if chain is not None:
|
||||||
|
return chain
|
||||||
|
chain = Lbrycrd((ctx or query_context.context()).ledger)
|
||||||
|
chain.db.sync_open()
|
||||||
|
_chain.set(chain)
|
||||||
|
return chain
|
||||||
|
|
||||||
|
|
||||||
|
def uninitialize():
|
||||||
|
chain = _chain.get(None)
|
||||||
|
if chain is not None:
|
||||||
|
chain.db.sync_close()
|
||||||
|
chain.sync_run(chain.close_session())
|
||||||
|
_chain.set(None)
|
lbry/blockchain/sync/filter_builder.py (new file, 79 lines)
|
@ -0,0 +1,79 @@
|
||||||
|
from typing import Dict
|
||||||
|
|
||||||
|
|
||||||
|
def split_range_into_10k_batches(start, end):
|
||||||
|
batch = [start, end]
|
||||||
|
batches = [batch]
|
||||||
|
for block in range(start, end+1):
|
||||||
|
if 0 < block != batch[0] and block % 10_000 == 0:
|
||||||
|
batch = [block, block]
|
||||||
|
batches.append(batch)
|
||||||
|
else:
|
||||||
|
batch[1] = block
|
||||||
|
return batches
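Usage example, traced from the function above:

# split_range_into_10k_batches(0, 25_000)
#   -> [[0, 9999], [10000, 19999], [20000, 25000]]
# split_range_into_10k_batches(5, 9_000)
#   -> [[5, 9000]]  (no 10k boundary is crossed, so a single batch)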
|
||||||
|
|
||||||
|
|
||||||
|
class GroupFilter:
|
||||||
|
"""
|
||||||
|
Collects addresses into buckets of specific sizes defined by 10 raised to power of factor.
|
||||||
|
e.g. a factor of 2 (10**2) would create block buckets 100-199, 200-299, etc.
|
||||||
|
a factor of 3 (10**3) would create block buckets 1000-1999, 2000-2999, etc
|
||||||
|
"""
|
||||||
|
def __init__(self, start, end, factor):
|
||||||
|
self.start = start
|
||||||
|
self.end = end
|
||||||
|
self.factor = factor
|
||||||
|
self.resolution = resolution = 10**factor
|
||||||
|
last_height_in_group, groups = resolution-1, {}
|
||||||
|
for block in range(start, end+1):
|
||||||
|
if block % resolution == last_height_in_group:
|
||||||
|
groups[block-last_height_in_group] = set()
|
||||||
|
self.last_height_in_group = last_height_in_group
|
||||||
|
self.groups: Dict[int, set] = groups
|
||||||
|
|
||||||
|
@property
|
||||||
|
def coverage(self):
|
||||||
|
return list(self.groups.keys())
|
||||||
|
|
||||||
|
def add(self, height, addresses):
|
||||||
|
group = self.groups.get(height - (height % self.resolution))
|
||||||
|
if group is not None:
|
||||||
|
group.update(addresses)
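Example, traced from the constructor and add() above: with factor 2 a bucket is only created when a full hundred-block group fits inside [start, end].

# gf = GroupFilter(start=1000, end=1299, factor=2)
# gf.resolution        -> 100
# sorted(gf.coverage)  -> [1000, 1100, 1200]   (buckets 1000-1099, 1100-1199, 1200-1299)
# gf.add(1150, {b'addrA'})  # lands in the 1100 bucket
# gf.add(1300, {b'addrB'})  # outside every bucket, silently ignored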
|
||||||
|
|
||||||
|
|
||||||
|
class FilterBuilder:
|
||||||
|
"""
|
||||||
|
Creates filter groups, calculates the necessary block range to fulfill creation
|
||||||
|
of filter groups and collects tx filters, block filters and group filters.
|
||||||
|
"""
|
||||||
|
def __init__(self, start, end):
|
||||||
|
self.start = start
|
||||||
|
self.end = end
|
||||||
|
self.group_filters = [
|
||||||
|
GroupFilter(start, end, 4),
|
||||||
|
GroupFilter(start, end, 3),
|
||||||
|
GroupFilter(start, end, 2),
|
||||||
|
]
|
||||||
|
self.start_tx_height, self.end_tx_height = self._calculate_tx_heights_for_query()
|
||||||
|
self.tx_filters = []
|
||||||
|
self.block_filters: Dict[int, set] = {}
|
||||||
|
|
||||||
|
def _calculate_tx_heights_for_query(self):
|
||||||
|
for group_filter in self.group_filters:
|
||||||
|
if group_filter.groups:
|
||||||
|
return group_filter.coverage[0], self.end
|
||||||
|
return self.start, self.end
|
||||||
|
|
||||||
|
@property
|
||||||
|
def query_heights(self):
|
||||||
|
return self.start_tx_height, self.end_tx_height
|
||||||
|
|
||||||
|
def add(self, tx_hash, height, addresses):
|
||||||
|
if self.start <= height <= self.end:
|
||||||
|
self.tx_filters.append((tx_hash, height, addresses))
|
||||||
|
block_filter = self.block_filters.get(height)
|
||||||
|
if block_filter is None:
|
||||||
|
block_filter = self.block_filters[height] = set()
|
||||||
|
block_filter.update(addresses)
|
||||||
|
for group_filter in self.group_filters:
|
||||||
|
group_filter.add(height, addresses)
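Sketch of how the sync code drives a FilterBuilder (the tx hashes and addresses here are made-up placeholders):

# fb = FilterBuilder(start=0, end=20_000)
# fb.query_heights                  -> (0, 20000) when the widest group filter has buckets to fill
# fb.add(b'tx1', 150, {b'addrA', b'addrB'})
# fb.add(b'tx2', 10_050, {b'addrC'})
# fb.tx_filters                     -> [(b'tx1', 150, {...}), (b'tx2', 10050, {...})]
# fb.block_filters[150]             -> {b'addrA', b'addrB'}
# each GroupFilter in fb.group_filters also received both heights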
|
lbry/blockchain/sync/supports.py (new file, 95 lines)
|
@@ -0,0 +1,95 @@
|
||||||
|
import logging
|
||||||
|
from typing import Tuple
|
||||||
|
|
||||||
|
from sqlalchemy import case, desc, text
|
||||||
|
from sqlalchemy.future import select
|
||||||
|
|
||||||
|
from lbry.db.tables import TX, TXO, Support, pg_add_support_constraints_and_indexes
|
||||||
|
from lbry.db.query_context import ProgressContext, event_emitter
|
||||||
|
from lbry.db.queries import row_to_txo
|
||||||
|
from lbry.db.constants import TXO_TYPES
|
||||||
|
from lbry.db.queries.txio import (
|
||||||
|
minimum_txo_columns,
|
||||||
|
where_unspent_txos, where_abandoned_supports,
|
||||||
|
count_unspent_txos,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .claims import make_label
|
||||||
|
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.supports.insert", "supports")
|
||||||
|
def supports_insert(
|
||||||
|
blocks: Tuple[int, int],
|
||||||
|
missing_in_supports_table: bool,
|
||||||
|
flush_size: int,
|
||||||
|
p: ProgressContext
|
||||||
|
):
|
||||||
|
p.start(
|
||||||
|
count_unspent_txos(
|
||||||
|
TXO_TYPES['support'], blocks,
|
||||||
|
missing_in_supports_table=missing_in_supports_table,
|
||||||
|
), progress_id=blocks[0], label=make_label("add supprt", blocks)
|
||||||
|
)
|
||||||
|
channel_txo = TXO.alias('channel_txo')
|
||||||
|
select_supports = select(
|
||||||
|
*minimum_txo_columns, TXO.c.claim_hash,
|
||||||
|
TXO.c.signature, TXO.c.signature_digest,
|
||||||
|
case([(
|
||||||
|
TXO.c.channel_hash.isnot(None),
|
||||||
|
select(channel_txo.c.public_key).select_from(channel_txo).where(
|
||||||
|
(channel_txo.c.txo_type == TXO_TYPES['channel']) &
|
||||||
|
(channel_txo.c.claim_hash == TXO.c.channel_hash) &
|
||||||
|
(channel_txo.c.height <= TXO.c.height)
|
||||||
|
).order_by(desc(channel_txo.c.height)).limit(1).scalar_subquery()
|
||||||
|
)]).label('channel_public_key'),
|
||||||
|
).select_from(
|
||||||
|
TXO.join(TX)
|
||||||
|
).where(
|
||||||
|
where_unspent_txos(
|
||||||
|
TXO_TYPES['support'], blocks,
|
||||||
|
missing_in_supports_table=missing_in_supports_table,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
with p.ctx.connect_streaming() as c:
|
||||||
|
loader = p.ctx.get_bulk_loader()
|
||||||
|
for row in c.execute(select_supports):
|
||||||
|
txo = row_to_txo(row)
|
||||||
|
loader.add_support(
|
||||||
|
txo,
|
||||||
|
signature=row.signature,
|
||||||
|
signature_digest=row.signature_digest,
|
||||||
|
channel_public_key=row.channel_public_key
|
||||||
|
)
|
||||||
|
if len(loader.supports) >= flush_size:
|
||||||
|
p.add(loader.flush(Support))
|
||||||
|
p.add(loader.flush(Support))
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.supports.delete", "supports")
|
||||||
|
def supports_delete(supports, p: ProgressContext):
|
||||||
|
p.start(supports, label="del supprt")
|
||||||
|
deleted = p.ctx.execute(Support.delete().where(where_abandoned_supports()))
|
||||||
|
p.step(deleted.rowcount)
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.supports.indexes", "steps")
|
||||||
|
def supports_constraints_and_indexes(p: ProgressContext):
|
||||||
|
p.start(1 + len(pg_add_support_constraints_and_indexes))
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM ANALYZE support;"))
|
||||||
|
p.step()
|
||||||
|
for constraint in pg_add_support_constraints_and_indexes:
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute(text(constraint))
|
||||||
|
p.step()
|
||||||
|
|
||||||
|
|
||||||
|
@event_emitter("blockchain.sync.supports.vacuum", "steps")
|
||||||
|
def supports_vacuum(p: ProgressContext):
|
||||||
|
p.start(1)
|
||||||
|
if p.ctx.is_postgres:
|
||||||
|
p.ctx.execute_notx(text("VACUUM support;"))
|
||||||
|
p.step()
|
lbry/blockchain/sync/synchronizer.py (new file, 412 lines)
|
@ -0,0 +1,412 @@
|
||||||
|
import os
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
from binascii import unhexlify
|
||||||
|
from typing import Optional, Tuple, Set, List, Coroutine
|
||||||
|
from concurrent.futures import ThreadPoolExecutor
|
||||||
|
|
||||||
|
from lbry.db import Database, trending
|
||||||
|
from lbry.db import queries as q
|
||||||
|
from lbry.db.constants import TXO_TYPES, CLAIM_TYPE_CODES
|
||||||
|
from lbry.db.query_context import Event, Progress
|
||||||
|
from lbry.event import BroadcastSubscription, EventController
|
||||||
|
from lbry.service.base import Sync, BlockEvent
|
||||||
|
from lbry.blockchain.lbrycrd import Lbrycrd
|
||||||
|
from lbry.error import LbrycrdEventSubscriptionError
|
||||||
|
|
||||||
|
from . import blocks as block_phase, claims as claim_phase, supports as support_phase
|
||||||
|
from .context import uninitialize
|
||||||
|
from .filter_builder import split_range_into_10k_batches
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
BLOCKS_INIT_EVENT = Event.add("blockchain.sync.blocks.init", "steps")
|
||||||
|
BLOCKS_MAIN_EVENT = Event.add("blockchain.sync.blocks.main", "blocks", "txs")
|
||||||
|
FILTER_INIT_EVENT = Event.add("blockchain.sync.filters.init", "steps")
|
||||||
|
FILTER_MAIN_EVENT = Event.add("blockchain.sync.filters.main", "blocks")
|
||||||
|
CLAIMS_INIT_EVENT = Event.add("blockchain.sync.claims.init", "steps")
|
||||||
|
CLAIMS_MAIN_EVENT = Event.add("blockchain.sync.claims.main", "claims")
|
||||||
|
TRENDS_INIT_EVENT = Event.add("blockchain.sync.trends.init", "steps")
|
||||||
|
TRENDS_MAIN_EVENT = Event.add("blockchain.sync.trends.main", "blocks")
|
||||||
|
SUPPORTS_INIT_EVENT = Event.add("blockchain.sync.supports.init", "steps")
|
||||||
|
SUPPORTS_MAIN_EVENT = Event.add("blockchain.sync.supports.main", "supports")
|
||||||
|
|
||||||
|
|
||||||
|
class BlockchainSync(Sync):
|
||||||
|
|
||||||
|
TX_FLUSH_SIZE = 25_000 # flush to db after processing this many TXs and update progress
|
||||||
|
CLAIM_FLUSH_SIZE = 25_000 # flush to db after processing this many claims and update progress
|
||||||
|
SUPPORT_FLUSH_SIZE = 25_000 # flush to db after processing this many supports and update progress
|
||||||
|
FILTER_FLUSH_SIZE = 10_000 # flush to db after processing this many filters and update progress
|
||||||
|
|
||||||
|
def __init__(self, chain: Lbrycrd, db: Database):
|
||||||
|
super().__init__(chain.ledger, db)
|
||||||
|
self.chain = chain
|
||||||
|
self.pid = os.getpid()
|
||||||
|
self._on_block_controller = EventController()
|
||||||
|
self.on_block = self._on_block_controller.stream
|
||||||
|
self.conf.events.register("blockchain.block", self.on_block)
|
||||||
|
self._on_mempool_controller = EventController()
|
||||||
|
self.on_mempool = self._on_mempool_controller.stream
|
||||||
|
self.on_block_hash_subscription: Optional[BroadcastSubscription] = None
|
||||||
|
self.on_tx_hash_subscription: Optional[BroadcastSubscription] = None
|
||||||
|
self.advance_loop_task: Optional[asyncio.Task] = None
|
||||||
|
self.block_hash_event = asyncio.Event()
|
||||||
|
self.tx_hash_event = asyncio.Event()
|
||||||
|
self.mempool = []
|
||||||
|
self.search_censor_channel_hashes = {
|
||||||
|
unhexlify(channel_id)[::-1] for channel_id in self.conf.search_censor_channel_ids
|
||||||
|
}
|
||||||
|
self.resolve_censor_channel_hashes = {
|
||||||
|
unhexlify(channel_id)[::-1] for channel_id in self.conf.resolve_censor_channel_ids
|
||||||
|
}
|
||||||
|
|
||||||
|
async def wait_for_chain_ready(self):
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
return await self.chain.ensure_subscribable()
|
||||||
|
except asyncio.CancelledError:
|
||||||
|
raise
|
||||||
|
except LbrycrdEventSubscriptionError as e:
|
||||||
|
log.warning(
|
||||||
|
"Lbrycrd is misconfigured. Please double check if"
|
||||||
|
" zmqpubhashblock is properly set on lbrycrd.conf"
|
||||||
|
)
|
||||||
|
raise
|
||||||
|
except Exception as e:
|
||||||
|
log.warning("Blockchain not ready, waiting for it: %s", str(e))
|
||||||
|
await asyncio.sleep(1)
|
||||||
|
|
||||||
|
async def start(self):
|
||||||
|
self.db.stop_event.clear()
|
||||||
|
await self.wait_for_chain_ready()
|
||||||
|
self.advance_loop_task = asyncio.create_task(self.advance())
|
||||||
|
await self.advance_loop_task
|
||||||
|
await self.chain.subscribe()
|
||||||
|
self.advance_loop_task = asyncio.create_task(self.advance_loop())
|
||||||
|
self.on_block_hash_subscription = self.chain.on_block_hash.listen(
|
||||||
|
lambda e: self.block_hash_event.set()
|
||||||
|
)
|
||||||
|
self.on_tx_hash_subscription = self.chain.on_tx_hash.listen(
|
||||||
|
lambda e: self.tx_hash_event.set()
|
||||||
|
)
|
||||||
|
|
||||||
|
async def stop(self):
|
||||||
|
self.chain.unsubscribe()
|
||||||
|
self.db.stop_event.set()
|
||||||
|
for subscription in (
|
||||||
|
self.on_block_hash_subscription,
|
||||||
|
self.on_tx_hash_subscription,
|
||||||
|
self.advance_loop_task
|
||||||
|
):
|
||||||
|
if subscription is not None:
|
||||||
|
subscription.cancel()
|
||||||
|
if isinstance(self.db.executor, ThreadPoolExecutor):
|
||||||
|
await self.db.run(uninitialize)
|
||||||
|
|
||||||
|
async def run_tasks(self, tasks: List[Coroutine]) -> Optional[Set[asyncio.Future]]:
|
||||||
|
done, pending = await asyncio.wait(
|
||||||
|
tasks, return_when=asyncio.FIRST_EXCEPTION
|
||||||
|
)
|
||||||
|
if pending:
|
||||||
|
self.db.stop_event.set()
|
||||||
|
for future in pending:
|
||||||
|
future.cancel()
|
||||||
|
for future in done:
|
||||||
|
future.result()
|
||||||
|
return
|
||||||
|
return done
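# run_tasks() waits with FIRST_EXCEPTION: if any worker task fails, the stop
# event is set so the other workers can exit cleanly, the still-pending futures
# are cancelled, and calling result() on the finished ones re-raises the
# original error. A non-None return value therefore means every task completed.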
|
||||||
|
|
||||||
|
async def get_block_headers(self, start_height: int, end_height: int = None):
|
||||||
|
return await self.db.get_block_headers(start_height, end_height)
|
||||||
|
|
||||||
|
async def get_best_block_height(self) -> int:
|
||||||
|
return await self.db.get_best_block_height()
|
||||||
|
|
||||||
|
async def get_best_block_height_for_file(self, file_number) -> int:
|
||||||
|
return await self.db.run(
|
||||||
|
block_phase.get_best_block_height_for_file, file_number
|
||||||
|
)
|
||||||
|
|
||||||
|
async def sync_blocks(self) -> Optional[Tuple[int, int]]:
|
||||||
|
tasks = []
|
||||||
|
starting_height = None
|
||||||
|
tx_count = block_count = 0
|
||||||
|
with Progress(self.db.message_queue, BLOCKS_INIT_EVENT) as p:
|
||||||
|
ending_height = await self.chain.db.get_best_height()
|
||||||
|
for chain_file in p.iter(await self.chain.db.get_block_files()):
|
||||||
|
# block files may be read and saved out of order, so we need to check
|
||||||
|
# each file individually to see if we have missing blocks
|
||||||
|
our_best_file_height = await self.get_best_block_height_for_file(
|
||||||
|
chain_file['file_number']
|
||||||
|
)
|
||||||
|
if our_best_file_height == chain_file['best_height']:
|
||||||
|
# we already have all of the blocks in this file, skip it
|
||||||
|
continue
|
||||||
|
if -1 < our_best_file_height < chain_file['best_height']:
|
||||||
|
# we have some of the blocks, figure out which ones are missing
|
||||||
|
# by calling get_block_files again, limited to this file and to heights above our best
|
||||||
|
chain_file = (await self.chain.db.get_block_files(
|
||||||
|
file_number=chain_file['file_number'], start_height=our_best_file_height+1,
|
||||||
|
))[0]
|
||||||
|
tx_count += chain_file['txs']
|
||||||
|
block_count += chain_file['blocks']
|
||||||
|
file_start_height = chain_file['start_height']
|
||||||
|
starting_height = min(
|
||||||
|
file_start_height if starting_height is None else starting_height,
|
||||||
|
file_start_height
|
||||||
|
)
|
||||||
|
tasks.append(self.db.run(
|
||||||
|
block_phase.sync_block_file, chain_file['file_number'], file_start_height,
|
||||||
|
chain_file['txs'], self.TX_FLUSH_SIZE
|
||||||
|
))
|
||||||
|
with Progress(self.db.message_queue, BLOCKS_MAIN_EVENT) as p:
|
||||||
|
p.start(block_count, tx_count, extra={
|
||||||
|
"starting_height": starting_height,
|
||||||
|
"ending_height": ending_height,
|
||||||
|
"files": len(tasks),
|
||||||
|
"claims": await self.chain.db.get_claim_metadata_count(starting_height, ending_height),
|
||||||
|
"supports": await self.chain.db.get_support_metadata_count(starting_height, ending_height),
|
||||||
|
})
|
||||||
|
completed = await self.run_tasks(tasks)
|
||||||
|
if completed:
|
||||||
|
if starting_height == 0:
|
||||||
|
await self.db.run(block_phase.blocks_constraints_and_indexes)
|
||||||
|
else:
|
||||||
|
await self.db.run(block_phase.blocks_vacuum)
|
||||||
|
best_height_processed = max(f.result() for f in completed)
|
||||||
|
return starting_height, best_height_processed
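# sync_blocks() schedules one worker task per block file that still has
# unprocessed blocks, each flushing every TX_FLUSH_SIZE transactions. The
# returned (starting_height, best_height_processed) range is what the later
# spend/claim/support phases operate on; None means no new blocks were processed.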
|
||||||
|
|
||||||
|
async def sync_filters(self):
|
||||||
|
with Progress(self.db.message_queue, FILTER_INIT_EVENT) as p:
|
||||||
|
p.start(2)
|
||||||
|
initial_sync = not await self.db.has_filters()
|
||||||
|
p.step()
|
||||||
|
if initial_sync:
|
||||||
|
blocks = [0, await self.db.get_best_block_height()]
|
||||||
|
else:
|
||||||
|
blocks = await self.db.run(block_phase.get_block_range_without_filters)
|
||||||
|
if blocks != (-1, -1):
|
||||||
|
batches = split_range_into_10k_batches(*blocks)
|
||||||
|
p.step()
|
||||||
|
else:
|
||||||
|
p.step()
|
||||||
|
return
|
||||||
|
with Progress(self.db.message_queue, FILTER_MAIN_EVENT) as p:
|
||||||
|
p.start((blocks[1]-blocks[0])+1)
|
||||||
|
await self.run_tasks([
|
||||||
|
self.db.run(block_phase.sync_filters, *batch) for batch in batches
|
||||||
|
])
|
||||||
|
if initial_sync:
|
||||||
|
await self.db.run(block_phase.filters_constraints_and_indexes)
|
||||||
|
else:
|
||||||
|
await self.db.run(block_phase.filters_vacuum)
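# The missing height range is split into batches of at most roughly 10k blocks
# (split_range_into_10k_batches) so filter generation can run in parallel on
# the database workers. On the very first sync the filter constraints and
# indexes are only created afterwards, which keeps the bulk insert fast;
# subsequent runs just vacuum.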
|
||||||
|
|
||||||
|
async def sync_spends(self, blocks_added):
|
||||||
|
if blocks_added:
|
||||||
|
await self.db.run(block_phase.sync_spends, blocks_added[0] == 0)
|
||||||
|
|
||||||
|
async def count_unspent_txos(
|
||||||
|
self,
|
||||||
|
txo_types: Tuple[int, ...],
|
||||||
|
blocks: Tuple[int, int] = None,
|
||||||
|
missing_in_supports_table: bool = False,
|
||||||
|
missing_in_claims_table: bool = False,
|
||||||
|
missing_or_stale_in_claims_table: bool = False,
|
||||||
|
) -> int:
|
||||||
|
return await self.db.run(
|
||||||
|
q.count_unspent_txos, txo_types, blocks,
|
||||||
|
missing_in_supports_table,
|
||||||
|
missing_in_claims_table,
|
||||||
|
missing_or_stale_in_claims_table,
|
||||||
|
)
|
||||||
|
|
||||||
|
async def distribute_unspent_txos(
|
||||||
|
self,
|
||||||
|
txo_types: Tuple[int, ...],
|
||||||
|
blocks: Tuple[int, int] = None,
|
||||||
|
missing_in_supports_table: bool = False,
|
||||||
|
missing_in_claims_table: bool = False,
|
||||||
|
missing_or_stale_in_claims_table: bool = False,
|
||||||
|
) -> Tuple[int, List]:
|
||||||
|
return await self.db.run(
|
||||||
|
q.distribute_unspent_txos, txo_types, blocks,
|
||||||
|
missing_in_supports_table,
|
||||||
|
missing_in_claims_table,
|
||||||
|
missing_or_stale_in_claims_table,
|
||||||
|
self.db.workers
|
||||||
|
)
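# distribute_unspent_txos() appears to return a (total, batches) pair: the
# number of matching unspent txos plus height ranges sized so that each of
# self.db.workers gets a roughly even share. It is used by the bulk initial
# claim and support sync paths below.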
|
||||||
|
|
||||||
|
async def count_abandoned_supports(self) -> int:
|
||||||
|
return await self.db.run(q.count_abandoned_supports)
|
||||||
|
|
||||||
|
async def count_abandoned_claims(self) -> int:
|
||||||
|
return await self.db.run(q.count_abandoned_claims)
|
||||||
|
|
||||||
|
async def count_claims_with_changed_supports(self, blocks) -> int:
|
||||||
|
return await self.db.run(q.count_claims_with_changed_supports, blocks)
|
||||||
|
|
||||||
|
async def count_claims_with_changed_reposts(self, blocks) -> int:
|
||||||
|
return await self.db.run(q.count_claims_with_changed_reposts, blocks)
|
||||||
|
|
||||||
|
async def count_channels_with_changed_content(self, blocks) -> int:
|
||||||
|
return await self.db.run(q.count_channels_with_changed_content, blocks)
|
||||||
|
|
||||||
|
async def count_takeovers(self, blocks) -> int:
|
||||||
|
return await self.chain.db.get_takeover_count(
|
||||||
|
start_height=blocks[0], end_height=blocks[-1]
|
||||||
|
)
|
||||||
|
|
||||||
|
async def sync_claims(self, blocks) -> bool:
|
||||||
|
delete_claims = takeovers = claims_with_changed_supports = claims_with_changed_reposts = 0
|
||||||
|
initial_sync = not await self.db.has_filters()
|
||||||
|
with Progress(self.db.message_queue, CLAIMS_INIT_EVENT) as p:
|
||||||
|
if initial_sync:
|
||||||
|
total, batches = await self.distribute_unspent_txos(CLAIM_TYPE_CODES)
|
||||||
|
elif blocks:
|
||||||
|
p.start(5)
|
||||||
|
# 1. content claims to be inserted or updated
|
||||||
|
total = await self.count_unspent_txos(
|
||||||
|
CLAIM_TYPE_CODES, blocks, missing_or_stale_in_claims_table=True
|
||||||
|
)
|
||||||
|
batches = [blocks] if total else []
|
||||||
|
p.step()
|
||||||
|
# 2. claims to be deleted
|
||||||
|
delete_claims = await self.count_abandoned_claims()
|
||||||
|
total += delete_claims
|
||||||
|
p.step()
|
||||||
|
# 3. claims to be updated with new support totals
|
||||||
|
claims_with_changed_supports = await self.count_claims_with_changed_supports(blocks)
|
||||||
|
total += claims_with_changed_supports
|
||||||
|
p.step()
|
||||||
|
# 4. claims to be updated with new repost totals
|
||||||
|
claims_with_changed_reposts = await self.count_claims_with_changed_reposts(blocks)
|
||||||
|
total += claims_with_changed_reposts
|
||||||
|
p.step()
|
||||||
|
# 5. claims to be updated due to name takeovers
|
||||||
|
takeovers = await self.count_takeovers(blocks)
|
||||||
|
total += takeovers
|
||||||
|
p.step()
|
||||||
|
else:
|
||||||
|
return initial_sync
|
||||||
|
with Progress(self.db.message_queue, CLAIMS_MAIN_EVENT) as p:
|
||||||
|
p.start(total)
|
||||||
|
if batches:
|
||||||
|
await self.run_tasks([
|
||||||
|
self.db.run(claim_phase.claims_insert, batch, not initial_sync, self.CLAIM_FLUSH_SIZE)
|
||||||
|
for batch in batches
|
||||||
|
])
|
||||||
|
if not initial_sync:
|
||||||
|
await self.run_tasks([
|
||||||
|
self.db.run(claim_phase.claims_update, batch) for batch in batches
|
||||||
|
])
|
||||||
|
if delete_claims:
|
||||||
|
await self.db.run(claim_phase.claims_delete, delete_claims)
|
||||||
|
if takeovers:
|
||||||
|
await self.db.run(claim_phase.update_takeovers, blocks, takeovers)
|
||||||
|
if claims_with_changed_supports:
|
||||||
|
await self.db.run(claim_phase.update_stakes, blocks, claims_with_changed_supports)
|
||||||
|
if claims_with_changed_reposts:
|
||||||
|
await self.db.run(claim_phase.update_reposts, blocks, claims_with_changed_reposts)
|
||||||
|
if initial_sync:
|
||||||
|
await self.db.run(claim_phase.claims_constraints_and_indexes)
|
||||||
|
else:
|
||||||
|
await self.db.run(claim_phase.claims_vacuum)
|
||||||
|
return initial_sync
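# Summary of the two claim sync paths: the initial sync bulk-inserts every
# claim and only then builds constraints and indexes, while an incremental
# sync runs the five counted steps above (insert/update, delete, support
# totals, repost totals, takeovers) and finishes with a vacuum. The returned
# initial_sync flag lets sync_channel_stats() pick the matching strategy.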
|
||||||
|
|
||||||
|
async def sync_supports(self, blocks):
|
||||||
|
delete_supports = 0
|
||||||
|
initial_sync = not await self.db.has_supports()
|
||||||
|
with Progress(self.db.message_queue, SUPPORTS_INIT_EVENT) as p:
|
||||||
|
if initial_sync:
|
||||||
|
total, support_batches = await self.distribute_unspent_txos(TXO_TYPES['support'])
|
||||||
|
elif blocks:
|
||||||
|
p.start(2)
|
||||||
|
# 1. supports to be inserted
|
||||||
|
total = await self.count_unspent_txos(
|
||||||
|
TXO_TYPES['support'], blocks, missing_in_supports_table=True
|
||||||
|
)
|
||||||
|
support_batches = [blocks] if total else []
|
||||||
|
p.step()
|
||||||
|
# 2. supports to be deleted
|
||||||
|
delete_supports = await self.count_abandoned_supports()
|
||||||
|
total += delete_supports
|
||||||
|
p.step()
|
||||||
|
else:
|
||||||
|
return
|
||||||
|
with Progress(self.db.message_queue, SUPPORTS_MAIN_EVENT) as p:
|
||||||
|
p.start(total)
|
||||||
|
if support_batches:
|
||||||
|
await self.run_tasks([
|
||||||
|
self.db.run(
|
||||||
|
support_phase.supports_insert, batch, not initial_sync, self.SUPPORT_FLUSH_SIZE
|
||||||
|
) for batch in support_batches
|
||||||
|
])
|
||||||
|
if delete_supports:
|
||||||
|
await self.db.run(support_phase.supports_delete, delete_supports)
|
||||||
|
if initial_sync:
|
||||||
|
await self.db.run(support_phase.supports_constraints_and_indexes)
|
||||||
|
else:
|
||||||
|
await self.db.run(support_phase.supports_vacuum)
|
||||||
|
|
||||||
|
async def sync_channel_stats(self, blocks, initial_sync):
|
||||||
|
await self.db.run(claim_phase.update_channel_stats, blocks, initial_sync)
|
||||||
|
|
||||||
|
async def sync_trends(self):
|
||||||
|
ending_height = await self.chain.db.get_best_height()
|
||||||
|
if ending_height is not None:
|
||||||
|
await self.db.run(trending.calculate_trending, ending_height)
|
||||||
|
|
||||||
|
async def sync_claim_filtering(self):
|
||||||
|
await self.db.run(
|
||||||
|
claim_phase.update_claim_filters,
|
||||||
|
self.resolve_censor_channel_hashes,
|
||||||
|
self.search_censor_channel_hashes
|
||||||
|
)
|
||||||
|
|
||||||
|
async def advance(self):
|
||||||
|
blocks_added = await self.sync_blocks()
|
||||||
|
await self.sync_spends(blocks_added)
|
||||||
|
await self.sync_filters()
|
||||||
|
initial_claim_sync = await self.sync_claims(blocks_added)
|
||||||
|
await self.sync_supports(blocks_added)
|
||||||
|
await self.sync_channel_stats(blocks_added, initial_claim_sync)
|
||||||
|
await self.sync_trends()
|
||||||
|
await self.sync_claim_filtering()
|
||||||
|
if blocks_added:
|
||||||
|
await self._on_block_controller.add(BlockEvent(blocks_added[-1]))
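# advance() is the full per-block pipeline: blocks -> spends -> filters ->
# claims -> supports -> channel stats -> trending -> claim filtering. Only
# after everything is flushed is a BlockEvent published on self.on_block, so
# subscribers always observe a consistent database at the announced height.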
|
||||||
|
|
||||||
|
async def sync_mempool(self):
|
||||||
|
added = await self.db.run(block_phase.sync_mempool)
|
||||||
|
await self.sync_spends([-1])
|
||||||
|
await self.db.run(claim_phase.claims_insert, [-1, -1], True, self.CLAIM_FLUSH_SIZE)
|
||||||
|
await self.db.run(claim_phase.claims_update, [-1, -1])
|
||||||
|
await self.db.run(claim_phase.claims_vacuum)
|
||||||
|
self.mempool.extend(added)
|
||||||
|
await self._on_mempool_controller.add(added)
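# Mempool transactions appear to be staged at the sentinel height -1, which is
# why sync_spends() and the claim insert/update steps are re-run over the
# [-1, -1] range before vacuuming. The added txs are also buffered in
# self.mempool and broadcast to mempool subscribers.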
|
||||||
|
|
||||||
|
async def clear_mempool(self):
|
||||||
|
self.mempool.clear()
|
||||||
|
await self.db.run(block_phase.clear_mempool)
|
||||||
|
|
||||||
|
async def advance_loop(self):
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
await asyncio.wait([
|
||||||
|
self.tx_hash_event.wait(),
|
||||||
|
self.block_hash_event.wait(),
|
||||||
|
], return_when=asyncio.FIRST_COMPLETED)
|
||||||
|
if self.block_hash_event.is_set():
|
||||||
|
self.block_hash_event.clear()
|
||||||
|
await self.clear_mempool()
|
||||||
|
await self.advance()
|
||||||
|
self.tx_hash_event.clear()
|
||||||
|
await self.sync_mempool()
|
||||||
|
except asyncio.CancelledError:
|
||||||
|
return
|
||||||
|
except Exception as e:
|
||||||
|
log.exception(e)
|
||||||
|
await self.stop()
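# advance_loop() is event driven: a new block hash clears the stale mempool
# state and runs a full advance(), after which the tx hash event is reset and
# the mempool is re-synced; a tx hash alone only triggers sync_mempool(). Any
# unexpected error is logged and shuts the sync service down.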
|
||||||
|
|
||||||
|
async def rewind(self, height):
|
||||||
|
await self.db.run(block_phase.rewind, height)
|
|
@@ -1,9 +1,10 @@
|
||||||
import struct
|
import struct
|
||||||
import hashlib
|
import hashlib
|
||||||
import logging
|
import logging
|
||||||
import typing
|
import asyncio
|
||||||
|
from datetime import date
|
||||||
from binascii import hexlify, unhexlify
|
from binascii import hexlify, unhexlify
|
||||||
from typing import List, Iterable, Optional, Tuple
|
from typing import List, Iterable, Optional, Union
|
||||||
|
|
||||||
import ecdsa
|
import ecdsa
|
||||||
from cryptography.hazmat.backends import default_backend
|
from cryptography.hazmat.backends import default_backend
|
||||||
|
@@ -13,24 +14,19 @@ from cryptography.hazmat.primitives.asymmetric import ec
|
||||||
from cryptography.hazmat.primitives.asymmetric.utils import Prehashed
|
from cryptography.hazmat.primitives.asymmetric.utils import Prehashed
|
||||||
from cryptography.exceptions import InvalidSignature
|
from cryptography.exceptions import InvalidSignature
|
||||||
|
|
||||||
from lbry.error import InsufficientFundsError
|
|
||||||
from lbry.crypto.hash import hash160, sha256
|
from lbry.crypto.hash import hash160, sha256
|
||||||
from lbry.crypto.base58 import Base58
|
from lbry.crypto.base58 import Base58
|
||||||
from lbry.schema.url import normalize_name
|
from lbry.schema.url import normalize_name
|
||||||
from lbry.schema.claim import Claim
|
from lbry.schema.claim import Claim
|
||||||
|
from lbry.schema.base import Signable
|
||||||
from lbry.schema.purchase import Purchase
|
from lbry.schema.purchase import Purchase
|
||||||
|
from lbry.schema.support import Support
|
||||||
|
|
||||||
from .script import InputScript, OutputScript
|
from .script import InputScript, OutputScript
|
||||||
from .constants import COIN, NULL_HASH32
|
|
||||||
from .bcd_data_stream import BCDataStream
|
from .bcd_data_stream import BCDataStream
|
||||||
from .hash import TXRef, TXRefImmutable
|
from .hash import TXRef, TXRefImmutable
|
||||||
from .util import ReadOnlyList
|
from .util import ReadOnlyList
|
||||||
|
|
||||||
if typing.TYPE_CHECKING:
|
|
||||||
from lbry.wallet.account import Account
|
|
||||||
from lbry.wallet.ledger import Ledger
|
|
||||||
from lbry.wallet.wallet import Wallet
|
|
||||||
|
|
||||||
log = logging.getLogger()
|
log = logging.getLogger()
|
||||||
|
|
||||||
|
|
||||||
|
@@ -58,6 +54,10 @@ class TXRefMutable(TXRef):
|
||||||
def height(self):
|
def height(self):
|
||||||
return self.tx.height
|
return self.tx.height
|
||||||
|
|
||||||
|
@property
|
||||||
|
def timestamp(self):
|
||||||
|
return self.tx.timestamp
|
||||||
|
|
||||||
def reset(self):
|
def reset(self):
|
||||||
self._id = None
|
self._id = None
|
||||||
self._hash = None
|
self._hash = None
|
||||||
|
@@ -107,7 +107,7 @@ class InputOutput:
|
||||||
|
|
||||||
__slots__ = 'tx_ref', 'position'
|
__slots__ = 'tx_ref', 'position'
|
||||||
|
|
||||||
def __init__(self, tx_ref: TXRef = None, position: int = None) -> None:
|
def __init__(self, tx_ref: Union[TXRef, TXRefImmutable] = None, position: int = None) -> None:
|
||||||
self.tx_ref = tx_ref
|
self.tx_ref = tx_ref
|
||||||
self.position = position
|
self.position = position
|
||||||
|
|
||||||
|
@@ -129,6 +129,7 @@ class Input(InputOutput):
|
||||||
|
|
||||||
NULL_SIGNATURE = b'\x00'*72
|
NULL_SIGNATURE = b'\x00'*72
|
||||||
NULL_PUBLIC_KEY = b'\x00'*33
|
NULL_PUBLIC_KEY = b'\x00'*33
|
||||||
|
NULL_HASH32 = b'\x00'*32
|
||||||
|
|
||||||
__slots__ = 'txo_ref', 'sequence', 'coinbase', 'script'
|
__slots__ = 'txo_ref', 'sequence', 'coinbase', 'script'
|
||||||
|
|
||||||
|
@@ -151,6 +152,12 @@ class Input(InputOutput):
|
||||||
script = InputScript.redeem_pubkey_hash(cls.NULL_SIGNATURE, cls.NULL_PUBLIC_KEY)
|
script = InputScript.redeem_pubkey_hash(cls.NULL_SIGNATURE, cls.NULL_PUBLIC_KEY)
|
||||||
return cls(txo.ref, script)
|
return cls(txo.ref, script)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def create_coinbase(cls) -> 'Input':
|
||||||
|
tx_ref = TXRefImmutable.from_hash(cls.NULL_HASH32, 0, 0)
|
||||||
|
txo_ref = TXORef(tx_ref, 0)
|
||||||
|
return cls(txo_ref, b'beef')
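# Coinbase inputs spend the all-zero NULL_HASH32 outpoint rather than a real
# txo, and their script is not interpreted the way normal input scripts are,
# so b'beef' is just an arbitrary placeholder payload here.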
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def amount(self) -> int:
|
def amount(self) -> int:
|
||||||
""" Amount this input adds to the transaction. """
|
""" Amount this input adds to the transaction. """
|
||||||
|
@@ -167,7 +174,7 @@ class Input(InputOutput):
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def deserialize_from(cls, stream):
|
def deserialize_from(cls, stream):
|
||||||
tx_ref = TXRefImmutable.from_hash(stream.read(32), -1)
|
tx_ref = TXRefImmutable.from_hash(stream.read(32), -1, -1)
|
||||||
position = stream.read_uint32()
|
position = stream.read_uint32()
|
||||||
script = stream.read_string()
|
script = stream.read_string()
|
||||||
sequence = stream.read_uint32()
|
sequence = stream.read_uint32()
|
||||||
|
@@ -190,32 +197,18 @@ class Input(InputOutput):
|
||||||
stream.write_uint32(self.sequence)
|
stream.write_uint32(self.sequence)
|
||||||
|
|
||||||
|
|
||||||
class OutputEffectiveAmountEstimator:
|
|
||||||
|
|
||||||
__slots__ = 'txo', 'txi', 'fee', 'effective_amount'
|
|
||||||
|
|
||||||
def __init__(self, ledger: 'Ledger', txo: 'Output') -> None:
|
|
||||||
self.txo = txo
|
|
||||||
self.txi = Input.spend(txo)
|
|
||||||
self.fee: int = self.txi.get_fee(ledger)
|
|
||||||
self.effective_amount: int = txo.amount - self.fee
|
|
||||||
|
|
||||||
def __lt__(self, other):
|
|
||||||
return self.effective_amount < other.effective_amount
|
|
||||||
|
|
||||||
|
|
||||||
class Output(InputOutput):
|
class Output(InputOutput):
|
||||||
|
|
||||||
__slots__ = (
|
__slots__ = (
|
||||||
'amount', 'script', 'is_internal_transfer', 'is_spent', 'is_my_output', 'is_my_input',
|
'amount', 'script', 'is_internal_transfer', 'spent_height', 'is_my_output', 'is_my_input',
|
||||||
'channel', 'private_key', 'meta', 'sent_supports', 'sent_tips', 'received_tips',
|
'channel', 'private_key', 'meta', 'sent_supports', 'sent_tips', 'received_tips',
|
||||||
'purchase', 'purchased_claim', 'purchase_receipt',
|
'purchase', 'purchased_claim', 'purchase_receipt',
|
||||||
'reposted_claim', 'claims',
|
'reposted_claim', 'claims', '_signable'
|
||||||
)
|
)
|
||||||
|
|
||||||
def __init__(self, amount: int, script: OutputScript,
|
def __init__(self, amount: int, script: OutputScript,
|
||||||
tx_ref: TXRef = None, position: int = None,
|
tx_ref: TXRef = None, position: int = None,
|
||||||
is_internal_transfer: Optional[bool] = None, is_spent: Optional[bool] = None,
|
is_internal_transfer: Optional[bool] = None, spent_height: Optional[bool] = None,
|
||||||
is_my_output: Optional[bool] = None, is_my_input: Optional[bool] = None,
|
is_my_output: Optional[bool] = None, is_my_input: Optional[bool] = None,
|
||||||
sent_supports: Optional[int] = None, sent_tips: Optional[int] = None,
|
sent_supports: Optional[int] = None, sent_tips: Optional[int] = None,
|
||||||
received_tips: Optional[int] = None,
|
received_tips: Optional[int] = None,
|
||||||
|
@@ -225,7 +218,7 @@ class Output(InputOutput):
|
||||||
self.amount = amount
|
self.amount = amount
|
||||||
self.script = script
|
self.script = script
|
||||||
self.is_internal_transfer = is_internal_transfer
|
self.is_internal_transfer = is_internal_transfer
|
||||||
self.is_spent = is_spent
|
self.spent_height = spent_height
|
||||||
self.is_my_output = is_my_output
|
self.is_my_output = is_my_output
|
||||||
self.is_my_input = is_my_input
|
self.is_my_input = is_my_input
|
||||||
self.sent_supports = sent_supports
|
self.sent_supports = sent_supports
|
||||||
|
@@ -238,12 +231,13 @@ class Output(InputOutput):
|
||||||
self.purchase_receipt: 'Output' = None # txo representing purchase receipt for this claim
|
self.purchase_receipt: 'Output' = None # txo representing purchase receipt for this claim
|
||||||
self.reposted_claim: 'Output' = None # txo representing claim being reposted
|
self.reposted_claim: 'Output' = None # txo representing claim being reposted
|
||||||
self.claims: List['Output'] = None # resolved claims for collection
|
self.claims: List['Output'] = None # resolved claims for collection
|
||||||
|
self._signable: Optional[Signable] = None
|
||||||
self.meta = {}
|
self.meta = {}
|
||||||
|
|
||||||
def update_annotations(self, annotated: 'Output'):
|
def update_annotations(self, annotated: 'Output'):
|
||||||
if annotated is None:
|
if annotated is None:
|
||||||
self.is_internal_transfer = None
|
self.is_internal_transfer = None
|
||||||
self.is_spent = None
|
self.spent_height = None
|
||||||
self.is_my_output = None
|
self.is_my_output = None
|
||||||
self.is_my_input = None
|
self.is_my_input = None
|
||||||
self.sent_supports = None
|
self.sent_supports = None
|
||||||
|
@@ -251,7 +245,7 @@ class Output(InputOutput):
|
||||||
self.received_tips = None
|
self.received_tips = None
|
||||||
else:
|
else:
|
||||||
self.is_internal_transfer = annotated.is_internal_transfer
|
self.is_internal_transfer = annotated.is_internal_transfer
|
||||||
self.is_spent = annotated.is_spent
|
self.spent_height = annotated.spent_height
|
||||||
self.is_my_output = annotated.is_my_output
|
self.is_my_output = annotated.is_my_output
|
||||||
self.is_my_input = annotated.is_my_input
|
self.is_my_input = annotated.is_my_input
|
||||||
self.sent_supports = annotated.sent_supports
|
self.sent_supports = annotated.sent_supports
|
||||||
|
@@ -268,30 +262,46 @@ class Output(InputOutput):
|
||||||
def id(self):
|
def id(self):
|
||||||
return self.ref.id
|
return self.ref.id
|
||||||
|
|
||||||
|
@property
|
||||||
|
def hash(self):
|
||||||
|
return self.ref.hash
|
||||||
|
|
||||||
|
@property
|
||||||
|
def is_spent(self):
|
||||||
|
if self.spent_height is not None:
|
||||||
|
return self.spent_height > 0
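# spent_height replaces the old is_spent boolean: None means the status is
# unknown (is_spent also returns None in that case), while a positive value
# records the block height at which the txo was spent.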
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def pubkey_hash(self):
|
def pubkey_hash(self):
|
||||||
return self.script.values['pubkey_hash']
|
pubkey_hash = self.script.values.get('pubkey_hash')
|
||||||
|
if pubkey_hash:
|
||||||
|
return pubkey_hash
|
||||||
|
return hash160(self.script.values['pubkey'])
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def has_address(self):
|
def has_address(self):
|
||||||
return 'pubkey_hash' in self.script.values
|
return (
|
||||||
|
'pubkey_hash' in self.script.values or
|
||||||
|
'script_hash' in self.script.values or
|
||||||
|
'pubkey' in self.script.values
|
||||||
|
)
|
||||||
|
|
||||||
def get_address(self, ledger):
|
def get_address(self, ledger):
|
||||||
return ledger.hash160_to_address(self.pubkey_hash)
|
if 'script_hash' in self.script.values:
|
||||||
|
return ledger.script_hash_to_address(self.script.values['script_hash'])
|
||||||
def get_estimator(self, ledger):
|
return ledger.pubkey_hash_to_address(self.pubkey_hash)
|
||||||
return OutputEffectiveAmountEstimator(ledger, self)
|
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def pay_pubkey_hash(cls, amount, pubkey_hash):
|
def pay_pubkey_hash(cls, amount, pubkey_hash):
|
||||||
return cls(amount, OutputScript.pay_pubkey_hash(pubkey_hash))
|
return cls(amount, OutputScript.pay_pubkey_hash(pubkey_hash))
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def deserialize_from(cls, stream):
|
def deserialize_from(cls, stream, transaction_offset: int = 0):
|
||||||
return cls(
|
amount = stream.read_uint64()
|
||||||
amount=stream.read_uint64(),
|
length = stream.read_compact_size()
|
||||||
script=OutputScript(stream.read_string())
|
offset = stream.tell()-transaction_offset
|
||||||
)
|
script = OutputScript(stream.read(length), offset=offset)
|
||||||
|
return cls(amount=amount, script=script)
|
||||||
|
|
||||||
def serialize_to(self, stream, alternate_script=None):
|
def serialize_to(self, stream, alternate_script=None):
|
||||||
stream.write_uint64(self.amount)
|
stream.write_uint64(self.amount)
|
||||||
|
@@ -311,6 +321,10 @@ class Output(InputOutput):
|
||||||
def is_support(self) -> bool:
|
def is_support(self) -> bool:
|
||||||
return self.script.is_support_claim
|
return self.script.is_support_claim
|
||||||
|
|
||||||
|
@property
|
||||||
|
def is_support_data(self) -> bool:
|
||||||
|
return self.script.is_support_claim_data
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def claim_hash(self) -> bytes:
|
def claim_hash(self) -> bytes:
|
||||||
if self.script.is_claim_name:
|
if self.script.is_claim_name:
|
||||||
|
@@ -346,7 +360,38 @@ class Output(InputOutput):
|
||||||
def can_decode_claim(self):
|
def can_decode_claim(self):
|
||||||
try:
|
try:
|
||||||
return self.claim
|
return self.claim
|
||||||
except: # pylint: disable=bare-except
|
except Exception:
|
||||||
|
return False
|
||||||
|
|
||||||
|
@property
|
||||||
|
def support(self) -> Support:
|
||||||
|
if self.is_support_data:
|
||||||
|
if not isinstance(self.script.values['support'], Support):
|
||||||
|
self.script.values['support'] = Support.from_bytes(self.script.values['support'])
|
||||||
|
return self.script.values['support']
|
||||||
|
raise ValueError('Only supports with data can be represented as Supports.')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def can_decode_support(self):
|
||||||
|
try:
|
||||||
|
return self.support
|
||||||
|
except Exception:
|
||||||
|
return False
|
||||||
|
|
||||||
|
@property
|
||||||
|
def signable(self) -> Signable:
|
||||||
|
if self._signable is None:
|
||||||
|
if self.is_claim:
|
||||||
|
self._signable = self.claim
|
||||||
|
elif self.is_support_data:
|
||||||
|
self._signable = self.support
|
||||||
|
return self._signable
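# signable abstracts over the two signable payload types: claim outputs expose
# their Claim and support-with-data outputs expose their Support, so the
# signature helpers below (get_signature_digest, get_encoded_signature, sign)
# work identically for both instead of assuming self.claim.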
|
||||||
|
|
||||||
|
@property
|
||||||
|
def can_decode_signable(self) -> Signable:
|
||||||
|
try:
|
||||||
|
return self.signable
|
||||||
|
except Exception:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@property
|
@property
|
||||||
|
@@ -360,22 +405,22 @@ class Output(InputOutput):
|
||||||
return self.private_key is not None
|
return self.private_key is not None
|
||||||
|
|
||||||
def get_signature_digest(self, ledger):
|
def get_signature_digest(self, ledger):
|
||||||
if self.claim.unsigned_payload:
|
if self.signable.unsigned_payload:
|
||||||
pieces = [
|
pieces = [
|
||||||
Base58.decode(self.get_address(ledger)),
|
Base58.decode(self.get_address(ledger)),
|
||||||
self.claim.unsigned_payload,
|
self.signable.unsigned_payload,
|
||||||
self.claim.signing_channel_hash[::-1]
|
self.signable.signing_channel_hash[::-1]
|
||||||
]
|
]
|
||||||
else:
|
else:
|
||||||
pieces = [
|
pieces = [
|
||||||
self.tx_ref.tx.inputs[0].txo_ref.hash,
|
self.tx_ref.tx.inputs[0].txo_ref.hash,
|
||||||
self.claim.signing_channel_hash,
|
self.signable.signing_channel_hash,
|
||||||
self.claim.to_message_bytes()
|
self.signable.to_message_bytes()
|
||||||
]
|
]
|
||||||
return sha256(b''.join(pieces))
|
return sha256(b''.join(pieces))
|
||||||
|
|
||||||
def get_encoded_signature(self):
|
def get_encoded_signature(self):
|
||||||
signature = hexlify(self.claim.signature)
|
signature = hexlify(self.signable.signature)
|
||||||
r = int(signature[:int(len(signature)/2)], 16)
|
r = int(signature[:int(len(signature)/2)], 16)
|
||||||
s = int(signature[int(len(signature)/2):], 16)
|
s = int(signature[int(len(signature)/2):], 16)
|
||||||
return ecdsa.util.sigencode_der(r, s, len(signature)*4)
|
return ecdsa.util.sigencode_der(r, s, len(signature)*4)
|
||||||
|
@@ -384,7 +429,10 @@ class Output(InputOutput):
|
||||||
def is_signature_valid(encoded_signature, signature_digest, public_key_bytes):
|
def is_signature_valid(encoded_signature, signature_digest, public_key_bytes):
|
||||||
try:
|
try:
|
||||||
public_key = load_der_public_key(public_key_bytes, default_backend())
|
public_key = load_der_public_key(public_key_bytes, default_backend())
|
||||||
public_key.verify(encoded_signature, signature_digest, ec.ECDSA(Prehashed(hashes.SHA256())))
|
public_key.verify( # pylint: disable=no-value-for-parameter
|
||||||
|
encoded_signature, signature_digest,
|
||||||
|
ec.ECDSA(Prehashed(hashes.SHA256()))
|
||||||
|
)
|
||||||
return True
|
return True
|
||||||
except (ValueError, InvalidSignature):
|
except (ValueError, InvalidSignature):
|
||||||
pass
|
pass
|
||||||
|
@@ -399,22 +447,31 @@ class Output(InputOutput):
|
||||||
|
|
||||||
def sign(self, channel: 'Output', first_input_id=None):
|
def sign(self, channel: 'Output', first_input_id=None):
|
||||||
self.channel = channel
|
self.channel = channel
|
||||||
self.claim.signing_channel_hash = channel.claim_hash
|
self.signable.signing_channel_hash = channel.claim_hash
|
||||||
digest = sha256(b''.join([
|
digest = sha256(b''.join([
|
||||||
first_input_id or self.tx_ref.tx.inputs[0].txo_ref.hash,
|
first_input_id or self.tx_ref.tx.inputs[0].txo_ref.hash,
|
||||||
self.claim.signing_channel_hash,
|
self.signable.signing_channel_hash,
|
||||||
self.claim.to_message_bytes()
|
self.signable.to_message_bytes()
|
||||||
]))
|
]))
|
||||||
self.claim.signature = channel.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
|
self.signable.signature = channel.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
|
||||||
self.script.generate()
|
self.script.generate()
|
||||||
|
|
||||||
def clear_signature(self):
|
def clear_signature(self):
|
||||||
self.channel = None
|
self.channel = None
|
||||||
self.claim.clear_signature()
|
self.claim.clear_signature()
|
||||||
|
|
||||||
def generate_channel_private_key(self):
|
@staticmethod
|
||||||
self.private_key = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
|
def _sync_generate_channel_private_key():
|
||||||
self.claim.channel.public_key_bytes = self.private_key.get_verifying_key().to_der()
|
private_key = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
|
||||||
|
public_key_bytes = private_key.get_verifying_key().to_der()
|
||||||
|
return private_key, public_key_bytes
|
||||||
|
|
||||||
|
async def generate_channel_private_key(self):
|
||||||
|
private_key, public_key_bytes = await asyncio.get_running_loop().run_in_executor(
|
||||||
|
None, Output._sync_generate_channel_private_key
|
||||||
|
)
|
||||||
|
self.private_key = private_key
|
||||||
|
self.claim.channel.public_key_bytes = public_key_bytes
|
||||||
self.script.generate()
|
self.script.generate()
|
||||||
return self.private_key
|
return self.private_key
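# ECDSA key generation is CPU-bound, so it is pushed onto the default thread
# pool executor via run_in_executor; the method is now a coroutine and callers
# must await it before reading self.private_key.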
|
||||||
|
|
||||||
|
@@ -443,6 +500,14 @@ class Output(InputOutput):
|
||||||
)
|
)
|
||||||
return cls(amount, script)
|
return cls(amount, script)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def pay_support_data_pubkey_hash(
|
||||||
|
cls, amount: int, claim_name: str, claim_id: str, support: Support, pubkey_hash: bytes) -> 'Output':
|
||||||
|
script = OutputScript.pay_support_data_pubkey_hash(
|
||||||
|
claim_name.encode(), unhexlify(claim_id)[::-1], support, pubkey_hash
|
||||||
|
)
|
||||||
|
return cls(amount, script)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def add_purchase_data(cls, purchase: Purchase) -> 'Output':
|
def add_purchase_data(cls, purchase: Purchase) -> 'Output':
|
||||||
script = OutputScript.return_data(purchase)
|
script = OutputScript.return_data(purchase)
|
||||||
|
@@ -477,6 +542,13 @@ class Output(InputOutput):
|
||||||
if self.purchased_claim is not None:
|
if self.purchased_claim is not None:
|
||||||
return self.purchased_claim.claim_id
|
return self.purchased_claim.claim_id
|
||||||
|
|
||||||
|
@property
|
||||||
|
def purchased_claim_hash(self):
|
||||||
|
if self.purchase is not None:
|
||||||
|
return self.purchase.purchase_data.claim_hash
|
||||||
|
if self.purchased_claim is not None:
|
||||||
|
return self.purchased_claim.claim_hash
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def has_price(self):
|
def has_price(self):
|
||||||
if self.can_decode_claim:
|
if self.can_decode_claim:
|
||||||
|
@@ -494,7 +566,7 @@ class Transaction:
|
||||||
class Transaction:
|
class Transaction:
|
||||||
|
|
||||||
def __init__(self, raw=None, version: int = 1, locktime: int = 0, is_verified: bool = False,
|
def __init__(self, raw=None, version: int = 1, locktime: int = 0, is_verified: bool = False,
|
||||||
height: int = -2, position: int = -1, julian_day: int = None) -> None:
|
height: int = -2, position: int = -1, timestamp: int = 0) -> None:
|
||||||
self._raw = raw
|
self._raw = raw
|
||||||
self._raw_sans_segwit = None
|
self._raw_sans_segwit = None
|
||||||
self.is_segwit_flag = 0
|
self.is_segwit_flag = 0
|
||||||
|
@@ -512,9 +584,13 @@ class Transaction:
|
||||||
# +num: confirmed in a specific block (height)
|
# +num: confirmed in a specific block (height)
|
||||||
self.height = height
|
self.height = height
|
||||||
self.position = position
|
self.position = position
|
||||||
self._day = julian_day
|
self.timestamp = timestamp
|
||||||
|
self._day: int = 0
|
||||||
if raw is not None:
|
if raw is not None:
|
||||||
self._deserialize()
|
self.deserialize()
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return f"TX({self.id[:10]}...{self.id[-10:]})"
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def is_broadcast(self):
|
def is_broadcast(self):
|
||||||
|
@@ -536,9 +612,10 @@ class Transaction:
|
||||||
def hash(self):
|
def hash(self):
|
||||||
return self.ref.hash
|
return self.ref.hash
|
||||||
|
|
||||||
def get_julian_day(self, ledger):
|
@property
|
||||||
if self._day is None and self.height > 0:
|
def day(self):
|
||||||
self._day = ledger.headers.estimated_julian_day(self.height)
|
if not self._day and self.timestamp > 0:
|
||||||
|
self._day = date.fromtimestamp(self.timestamp).toordinal()
|
||||||
return self._day
|
return self._day
|
||||||
|
|
||||||
@property
|
@property
|
||||||
|
@@ -674,9 +751,10 @@ class Transaction:
|
||||||
stream.write_uint32(self.signature_hash_type(1)) # signature hash type: SIGHASH_ALL
|
stream.write_uint32(self.signature_hash_type(1)) # signature hash type: SIGHASH_ALL
|
||||||
return stream.get_bytes()
|
return stream.get_bytes()
|
||||||
|
|
||||||
def _deserialize(self):
|
def deserialize(self, stream=None):
|
||||||
if self._raw is not None:
|
if self._raw is not None or stream is not None:
|
||||||
stream = BCDataStream(self._raw)
|
stream = stream or BCDataStream(self._raw)
|
||||||
|
start = stream.tell()
|
||||||
self.version = stream.read_uint32()
|
self.version = stream.read_uint32()
|
||||||
input_count = stream.read_compact_size()
|
input_count = stream.read_compact_size()
|
||||||
if input_count == 0:
|
if input_count == 0:
|
||||||
|
@@ -687,7 +765,7 @@ class Transaction:
|
||||||
])
|
])
|
||||||
output_count = stream.read_compact_size()
|
output_count = stream.read_compact_size()
|
||||||
self._add(self._outputs, [
|
self._add(self._outputs, [
|
||||||
Output.deserialize_from(stream) for _ in range(output_count)
|
Output.deserialize_from(stream, start) for _ in range(output_count)
|
||||||
])
|
])
|
||||||
if self.is_segwit_flag:
|
if self.is_segwit_flag:
|
||||||
# drain witness portion of transaction
|
# drain witness portion of transaction
|
||||||
|
@@ -697,181 +775,12 @@ class Transaction:
|
||||||
for _ in range(stream.read_compact_size()):
|
for _ in range(stream.read_compact_size()):
|
||||||
self.witnesses.append(stream.read(stream.read_compact_size()))
|
self.witnesses.append(stream.read(stream.read_compact_size()))
|
||||||
self.locktime = stream.read_uint32()
|
self.locktime = stream.read_uint32()
|
||||||
|
return self
|
||||||
@classmethod
|
|
||||||
def ensure_all_have_same_ledger_and_wallet(
|
|
||||||
cls, funding_accounts: Iterable['Account'],
|
|
||||||
change_account: 'Account' = None) -> Tuple['Ledger', 'Wallet']:
|
|
||||||
ledger = wallet = None
|
|
||||||
for account in funding_accounts:
|
|
||||||
if ledger is None:
|
|
||||||
ledger = account.ledger
|
|
||||||
wallet = account.wallet
|
|
||||||
if ledger != account.ledger:
|
|
||||||
raise ValueError(
|
|
||||||
'All funding accounts used to create a transaction must be on the same ledger.'
|
|
||||||
)
|
|
||||||
if wallet != account.wallet:
|
|
||||||
raise ValueError(
|
|
||||||
'All funding accounts used to create a transaction must be from the same wallet.'
|
|
||||||
)
|
|
||||||
if change_account is not None:
|
|
||||||
if change_account.ledger != ledger:
|
|
||||||
raise ValueError('Change account must use same ledger as funding accounts.')
|
|
||||||
if change_account.wallet != wallet:
|
|
||||||
raise ValueError('Change account must use same wallet as funding accounts.')
|
|
||||||
if ledger is None:
|
|
||||||
raise ValueError('No ledger found.')
|
|
||||||
if wallet is None:
|
|
||||||
raise ValueError('No wallet found.')
|
|
||||||
return ledger, wallet
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
async def create(cls, inputs: Iterable[Input], outputs: Iterable[Output],
|
|
||||||
funding_accounts: Iterable['Account'], change_account: 'Account',
|
|
||||||
sign: bool = True):
|
|
||||||
""" Find optimal set of inputs when only outputs are provided; add change
|
|
||||||
outputs if only inputs are provided or if inputs are greater than outputs. """
|
|
||||||
|
|
||||||
tx = cls() \
|
|
||||||
.add_inputs(inputs) \
|
|
||||||
.add_outputs(outputs)
|
|
||||||
|
|
||||||
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
|
|
||||||
|
|
||||||
# value of the outputs plus associated fees
|
|
||||||
cost = (
|
|
||||||
tx.get_base_fee(ledger) +
|
|
||||||
tx.get_total_output_sum(ledger)
|
|
||||||
)
|
|
||||||
# value of the inputs less the cost to spend those inputs
|
|
||||||
payment = tx.get_effective_input_sum(ledger)
|
|
||||||
|
|
||||||
try:
|
|
||||||
|
|
||||||
for _ in range(5):
|
|
||||||
|
|
||||||
if payment < cost:
|
|
||||||
deficit = cost - payment
|
|
||||||
spendables = await ledger.get_spendable_utxos(deficit, funding_accounts)
|
|
||||||
if not spendables:
|
|
||||||
raise InsufficientFundsError()
|
|
||||||
payment += sum(s.effective_amount for s in spendables)
|
|
||||||
tx.add_inputs(s.txi for s in spendables)
|
|
||||||
|
|
||||||
cost_of_change = (
|
|
||||||
tx.get_base_fee(ledger) +
|
|
||||||
Output.pay_pubkey_hash(COIN, NULL_HASH32).get_fee(ledger)
|
|
||||||
)
|
|
||||||
if payment > cost:
|
|
||||||
change = payment - cost
|
|
||||||
if change > cost_of_change:
|
|
||||||
change_address = await change_account.change.get_or_create_usable_address()
|
|
||||||
change_hash160 = change_account.ledger.address_to_hash160(change_address)
|
|
||||||
change_amount = change - cost_of_change
|
|
||||||
change_output = Output.pay_pubkey_hash(change_amount, change_hash160)
|
|
||||||
change_output.is_internal_transfer = True
|
|
||||||
tx.add_outputs([Output.pay_pubkey_hash(change_amount, change_hash160)])
|
|
||||||
|
|
||||||
if tx._outputs:
|
|
||||||
break
|
|
||||||
# this condition and the outer range(5) loop cover an edge case
|
|
||||||
# whereby a single input is just enough to cover the fee and
|
|
||||||
# has some change left over, but the change left over is less
|
|
||||||
# than the cost_of_change: thus the input is completely
|
|
||||||
# consumed and no output is added, which is an invalid tx.
|
|
||||||
# to be able to spend this input we must increase the cost
|
|
||||||
# of the TX and run through the balance algorithm a second time
|
|
||||||
# adding an extra input and change output, making tx valid.
|
|
||||||
# we do this 5 times in case the other UTXOs added are also
|
|
||||||
# less than the fee, after 5 attempts we give up and go home
|
|
||||||
cost += cost_of_change + 1
|
|
||||||
|
|
||||||
if sign:
|
|
||||||
await tx.sign(funding_accounts)
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
log.exception('Failed to create transaction:')
|
|
||||||
await ledger.release_tx(tx)
|
|
||||||
raise e
|
|
||||||
|
|
||||||
return tx
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def signature_hash_type(hash_type):
|
def signature_hash_type(hash_type):
|
||||||
return hash_type
|
return hash_type
|
||||||
|
|
||||||
async def sign(self, funding_accounts: Iterable['Account']):
|
|
||||||
ledger, wallet = self.ensure_all_have_same_ledger_and_wallet(funding_accounts)
|
|
||||||
for i, txi in enumerate(self._inputs):
|
|
||||||
assert txi.script is not None
|
|
||||||
assert txi.txo_ref.txo is not None
|
|
||||||
txo_script = txi.txo_ref.txo.script
|
|
||||||
if txo_script.is_pay_pubkey_hash:
|
|
||||||
address = ledger.hash160_to_address(txo_script.values['pubkey_hash'])
|
|
||||||
private_key = await ledger.get_private_key_for_address(wallet, address)
|
|
||||||
assert private_key is not None, 'Cannot find private key for signing output.'
|
|
||||||
tx = self._serialize_for_signature(i)
|
|
||||||
txi.script.values['signature'] = \
|
|
||||||
private_key.sign(tx) + bytes((self.signature_hash_type(1),))
|
|
||||||
txi.script.values['pubkey'] = private_key.public_key.pubkey_bytes
|
|
||||||
txi.script.generate()
|
|
||||||
else:
|
|
||||||
raise NotImplementedError("Don't know how to spend this output.")
|
|
||||||
self._reset()
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def pay(cls, amount: int, address: bytes, funding_accounts: List['Account'], change_account: 'Account'):
|
|
||||||
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
|
|
||||||
output = Output.pay_pubkey_hash(amount, ledger.address_to_hash160(address))
|
|
||||||
return cls.create([], [output], funding_accounts, change_account)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def claim_create(
|
|
||||||
cls, name: str, claim: Claim, amount: int, holding_address: str,
|
|
||||||
funding_accounts: List['Account'], change_account: 'Account', signing_channel: Output = None):
|
|
||||||
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
|
|
||||||
claim_output = Output.pay_claim_name_pubkey_hash(
|
|
||||||
amount, name, claim, ledger.address_to_hash160(holding_address)
|
|
||||||
)
|
|
||||||
if signing_channel is not None:
|
|
||||||
claim_output.sign(signing_channel, b'placeholder txid:nout')
|
|
||||||
return cls.create([], [claim_output], funding_accounts, change_account, sign=False)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def claim_update(
|
|
||||||
cls, previous_claim: Output, claim: Claim, amount: int, holding_address: str,
|
|
||||||
funding_accounts: List['Account'], change_account: 'Account', signing_channel: Output = None):
|
|
||||||
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
|
|
||||||
updated_claim = Output.pay_update_claim_pubkey_hash(
|
|
||||||
amount, previous_claim.claim_name, previous_claim.claim_id,
|
|
||||||
claim, ledger.address_to_hash160(holding_address)
|
|
||||||
)
|
|
||||||
if signing_channel is not None:
|
|
||||||
updated_claim.sign(signing_channel, b'placeholder txid:nout')
|
|
||||||
else:
|
|
||||||
updated_claim.clear_signature()
|
|
||||||
return cls.create(
|
|
||||||
[Input.spend(previous_claim)], [updated_claim], funding_accounts, change_account, sign=False
|
|
||||||
)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def support(cls, claim_name: str, claim_id: str, amount: int, holding_address: str,
|
|
||||||
funding_accounts: List['Account'], change_account: 'Account'):
|
|
||||||
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
|
|
||||||
support_output = Output.pay_support_pubkey_hash(
|
|
||||||
amount, claim_name, claim_id, ledger.address_to_hash160(holding_address)
|
|
||||||
)
|
|
||||||
return cls.create([], [support_output], funding_accounts, change_account)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def purchase(cls, claim_id: str, amount: int, merchant_address: bytes,
|
|
||||||
funding_accounts: List['Account'], change_account: 'Account'):
|
|
||||||
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
|
|
||||||
payment = Output.pay_pubkey_hash(amount, ledger.address_to_hash160(merchant_address))
|
|
||||||
data = Output.add_purchase_data(Purchase(claim_id))
|
|
||||||
return cls.create([], [payment, data], funding_accounts, change_account)
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def my_inputs(self):
|
def my_inputs(self):
|
||||||
for txi in self.inputs:
|
for txi in self.inputs:
|
|
@@ -1,28 +1,4 @@
|
||||||
import re
|
|
||||||
from typing import TypeVar, Sequence, Optional
|
from typing import TypeVar, Sequence, Optional
|
||||||
from .constants import COIN
|
|
||||||
|
|
||||||
|
|
||||||
def date_to_julian_day(d):
|
|
||||||
return d.toordinal() + 1721424.5
|
|
||||||
|
|
||||||
|
|
||||||
def coins_to_satoshis(coins):
|
|
||||||
if not isinstance(coins, str):
|
|
||||||
raise ValueError("{coins} must be a string")
|
|
||||||
result = re.search(r'^(\d{1,10})\.(\d{1,8})$', coins)
|
|
||||||
if result is not None:
|
|
||||||
whole, fractional = result.groups()
|
|
||||||
return int(whole+fractional.ljust(8, "0"))
|
|
||||||
raise ValueError("'{lbc}' is not a valid coin decimal")
|
|
||||||
|
|
||||||
|
|
||||||
def satoshis_to_coins(satoshis):
|
|
||||||
coins = '{:.8f}'.format(satoshis / COIN).rstrip('0')
|
|
||||||
if coins.endswith('.'):
|
|
||||||
return coins+'0'
|
|
||||||
else:
|
|
||||||
return coins
|
|
||||||
|
|
||||||
|
|
||||||
T = TypeVar('T')
|
T = TypeVar('T')
|
||||||
|
@@ -40,22 +16,6 @@ class ReadOnlyList(Sequence[T]):
|
||||||
return len(self.lst)
|
return len(self.lst)
|
||||||
|
|
||||||
|
|
||||||
def subclass_tuple(name, base):
|
|
||||||
return type(name, (base,), {'__slots__': ()})
|
|
||||||
|
|
||||||
|
|
||||||
class cachedproperty:
|
|
||||||
|
|
||||||
def __init__(self, f):
|
|
||||||
self.f = f
|
|
||||||
|
|
||||||
def __get__(self, obj, objtype):
|
|
||||||
obj = obj or objtype
|
|
||||||
value = self.f(obj)
|
|
||||||
setattr(obj, self.f.__name__, value)
|
|
||||||
return value
|
|
||||||
|
|
||||||
|
|
||||||
class ArithUint256:
|
class ArithUint256:
|
||||||
# https://github.com/bitcoin/bitcoin/blob/master/src/arith_uint256.cpp
|
# https://github.com/bitcoin/bitcoin/blob/master/src/arith_uint256.cpp
|
||||||
|
|
|
@@ -1,79 +1,17 @@
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
import shutil
|
|
||||||
import signal
|
|
||||||
import pathlib
|
|
||||||
import json
|
|
||||||
import asyncio
|
import asyncio
|
||||||
|
import pathlib
|
||||||
import argparse
|
import argparse
|
||||||
import logging
|
import textwrap
|
||||||
import logging.handlers
|
import subprocess
|
||||||
|
|
||||||
import aiohttp
|
|
||||||
from aiohttp.web import GracefulExit
|
|
||||||
from docopt import docopt
|
from docopt import docopt
|
||||||
|
|
||||||
from lbry import __version__ as lbrynet_version
|
from lbry import __version__
|
||||||
from lbry.extras.daemon.loggly_handler import get_loggly_handler
|
|
||||||
from lbry.extras.daemon.daemon import Daemon
|
|
||||||
from lbry.conf import Config, CLIConfig
|
from lbry.conf import Config, CLIConfig
|
||||||
|
from lbry.service import Daemon, Client, FullNode, LightClient
|
||||||
log = logging.getLogger('lbry')
|
from lbry.service.metadata import interface
|
||||||
|
|
||||||
|
|
||||||
def display(data):
|
|
||||||
print(json.dumps(data, indent=2))
|
|
||||||
|
|
||||||
|
|
||||||
async def execute_command(conf, method, params, callback=display):
|
|
||||||
async with aiohttp.ClientSession() as session:
|
|
||||||
try:
|
|
||||||
message = {'method': method, 'params': params}
|
|
||||||
async with session.get(conf.api_connection_url, json=message) as resp:
|
|
||||||
try:
|
|
||||||
data = await resp.json()
|
|
||||||
if 'result' in data:
|
|
||||||
return callback(data['result'])
|
|
||||||
elif 'error' in data:
|
|
||||||
return callback(data['error'])
|
|
||||||
except Exception as e:
|
|
||||||
log.exception('Could not process response from server:', exc_info=e)
|
|
||||||
except aiohttp.ClientConnectionError:
|
|
||||||
print("Could not connect to daemon. Are you sure it's running?")
|
|
||||||
|
|
||||||
|
|
||||||
def normalize_value(x, key=None):
|
|
||||||
if not isinstance(x, str):
|
|
||||||
return x
|
|
||||||
if key in ('uri', 'channel_name', 'name', 'file_name', 'claim_name', 'download_directory'):
|
|
||||||
return x
|
|
||||||
if x.lower() == 'true':
|
|
||||||
return True
|
|
||||||
if x.lower() == 'false':
|
|
||||||
return False
|
|
||||||
if x.isdigit():
|
|
||||||
return int(x)
|
|
||||||
return x
|
|
||||||
|
|
||||||
|
|
||||||
def remove_brackets(key):
|
|
||||||
if key.startswith("<") and key.endswith(">"):
|
|
||||||
return str(key[1:-1])
|
|
||||||
return key
|
|
||||||
|
|
||||||
|
|
||||||
def set_kwargs(parsed_args):
|
|
||||||
kwargs = {}
|
|
||||||
for key, arg in parsed_args.items():
|
|
||||||
if arg is None:
|
|
||||||
continue
|
|
||||||
k = None
|
|
||||||
if key.startswith("--") and remove_brackets(key[2:]) not in kwargs:
|
|
||||||
k = remove_brackets(key[2:])
|
|
||||||
elif remove_brackets(key) not in kwargs:
|
|
||||||
k = remove_brackets(key)
|
|
||||||
kwargs[k] = normalize_value(arg, k)
|
|
||||||
return kwargs
|
|
||||||
|
|
||||||
|
|
||||||
def split_subparser_argument(parent, original, name, condition):
|
def split_subparser_argument(parent, original, name, condition):
|
||||||
|
@@ -102,7 +40,7 @@ class ArgumentParser(argparse.ArgumentParser):
|
||||||
self._optionals.title = 'Options'
|
self._optionals.title = 'Options'
|
||||||
if group_name is None:
|
if group_name is None:
|
||||||
self.epilog = (
|
self.epilog = (
|
||||||
f"Run 'lbrynet COMMAND --help' for more information on a command or group."
|
"Run 'lbrynet COMMAND --help' for more information on a command or group."
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
self.epilog = (
|
self.epilog = (
|
||||||
|
@@ -154,17 +92,10 @@ class HelpFormatter(argparse.HelpFormatter):
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def add_command_parser(parent, command):
|
def add_command_parser(parent, method_name, command):
|
||||||
subcommand = parent.add_parser(
|
short = command['desc']['text'][0] if command['desc'] else ''
|
||||||
command['name'],
|
subcommand = parent.add_parser(command['name'], help=short)
|
||||||
help=command['doc'].strip().splitlines()[0]
|
subcommand.set_defaults(api_method_name=method_name, command=command['name'], doc=command['help'])
|
||||||
)
|
|
||||||
subcommand.set_defaults(
|
|
||||||
api_method_name=command['api_method_name'],
|
|
||||||
command=command['name'],
|
|
||||||
doc=command['doc'],
|
|
||||||
replaced_by=command.get('replaced_by', None)
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def get_argument_parser():
|
def get_argument_parser():
|
||||||
|
@@ -183,6 +114,9 @@ def get_argument_parser():
|
||||||
usage='lbrynet start [--config FILE] [--data-dir DIR] [--wallet-dir DIR] [--download-dir DIR] ...',
|
usage='lbrynet start [--config FILE] [--data-dir DIR] [--wallet-dir DIR] [--download-dir DIR] ...',
|
||||||
help='Start LBRY Network interface.'
|
help='Start LBRY Network interface.'
|
||||||
)
|
)
|
||||||
|
start.add_argument(
|
||||||
|
"service", choices=[LightClient.name, FullNode.name], default=LightClient.name, nargs="?"
|
||||||
|
)
|
||||||
start.add_argument(
|
start.add_argument(
|
||||||
'--quiet', dest='quiet', action="store_true",
|
'--quiet', dest='quiet', action="store_true",
|
||||||
help='Disable all console output.'
|
help='Disable all console output.'
|
||||||
|
@@ -200,26 +134,32 @@ def get_argument_parser():
|
||||||
'--initial-headers', dest='initial_headers',
|
'--initial-headers', dest='initial_headers',
|
||||||
help='Specify path to initial blockchain headers, faster than downloading them on first run.'
|
help='Specify path to initial blockchain headers, faster than downloading them on first run.'
|
||||||
)
|
)
|
||||||
|
install = sub.add_parser("install", help="Install lbrynet with various system services.")
|
||||||
|
install.add_argument("system", choices=["systemd"])
|
||||||
|
install.add_argument(
|
||||||
|
"--global", dest="install_global", action="store_true",
|
||||||
|
help="Install system wide (requires running as root), default is for current user only."
|
||||||
|
)
|
||||||
Config.contribute_to_argparse(start)
|
Config.contribute_to_argparse(start)
|
||||||
start.set_defaults(command='start', start_parser=start, doc=start.format_help())
|
start.set_defaults(command='start', start_parser=start, doc=start.format_help())
|
||||||
|
install.set_defaults(command='install', install_parser=install, doc=install.format_help())
|
||||||
|
|
||||||
api = Daemon.get_api_definitions()
|
|
||||||
groups = {}
|
groups = {}
|
||||||
for group_name in sorted(api['groups']):
|
for group_name in sorted(interface['groups']):
|
||||||
group_parser = sub.add_parser(group_name, group_name=group_name, help=api['groups'][group_name])
|
group_parser = sub.add_parser(group_name, group_name=group_name, help=interface['groups'][group_name])
|
||||||
groups[group_name] = group_parser.add_subparsers(metavar='COMMAND')
|
groups[group_name] = group_parser.add_subparsers(metavar='COMMAND')
|
||||||
|
|
||||||
nicer_order = ['stop', 'get', 'publish', 'resolve']
|
nicer_order = ['stop', 'get', 'publish', 'resolve']
|
||||||
for command_name in sorted(api['commands']):
|
for command_name in sorted(interface['commands']):
|
||||||
if command_name not in nicer_order:
|
if command_name not in nicer_order:
|
||||||
nicer_order.append(command_name)
|
nicer_order.append(command_name)
|
||||||
|
|
||||||
for command_name in nicer_order:
|
for command_name in nicer_order:
|
||||||
command = api['commands'][command_name]
|
command = interface['commands'][command_name]
|
||||||
if command['group'] is None:
|
if command.get('group') is None:
|
||||||
add_command_parser(sub, command)
|
add_command_parser(sub, command_name, command)
|
||||||
else:
|
else:
|
||||||
add_command_parser(groups[command['group']], command)
|
add_command_parser(groups[command['group']], command_name, command)
|
||||||
|
|
||||||
return root
|
return root
|
||||||
|
|
||||||
|
@@ -229,65 +169,64 @@ def ensure_directory_exists(path: str):
|
||||||
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
|
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
|
||||||
LOG_MODULES = 'lbry', 'aioupnp'
|
async def execute_command(conf, method, params):
|
||||||
|
client = Client(f"http://{conf.api}/ws")
|
||||||
|
await client.connect()
|
||||||
|
responses = await client.send(method, **params)
|
||||||
|
result = await responses.first
|
||||||
|
await client.disconnect()
|
||||||
|
print(result)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
def setup_logging(logger: logging.Logger, args: argparse.Namespace, conf: Config):
|
def normalize_value(x, key=None):
|
||||||
default_formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(name)s:%(lineno)d: %(message)s")
|
if not isinstance(x, str):
|
||||||
file_handler = logging.handlers.RotatingFileHandler(conf.log_file_path, maxBytes=2097152, backupCount=5)
|
return x
|
||||||
file_handler.setFormatter(default_formatter)
|
if key in ('uri', 'channel_name', 'name', 'file_name', 'claim_name', 'download_directory'):
|
||||||
for module_name in LOG_MODULES:
|
return x
|
||||||
logger.getChild(module_name).addHandler(file_handler)
|
if x.lower() == 'true':
|
||||||
if not args.quiet:
|
return True
|
||||||
handler = logging.StreamHandler()
|
if x.lower() == 'false':
|
||||||
handler.setFormatter(default_formatter)
|
return False
|
||||||
for module_name in LOG_MODULES:
|
if x.isdigit():
|
||||||
logger.getChild(module_name).addHandler(handler)
|
return int(x)
|
||||||
|
return x
|
||||||
logger.getChild('lbry').setLevel(logging.INFO)
|
|
||||||
logger.getChild('aioupnp').setLevel(logging.WARNING)
|
|
||||||
logger.getChild('aiohttp').setLevel(logging.CRITICAL)
|
|
||||||
|
|
||||||
if args.verbose is not None:
|
|
||||||
if len(args.verbose) > 0:
|
|
||||||
for module in args.verbose:
|
|
||||||
logger.getChild(module).setLevel(logging.DEBUG)
|
|
||||||
else:
|
|
||||||
logger.getChild('lbry').setLevel(logging.DEBUG)
|
|
||||||
|
|
||||||
loggly_handler = get_loggly_handler(conf)
|
|
||||||
loggly_handler.setLevel(logging.ERROR)
|
|
||||||
logger.getChild('lbry').addHandler(loggly_handler)
|
|
||||||
|
|
||||||
|
|
||||||
def run_daemon(args: argparse.Namespace, conf: Config):
|
def remove_brackets(key):
|
||||||
loop = asyncio.get_event_loop()
|
if key.startswith("<") and key.endswith(">"):
|
||||||
if args.verbose is not None:
|
return str(key[1:-1])
|
||||||
loop.set_debug(True)
|
return key
|
||||||
if not args.no_logging:
|
|
||||||
setup_logging(logging.getLogger(), args, conf)
|
|
||||||
daemon = Daemon(conf)
|
|
||||||
|
|
||||||
def __exit():
|
|
||||||
raise GracefulExit()
|
|
||||||
|
|
||||||
try:
|
def set_kwargs(parsed_args):
|
||||||
loop.add_signal_handler(signal.SIGINT, __exit)
|
kwargs = {}
|
||||||
loop.add_signal_handler(signal.SIGTERM, __exit)
|
for key, arg in parsed_args.items():
|
||||||
except NotImplementedError:
|
if arg is None:
|
||||||
pass # Not implemented on Windows
|
continue
|
||||||
|
k = None
|
||||||
|
if key.startswith("--") and remove_brackets(key[2:]) not in kwargs:
|
||||||
|
k = remove_brackets(key[2:])
|
||||||
|
elif remove_brackets(key) not in kwargs:
|
||||||
|
k = remove_brackets(key)
|
||||||
|
kwargs[k] = normalize_value(arg, k)
|
||||||
|
return kwargs
|
||||||
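To make the docopt post-processing above concrete, a small worked example (the parsed dict is hypothetical docopt output; the helpers are the ones defined in this hunk):

    parsed = {'--file_name': None, '--save': 'true', '--blobs': '3', '<uri>': 'lbry://example'}
    set_kwargs(parsed)
    # -> {'save': True, 'blobs': 3, 'uri': 'lbry://example'}
    # None values are dropped, 'true'/'false' become booleans, digit strings become ints,
    # and 'uri' stays a string because it is on normalize_value's exempt key list.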
|
|
||||||
try:
|
|
||||||
loop.run_until_complete(daemon.start())
|
|
||||||
loop.run_forever()
|
|
||||||
except (GracefulExit, KeyboardInterrupt, asyncio.CancelledError):
|
|
||||||
pass
|
|
||||||
finally:
|
|
||||||
loop.run_until_complete(daemon.stop())
|
|
||||||
logging.shutdown()
|
|
||||||
|
|
||||||
if hasattr(loop, 'shutdown_asyncgens'):
|
def install_systemd_service():
|
||||||
loop.run_until_complete(loop.shutdown_asyncgens())
|
systemd_service = textwrap.dedent(f"""\
|
||||||
|
[Unit]
|
||||||
|
Description=LBRYnet
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
ExecStart={sys.argv[0]} start --full-node
|
||||||
|
""")
|
||||||
|
subprocess.run(
|
||||||
|
["systemctl", "edit", "--user", "--full", "--force", "lbrynet.service"],
|
||||||
|
input=systemd_service, text=True, check=True,
|
||||||
|
env=dict(os.environ, SYSTEMD_EDITOR="cp /dev/stdin"),
|
||||||
|
)
|
||||||
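install_systemd_service() above writes a per-user unit through `systemctl edit --user --force`; a follow-up an operator would likely run next (an assumption about workflow, not something added by this branch):

    import subprocess
    # standard systemd user-session commands to enable and start the new unit
    subprocess.run(["systemctl", "--user", "daemon-reload"], check=True)
    subprocess.run(["systemctl", "--user", "enable", "--now", "lbrynet.service"], check=True)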
|
|
||||||
|
|
||||||
def main(argv=None):
|
def main(argv=None):
|
||||||
|
@@ -295,38 +234,39 @@ def main(argv=None):
|
||||||
parser = get_argument_parser()
|
parser = get_argument_parser()
|
||||||
args, command_args = parser.parse_known_args(argv)
|
args, command_args = parser.parse_known_args(argv)
|
||||||
|
|
||||||
conf = Config.create_from_arguments(args)
|
conf = Config()
|
||||||
|
conf.set_arguments(args)
|
||||||
|
conf.set_environment()
|
||||||
|
conf.set_default_paths()
|
||||||
|
conf.set_persisted()
|
||||||
for directory in (conf.data_dir, conf.download_dir, conf.wallet_dir):
|
for directory in (conf.data_dir, conf.download_dir, conf.wallet_dir):
|
||||||
ensure_directory_exists(directory)
|
ensure_directory_exists(directory)
|
||||||
|
|
||||||
if args.cli_version:
|
if args.cli_version:
|
||||||
print(f"lbrynet {lbrynet_version}")
|
print(f"lbrynet {__version__}")
|
||||||
elif args.command == 'start':
|
elif args.command == 'start':
|
||||||
if args.help:
|
if args.help:
|
||||||
args.start_parser.print_help()
|
args.start_parser.print_help()
|
||||||
|
elif args.service == FullNode.name:
|
||||||
|
return Daemon.from_config(FullNode, conf).run()
|
||||||
else:
|
else:
|
||||||
if args.initial_headers:
|
print(f'Only `start {FullNode.name}` is currently supported.')
|
||||||
ledger_path = os.path.join(conf.wallet_dir, 'lbc_mainnet')
|
elif args.command == 'install':
|
||||||
ensure_directory_exists(ledger_path)
|
if args.help:
|
||||||
current_size = 0
|
args.install_parser.print_help()
|
||||||
headers_path = os.path.join(ledger_path, 'headers')
|
elif args.system == 'systemd':
|
||||||
if os.path.exists(headers_path):
|
install_systemd_service()
|
||||||
current_size = os.stat(headers_path).st_size
|
|
||||||
if os.stat(args.initial_headers).st_size > current_size:
|
|
||||||
log.info('Copying header from %s to %s', args.initial_headers, headers_path)
|
|
||||||
shutil.copy(args.initial_headers, headers_path)
|
|
||||||
run_daemon(args, conf)
|
|
||||||
elif args.command is not None:
|
elif args.command is not None:
|
||||||
doc = args.doc
|
doc = args.doc
|
||||||
api_method_name = args.api_method_name
|
api_method_name = args.api_method_name
|
||||||
if args.replaced_by:
|
|
||||||
print(f"{args.api_method_name} is deprecated, using {args.replaced_by['api_method_name']}.")
|
|
||||||
doc = args.replaced_by['doc']
|
|
||||||
api_method_name = args.replaced_by['api_method_name']
|
|
||||||
if args.help:
|
if args.help:
|
||||||
print(doc)
|
print(doc)
|
||||||
else:
|
else:
|
||||||
parsed = docopt(doc, command_args)
|
parsed = docopt(
|
||||||
|
# TODO: ugly hack because docopt doesn't support commands with spaces in them
|
||||||
|
doc.replace(api_method_name.replace('_', ' '), api_method_name, 1),
|
||||||
|
command_args
|
||||||
|
)
|
||||||
params = set_kwargs(parsed)
|
params = set_kwargs(parsed)
|
||||||
asyncio.get_event_loop().run_until_complete(execute_command(conf, api_method_name, params))
|
asyncio.get_event_loop().run_until_complete(execute_command(conf, api_method_name, params))
|
||||||
elif args.group is not None:
|
elif args.group is not None:
|
194  lbry/conf.py
@@ -1,16 +1,17 @@
|
||||||
import os
|
import os
|
||||||
import re
|
|
||||||
import sys
|
import sys
|
||||||
import typing
|
import typing
|
||||||
import logging
|
import logging
|
||||||
from argparse import ArgumentParser
|
from argparse import ArgumentParser
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
|
from typing import Tuple
|
||||||
|
|
||||||
import yaml
|
import yaml
|
||||||
from appdirs import user_data_dir, user_config_dir
|
from lbry.utils.dirs import user_data_dir, user_download_dir
|
||||||
from lbry.error import InvalidCurrencyError
|
from lbry.error import InvalidCurrencyError
|
||||||
from lbry.dht import constants
|
from lbry.dht import constants
|
||||||
from lbry.wallet.coinselection import STRATEGIES
|
from lbry.wallet.coinselection import COIN_SELECTION_STRATEGIES
|
||||||
|
from lbry.event import EventRegistry
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
@@ -192,7 +193,7 @@ class MaxKeyFee(Setting[dict]):
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
self.no_cli_name,
|
self.no_cli_name,
|
||||||
help=f"Disable maximum key fee check.",
|
help="Disable maximum key fee check.",
|
||||||
dest=self.name,
|
dest=self.name,
|
||||||
const=None,
|
const=None,
|
||||||
action="store_const",
|
action="store_const",
|
||||||
|
@@ -325,7 +326,7 @@ class ConfigFileAccess:
|
||||||
cls = type(self.configuration)
|
cls = type(self.configuration)
|
||||||
with open(self.path, 'r') as config_file:
|
with open(self.path, 'r') as config_file:
|
||||||
raw = config_file.read()
|
raw = config_file.read()
|
||||||
serialized = yaml.load(raw) or {}
|
serialized = yaml.full_load(raw) or {}
|
||||||
for key, value in serialized.items():
|
for key, value in serialized.items():
|
||||||
attr = getattr(cls, key, None)
|
attr = getattr(cls, key, None)
|
||||||
if attr is None:
|
if attr is None:
|
||||||
|
@@ -382,8 +383,13 @@ class BaseConfig:
|
||||||
self.environment = {} # from environment variables
|
self.environment = {} # from environment variables
|
||||||
self.persisted = {} # from config file
|
self.persisted = {} # from config file
|
||||||
self._updating_config = False
|
self._updating_config = False
|
||||||
|
self.events = EventRegistry()
|
||||||
|
self.set(**kwargs)
|
||||||
|
|
||||||
|
def set(self, **kwargs):
|
||||||
for key, value in kwargs.items():
|
for key, value in kwargs.items():
|
||||||
setattr(self, key, value)
|
setattr(self, key, value)
|
||||||
|
return self
|
||||||
|
|
||||||
@contextmanager
|
@contextmanager
|
||||||
def update_config(self):
|
def update_config(self):
|
||||||
|
@@ -500,19 +506,30 @@ class CLIConfig(TranscodeConfig):
|
||||||
|
|
||||||
|
|
||||||
class Config(CLIConfig):
|
class Config(CLIConfig):
|
||||||
|
db_url = String("Database connection URL, uses a local file based SQLite by default.")
|
||||||
|
workers = Integer(
|
||||||
|
"Multiprocessing, specify number of worker processes lbrynet can start (including main process)."
|
||||||
|
" (-1: threads only, 0: equal to number of CPUs, >1: specific number of processes)", -1
|
||||||
|
)
|
||||||
|
console = StringChoice(
|
||||||
|
"Basic text console output or advanced colored output with progress bars.",
|
||||||
|
["basic", "advanced", "none"], "advanced"
|
||||||
|
)
|
||||||
|
|
||||||
# directories
|
# directories
|
||||||
data_dir = Path("Directory path to store blobs.", metavar='DIR')
|
download_dir = Path("Directory to store downloaded files.", metavar='DIR')
|
||||||
download_dir = Path(
|
data_dir = Path("Main directory containing blobs, wallets and blockchain data.", metavar='DIR')
|
||||||
"Directory path to place assembled files downloaded from LBRY.",
|
blob_dir = Path("Directory to store blobs (default: 'data_dir'/blobs).", metavar='DIR')
|
||||||
previous_names=['download_directory'], metavar='DIR'
|
wallet_dir = Path("Directory to store wallets (default: 'data_dir'/wallets).", metavar='DIR')
|
||||||
)
|
wallet_storage = StringChoice("Wallet storage mode.", ["file", "database"], "file")
|
||||||
wallet_dir = Path(
|
|
||||||
"Directory containing a 'wallets' subdirectory with 'default_wallet' file.",
|
|
||||||
previous_names=['lbryum_wallet_dir'], metavar='DIR'
|
|
||||||
)
|
|
||||||
wallets = Strings(
|
wallets = Strings(
|
||||||
"Wallet files in 'wallet_dir' to load at startup.",
|
"Wallet files in 'wallet_dir' to load at startup.", ['default_wallet']
|
||||||
['default_wallet']
|
)
|
||||||
|
create_default_wallet = Toggle(
|
||||||
|
"Create an initial wallet if it does not exist on startup.", True
|
||||||
|
)
|
||||||
|
create_default_account = Toggle(
|
||||||
|
"Create an initial account if it does not exist in the default wallet.", True
|
||||||
)
|
)
|
||||||
|
|
||||||
# network
|
# network
|
||||||
|
@@ -580,7 +597,7 @@ class Config(CLIConfig):
|
||||||
reflector_servers = Servers("Reflector re-hosting servers", [
|
reflector_servers = Servers("Reflector re-hosting servers", [
|
||||||
('reflector.lbry.com', 5566)
|
('reflector.lbry.com', 5566)
|
||||||
])
|
])
|
||||||
lbryum_servers = Servers("SPV wallet servers", [
|
known_full_nodes = Servers("Full blockchain nodes", [
|
||||||
('spv11.lbry.com', 50001),
|
('spv11.lbry.com', 50001),
|
||||||
('spv12.lbry.com', 50001),
|
('spv12.lbry.com', 50001),
|
||||||
('spv13.lbry.com', 50001),
|
('spv13.lbry.com', 50001),
|
||||||
|
@@ -601,7 +618,16 @@ class Config(CLIConfig):
|
||||||
comment_server = String("Comment server API URL", "https://comments.lbry.com/api")
|
comment_server = String("Comment server API URL", "https://comments.lbry.com/api")
|
||||||
|
|
||||||
# blockchain
|
# blockchain
|
||||||
blockchain_name = String("Blockchain name - lbrycrd_main, lbrycrd_regtest, or lbrycrd_testnet", 'lbrycrd_main')
|
blockchain = StringChoice("Blockchain network type.", ["mainnet", "regtest", "testnet"], "mainnet")
|
||||||
|
lbrycrd_rpc_user = String("Username for connecting to lbrycrd.", "rpcuser")
|
||||||
|
lbrycrd_rpc_pass = String("Password for connecting to lbrycrd.", "rpcpassword")
|
||||||
|
lbrycrd_rpc_host = String("Hostname for connecting to lbrycrd.", "localhost")
|
||||||
|
lbrycrd_rpc_port = Integer("Port for connecting to lbrycrd.", 9245)
|
||||||
|
lbrycrd_peer_port = Integer("Peer port for lbrycrd.", 9246)
|
||||||
|
lbrycrd_zmq = String("ZMQ events address.")
|
||||||
|
lbrycrd_dir = Path("Directory containing lbrycrd data.", metavar='DIR')
|
||||||
|
search_censor_channel_ids = Strings("List of channel ids for filtering out search results.", [])
|
||||||
|
resolve_censor_channel_ids = Strings("List of channel ids for filtering out resolve results.", [])
|
||||||
|
|
||||||
# daemon
|
# daemon
|
||||||
save_files = Toggle("Save downloaded files when calling `get` by default", True)
|
save_files = Toggle("Save downloaded files when calling `get` by default", True)
|
||||||
|
@@ -620,7 +646,7 @@ class Config(CLIConfig):
|
||||||
|
|
||||||
coin_selection_strategy = StringChoice(
|
coin_selection_strategy = StringChoice(
|
||||||
"Strategy to use when selecting UTXOs for a transaction",
|
"Strategy to use when selecting UTXOs for a transaction",
|
||||||
STRATEGIES, "standard")
|
COIN_SELECTION_STRATEGIES, "standard")
|
||||||
|
|
||||||
save_resolved_claims = Toggle(
|
save_resolved_claims = Toggle(
|
||||||
"Save content claims to the database when they are resolved to keep file_list up to date, "
|
"Save content claims to the database when they are resolved to keep file_list up to date, "
|
||||||
|
@@ -635,9 +661,18 @@ class Config(CLIConfig):
|
||||||
def streaming_port(self):
|
def streaming_port(self):
|
||||||
return int(self.streaming_server.split(':')[1])
|
return int(self.streaming_server.split(':')[1])
|
||||||
|
|
||||||
def __init__(self, **kwargs):
|
@classmethod
|
||||||
super().__init__(**kwargs)
|
def with_null_dir(cls):
|
||||||
self.set_default_paths()
|
return cls.with_same_dir('/dev/null')
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def with_same_dir(cls, same_dir):
|
||||||
|
return cls(
|
||||||
|
data_dir=same_dir,
|
||||||
|
download_dir=same_dir,
|
||||||
|
wallet_dir=same_dir,
|
||||||
|
lbrycrd_dir=same_dir,
|
||||||
|
)
|
||||||
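with_null_dir() and with_same_dir() give a compact way to point every directory setting at a single location, which the new lbry/db helpers later in this diff use for temporary setups; a hedged sketch (the scratch directory and the regtest/workers overrides are illustrative):

    import tempfile
    from lbry.conf import Config

    scratch = tempfile.mkdtemp()
    conf = Config.with_same_dir(scratch).set(blockchain='regtest', workers=0)
    # data_dir, download_dir, wallet_dir and lbrycrd_dir now all point at `scratch`;
    # with_null_dir() does the same with '/dev/null' for configs that must not persist.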
|
|
||||||
def set_default_paths(self):
|
def set_default_paths(self):
|
||||||
if 'darwin' in sys.platform.lower():
|
if 'darwin' in sys.platform.lower():
|
||||||
|
@@ -649,61 +684,76 @@ class Config(CLIConfig):
|
||||||
else:
|
else:
|
||||||
return
|
return
|
||||||
cls = type(self)
|
cls = type(self)
|
||||||
cls.data_dir.default, cls.wallet_dir.default, cls.download_dir.default = get_directories()
|
cls.data_dir.default, cls.wallet_dir.default,\
|
||||||
cls.config.default = os.path.join(
|
cls.blob_dir.default, cls.download_dir.default = get_directories()
|
||||||
self.data_dir, 'daemon_settings.yml'
|
old_settings_file = os.path.join(self.data_dir, 'daemon_settings.yml')
|
||||||
)
|
if os.path.exists(old_settings_file):
|
||||||
|
cls.config.default = old_settings_file
|
||||||
|
else:
|
||||||
|
cls.config.default = os.path.join(self.data_dir, 'settings.yml')
|
||||||
|
if self.data_dir != cls.data_dir.default:
|
||||||
|
cls.blob_dir.default = os.path.join(self.data_dir, 'blobs')
|
||||||
|
cls.wallet_dir.default = os.path.join(self.data_dir, 'wallets')
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def log_file_path(self):
|
def log_file_path(self):
|
||||||
return os.path.join(self.data_dir, 'lbrynet.log')
|
return os.path.join(self.data_dir, 'daemon.log')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def db_url_or_default(self):
|
||||||
|
if self.db_url:
|
||||||
|
return self.db_url
|
||||||
|
return 'sqlite:///'+os.path.join(self.data_dir, f'{self.blockchain}.db')
|
||||||
|
|
||||||
|
|
||||||
def get_windows_directories() -> typing.Tuple[str, str, str]:
|
def get_windows_directories() -> Tuple[str, str, str, str]:
|
||||||
from lbry.winpaths import get_path, FOLDERID, UserHandle, \
|
# very old
|
||||||
PathNotFoundException # pylint: disable=import-outside-toplevel
|
data_dir = user_data_dir('lbrynet', roaming=True)
|
||||||
|
blob_dir = os.path.join(data_dir, 'blobfiles')
|
||||||
try:
|
wallet_dir = os.path.join(user_data_dir('lbryum', roaming=True), 'wallets')
|
||||||
download_dir = get_path(FOLDERID.Downloads, UserHandle.current)
|
if os.path.isdir(blob_dir) or os.path.isdir(wallet_dir):
|
||||||
except PathNotFoundException:
|
return data_dir, wallet_dir, blob_dir, user_download_dir()
|
||||||
download_dir = os.getcwd()
|
|
||||||
|
|
||||||
# old
|
# old
|
||||||
appdata = get_path(FOLDERID.RoamingAppData, UserHandle.current)
|
|
||||||
data_dir = os.path.join(appdata, 'lbrynet')
|
|
||||||
lbryum_dir = os.path.join(appdata, 'lbryum')
|
|
||||||
if os.path.isdir(data_dir) or os.path.isdir(lbryum_dir):
|
|
||||||
return data_dir, lbryum_dir, download_dir
|
|
||||||
|
|
||||||
# new
|
|
||||||
data_dir = user_data_dir('lbrynet', 'lbry')
|
data_dir = user_data_dir('lbrynet', 'lbry')
|
||||||
lbryum_dir = user_data_dir('lbryum', 'lbry')
|
blob_dir = os.path.join(data_dir, 'blobfiles')
|
||||||
return data_dir, lbryum_dir, download_dir
|
wallet_dir = os.path.join(user_data_dir('lbryum', 'lbry'), 'wallets')
|
||||||
|
if os.path.isdir(blob_dir) and os.path.isdir(wallet_dir):
|
||||||
|
return data_dir, wallet_dir, blob_dir, user_download_dir()
|
||||||
def get_darwin_directories() -> typing.Tuple[str, str, str]:
|
|
||||||
data_dir = user_data_dir('LBRY')
|
|
||||||
lbryum_dir = os.path.expanduser('~/.lbryum')
|
|
||||||
download_dir = os.path.expanduser('~/Downloads')
|
|
||||||
return data_dir, lbryum_dir, download_dir
|
|
||||||
|
|
||||||
|
|
||||||
def get_linux_directories() -> typing.Tuple[str, str, str]:
|
|
||||||
try:
|
|
||||||
with open(os.path.join(user_config_dir(), 'user-dirs.dirs'), 'r') as xdg:
|
|
||||||
down_dir = re.search(r'XDG_DOWNLOAD_DIR=(.+)', xdg.read()).group(1)
|
|
||||||
down_dir = re.sub(r'\$HOME', os.getenv('HOME') or os.path.expanduser("~/"), down_dir)
|
|
||||||
download_dir = re.sub('\"', '', down_dir)
|
|
||||||
except OSError:
|
|
||||||
download_dir = os.getenv('XDG_DOWNLOAD_DIR')
|
|
||||||
if not download_dir:
|
|
||||||
download_dir = os.path.expanduser('~/Downloads')
|
|
||||||
|
|
||||||
# old
|
|
||||||
data_dir = os.path.expanduser('~/.lbrynet')
|
|
||||||
lbryum_dir = os.path.expanduser('~/.lbryum')
|
|
||||||
if os.path.isdir(data_dir) or os.path.isdir(lbryum_dir):
|
|
||||||
return data_dir, lbryum_dir, download_dir
|
|
||||||
|
|
||||||
# new
|
# new
|
||||||
return user_data_dir('lbry/lbrynet'), user_data_dir('lbry/lbryum'), download_dir
|
return get_universal_directories()
|
||||||
|
|
||||||
|
|
||||||
|
def get_darwin_directories() -> Tuple[str, str, str, str]:
|
||||||
|
data_dir = user_data_dir('LBRY')
|
||||||
|
blob_dir = os.path.join(data_dir, 'blobfiles')
|
||||||
|
wallet_dir = os.path.expanduser('~/.lbryum/wallets')
|
||||||
|
if os.path.isdir(blob_dir) or os.path.isdir(wallet_dir):
|
||||||
|
return data_dir, wallet_dir, blob_dir, user_download_dir()
|
||||||
|
return get_universal_directories()
|
||||||
|
|
||||||
|
|
||||||
|
def get_linux_directories() -> Tuple[str, str, str, str]:
|
||||||
|
# very old
|
||||||
|
data_dir = os.path.expanduser('~/.lbrynet')
|
||||||
|
blob_dir = os.path.join(data_dir, 'blobfiles')
|
||||||
|
wallet_dir = os.path.join(os.path.expanduser('~/.lbryum'), 'wallets')
|
||||||
|
if os.path.isdir(blob_dir) or os.path.isdir(wallet_dir):
|
||||||
|
return data_dir, wallet_dir, blob_dir, user_download_dir()
|
||||||
|
# old
|
||||||
|
data_dir = user_data_dir('lbry/lbrynet')
|
||||||
|
blob_dir = os.path.join(data_dir, 'blobfiles')
|
||||||
|
wallet_dir = user_data_dir('lbry/lbryum/wallets')
|
||||||
|
if os.path.isdir(blob_dir) or os.path.isdir(wallet_dir):
|
||||||
|
return data_dir, wallet_dir, blob_dir, user_download_dir()
|
||||||
|
# new
|
||||||
|
return get_universal_directories()
|
||||||
|
|
||||||
|
|
||||||
|
def get_universal_directories() -> Tuple[str, str, str, str]:
|
||||||
|
lbrynet_dir = user_data_dir('lbrynet', 'LBRY')
|
||||||
|
return (
|
||||||
|
lbrynet_dir,
|
||||||
|
os.path.join(lbrynet_dir, 'wallets'),
|
||||||
|
os.path.join(lbrynet_dir, 'blobs'),
|
||||||
|
user_download_dir()
|
||||||
|
)
|
||||||
|
|
496  lbry/console.py  Normal file
@@ -0,0 +1,496 @@
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import itertools
|
||||||
|
import logging
|
||||||
|
from typing import Dict, Any, Type
|
||||||
|
from tempfile import TemporaryFile
|
||||||
|
|
||||||
|
from tqdm.std import tqdm, Bar
|
||||||
|
from tqdm.utils import FormatReplace, _unicode, disp_len, disp_trim, _is_ascii
|
||||||
|
|
||||||
|
from lbry import __version__
|
||||||
|
from lbry.service.base import Service
|
||||||
|
from lbry.service.full_node import FullNode
|
||||||
|
from lbry.service.light_client import LightClient
|
||||||
|
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class RedirectOutput:
|
||||||
|
|
||||||
|
silence_lines = [
|
||||||
|
b'libprotobuf ERROR google/protobuf/wire_format_lite.cc:626',
|
||||||
|
]
|
||||||
|
|
||||||
|
def __init__(self, stream_type: str):
|
||||||
|
assert stream_type in ('stderr', 'stdout')
|
||||||
|
self.stream_type = stream_type
|
||||||
|
self.stream_no = getattr(sys, stream_type).fileno()
|
||||||
|
self.last_flush = time.time()
|
||||||
|
self.last_read = 0
|
||||||
|
self.backup = None
|
||||||
|
self.file = None
|
||||||
|
|
||||||
|
def __enter__(self):
|
||||||
|
self.backup = os.dup(self.stream_no)
|
||||||
|
setattr(sys, self.stream_type, os.fdopen(self.backup, 'w'))
|
||||||
|
self.file = TemporaryFile()
|
||||||
|
self.backup = os.dup2(self.file.fileno(), self.stream_no)
|
||||||
|
|
||||||
|
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||||
|
self.file.close()
|
||||||
|
os.dup2(self.backup, self.stream_no)
|
||||||
|
os.close(self.backup)
|
||||||
|
setattr(sys, self.stream_type, os.fdopen(self.stream_no, 'w'))
|
||||||
|
|
||||||
|
def capture(self):
|
||||||
|
self.__enter__()
|
||||||
|
|
||||||
|
def release(self):
|
||||||
|
self.__exit__(None, None, None)
|
||||||
|
|
||||||
|
def flush(self, writer, force=False):
|
||||||
|
if not force and (time.time() - self.last_flush) < 5:
|
||||||
|
return
|
||||||
|
self.file.seek(self.last_read)
|
||||||
|
for line in self.file.readlines():
|
||||||
|
silence = False
|
||||||
|
for bad_line in self.silence_lines:
|
||||||
|
if bad_line in line:
|
||||||
|
silence = True
|
||||||
|
break
|
||||||
|
if not silence:
|
||||||
|
writer(line.decode().rstrip())
|
||||||
|
self.last_read = self.file.tell()
|
||||||
|
self.last_flush = time.time()
|
||||||
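A minimal usage sketch for the RedirectOutput helper above, showing the capture/flush/release cycle it is built around:

    redirect = RedirectOutput('stderr')
    redirect.capture()                     # C-level stderr now lands in a TemporaryFile
    try:
        pass  # ... run code whose stderr output should be filtered ...
    finally:
        redirect.flush(print, force=True)  # replay captured lines not matched by silence_lines
        redirect.release()                 # restore the real stderr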
|
|
||||||
|
|
||||||
|
class Console:
|
||||||
|
|
||||||
|
def __init__(self, service: Service):
|
||||||
|
self.service = service
|
||||||
|
|
||||||
|
def starting(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def stopping(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class Basic(Console):
|
||||||
|
|
||||||
|
def __init__(self, service: Service):
|
||||||
|
super().__init__(service)
|
||||||
|
self.service.sync.on_progress.listen(self.on_sync_progress)
|
||||||
|
self.tasks = {}
|
||||||
|
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-8s %(name)s:%(lineno)d: %(message)s")
|
||||||
|
|
||||||
|
def starting(self):
|
||||||
|
conf = self.service.conf
|
||||||
|
s = [f'LBRY v{__version__}']
|
||||||
|
if isinstance(self.service, FullNode):
|
||||||
|
s.append('Full Node')
|
||||||
|
elif isinstance(self.service, LightClient):
|
||||||
|
s.append('Light Client')
|
||||||
|
if conf.workers == -1:
|
||||||
|
s.append('Threads Only')
|
||||||
|
else:
|
||||||
|
workers = os.cpu_count() if conf.workers == 0 else conf.workers
|
||||||
|
s.append(f'{workers} Worker' if workers == 1 else f'{workers} Workers')
|
||||||
|
s.append(f'({os.cpu_count()} CPUs available)')
|
||||||
|
log.info(' '.join(s))
|
||||||
|
|
||||||
|
def stopping(self):
|
||||||
|
log.info('exiting')
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def maybe_log_progress(event, done, total, last):
|
||||||
|
if done == 0:
|
||||||
|
log.info("%s 0%%", event)
|
||||||
|
return 0
|
||||||
|
elif done == total:
|
||||||
|
log.info("%s 100%%", event)
|
||||||
|
return 1
|
||||||
|
else:
|
||||||
|
percent = done/total
|
||||||
|
if percent >= 0.25 > last:
|
||||||
|
log.info("%s 25%%", event)
|
||||||
|
return 0.25
|
||||||
|
elif percent >= 0.50 > last:
|
||||||
|
log.info("%s 50%%", event)
|
||||||
|
return 0.50
|
||||||
|
elif percent >= 0.75 > last:
|
||||||
|
log.info("%s 75%%", event)
|
||||||
|
return 0.75
|
||||||
|
return last
|
||||||
|
|
||||||
|
def on_sync_progress(self, event):
|
||||||
|
e, data = event["event"], event["data"]
|
||||||
|
name, current, total, last = e, data['done'][0], 0, 0
|
||||||
|
if not e.endswith("init") and not e.endswith("main") and not e.endswith("indexes"):
|
||||||
|
name = f"{e}#{data['id']}"
|
||||||
|
if "total" in data:
|
||||||
|
total, last = self.tasks[name] = (data["total"][0], last)
|
||||||
|
elif name in self.tasks:
|
||||||
|
total, last = self.tasks[name]
|
||||||
|
elif total == 0:
|
||||||
|
return
|
||||||
|
progress_status = (total, self.maybe_log_progress(name, current, total, last))
|
||||||
|
if progress_status[1] == 1:
|
||||||
|
del self.tasks[name]
|
||||||
|
else:
|
||||||
|
self.tasks[name] = progress_status
|
||||||
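The threshold logic in maybe_log_progress() boils down to logging at 0%, 25%, 50%, 75% and 100% only; a small worked example (the event name is illustrative):

    last = 0
    for done in (0, 10, 30, 60, 80, 100):
        last = Basic.maybe_log_progress("blockchain.sync.blocks.main", done, 100, last)
    # logs 0% at done=0, 25% at 30, 50% at 60, 75% at 80 and 100% at 100;
    # done=10 logs nothing because no threshold was crossed.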
|
|
||||||
|
|
||||||
|
class Bar2(Bar):
|
||||||
|
|
||||||
|
def __init__(self, frac, default_len=10, charset=None):
|
||||||
|
super().__init__(frac[0], default_len, charset)
|
||||||
|
self.frac2 = frac[1]
|
||||||
|
|
||||||
|
def __format__(self, format_spec):
|
||||||
|
width = self.default_len
|
||||||
|
row1 = (1,)*int(self.frac2 * width * 2)
|
||||||
|
row2 = (2,)*int(self.frac * width * 2)
|
||||||
|
fill = []
|
||||||
|
for one, two, _ in itertools.zip_longest(row1, row2, range(width*2)):
|
||||||
|
fill.append((one or 0)+(two or 0))
|
||||||
|
bar = []
|
||||||
|
for i in range(0, width*2, 2):
|
||||||
|
if fill[i] == 1:
|
||||||
|
if fill[i+1] == 1:
|
||||||
|
bar.append('▀')
|
||||||
|
else:
|
||||||
|
bar.append('▘')
|
||||||
|
elif fill[i] == 2:
|
||||||
|
if fill[i+1] == 2:
|
||||||
|
bar.append('▄')
|
||||||
|
else:
|
||||||
|
bar.append('▖')
|
||||||
|
elif fill[i] == 3:
|
||||||
|
if fill[i+1] == 1:
|
||||||
|
bar.append('▛')
|
||||||
|
elif fill[i+1] == 2:
|
||||||
|
bar.append('▙')
|
||||||
|
elif fill[i+1] == 3:
|
||||||
|
bar.append('█')
|
||||||
|
else:
|
||||||
|
bar.append('▌')
|
||||||
|
else:
|
||||||
|
bar.append(' ')
|
||||||
|
return ''.join(bar)
|
||||||
|
|
||||||
|
|
||||||
|
class tqdm2(tqdm): # pylint: disable=invalid-name
|
||||||
|
|
||||||
|
def __init__(self, initial=(0, 0), unit=('it', 'it'), total=(None, None), **kwargs):
|
||||||
|
self.n2 = self.last_print_n2 = initial[1] # pylint: disable=invalid-name
|
||||||
|
self.unit2 = unit[1]
|
||||||
|
self.total2 = total[1]
|
||||||
|
super().__init__(initial=initial[0], unit=unit[0], total=total[0], **kwargs)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def format_dict(self):
|
||||||
|
d = super().format_dict
|
||||||
|
d.update({
|
||||||
|
'n2': self.n2,
|
||||||
|
'unit2': self.unit2,
|
||||||
|
'total2': self.total2,
|
||||||
|
})
|
||||||
|
return d
|
||||||
|
|
||||||
|
def update(self, n=(1, 1)):
|
||||||
|
if self.disable:
|
||||||
|
return
|
||||||
|
last_last_print_t = self.last_print_t
|
||||||
|
self.n2 += n[1]
|
||||||
|
super().update(n[0])
|
||||||
|
if last_last_print_t != self.last_print_t:
|
||||||
|
self.last_print_n2 = self.n2
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def format_meter(
|
||||||
|
n, total, elapsed, ncols=None, prefix='', ascii=False, # pylint: disable=redefined-builtin
|
||||||
|
unit='it', unit_scale=False, rate=None, bar_format=None,
|
||||||
|
postfix=None, unit_divisor=1000, initial=0, **extra_kwargs
|
||||||
|
):
|
||||||
|
|
||||||
|
# sanity check: total
|
||||||
|
if total and n >= (total + 0.5): # allow float imprecision (#849)
|
||||||
|
total = None
|
||||||
|
|
||||||
|
# apply custom scale if necessary
|
||||||
|
if unit_scale and unit_scale not in (True, 1):
|
||||||
|
if total:
|
||||||
|
total *= unit_scale
|
||||||
|
n *= unit_scale
|
||||||
|
if rate:
|
||||||
|
rate *= unit_scale # by default rate = 1 / self.avg_time
|
||||||
|
unit_scale = False
|
||||||
|
|
||||||
|
elapsed_str = tqdm.format_interval(elapsed)
|
||||||
|
|
||||||
|
# if unspecified, attempt to use rate = average speed
|
||||||
|
# (we allow manual override since predicting time is an arcane art)
|
||||||
|
if rate is None and elapsed:
|
||||||
|
rate = n / elapsed
|
||||||
|
inv_rate = 1 / rate if rate else None
|
||||||
|
format_sizeof = tqdm.format_sizeof
|
||||||
|
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
|
||||||
|
'{0:5.2f}'.format(rate))
|
||||||
|
if rate else '?') + unit + '/s'
|
||||||
|
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
|
||||||
|
'{0:5.2f}'.format(inv_rate))
|
||||||
|
if inv_rate else '?') + 's/' + unit
|
||||||
|
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
|
||||||
|
|
||||||
|
if unit_scale:
|
||||||
|
n_fmt = format_sizeof(n, divisor=unit_divisor)
|
||||||
|
total_fmt = format_sizeof(total, divisor=unit_divisor) \
|
||||||
|
if total is not None else '?'
|
||||||
|
else:
|
||||||
|
n_fmt = str(n)
|
||||||
|
total_fmt = str(total) if total is not None else '?'
|
||||||
|
|
||||||
|
try:
|
||||||
|
postfix = ', ' + postfix if postfix else ''
|
||||||
|
except TypeError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
remaining = (total - n) / rate if rate and total else 0
|
||||||
|
remaining_str = tqdm.format_interval(remaining) if rate else '?'
|
||||||
|
|
||||||
|
# format the stats displayed to the left and right sides of the bar
|
||||||
|
if prefix:
|
||||||
|
# old prefix setup work around
|
||||||
|
bool_prefix_colon_already = (prefix[-2:] == ": ")
|
||||||
|
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
|
||||||
|
else:
|
||||||
|
l_bar = ''
|
||||||
|
|
||||||
|
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
|
||||||
|
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
|
||||||
|
|
||||||
|
# Custom bar formatting
|
||||||
|
# Populate a dict with all available progress indicators
|
||||||
|
format_dict = dict(
|
||||||
|
# slight extension of self.format_dict
|
||||||
|
n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
|
||||||
|
elapsed=elapsed_str, elapsed_s=elapsed,
|
||||||
|
ncols=ncols, desc=prefix or '', unit=unit,
|
||||||
|
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
|
||||||
|
rate_fmt=rate_fmt, rate_noinv=rate,
|
||||||
|
rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
|
||||||
|
rate_inv_fmt=rate_inv_fmt,
|
||||||
|
postfix=postfix, unit_divisor=unit_divisor,
|
||||||
|
# plus more useful definitions
|
||||||
|
remaining=remaining_str, remaining_s=remaining,
|
||||||
|
l_bar=l_bar, r_bar=r_bar,
|
||||||
|
**extra_kwargs)
|
||||||
|
|
||||||
|
# total is known: we can predict some stats
|
||||||
|
if total:
|
||||||
|
n2, total2 = extra_kwargs['n2'], extra_kwargs['total2'] # pylint: disable=invalid-name
|
||||||
|
|
||||||
|
# fractional and percentage progress
|
||||||
|
frac = n / total
|
||||||
|
frac2 = n2 / total2
|
||||||
|
percentage = frac * 100
|
||||||
|
|
||||||
|
l_bar += '{0:3.0f}%|'.format(percentage)
|
||||||
|
|
||||||
|
if ncols == 0:
|
||||||
|
return l_bar[:-1] + r_bar[1:]
|
||||||
|
|
||||||
|
format_dict.update(l_bar=l_bar)
|
||||||
|
if bar_format:
|
||||||
|
format_dict.update(percentage=percentage)
|
||||||
|
|
||||||
|
# auto-remove colon for empty `desc`
|
||||||
|
if not prefix:
|
||||||
|
bar_format = bar_format.replace("{desc}: ", '')
|
||||||
|
else:
|
||||||
|
bar_format = "{l_bar}{bar}{r_bar}"
|
||||||
|
|
||||||
|
full_bar = FormatReplace()
|
||||||
|
try:
|
||||||
|
nobar = bar_format.format(bar=full_bar, **format_dict)
|
||||||
|
except UnicodeEncodeError:
|
||||||
|
bar_format = _unicode(bar_format)
|
||||||
|
nobar = bar_format.format(bar=full_bar, **format_dict)
|
||||||
|
if not full_bar.format_called:
|
||||||
|
# no {bar}, we can just format and return
|
||||||
|
return nobar
|
||||||
|
|
||||||
|
# Formatting progress bar space available for bar's display
|
||||||
|
full_bar = Bar2(
|
||||||
|
(frac, frac2),
|
||||||
|
max(1, ncols - disp_len(nobar))
|
||||||
|
if ncols else 10,
|
||||||
|
charset=Bar2.ASCII if ascii is True else ascii or Bar2.UTF)
|
||||||
|
if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
|
||||||
|
bar_format = _unicode(bar_format)
|
||||||
|
res = bar_format.format(bar=full_bar, **format_dict)
|
||||||
|
return disp_trim(res, ncols) if ncols else res
|
||||||
|
|
||||||
|
elif bar_format:
|
||||||
|
# user-specified bar_format but no total
|
||||||
|
l_bar += '|'
|
||||||
|
format_dict.update(l_bar=l_bar, percentage=0)
|
||||||
|
full_bar = FormatReplace()
|
||||||
|
nobar = bar_format.format(bar=full_bar, **format_dict)
|
||||||
|
if not full_bar.format_called:
|
||||||
|
return nobar
|
||||||
|
full_bar = Bar2(
|
||||||
|
(0, 0),
|
||||||
|
max(1, ncols - disp_len(nobar))
|
||||||
|
if ncols else 10,
|
||||||
|
charset=Bar2.BLANK)
|
||||||
|
res = bar_format.format(bar=full_bar, **format_dict)
|
||||||
|
return disp_trim(res, ncols) if ncols else res
|
||||||
|
else:
|
||||||
|
# no total: no progressbar, ETA, just progress stats
|
||||||
|
return ((prefix + ": ") if prefix else '') + \
|
||||||
|
'{0}{1} [{2}, {3}{4}]'.format(
|
||||||
|
n_fmt, unit, elapsed_str, rate_fmt, postfix)
|
||||||
|
|
||||||
|
|
||||||
|
class Advanced(Basic):
|
||||||
|
|
||||||
|
FORMAT = '{l_bar}{bar}| {n_fmt:>8}/{total_fmt:>8} [{elapsed:>7}<{remaining:>8}, {rate_fmt:>17}]'
|
||||||
|
|
||||||
|
def __init__(self, service: Service):
|
||||||
|
super().__init__(service)
|
||||||
|
self.bars: Dict[Any, tqdm] = {}
|
||||||
|
self.stderr = RedirectOutput('stderr')
|
||||||
|
|
||||||
|
def starting(self):
|
||||||
|
self.stderr.capture()
|
||||||
|
super().starting()
|
||||||
|
|
||||||
|
def stopping(self):
|
||||||
|
for bar in self.bars.values():
|
||||||
|
bar.close()
|
||||||
|
super().stopping()
|
||||||
|
#self.stderr.flush(self.bars['read'].write, True)
|
||||||
|
#self.stderr.release()
|
||||||
|
|
||||||
|
def get_or_create_bar(self, name, desc, units, totals, leave=False, bar_format=None, postfix=None, position=None):
|
||||||
|
bar = self.bars.get(name)
|
||||||
|
if bar is None:
|
||||||
|
if len(units) == 2:
|
||||||
|
bar = self.bars[name] = tqdm2(
|
||||||
|
desc=desc, unit=units, total=totals,
|
||||||
|
bar_format=bar_format or self.FORMAT, leave=leave,
|
||||||
|
postfix=postfix, position=position
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
bar = self.bars[name] = tqdm(
|
||||||
|
desc=desc, unit=units[0], total=totals[0],
|
||||||
|
bar_format=bar_format or self.FORMAT, leave=leave,
|
||||||
|
postfix=postfix, position=position
|
||||||
|
)
|
||||||
|
return bar
|
||||||
|
|
||||||
|
def sync_init(self, name, d):
|
||||||
|
bar_name = f"{name}#{d['id']}"
|
||||||
|
bar = self.bars.get(bar_name)
|
||||||
|
if bar is None:
|
||||||
|
label = d.get('label', name[-11:])
|
||||||
|
self.get_or_create_bar(bar_name, label, d['units'], d['total'], True)
|
||||||
|
else:
|
||||||
|
if d['done'][0] != -1:
|
||||||
|
bar.update(d['done'][0] - bar.n)
|
||||||
|
if d['done'][0] == -1 or d['done'][0] == bar.total:
|
||||||
|
bar.close()
|
||||||
|
self.bars.pop(bar_name)
|
||||||
|
|
||||||
|
def sync_main(self, name, d):
|
||||||
|
bar = self.bars.get(name)
|
||||||
|
if bar is None:
|
||||||
|
label = d.get('label', name[-11:])
|
||||||
|
self.get_or_create_bar(name, label, d['units'], d['total'], True)
|
||||||
|
#self.last_stats = f"{d['txs']:,d} txs, {d['claims']:,d} claims and {d['supports']:,d} supports"
|
||||||
|
#self.get_or_create_bar("read", "├─ blocks read", "blocks", d['blocks'], True)
|
||||||
|
#self.get_or_create_bar("save", "└─┬ txs saved", "txs", d['txs'], True)
|
||||||
|
else:
|
||||||
|
if d['done'] == (-1,)*len(d['done']):
|
||||||
|
base_name = name[:name.rindex('.')]
|
||||||
|
for child_name, child_bar in self.bars.items():
|
||||||
|
if child_name.startswith(base_name):
|
||||||
|
child_bar.close()
|
||||||
|
bar.close()
|
||||||
|
self.bars.pop(name)
|
||||||
|
else:
|
||||||
|
if len(d['done']) == 2:
|
||||||
|
bar.update((d['done'][0]-bar.n, d['done'][1]-bar.n2))
|
||||||
|
else:
|
||||||
|
bar.update(d['done'][0]-bar.n)
|
||||||
|
|
||||||
|
def sync_task(self, name, d):
|
||||||
|
bar_name = f"{name}#{d['id']}"
|
||||||
|
bar = self.bars.get(bar_name)
|
||||||
|
if bar is None:
|
||||||
|
#assert d['done'][0] == 0
|
||||||
|
label = d.get('label', name[-11:])
|
||||||
|
self.get_or_create_bar(
|
||||||
|
f"{name}#{d['id']}", label, d['units'], d['total'],
|
||||||
|
name.split('.')[-1] not in ('insert', 'update', 'file')
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
if d['done'][0] != -1:
|
||||||
|
main_bar_name = f"{name[:name.rindex('.')]}.main"
|
||||||
|
if len(d['done']) > 1:
|
||||||
|
diff = tuple(a-b for a, b in zip(d['done'], (bar.n, bar.n2)))
|
||||||
|
else:
|
||||||
|
diff = d['done'][0] - bar.n
|
||||||
|
if main_bar_name != name:
|
||||||
|
main_bar = self.bars.get(main_bar_name)
|
||||||
|
if main_bar and main_bar.unit == bar.unit:
|
||||||
|
main_bar.update(diff)
|
||||||
|
bar.update(diff)
|
||||||
|
if d['done'][0] == -1 or d['done'][0] == bar.total:
|
||||||
|
bar.close()
|
||||||
|
self.bars.pop(bar_name)
|
||||||
|
|
||||||
|
def update_other_bars(self, e, d):
|
||||||
|
if d['total'] == 0:
|
||||||
|
return
|
||||||
|
bar = self.bars.get(e)
|
||||||
|
if not bar:
|
||||||
|
name = (
|
||||||
|
' '.join(e.split('.')[-2:])
|
||||||
|
.replace('support', 'suprt')
|
||||||
|
.replace('channels', 'chanls')
|
||||||
|
.replace('signatures', 'sigs')
|
||||||
|
)
|
||||||
|
bar = self.get_or_create_bar(e, f"├─ {name:>12}", d['unit'], d['total'], True)
|
||||||
|
diff = d['step']-bar.n
|
||||||
|
bar.update(diff)
|
||||||
|
#if d['step'] == d['total']:
|
||||||
|
#bar.close()
|
||||||
|
|
||||||
|
def on_sync_progress(self, event):
|
||||||
|
e, d = event['event'], event.get('data', {})
|
||||||
|
if e.endswith(".init"):
|
||||||
|
self.sync_init(e, d)
|
||||||
|
elif e.endswith(".main"):
|
||||||
|
self.sync_main(e, d)
|
||||||
|
else:
|
||||||
|
self.sync_task(e, d)
|
||||||
|
|
||||||
|
# if e.endswith("sync.start"):
|
||||||
|
# self.sync_start(d)
|
||||||
|
# self.stderr.flush(self.bars['read'].write)
|
||||||
|
# elif e.endswith("sync.complete"):
|
||||||
|
# self.stderr.flush(self.bars['read'].write, True)
|
||||||
|
# self.sync_complete()
|
||||||
|
# else:
|
||||||
|
# self.stderr.flush(self.bars['read'].write)
|
||||||
|
# self.update_progress(e, d)
|
||||||
|
|
||||||
|
|
||||||
|
def console_class_from_name(name) -> Type[Console]:
|
||||||
|
return {'basic': Basic, 'advanced': Advanced}.get(name, Console)
|
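Tying this back to the new `console` setting in lbry/conf.py, the service can pick its console implementation from configuration (`service` stands in for a constructed FullNode or LightClient):

    console = console_class_from_name(conf.console)(service)   # 'basic', 'advanced' or the no-op Console
    console.starting()
    # ... run the service ...
    console.stopping()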
|
@@ -1,2 +1,6 @@
|
||||||
|
DEFAULT_PAGE_SIZE = 20
|
||||||
|
|
||||||
|
NULL_HASH32 = b'\x00'*32
|
||||||
|
|
||||||
CENT = 1000000
|
CENT = 1000000
|
||||||
COIN = 100*CENT
|
COIN = 100*CENT
|
||||||
|
|
|
@@ -2,7 +2,7 @@ from coincurve import PublicKey, PrivateKey as _PrivateKey
|
||||||
|
|
||||||
from lbry.crypto.hash import hmac_sha512, hash160, double_sha256
|
from lbry.crypto.hash import hmac_sha512, hash160, double_sha256
|
||||||
from lbry.crypto.base58 import Base58
|
from lbry.crypto.base58 import Base58
|
||||||
from .util import cachedproperty
|
from lbry.utils import cachedproperty
|
||||||
|
|
||||||
|
|
||||||
class DerivationError(Exception):
|
class DerivationError(Exception):
|
|
@@ -36,12 +36,12 @@ def hash160(x):
|
||||||
return ripemd160(sha256(x))
|
return ripemd160(sha256(x))
|
||||||
|
|
||||||
|
|
||||||
def hash_to_hex_str(x):
|
def hash_to_hex_str(x: bytes) -> str:
|
||||||
""" Convert a big-endian binary hash to displayed hex string.
|
""" Convert a big-endian binary hash to displayed hex string.
|
||||||
Display form of a binary hash is reversed and converted to hex. """
|
Display form of a binary hash is reversed and converted to hex. """
|
||||||
return hexlify(reversed(x))
|
return hexlify(x[::-1])
|
||||||
|
|
||||||
|
|
||||||
def hex_str_to_hash(x):
|
def hex_str_to_hash(x: str) -> bytes:
|
||||||
""" Convert a displayed hex string to a binary hash. """
|
""" Convert a displayed hex string to a binary hash. """
|
||||||
return reversed(unhexlify(x))
|
return unhexlify(x)[::-1]
|
||||||
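Beyond adding type hints, this change fixes a latent bug: hexlify() cannot consume the iterator returned by reversed(), while slicing produces bytes and round-trips cleanly. A small worked example:

    h = bytes.fromhex('ff' + '00' * 31)    # a big-endian 32-byte hash
    hex_str = hash_to_hex_str(h)           # b'0000...00ff' (reversed then hex-encoded; hexlify returns bytes)
    assert hex_str_to_hash(hex_str) == h   # the two helpers are inverses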
|
|
5  lbry/db/__init__.py  Normal file
@@ -0,0 +1,5 @@
|
||||||
|
from .database import Database, Result
|
||||||
|
from .constants import (
|
||||||
|
TXO_TYPES, SPENDABLE_TYPE_CODES,
|
||||||
|
CLAIM_TYPE_CODES, CLAIM_TYPE_NAMES
|
||||||
|
)
|
73  lbry/db/constants.py  Normal file
@@ -0,0 +1,73 @@
|
||||||
|
MAX_QUERY_VARIABLES = 900
|
||||||
|
|
||||||
|
TXO_TYPES = {
|
||||||
|
"other": 0,
|
||||||
|
"stream": 1,
|
||||||
|
"channel": 2,
|
||||||
|
"support": 3,
|
||||||
|
"purchase": 4,
|
||||||
|
"collection": 5,
|
||||||
|
"repost": 6,
|
||||||
|
}
|
||||||
|
|
||||||
|
CLAIM_TYPE_NAMES = [
|
||||||
|
'stream',
|
||||||
|
'channel',
|
||||||
|
'collection',
|
||||||
|
'repost',
|
||||||
|
]
|
||||||
|
|
||||||
|
CONTENT_TYPE_NAMES = [
|
||||||
|
name for name in CLAIM_TYPE_NAMES if name != "channel"
|
||||||
|
]
|
||||||
|
|
||||||
|
CLAIM_TYPE_CODES = [
|
||||||
|
TXO_TYPES[name] for name in CLAIM_TYPE_NAMES
|
||||||
|
]
|
||||||
|
|
||||||
|
CONTENT_TYPE_CODES = [
|
||||||
|
TXO_TYPES[name] for name in CONTENT_TYPE_NAMES
|
||||||
|
]
|
||||||
|
|
||||||
|
SPENDABLE_TYPE_CODES = [
|
||||||
|
TXO_TYPES['other'],
|
||||||
|
TXO_TYPES['purchase']
|
||||||
|
]
|
||||||
|
|
||||||
|
STREAM_TYPES = {
|
||||||
|
'video': 1,
|
||||||
|
'audio': 2,
|
||||||
|
'image': 3,
|
||||||
|
'document': 4,
|
||||||
|
'binary': 5,
|
||||||
|
'model': 6
|
||||||
|
}
|
||||||
|
|
||||||
|
MATURE_TAGS = (
|
||||||
|
'nsfw', 'porn', 'xxx', 'mature', 'adult', 'sex'
|
||||||
|
)
|
||||||
|
|
||||||
|
ATTRIBUTE_ARRAY_MAX_LENGTH = 100
|
||||||
|
|
||||||
|
SEARCH_INTEGER_PARAMS = {
|
||||||
|
'height', 'creation_height', 'activation_height', 'expiration_height',
|
||||||
|
'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount',
|
||||||
|
'tx_position', 'channel_join', 'reposted',
|
||||||
|
'amount', 'staked_amount', 'support_amount',
|
||||||
|
'trend_group', 'trend_mixed', 'trend_local', 'trend_global',
|
||||||
|
}
|
||||||
|
|
||||||
|
SEARCH_PARAMS = {
|
||||||
|
'name', 'text', 'claim_id', 'claim_ids', 'txid', 'nout', 'channel', 'channel_ids', 'not_channel_ids',
|
||||||
|
'public_key_id', 'claim_type', 'stream_types', 'media_types', 'fee_currency',
|
||||||
|
'has_channel_signature', 'signature_valid',
|
||||||
|
'any_tags', 'all_tags', 'not_tags', 'reposted_claim_id',
|
||||||
|
'any_locations', 'all_locations', 'not_locations',
|
||||||
|
'any_languages', 'all_languages', 'not_languages',
|
||||||
|
'is_controlling', 'limit', 'offset', 'order_by',
|
||||||
|
'no_totals',
|
||||||
|
} | SEARCH_INTEGER_PARAMS
|
||||||
|
|
||||||
|
SEARCH_ORDER_FIELDS = {
|
||||||
|
'name', 'claim_hash', 'claim_id'
|
||||||
|
} | SEARCH_INTEGER_PARAMS
|
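For reference, the derived code lists above work out to:

    assert CLAIM_TYPE_CODES == [1, 2, 5, 6]      # stream, channel, collection, repost
    assert CONTENT_TYPE_CODES == [1, 5, 6]       # stream, collection, repost
    assert SPENDABLE_TYPE_CODES == [0, 4]        # other, purchase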
369  lbry/db/database.py  Normal file
@@ -0,0 +1,369 @@
|
||||||
|
import os
|
||||||
|
import asyncio
|
||||||
|
import tempfile
|
||||||
|
import multiprocessing as mp
|
||||||
|
from typing import List, Optional, Iterable, Iterator, TypeVar, Generic, TYPE_CHECKING, Dict, Tuple
|
||||||
|
from concurrent.futures import Executor, ThreadPoolExecutor, ProcessPoolExecutor
|
||||||
|
from functools import partial
|
||||||
|
|
||||||
|
from sqlalchemy import create_engine, text
|
||||||
|
|
||||||
|
from lbry.event import EventController
|
||||||
|
from lbry.crypto.bip32 import PubKey
|
||||||
|
from lbry.blockchain.transaction import Transaction, Output
|
||||||
|
from .constants import TXO_TYPES, CLAIM_TYPE_CODES
|
||||||
|
from .query_context import initialize, uninitialize, ProgressPublisher
|
||||||
|
from . import queries as q
|
||||||
|
from . import sync
|
||||||
|
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from lbry.blockchain.ledger import Ledger
|
||||||
|
|
||||||
|
|
||||||
|
def clean_wallet_account_ids(constraints):
|
||||||
|
wallet = constraints.pop('wallet', None)
|
||||||
|
account = constraints.pop('account', None)
|
||||||
|
accounts = constraints.pop('accounts', [])
|
||||||
|
if account and not accounts:
|
||||||
|
accounts = [account]
|
||||||
|
if wallet:
|
||||||
|
constraints['wallet_account_ids'] = [account.id for account in wallet.accounts]
|
||||||
|
if not accounts:
|
||||||
|
accounts = wallet.accounts
|
||||||
|
if accounts:
|
||||||
|
constraints['account_ids'] = [account.id for account in accounts]
|
||||||
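A short sketch of what clean_wallet_account_ids() does to a constraints dict (`wallet` stands for any object with an .accounts list whose items have an .id, which is all the function assumes):

    constraints = {'wallet': wallet, 'account': wallet.accounts[0]}
    clean_wallet_account_ids(constraints)
    # 'wallet' and 'account' are popped and replaced with plain id lists:
    # {'wallet_account_ids': [a.id for a in wallet.accounts],
    #  'account_ids': [wallet.accounts[0].id]}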
|
|
||||||
|
|
||||||
|
async def add_channel_keys_to_txo_results(accounts: List, txos: Iterable[Output]):
|
||||||
|
sub_channels = set()
|
||||||
|
for txo in txos:
|
||||||
|
if txo.is_claim and txo.claim.is_channel:
|
||||||
|
for account in accounts:
|
||||||
|
private_key = await account.get_channel_private_key(
|
||||||
|
txo.claim.channel.public_key_bytes
|
||||||
|
)
|
||||||
|
if private_key:
|
||||||
|
txo.private_key = private_key
|
||||||
|
break
|
||||||
|
if txo.channel is not None:
|
||||||
|
sub_channels.add(txo.channel)
|
||||||
|
if sub_channels:
|
||||||
|
await add_channel_keys_to_txo_results(accounts, sub_channels)
|
||||||
|
|
||||||
|
ResultType = TypeVar('ResultType')
|
||||||
|
|
||||||
|
|
||||||
|
class Result(Generic[ResultType]):
|
||||||
|
|
||||||
|
__slots__ = 'rows', 'total', 'censor'
|
||||||
|
|
||||||
|
def __init__(self, rows: List[ResultType], total, censor=None):
|
||||||
|
self.rows = rows
|
||||||
|
self.total = total
|
||||||
|
self.censor = censor
|
||||||
|
|
||||||
|
def __getitem__(self, item: int) -> ResultType:
|
||||||
|
return self.rows[item]
|
||||||
|
|
||||||
|
def __iter__(self) -> Iterator[ResultType]:
|
||||||
|
return iter(self.rows)
|
||||||
|
|
||||||
|
def __len__(self):
|
||||||
|
return len(self.rows)
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return repr(self.rows)
|
||||||
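Result is a thin list-like wrapper that also carries the query's overall row count (and an optional censor); for example:

    page = Result(rows=['claim1', 'claim2'], total=57)
    len(page)     # 2  -> rows actually fetched for this page
    page.total    # 57 -> rows matching the query overall
    list(page)    # ['claim1', 'claim2']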
|
|
||||||
|
|
||||||
|
class Database:
|
||||||
|
|
||||||
|
def __init__(self, ledger: 'Ledger'):
|
||||||
|
self.url = ledger.conf.db_url_or_default
|
||||||
|
self.ledger = ledger
|
||||||
|
self.workers = self._normalize_worker_processes(ledger.conf.workers)
|
||||||
|
self.executor: Optional[Executor] = None
|
||||||
|
self.message_queue = mp.Queue()
|
||||||
|
self.stop_event = mp.Event()
|
||||||
|
self._on_progress_controller = EventController()
|
||||||
|
self.on_progress = self._on_progress_controller.stream
|
||||||
|
self.progress_publisher = ProgressPublisher(
|
||||||
|
self.message_queue, self._on_progress_controller
|
||||||
|
)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _normalize_worker_processes(workers):
|
||||||
|
if workers == 0:
|
||||||
|
return os.cpu_count()
|
||||||
|
elif workers > 0:
|
||||||
|
return workers
|
||||||
|
return 1
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def temp_from_url_regtest(cls, db_url, lbrycrd_config=None):
|
||||||
|
from lbry import Config, RegTestLedger # pylint: disable=import-outside-toplevel
|
||||||
|
directory = tempfile.mkdtemp()
|
||||||
|
if lbrycrd_config:
|
||||||
|
conf = lbrycrd_config
|
||||||
|
conf.data_dir = directory
|
||||||
|
conf.download_dir = directory
|
||||||
|
conf.wallet_dir = directory
|
||||||
|
else:
|
||||||
|
conf = Config.with_same_dir(directory)
|
||||||
|
conf.set(blockchain="regtest", db_url=db_url)
|
||||||
|
ledger = RegTestLedger(conf)
|
||||||
|
return cls(ledger)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def temp_sqlite_regtest(cls, lbrycrd_config=None):
|
||||||
|
from lbry import Config, RegTestLedger # pylint: disable=import-outside-toplevel
|
||||||
|
directory = tempfile.mkdtemp()
|
||||||
|
if lbrycrd_config:
|
||||||
|
conf = lbrycrd_config
|
||||||
|
conf.data_dir = directory
|
||||||
|
conf.download_dir = directory
|
||||||
|
conf.wallet_dir = directory
|
||||||
|
else:
|
||||||
|
conf = Config.with_same_dir(directory).set(blockchain="regtest")
|
||||||
|
ledger = RegTestLedger(conf)
|
||||||
|
return cls(ledger)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def temp_sqlite(cls):
|
||||||
|
from lbry import Config, Ledger # pylint: disable=import-outside-toplevel
|
||||||
|
conf = Config.with_same_dir(tempfile.mkdtemp())
|
||||||
|
return cls(Ledger(conf))
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_url(cls, db_url):
|
||||||
|
from lbry import Config, Ledger # pylint: disable=import-outside-toplevel
|
||||||
|
return cls(Ledger(Config.with_null_dir().set(db_url=db_url)))
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def in_memory(cls):
|
||||||
|
return cls.from_url('sqlite:///:memory:')
|
||||||
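A hedged end-to-end sketch of the Database lifecycle defined in this file, using the in-memory constructor (it assumes the database can be opened outside the full daemon; the query is illustrative):

    import asyncio

    async def demo():
        db = Database.in_memory()     # sqlite:///:memory: backed by a null-dir Config
        await db.open()               # starts the executor pool and creates tables
        try:
            print(await db.get_best_block_height())
        finally:
            await db.close()

    asyncio.run(demo())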
|
|
||||||
|
def sync_create(self, name):
|
||||||
|
engine = create_engine(self.url)
|
||||||
|
db = engine.connect()
|
||||||
|
db.execute(text("COMMIT"))
|
||||||
|
db.execute(text(f"CREATE DATABASE {name}"))
|
||||||
|
|
||||||
|
async def create(self, name):
|
||||||
|
return await asyncio.get_running_loop().run_in_executor(None, self.sync_create, name)
|
||||||
|
|
||||||
|
def sync_drop(self, name):
|
||||||
|
engine = create_engine(self.url)
|
||||||
|
db = engine.connect()
|
||||||
|
db.execute(text("COMMIT"))
|
||||||
|
db.execute(text(f"DROP DATABASE IF EXISTS {name}"))
|
||||||
|
|
||||||
|
async def drop(self, name):
|
||||||
|
return await asyncio.get_running_loop().run_in_executor(None, self.sync_drop, name)
|
||||||
|
|
||||||
|
async def open(self):
|
||||||
|
assert self.executor is None, "Database already open."
|
||||||
|
self.progress_publisher.start()
|
||||||
|
kwargs = {
|
||||||
|
"initializer": initialize,
|
||||||
|
"initargs": (
|
||||||
|
self.ledger,
|
||||||
|
self.message_queue, self.stop_event
|
||||||
|
)
|
||||||
|
}
|
||||||
|
if self.workers > 1:
|
||||||
|
self.executor = ProcessPoolExecutor(max_workers=self.workers, **kwargs)
|
||||||
|
else:
|
||||||
|
self.executor = ThreadPoolExecutor(max_workers=1, **kwargs)
|
||||||
|
return await self.run(q.check_version_and_create_tables)
|
||||||
|
|
||||||
|
async def close(self):
|
||||||
|
self.progress_publisher.stop()
|
||||||
|
if self.executor is not None:
|
||||||
|
if isinstance(self.executor, ThreadPoolExecutor):
|
||||||
|
await self.run(uninitialize)
|
||||||
|
self.executor.shutdown()
|
||||||
|
self.executor = None
|
||||||
|
# fixes "OSError: handle is closed"
|
||||||
|
# seems to only happen when running in PyCharm
|
||||||
|
# https://github.com/python/cpython/pull/6084#issuecomment-564585446
|
||||||
|
# TODO: delete this in Python 3.8/3.9?
|
||||||
|
from concurrent.futures.process import _threads_wakeups # pylint: disable=import-outside-toplevel
|
||||||
|
_threads_wakeups.clear()
|
||||||
|
|
||||||
|
async def run(self, func, *args, **kwargs):
|
||||||
|
if kwargs:
|
||||||
|
clean_wallet_account_ids(kwargs)
|
||||||
|
return await asyncio.get_running_loop().run_in_executor(
|
||||||
|
self.executor, partial(func, *args, **kwargs)
|
||||||
|
)
|
||||||
|
|
||||||
|
async def fetch_result(self, func, *args, **kwargs) -> Result:
|
||||||
|
rows, total = await self.run(func, *args, **kwargs)
|
||||||
|
return Result(rows, total)
|
||||||
|
|
||||||
|
async def execute(self, sql):
|
||||||
|
return await self.run(q.execute, sql)
|
||||||
|
|
||||||
|
async def execute_sql_object(self, sql):
|
||||||
|
return await self.run(q.execute_sql_object, sql)
|
||||||
|
|
||||||
|
async def execute_fetchall(self, sql):
|
||||||
|
return await self.run(q.execute_fetchall, sql)
|
||||||
|
|
||||||
|
async def has_filters(self):
|
||||||
|
return await self.run(q.has_filters)
|
||||||
|
|
||||||
|
async def has_claims(self):
|
||||||
|
return await self.run(q.has_claims)
|
||||||
|
|
||||||
|
async def has_supports(self):
|
||||||
|
return await self.run(q.has_supports)
|
||||||
|
|
||||||
|
async def has_wallet(self, wallet_id):
|
||||||
|
return await self.run(q.has_wallet, wallet_id)
|
||||||
|
|
||||||
|
async def get_wallet(self, wallet_id: str):
|
||||||
|
return await self.run(q.get_wallet, wallet_id)
|
||||||
|
|
||||||
|
async def add_wallet(self, wallet_id: str, data: str):
|
||||||
|
return await self.run(q.add_wallet, wallet_id, data)
|
||||||
|
|
||||||
|
async def get_best_block_height(self) -> int:
|
||||||
|
return await self.run(q.get_best_block_height)
|
||||||
|
|
||||||
|
async def process_all_things_after_sync(self):
|
||||||
|
return await self.run(sync.process_all_things_after_sync)
|
||||||
|
|
||||||
|
async def get_block_headers(self, start_height: int, end_height: int = None):
|
||||||
|
return await self.run(q.get_block_headers, start_height, end_height)
|
||||||
|
|
||||||
|
async def get_filters(self, start_height: int, end_height: int = None, granularity: int = 0):
|
||||||
|
return await self.run(q.get_filters, start_height, end_height, granularity)
|
||||||
|
|
||||||
|
async def insert_block(self, block):
|
||||||
|
return await self.run(q.insert_block, block)
|
||||||
|
|
||||||
|
async def insert_transaction(self, block_hash, tx):
|
||||||
|
return await self.run(q.insert_transaction, block_hash, tx)
|
||||||
|
|
||||||
|
    async def update_address_used_times(self, addresses):
        return await self.run(q.update_address_used_times, addresses)

    async def reserve_outputs(self, txos, is_reserved=True):
        txo_hashes = [txo.hash for txo in txos]
        if txo_hashes:
            return await self.run(
                q.reserve_outputs, txo_hashes, is_reserved
            )

    async def release_outputs(self, txos):
        return await self.reserve_outputs(txos, is_reserved=False)

    async def release_tx(self, tx):
        return await self.release_outputs([txi.txo_ref.txo for txi in tx.inputs])

    async def release_all_outputs(self, account):
        return await self.run(q.release_all_outputs, account.id)

    async def get_balance(self, **constraints):
        return await self.run(q.get_balance, **constraints)

    async def get_report(self, accounts):
        return await self.run(q.get_report, accounts=accounts)

    async def get_addresses(self, **constraints) -> Result[dict]:
        addresses = await self.fetch_result(q.get_addresses, **constraints)
        if addresses and 'pubkey' in addresses[0]:
            for address in addresses:
                address['pubkey'] = PubKey(
                    self.ledger, bytes(address.pop('pubkey')), bytes(address.pop('chain_code')),
                    address.pop('n'), address.pop('depth')
                )
        return addresses

    async def get_all_addresses(self):
        return await self.run(q.get_all_addresses)

    async def get_address(self, **constraints):
        for address in await self.get_addresses(limit=1, **constraints):
            return address

    async def add_keys(self, account, chain, pubkeys):
        return await self.run(q.add_keys, [{
            'account': account.id,
            'address': k.address,
            'chain': chain,
            'pubkey': k.pubkey_bytes,
            'chain_code': k.chain_code,
            'n': k.n,
            'depth': k.depth
        } for k in pubkeys])

    async def get_transactions(self, **constraints) -> Result[Transaction]:
        return await self.fetch_result(q.get_transactions, **constraints)

    async def get_transaction(self, **constraints) -> Optional[Transaction]:
        txs = await self.get_transactions(limit=1, **constraints)
        if txs:
            return txs[0]

    async def get_purchases(self, **constraints) -> Result[Output]:
        return await self.fetch_result(q.get_purchases, **constraints)

    async def search_claims(self, **constraints) -> Result[Output]:
        # assert set(constraints).issubset(SEARCH_PARAMS), \
        #     f"Search query contains invalid arguments: {set(constraints).difference(SEARCH_PARAMS)}"
        claims, total, censor = await self.run(q.search_claims, **constraints)
        return Result(claims, total, censor)

    async def protobuf_search_claims(self, **constraints) -> str:
        return await self.run(q.protobuf_search_claims, **constraints)

    async def search_supports(self, **constraints) -> Result[Output]:
        return await self.fetch_result(q.search_supports, **constraints)

    async def sum_supports(self, claim_hash, include_channel_content=False, exclude_own_supports=False) \
            -> Tuple[List[Dict], int]:
        return await self.run(q.sum_supports, claim_hash, include_channel_content, exclude_own_supports)

    async def resolve(self, urls, **kwargs) -> Dict[str, Output]:
        return await self.run(q.resolve, urls, **kwargs)

    async def protobuf_resolve(self, urls, **kwargs) -> str:
        return await self.run(q.protobuf_resolve, urls, **kwargs)

    async def get_txo_sum(self, **constraints) -> int:
        return await self.run(q.get_txo_sum, **constraints)

    async def get_txo_plot(self, **constraints) -> List[dict]:
        return await self.run(q.get_txo_plot, **constraints)

    async def get_txos(self, **constraints) -> Result[Output]:
        txos = await self.fetch_result(q.get_txos, **constraints)
        if 'wallet' in constraints:
            await add_channel_keys_to_txo_results(constraints['wallet'].accounts, txos)
        return txos

    async def get_utxos(self, **constraints) -> Result[Output]:
        return await self.get_txos(spent_height=0, **constraints)

    async def get_supports(self, **constraints) -> Result[Output]:
        return await self.get_utxos(txo_type=TXO_TYPES['support'], **constraints)

    async def get_claims(self, **constraints) -> Result[Output]:
        if 'txo_type' not in constraints:
            constraints['txo_type__in'] = CLAIM_TYPE_CODES
        txos = await self.fetch_result(q.get_txos, **constraints)
        if 'wallet' in constraints:
            await add_channel_keys_to_txo_results(constraints['wallet'].accounts, txos)
        return txos

    async def get_streams(self, **constraints) -> Result[Output]:
        return await self.get_claims(txo_type=TXO_TYPES['stream'], **constraints)

    async def get_channels(self, **constraints) -> Result[Output]:
        return await self.get_claims(txo_type=TXO_TYPES['channel'], **constraints)

    async def get_collections(self, **constraints) -> Result[Output]:
        return await self.get_claims(txo_type=TXO_TYPES['collection'], **constraints)
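These wrapper methods all delegate to the synchronous `q` query module through `run`/`fetch_result`, which execute the query functions on the database's executor. A minimal usage sketch of the reserve/release pairing (the `db` instance, `account`, and the transaction-building step are assumptions, not part of this diff):

    async def spend_sketch(db, account):
        # pick some unspent outputs, reserve them while a transaction is built,
        # and release them again if the spend is abandoned
        utxos = await db.get_utxos(limit=10)  # real callers would also scope by wallet/account
        await db.reserve_outputs(utxos)
        try:
            ...  # build and broadcast a transaction spending `utxos`
        except Exception:
            await db.release_outputs(utxos)
            raise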
6  lbry/db/queries/__init__.py  Normal file
@@ -0,0 +1,6 @@
from .base import *
from .txio import *
from .search import *
from .resolve import *
from .address import *
from .wallet import *
67  lbry/db/queries/address.py  Normal file
@@ -0,0 +1,67 @@
import logging
from typing import Tuple, List, Optional

from sqlalchemy import func
from sqlalchemy.future import select

from ..utils import query
from ..query_context import context
from ..tables import TXO, PubkeyAddress, AccountAddress


log = logging.getLogger(__name__)


def update_address_used_times(addresses):
    context().execute(
        PubkeyAddress.update()
        .values(used_times=(
            select(func.count(TXO.c.address))
            .where(TXO.c.address == PubkeyAddress.c.address)
        ))
        .where(PubkeyAddress.c.address.in_(addresses))
    )


def select_addresses(cols, **constraints):
    return context().fetchall(query(
        [AccountAddress, PubkeyAddress],
        select(*cols).select_from(PubkeyAddress.join(AccountAddress)),
        **constraints
    ))


def get_addresses(cols=None, include_total=False, **constraints) -> Tuple[List[dict], Optional[int]]:
    if cols is None:
        cols = (
            PubkeyAddress.c.address,
            PubkeyAddress.c.used_times,
            AccountAddress.c.account,
            AccountAddress.c.chain,
            AccountAddress.c.pubkey,
            AccountAddress.c.chain_code,
            AccountAddress.c.n,
            AccountAddress.c.depth
        )
    return (
        select_addresses(cols, **constraints),
        get_address_count(**constraints) if include_total else None
    )


def get_address_count(**constraints):
    count = select_addresses([func.count().label('total')], **constraints)
    return count[0]['total'] or 0


def get_all_addresses():
    return context().execute(select(PubkeyAddress.c.address))


def add_keys(pubkeys):
    c = context()
    current_limit = c.variable_limit // len(pubkeys[0])  # (overall variable limit) // (variables per row)
    for start in range(0, len(pubkeys), current_limit - 1):
        batch = pubkeys[start:(start + current_limit - 1)]
        c.execute(c.insert_or_ignore(PubkeyAddress).values([{'address': k['address']} for k in batch]))
        c.execute(c.insert_or_ignore(AccountAddress).values(batch))
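`add_keys` batches its inserts so a single statement never exceeds the backend's bound-variable limit: `variable_limit` divided by the number of columns per row gives the largest safe batch. A standalone sketch of the same batching idea (`rows` and `variable_limit` are illustrative names):

    def batches(rows, variable_limit):
        # yield slices of `rows` small enough that each INSERT stays under the
        # database's bound-variable limit (for example 999 on older SQLite)
        if not rows:
            return
        per_row = len(rows[0])                        # one bound variable per column
        batch_size = max(variable_limit // per_row - 1, 1)
        for start in range(0, len(rows), batch_size):
            yield rows[start:start + batch_size]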
123  lbry/db/queries/base.py  Normal file
@@ -0,0 +1,123 @@
from math import log10
from binascii import hexlify

from sqlalchemy import text, between
from sqlalchemy.future import select

from ..query_context import context
from ..tables import (
    SCHEMA_VERSION, metadata, Version,
    Claim, Support, Block, BlockFilter, BlockGroupFilter, TX, TXFilter,
    pg_add_account_address_constraints_and_indexes
)


def execute(sql):
    return context().execute(text(sql))


def execute_sql_object(sql):
    return context().execute(sql)


def execute_fetchall(sql):
    return context().fetchall(text(sql))


def has_filters():
    return context().has_records(BlockFilter)


def has_claims():
    return context().has_records(Claim)


def has_supports():
    return context().has_records(Support)


def get_best_block_height():
    return context().fetchmax(Block.c.height, -1)


def insert_block(block):
    context().get_bulk_loader().add_block(block).flush()


def get_block_headers(first, last=None):
    if last is not None:
        query = (
            select('*').select_from(Block)
            .where(between(Block.c.height, first, last))
            .order_by(Block.c.height)
        )
    else:
        query = select('*').select_from(Block).where(Block.c.height == first)
    return context().fetchall(query)


def get_filters(start_height, end_height=None, granularity=0):
    assert granularity >= 0, "filter granularity must be 0 or positive number"
    if granularity == 0:
        query = (
            select('*').select_from(TXFilter)
            .where(between(TXFilter.c.height, start_height, end_height))
            .order_by(TXFilter.c.height)
        )
    elif granularity == 1:
        query = (
            select('*').select_from(BlockFilter)
            .where(between(BlockFilter.c.height, start_height, end_height))
            .order_by(BlockFilter.c.height)
        )
    else:
        query = (
            select('*').select_from(BlockGroupFilter)
            .where(
                (BlockGroupFilter.c.height == start_height) &
                (BlockGroupFilter.c.factor == log10(granularity))
            )
            .order_by(BlockGroupFilter.c.height)
        )
    result = []
    for row in context().fetchall(query):
        record = {
            "height": row["height"],
            "filter": hexlify(row["address_filter"]).decode(),
        }
        if granularity == 0:
            record["txid"] = hexlify(row["tx_hash"][::-1]).decode()
        result.append(record)
    return result


def insert_transaction(block_hash, tx):
    context().get_bulk_loader().add_transaction(block_hash, tx).flush(TX)


def check_version_and_create_tables():
    with context("db.connecting") as ctx:
        if ctx.has_table('version'):
            version = ctx.fetchone(select(Version.c.version).limit(1))
            if version and version['version'] == SCHEMA_VERSION:
                return
        metadata.drop_all(ctx.engine)
        metadata.create_all(ctx.engine)
        ctx.execute(Version.insert().values(version=SCHEMA_VERSION))
        for table in metadata.sorted_tables:
            disable_trigger_and_constraints(table.name)
        if ctx.is_postgres:
            for statement in pg_add_account_address_constraints_and_indexes:
                ctx.execute(text(statement))


def disable_trigger_and_constraints(table_name):
    ctx = context()
    if ctx.is_postgres:
        ctx.execute(text(f"ALTER TABLE {table_name} DISABLE TRIGGER ALL;"))
    if table_name in ('tag', 'stake', 'block_group_filter', 'mempool_filter'):
        return
    if ctx.is_postgres:
        ctx.execute(text(
            f"ALTER TABLE {table_name} DROP CONSTRAINT {table_name}_pkey CASCADE;"
        ))
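`get_filters` serves three granularities from three tables: 0 returns per-transaction filters from `tx_filter`, 1 returns per-block filters from `block_filter`, and any larger power of ten returns group filters keyed by `factor = log10(granularity)`. A sketch of how a caller might request each level (heights are arbitrary and an active query context is assumed):

    from lbry.db.queries.base import get_filters

    tx_level = get_filters(start_height=1000, end_height=1100, granularity=0)     # per transaction
    block_level = get_filters(start_height=1000, end_height=1100, granularity=1)  # per block
    group_level = get_filters(start_height=1000, granularity=1000)                # one 1000-block group, factor == 3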
101  lbry/db/queries/resolve.py  Normal file
@@ -0,0 +1,101 @@
import logging
import itertools
from typing import List, Dict

from lbry.schema.url import URL
from lbry.schema.result import Outputs as ResultOutput
from lbry.error import ResolveCensoredError
from lbry.blockchain.transaction import Output
from . import rows_to_txos

from ..query_context import context
from .search import select_claims


log = logging.getLogger(__name__)


def resolve_claims(**constraints):
    censor = context().get_resolve_censor()
    rows = context().fetchall(select_claims(**constraints))
    rows = censor.apply(rows)
    return rows_to_txos(rows), censor


def _get_referenced_rows(txo_rows: List[Output], censor_channels: List[bytes]):
    repost_hashes = set(txo.reposted_claim.claim_hash for txo in txo_rows if txo.reposted_claim)
    channel_hashes = set(itertools.chain(
        (txo.channel.claim_hash for txo in txo_rows if txo.channel),
        censor_channels
    ))

    reposted_txos = []
    if repost_hashes:
        reposted_txos = resolve_claims(**{'claim.claim_hash__in': repost_hashes})
        if reposted_txos:
            reposted_txos = reposted_txos[0]
            channel_hashes |= set(txo.channel.claim_hash for txo in reposted_txos if txo.channel)

    channel_txos = []
    if channel_hashes:
        channel_txos = resolve_claims(**{'claim.claim_hash__in': channel_hashes})
        channel_txos = channel_txos[0] if channel_txos else []

    # channels must come first for client side inflation to work properly
    return channel_txos + reposted_txos


def protobuf_resolve(urls, **kwargs) -> str:
    txo_rows = [resolve_url(raw_url) for raw_url in urls]
    extra_txo_rows = _get_referenced_rows(
        [txo_row for txo_row in txo_rows if isinstance(txo_row, Output)],
        [txo.censor_hash for txo in txo_rows if isinstance(txo, ResolveCensoredError)]
    )
    return ResultOutput.to_base64(txo_rows, extra_txo_rows)


def resolve(urls, **kwargs) -> Dict[str, Output]:
    return {url: resolve_url(url) for url in urls}


def resolve_url(raw_url):
    try:
        url = URL.parse(raw_url)
    except ValueError as e:
        return e

    channel = None

    if url.has_channel:
        q = url.channel.to_dict()
        if set(q) == {'name'}:
            q['is_controlling'] = True
        else:
            q['order_by'] = ['^creation_height']
        matches, censor = resolve_claims(**q, limit=1)
        if matches:
            channel = matches[0]
        elif censor.censored:
            return ResolveCensoredError(raw_url, next(iter(censor.censored)))
        elif not channel:
            return LookupError(f'Could not find channel in "{raw_url}".')

    if url.has_stream:
        q = url.stream.to_dict()
        if channel is not None:
            q['order_by'] = ['^creation_height']
            q['channel_hash'] = channel.claim_hash
            q['is_signature_valid'] = True
        elif set(q) == {'name'}:
            q['is_controlling'] = True
        matches, censor = resolve_claims(**q, limit=1)
        if matches:
            stream = matches[0]
            stream.channel = channel
            return stream
        elif censor.censored:
            return ResolveCensoredError(raw_url, next(iter(censor.censored)))
        else:
            return LookupError(f'Could not find claim at "{raw_url}".')

    return channel
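`resolve_url` is a two-step lookup: when the URL names a channel, the channel claim is resolved first and then used to constrain the stream query (`channel_hash` plus `is_signature_valid`); parse errors, censored results, and misses come back as exception instances rather than being raised. A usage sketch (the URL is only an example and an active query context is assumed):

    from lbry.db.queries.resolve import resolve

    results = resolve(["lbry://@SomeChannel/some-stream"])
    for url, result in results.items():
        if isinstance(result, Exception):
            print(url, "->", type(result).__name__, result)
        else:
            # result is an Output; .channel is populated when the URL had a channel part
            print(url, "->", result.claim_name)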
472  lbry/db/queries/search.py  Normal file
@@ -0,0 +1,472 @@
|
||||||
|
import struct
|
||||||
|
import logging
|
||||||
|
from decimal import Decimal
|
||||||
|
from binascii import unhexlify
|
||||||
|
from typing import Tuple, List, Optional, Dict
|
||||||
|
|
||||||
|
from sqlalchemy import func, case, text
|
||||||
|
from sqlalchemy.future import select, Select
|
||||||
|
|
||||||
|
from lbry.schema.tags import clean_tags
|
||||||
|
from lbry.schema.result import Censor, Outputs as ResultOutput
|
||||||
|
from lbry.schema.url import normalize_name
|
||||||
|
from lbry.blockchain.transaction import Output
|
||||||
|
|
||||||
|
from ..utils import query
|
||||||
|
from ..query_context import context
|
||||||
|
from ..tables import TX, TXO, Claim, Support, Trend, CensoredClaim
|
||||||
|
from ..constants import (
|
||||||
|
TXO_TYPES, STREAM_TYPES, ATTRIBUTE_ARRAY_MAX_LENGTH,
|
||||||
|
SEARCH_INTEGER_PARAMS, SEARCH_ORDER_FIELDS
|
||||||
|
)
|
||||||
|
|
||||||
|
from .txio import BASE_SELECT_TXO_COLUMNS, rows_to_txos
|
||||||
|
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
BASE_SELECT_SUPPORT_COLUMNS = BASE_SELECT_TXO_COLUMNS + [
|
||||||
|
Support.c.channel_hash,
|
||||||
|
Support.c.is_signature_valid,
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def compat_layer(**constraints):
|
||||||
|
# for old sdk, to be removed later
|
||||||
|
replacements = {
|
||||||
|
"effective_amount": "staked_amount",
|
||||||
|
"trending_mixed": "trend_mixed",
|
||||||
|
"trending_group": "trend_group",
|
||||||
|
"trending_local": "trend_local"
|
||||||
|
}
|
||||||
|
for old_key, new_key in replacements.items():
|
||||||
|
if old_key in constraints:
|
||||||
|
constraints[new_key] = constraints.pop(old_key)
|
||||||
|
order_by = constraints.get("order_by", [])
|
||||||
|
if old_key in order_by:
|
||||||
|
constraints["order_by"] = [order_key if order_key != old_key else new_key for order_key in order_by]
|
||||||
|
return constraints
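For example, a constraint set written against the old column names comes out renamed (a sketch of the intended mapping; entries inside `order_by` are rewritten the same way):

    from lbry.db.queries.search import compat_layer

    old = {"effective_amount": "<1000", "trending_group": 1}
    new = compat_layer(**old)
    # new == {"staked_amount": "<1000", "trend_group": 1}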
|
||||||
|
|
||||||
|
|
||||||
|
def select_supports(cols: List = None, **constraints) -> Select:
|
||||||
|
if cols is None:
|
||||||
|
cols = BASE_SELECT_SUPPORT_COLUMNS
|
||||||
|
joins = Support.join(TXO).join(TX)
|
||||||
|
return query([Support], select(*cols).select_from(joins), **constraints)
|
||||||
|
|
||||||
|
|
||||||
|
def search_supports(**constraints) -> Tuple[List[Output], Optional[int]]:
|
||||||
|
total = None
|
||||||
|
if constraints.pop('include_total', False):
|
||||||
|
total = search_support_count(**constraints)
|
||||||
|
if 'claim_id' in constraints:
|
||||||
|
constraints['claim_hash'] = unhexlify(constraints.pop('claim_id'))[::-1]
|
||||||
|
rows = context().fetchall(select_supports(**constraints))
|
||||||
|
txos = rows_to_txos(rows, include_tx=False)
|
||||||
|
return txos, total
|
||||||
|
|
||||||
|
|
||||||
|
def sum_supports(claim_hash, include_channel_content=False, exclude_own_supports=False) -> Tuple[List[Dict], int]:
|
||||||
|
supporter = Claim.alias("supporter")
|
||||||
|
content = Claim.alias("content")
|
||||||
|
where_condition = (content.c.claim_hash == claim_hash)
|
||||||
|
if include_channel_content:
|
||||||
|
where_condition |= (content.c.channel_hash == claim_hash)
|
||||||
|
support_join_condition = TXO.c.channel_hash == supporter.c.claim_hash
|
||||||
|
if exclude_own_supports:
|
||||||
|
support_join_condition &= TXO.c.channel_hash != claim_hash
|
||||||
|
|
||||||
|
q = select(
|
||||||
|
supporter.c.short_url.label("supporter"),
|
||||||
|
func.sum(TXO.c.amount).label("staked"),
|
||||||
|
).select_from(
|
||||||
|
TXO
|
||||||
|
.join(content, TXO.c.claim_hash == content.c.claim_hash)
|
||||||
|
.join(supporter, support_join_condition)
|
||||||
|
).where(
|
||||||
|
where_condition &
|
||||||
|
(TXO.c.txo_type == TXO_TYPES["support"]) &
|
||||||
|
((TXO.c.address == content.c.address) | ((TXO.c.address != content.c.address) & (TXO.c.spent_height == 0)))
|
||||||
|
).group_by(
|
||||||
|
supporter.c.short_url
|
||||||
|
).order_by(
|
||||||
|
text("staked DESC, supporter ASC")
|
||||||
|
)
|
||||||
|
|
||||||
|
result = context().fetchall(q)
|
||||||
|
total = sum([row['staked'] for row in result])
|
||||||
|
return result, total
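Note that `sum_supports` takes the binary claim hash (not the hex claim id) and returns one row per supporting channel plus the grand total. Roughly (values are illustrative):

    rows, total = sum_supports(claim_hash, include_channel_content=True)
    # rows resembles [{'supporter': '@bigfan#2', 'staked': 50000000}, ...]
    # total == sum(row['staked'] for row in rows)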
|
||||||
|
|
||||||
|
|
||||||
|
def search_support_count(**constraints) -> int:
|
||||||
|
constraints.pop('offset', None)
|
||||||
|
constraints.pop('limit', None)
|
||||||
|
constraints.pop('order_by', None)
|
||||||
|
count = context().fetchall(select_supports([func.count().label('total')], **constraints))
|
||||||
|
return count[0]['total'] or 0
|
||||||
|
|
||||||
|
|
||||||
|
channel_claim = Claim.alias('channel')
|
||||||
|
BASE_SELECT_CLAIM_COLUMNS = BASE_SELECT_TXO_COLUMNS + [
|
||||||
|
Claim.c.activation_height,
|
||||||
|
Claim.c.takeover_height,
|
||||||
|
Claim.c.creation_height,
|
||||||
|
Claim.c.expiration_height,
|
||||||
|
Claim.c.is_controlling,
|
||||||
|
Claim.c.channel_hash,
|
||||||
|
Claim.c.reposted_count,
|
||||||
|
Claim.c.reposted_claim_hash,
|
||||||
|
Claim.c.short_url,
|
||||||
|
Claim.c.signed_claim_count,
|
||||||
|
Claim.c.signed_support_count,
|
||||||
|
Claim.c.staked_amount,
|
||||||
|
Claim.c.staked_support_amount,
|
||||||
|
Claim.c.staked_support_count,
|
||||||
|
Claim.c.is_signature_valid,
|
||||||
|
case([(
|
||||||
|
channel_claim.c.short_url.isnot(None),
|
||||||
|
channel_claim.c.short_url + '/' + Claim.c.short_url
|
||||||
|
)]).label('canonical_url'),
|
||||||
|
func.coalesce(Trend.c.trend_local, 0).label('trend_local'),
|
||||||
|
func.coalesce(Trend.c.trend_mixed, 0).label('trend_mixed'),
|
||||||
|
func.coalesce(Trend.c.trend_global, 0).label('trend_global'),
|
||||||
|
func.coalesce(Trend.c.trend_group, 0).label('trend_group'),
|
||||||
|
CensoredClaim.c.censor_type,
|
||||||
|
CensoredClaim.c.censoring_channel_hash
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def select_claims(cols: List = None, for_count=False, **constraints) -> Select:
|
||||||
|
constraints = compat_layer(**constraints)
|
||||||
|
if cols is None:
|
||||||
|
cols = BASE_SELECT_CLAIM_COLUMNS
|
||||||
|
if 'order_by' in constraints:
|
||||||
|
order_by_parts = constraints['order_by']
|
||||||
|
if isinstance(order_by_parts, str):
|
||||||
|
order_by_parts = [order_by_parts]
|
||||||
|
sql_order_by = []
|
||||||
|
for order_by in order_by_parts:
|
||||||
|
is_asc = order_by.startswith('^')
|
||||||
|
column = order_by[1:] if is_asc else order_by
|
||||||
|
if column not in SEARCH_ORDER_FIELDS:
|
||||||
|
raise NameError(f'{column} is not a valid order_by field')
|
||||||
|
if column == 'name':
|
||||||
|
column = 'claim_name'
|
||||||
|
table = "trend" if column.startswith('trend') else "claim"
|
||||||
|
column = f"{table}.{column}"
|
||||||
|
if column in ('trend.trend_group', 'trend.trend_mixed', 'claim.release_time'):
|
||||||
|
column = f"COALESCE({column}, {-1 * (1<<32)})"
|
||||||
|
sql_order_by.append(
|
||||||
|
f"{column} {'ASC' if is_asc else 'DESC'}"
|
||||||
|
)
|
||||||
|
constraints['order_by'] = sql_order_by
|
||||||
|
|
||||||
|
ops = {'<=': '__lte', '>=': '__gte', '<': '__lt', '>': '__gt'}
|
||||||
|
for constraint in SEARCH_INTEGER_PARAMS:
|
||||||
|
if constraint in constraints:
|
||||||
|
value = constraints.pop(constraint)
|
||||||
|
postfix = ''
|
||||||
|
if isinstance(value, str):
|
||||||
|
if len(value) >= 2 and value[:2] in ops:
|
||||||
|
postfix, value = ops[value[:2]], value[2:]
|
||||||
|
elif len(value) >= 1 and value[0] in ops:
|
||||||
|
postfix, value = ops[value[0]], value[1:]
|
||||||
|
if constraint == 'fee_amount':
|
||||||
|
value = Decimal(value)*1000
|
||||||
|
constraints[f'{constraint}{postfix}'] = int(value)
|
||||||
|
|
||||||
|
if 'sequence' in constraints:
|
||||||
|
constraints['order_by'] = 'activation_height ASC'
|
||||||
|
constraints['offset'] = int(constraints.pop('sequence')) - 1
|
||||||
|
constraints['limit'] = 1
|
||||||
|
if 'amount_order' in constraints:
|
||||||
|
constraints['order_by'] = 'staked_amount DESC'
|
||||||
|
constraints['offset'] = int(constraints.pop('amount_order')) - 1
|
||||||
|
constraints['limit'] = 1
|
||||||
|
|
||||||
|
if 'claim_id' in constraints:
|
||||||
|
claim_id = constraints.pop('claim_id')
|
||||||
|
if len(claim_id) == 40:
|
||||||
|
constraints['claim_id'] = claim_id
|
||||||
|
else:
|
||||||
|
constraints['claim_id__like'] = f'{claim_id[:40]}%'
|
||||||
|
elif 'claim_ids' in constraints:
|
||||||
|
constraints['claim_id__in'] = set(constraints.pop('claim_ids'))
|
||||||
|
|
||||||
|
if 'reposted_claim_id' in constraints:
|
||||||
|
constraints['reposted_claim_hash'] = unhexlify(constraints.pop('reposted_claim_id'))[::-1]
|
||||||
|
|
||||||
|
if 'name' in constraints:
|
||||||
|
constraints['normalized'] = normalize_name(constraints.pop('name'))
|
||||||
|
|
||||||
|
if 'public_key_id' in constraints:
|
||||||
|
constraints['public_key_hash'] = (
|
||||||
|
context().ledger.address_to_hash160(constraints.pop('public_key_id')))
|
||||||
|
if 'channel_id' in constraints:
|
||||||
|
channel_id = constraints.pop('channel_id')
|
||||||
|
if channel_id:
|
||||||
|
if isinstance(channel_id, str):
|
||||||
|
channel_id = [channel_id]
|
||||||
|
constraints['channel_hash__in'] = {
|
||||||
|
unhexlify(cid)[::-1] for cid in channel_id
|
||||||
|
}
|
||||||
|
if 'not_channel_id' in constraints:
|
||||||
|
not_channel_ids = constraints.pop('not_channel_id')
|
||||||
|
if not_channel_ids:
|
||||||
|
not_channel_ids_binary = {
|
||||||
|
unhexlify(ncid)[::-1] for ncid in not_channel_ids
|
||||||
|
}
|
||||||
|
constraints['claim_hash__not_in#not_channel_ids'] = not_channel_ids_binary
|
||||||
|
if constraints.get('has_channel_signature', False):
|
||||||
|
constraints['channel_hash__not_in'] = not_channel_ids_binary
|
||||||
|
else:
|
||||||
|
constraints['null_or_not_channel__or'] = {
|
||||||
|
'signature_valid__is_null': True,
|
||||||
|
'channel_hash__not_in': not_channel_ids_binary
|
||||||
|
}
|
||||||
|
if 'is_signature_valid' in constraints:
|
||||||
|
has_channel_signature = constraints.pop('has_channel_signature', False)
|
||||||
|
is_signature_valid = constraints.pop('is_signature_valid')
|
||||||
|
if has_channel_signature:
|
||||||
|
constraints['is_signature_valid'] = is_signature_valid
|
||||||
|
else:
|
||||||
|
constraints['null_or_signature__or'] = {
|
||||||
|
'is_signature_valid__is_null': True,
|
||||||
|
'is_signature_valid': is_signature_valid
|
||||||
|
}
|
||||||
|
elif constraints.pop('has_channel_signature', False):
|
||||||
|
constraints['is_signature_valid__is_not_null'] = True
|
||||||
|
|
||||||
|
if 'txid' in constraints:
|
||||||
|
tx_hash = unhexlify(constraints.pop('txid'))[::-1]
|
||||||
|
nout = constraints.pop('nout', 0)
|
||||||
|
constraints['txo_hash'] = tx_hash + struct.pack('<I', nout)
|
||||||
|
|
||||||
|
if 'claim_type' in constraints:
|
||||||
|
claim_types = constraints.pop('claim_type')
|
||||||
|
if isinstance(claim_types, str):
|
||||||
|
claim_types = {claim_types}
|
||||||
|
if claim_types:
|
||||||
|
constraints['claim_type__in'] = {
|
||||||
|
TXO_TYPES[claim_type] for claim_type in claim_types
|
||||||
|
}
|
||||||
|
if 'stream_type' in constraints:
|
||||||
|
stream_types = constraints.pop('stream_type')
|
||||||
|
if isinstance(stream_types, str):
|
||||||
|
stream_types = {stream_types}
|
||||||
|
if stream_types:
|
||||||
|
constraints['stream_type__in'] = {
|
||||||
|
STREAM_TYPES[stream_type] for stream_type in stream_types
|
||||||
|
}
|
||||||
|
if 'media_type' in constraints:
|
||||||
|
media_types = constraints.pop('media_type')
|
||||||
|
if isinstance(media_types, str):
|
||||||
|
media_types = {media_types}
|
||||||
|
if media_types:
|
||||||
|
constraints['media_type__in'] = set(media_types)
|
||||||
|
|
||||||
|
if 'fee_currency' in constraints:
|
||||||
|
constraints['fee_currency'] = constraints.pop('fee_currency').lower()
|
||||||
|
|
||||||
|
_apply_constraints_for_array_attributes(constraints, 'tag', clean_tags, for_count)
|
||||||
|
_apply_constraints_for_array_attributes(constraints, 'language', lambda _: _, for_count)
|
||||||
|
_apply_constraints_for_array_attributes(constraints, 'location', lambda _: _, for_count)
|
||||||
|
|
||||||
|
if 'text' in constraints:
|
||||||
|
# TODO: fix
|
||||||
|
constraints["search"] = constraints.pop("text")
|
||||||
|
|
||||||
|
return query(
|
||||||
|
[Claim, TXO],
|
||||||
|
select(*cols)
|
||||||
|
.select_from(
|
||||||
|
Claim.join(TXO).join(TX)
|
||||||
|
.join(Trend, Trend.c.claim_hash == Claim.c.claim_hash, isouter=True)
|
||||||
|
.join(channel_claim, Claim.c.channel_hash == channel_claim.c.claim_hash, isouter=True)
|
||||||
|
.join(
|
||||||
|
CensoredClaim,
|
||||||
|
(CensoredClaim.c.claim_hash == Claim.c.claim_hash) |
|
||||||
|
(CensoredClaim.c.claim_hash == Claim.c.reposted_claim_hash) |
|
||||||
|
(CensoredClaim.c.claim_hash == Claim.c.channel_hash),
|
||||||
|
isouter=True
|
||||||
|
)
|
||||||
|
), **constraints
|
||||||
|
)
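`select_claims` accepts shorthand comparison prefixes on the integer search parameters (`<`, `>`, `<=`, `>=` map to `__lt`, `__gt`, `__lte`, `__gte`), short claim-id prefixes, and `^`-prefixed ascending ordering. A sketch of typical constraint shapes (values are examples only and an active query context is assumed):

    q = select_claims(
        claim_type='stream',
        any_tag=['science'],
        height='>900000',      # becomes height__gt=900000, assuming 'height' is in SEARCH_INTEGER_PARAMS
        claim_id='abc',        # short prefix becomes claim_id__like='abc%'
        order_by=['^name'],    # '^' means ascending; 'name' is mapped to claim.claim_name
        limit=20,
    )
    rows = context().fetchall(q)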
|
||||||
|
|
||||||
|
|
||||||
|
def protobuf_search_claims(**constraints) -> str:
|
||||||
|
txos, _, censor = search_claims(**constraints)
|
||||||
|
return ResultOutput.to_base64(txos, [], blocked=censor)
|
||||||
|
|
||||||
|
|
||||||
|
def search_claims(**constraints) -> Tuple[List[Output], Optional[int], Optional[Censor]]:
|
||||||
|
ctx = context()
|
||||||
|
search_censor = ctx.get_search_censor()
|
||||||
|
|
||||||
|
total = None
|
||||||
|
if constraints.pop('include_total', False):
|
||||||
|
total = search_claim_count(**constraints)
|
||||||
|
|
||||||
|
constraints['offset'] = abs(constraints.get('offset', 0))
|
||||||
|
constraints['limit'] = min(abs(constraints.get('limit', 10)), 50)
|
||||||
|
|
||||||
|
channel_url = constraints.pop('channel', None)
|
||||||
|
if channel_url:
|
||||||
|
from .resolve import resolve_url # pylint: disable=import-outside-toplevel
|
||||||
|
channel = resolve_url(channel_url)
|
||||||
|
if isinstance(channel, Output):
|
||||||
|
constraints['channel_hash'] = channel.claim_hash
|
||||||
|
else:
|
||||||
|
return [], total, search_censor
|
||||||
|
|
||||||
|
rows = ctx.fetchall(select_claims(**constraints))
|
||||||
|
rows = search_censor.apply(rows)
|
||||||
|
txos = rows_to_txos(rows, include_tx=False)
|
||||||
|
annotate_with_channels(txos)
|
||||||
|
return txos, total, search_censor
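`search_claims` caps `limit` at 50, resolves a `channel=<url>` constraint into a `channel_hash`, applies the search censor, and attaches the signing channel to each result. A hedged usage sketch, assuming an active query context:

    txos, total, censor = search_claims(
        channel='lbry://@SomeChannel',  # resolved to a channel_hash internally
        claim_type='stream',
        include_total=True,
        limit=20,                       # anything above 50 is clamped
    )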
|
||||||
|
|
||||||
|
|
||||||
|
def annotate_with_channels(txos):
|
||||||
|
channel_hashes = set()
|
||||||
|
for txo in txos:
|
||||||
|
if txo.can_decode_claim and txo.claim.is_signed:
|
||||||
|
channel_hashes.add(txo.claim.signing_channel_hash)
|
||||||
|
if channel_hashes:
|
||||||
|
rows = context().fetchall(select_claims(claim_hash__in=channel_hashes))
|
||||||
|
channels = {
|
||||||
|
txo.claim_hash: txo for txo in
|
||||||
|
rows_to_txos(rows, include_tx=False)
|
||||||
|
}
|
||||||
|
for txo in txos:
|
||||||
|
if txo.can_decode_claim and txo.claim.is_signed:
|
||||||
|
txo.channel = channels.get(txo.claim.signing_channel_hash, None)
|
||||||
|
|
||||||
|
|
||||||
|
def search_claim_count(**constraints) -> int:
|
||||||
|
constraints.pop('offset', None)
|
||||||
|
constraints.pop('limit', None)
|
||||||
|
constraints.pop('order_by', None)
|
||||||
|
count = context().fetchall(select_claims([func.count().label('total')], **constraints))
|
||||||
|
return count[0]['total'] or 0
|
||||||
|
|
||||||
|
|
||||||
|
CLAIM_HASH_OR_REPOST_HASH_SQL = f"""
|
||||||
|
CASE WHEN claim.claim_type = {TXO_TYPES['repost']}
|
||||||
|
THEN claim.reposted_claim_hash
|
||||||
|
ELSE claim.claim_hash
|
||||||
|
END
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def _apply_constraints_for_array_attributes(constraints, attr, cleaner, for_count=False):
|
||||||
|
any_items = set(cleaner(constraints.pop(f'any_{attr}', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH])
|
||||||
|
all_items = set(cleaner(constraints.pop(f'all_{attr}', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH])
|
||||||
|
not_items = set(cleaner(constraints.pop(f'not_{attr}', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH])
|
||||||
|
|
||||||
|
all_items = {item for item in all_items if item not in not_items}
|
||||||
|
any_items = {item for item in any_items if item not in not_items}
|
||||||
|
|
||||||
|
any_queries = {}
|
||||||
|
|
||||||
|
# if attr == 'tag':
|
||||||
|
# common_tags = any_items & COMMON_TAGS.keys()
|
||||||
|
# if common_tags:
|
||||||
|
# any_items -= common_tags
|
||||||
|
# if len(common_tags) < 5:
|
||||||
|
# for item in common_tags:
|
||||||
|
# index_name = COMMON_TAGS[item]
|
||||||
|
# any_queries[f'#_common_tag_{index_name}'] = f"""
|
||||||
|
# EXISTS(
|
||||||
|
# SELECT 1 FROM tag INDEXED BY tag_{index_name}_idx
|
||||||
|
# WHERE {CLAIM_HASH_OR_REPOST_HASH_SQL}=tag.claim_hash
|
||||||
|
# AND tag = '{item}'
|
||||||
|
# )
|
||||||
|
# """
|
||||||
|
# elif len(common_tags) >= 5:
|
||||||
|
# constraints.update({
|
||||||
|
# f'$any_common_tag{i}': item for i, item in enumerate(common_tags)
|
||||||
|
# })
|
||||||
|
# values = ', '.join(
|
||||||
|
# f':$any_common_tag{i}' for i in range(len(common_tags))
|
||||||
|
# )
|
||||||
|
# any_queries[f'#_any_common_tags'] = f"""
|
||||||
|
# EXISTS(
|
||||||
|
# SELECT 1 FROM tag WHERE {CLAIM_HASH_OR_REPOST_HASH_SQL}=tag.claim_hash
|
||||||
|
# AND tag IN ({values})
|
||||||
|
# )
|
||||||
|
# """
|
||||||
|
|
||||||
|
if any_items:
|
||||||
|
|
||||||
|
constraints.update({
|
||||||
|
f'$any_{attr}{i}': item for i, item in enumerate(any_items)
|
||||||
|
})
|
||||||
|
values = ', '.join(
|
||||||
|
f':$any_{attr}{i}' for i in range(len(any_items))
|
||||||
|
)
|
||||||
|
if for_count or attr == 'tag':
|
||||||
|
any_queries[f'#_any_{attr}'] = f"""
|
||||||
|
{CLAIM_HASH_OR_REPOST_HASH_SQL} IN (
|
||||||
|
SELECT claim_hash FROM {attr} WHERE {attr} IN ({values})
|
||||||
|
)
|
||||||
|
"""
|
||||||
|
else:
|
||||||
|
any_queries[f'#_any_{attr}'] = f"""
|
||||||
|
EXISTS(
|
||||||
|
SELECT 1 FROM {attr} WHERE
|
||||||
|
{CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash
|
||||||
|
AND {attr} IN ({values})
|
||||||
|
)
|
||||||
|
"""
|
||||||
|
|
||||||
|
if len(any_queries) == 1:
|
||||||
|
constraints.update(any_queries)
|
||||||
|
elif len(any_queries) > 1:
|
||||||
|
constraints[f'ORed_{attr}_queries__any'] = any_queries
|
||||||
|
|
||||||
|
if all_items:
|
||||||
|
constraints[f'$all_{attr}_count'] = len(all_items)
|
||||||
|
constraints.update({
|
||||||
|
f'$all_{attr}{i}': item for i, item in enumerate(all_items)
|
||||||
|
})
|
||||||
|
values = ', '.join(
|
||||||
|
f':$all_{attr}{i}' for i in range(len(all_items))
|
||||||
|
)
|
||||||
|
if for_count:
|
||||||
|
constraints[f'#_all_{attr}'] = f"""
|
||||||
|
{CLAIM_HASH_OR_REPOST_HASH_SQL} IN (
|
||||||
|
SELECT claim_hash FROM {attr} WHERE {attr} IN ({values})
|
||||||
|
GROUP BY claim_hash HAVING COUNT({attr}) = :$all_{attr}_count
|
||||||
|
)
|
||||||
|
"""
|
||||||
|
else:
|
||||||
|
constraints[f'#_all_{attr}'] = f"""
|
||||||
|
{len(all_items)}=(
|
||||||
|
SELECT count(*) FROM {attr} WHERE
|
||||||
|
{CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash
|
||||||
|
AND {attr} IN ({values})
|
||||||
|
)
|
||||||
|
"""
|
||||||
|
|
||||||
|
if not_items:
|
||||||
|
constraints.update({
|
||||||
|
f'$not_{attr}{i}': item for i, item in enumerate(not_items)
|
||||||
|
})
|
||||||
|
values = ', '.join(
|
||||||
|
f':$not_{attr}{i}' for i in range(len(not_items))
|
||||||
|
)
|
||||||
|
if for_count:
|
||||||
|
constraints[f'#_not_{attr}'] = f"""
|
||||||
|
{CLAIM_HASH_OR_REPOST_HASH_SQL} NOT IN (
|
||||||
|
SELECT claim_hash FROM {attr} WHERE {attr} IN ({values})
|
||||||
|
)
|
||||||
|
"""
|
||||||
|
else:
|
||||||
|
constraints[f'#_not_{attr}'] = f"""
|
||||||
|
NOT EXISTS(
|
||||||
|
SELECT 1 FROM {attr} WHERE
|
||||||
|
{CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash
|
||||||
|
AND {attr} IN ({values})
|
||||||
|
)
|
||||||
|
"""
|
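`_apply_constraints_for_array_attributes` turns `any_*`/`all_*`/`not_*` lists into parameterized subqueries against the side tables (`tag`, `language`, `location`), always matching against the reposted claim when the row is a repost. Roughly, for tags (a sketch of the resulting constraint keys, not literal output):

    from lbry.schema.tags import clean_tags

    constraints = {'any_tag': ['science', 'space'], 'not_tag': ['nsfw']}
    _apply_constraints_for_array_attributes(constraints, 'tag', clean_tags, for_count=False)
    # constraints now holds bound parameters plus generated SQL fragments, e.g.:
    #   '$any_tag0': 'science', '$any_tag1': 'space', '$not_tag0': 'nsfw',
    #   '#_any_tag': '... IN (SELECT claim_hash FROM tag WHERE tag IN (:$any_tag0, :$any_tag1))',
    #   '#_not_tag': 'NOT EXISTS(SELECT 1 FROM tag WHERE ... AND tag IN (:$not_tag0))'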
643  lbry/db/queries/txio.py  Normal file
@@ -0,0 +1,643 @@
|
||||||
|
import logging
|
||||||
|
from datetime import date
|
||||||
|
from typing import Tuple, List, Optional, Union
|
||||||
|
|
||||||
|
from sqlalchemy import union, func, text, between, distinct, case, false
|
||||||
|
from sqlalchemy.future import select, Select
|
||||||
|
|
||||||
|
from ...blockchain.transaction import (
|
||||||
|
Transaction, Output, OutputScript, TXRefImmutable
|
||||||
|
)
|
||||||
|
from ..tables import (
|
||||||
|
TX, TXO, TXI, txi_join_account, txo_join_account,
|
||||||
|
Claim, Support, AccountAddress
|
||||||
|
)
|
||||||
|
from ..utils import query, in_account_ids
|
||||||
|
from ..query_context import context
|
||||||
|
from ..constants import TXO_TYPES, CLAIM_TYPE_CODES, MAX_QUERY_VARIABLES
|
||||||
|
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
minimum_txo_columns = (
|
||||||
|
TXO.c.amount, TXO.c.position.label('txo_position'),
|
||||||
|
TX.c.tx_hash, TX.c.height, TX.c.timestamp,
|
||||||
|
func.substr(TX.c.raw, TXO.c.script_offset + 1, TXO.c.script_length).label('src'),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def row_to_txo(row):
|
||||||
|
return Output(
|
||||||
|
amount=row.amount,
|
||||||
|
script=OutputScript(row.src),
|
||||||
|
tx_ref=TXRefImmutable.from_hash(row.tx_hash, row.height, row.timestamp),
|
||||||
|
position=row.txo_position,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def where_txo_type_in(txo_type: Optional[Union[tuple, int]] = None):
|
||||||
|
if txo_type is not None:
|
||||||
|
if isinstance(txo_type, int):
|
||||||
|
return TXO.c.txo_type == txo_type
|
||||||
|
assert len(txo_type) > 0
|
||||||
|
if len(txo_type) == 1:
|
||||||
|
return TXO.c.txo_type == txo_type[0]
|
||||||
|
else:
|
||||||
|
return TXO.c.txo_type.in_(txo_type)
|
||||||
|
return TXO.c.txo_type.in_(CLAIM_TYPE_CODES)
|
||||||
|
|
||||||
|
|
||||||
|
def where_unspent_txos(
|
||||||
|
txo_types: Tuple[int, ...],
|
||||||
|
blocks: Tuple[int, int] = None,
|
||||||
|
missing_in_supports_table: bool = False,
|
||||||
|
missing_in_claims_table: bool = False,
|
||||||
|
missing_or_stale_in_claims_table: bool = False,
|
||||||
|
):
|
||||||
|
condition = where_txo_type_in(txo_types) & (TXO.c.spent_height == 0)
|
||||||
|
if blocks is not None:
|
||||||
|
condition &= between(TXO.c.height, *blocks)
|
||||||
|
if missing_in_supports_table:
|
||||||
|
condition &= TXO.c.txo_hash.notin_(select(Support.c.txo_hash))
|
||||||
|
elif missing_or_stale_in_claims_table:
|
||||||
|
condition &= TXO.c.txo_hash.notin_(select(Claim.c.txo_hash))
|
||||||
|
elif missing_in_claims_table:
|
||||||
|
condition &= TXO.c.claim_hash.notin_(select(Claim.c.claim_hash))
|
||||||
|
return condition
|
||||||
|
|
||||||
|
|
||||||
|
def where_abandoned_claims():
|
||||||
|
return Claim.c.claim_hash.notin_(
|
||||||
|
select(TXO.c.claim_hash).where(where_unspent_txos(CLAIM_TYPE_CODES))
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def count_abandoned_claims():
|
||||||
|
return context().fetchtotal(where_abandoned_claims())
|
||||||
|
|
||||||
|
|
||||||
|
def where_abandoned_supports():
|
||||||
|
return Support.c.txo_hash.notin_(
|
||||||
|
select(TXO.c.txo_hash).where(where_unspent_txos(TXO_TYPES['support']))
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def count_abandoned_supports():
|
||||||
|
return context().fetchtotal(where_abandoned_supports())
|
||||||
|
|
||||||
|
|
||||||
|
def count_unspent_txos(
|
||||||
|
txo_types: Tuple[int, ...],
|
||||||
|
blocks: Tuple[int, int] = None,
|
||||||
|
missing_in_supports_table: bool = False,
|
||||||
|
missing_in_claims_table: bool = False,
|
||||||
|
missing_or_stale_in_claims_table: bool = False,
|
||||||
|
):
|
||||||
|
return context().fetchtotal(
|
||||||
|
where_unspent_txos(
|
||||||
|
txo_types, blocks,
|
||||||
|
missing_in_supports_table,
|
||||||
|
missing_in_claims_table,
|
||||||
|
missing_or_stale_in_claims_table,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def distribute_unspent_txos(
|
||||||
|
txo_types: Tuple[int, ...],
|
||||||
|
blocks: Tuple[int, int] = None,
|
||||||
|
missing_in_supports_table: bool = False,
|
||||||
|
missing_in_claims_table: bool = False,
|
||||||
|
missing_or_stale_in_claims_table: bool = False,
|
||||||
|
number_of_buckets: int = 10
|
||||||
|
) -> Tuple[int, List[Tuple[int, int]]]:
|
||||||
|
chunks = (
|
||||||
|
select(func.ntile(number_of_buckets).over(order_by=TXO.c.height).label('chunk'), TXO.c.height)
|
||||||
|
.where(
|
||||||
|
where_unspent_txos(
|
||||||
|
txo_types, blocks,
|
||||||
|
missing_in_supports_table,
|
||||||
|
missing_in_claims_table,
|
||||||
|
missing_or_stale_in_claims_table,
|
||||||
|
)
|
||||||
|
).cte('chunks')
|
||||||
|
)
|
||||||
|
sql = (
|
||||||
|
select(
|
||||||
|
func.count('*').label('items'),
|
||||||
|
func.min(chunks.c.height).label('start_height'),
|
||||||
|
func.max(chunks.c.height).label('end_height'),
|
||||||
|
).group_by(chunks.c.chunk).order_by(chunks.c.chunk)
|
||||||
|
)
|
||||||
|
total = 0
|
||||||
|
buckets = []
|
||||||
|
for bucket in context().fetchall(sql):
|
||||||
|
total += bucket['items']
|
||||||
|
if len(buckets) > 0:
|
||||||
|
if buckets[-1][-1] == bucket['start_height']:
|
||||||
|
if bucket['start_height'] == bucket['end_height']:
|
||||||
|
continue
|
||||||
|
bucket['start_height'] += 1
|
||||||
|
buckets.append((bucket['start_height'], bucket['end_height']))
|
||||||
|
return total, buckets
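`distribute_unspent_txos` uses `ntile()` to split the matching TXOs into roughly equal-sized height ranges that the sync pipeline can process in parallel, nudging adjacent buckets so their ranges do not overlap. A sketch of how a caller might fan the buckets out, assuming an active query context (`schedule_claim_sync` is hypothetical; the numbers are illustrative):

    total, buckets = distribute_unspent_txos(
        CLAIM_TYPE_CODES, blocks=(0, 900000),
        missing_in_claims_table=True, number_of_buckets=10,
    )
    # e.g. total == 1_250_000 and buckets == [(0, 210000), (210001, 380000), ...]
    for start_height, end_height in buckets:
        schedule_claim_sync(start_height, end_height)  # hypothetical worker dispatch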
|
||||||
|
|
||||||
|
|
||||||
|
def claims_with_changed_supports(blocks: Optional[Tuple[int, int]]) -> Select:
|
||||||
|
has_changed_supports = (
|
||||||
|
select(Claim.c.claim_hash.label("claim_hash"), Claim.c.channel_hash.label("channel_hash"))
|
||||||
|
.join(Claim, Claim.c.claim_hash == TXO.c.claim_hash)
|
||||||
|
.where(
|
||||||
|
(TXO.c.txo_type == TXO_TYPES['support']) &
|
||||||
|
(between(TXO.c.height, blocks[0], blocks[-1]) | between(TXO.c.spent_height, blocks[0], blocks[-1]))
|
||||||
|
)
|
||||||
|
.cte("has_changed_supports")
|
||||||
|
)
|
||||||
|
|
||||||
|
return (
|
||||||
|
select(has_changed_supports.c.claim_hash.label("claim_hash"))
|
||||||
|
.union_all( # UNION ALL is faster than UNION because it does not remove duplicates
|
||||||
|
select(has_changed_supports.c.channel_hash)
|
||||||
|
.where(has_changed_supports.c.channel_hash.isnot(None))
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def where_claims_with_changed_supports(blocks: Optional[Tuple[int, int]]) -> Select:
|
||||||
|
return Claim.c.claim_hash.in_(
|
||||||
|
claims_with_changed_supports(blocks)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def count_claims_with_changed_supports(blocks: Optional[Tuple[int, int]]) -> int:
|
||||||
|
sub_query = claims_with_changed_supports(blocks).subquery()
|
||||||
|
sql = select(func.count(distinct(sub_query.c.claim_hash)).label('total')).select_from(sub_query)
|
||||||
|
return context().fetchone(sql)['total']
|
||||||
|
|
||||||
|
|
||||||
|
def where_changed_content_txos(blocks: Optional[Tuple[int, int]]):
|
||||||
|
return (
|
||||||
|
(TXO.c.channel_hash.isnot(None)) & (
|
||||||
|
between(TXO.c.height, blocks[0], blocks[-1]) |
|
||||||
|
between(TXO.c.spent_height, blocks[0], blocks[-1])
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def where_channels_with_changed_content(blocks: Optional[Tuple[int, int]]):
|
||||||
|
return Claim.c.claim_hash.in_(
|
||||||
|
select(TXO.c.channel_hash).where(
|
||||||
|
where_changed_content_txos(blocks)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def count_channels_with_changed_content(blocks: Optional[Tuple[int, int]]):
|
||||||
|
sql = (
|
||||||
|
select(func.count(distinct(TXO.c.channel_hash)).label('total'))
|
||||||
|
.where(where_changed_content_txos(blocks))
|
||||||
|
)
|
||||||
|
return context().fetchone(sql)['total']
|
||||||
|
|
||||||
|
|
||||||
|
def where_changed_repost_txos(blocks: Optional[Tuple[int, int]]):
|
||||||
|
return (
|
||||||
|
(TXO.c.txo_type == TXO_TYPES['repost']) & (
|
||||||
|
between(TXO.c.height, blocks[0], blocks[-1]) |
|
||||||
|
between(TXO.c.spent_height, blocks[0], blocks[-1])
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def where_claims_with_changed_reposts(blocks: Optional[Tuple[int, int]]):
|
||||||
|
return Claim.c.claim_hash.in_(
|
||||||
|
select(TXO.c.reposted_claim_hash).where(
|
||||||
|
where_changed_repost_txos(blocks)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def count_claims_with_changed_reposts(blocks: Optional[Tuple[int, int]]):
|
||||||
|
sql = (
|
||||||
|
select(func.count(distinct(TXO.c.reposted_claim_hash)).label('total'))
|
||||||
|
.where(where_changed_repost_txos(blocks))
|
||||||
|
)
|
||||||
|
return context().fetchone(sql)['total']
|
||||||
|
|
||||||
|
|
||||||
|
def select_transactions(cols, account_ids=None, **constraints):
|
||||||
|
s: Select = select(*cols).select_from(TX)
|
||||||
|
if not {'tx_hash', 'tx_hash__in'}.intersection(constraints):
|
||||||
|
assert account_ids, (
|
||||||
|
"'accounts' argument required when "
|
||||||
|
"no 'tx_hash' constraint is present"
|
||||||
|
)
|
||||||
|
where = in_account_ids(account_ids)
|
||||||
|
tx_hashes = union(
|
||||||
|
select(TXO.c.tx_hash).select_from(txo_join_account).where(where),
|
||||||
|
select(TXI.c.tx_hash).select_from(txi_join_account).where(where)
|
||||||
|
)
|
||||||
|
s = s.where(TX.c.tx_hash.in_(tx_hashes))
|
||||||
|
return context().fetchall(query([TX], s, **constraints))
|
||||||
|
|
||||||
|
|
||||||
|
TXO_NOT_MINE = Output(None, None, is_my_output=False)
|
||||||
|
|
||||||
|
|
||||||
|
def get_raw_transactions(tx_hashes):
|
||||||
|
return context().fetchall(
|
||||||
|
select(TX.c.tx_hash, TX.c.raw).where(TX.c.tx_hash.in_(tx_hashes))
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def get_transactions(include_total=False, **constraints) -> Tuple[List[Transaction], Optional[int]]:
|
||||||
|
account_ids = constraints.pop('account_ids', None)
|
||||||
|
include_is_my_input = constraints.pop('include_is_my_input', False)
|
||||||
|
include_is_my_output = constraints.pop('include_is_my_output', False)
|
||||||
|
|
||||||
|
tx_rows = select_transactions(
|
||||||
|
[TX.c.tx_hash, TX.c.raw, TX.c.height, TX.c.position, TX.c.timestamp, TX.c.is_verified],
|
||||||
|
order_by=constraints.pop('order_by', ["height=0 DESC", "height DESC", "position DESC"]),
|
||||||
|
account_ids=account_ids,
|
||||||
|
**constraints
|
||||||
|
)
|
||||||
|
|
||||||
|
txids, txs, txi_txoids = [], [], []
|
||||||
|
for row in tx_rows:
|
||||||
|
txids.append(row['tx_hash'])
|
||||||
|
txs.append(Transaction(
|
||||||
|
raw=row['raw'], height=row['height'], position=row['position'],
|
||||||
|
timestamp=row['timestamp'], is_verified=bool(row['is_verified'])
|
||||||
|
))
|
||||||
|
for txi in txs[-1].inputs:
|
||||||
|
txi_txoids.append(txi.txo_ref.hash)
|
||||||
|
|
||||||
|
annotated_txos = {}
|
||||||
|
for offset in range(0, len(txids), MAX_QUERY_VARIABLES):
|
||||||
|
annotated_txos.update({
|
||||||
|
txo.id: txo for txo in
|
||||||
|
get_txos(
|
||||||
|
wallet_account_ids=account_ids,
|
||||||
|
tx_hash__in=txids[offset:offset + MAX_QUERY_VARIABLES], order_by='txo.tx_hash',
|
||||||
|
include_is_my_input=include_is_my_input,
|
||||||
|
include_is_my_output=include_is_my_output,
|
||||||
|
)[0]
|
||||||
|
})
|
||||||
|
|
||||||
|
referenced_txos = {}
|
||||||
|
for offset in range(0, len(txi_txoids), MAX_QUERY_VARIABLES):
|
||||||
|
referenced_txos.update({
|
||||||
|
txo.id: txo for txo in
|
||||||
|
get_txos(
|
||||||
|
wallet_account_ids=account_ids,
|
||||||
|
txo_hash__in=txi_txoids[offset:offset + MAX_QUERY_VARIABLES], order_by='txo.txo_hash',
|
||||||
|
include_is_my_output=include_is_my_output,
|
||||||
|
)[0]
|
||||||
|
})
|
||||||
|
|
||||||
|
for tx in txs:
|
||||||
|
for txi in tx.inputs:
|
||||||
|
txo = referenced_txos.get(txi.txo_ref.id)
|
||||||
|
if txo:
|
||||||
|
txi.txo_ref = txo.ref
|
||||||
|
for txo in tx.outputs:
|
||||||
|
_txo = annotated_txos.get(txo.id)
|
||||||
|
if _txo:
|
||||||
|
txo.update_annotations(_txo)
|
||||||
|
else:
|
||||||
|
txo.update_annotations(TXO_NOT_MINE)
|
||||||
|
|
||||||
|
for tx in txs:
|
||||||
|
txos = tx.outputs
|
||||||
|
if len(txos) >= 2 and txos[1].can_decode_purchase_data:
|
||||||
|
txos[0].purchase = txos[1]
|
||||||
|
|
||||||
|
return txs, get_transaction_count(**constraints) if include_total else None
|
||||||
|
|
||||||
|
|
||||||
|
def get_transaction_count(**constraints):
|
||||||
|
constraints.pop('wallet', None)
|
||||||
|
constraints.pop('offset', None)
|
||||||
|
constraints.pop('limit', None)
|
||||||
|
constraints.pop('order_by', None)
|
||||||
|
count = select_transactions([func.count().label('total')], **constraints)
|
||||||
|
return count[0]['total'] or 0
|
||||||
|
|
||||||
|
|
||||||
|
BASE_SELECT_TXO_COLUMNS = [
|
||||||
|
TX.c.tx_hash, TX.c.raw, TX.c.height, TX.c.position.label('tx_position'),
|
||||||
|
TX.c.is_verified, TX.c.timestamp,
|
||||||
|
TXO.c.txo_type, TXO.c.position.label('txo_position'), TXO.c.amount, TXO.c.spent_height,
|
||||||
|
TXO.c.script_offset, TXO.c.script_length,
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def select_txos(
|
||||||
|
cols=None, account_ids=None, is_my_input=None,
|
||||||
|
is_my_output=True, is_my_input_or_output=None, exclude_internal_transfers=False,
|
||||||
|
include_is_my_input=False, claim_id_not_in_claim_table=None,
|
||||||
|
txo_id_not_in_claim_table=None, txo_id_not_in_support_table=None,
|
||||||
|
**constraints
|
||||||
|
) -> Select:
|
||||||
|
if cols is None:
|
||||||
|
cols = BASE_SELECT_TXO_COLUMNS
|
||||||
|
s: Select = select(*cols)
|
||||||
|
if account_ids:
|
||||||
|
my_addresses = select(AccountAddress.c.address).where(in_account_ids(account_ids))
|
||||||
|
if is_my_input_or_output:
|
||||||
|
include_is_my_input = True
|
||||||
|
s = s.where(
|
||||||
|
TXO.c.address.in_(my_addresses) | (
|
||||||
|
(TXI.c.address.isnot(None)) &
|
||||||
|
(TXI.c.address.in_(my_addresses))
|
||||||
|
)
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
if is_my_output:
|
||||||
|
s = s.where(TXO.c.address.in_(my_addresses))
|
||||||
|
elif is_my_output is False:
|
||||||
|
s = s.where(TXO.c.address.notin_(my_addresses))
|
||||||
|
if is_my_input:
|
||||||
|
include_is_my_input = True
|
||||||
|
s = s.where(
|
||||||
|
(TXI.c.address.isnot(None)) &
|
||||||
|
(TXI.c.address.in_(my_addresses))
|
||||||
|
)
|
||||||
|
elif is_my_input is False:
|
||||||
|
include_is_my_input = True
|
||||||
|
s = s.where(
|
||||||
|
(TXI.c.address.is_(None)) |
|
||||||
|
(TXI.c.address.notin_(my_addresses))
|
||||||
|
)
|
||||||
|
if exclude_internal_transfers:
|
||||||
|
include_is_my_input = True
|
||||||
|
s = s.where(
|
||||||
|
(TXO.c.txo_type != TXO_TYPES['other']) |
|
||||||
|
(TXO.c.address.notin_(my_addresses)) |
|
||||||
|
(TXI.c.address.is_(None)) |
|
||||||
|
(TXI.c.address.notin_(my_addresses))
|
||||||
|
)
|
||||||
|
joins = TXO.join(TX)
|
||||||
|
if constraints.pop('is_spent', None) is False:
|
||||||
|
s = s.where((TXO.c.spent_height == 0) & (TXO.c.is_reserved == false()))
|
||||||
|
if include_is_my_input:
|
||||||
|
joins = joins.join(TXI, (TXI.c.position == 0) & (TXI.c.tx_hash == TXO.c.tx_hash), isouter=True)
|
||||||
|
if claim_id_not_in_claim_table:
|
||||||
|
s = s.where(TXO.c.claim_hash.notin_(select(Claim.c.claim_hash)))
|
||||||
|
elif txo_id_not_in_claim_table:
|
||||||
|
s = s.where(TXO.c.txo_hash.notin_(select(Claim.c.txo_hash)))
|
||||||
|
elif txo_id_not_in_support_table:
|
||||||
|
s = s.where(TXO.c.txo_hash.notin_(select(Support.c.txo_hash)))
|
||||||
|
return query([TXO, TX], s.select_from(joins), **constraints)
|
||||||
|
|
||||||
|
|
||||||
|
META_ATTRS = (
|
||||||
|
'activation_height', 'takeover_height', 'creation_height', 'staked_amount',
|
||||||
|
'short_url', 'canonical_url', 'staked_support_amount', 'staked_support_count',
|
||||||
|
'signed_claim_count', 'signed_support_count', 'is_signature_valid',
|
||||||
|
'trend_group', 'trend_mixed', 'trend_local', 'trend_global',
|
||||||
|
'reposted_count', 'expiration_height',
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def rows_to_txos(rows: List[dict], include_tx=True) -> List[Output]:
|
||||||
|
txos = []
|
||||||
|
tx_cache = {}
|
||||||
|
for row in rows:
|
||||||
|
if include_tx:
|
||||||
|
if row['tx_hash'] not in tx_cache:
|
||||||
|
tx_cache[row['tx_hash']] = Transaction(
|
||||||
|
row['raw'], height=row['height'], position=row['tx_position'],
|
||||||
|
timestamp=row['timestamp'],
|
||||||
|
is_verified=bool(row['is_verified']),
|
||||||
|
)
|
||||||
|
txo = tx_cache[row['tx_hash']].outputs[row['txo_position']]
|
||||||
|
else:
|
||||||
|
source = row['raw'][row['script_offset']:row['script_offset']+row['script_length']]
|
||||||
|
txo = Output(
|
||||||
|
amount=row['amount'],
|
||||||
|
script=OutputScript(source),
|
||||||
|
tx_ref=TXRefImmutable.from_hash(row['tx_hash'], row['height'], row['timestamp']),
|
||||||
|
position=row['txo_position'],
|
||||||
|
)
|
||||||
|
txo.spent_height = row['spent_height']
|
||||||
|
if 'is_my_input' in row:
|
||||||
|
txo.is_my_input = bool(row['is_my_input'])
|
||||||
|
if 'is_my_output' in row:
|
||||||
|
txo.is_my_output = bool(row['is_my_output'])
|
||||||
|
if 'is_my_input' in row and 'is_my_output' in row:
|
||||||
|
if txo.is_my_input and txo.is_my_output and row['txo_type'] == TXO_TYPES['other']:
|
||||||
|
txo.is_internal_transfer = True
|
||||||
|
else:
|
||||||
|
txo.is_internal_transfer = False
|
||||||
|
if 'received_tips' in row:
|
||||||
|
txo.received_tips = row['received_tips']
|
||||||
|
for attr in META_ATTRS:
|
||||||
|
if attr in row:
|
||||||
|
txo.meta[attr] = row[attr]
|
||||||
|
txos.append(txo)
|
||||||
|
return txos
|
||||||
|
|
||||||
|
|
||||||
|
def get_txos(no_tx=False, include_total=False, **constraints) -> Tuple[List[Output], Optional[int]]:
|
||||||
|
wallet_account_ids = constraints.pop('wallet_account_ids', [])
|
||||||
|
include_is_my_input = constraints.get('include_is_my_input', False)
|
||||||
|
include_is_my_output = constraints.pop('include_is_my_output', False)
|
||||||
|
include_received_tips = constraints.pop('include_received_tips', False)
|
||||||
|
|
||||||
|
select_columns = BASE_SELECT_TXO_COLUMNS + [
|
||||||
|
TXO.c.claim_name
|
||||||
|
]
|
||||||
|
|
||||||
|
my_accounts = None
|
||||||
|
if wallet_account_ids:
|
||||||
|
my_accounts = select(AccountAddress.c.address).where(in_account_ids(wallet_account_ids))
|
||||||
|
|
||||||
|
if include_is_my_output and my_accounts is not None:
|
||||||
|
if constraints.get('is_my_output', None) in (True, False):
|
||||||
|
select_columns.append(text(f"{1 if constraints['is_my_output'] else 0} AS is_my_output"))
|
||||||
|
else:
|
||||||
|
select_columns.append(TXO.c.address.in_(my_accounts).label('is_my_output'))
|
||||||
|
|
||||||
|
if include_is_my_input and my_accounts is not None:
|
||||||
|
if constraints.get('is_my_input', None) in (True, False):
|
||||||
|
select_columns.append(text(f"{1 if constraints['is_my_input'] else 0} AS is_my_input"))
|
||||||
|
else:
|
||||||
|
select_columns.append((
|
||||||
|
(TXI.c.address.isnot(None)) &
|
||||||
|
(TXI.c.address.in_(my_accounts))
|
||||||
|
).label('is_my_input'))
|
||||||
|
|
||||||
|
if include_received_tips:
|
||||||
|
support = TXO.alias('support')
|
||||||
|
select_columns.append(
|
||||||
|
select(func.coalesce(func.sum(support.c.amount), 0))
|
||||||
|
.select_from(support).where(
|
||||||
|
(support.c.claim_hash == TXO.c.claim_hash) &
|
||||||
|
(support.c.txo_type == TXO_TYPES['support']) &
|
||||||
|
(support.c.address.in_(my_accounts)) &
|
||||||
|
(support.c.txo_hash.notin_(select(TXI.c.txo_hash)))
|
||||||
|
).label('received_tips')
|
||||||
|
)
|
||||||
|
|
||||||
|
    if 'order_by' not in constraints or constraints['order_by'] == 'height':
        constraints['order_by'] = [
            "tx.height=0 DESC", "tx.height DESC", "tx.position DESC", "txo.position"
        ]
    elif constraints.get('order_by', None) == 'none':
        del constraints['order_by']

    rows = context().fetchall(select_txos(select_columns, **constraints))
    txos = rows_to_txos(rows, not no_tx)

    channel_hashes = set()
    for txo in txos:
        if txo.is_claim and txo.can_decode_claim:
            if txo.claim.is_signed:
                channel_hashes.add(txo.claim.signing_channel_hash)

    if channel_hashes:
        channels = {
            txo.claim_hash: txo for txo in
            get_txos(
                txo_type=TXO_TYPES['channel'], spent_height=0,
                wallet_account_ids=wallet_account_ids, claim_hash__in=channel_hashes
            )[0]
        }
        for txo in txos:
            if txo.is_claim and txo.can_decode_claim:
                txo.channel = channels.get(txo.claim.signing_channel_hash, None)

    return txos, get_txo_count(**constraints) if include_total else None


def _clean_txo_constraints_for_aggregation(constraints):
    constraints.pop('include_is_my_input', None)
    constraints.pop('include_is_my_output', None)
    constraints.pop('include_received_tips', None)
    constraints.pop('wallet_account_ids', None)
    constraints.pop('offset', None)
    constraints.pop('limit', None)
    constraints.pop('order_by', None)


def get_txo_count(**constraints):
    _clean_txo_constraints_for_aggregation(constraints)
    count = context().fetchall(select_txos([func.count().label('total')], **constraints))
    return count[0]['total'] or 0


def get_txo_sum(**constraints):
    _clean_txo_constraints_for_aggregation(constraints)
    result = context().fetchall(select_txos([func.sum(TXO.c.amount).label('total')], **constraints))
    return result[0]['total'] or 0


def get_balance(account_ids):
    ctx = context()
    my_addresses = select(AccountAddress.c.address).where(in_account_ids(account_ids))
    if ctx.is_postgres:
        txo_address_check = TXO.c.address == func.any(func.array(my_addresses))
        txi_address_check = TXI.c.address == func.any(func.array(my_addresses))
    else:
        txo_address_check = TXO.c.address.in_(my_addresses)
        txi_address_check = TXI.c.address.in_(my_addresses)
    s: Select = (
        select(
            func.coalesce(func.sum(TXO.c.amount), 0).label("total"),
            func.coalesce(func.sum(case(
                [(TXO.c.txo_type != TXO_TYPES["other"], TXO.c.amount)],
            )), 0).label("reserved"),
            func.coalesce(func.sum(case(
                [(where_txo_type_in(CLAIM_TYPE_CODES), TXO.c.amount)],
            )), 0).label("claims"),
            func.coalesce(func.sum(case(
                [(where_txo_type_in(TXO_TYPES["support"]), TXO.c.amount)],
            )), 0).label("supports"),
            func.coalesce(func.sum(case(
                [(where_txo_type_in(TXO_TYPES["support"]) & (
                    (TXI.c.address.isnot(None)) & txi_address_check
                ), TXO.c.amount)],
            )), 0).label("my_supports"),
        )
        .where((TXO.c.spent_height == 0) & txo_address_check)
        .select_from(
            TXO.join(TXI, (TXI.c.position == 0) & (TXI.c.tx_hash == TXO.c.tx_hash), isouter=True)
        )
    )
    result = ctx.fetchone(s)
    return {
        "total": result["total"],
        "available": result["total"] - result["reserved"],
        "reserved": result["reserved"],
        "reserved_subtotals": {
            "claims": result["claims"],
            "supports": result["my_supports"],
            "tips": result["supports"] - result["my_supports"]
        }
    }


def get_report(account_ids):
    return


def get_txo_plot(start_day=None, days_back=0, end_day=None, days_after=None, **constraints):
    _clean_txo_constraints_for_aggregation(constraints)
    if start_day is None:
        # TODO: Fix
        current_ordinal = 0  # self.ledger.headers.estimated_date(self.ledger.headers.height).toordinal()
        constraints['day__gte'] = current_ordinal - days_back
    else:
        constraints['day__gte'] = date.fromisoformat(start_day).toordinal()
    if end_day is not None:
        constraints['day__lte'] = date.fromisoformat(end_day).toordinal()
    elif days_after is not None:
        constraints['day__lte'] = constraints['day__gte'] + days_after
    plot = context().fetchall(select_txos(
        [TX.c.day, func.sum(TXO.c.amount).label('total')],
        group_by='day', order_by='day', **constraints
    ))
    for row in plot:
        row['day'] = date.fromordinal(row['day'])
    return plot


def get_purchases(**constraints) -> Tuple[List[Output], Optional[int]]:
    accounts = constraints.pop('accounts', None)
    assert accounts, "'accounts' argument required to find purchases"
    if not {'purchased_claim_hash', 'purchased_claim_hash__in'}.intersection(constraints):
        constraints['purchased_claim_hash__is_not_null'] = True
    constraints['tx_hash__in'] = (
        select(TXI.c.tx_hash).select_from(txi_join_account).where(in_account_ids(accounts))
    )
    txs, count = get_transactions(**constraints)
    return [tx.outputs[0] for tx in txs], count


def get_supports_summary(self, **constraints):
    return get_txos(
        txo_type=TXO_TYPES['support'],
        spent_height=0, is_my_output=True,
        include_is_my_input=True,
        no_tx=True,
        **constraints
    )


def reserve_outputs(txo_hashes, is_reserved=True):
    context().execute(
        TXO.update()
        .values(is_reserved=is_reserved)
        .where(TXO.c.txo_hash.in_(txo_hashes))
    )


def release_all_outputs(account_id):
    context().execute(
        TXO.update().values(is_reserved=False).where(
            TXO.c.is_reserved & TXO.c.address.in_(
                select(AccountAddress.c.address).where(in_account_ids(account_id))
            )
        )
    )
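The helpers above all share the keyword-constraint convention used by select_txos()/query(). A rough usage sketch follows; the import path, the account ids and the use of the generic account_ids constraint are assumptions for illustration, and an initialized query context is presumed:

# Illustrative sketch only: the import path, account ids and the `account_ids`
# constraint keyword are assumptions, not confirmed API.
from lbry.db.queries import get_balance, get_txo_count, get_txo_sum
from lbry.db.constants import TXO_TYPES

account_ids = ["abc123"]  # placeholder account id

# Aggregate unspent support outputs for these accounts.
support_count = get_txo_count(txo_type=TXO_TYPES['support'], spent_height=0, account_ids=account_ids)
support_total = get_txo_sum(txo_type=TXO_TYPES['support'], spent_height=0, account_ids=account_ids)

# get_balance() returns the overall totals plus the reserved breakdown.
balance = get_balance(account_ids)
spendable = balance["available"]                       # total minus reserved
tips_received = balance["reserved_subtotals"]["tips"]  # supports sent by others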
lbry/db/queries/wallet.py (new file, 24 lines)
@@ -0,0 +1,24 @@
from sqlalchemy import exists
from sqlalchemy.future import select

from ..query_context import context
from ..tables import Wallet


def has_wallet(wallet_id: str) -> bool:
    sql = select(exists(select(Wallet.c.wallet_id).where(Wallet.c.wallet_id == wallet_id)))
    return context().execute(sql).fetchone()[0]


def get_wallet(wallet_id: str):
    return context().fetchone(
        select(Wallet.c.data).where(Wallet.c.wallet_id == wallet_id)
    )


def add_wallet(wallet_id: str, data: str):
    c = context()
    c.execute(
        c.insert_or_replace(Wallet, ["data"])
        .values(wallet_id=wallet_id, data=data)
    )
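A short round-trip sketch for these wallet helpers; the wallet id and JSON payloads are placeholders and an initialized query context is assumed:

# Illustrative sketch only: wallet id and JSON payloads are placeholders.
from lbry.db.queries.wallet import has_wallet, get_wallet, add_wallet

wallet_id = "default_wallet"  # hypothetical id
if not has_wallet(wallet_id):
    add_wallet(wallet_id, data='{"accounts": []}')    # first write inserts the row
add_wallet(wallet_id, data='{"accounts": ["new"]}')   # re-running replaces only the "data" column
row = get_wallet(wallet_id)
stored = row["data"] if row else None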
lbry/db/query_context.py (new file, 745 lines)
@@ -0,0 +1,745 @@
import os
|
||||||
|
import time
|
||||||
|
import traceback
|
||||||
|
import functools
|
||||||
|
from io import BytesIO
|
||||||
|
import multiprocessing as mp
|
||||||
|
from decimal import Decimal
|
||||||
|
from typing import Dict, List, Optional, Tuple
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from contextvars import ContextVar
|
||||||
|
|
||||||
|
from sqlalchemy import create_engine, inspect, bindparam, func, exists, event as sqlalchemy_event
|
||||||
|
from sqlalchemy.future import select
|
||||||
|
from sqlalchemy.engine import Engine
|
||||||
|
from sqlalchemy.sql import Insert
|
||||||
|
try:
|
||||||
|
from pgcopy import CopyManager
|
||||||
|
except ImportError:
|
||||||
|
CopyManager = None
|
||||||
|
|
||||||
|
from lbry.event import EventQueuePublisher
|
||||||
|
from lbry.blockchain.ledger import Ledger
|
||||||
|
from lbry.blockchain.transaction import Transaction, Output, Input
|
||||||
|
from lbry.schema.tags import clean_tags
|
||||||
|
from lbry.schema.result import Censor
|
||||||
|
from lbry.schema.mime_types import guess_stream_type
|
||||||
|
|
||||||
|
from .utils import pg_insert
|
||||||
|
from .tables import (
|
||||||
|
Block, BlockFilter, BlockGroupFilter,
|
||||||
|
TX, TXFilter, TXO, TXI, Claim, Tag, Support
|
||||||
|
)
|
||||||
|
from .constants import TXO_TYPES, STREAM_TYPES
|
||||||
|
|
||||||
|
|
||||||
|
_context: ContextVar['QueryContext'] = ContextVar('_context')
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class QueryContext:
|
||||||
|
engine: Engine
|
||||||
|
ledger: Ledger
|
||||||
|
message_queue: mp.Queue
|
||||||
|
stop_event: mp.Event
|
||||||
|
stack: List[List]
|
||||||
|
metrics: Dict
|
||||||
|
is_tracking_metrics: bool
|
||||||
|
blocked_streams: Dict
|
||||||
|
blocked_channels: Dict
|
||||||
|
filtered_streams: Dict
|
||||||
|
filtered_channels: Dict
|
||||||
|
pid: int
|
||||||
|
|
||||||
|
# QueryContext __enter__/__exit__ state
|
||||||
|
current_timer_name: Optional[str] = None
|
||||||
|
current_timer_time: float = 0
|
||||||
|
current_progress: Optional['ProgressContext'] = None
|
||||||
|
|
||||||
|
copy_managers: Dict[str, CopyManager] = field(default_factory=dict)
|
||||||
|
_variable_limit: Optional[int] = None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def is_postgres(self):
|
||||||
|
return self.engine.dialect.name == 'postgresql'
|
||||||
|
|
||||||
|
@property
|
||||||
|
def is_sqlite(self):
|
||||||
|
return self.engine.dialect.name == 'sqlite'
|
||||||
|
|
||||||
|
@property
|
||||||
|
def variable_limit(self):
|
||||||
|
if self._variable_limit is not None:
|
||||||
|
return self._variable_limit
|
||||||
|
if self.is_sqlite:
|
||||||
|
for result in self.fetchall('PRAGMA COMPILE_OPTIONS;'):
|
||||||
|
for _, value in result.items():
|
||||||
|
if value.startswith('MAX_VARIABLE_NUMBER'):
|
||||||
|
self._variable_limit = int(value.split('=')[1])
|
||||||
|
return self._variable_limit
|
||||||
|
self._variable_limit = 999 # todo: default for 3.32.0 is 32766, but we are still hitting 999 somehow
|
||||||
|
else:
|
||||||
|
self._variable_limit = 32766
|
||||||
|
return self._variable_limit
|
||||||
|
|
||||||
|
def raise_unsupported_dialect(self):
|
||||||
|
raise RuntimeError(f'Unsupported database dialect: {self.engine.dialect.name}.')
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_resolve_censor(cls) -> Censor:
|
||||||
|
return Censor(Censor.RESOLVE)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_search_censor(cls) -> Censor:
|
||||||
|
return Censor(Censor.SEARCH)
|
||||||
|
|
||||||
|
def pg_copy(self, table, rows):
|
||||||
|
with self.engine.begin() as c:
|
||||||
|
copy_manager = self.copy_managers.get(table.name)
|
||||||
|
if copy_manager is None:
|
||||||
|
self.copy_managers[table.name] = copy_manager = CopyManager(
|
||||||
|
c.connection, table.name, rows[0].keys()
|
||||||
|
)
|
||||||
|
copy_manager.conn = c.connection
|
||||||
|
copy_manager.copy(map(dict.values, rows), BytesIO)
|
||||||
|
copy_manager.conn = None
|
||||||
|
|
||||||
|
def connect_without_transaction(self):
|
||||||
|
return self.engine.connect().execution_options(isolation_level="AUTOCOMMIT")
|
||||||
|
|
||||||
|
def connect_streaming(self):
|
||||||
|
return self.engine.connect().execution_options(stream_results=True)
|
||||||
|
|
||||||
|
def execute_notx(self, sql, *args):
|
||||||
|
with self.connect_without_transaction() as c:
|
||||||
|
return c.execute(sql, *args)
|
||||||
|
|
||||||
|
def execute(self, sql, *args):
|
||||||
|
with self.engine.begin() as c:
|
||||||
|
return c.execute(sql, *args)
|
||||||
|
|
||||||
|
def fetchone(self, sql, *args):
|
||||||
|
with self.engine.begin() as c:
|
||||||
|
row = c.execute(sql, *args).fetchone()
|
||||||
|
return dict(row._mapping) if row else row
|
||||||
|
|
||||||
|
def fetchall(self, sql, *args):
|
||||||
|
with self.engine.begin() as c:
|
||||||
|
rows = c.execute(sql, *args).fetchall()
|
||||||
|
return [dict(row._mapping) for row in rows]
|
||||||
|
|
||||||
|
def fetchtotal(self, condition) -> int:
|
||||||
|
sql = select(func.count('*').label('total')).where(condition)
|
||||||
|
return self.fetchone(sql)['total']
|
||||||
|
|
||||||
|
def fetchmax(self, column, default: int) -> int:
|
||||||
|
sql = select(func.coalesce(func.max(column), default).label('max_result'))
|
||||||
|
return self.fetchone(sql)['max_result']
|
||||||
|
|
||||||
|
def has_records(self, table) -> bool:
|
||||||
|
sql = select(exists([1], from_obj=table).label('result'))
|
||||||
|
return bool(self.fetchone(sql)['result'])
|
||||||
|
|
||||||
|
def insert_or_ignore(self, table):
|
||||||
|
if self.is_sqlite:
|
||||||
|
return table.insert().prefix_with("OR IGNORE")
|
||||||
|
elif self.is_postgres:
|
||||||
|
return pg_insert(table).on_conflict_do_nothing()
|
||||||
|
else:
|
||||||
|
self.raise_unsupported_dialect()
|
||||||
|
|
||||||
|
def insert_or_replace(self, table, replace):
|
||||||
|
if self.is_sqlite:
|
||||||
|
return table.insert().prefix_with("OR REPLACE")
|
||||||
|
elif self.is_postgres:
|
||||||
|
insert = pg_insert(table)
|
||||||
|
return insert.on_conflict_do_update(
|
||||||
|
table.primary_key, set_={col: getattr(insert.excluded, col) for col in replace}
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self.raise_unsupported_dialect()
|
||||||
|
|
||||||
|
def has_table(self, table):
|
||||||
|
return inspect(self.engine).has_table(table)
|
||||||
|
|
||||||
|
def get_bulk_loader(self) -> 'BulkLoader':
|
||||||
|
return BulkLoader(self)
|
||||||
|
|
||||||
|
def reset_metrics(self):
|
||||||
|
self.stack = []
|
||||||
|
self.metrics = {}
|
||||||
|
|
||||||
|
def with_timer(self, timer_name: str) -> 'QueryContext':
|
||||||
|
self.current_timer_name = timer_name
|
||||||
|
return self
|
||||||
|
|
||||||
|
@property
|
||||||
|
def elapsed(self):
|
||||||
|
return time.perf_counter() - self.current_timer_time
|
||||||
|
|
||||||
|
def __enter__(self) -> 'QueryContext':
|
||||||
|
self.current_timer_time = time.perf_counter()
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||||
|
self.current_timer_name = None
|
||||||
|
self.current_timer_time = 0
|
||||||
|
self.current_progress = None
|
||||||
|
|
||||||
|
|
||||||
|
def context(with_timer: str = None) -> 'QueryContext':
|
||||||
|
if isinstance(with_timer, str):
|
||||||
|
return _context.get().with_timer(with_timer)
|
||||||
|
return _context.get()
|
||||||
|
|
||||||
|
|
||||||
|
def set_postgres_settings(connection, _):
|
||||||
|
cursor = connection.cursor()
|
||||||
|
cursor.execute('SET work_mem="500MB";')
|
||||||
|
cursor.execute('COMMIT;')
|
||||||
|
cursor.close()
|
||||||
|
|
||||||
|
|
||||||
|
def set_sqlite_settings(connection, _):
|
||||||
|
connection.isolation_level = None
|
||||||
|
cursor = connection.cursor()
|
||||||
|
cursor.execute('PRAGMA journal_mode=WAL;')
|
||||||
|
cursor.close()
|
||||||
|
|
||||||
|
|
||||||
|
def do_sqlite_begin(connection):
|
||||||
|
# see: https://bit.ly/3j4vvXm
|
||||||
|
connection.exec_driver_sql("BEGIN")
|
||||||
|
|
||||||
|
|
||||||
|
def initialize(
|
||||||
|
ledger: Ledger, message_queue: mp.Queue, stop_event: mp.Event,
|
||||||
|
track_metrics=False, block_and_filter=None):
|
||||||
|
url = ledger.conf.db_url_or_default
|
||||||
|
engine = create_engine(url)
|
||||||
|
if engine.name == "postgresql":
|
||||||
|
sqlalchemy_event.listen(engine, "connect", set_postgres_settings)
|
||||||
|
elif engine.name == "sqlite":
|
||||||
|
sqlalchemy_event.listen(engine, "connect", set_sqlite_settings)
|
||||||
|
sqlalchemy_event.listen(engine, "begin", do_sqlite_begin)
|
||||||
|
if block_and_filter is not None:
|
||||||
|
blocked_streams, blocked_channels, filtered_streams, filtered_channels = block_and_filter
|
||||||
|
else:
|
||||||
|
blocked_streams = blocked_channels = filtered_streams = filtered_channels = {}
|
||||||
|
_context.set(
|
||||||
|
QueryContext(
|
||||||
|
pid=os.getpid(), engine=engine,
|
||||||
|
ledger=ledger, message_queue=message_queue, stop_event=stop_event,
|
||||||
|
stack=[], metrics={}, is_tracking_metrics=track_metrics,
|
||||||
|
blocked_streams=blocked_streams, blocked_channels=blocked_channels,
|
||||||
|
filtered_streams=filtered_streams, filtered_channels=filtered_channels,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def uninitialize():
|
||||||
|
ctx = _context.get(None)
|
||||||
|
if ctx is not None:
|
||||||
|
ctx.engine.dispose()
|
||||||
|
_context.set(None)
|
||||||
|
|
||||||
|
|
||||||
|
class Event:
|
||||||
|
_events: List['Event'] = []
|
||||||
|
__slots__ = 'id', 'name', 'units'
|
||||||
|
|
||||||
|
def __init__(self, name: str, units: Tuple[str]):
|
||||||
|
self.id = None
|
||||||
|
self.name = name
|
||||||
|
self.units = units
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_by_id(cls, event_id) -> 'Event':
|
||||||
|
return cls._events[event_id]
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_by_name(cls, name) -> 'Event':
|
||||||
|
for event in cls._events:
|
||||||
|
if event.name == name:
|
||||||
|
return event
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def add(cls, name: str, *units: str) -> 'Event':
|
||||||
|
assert cls.get_by_name(name) is None, f"Event {name} already exists."
|
||||||
|
assert name.count('.') == 3, f"Event {name} does not follow pattern of: [module].sync.[phase].[task]"
|
||||||
|
event = cls(name, units)
|
||||||
|
cls._events.append(event)
|
||||||
|
event.id = cls._events.index(event)
|
||||||
|
return event
|
||||||
|
|
||||||
|
|
||||||
|
def event_emitter(name: str, *units: str, throttle=1):
|
||||||
|
event = Event.add(name, *units)
|
||||||
|
|
||||||
|
def wrapper(f):
|
||||||
|
@functools.wraps(f)
|
||||||
|
def with_progress(*args, **kwargs):
|
||||||
|
with progress(event, throttle=throttle) as p:
|
||||||
|
try:
|
||||||
|
return f(*args, **kwargs, p=p)
|
||||||
|
except BreakProgress:
|
||||||
|
raise
|
||||||
|
except:
|
||||||
|
traceback.print_exc()
|
||||||
|
raise
|
||||||
|
return with_progress
|
||||||
|
|
||||||
|
return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
class ProgressPublisher(EventQueuePublisher):
|
||||||
|
|
||||||
|
def message_to_event(self, message):
|
||||||
|
total, extra = None, None
|
||||||
|
if len(message) == 3:
|
||||||
|
event_id, progress_id, done = message
|
||||||
|
elif len(message) == 5:
|
||||||
|
event_id, progress_id, done, total, extra = message
|
||||||
|
else:
|
||||||
|
raise TypeError("progress message must be tuple of 3 or 5 values.")
|
||||||
|
event = Event.get_by_id(event_id)
|
||||||
|
d = {
|
||||||
|
"event": event.name,
|
||||||
|
"data": {"id": progress_id, "done": done}
|
||||||
|
}
|
||||||
|
if total is not None:
|
||||||
|
d['data']['total'] = total
|
||||||
|
d['data']['units'] = event.units
|
||||||
|
if isinstance(extra, dict):
|
||||||
|
d['data'].update(extra)
|
||||||
|
return d
|
||||||
|
|
||||||
|
|
||||||
|
class BreakProgress(Exception):
|
||||||
|
"""Break out of progress when total is 0."""
|
||||||
|
|
||||||
|
|
||||||
|
class Progress:
|
||||||
|
|
||||||
|
def __init__(self, message_queue: mp.Queue, event: Event, throttle=1):
|
||||||
|
self.message_queue = message_queue
|
||||||
|
self.event = event
|
||||||
|
self.progress_id = 0
|
||||||
|
self.throttle = throttle
|
||||||
|
self.last_done = (0,)*len(event.units)
|
||||||
|
self.last_done_queued = (0,)*len(event.units)
|
||||||
|
self.totals = (0,)*len(event.units)
|
||||||
|
|
||||||
|
def __enter__(self) -> 'Progress':
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||||
|
if self.last_done != self.last_done_queued:
|
||||||
|
self.message_queue.put((self.event.id, self.progress_id, self.last_done))
|
||||||
|
self.last_done_queued = self.last_done
|
||||||
|
if exc_type == BreakProgress:
|
||||||
|
return True
|
||||||
|
if self.last_done != self.totals: # or exc_type is not None:
|
||||||
|
# TODO: add exception info into closing message if there is any
|
||||||
|
self.message_queue.put((
|
||||||
|
self.event.id, self.progress_id, (-1,)*len(self.event.units)
|
||||||
|
))
|
||||||
|
|
||||||
|
def start(self, *totals: int, progress_id=0, label=None, extra=None):
|
||||||
|
assert len(totals) == len(self.event.units), \
|
||||||
|
f"Totals {totals} do not match up with units {self.event.units}."
|
||||||
|
if not any(totals):
|
||||||
|
raise BreakProgress
|
||||||
|
self.totals = totals
|
||||||
|
self.progress_id = progress_id
|
||||||
|
extra = {} if extra is None else extra.copy()
|
||||||
|
if label is not None:
|
||||||
|
extra['label'] = label
|
||||||
|
self.step(*((0,)*len(totals)), force=True, extra=extra)
|
||||||
|
|
||||||
|
def step(self, *done: int, force=False, extra=None):
|
||||||
|
if done == ():
|
||||||
|
assert len(self.totals) == 1, "Incrementing step() only works with one unit progress."
|
||||||
|
done = (self.last_done[0]+1,)
|
||||||
|
assert len(done) == len(self.totals), \
|
||||||
|
f"Done elements {done} don't match total elements {self.totals}."
|
||||||
|
self.last_done = done
|
||||||
|
send_condition = force or extra is not None or (
|
||||||
|
# throttle rate of events being generated (only throttles first unit value)
|
||||||
|
(self.throttle == 1 or done[0] % self.throttle == 0) and
|
||||||
|
# deduplicate finish event by not sending a step where done == total
|
||||||
|
any(i < j for i, j in zip(done, self.totals)) and
|
||||||
|
# deduplicate same event
|
||||||
|
done != self.last_done_queued
|
||||||
|
)
|
||||||
|
if send_condition:
|
||||||
|
if extra is not None:
|
||||||
|
self.message_queue.put_nowait(
|
||||||
|
(self.event.id, self.progress_id, done, self.totals, extra)
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
self.message_queue.put_nowait(
|
||||||
|
(self.event.id, self.progress_id, done)
|
||||||
|
)
|
||||||
|
self.last_done_queued = done
|
||||||
|
|
||||||
|
def add(self, *done: int, force=False, extra=None):
|
||||||
|
assert len(done) == len(self.last_done), \
|
||||||
|
f"Done elements {done} don't match total elements {self.last_done}."
|
||||||
|
self.step(
|
||||||
|
*(i+j for i, j in zip(self.last_done, done)),
|
||||||
|
force=force, extra=extra
|
||||||
|
)
|
||||||
|
|
||||||
|
def iter(self, items: List):
|
||||||
|
self.start(len(items))
|
||||||
|
for item in items:
|
||||||
|
yield item
|
||||||
|
self.step()
|
||||||
|
|
||||||
|
|
||||||
|
class ProgressContext(Progress):
|
||||||
|
|
||||||
|
def __init__(self, ctx: QueryContext, event: Event, throttle=1):
|
||||||
|
super().__init__(ctx.message_queue, event, throttle)
|
||||||
|
self.ctx = ctx
|
||||||
|
|
||||||
|
def __enter__(self) -> 'ProgressContext':
|
||||||
|
self.ctx.__enter__()
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||||
|
return any((
|
||||||
|
self.ctx.__exit__(exc_type, exc_val, exc_tb),
|
||||||
|
super().__exit__(exc_type, exc_val, exc_tb)
|
||||||
|
))
|
||||||
|
|
||||||
|
|
||||||
|
def progress(e: Event, throttle=1) -> ProgressContext:
|
||||||
|
ctx = context(e.name)
|
||||||
|
ctx.current_progress = ProgressContext(ctx, e, throttle=throttle)
|
||||||
|
return ctx.current_progress
|
||||||
|
|
||||||
|
|
||||||
|
class BulkLoader:
|
||||||
|
|
||||||
|
def __init__(self, ctx: QueryContext):
|
||||||
|
self.ctx = ctx
|
||||||
|
self.ledger = ctx.ledger
|
||||||
|
self.blocks = []
|
||||||
|
self.txs = []
|
||||||
|
self.txos = []
|
||||||
|
self.txis = []
|
||||||
|
self.supports = []
|
||||||
|
self.claims = []
|
||||||
|
self.tags = []
|
||||||
|
self.update_claims = []
|
||||||
|
self.delete_tags = []
|
||||||
|
self.tx_filters = []
|
||||||
|
self.block_filters = []
|
||||||
|
self.group_filters = []
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def block_to_row(block: Block) -> dict:
|
||||||
|
return {
|
||||||
|
'block_hash': block.block_hash,
|
||||||
|
'previous_hash': block.prev_block_hash,
|
||||||
|
'file_number': block.file_number,
|
||||||
|
'height': 0 if block.is_first_block else block.height,
|
||||||
|
'timestamp': block.timestamp,
|
||||||
|
}
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def tx_to_row(block_hash: bytes, tx: Transaction) -> dict:
|
||||||
|
row = {
|
||||||
|
'tx_hash': tx.hash,
|
||||||
|
#'block_hash': block_hash,
|
||||||
|
'raw': tx.raw,
|
||||||
|
'height': tx.height,
|
||||||
|
'position': tx.position,
|
||||||
|
'is_verified': tx.is_verified,
|
||||||
|
'timestamp': tx.timestamp,
|
||||||
|
'day': tx.day,
|
||||||
|
'purchased_claim_hash': None,
|
||||||
|
}
|
||||||
|
txos = tx.outputs
|
||||||
|
if len(txos) >= 2 and txos[1].can_decode_purchase_data:
|
||||||
|
txos[0].purchase = txos[1]
|
||||||
|
row['purchased_claim_hash'] = txos[1].purchase_data.claim_hash
|
||||||
|
return row
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def txi_to_row(tx: Transaction, txi: Input) -> dict:
|
||||||
|
return {
|
||||||
|
'tx_hash': tx.hash,
|
||||||
|
'txo_hash': txi.txo_ref.hash,
|
||||||
|
'position': txi.position,
|
||||||
|
'height': tx.height,
|
||||||
|
}
|
||||||
|
|
||||||
|
def txo_to_row(self, tx: Transaction, txo: Output) -> dict:
|
||||||
|
row = {
|
||||||
|
'tx_hash': tx.hash,
|
||||||
|
'txo_hash': txo.hash,
|
||||||
|
'address': txo.get_address(self.ledger) if txo.has_address else None,
|
||||||
|
'position': txo.position,
|
||||||
|
'amount': txo.amount,
|
||||||
|
'height': tx.height,
|
||||||
|
'script_offset': txo.script.offset,
|
||||||
|
'script_length': txo.script.length,
|
||||||
|
'txo_type': 0,
|
||||||
|
'claim_id': None,
|
||||||
|
'claim_hash': None,
|
||||||
|
'claim_name': None,
|
||||||
|
'channel_hash': None,
|
||||||
|
'signature': None,
|
||||||
|
'signature_digest': None,
|
||||||
|
'reposted_claim_hash': None,
|
||||||
|
'public_key': None,
|
||||||
|
'public_key_hash': None
|
||||||
|
}
|
||||||
|
if txo.is_claim:
|
||||||
|
if txo.can_decode_claim:
|
||||||
|
claim = txo.claim
|
||||||
|
row['txo_type'] = TXO_TYPES.get(claim.claim_type, TXO_TYPES['stream'])
|
||||||
|
if claim.is_channel:
|
||||||
|
row['public_key'] = claim.channel.public_key_bytes
|
||||||
|
row['public_key_hash'] = self.ledger.address_to_hash160(
|
||||||
|
self.ledger.public_key_to_address(claim.channel.public_key_bytes)
|
||||||
|
)
|
||||||
|
elif claim.is_repost:
|
||||||
|
row['reposted_claim_hash'] = claim.repost.reference.claim_hash
|
||||||
|
else:
|
||||||
|
row['txo_type'] = TXO_TYPES['stream']
|
||||||
|
elif txo.is_support:
|
||||||
|
row['txo_type'] = TXO_TYPES['support']
|
||||||
|
elif txo.purchase is not None:
|
||||||
|
row['txo_type'] = TXO_TYPES['purchase']
|
||||||
|
row['claim_id'] = txo.purchased_claim_id
|
||||||
|
row['claim_hash'] = txo.purchased_claim_hash
|
||||||
|
if txo.script.is_claim_involved:
|
||||||
|
signable = txo.can_decode_signable
|
||||||
|
if signable and signable.is_signed:
|
||||||
|
row['channel_hash'] = signable.signing_channel_hash
|
||||||
|
row['signature'] = txo.get_encoded_signature()
|
||||||
|
row['signature_digest'] = txo.get_signature_digest(self.ledger)
|
||||||
|
row['claim_id'] = txo.claim_id
|
||||||
|
row['claim_hash'] = txo.claim_hash
|
||||||
|
try:
|
||||||
|
row['claim_name'] = txo.claim_name.replace('\x00', '')
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
pass
|
||||||
|
return row
|
||||||
|
|
||||||
|
def claim_to_rows(
|
||||||
|
self, txo: Output, claims_in_channel_amount: int, staked_support_amount: int, staked_support_count: int,
|
||||||
|
reposted_count: int, signature: bytes = None, signature_digest: bytes = None, channel_public_key: bytes = None,
|
||||||
|
) -> Tuple[dict, List]:
|
||||||
|
|
||||||
|
tx = txo.tx_ref
|
||||||
|
d = {
|
||||||
|
'claim_type': None,
|
||||||
|
'address': txo.get_address(self.ledger),
|
||||||
|
'txo_hash': txo.hash,
|
||||||
|
'amount': txo.amount,
|
||||||
|
'height': tx.height,
|
||||||
|
'timestamp': tx.timestamp,
|
||||||
|
# support
|
||||||
|
'staked_amount': txo.amount + claims_in_channel_amount + staked_support_amount,
|
||||||
|
'staked_support_amount': staked_support_amount,
|
||||||
|
'staked_support_count': staked_support_count,
|
||||||
|
# basic metadata
|
||||||
|
'title': None,
|
||||||
|
'description': None,
|
||||||
|
'author': None,
|
||||||
|
# streams
|
||||||
|
'stream_type': None,
|
||||||
|
'media_type': None,
|
||||||
|
'duration': None,
|
||||||
|
'release_time': None,
|
||||||
|
'fee_amount': 0,
|
||||||
|
'fee_currency': None,
|
||||||
|
# reposts
|
||||||
|
'reposted_claim_hash': None,
|
||||||
|
'reposted_count': reposted_count,
|
||||||
|
# signed claims
|
||||||
|
'channel_hash': None,
|
||||||
|
'is_signature_valid': None,
|
||||||
|
}
|
||||||
|
|
||||||
|
claim = txo.can_decode_claim
|
||||||
|
if not claim:
|
||||||
|
return d, []
|
||||||
|
|
||||||
|
if claim.is_stream:
|
||||||
|
d['claim_type'] = TXO_TYPES['stream']
|
||||||
|
d['media_type'] = claim.stream.source.media_type
|
||||||
|
d['stream_type'] = STREAM_TYPES[guess_stream_type(d['media_type'])]
|
||||||
|
d['title'] = claim.stream.title.replace('\x00', '')
|
||||||
|
d['description'] = claim.stream.description.replace('\x00', '')
|
||||||
|
d['author'] = claim.stream.author.replace('\x00', '')
|
||||||
|
if claim.stream.video and claim.stream.video.duration:
|
||||||
|
d['duration'] = claim.stream.video.duration
|
||||||
|
if claim.stream.audio and claim.stream.audio.duration:
|
||||||
|
d['duration'] = claim.stream.audio.duration
|
||||||
|
if claim.stream.release_time:
|
||||||
|
d['release_time'] = claim.stream.release_time
|
||||||
|
if claim.stream.has_fee:
|
||||||
|
fee = claim.stream.fee
|
||||||
|
if isinstance(fee.amount, Decimal):
|
||||||
|
d['fee_amount'] = int(fee.amount*1000)
|
||||||
|
if isinstance(fee.currency, str):
|
||||||
|
d['fee_currency'] = fee.currency.lower()
|
||||||
|
elif claim.is_repost:
|
||||||
|
d['claim_type'] = TXO_TYPES['repost']
|
||||||
|
d['reposted_claim_hash'] = claim.repost.reference.claim_hash
|
||||||
|
elif claim.is_channel:
|
||||||
|
d['claim_type'] = TXO_TYPES['channel']
|
||||||
|
if claim.is_signed:
|
||||||
|
d['channel_hash'] = claim.signing_channel_hash
|
||||||
|
d['is_signature_valid'] = (
|
||||||
|
all((signature, signature_digest, channel_public_key)) and
|
||||||
|
Output.is_signature_valid(
|
||||||
|
signature, signature_digest, channel_public_key
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
tags = []
|
||||||
|
if claim.message.tags:
|
||||||
|
claim_hash = txo.claim_hash
|
||||||
|
tags = [
|
||||||
|
{'claim_hash': claim_hash, 'tag': tag}
|
||||||
|
for tag in clean_tags(claim.message.tags)
|
||||||
|
]
|
||||||
|
|
||||||
|
return d, tags
|
||||||
|
|
||||||
|
def support_to_row(
|
||||||
|
self, txo: Output, channel_public_key: bytes = None,
|
||||||
|
signature: bytes = None, signature_digest: bytes = None
|
||||||
|
):
|
||||||
|
tx = txo.tx_ref
|
||||||
|
d = {
|
||||||
|
'txo_hash': txo.ref.hash,
|
||||||
|
'claim_hash': txo.claim_hash,
|
||||||
|
'address': txo.get_address(self.ledger),
|
||||||
|
'amount': txo.amount,
|
||||||
|
'height': tx.height,
|
||||||
|
'timestamp': tx.timestamp,
|
||||||
|
'emoji': None,
|
||||||
|
'channel_hash': None,
|
||||||
|
'is_signature_valid': None,
|
||||||
|
}
|
||||||
|
support = txo.can_decode_support
|
||||||
|
if support:
|
||||||
|
d['emoji'] = support.emoji
|
||||||
|
if support.is_signed:
|
||||||
|
d['channel_hash'] = support.signing_channel_hash
|
||||||
|
d['is_signature_valid'] = (
|
||||||
|
all((signature, signature_digest, channel_public_key)) and
|
||||||
|
Output.is_signature_valid(
|
||||||
|
signature, signature_digest, channel_public_key
|
||||||
|
)
|
||||||
|
)
|
||||||
|
return d
|
||||||
|
|
||||||
|
def add_block(self, block: Block):
|
||||||
|
self.blocks.append(self.block_to_row(block))
|
||||||
|
for tx in block.txs:
|
||||||
|
self.add_transaction(block.block_hash, tx)
|
||||||
|
return self
|
||||||
|
|
||||||
|
def add_block_filter(self, height: int, address_filter: bytes):
|
||||||
|
self.block_filters.append({
|
||||||
|
'height': height,
|
||||||
|
'address_filter': address_filter
|
||||||
|
})
|
||||||
|
|
||||||
|
def add_group_filter(self, height: int, factor: int, address_filter: bytes):
|
||||||
|
self.group_filters.append({
|
||||||
|
'height': height,
|
||||||
|
'factor': factor,
|
||||||
|
'address_filter': address_filter
|
||||||
|
})
|
||||||
|
|
||||||
|
def add_transaction(self, block_hash: bytes, tx: Transaction):
|
||||||
|
self.txs.append(self.tx_to_row(block_hash, tx))
|
||||||
|
for txi in tx.inputs:
|
||||||
|
if txi.coinbase is None:
|
||||||
|
self.txis.append(self.txi_to_row(tx, txi))
|
||||||
|
for txo in tx.outputs:
|
||||||
|
self.txos.append(self.txo_to_row(tx, txo))
|
||||||
|
return self
|
||||||
|
|
||||||
|
def add_transaction_filter(self, tx_hash: bytes, height: int, address_filter: bytes):
|
||||||
|
self.tx_filters.append({
|
||||||
|
'tx_hash': tx_hash,
|
||||||
|
'height': height,
|
||||||
|
'address_filter': address_filter
|
||||||
|
})
|
||||||
|
|
||||||
|
def add_support(self, txo: Output, **extra):
|
||||||
|
self.supports.append(self.support_to_row(txo, **extra))
|
||||||
|
|
||||||
|
def add_claim(
|
||||||
|
self, txo: Output, short_url: str,
|
||||||
|
creation_height: int, activation_height: int, expiration_height: int,
|
||||||
|
takeover_height: int = None, **extra
|
||||||
|
):
|
||||||
|
try:
|
||||||
|
claim_name = txo.claim_name.replace('\x00', '')
|
||||||
|
normalized_name = txo.normalized_name
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
claim_name = normalized_name = ''
|
||||||
|
d, tags = self.claim_to_rows(txo, **extra)
|
||||||
|
d['claim_hash'] = txo.claim_hash
|
||||||
|
d['claim_id'] = txo.claim_id
|
||||||
|
d['claim_name'] = claim_name
|
||||||
|
d['normalized'] = normalized_name
|
||||||
|
d['short_url'] = short_url
|
||||||
|
d['creation_height'] = creation_height
|
||||||
|
d['activation_height'] = activation_height
|
||||||
|
d['expiration_height'] = expiration_height
|
||||||
|
d['takeover_height'] = takeover_height
|
||||||
|
d['is_controlling'] = takeover_height is not None
|
||||||
|
self.claims.append(d)
|
||||||
|
self.tags.extend(tags)
|
||||||
|
return self
|
||||||
|
|
||||||
|
def update_claim(self, txo: Output, **extra):
|
||||||
|
d, tags = self.claim_to_rows(txo, **extra)
|
||||||
|
d['pk'] = txo.claim_hash
|
||||||
|
self.update_claims.append(d)
|
||||||
|
self.delete_tags.append({'pk': txo.claim_hash})
|
||||||
|
self.tags.extend(tags)
|
||||||
|
return self
|
||||||
|
|
||||||
|
def get_queries(self):
|
||||||
|
return (
|
||||||
|
(Block.insert(), self.blocks),
|
||||||
|
(BlockFilter.insert(), self.block_filters),
|
||||||
|
(BlockGroupFilter.insert(), self.group_filters),
|
||||||
|
(TX.insert(), self.txs),
|
||||||
|
(TXFilter.insert(), self.tx_filters),
|
||||||
|
(TXO.insert(), self.txos),
|
||||||
|
(TXI.insert(), self.txis),
|
||||||
|
(Claim.insert(), self.claims),
|
||||||
|
(Tag.delete().where(Tag.c.claim_hash == bindparam('pk')), self.delete_tags),
|
||||||
|
(Claim.update().where(Claim.c.claim_hash == bindparam('pk')), self.update_claims),
|
||||||
|
(Tag.insert(), self.tags),
|
||||||
|
(Support.insert(), self.supports),
|
||||||
|
)
|
||||||
|
|
||||||
|
def flush(self, return_row_count_for_table) -> int:
|
||||||
|
done = 0
|
||||||
|
for sql, rows in self.get_queries():
|
||||||
|
if not rows:
|
||||||
|
continue
|
||||||
|
if self.ctx.is_postgres and isinstance(sql, Insert):
|
||||||
|
self.ctx.pg_copy(sql.table, rows)
|
||||||
|
else:
|
||||||
|
self.ctx.execute(sql, rows)
|
||||||
|
if sql.table == return_row_count_for_table:
|
||||||
|
done += len(rows)
|
||||||
|
rows.clear()
|
||||||
|
return done
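The Event/progress machinery defined in this file is what the sync code in the next file builds on. A rough sketch of the intended call pattern, assuming initialize() has already set up the module-level context; the event name and work loop are illustrative:

# Sketch of the event_emitter/progress pattern; the event name and loop body are
# illustrative, and an initialized query context is assumed.
from lbry.db.query_context import event_emitter, ProgressContext


@event_emitter("client.sync.example.task", "txs")  # name must match [module].sync.[phase].[task]
def do_work(items, p: ProgressContext):
    p.start(len(items))   # raises BreakProgress (handled by the decorator) when the total is 0
    for item in items:
        ...               # real work would happen here
        p.step()          # single-unit events may be stepped without arguments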
lbry/db/sync.py (new file, 103 lines)
@@ -0,0 +1,103 @@
from sqlalchemy.future import select

from lbry.db.query_context import progress, Event
from lbry.db.tables import TX, TXI, TXO, Claim, Support
from .constants import TXO_TYPES, CLAIM_TYPE_CODES
from .queries import (
    BASE_SELECT_TXO_COLUMNS,
    rows_to_txos, where_unspent_txos,
    where_abandoned_supports,
    where_abandoned_claims
)


SPENDS_UPDATE_EVENT = Event.add("client.sync.spends.update", "steps")
CLAIMS_INSERT_EVENT = Event.add("client.sync.claims.insert", "claims")
CLAIMS_UPDATE_EVENT = Event.add("client.sync.claims.update", "claims")
CLAIMS_DELETE_EVENT = Event.add("client.sync.claims.delete", "claims")
SUPPORT_INSERT_EVENT = Event.add("client.sync.supports.insert", "supports")
SUPPORT_UPDATE_EVENT = Event.add("client.sync.supports.update", "supports")
SUPPORT_DELETE_EVENT = Event.add("client.sync.supports.delete", "supports")


def process_all_things_after_sync():
    with progress(SPENDS_UPDATE_EVENT) as p:
        p.start(2)
        update_spent_outputs(p.ctx)
        p.step(1)
        set_input_addresses(p.ctx)
        p.step(2)
    with progress(SUPPORT_DELETE_EVENT) as p:
        p.start(1)
        sql = Support.delete().where(where_abandoned_supports())
        p.ctx.execute(sql)
    with progress(SUPPORT_INSERT_EVENT) as p:
        loader = p.ctx.get_bulk_loader()
        sql = (
            select(*BASE_SELECT_TXO_COLUMNS)
            .where(where_unspent_txos(TXO_TYPES['support'], missing_in_supports_table=True))
            .select_from(TXO.join(TX))
        )
        for support in rows_to_txos(p.ctx.fetchall(sql)):
            loader.add_support(support)
        loader.flush(Support)
    with progress(CLAIMS_DELETE_EVENT) as p:
        p.start(1)
        sql = Claim.delete().where(where_abandoned_claims())
        p.ctx.execute(sql)
    with progress(CLAIMS_INSERT_EVENT) as p:
        loader = p.ctx.get_bulk_loader()
        sql = (
            select(*BASE_SELECT_TXO_COLUMNS)
            .where(where_unspent_txos(CLAIM_TYPE_CODES, missing_in_claims_table=True))
            .select_from(TXO.join(TX))
        )
        for claim in rows_to_txos(p.ctx.fetchall(sql)):
            loader.add_claim(claim, '', 0, 0, 0, 0, staked_support_amount=0, staked_support_count=0)
        loader.flush(Claim)
    with progress(CLAIMS_UPDATE_EVENT) as p:
        loader = p.ctx.get_bulk_loader()
        sql = (
            select(*BASE_SELECT_TXO_COLUMNS)
            .where(where_unspent_txos(CLAIM_TYPE_CODES, missing_or_stale_in_claims_table=True))
            .select_from(TXO.join(TX))
        )
        for claim in rows_to_txos(p.ctx.fetchall(sql)):
            loader.update_claim(claim)
        loader.flush(Claim)


def set_input_addresses(ctx):
    # Update TXIs to have the address of TXO they are spending.
    if ctx.is_sqlite:
        address_query = select(TXO.c.address).where(TXI.c.txo_hash == TXO.c.txo_hash)
        set_addresses = (
            TXI.update()
            .values(address=address_query.scalar_subquery())
            .where(TXI.c.address.is_(None))
        )
    else:
        set_addresses = (
            TXI.update()
            .values({TXI.c.address: TXO.c.address})
            .where((TXI.c.address.is_(None)) & (TXI.c.txo_hash == TXO.c.txo_hash))
        )
    ctx.execute(set_addresses)


def update_spent_outputs(ctx):
    # Update spent TXOs setting spent_height
    set_spent_height = (
        TXO.update()
        .values({
            TXO.c.spent_height: (
                select(TXI.c.height)
                .where(TXI.c.txo_hash == TXO.c.txo_hash)
                .scalar_subquery()
            )
        }).where(
            (TXO.c.spent_height == 0) &
            (TXO.c.txo_hash.in_(select(TXI.c.txo_hash).where(TXI.c.address.is_(None))))
        )
    )
    ctx.execute(set_spent_height)
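Both statements above rely on correlated scalar subqueries. A small sketch for inspecting the SQL that the SQLite branch produces without executing it; the statement shown in the comment is an approximation of the compiler output:

# Sketch only: compile (do not execute) the SQLite form of the TXI address update.
from sqlalchemy.dialects import sqlite
from sqlalchemy.future import select
from lbry.db.tables import TXI, TXO

address_query = select(TXO.c.address).where(TXI.c.txo_hash == TXO.c.txo_hash)
set_addresses = (
    TXI.update()
    .values(address=address_query.scalar_subquery())
    .where(TXI.c.address.is_(None))
)
print(set_addresses.compile(dialect=sqlite.dialect()))
# roughly: UPDATE txi SET address=(SELECT txo.address FROM txo WHERE txo.txo_hash = txi.txo_hash)
#          WHERE txi.address IS NULL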
lbry/db/tables.py (new file, 345 lines)
@@ -0,0 +1,345 @@
# pylint: skip-file
|
||||||
|
|
||||||
|
from sqlalchemy import (
|
||||||
|
MetaData, Table, Column, ForeignKey,
|
||||||
|
LargeBinary, Text, SmallInteger, Integer, BigInteger, Boolean,
|
||||||
|
)
|
||||||
|
from .constants import TXO_TYPES, CLAIM_TYPE_CODES
|
||||||
|
|
||||||
|
|
||||||
|
SCHEMA_VERSION = '1.4'
|
||||||
|
|
||||||
|
|
||||||
|
metadata = MetaData()
|
||||||
|
|
||||||
|
|
||||||
|
Version = Table(
|
||||||
|
'version', metadata,
|
||||||
|
Column('version', Text, primary_key=True),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
Wallet = Table(
|
||||||
|
'wallet', metadata,
|
||||||
|
Column('wallet_id', Text, primary_key=True),
|
||||||
|
Column('data', Text),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
PubkeyAddress = Table(
|
||||||
|
'pubkey_address', metadata,
|
||||||
|
Column('address', Text, primary_key=True),
|
||||||
|
Column('used_times', Integer, server_default='0'),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
AccountAddress = Table(
|
||||||
|
'account_address', metadata,
|
||||||
|
Column('account', Text, primary_key=True),
|
||||||
|
Column('address', Text, ForeignKey(PubkeyAddress.columns.address), primary_key=True),
|
||||||
|
Column('chain', SmallInteger),
|
||||||
|
Column('pubkey', LargeBinary),
|
||||||
|
Column('chain_code', LargeBinary),
|
||||||
|
Column('n', Integer),
|
||||||
|
Column('depth', SmallInteger),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
pg_add_account_address_constraints_and_indexes = [
|
||||||
|
"CREATE UNIQUE INDEX account_address_idx ON account_address (account, address);"
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
Block = Table(
|
||||||
|
'block', metadata,
|
||||||
|
Column('height', Integer, primary_key=True),
|
||||||
|
Column('block_hash', LargeBinary),
|
||||||
|
Column('previous_hash', LargeBinary),
|
||||||
|
Column('file_number', SmallInteger),
|
||||||
|
Column('timestamp', Integer),
|
||||||
|
)
|
||||||
|
|
||||||
|
pg_add_block_constraints_and_indexes = [
|
||||||
|
"ALTER TABLE block ADD PRIMARY KEY (height);",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
BlockFilter = Table(
|
||||||
|
'block_filter', metadata,
|
||||||
|
Column('height', Integer, primary_key=True),
|
||||||
|
Column('address_filter', LargeBinary),
|
||||||
|
)
|
||||||
|
|
||||||
|
pg_add_block_filter_constraints_and_indexes = [
|
||||||
|
"ALTER TABLE block_filter ADD PRIMARY KEY (height);",
|
||||||
|
"ALTER TABLE block_filter ADD CONSTRAINT fk_block_filter"
|
||||||
|
" FOREIGN KEY (height) REFERENCES block (height) ON DELETE CASCADE;",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
BlockGroupFilter = Table(
|
||||||
|
'block_group_filter', metadata,
|
||||||
|
Column('height', Integer),
|
||||||
|
Column('factor', SmallInteger),
|
||||||
|
Column('address_filter', LargeBinary),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
TX = Table(
|
||||||
|
'tx', metadata,
|
||||||
|
Column('tx_hash', LargeBinary, primary_key=True),
|
||||||
|
Column('raw', LargeBinary),
|
||||||
|
Column('height', Integer),
|
||||||
|
Column('position', SmallInteger),
|
||||||
|
Column('timestamp', Integer, nullable=True),
|
||||||
|
Column('day', Integer, nullable=True),
|
||||||
|
Column('is_verified', Boolean, server_default='FALSE'),
|
||||||
|
Column('purchased_claim_hash', LargeBinary, nullable=True),
|
||||||
|
)
|
||||||
|
|
||||||
|
pg_add_tx_constraints_and_indexes = [
|
||||||
|
"ALTER TABLE tx ADD PRIMARY KEY (tx_hash);",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
TXFilter = Table(
|
||||||
|
'tx_filter', metadata,
|
||||||
|
Column('tx_hash', LargeBinary, primary_key=True),
|
||||||
|
Column('height', Integer),
|
||||||
|
Column('address_filter', LargeBinary),
|
||||||
|
)
|
||||||
|
|
||||||
|
pg_add_tx_filter_constraints_and_indexes = [
|
||||||
|
"ALTER TABLE tx_filter ADD PRIMARY KEY (tx_hash);",
|
||||||
|
"ALTER TABLE tx_filter ADD CONSTRAINT fk_tx_filter"
|
||||||
|
" FOREIGN KEY (tx_hash) REFERENCES tx (tx_hash) ON DELETE CASCADE;"
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
MempoolFilter = Table(
|
||||||
|
'mempool_filter', metadata,
|
||||||
|
Column('filter_number', Integer),
|
||||||
|
Column('mempool_filter', LargeBinary),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
TXO = Table(
|
||||||
|
'txo', metadata,
|
||||||
|
Column('tx_hash', LargeBinary, ForeignKey(TX.columns.tx_hash)),
|
||||||
|
Column('txo_hash', LargeBinary, primary_key=True),
|
||||||
|
Column('address', Text),
|
||||||
|
Column('position', SmallInteger),
|
||||||
|
Column('amount', BigInteger),
|
||||||
|
Column('height', Integer),
|
||||||
|
Column('spent_height', Integer, server_default='0'),
|
||||||
|
Column('script_offset', Integer),
|
||||||
|
Column('script_length', Integer),
|
||||||
|
Column('is_reserved', Boolean, server_default='0'),
|
||||||
|
|
||||||
|
# claims
|
||||||
|
Column('txo_type', SmallInteger, server_default='0'),
|
||||||
|
Column('claim_id', Text, nullable=True),
|
||||||
|
Column('claim_hash', LargeBinary, nullable=True),
|
||||||
|
Column('claim_name', Text, nullable=True),
|
||||||
|
Column('channel_hash', LargeBinary, nullable=True), # claims in channel
|
||||||
|
Column('signature', LargeBinary, nullable=True),
|
||||||
|
Column('signature_digest', LargeBinary, nullable=True),
|
||||||
|
|
||||||
|
# reposts
|
||||||
|
Column('reposted_claim_hash', LargeBinary, nullable=True),
|
||||||
|
|
||||||
|
# channels
|
||||||
|
Column('public_key', LargeBinary, nullable=True),
|
||||||
|
Column('public_key_hash', LargeBinary, nullable=True),
|
||||||
|
)
|
||||||
|
|
||||||
|
txo_join_account = TXO.join(AccountAddress, TXO.columns.address == AccountAddress.columns.address)
|
||||||
|
|
||||||
|
pg_add_txo_constraints_and_indexes = [
|
||||||
|
"ALTER TABLE txo ADD PRIMARY KEY (txo_hash);",
|
||||||
|
# find appropriate channel public key for signing a content claim
|
||||||
|
f"CREATE INDEX txo_channel_hash_by_height_desc_w_pub_key "
|
||||||
|
f"ON txo (claim_hash, height desc) INCLUDE (public_key) "
|
||||||
|
f"WHERE txo_type={TXO_TYPES['channel']};",
|
||||||
|
# for calculating supports on a claim
|
||||||
|
f"CREATE INDEX txo_unspent_supports ON txo (claim_hash) INCLUDE (amount) "
|
||||||
|
f"WHERE spent_height = 0 AND txo_type={TXO_TYPES['support']};",
|
||||||
|
# for calculating balance
|
||||||
|
f"CREATE INDEX txo_unspent_by_address ON txo (address) INCLUDE (amount, txo_type, tx_hash) "
|
||||||
|
f"WHERE spent_height = 0;",
|
||||||
|
# for finding modified claims in a block range
|
||||||
|
f"CREATE INDEX txo_claim_changes "
|
||||||
|
f"ON txo (height DESC) INCLUDE (claim_hash, txo_hash) "
|
||||||
|
f"WHERE spent_height = 0 AND txo_type IN {tuple(CLAIM_TYPE_CODES)};",
|
||||||
|
# for finding claims which need support totals re-calculated in a block range
|
||||||
|
f"CREATE INDEX txo_added_supports_by_height ON txo (height DESC) "
|
||||||
|
f"INCLUDE (claim_hash) WHERE txo_type={TXO_TYPES['support']};",
|
||||||
|
f"CREATE INDEX txo_spent_supports_by_height ON txo (spent_height DESC) "
|
||||||
|
f"INCLUDE (claim_hash) WHERE txo_type={TXO_TYPES['support']};",
|
||||||
|
# for finding claims which need repost totals re-calculated in a block range
|
||||||
|
f"CREATE INDEX txo_added_reposts_by_height ON txo (height DESC) "
|
||||||
|
f"INCLUDE (reposted_claim_hash) WHERE txo_type={TXO_TYPES['repost']};",
|
||||||
|
f"CREATE INDEX txo_spent_reposts_by_height ON txo (spent_height DESC) "
|
||||||
|
f"INCLUDE (reposted_claim_hash) WHERE txo_type={TXO_TYPES['repost']};",
|
||||||
|
"CREATE INDEX txo_reposted_claim_hash ON txo (reposted_claim_hash)"
|
||||||
|
"WHERE reposted_claim_hash IS NOT NULL AND spent_height = 0;",
|
||||||
|
"CREATE INDEX txo_height ON txo (height);",
|
||||||
|
# used by sum_supports query (at least)
|
||||||
|
"CREATE INDEX txo_claim_hash ON txo (claim_hash)",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
TXI = Table(
|
||||||
|
'txi', metadata,
|
||||||
|
Column('tx_hash', LargeBinary, ForeignKey(TX.columns.tx_hash)),
|
||||||
|
Column('txo_hash', LargeBinary, ForeignKey(TXO.columns.txo_hash), primary_key=True),
|
||||||
|
Column('address', Text, nullable=True),
|
||||||
|
Column('position', SmallInteger),
|
||||||
|
Column('height', Integer),
|
||||||
|
)
|
||||||
|
|
||||||
|
txi_join_account = TXI.join(AccountAddress, TXI.columns.address == AccountAddress.columns.address)
|
||||||
|
|
||||||
|
pg_add_txi_constraints_and_indexes = [
|
||||||
|
"ALTER TABLE txi ADD PRIMARY KEY (txo_hash);",
|
||||||
|
"CREATE INDEX txi_height ON txi (height);",
|
||||||
|
"CREATE INDEX txi_first_address ON txi (tx_hash) INCLUDE (address) WHERE position = 0;",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
Claim = Table(
|
||||||
|
'claim', metadata,
|
||||||
|
Column('claim_hash', LargeBinary, primary_key=True),
|
||||||
|
Column('claim_id', Text),
|
||||||
|
Column('claim_name', Text),
|
||||||
|
Column('normalized', Text),
|
||||||
|
Column('address', Text),
|
||||||
|
Column('txo_hash', LargeBinary, ForeignKey(TXO.columns.txo_hash)),
|
||||||
|
Column('amount', BigInteger),
|
||||||
|
Column('staked_amount', BigInteger),
|
||||||
|
Column('timestamp', Integer), # last updated timestamp
|
||||||
|
Column('creation_timestamp', Integer),
|
||||||
|
Column('release_time', Integer, nullable=True),
|
||||||
|
Column('height', Integer), # last updated height
|
||||||
|
Column('creation_height', Integer),
|
||||||
|
Column('activation_height', Integer),
|
||||||
|
Column('expiration_height', Integer),
|
||||||
|
Column('takeover_height', Integer, nullable=True),
|
||||||
|
Column('is_controlling', Boolean),
|
||||||
|
|
||||||
|
# short_url: normalized#shortest-unique-claim_id
|
||||||
|
Column('short_url', Text),
|
||||||
|
# canonical_url: channel's-short_url/normalized#shortest-unique-claim_id-within-channel
|
||||||
|
# canonical_url is computed dynamically
|
||||||
|
|
||||||
|
Column('title', Text, nullable=True),
|
||||||
|
Column('author', Text, nullable=True),
|
||||||
|
Column('description', Text, nullable=True),
|
||||||
|
|
||||||
|
Column('claim_type', SmallInteger),
|
||||||
|
Column('staked_support_count', Integer, server_default='0'),
|
||||||
|
Column('staked_support_amount', BigInteger, server_default='0'),
|
||||||
|
|
||||||
|
# streams
|
||||||
|
Column('stream_type', SmallInteger, nullable=True),
|
||||||
|
Column('media_type', Text, nullable=True),
|
||||||
|
Column('fee_amount', BigInteger, server_default='0'),
|
||||||
|
Column('fee_currency', Text, nullable=True),
|
||||||
|
Column('duration', Integer, nullable=True),
|
||||||
|
|
||||||
|
# reposts
|
||||||
|
Column('reposted_claim_hash', LargeBinary, nullable=True), # on claim doing the repost
|
||||||
|
Column('reposted_count', Integer, server_default='0'), # on claim being reposted
|
||||||
|
|
||||||
|
# claims which are channels
|
||||||
|
Column('signed_claim_count', Integer, server_default='0'),
|
||||||
|
Column('signed_support_count', Integer, server_default='0'),
|
||||||
|
|
||||||
|
# claims which are inside channels
|
||||||
|
Column('channel_hash', LargeBinary, nullable=True),
|
||||||
|
Column('is_signature_valid', Boolean, nullable=True),
|
||||||
|
)
|
||||||
|
|
||||||
|
Tag = Table(
|
||||||
|
'tag', metadata,
|
||||||
|
Column('claim_hash', LargeBinary),
|
||||||
|
Column('tag', Text),
|
||||||
|
)
|
||||||
|
|
||||||
|
pg_add_claim_and_tag_constraints_and_indexes = [
|
||||||
|
"ALTER TABLE claim ADD PRIMARY KEY (claim_hash);",
|
||||||
|
# for checking if claim is up-to-date
|
||||||
|
"CREATE UNIQUE INDEX claim_txo_hash ON claim (txo_hash);",
|
||||||
|
# used by takeover process to reset winning claims
|
||||||
|
"CREATE INDEX claim_normalized ON claim (normalized);",
|
||||||
|
# ordering and search by release_time
|
||||||
|
"CREATE INDEX claim_release_time ON claim (release_time DESC NULLs LAST);",
|
||||||
|
# used to count()/sum() claims signed by channel
|
||||||
|
"CREATE INDEX signed_content ON claim (channel_hash) "
|
||||||
|
"INCLUDE (amount) WHERE is_signature_valid;",
|
||||||
|
# used to count()/sum() reposted claims
|
||||||
|
"CREATE INDEX reposted_content ON claim (reposted_claim_hash);",
|
||||||
|
# basic tag indexes
|
||||||
|
"ALTER TABLE tag ADD PRIMARY KEY (claim_hash, tag);",
|
||||||
|
"CREATE INDEX tags ON tag (tag) INCLUDE (claim_hash);",
|
||||||
|
# used by sum_supports query (at least)
|
||||||
|
"CREATE INDEX claim_channel_hash ON claim (channel_hash)",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
Support = Table(
|
||||||
|
'support', metadata,
|
||||||
|
|
||||||
|
Column('txo_hash', LargeBinary, ForeignKey(TXO.columns.txo_hash), primary_key=True),
|
||||||
|
Column('claim_hash', LargeBinary),
|
||||||
|
Column('address', Text),
|
||||||
|
Column('amount', BigInteger),
|
||||||
|
Column('height', Integer),
|
||||||
|
Column('timestamp', Integer),
|
||||||
|
|
||||||
|
# support metadata
|
||||||
|
Column('emoji', Text),
|
||||||
|
|
||||||
|
# signed supports
|
||||||
|
Column('channel_hash', LargeBinary, nullable=True),
|
||||||
|
Column('signature', LargeBinary, nullable=True),
|
||||||
|
Column('signature_digest', LargeBinary, nullable=True),
|
||||||
|
Column('is_signature_valid', Boolean, nullable=True),
|
||||||
|
)
|
||||||
|
|
||||||
|
pg_add_support_constraints_and_indexes = [
|
||||||
|
"ALTER TABLE support ADD PRIMARY KEY (txo_hash);",
|
||||||
|
# used to count()/sum() supports signed by channel
|
||||||
|
"CREATE INDEX signed_support ON support (channel_hash) "
|
||||||
|
"INCLUDE (amount) WHERE is_signature_valid;",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
Stake = Table(
|
||||||
|
'stake', metadata,
|
||||||
|
Column('claim_hash', LargeBinary),
|
||||||
|
Column('height', Integer),
|
||||||
|
Column('stake_min', BigInteger),
|
||||||
|
Column('stake_max', BigInteger),
|
||||||
|
Column('stake_sum', BigInteger),
|
||||||
|
Column('stake_count', Integer),
|
||||||
|
Column('stake_unique', Integer),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
Trend = Table(
|
||||||
|
'trend', metadata,
|
||||||
|
Column('claim_hash', LargeBinary, primary_key=True),
|
||||||
|
Column('trend_group', BigInteger, server_default='0'),
|
||||||
|
Column('trend_mixed', BigInteger, server_default='0'),
|
||||||
|
Column('trend_local', BigInteger, server_default='0'),
|
||||||
|
Column('trend_global', BigInteger, server_default='0'),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
CensoredClaim = Table(
|
||||||
|
'censored_claim', metadata,
|
||||||
|
Column('claim_hash', LargeBinary, primary_key=True),
|
||||||
|
Column('censor_type', SmallInteger),
|
||||||
|
Column('censoring_channel_hash', LargeBinary),
|
||||||
|
)
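The pg_add_* lists above are raw SQL strings rather than SQLAlchemy constructs, so they are applied separately from metadata.create_all(). A minimal sketch, using a throwaway in-memory SQLite database; the engine URL and the choice of which lists to inspect are assumptions:

# Sketch only: a throwaway in-memory SQLite database to materialize the schema above.
# The pg_add_* lists are PostgreSQL-only DDL that the project applies separately
# (typically after bulk loading); here they are just printed for inspection.
from sqlalchemy import create_engine
from lbry.db.tables import (
    metadata,
    pg_add_txo_constraints_and_indexes,
    pg_add_txi_constraints_and_indexes,
)

engine = create_engine("sqlite:///:memory:")
metadata.create_all(engine)  # creates every Table() declared in this module

for statement in pg_add_txo_constraints_and_indexes + pg_add_txi_constraints_and_indexes:
    print(statement)         # executing these requires a PostgreSQL connection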
lbry/db/trending.py (new file, 25 lines)
@@ -0,0 +1,25 @@
from sqlalchemy import select
from sqlalchemy.sql import func

from lbry.db.query_context import event_emitter, ProgressContext
from lbry.db.tables import Trend, Support, Claim
WINDOW = 576  # a day


@event_emitter("blockchain.sync.trending.update", "steps")
def calculate_trending(height, p: ProgressContext):
    with p.ctx.engine.begin() as ctx:
        ctx.execute(Trend.delete())
        start = height - WINDOW
        trending = func.sum(Support.c.amount * (WINDOW - (height - Support.c.height)))
        sql = (
            select([Claim.c.claim_hash, trending, trending, trending, 4])
            .where(
                (Support.c.claim_hash == Claim.c.claim_hash) &
                (Support.c.height <= height) &
                (Support.c.height >= start)
            ).group_by(Claim.c.claim_hash)
        )
        ctx.execute(Trend.insert().from_select(
            ['claim_hash', 'trend_global', 'trend_local', 'trend_mixed', 'trend_group'], sql
        ))
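The weighting is linear in recency: each support contributes amount * (WINDOW - (height - support_height)), so a support at the current height counts at full weight and one placed WINDOW blocks earlier contributes nothing. A worked sketch with made-up numbers:

# Illustrative arithmetic only; heights and amounts are made up.
WINDOW = 576

def support_weight(amount, support_height, height):
    # mirrors: Support.c.amount * (WINDOW - (height - Support.c.height))
    return amount * (WINDOW - (height - support_height))

height = 1_000_000
print(support_weight(10, height, height))           # 10 * 576 = 5760  (brand new support)
print(support_weight(10, height - 288, height))     # 10 * 288 = 2880  (half a window old)
print(support_weight(10, height - WINDOW, height))  # 10 * 0   = 0     (aged out of the window)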
lbry/db/utils.py (new file, 174 lines)
@@ -0,0 +1,174 @@
from itertools import islice
|
||||||
|
from typing import List, Union
|
||||||
|
|
||||||
|
from sqlalchemy import text, and_, or_
|
||||||
|
from sqlalchemy.sql.expression import Select, FunctionElement
|
||||||
|
from sqlalchemy.types import Numeric
|
||||||
|
from sqlalchemy.ext.compiler import compiles
|
||||||
|
try:
|
||||||
|
from sqlalchemy.dialects.postgresql import insert as pg_insert # pylint: disable=unused-import
|
||||||
|
except ImportError:
|
||||||
|
pg_insert = None
|
||||||
|
|
||||||
|
from .tables import AccountAddress
|
||||||
|
|
||||||
|
|
||||||
|
class greatest(FunctionElement): # pylint: disable=invalid-name
|
||||||
|
type = Numeric()
|
||||||
|
name = 'greatest'
|
||||||
|
|
||||||
|
|
||||||
|
@compiles(greatest)
|
||||||
|
def default_greatest(element, compiler, **kw):
|
||||||
|
return "greatest(%s)" % compiler.process(element.clauses, **kw)
|
||||||
|
|
||||||
|
|
||||||
|
@compiles(greatest, 'sqlite')
|
||||||
|
def sqlite_greatest(element, compiler, **kw):
|
||||||
|
return "max(%s)" % compiler.process(element.clauses, **kw)
|
||||||
|
|
||||||
|
|
||||||
|
class least(FunctionElement): # pylint: disable=invalid-name
|
||||||
|
type = Numeric()
|
||||||
|
name = 'least'
|
||||||
|
|
||||||
|
|
||||||
|
@compiles(least)
|
||||||
|
def default_least(element, compiler, **kw):
|
||||||
|
return "least(%s)" % compiler.process(element.clauses, **kw)
|
||||||
|
|
||||||
|
|
||||||
|
@compiles(least, 'sqlite')
|
||||||
|
def sqlite_least(element, compiler, **kw):
|
||||||
|
return "min(%s)" % compiler.process(element.clauses, **kw)
|
||||||
|
|
||||||
|
|
||||||
|
def chunk(rows, step):
|
||||||
|
it, total = iter(rows), len(rows)
|
||||||
|
for _ in range(0, total, step):
|
||||||
|
yield list(islice(it, step))
|
||||||
|
total -= step
|
||||||
|
|
||||||
|
|
||||||
|
def constrain_single_or_list(constraints, column, value, convert=lambda x: x):
|
||||||
|
if value is not None:
|
||||||
|
if isinstance(value, list):
|
||||||
|
value = [convert(v) for v in value]
|
||||||
|
if len(value) == 1:
|
||||||
|
constraints[column] = value[0]
|
||||||
|
elif len(value) > 1:
|
||||||
|
constraints[f"{column}__in"] = value
|
||||||
|
else:
|
||||||
|
constraints[column] = convert(value)
|
||||||
|
return constraints
|
||||||
|
|
||||||
|
|
||||||
|
def in_account_ids(account_ids: Union[List[str], str]):
|
||||||
|
if isinstance(account_ids, list):
|
||||||
|
if len(account_ids) > 1:
|
||||||
|
return AccountAddress.c.account.in_(account_ids)
|
||||||
|
account_ids = account_ids[0]
|
||||||
|
return AccountAddress.c.account == account_ids
|
||||||
|
|
||||||
|
|
||||||
|
def query(table, s: Select, **constraints) -> Select:
    limit = constraints.pop('limit', None)
    if limit is not None:
        s = s.limit(limit)

    offset = constraints.pop('offset', None)
    if offset is not None:
        s = s.offset(offset)

    order_by = constraints.pop('order_by', None)
    if order_by:
        if isinstance(order_by, str):
            s = s.order_by(text(order_by))
        elif isinstance(order_by, list):
            s = s.order_by(text(', '.join(order_by)))
        else:
            raise ValueError("order_by must be string or list")

    group_by = constraints.pop('group_by', None)
    if group_by is not None:
        s = s.group_by(text(group_by))

    account_ids = constraints.pop('account_ids', [])
    if account_ids:
        s = s.where(in_account_ids(account_ids))

    if constraints:
        s = s.where(and_(*constraints_to_clause(table, constraints)))

    return s
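`query()` peels the paging/ordering keywords off first and treats whatever is left in `constraints` as column filters. A hedged sketch of a call site (the table and columns are illustrative, not the project's actual schema; assumes SQLAlchemy 1.3-style `select([...])`):

from sqlalchemy import MetaData, Table, Column, Integer, Text, select

TX = Table('tx', MetaData(), Column('height', Integer), Column('txid', Text))
stmt = query(
    [TX], select([TX.c.txid]),
    height__gte=100, limit=20, order_by='height'
)
print(stmt)  # SELECT tx.txid ... WHERE tx.height >= :height_1 ORDER BY height LIMIT ...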
def constraints_to_clause(tables, constraints):
    clause = []
    for key, constraint in constraints.items():
        if key.endswith('__not'):
            col, op = key[:-len('__not')], '__ne__'
        elif key.endswith('__is_null'):
            col = key[:-len('__is_null')]
            op = '__eq__'
            constraint = None
        elif key.endswith('__is_not_null'):
            col = key[:-len('__is_not_null')]
            op = '__ne__'
            constraint = None
        elif key.endswith('__lt'):
            col, op = key[:-len('__lt')], '__lt__'
        elif key.endswith('__lte'):
            col, op = key[:-len('__lte')], '__le__'
        elif key.endswith('__gt'):
            col, op = key[:-len('__gt')], '__gt__'
        elif key.endswith('__gte'):
            col, op = key[:-len('__gte')], '__ge__'
        elif key.endswith('__like'):
            col, op = key[:-len('__like')], 'like'
        elif key.endswith('__not_like'):
            col, op = key[:-len('__not_like')], 'notlike'
        elif key.endswith('__in') or key.endswith('__not_in'):
            if key.endswith('__in'):
                col, op, one_val_op = key[:-len('__in')], 'in_', '__eq__'
            else:
                col, op, one_val_op = key[:-len('__not_in')], 'notin_', '__ne__'
            if isinstance(constraint, Select):
                pass
            elif constraint:
                if isinstance(constraint, (list, set, tuple)):
                    if len(constraint) == 1:
                        op = one_val_op
                        constraint = next(iter(constraint))
                elif isinstance(constraint, str):
                    constraint = text(constraint)
                else:
                    raise ValueError(f"{col} requires a list, set or string as constraint value.")
            else:
                continue
        elif key.endswith('__or'):
            clause.append(or_(*constraints_to_clause(tables, constraint)))
            continue
        else:
            col, op = key, '__eq__'
        attr = None
        if '.' in col:
            table_name, col = col.split('.')
            _table = None
            for table in tables:
                if table.name == table_name.lower():
                    _table = table
                    break
            if _table is not None:
                attr = getattr(_table.c, col)
            else:
                raise ValueError(f"Table '{table_name}' not available: {', '.join([t.name for t in tables])}.")
        else:
            for table in tables:
                attr = getattr(table.c, col, None)
                if attr is not None:
                    break
            if attr is None:
                raise ValueError(f"Attribute '{col}' not found on tables: {', '.join([t.name for t in tables])}.")
        clause.append(getattr(attr, op)(constraint))
    return clause
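The suffix-based keyword DSL above maps Django-style lookups onto SQLAlchemy column operators. A quick, hedged illustration against a made-up table:

from sqlalchemy import MetaData, Table, Column, Integer, Text

TX = Table('tx', MetaData(), Column('height', Integer), Column('txid', Text))
clauses = constraints_to_clause([TX], {
    'height__gte': 100,        # tx.height >= 100
    'txid__in': ['aa', 'bb'],  # tx.txid IN ('aa', 'bb')
    'height__is_null': True,   # tx.height IS NULL (the passed value is ignored)
})
for c in clauses:
    print(c)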
@@ -3,11 +3,15 @@ import typing
 import logging
 
 if typing.TYPE_CHECKING:
     from lbry.dht.node import Node
     from lbry.extras.daemon.storage import SQLiteStorage
 
 log = logging.getLogger(__name__)
 
 
+class SQLiteStorage:
+    pass
+
+
 class BlobAnnouncer:
     def __init__(self, loop: asyncio.AbstractEventLoop, node: 'Node', storage: 'SQLiteStorage'):
         self.loop = loop
@@ -171,7 +171,7 @@ def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDatagram]:
 def make_compact_ip(address: str) -> bytearray:
     compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), address.split('.'), bytearray())
     if len(compact_ip) != 4:
-        raise ValueError(f"invalid IPv4 length")
+        raise ValueError("invalid IPv4 length")
     return compact_ip


@@ -180,7 +180,7 @@ def make_compact_address(node_id: bytes, address: str, port: int) -> bytearray:
     if not 0 < port < 65536:
         raise ValueError(f'Invalid port: {port}')
     if len(node_id) != constants.HASH_BITS // 8:
-        raise ValueError(f"invalid node node_id length")
+        raise ValueError("invalid node node_id length")
     return compact_ip + port.to_bytes(2, 'big') + node_id


@@ -191,5 +191,5 @@ def decode_compact_address(compact_address: bytes) -> typing.Tuple[bytes, str, int]:
     if not 0 < port < 65536:
         raise ValueError(f'Invalid port: {port}')
     if len(node_id) != constants.HASH_BITS // 8:
-        raise ValueError(f"invalid node node_id length")
+        raise ValueError("invalid node node_id length")
     return node_id, address, port
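These helpers pack a DHT peer into the usual compact wire form: 4-byte IP, 2-byte big-endian port, then the node id. A small sketch of the round trip inside this module, assuming the usual 48-byte node ids (`constants.HASH_BITS // 8`); the node id value here is made up:

node_id = bytes(48)  # made-up id, length must equal constants.HASH_BITS // 8
packed = make_compact_address(node_id, "10.0.0.1", 4444)
assert decode_compact_address(packed) == (node_id, "10.0.0.1", 4444)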
@@ -81,3 +81,6 @@ Code | Name | Message
 701 | InvalidExchangeRateResponse | Failed to get exchange rate from {source}: {reason}
 702 | CurrencyConversion | {message}
 703 | InvalidCurrency | Invalid currency: {currency} is not a supported currency.
+**8xx** | Lbrycrd | **Lbrycrd**
+801 | LbrycrdUnauthorized | Failed to authenticate with lbrycrd. Perhaps wrong username or password?
+811 | LbrycrdEventSubscription | Lbrycrd is not publishing '{event}' events.
@@ -398,3 +398,22 @@ class InvalidCurrencyError(CurrencyExchangeError):
     def __init__(self, currency):
         self.currency = currency
         super().__init__(f"Invalid currency: {currency} is not a supported currency.")
+
+
+class LbrycrdError(BaseError):
+    """
+    **Lbrycrd**
+    """
+
+
+class LbrycrdUnauthorizedError(LbrycrdError):
+
+    def __init__(self):
+        super().__init__("Failed to authenticate with lbrycrd. Perhaps wrong username or password?")
+
+
+class LbrycrdEventSubscriptionError(LbrycrdError):
+
+    def __init__(self, event):
+        self.event = event
+        super().__init__(f"Lbrycrd is not publishing '{event}' events.")
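These classes are generated from the error table above, so callers can branch on the `LbrycrdError` base. A hedged sketch of how a caller might use them (the raise site is illustrative only, and it assumes the generated base class carries the message through to `str()`):

try:
    raise LbrycrdEventSubscriptionError('blockhash')
except LbrycrdError as e:
    print(type(e).__name__, '-', e)
    # LbrycrdEventSubscriptionError - Lbrycrd is not publishing 'blockhash' events.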
@@ -63,7 +63,7 @@ class ErrorClass:
     @staticmethod
     def get_fields(args):
         if len(args) > 1:
-            return f''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:])
+            return ''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:])
         return ''

     @staticmethod
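The `f''` prefix was pointless on a literal with no placeholders; the join itself is what builds the constructor body for each generated error class. A quick sketch of its output, assuming `INDENT` is four spaces (the real script defines its own `INDENT`):

INDENT = ' ' * 4  # assumed value for illustration
args = ['LbrycrdEventSubscription', 'event']
print(''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:]))
# ->
#         self.event = event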
263
lbry/event.py
Normal file
@@ -0,0 +1,263 @@
import time
import asyncio
import threading
import logging
from queue import Empty
from multiprocessing import Queue


log = logging.getLogger(__name__)


class BroadcastSubscription:

    def __init__(self, controller: 'EventController', on_data, on_error, on_done):
        self._controller = controller
        self._previous = self._next = None
        self._on_data = on_data
        self._on_error = on_error
        self._on_done = on_done
        self.is_paused = False
        self.is_canceled = False
        self.is_closed = False

    def pause(self):
        self.is_paused = True

    def resume(self):
        self.is_paused = False

    def cancel(self):
        self._controller._cancel(self)
        self.is_canceled = True

    @property
    def can_fire(self):
        return not any((self.is_paused, self.is_canceled, self.is_closed))

    def _add(self, data):
        if self.can_fire and self._on_data is not None:
            return self._on_data(data)

    def _add_error(self, exception):
        if self.can_fire and self._on_error is not None:
            return self._on_error(exception)

    def _close(self):
        try:
            if self.can_fire and self._on_done is not None:
                return self._on_done()
        finally:
            self.is_closed = True


class EventController:

    def __init__(self, merge_repeated_events=False):
        self.stream = EventStream(self)
        self._first_subscription = None
        self._last_subscription = None
        self._last_event = None
        self._merge_repeated = merge_repeated_events

    @property
    def has_listener(self):
        return self._first_subscription is not None

    @property
    def _iterate_subscriptions(self):
        next_sub = self._first_subscription
        while next_sub is not None:
            subscription = next_sub
            yield subscription
            next_sub = next_sub._next

    async def _notify(self, notify, *args):
        try:
            maybe_coroutine = notify(*args)
            if maybe_coroutine is not None and asyncio.iscoroutine(maybe_coroutine):
                await maybe_coroutine
        except Exception as e:
            log.exception(e)
            raise

    async def add(self, event):
        if self._merge_repeated and event == self._last_event:
            return
        self._last_event = event
        for subscription in self._iterate_subscriptions:
            await self._notify(subscription._add, event)

    async def add_all(self, events):
        for event in events:
            await self.add(event)

    async def add_error(self, exception):
        for subscription in self._iterate_subscriptions:
            await self._notify(subscription._add_error, exception)

    async def close(self):
        for subscription in self._iterate_subscriptions:
            await self._notify(subscription._close)

    def _cancel(self, subscription):
        previous = subscription._previous
        next_sub = subscription._next
        if previous is None:
            self._first_subscription = next_sub
        else:
            previous._next = next_sub
        if next_sub is None:
            self._last_subscription = previous
        else:
            next_sub._previous = previous

    def _listen(self, on_data, on_error, on_done):
        subscription = BroadcastSubscription(self, on_data, on_error, on_done)
        old_last = self._last_subscription
        self._last_subscription = subscription
        subscription._previous = old_last
        subscription._next = None
        if old_last is None:
            self._first_subscription = subscription
        else:
            old_last._next = subscription
        return subscription


class EventStream:

    def __init__(self, controller: EventController):
        self._controller = controller

    def listen(self, on_data, on_error=None, on_done=None) -> BroadcastSubscription:
        return self._controller._listen(on_data, on_error, on_done)

    def where(self, condition) -> asyncio.Future:
        future = asyncio.get_running_loop().create_future()

        def where_test(value):
            if condition(value):
                self._cancel_and_callback(subscription, future, value)

        subscription = self.listen(
            where_test,
            lambda exception: self._cancel_and_error(subscription, future, exception)
        )

        return future

    @property
    def first(self) -> asyncio.Future:
        future = asyncio.get_running_loop().create_future()
        subscription = self.listen(
            lambda value: not future.done() and self._cancel_and_callback(subscription, future, value),
            lambda exception: not future.done() and self._cancel_and_error(subscription, future, exception)
        )
        return future

    @property
    def last(self) -> asyncio.Future:
        future = asyncio.get_running_loop().create_future()
        value = None

        def update_value(_value):
            nonlocal value
            value = _value

        subscription = self.listen(
            update_value,
            lambda exception: not future.done() and self._cancel_and_error(subscription, future, exception),
            lambda: not future.done() and self._cancel_and_callback(subscription, future, value),
        )

        return future

    @staticmethod
    def _cancel_and_callback(subscription: BroadcastSubscription, future: asyncio.Future, value):
        subscription.cancel()
        future.set_result(value)

    @staticmethod
    def _cancel_and_error(subscription: BroadcastSubscription, future: asyncio.Future, exception):
        subscription.cancel()
        future.set_exception(exception)


class EventRegistry:

    def __init__(self):
        self.events = {}

    def register(self, name, stream: EventStream):
        assert name not in self.events
        self.events[name] = stream

    def get(self, event_name):
        return self.events.get(event_name)

    def clear(self):
        self.events.clear()


class EventQueuePublisher(threading.Thread):

    STOP = 'STOP'

    def __init__(self, queue: Queue, event_controller: EventController):
        super().__init__()
        self.queue = queue
        self.event_controller = event_controller
        self.loop = None

    @staticmethod
    def message_to_event(message):
        return message

    def start(self):
        self.loop = asyncio.get_running_loop()
        super().start()

    def run(self):
        queue_get_timeout = 0.2
        buffer_drain_size = 100
        buffer_drain_timeout = 0.1

        buffer = []
        last_drained_ms_ago = time.perf_counter()
        while True:
            try:
                msg = self.queue.get(timeout=queue_get_timeout)
                if msg != self.STOP:
                    buffer.append(msg)
            except Empty:
                msg = None

            drain = any((
                len(buffer) >= buffer_drain_size,
                (time.perf_counter() - last_drained_ms_ago) >= buffer_drain_timeout,
                msg == self.STOP
            ))
            if drain and buffer:
                asyncio.run_coroutine_threadsafe(
                    self.event_controller.add_all([
                        self.message_to_event(msg) for msg in buffer
                    ]), self.loop
                )
                buffer.clear()
                last_drained_ms_ago = time.perf_counter()

            if msg == self.STOP:
                return

    def stop(self):
        self.queue.put(self.STOP)
        if self.is_alive():
            self.join()

    def __enter__(self):
        self.start()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
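The controller/stream pair is a small broadcast primitive: producers `add()` events on the controller, listeners subscribe to `controller.stream`, and `first`/`where()` turn a subscription into an awaitable. A minimal, self-contained sketch of the new API:

import asyncio

async def main():
    controller = EventController()
    seen = []
    controller.stream.listen(seen.append)
    first = controller.stream.first          # future resolved by the next event
    await controller.add({"event": "progress", "step": 1})
    await controller.add({"event": "progress", "step": 2})
    print(await first)   # {'event': 'progress', 'step': 1}
    print(seen)          # both events were delivered to the plain listener

asyncio.run(main())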
@ -1,233 +0,0 @@
|
||||||
import asyncio
|
|
||||||
import collections
|
|
||||||
import logging
|
|
||||||
import typing
|
|
||||||
import aiohttp
|
|
||||||
from lbry import utils
|
|
||||||
from lbry.conf import Config
|
|
||||||
from lbry.extras import system_info
|
|
||||||
|
|
||||||
ANALYTICS_ENDPOINT = 'https://api.segment.io/v1'
|
|
||||||
ANALYTICS_TOKEN = 'Ax5LZzR1o3q3Z3WjATASDwR5rKyHH0qOIRIbLmMXn2H='
|
|
||||||
|
|
||||||
# Things We Track
|
|
||||||
SERVER_STARTUP = 'Server Startup'
|
|
||||||
SERVER_STARTUP_SUCCESS = 'Server Startup Success'
|
|
||||||
SERVER_STARTUP_ERROR = 'Server Startup Error'
|
|
||||||
DOWNLOAD_STARTED = 'Download Started'
|
|
||||||
DOWNLOAD_ERRORED = 'Download Errored'
|
|
||||||
DOWNLOAD_FINISHED = 'Download Finished'
|
|
||||||
HEARTBEAT = 'Heartbeat'
|
|
||||||
CLAIM_ACTION = 'Claim Action' # publish/create/update/abandon
|
|
||||||
NEW_CHANNEL = 'New Channel'
|
|
||||||
CREDITS_SENT = 'Credits Sent'
|
|
||||||
UPNP_SETUP = "UPnP Setup"
|
|
||||||
|
|
||||||
BLOB_BYTES_UPLOADED = 'Blob Bytes Uploaded'
|
|
||||||
|
|
||||||
|
|
||||||
TIME_TO_FIRST_BYTES = "Time To First Bytes"
|
|
||||||
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def _event_properties(installation_id: str, session_id: str,
|
|
||||||
event_properties: typing.Optional[typing.Dict]) -> typing.Dict:
|
|
||||||
properties = {
|
|
||||||
'lbry_id': installation_id,
|
|
||||||
'session_id': session_id,
|
|
||||||
}
|
|
||||||
properties.update(event_properties or {})
|
|
||||||
return properties
|
|
||||||
|
|
||||||
|
|
||||||
def _download_properties(conf: Config, external_ip: str, resolve_duration: float,
|
|
||||||
total_duration: typing.Optional[float], download_id: str, name: str,
|
|
||||||
outpoint: str, active_peer_count: typing.Optional[int],
|
|
||||||
tried_peers_count: typing.Optional[int], connection_failures_count: typing.Optional[int],
|
|
||||||
added_fixed_peers: bool, fixed_peer_delay: float, sd_hash: str,
|
|
||||||
sd_download_duration: typing.Optional[float] = None,
|
|
||||||
head_blob_hash: typing.Optional[str] = None,
|
|
||||||
head_blob_length: typing.Optional[int] = None,
|
|
||||||
head_blob_download_duration: typing.Optional[float] = None,
|
|
||||||
error: typing.Optional[str] = None, error_msg: typing.Optional[str] = None,
|
|
||||||
wallet_server: typing.Optional[str] = None) -> typing.Dict:
|
|
||||||
return {
|
|
||||||
"external_ip": external_ip,
|
|
||||||
"download_id": download_id,
|
|
||||||
"total_duration": round(total_duration, 4),
|
|
||||||
"resolve_duration": None if not resolve_duration else round(resolve_duration, 4),
|
|
||||||
"error": error,
|
|
||||||
"error_message": error_msg,
|
|
||||||
'name': name,
|
|
||||||
"outpoint": outpoint,
|
|
||||||
|
|
||||||
"node_rpc_timeout": conf.node_rpc_timeout,
|
|
||||||
"peer_connect_timeout": conf.peer_connect_timeout,
|
|
||||||
"blob_download_timeout": conf.blob_download_timeout,
|
|
||||||
"use_fixed_peers": len(conf.reflector_servers) > 0,
|
|
||||||
"fixed_peer_delay": fixed_peer_delay,
|
|
||||||
"added_fixed_peers": added_fixed_peers,
|
|
||||||
"active_peer_count": active_peer_count,
|
|
||||||
"tried_peers_count": tried_peers_count,
|
|
||||||
|
|
||||||
"sd_blob_hash": sd_hash,
|
|
||||||
"sd_blob_duration": None if not sd_download_duration else round(sd_download_duration, 4),
|
|
||||||
|
|
||||||
"head_blob_hash": head_blob_hash,
|
|
||||||
"head_blob_length": head_blob_length,
|
|
||||||
"head_blob_duration": None if not head_blob_download_duration else round(head_blob_download_duration, 4),
|
|
||||||
|
|
||||||
"connection_failures_count": connection_failures_count,
|
|
||||||
"wallet_server": wallet_server
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def _make_context(platform):
|
|
||||||
# see https://segment.com/docs/spec/common/#context
|
|
||||||
# they say they'll ignore fields outside the spec, but evidently they don't
|
|
||||||
context = {
|
|
||||||
'app': {
|
|
||||||
'version': platform['lbrynet_version'],
|
|
||||||
'build': platform['build'],
|
|
||||||
},
|
|
||||||
# TODO: expand os info to give linux/osx specific info
|
|
||||||
'os': {
|
|
||||||
'name': platform['os_system'],
|
|
||||||
'version': platform['os_release']
|
|
||||||
},
|
|
||||||
}
|
|
||||||
if 'desktop' in platform and 'distro' in platform:
|
|
||||||
context['os']['desktop'] = platform['desktop']
|
|
||||||
context['os']['distro'] = platform['distro']
|
|
||||||
return context
|
|
||||||
|
|
||||||
|
|
||||||
class AnalyticsManager:
|
|
||||||
def __init__(self, conf: Config, installation_id: str, session_id: str):
|
|
||||||
self.conf = conf
|
|
||||||
self.cookies = {}
|
|
||||||
self.url = ANALYTICS_ENDPOINT
|
|
||||||
self._write_key = utils.deobfuscate(ANALYTICS_TOKEN)
|
|
||||||
self._tracked_data = collections.defaultdict(list)
|
|
||||||
self.context = _make_context(system_info.get_platform())
|
|
||||||
self.installation_id = installation_id
|
|
||||||
self.session_id = session_id
|
|
||||||
self.task: typing.Optional[asyncio.Task] = None
|
|
||||||
self.external_ip: typing.Optional[str] = None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def enabled(self):
|
|
||||||
return self.conf.share_usage_data
|
|
||||||
|
|
||||||
@property
|
|
||||||
def is_started(self):
|
|
||||||
return self.task is not None
|
|
||||||
|
|
||||||
async def start(self):
|
|
||||||
if self.task is None:
|
|
||||||
self.task = asyncio.create_task(self.run())
|
|
||||||
|
|
||||||
async def run(self):
|
|
||||||
while True:
|
|
||||||
if self.enabled:
|
|
||||||
self.external_ip = await utils.get_external_ip()
|
|
||||||
await self._send_heartbeat()
|
|
||||||
await asyncio.sleep(1800)
|
|
||||||
|
|
||||||
def stop(self):
|
|
||||||
if self.task is not None and not self.task.done():
|
|
||||||
self.task.cancel()
|
|
||||||
|
|
||||||
async def _post(self, data: typing.Dict):
|
|
||||||
request_kwargs = {
|
|
||||||
'method': 'POST',
|
|
||||||
'url': self.url + '/track',
|
|
||||||
'headers': {'Connection': 'Close'},
|
|
||||||
'auth': aiohttp.BasicAuth(self._write_key, ''),
|
|
||||||
'json': data,
|
|
||||||
'cookies': self.cookies
|
|
||||||
}
|
|
||||||
try:
|
|
||||||
async with utils.aiohttp_request(**request_kwargs) as response:
|
|
||||||
self.cookies.update(response.cookies)
|
|
||||||
except Exception as e:
|
|
||||||
log.debug('Encountered an exception while POSTing to %s: ', self.url + '/track', exc_info=e)
|
|
||||||
|
|
||||||
async def track(self, event: typing.Dict):
|
|
||||||
"""Send a single tracking event"""
|
|
||||||
if self.enabled:
|
|
||||||
log.debug('Sending track event: %s', event)
|
|
||||||
await self._post(event)
|
|
||||||
|
|
||||||
async def send_upnp_setup_success_fail(self, success, status):
|
|
||||||
await self.track(
|
|
||||||
self._event(UPNP_SETUP, {
|
|
||||||
'success': success,
|
|
||||||
'status': status,
|
|
||||||
})
|
|
||||||
)
|
|
||||||
|
|
||||||
async def send_server_startup(self):
|
|
||||||
await self.track(self._event(SERVER_STARTUP))
|
|
||||||
|
|
||||||
async def send_server_startup_success(self):
|
|
||||||
await self.track(self._event(SERVER_STARTUP_SUCCESS))
|
|
||||||
|
|
||||||
async def send_server_startup_error(self, message):
|
|
||||||
await self.track(self._event(SERVER_STARTUP_ERROR, {'message': message}))
|
|
||||||
|
|
||||||
async def send_time_to_first_bytes(self, resolve_duration: typing.Optional[float],
|
|
||||||
total_duration: typing.Optional[float], download_id: str,
|
|
||||||
name: str, outpoint: typing.Optional[str],
|
|
||||||
found_peers_count: typing.Optional[int],
|
|
||||||
tried_peers_count: typing.Optional[int],
|
|
||||||
connection_failures_count: typing.Optional[int],
|
|
||||||
added_fixed_peers: bool,
|
|
||||||
fixed_peers_delay: float, sd_hash: str,
|
|
||||||
sd_download_duration: typing.Optional[float] = None,
|
|
||||||
head_blob_hash: typing.Optional[str] = None,
|
|
||||||
head_blob_length: typing.Optional[int] = None,
|
|
||||||
head_blob_duration: typing.Optional[int] = None,
|
|
||||||
error: typing.Optional[str] = None,
|
|
||||||
error_msg: typing.Optional[str] = None,
|
|
||||||
wallet_server: typing.Optional[str] = None):
|
|
||||||
await self.track(self._event(TIME_TO_FIRST_BYTES, _download_properties(
|
|
||||||
self.conf, self.external_ip, resolve_duration, total_duration, download_id, name, outpoint,
|
|
||||||
found_peers_count, tried_peers_count, connection_failures_count, added_fixed_peers, fixed_peers_delay,
|
|
||||||
sd_hash, sd_download_duration, head_blob_hash, head_blob_length, head_blob_duration, error, error_msg,
|
|
||||||
wallet_server
|
|
||||||
)))
|
|
||||||
|
|
||||||
async def send_download_finished(self, download_id, name, sd_hash):
|
|
||||||
await self.track(
|
|
||||||
self._event(
|
|
||||||
DOWNLOAD_FINISHED, {
|
|
||||||
'download_id': download_id,
|
|
||||||
'name': name,
|
|
||||||
'stream_info': sd_hash
|
|
||||||
}
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
async def send_claim_action(self, action):
|
|
||||||
await self.track(self._event(CLAIM_ACTION, {'action': action}))
|
|
||||||
|
|
||||||
async def send_new_channel(self):
|
|
||||||
await self.track(self._event(NEW_CHANNEL))
|
|
||||||
|
|
||||||
async def send_credits_sent(self):
|
|
||||||
await self.track(self._event(CREDITS_SENT))
|
|
||||||
|
|
||||||
async def _send_heartbeat(self):
|
|
||||||
await self.track(self._event(HEARTBEAT))
|
|
||||||
|
|
||||||
def _event(self, event, properties: typing.Optional[typing.Dict] = None):
|
|
||||||
return {
|
|
||||||
'userId': 'lbry',
|
|
||||||
'event': event,
|
|
||||||
'properties': _event_properties(self.installation_id, self.session_id, properties),
|
|
||||||
'context': self.context,
|
|
||||||
'timestamp': utils.isonow()
|
|
||||||
}
|
|
|
@@ -1,6 +0,0 @@
-from lbry.conf import Config
-from lbry.extras.cli import execute_command
-
-
-def daemon_rpc(conf: Config, method: str, **kwargs):
-    return execute_command(conf, method, kwargs, callback=lambda data: data)
@ -1,66 +0,0 @@
|
||||||
import logging
|
|
||||||
import time
|
|
||||||
import hashlib
|
|
||||||
import binascii
|
|
||||||
|
|
||||||
import ecdsa
|
|
||||||
from lbry import utils
|
|
||||||
from lbry.crypto.hash import sha256
|
|
||||||
from lbry.wallet.transaction import Output
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def get_encoded_signature(signature):
|
|
||||||
signature = signature.encode() if isinstance(signature, str) else signature
|
|
||||||
r = int(signature[:int(len(signature) / 2)], 16)
|
|
||||||
s = int(signature[int(len(signature) / 2):], 16)
|
|
||||||
return ecdsa.util.sigencode_der(r, s, len(signature) * 4)
|
|
||||||
|
|
||||||
|
|
||||||
def cid2hash(claim_id: str) -> bytes:
|
|
||||||
return binascii.unhexlify(claim_id.encode())[::-1]
|
|
||||||
|
|
||||||
|
|
||||||
def is_comment_signed_by_channel(comment: dict, channel: Output, abandon=False):
|
|
||||||
if isinstance(channel, Output):
|
|
||||||
try:
|
|
||||||
signing_field = comment['comment_id'] if abandon else comment['comment']
|
|
||||||
pieces = [
|
|
||||||
comment['signing_ts'].encode(),
|
|
||||||
cid2hash(comment['channel_id']),
|
|
||||||
signing_field.encode()
|
|
||||||
]
|
|
||||||
return Output.is_signature_valid(
|
|
||||||
get_encoded_signature(comment['signature']),
|
|
||||||
sha256(b''.join(pieces)),
|
|
||||||
channel.claim.channel.public_key_bytes
|
|
||||||
)
|
|
||||||
except KeyError:
|
|
||||||
pass
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def sign_comment(comment: dict, channel: Output, abandon=False):
|
|
||||||
timestamp = str(int(time.time()))
|
|
||||||
signing_field = comment['comment_id'] if abandon else comment['comment']
|
|
||||||
pieces = [timestamp.encode(), channel.claim_hash, signing_field.encode()]
|
|
||||||
digest = sha256(b''.join(pieces))
|
|
||||||
signature = channel.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
|
|
||||||
comment.update({
|
|
||||||
'signature': binascii.hexlify(signature).decode(),
|
|
||||||
'signing_ts': timestamp
|
|
||||||
})
|
|
||||||
|
|
||||||
|
|
||||||
async def jsonrpc_post(url: str, method: str, params: dict = None, **kwargs) -> any:
|
|
||||||
params = params or {}
|
|
||||||
params.update(kwargs)
|
|
||||||
json_body = {'jsonrpc': '2.0', 'id': None, 'method': method, 'params': params}
|
|
||||||
async with utils.aiohttp_request('POST', url, json=json_body) as response:
|
|
||||||
try:
|
|
||||||
result = await response.json()
|
|
||||||
return result['result'] if 'result' in result else result
|
|
||||||
except Exception as cte:
|
|
||||||
log.exception('Unable to decode response from server: %s', cte)
|
|
||||||
return await response.text()
|
|
|
@ -1,75 +0,0 @@
|
||||||
import asyncio
|
|
||||||
import logging
|
|
||||||
from lbry.conf import Config
|
|
||||||
from lbry.extras.daemon.componentmanager import ComponentManager
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class ComponentType(type):
|
|
||||||
def __new__(mcs, name, bases, newattrs):
|
|
||||||
klass = type.__new__(mcs, name, bases, newattrs)
|
|
||||||
if name != "Component" and newattrs['__module__'] != 'lbry.testcase':
|
|
||||||
ComponentManager.default_component_classes[klass.component_name] = klass
|
|
||||||
return klass
|
|
||||||
|
|
||||||
|
|
||||||
class Component(metaclass=ComponentType):
|
|
||||||
"""
|
|
||||||
lbry-daemon component helper
|
|
||||||
|
|
||||||
Inheriting classes will be automatically registered with the ComponentManager and must implement setup and stop
|
|
||||||
methods
|
|
||||||
"""
|
|
||||||
|
|
||||||
depends_on = []
|
|
||||||
component_name = None
|
|
||||||
|
|
||||||
def __init__(self, component_manager):
|
|
||||||
self.conf: Config = component_manager.conf
|
|
||||||
self.component_manager = component_manager
|
|
||||||
self._running = False
|
|
||||||
|
|
||||||
def __lt__(self, other):
|
|
||||||
return self.component_name < other.component_name
|
|
||||||
|
|
||||||
@property
|
|
||||||
def running(self):
|
|
||||||
return self._running
|
|
||||||
|
|
||||||
async def get_status(self):
|
|
||||||
return
|
|
||||||
|
|
||||||
async def start(self):
|
|
||||||
raise NotImplementedError()
|
|
||||||
|
|
||||||
async def stop(self):
|
|
||||||
raise NotImplementedError()
|
|
||||||
|
|
||||||
@property
|
|
||||||
def component(self):
|
|
||||||
raise NotImplementedError()
|
|
||||||
|
|
||||||
async def _setup(self):
|
|
||||||
try:
|
|
||||||
result = await self.start()
|
|
||||||
self._running = True
|
|
||||||
return result
|
|
||||||
except asyncio.CancelledError:
|
|
||||||
log.info("Cancelled setup of %s component", self.__class__.__name__)
|
|
||||||
raise
|
|
||||||
except Exception as err:
|
|
||||||
log.exception("Error setting up %s", self.component_name or self.__class__.__name__)
|
|
||||||
raise err
|
|
||||||
|
|
||||||
async def _stop(self):
|
|
||||||
try:
|
|
||||||
result = await self.stop()
|
|
||||||
self._running = False
|
|
||||||
return result
|
|
||||||
except asyncio.CancelledError:
|
|
||||||
log.info("Cancelled stop of %s component", self.__class__.__name__)
|
|
||||||
raise
|
|
||||||
except Exception as err:
|
|
||||||
log.exception("Error stopping %s", self.__class__.__name__)
|
|
||||||
raise err
|
|
|
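The removed component.py registered every `Component` subclass with the `ComponentManager` through its metaclass and wrapped `start`/`stop` in the `_setup`/`_stop` bookkeeping shown above. A hedged sketch of what a minimal component looked like against that interface (the subclass and its internals are illustrative, not part of the codebase):

class UPnPStub(Component):              # illustrative subclass name
    component_name = "upnp_stub"
    depends_on = []                     # no requirements, so it starts in the first stage

    def __init__(self, component_manager):
        super().__init__(component_manager)
        self._service = None

    @property
    def component(self):
        return self._service            # what get_component() hands to dependents

    async def start(self):
        self._service = object()        # stand-in for the real setup work

    async def stop(self):
        self._service = None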
@ -1,171 +0,0 @@
|
||||||
import logging
|
|
||||||
import asyncio
|
|
||||||
from lbry.conf import Config
|
|
||||||
from lbry.error import ComponentStartConditionNotMetError
|
|
||||||
from lbry.dht.peer import PeerManager
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class RegisteredConditions:
|
|
||||||
conditions = {}
|
|
||||||
|
|
||||||
|
|
||||||
class RequiredConditionType(type):
|
|
||||||
def __new__(mcs, name, bases, newattrs):
|
|
||||||
klass = type.__new__(mcs, name, bases, newattrs)
|
|
||||||
if name != "RequiredCondition":
|
|
||||||
if klass.name in RegisteredConditions.conditions:
|
|
||||||
raise SyntaxError("already have a component registered for \"%s\"" % klass.name)
|
|
||||||
RegisteredConditions.conditions[klass.name] = klass
|
|
||||||
return klass
|
|
||||||
|
|
||||||
|
|
||||||
class RequiredCondition(metaclass=RequiredConditionType):
|
|
||||||
name = ""
|
|
||||||
component = ""
|
|
||||||
message = ""
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def evaluate(component):
|
|
||||||
raise NotImplementedError()
|
|
||||||
|
|
||||||
|
|
||||||
class ComponentManager:
|
|
||||||
default_component_classes = {}
|
|
||||||
|
|
||||||
def __init__(self, conf: Config, analytics_manager=None, skip_components=None,
|
|
||||||
peer_manager=None, **override_components):
|
|
||||||
self.conf = conf
|
|
||||||
self.skip_components = skip_components or []
|
|
||||||
self.loop = asyncio.get_event_loop()
|
|
||||||
self.analytics_manager = analytics_manager
|
|
||||||
self.component_classes = {}
|
|
||||||
self.components = set()
|
|
||||||
self.started = asyncio.Event(loop=self.loop)
|
|
||||||
self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop())
|
|
||||||
|
|
||||||
for component_name, component_class in self.default_component_classes.items():
|
|
||||||
if component_name in override_components:
|
|
||||||
component_class = override_components.pop(component_name)
|
|
||||||
if component_name not in self.skip_components:
|
|
||||||
self.component_classes[component_name] = component_class
|
|
||||||
|
|
||||||
if override_components:
|
|
||||||
raise SyntaxError("unexpected components: %s" % override_components)
|
|
||||||
|
|
||||||
for component_class in self.component_classes.values():
|
|
||||||
self.components.add(component_class(self))
|
|
||||||
|
|
||||||
def evaluate_condition(self, condition_name):
|
|
||||||
if condition_name not in RegisteredConditions.conditions:
|
|
||||||
raise NameError(condition_name)
|
|
||||||
condition = RegisteredConditions.conditions[condition_name]
|
|
||||||
try:
|
|
||||||
component = self.get_component(condition.component)
|
|
||||||
result = condition.evaluate(component)
|
|
||||||
except Exception:
|
|
||||||
log.exception('failed to evaluate condition:')
|
|
||||||
result = False
|
|
||||||
return result, "" if result else condition.message
|
|
||||||
|
|
||||||
def sort_components(self, reverse=False):
|
|
||||||
"""
|
|
||||||
Sort components by requirements
|
|
||||||
"""
|
|
||||||
steps = []
|
|
||||||
staged = set()
|
|
||||||
components = set(self.components)
|
|
||||||
|
|
||||||
# components with no requirements
|
|
||||||
step = []
|
|
||||||
for component in set(components):
|
|
||||||
if not component.depends_on:
|
|
||||||
step.append(component)
|
|
||||||
staged.add(component.component_name)
|
|
||||||
components.remove(component)
|
|
||||||
|
|
||||||
if step:
|
|
||||||
step.sort()
|
|
||||||
steps.append(step)
|
|
||||||
|
|
||||||
while components:
|
|
||||||
step = []
|
|
||||||
to_stage = set()
|
|
||||||
for component in set(components):
|
|
||||||
reqs_met = 0
|
|
||||||
for needed in component.depends_on:
|
|
||||||
if needed in staged:
|
|
||||||
reqs_met += 1
|
|
||||||
if reqs_met == len(component.depends_on):
|
|
||||||
step.append(component)
|
|
||||||
to_stage.add(component.component_name)
|
|
||||||
components.remove(component)
|
|
||||||
if step:
|
|
||||||
step.sort()
|
|
||||||
staged.update(to_stage)
|
|
||||||
steps.append(step)
|
|
||||||
elif components:
|
|
||||||
raise ComponentStartConditionNotMetError(components)
|
|
||||||
if reverse:
|
|
||||||
steps.reverse()
|
|
||||||
return steps
|
|
||||||
|
|
||||||
async def start(self):
|
|
||||||
""" Start Components in sequence sorted by requirements """
|
|
||||||
for stage in self.sort_components():
|
|
||||||
needing_start = [
|
|
||||||
component._setup() for component in stage if not component.running
|
|
||||||
]
|
|
||||||
if needing_start:
|
|
||||||
await asyncio.wait(needing_start)
|
|
||||||
self.started.set()
|
|
||||||
|
|
||||||
async def stop(self):
|
|
||||||
"""
|
|
||||||
Stop Components in reversed startup order
|
|
||||||
"""
|
|
||||||
stages = self.sort_components(reverse=True)
|
|
||||||
for stage in stages:
|
|
||||||
needing_stop = [
|
|
||||||
component._stop() for component in stage if component.running
|
|
||||||
]
|
|
||||||
if needing_stop:
|
|
||||||
await asyncio.wait(needing_stop)
|
|
||||||
|
|
||||||
def all_components_running(self, *component_names):
|
|
||||||
"""
|
|
||||||
Check if components are running
|
|
||||||
|
|
||||||
:return: (bool) True if all specified components are running
|
|
||||||
"""
|
|
||||||
components = {component.component_name: component for component in self.components}
|
|
||||||
for component in component_names:
|
|
||||||
if component not in components:
|
|
||||||
raise NameError("%s is not a known Component" % component)
|
|
||||||
if not components[component].running:
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
def get_components_status(self):
|
|
||||||
"""
|
|
||||||
List status of all the components, whether they are running or not
|
|
||||||
|
|
||||||
:return: (dict) {(str) component_name: (bool) True is running else False}
|
|
||||||
"""
|
|
||||||
return {
|
|
||||||
component.component_name: component.running
|
|
||||||
for component in self.components
|
|
||||||
}
|
|
||||||
|
|
||||||
def get_actual_component(self, component_name):
|
|
||||||
for component in self.components:
|
|
||||||
if component.component_name == component_name:
|
|
||||||
return component
|
|
||||||
raise NameError(component_name)
|
|
||||||
|
|
||||||
def get_component(self, component_name):
|
|
||||||
return self.get_actual_component(component_name).component
|
|
||||||
|
|
||||||
def has_component(self, component_name):
|
|
||||||
return any(component for component in self.components if component_name == component.component_name)
|
|
|
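`sort_components()` in the removed manager is a staged topological sort: stage one is every component with an empty `depends_on`, and each later stage holds components whose dependencies are all already staged. The same idea in isolation, with plain dicts instead of Component objects (purely illustrative):

def stage(dependencies: dict) -> list:
    """dependencies maps name -> set of required names."""
    remaining, staged, stages = dict(dependencies), set(), []
    while remaining:
        ready = sorted(n for n, deps in remaining.items() if deps <= staged)
        if not ready:
            raise RuntimeError(f"unmet dependencies: {remaining}")
        stages.append(ready)
        staged.update(ready)
        for name in ready:
            del remaining[name]
    return stages

print(stage({"database": set(), "upnp": set(), "wallet": {"database"}, "dht": {"upnp", "database"}}))
# [['database', 'upnp'], ['dht', 'wallet']]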
@ -1,553 +0,0 @@
|
||||||
import math
|
|
||||||
import os
|
|
||||||
import asyncio
|
|
||||||
import logging
|
|
||||||
import binascii
|
|
||||||
import typing
|
|
||||||
import base58
|
|
||||||
|
|
||||||
from aioupnp import __version__ as aioupnp_version
|
|
||||||
from aioupnp.upnp import UPnP
|
|
||||||
from aioupnp.fault import UPnPError
|
|
||||||
|
|
||||||
from lbry import utils
|
|
||||||
from lbry.dht.node import Node
|
|
||||||
from lbry.dht.peer import is_valid_public_ipv4
|
|
||||||
from lbry.dht.blob_announcer import BlobAnnouncer
|
|
||||||
from lbry.blob.blob_manager import BlobManager
|
|
||||||
from lbry.blob_exchange.server import BlobServer
|
|
||||||
from lbry.stream.stream_manager import StreamManager
|
|
||||||
from lbry.extras.daemon.component import Component
|
|
||||||
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
|
|
||||||
from lbry.extras.daemon.storage import SQLiteStorage
|
|
||||||
from lbry.wallet import WalletManager
|
|
||||||
from lbry.wallet.usage_payment import WalletServerPayer
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
# settings must be initialized before this file is imported
|
|
||||||
|
|
||||||
DATABASE_COMPONENT = "database"
|
|
||||||
BLOB_COMPONENT = "blob_manager"
|
|
||||||
WALLET_COMPONENT = "wallet"
|
|
||||||
WALLET_SERVER_PAYMENTS_COMPONENT = "wallet_server_payments"
|
|
||||||
DHT_COMPONENT = "dht"
|
|
||||||
HASH_ANNOUNCER_COMPONENT = "hash_announcer"
|
|
||||||
STREAM_MANAGER_COMPONENT = "stream_manager"
|
|
||||||
PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
|
|
||||||
UPNP_COMPONENT = "upnp"
|
|
||||||
EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
|
|
||||||
|
|
||||||
|
|
||||||
class DatabaseComponent(Component):
|
|
||||||
component_name = DATABASE_COMPONENT
|
|
||||||
|
|
||||||
def __init__(self, component_manager):
|
|
||||||
super().__init__(component_manager)
|
|
||||||
self.storage = None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def component(self):
|
|
||||||
return self.storage
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def get_current_db_revision():
|
|
||||||
return 14
|
|
||||||
|
|
||||||
@property
|
|
||||||
def revision_filename(self):
|
|
||||||
return os.path.join(self.conf.data_dir, 'db_revision')
|
|
||||||
|
|
||||||
def _write_db_revision_file(self, version_num):
|
|
||||||
with open(self.revision_filename, mode='w') as db_revision:
|
|
||||||
db_revision.write(str(version_num))
|
|
||||||
|
|
||||||
async def start(self):
|
|
||||||
# check directories exist, create them if they don't
|
|
||||||
log.info("Loading databases")
|
|
||||||
|
|
||||||
if not os.path.exists(self.revision_filename):
|
|
||||||
log.info("db_revision file not found. Creating it")
|
|
||||||
self._write_db_revision_file(self.get_current_db_revision())
|
|
||||||
|
|
||||||
# check the db migration and run any needed migrations
|
|
||||||
with open(self.revision_filename, "r") as revision_read_handle:
|
|
||||||
old_revision = int(revision_read_handle.read().strip())
|
|
||||||
|
|
||||||
if old_revision > self.get_current_db_revision():
|
|
||||||
raise Exception('This version of lbrynet is not compatible with the database\n'
|
|
||||||
'Your database is revision %i, expected %i' %
|
|
||||||
(old_revision, self.get_current_db_revision()))
|
|
||||||
if old_revision < self.get_current_db_revision():
|
|
||||||
from lbry.extras.daemon.migrator import dbmigrator # pylint: disable=import-outside-toplevel
|
|
||||||
log.info("Upgrading your databases (revision %i to %i)", old_revision, self.get_current_db_revision())
|
|
||||||
await asyncio.get_event_loop().run_in_executor(
|
|
||||||
None, dbmigrator.migrate_db, self.conf, old_revision, self.get_current_db_revision()
|
|
||||||
)
|
|
||||||
self._write_db_revision_file(self.get_current_db_revision())
|
|
||||||
log.info("Finished upgrading the databases.")
|
|
||||||
|
|
||||||
self.storage = SQLiteStorage(
|
|
||||||
self.conf, os.path.join(self.conf.data_dir, "lbrynet.sqlite")
|
|
||||||
)
|
|
||||||
await self.storage.open()
|
|
||||||
|
|
||||||
async def stop(self):
|
|
||||||
await self.storage.close()
|
|
||||||
self.storage = None
|
|
||||||
|
|
||||||
|
|
||||||
class WalletComponent(Component):
|
|
||||||
component_name = WALLET_COMPONENT
|
|
||||||
depends_on = [DATABASE_COMPONENT]
|
|
||||||
|
|
||||||
def __init__(self, component_manager):
|
|
||||||
super().__init__(component_manager)
|
|
||||||
self.wallet_manager = None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def component(self):
|
|
||||||
return self.wallet_manager
|
|
||||||
|
|
||||||
async def get_status(self):
|
|
||||||
if self.wallet_manager is None:
|
|
||||||
return
|
|
||||||
session_pool = self.wallet_manager.ledger.network.session_pool
|
|
||||||
sessions = session_pool.sessions
|
|
||||||
connected = None
|
|
||||||
if self.wallet_manager.ledger.network.client:
|
|
||||||
addr_and_port = self.wallet_manager.ledger.network.client.server_address_and_port
|
|
||||||
if addr_and_port:
|
|
||||||
connected = f"{addr_and_port[0]}:{addr_and_port[1]}"
|
|
||||||
result = {
|
|
||||||
'connected': connected,
|
|
||||||
'connected_features': self.wallet_manager.ledger.network.server_features,
|
|
||||||
'servers': [
|
|
||||||
{
|
|
||||||
'host': session.server[0],
|
|
||||||
'port': session.server[1],
|
|
||||||
'latency': session.connection_latency,
|
|
||||||
'availability': session.available,
|
|
||||||
} for session in sessions
|
|
||||||
],
|
|
||||||
'known_servers': len(sessions),
|
|
||||||
'available_servers': len(list(session_pool.available_sessions))
|
|
||||||
}
|
|
||||||
|
|
||||||
if self.wallet_manager.ledger.network.remote_height:
|
|
||||||
local_height = self.wallet_manager.ledger.local_height_including_downloaded_height
|
|
||||||
disk_height = len(self.wallet_manager.ledger.headers)
|
|
||||||
remote_height = self.wallet_manager.ledger.network.remote_height
|
|
||||||
download_height, target_height = local_height - disk_height, remote_height - disk_height
|
|
||||||
if target_height > 0:
|
|
||||||
progress = min(max(math.ceil(float(download_height) / float(target_height) * 100), 0), 100)
|
|
||||||
else:
|
|
||||||
progress = 100
|
|
||||||
best_hash = await self.wallet_manager.get_best_blockhash()
|
|
||||||
result.update({
|
|
||||||
'headers_synchronization_progress': progress,
|
|
||||||
'blocks': max(local_height, 0),
|
|
||||||
'blocks_behind': max(remote_height - local_height, 0),
|
|
||||||
'best_blockhash': best_hash,
|
|
||||||
})
|
|
||||||
|
|
||||||
return result
|
|
||||||
|
|
||||||
async def start(self):
|
|
||||||
log.info("Starting wallet")
|
|
||||||
self.wallet_manager = await WalletManager.from_lbrynet_config(self.conf)
|
|
||||||
await self.wallet_manager.start()
|
|
||||||
|
|
||||||
async def stop(self):
|
|
||||||
await self.wallet_manager.stop()
|
|
||||||
self.wallet_manager = None
|
|
||||||
|
|
||||||
|
|
||||||
class WalletServerPaymentsComponent(Component):
|
|
||||||
component_name = WALLET_SERVER_PAYMENTS_COMPONENT
|
|
||||||
depends_on = [WALLET_COMPONENT]
|
|
||||||
|
|
||||||
def __init__(self, component_manager):
|
|
||||||
super().__init__(component_manager)
|
|
||||||
self.usage_payment_service = WalletServerPayer(
|
|
||||||
max_fee=self.conf.max_wallet_server_fee, analytics_manager=self.component_manager.analytics_manager,
|
|
||||||
)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def component(self) -> typing.Optional[WalletServerPayer]:
|
|
||||||
return self.usage_payment_service
|
|
||||||
|
|
||||||
async def start(self):
|
|
||||||
wallet_manager = self.component_manager.get_component(WALLET_COMPONENT)
|
|
||||||
await self.usage_payment_service.start(wallet_manager.ledger, wallet_manager.default_wallet)
|
|
||||||
|
|
||||||
async def stop(self):
|
|
||||||
await self.usage_payment_service.stop()
|
|
||||||
|
|
||||||
async def get_status(self):
|
|
||||||
return {
|
|
||||||
'max_fee': self.usage_payment_service.max_fee,
|
|
||||||
'running': self.usage_payment_service.running
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class BlobComponent(Component):
|
|
||||||
component_name = BLOB_COMPONENT
|
|
||||||
depends_on = [DATABASE_COMPONENT]
|
|
||||||
|
|
||||||
def __init__(self, component_manager):
|
|
||||||
super().__init__(component_manager)
|
|
||||||
self.blob_manager: typing.Optional[BlobManager] = None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def component(self) -> typing.Optional[BlobManager]:
|
|
||||||
return self.blob_manager
|
|
||||||
|
|
||||||
async def start(self):
|
|
||||||
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
|
||||||
data_store = None
|
|
||||||
if DHT_COMPONENT not in self.component_manager.skip_components:
|
|
||||||
dht_node: Node = self.component_manager.get_component(DHT_COMPONENT)
|
|
||||||
if dht_node:
|
|
||||||
data_store = dht_node.protocol.data_store
|
|
||||||
blob_dir = os.path.join(self.conf.data_dir, 'blobfiles')
|
|
||||||
if not os.path.isdir(blob_dir):
|
|
||||||
os.mkdir(blob_dir)
|
|
||||||
self.blob_manager = BlobManager(self.component_manager.loop, blob_dir, storage, self.conf, data_store)
|
|
||||||
return await self.blob_manager.setup()
|
|
||||||
|
|
||||||
async def stop(self):
|
|
||||||
self.blob_manager.stop()
|
|
||||||
|
|
||||||
async def get_status(self):
|
|
||||||
count = 0
|
|
||||||
if self.blob_manager:
|
|
||||||
count = len(self.blob_manager.completed_blob_hashes)
|
|
||||||
return {
|
|
||||||
'finished_blobs': count,
|
|
||||||
'connections': {} if not self.blob_manager else self.blob_manager.connection_manager.status
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class DHTComponent(Component):
|
|
||||||
component_name = DHT_COMPONENT
|
|
||||||
depends_on = [UPNP_COMPONENT, DATABASE_COMPONENT]
|
|
||||||
|
|
||||||
def __init__(self, component_manager):
|
|
||||||
super().__init__(component_manager)
|
|
||||||
self.dht_node: typing.Optional[Node] = None
|
|
||||||
self.external_udp_port = None
|
|
||||||
self.external_peer_port = None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def component(self) -> typing.Optional[Node]:
|
|
||||||
return self.dht_node
|
|
||||||
|
|
||||||
async def get_status(self):
|
|
||||||
return {
|
|
||||||
'node_id': None if not self.dht_node else binascii.hexlify(self.dht_node.protocol.node_id),
|
|
||||||
'peers_in_routing_table': 0 if not self.dht_node else len(self.dht_node.protocol.routing_table.get_peers())
|
|
||||||
}
|
|
||||||
|
|
||||||
def get_node_id(self):
|
|
||||||
node_id_filename = os.path.join(self.conf.data_dir, "node_id")
|
|
||||||
if os.path.isfile(node_id_filename):
|
|
||||||
with open(node_id_filename, "r") as node_id_file:
|
|
||||||
return base58.b58decode(str(node_id_file.read()).strip())
|
|
||||||
node_id = utils.generate_id()
|
|
||||||
with open(node_id_filename, "w") as node_id_file:
|
|
||||||
node_id_file.write(base58.b58encode(node_id).decode())
|
|
||||||
return node_id
|
|
||||||
|
|
||||||
async def start(self):
|
|
||||||
log.info("start the dht")
|
|
||||||
upnp_component = self.component_manager.get_component(UPNP_COMPONENT)
|
|
||||||
self.external_peer_port = upnp_component.upnp_redirects.get("TCP", self.conf.tcp_port)
|
|
||||||
self.external_udp_port = upnp_component.upnp_redirects.get("UDP", self.conf.udp_port)
|
|
||||||
external_ip = upnp_component.external_ip
|
|
||||||
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
|
||||||
if not external_ip:
|
|
||||||
external_ip = await utils.get_external_ip()
|
|
||||||
if not external_ip:
|
|
||||||
log.warning("failed to get external ip")
|
|
||||||
|
|
||||||
self.dht_node = Node(
|
|
||||||
self.component_manager.loop,
|
|
||||||
self.component_manager.peer_manager,
|
|
||||||
node_id=self.get_node_id(),
|
|
||||||
internal_udp_port=self.conf.udp_port,
|
|
||||||
udp_port=self.external_udp_port,
|
|
||||||
external_ip=external_ip,
|
|
||||||
peer_port=self.external_peer_port,
|
|
||||||
rpc_timeout=self.conf.node_rpc_timeout,
|
|
||||||
split_buckets_under_index=self.conf.split_buckets_under_index,
|
|
||||||
storage=storage
|
|
||||||
)
|
|
||||||
self.dht_node.start(self.conf.network_interface, self.conf.known_dht_nodes)
|
|
||||||
log.info("Started the dht")
|
|
||||||
|
|
||||||
async def stop(self):
|
|
||||||
self.dht_node.stop()
|
|
||||||
|
|
||||||
|
|
||||||
class HashAnnouncerComponent(Component):
|
|
||||||
component_name = HASH_ANNOUNCER_COMPONENT
|
|
||||||
depends_on = [DHT_COMPONENT, DATABASE_COMPONENT]
|
|
||||||
|
|
||||||
def __init__(self, component_manager):
|
|
||||||
super().__init__(component_manager)
|
|
||||||
self.hash_announcer: typing.Optional[BlobAnnouncer] = None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def component(self) -> typing.Optional[BlobAnnouncer]:
|
|
||||||
return self.hash_announcer
|
|
||||||
|
|
||||||
async def start(self):
|
|
||||||
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
|
||||||
dht_node = self.component_manager.get_component(DHT_COMPONENT)
|
|
||||||
self.hash_announcer = BlobAnnouncer(self.component_manager.loop, dht_node, storage)
|
|
||||||
self.hash_announcer.start(self.conf.concurrent_blob_announcers)
|
|
||||||
log.info("Started blob announcer")
|
|
||||||
|
|
||||||
async def stop(self):
|
|
||||||
self.hash_announcer.stop()
|
|
||||||
log.info("Stopped blob announcer")
|
|
||||||
|
|
||||||
async def get_status(self):
|
|
||||||
return {
|
|
||||||
'announce_queue_size': 0 if not self.hash_announcer else len(self.hash_announcer.announce_queue)
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class StreamManagerComponent(Component):
|
|
||||||
component_name = STREAM_MANAGER_COMPONENT
|
|
||||||
depends_on = [BLOB_COMPONENT, DATABASE_COMPONENT, WALLET_COMPONENT]
|
|
||||||
|
|
||||||
def __init__(self, component_manager):
|
|
||||||
super().__init__(component_manager)
|
|
||||||
self.stream_manager: typing.Optional[StreamManager] = None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def component(self) -> typing.Optional[StreamManager]:
|
|
||||||
return self.stream_manager
|
|
||||||
|
|
||||||
async def get_status(self):
|
|
||||||
if not self.stream_manager:
|
|
||||||
return
|
|
||||||
return {
|
|
||||||
'managed_files': len(self.stream_manager.streams),
|
|
||||||
}
|
|
||||||
|
|
||||||
async def start(self):
|
|
||||||
blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
|
|
||||||
storage = self.component_manager.get_component(DATABASE_COMPONENT)
|
|
||||||
wallet = self.component_manager.get_component(WALLET_COMPONENT)
|
|
||||||
node = self.component_manager.get_component(DHT_COMPONENT) \
|
|
||||||
if self.component_manager.has_component(DHT_COMPONENT) else None
|
|
||||||
log.info('Starting the file manager')
|
|
||||||
loop = asyncio.get_event_loop()
|
|
||||||
self.stream_manager = StreamManager(
|
|
||||||
loop, self.conf, blob_manager, wallet, storage, node, self.component_manager.analytics_manager
|
|
||||||
)
|
|
||||||
await self.stream_manager.start()
|
|
||||||
log.info('Done setting up file manager')
|
|
||||||
|
|
||||||
async def stop(self):
|
|
||||||
self.stream_manager.stop()
|
|
||||||
|
|
||||||
|
|
||||||
class PeerProtocolServerComponent(Component):
|
|
||||||
component_name = PEER_PROTOCOL_SERVER_COMPONENT
|
|
||||||
depends_on = [UPNP_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT]
|
|
||||||
|
|
||||||
def __init__(self, component_manager):
|
|
||||||
super().__init__(component_manager)
|
|
||||||
self.blob_server: typing.Optional[BlobServer] = None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def component(self) -> typing.Optional[BlobServer]:
|
|
||||||
return self.blob_server
|
|
||||||
|
|
||||||
async def start(self):
|
|
||||||
log.info("start blob server")
|
|
||||||
blob_manager: BlobManager = self.component_manager.get_component(BLOB_COMPONENT)
|
|
||||||
wallet: WalletManager = self.component_manager.get_component(WALLET_COMPONENT)
|
|
||||||
peer_port = self.conf.tcp_port
|
|
||||||
address = await wallet.get_unused_address()
|
|
||||||
self.blob_server = BlobServer(asyncio.get_event_loop(), blob_manager, address)
|
|
||||||
self.blob_server.start_server(peer_port, interface=self.conf.network_interface)
|
|
||||||
await self.blob_server.started_listening.wait()
|
|
||||||
|
|
||||||
async def stop(self):
|
|
||||||
if self.blob_server:
|
|
||||||
self.blob_server.stop_server()
|
|
||||||
|
|
||||||
|
|
||||||
class UPnPComponent(Component):
    component_name = UPNP_COMPONENT

    def __init__(self, component_manager):
        super().__init__(component_manager)
        self._int_peer_port = self.conf.tcp_port
        self._int_dht_node_port = self.conf.udp_port
        self.use_upnp = self.conf.use_upnp
        self.upnp: typing.Optional[UPnP] = None
        self.upnp_redirects = {}
        self.external_ip: typing.Optional[str] = None
        self._maintain_redirects_task = None

    @property
    def component(self) -> 'UPnPComponent':
        return self

    async def _repeatedly_maintain_redirects(self, now=True):
        while True:
            if now:
                await self._maintain_redirects()
            await asyncio.sleep(360, loop=self.component_manager.loop)

    async def _maintain_redirects(self):
        # setup the gateway if necessary
        if not self.upnp:
            try:
                self.upnp = await UPnP.discover(loop=self.component_manager.loop)
                log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
            except Exception as err:
                if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
                    raise
                log.warning("upnp discovery failed: %s", err)
                self.upnp = None

        # update the external ip
        external_ip = None
        if self.upnp:
            try:
                external_ip = await self.upnp.get_external_ip()
                if external_ip != "0.0.0.0" and not self.external_ip:
                    log.info("got external ip from UPnP: %s", external_ip)
            except (asyncio.TimeoutError, UPnPError, NotImplementedError):
                pass
        if external_ip and not is_valid_public_ipv4(external_ip):
            log.warning("UPnP returned a private/reserved ip - %s, checking lbry.com fallback", external_ip)
            external_ip = await utils.get_external_ip()
        if self.external_ip and self.external_ip != external_ip:
            log.info("external ip changed from %s to %s", self.external_ip, external_ip)
        if external_ip:
            self.external_ip = external_ip
        # assert self.external_ip is not None  # TODO: handle going/starting offline

        if not self.upnp_redirects and self.upnp:  # setup missing redirects
            log.info("add UPnP port mappings")
            upnp_redirects = {}
            if PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components:
                try:
                    upnp_redirects["TCP"] = await self.upnp.get_next_mapping(
                        self._int_peer_port, "TCP", "LBRY peer port", self._int_peer_port
                    )
                except (UPnPError, asyncio.TimeoutError, NotImplementedError):
                    pass
            if DHT_COMPONENT not in self.component_manager.skip_components:
                try:
                    upnp_redirects["UDP"] = await self.upnp.get_next_mapping(
                        self._int_dht_node_port, "UDP", "LBRY DHT port", self._int_dht_node_port
                    )
                except (UPnPError, asyncio.TimeoutError, NotImplementedError):
                    pass
            if upnp_redirects:
                log.info("set up redirects: %s", upnp_redirects)
                self.upnp_redirects.update(upnp_redirects)
        elif self.upnp:  # check existing redirects are still active
            found = set()
            mappings = await self.upnp.get_redirects()
            for mapping in mappings:
                proto = mapping.protocol
                if proto in self.upnp_redirects and mapping.external_port == self.upnp_redirects[proto]:
                    if mapping.lan_address == self.upnp.lan_address:
                        found.add(proto)
            if 'UDP' not in found and DHT_COMPONENT not in self.component_manager.skip_components:
                try:
                    udp_port = await self.upnp.get_next_mapping(self._int_dht_node_port, "UDP", "LBRY DHT port")
                    self.upnp_redirects['UDP'] = udp_port
                    log.info("refreshed upnp redirect for dht port: %i", udp_port)
                except (asyncio.TimeoutError, UPnPError, NotImplementedError):
                    del self.upnp_redirects['UDP']
            if 'TCP' not in found and PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components:
                try:
                    tcp_port = await self.upnp.get_next_mapping(self._int_peer_port, "TCP", "LBRY peer port")
                    self.upnp_redirects['TCP'] = tcp_port
                    log.info("refreshed upnp redirect for peer port: %i", tcp_port)
                except (asyncio.TimeoutError, UPnPError, NotImplementedError):
                    del self.upnp_redirects['TCP']
            if ('TCP' in self.upnp_redirects and
                    PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components) and \
                    ('UDP' in self.upnp_redirects and DHT_COMPONENT not in self.component_manager.skip_components):
                if self.upnp_redirects:
                    log.debug("upnp redirects are still active")

    async def start(self):
        log.info("detecting external ip")
        if not self.use_upnp:
            self.external_ip = await utils.get_external_ip()
            return
        success = False
        await self._maintain_redirects()
        if self.upnp:
            if not self.upnp_redirects and not all([x in self.component_manager.skip_components for x in
                                                    (DHT_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT)]):
                log.error("failed to setup upnp")
            else:
                success = True
                if self.upnp_redirects:
                    log.debug("set up upnp port redirects for gateway: %s", self.upnp.gateway.manufacturer_string)
        else:
            log.error("failed to setup upnp")
        if not self.external_ip:
            self.external_ip = await utils.get_external_ip()
            if self.external_ip:
                log.info("detected external ip using lbry.com fallback")
        if self.component_manager.analytics_manager:
            self.component_manager.loop.create_task(
                self.component_manager.analytics_manager.send_upnp_setup_success_fail(
                    success, await self.get_status()
                )
            )
        self._maintain_redirects_task = self.component_manager.loop.create_task(
            self._repeatedly_maintain_redirects(now=False)
        )

    async def stop(self):
        if self.upnp_redirects:
            log.info("Removing upnp redirects: %s", self.upnp_redirects)
            await asyncio.wait([
                self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
            ], loop=self.component_manager.loop)
        if self._maintain_redirects_task and not self._maintain_redirects_task.done():
            self._maintain_redirects_task.cancel()

    async def get_status(self):
        return {
            'aioupnp_version': aioupnp_version,
            'redirects': self.upnp_redirects,
            'gateway': 'No gateway found' if not self.upnp else self.upnp.gateway.manufacturer_string,
            'dht_redirect_set': 'UDP' in self.upnp_redirects,
            'peer_redirect_set': 'TCP' in self.upnp_redirects,
            'external_ip': self.external_ip
        }

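For reference, the aioupnp calls that `_maintain_redirects` relies on can be exercised on their own. The sketch below is illustrative only: the import path is assumed from how `UPnP` and `UPnPError` are used elsewhere in this codebase, the port number is a placeholder, and error handling is omitted.

# Standalone sketch of the aioupnp calls used above; port 4444 is a placeholder.
import asyncio
from aioupnp.upnp import UPnP  # assumed import path

async def map_example_port():
    upnp = await UPnP.discover()                      # find the gateway, as _maintain_redirects does
    external_ip = await upnp.get_external_ip()        # may be "0.0.0.0" on some gateways
    tcp_port = await upnp.get_next_mapping(4444, "TCP", "example mapping")
    print(external_ip, tcp_port)
    await upnp.delete_port_mapping(tcp_port, "TCP")   # clean up, as stop() does

asyncio.get_event_loop().run_until_complete(map_example_port())
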
class ExchangeRateManagerComponent(Component):
    component_name = EXCHANGE_RATE_MANAGER_COMPONENT

    def __init__(self, component_manager):
        super().__init__(component_manager)
        self.exchange_rate_manager = ExchangeRateManager()

    @property
    def component(self) -> ExchangeRateManager:
        return self.exchange_rate_manager

    async def start(self):
        self.exchange_rate_manager.start()

    async def stop(self):
        self.exchange_rate_manager.stop()
File diff suppressed because it is too large
@@ -1,95 +0,0 @@
import asyncio
import json
import logging.handlers
import traceback

import typing
from aiohttp.client_exceptions import ClientError
import aiohttp
from lbry import utils, __version__
if typing.TYPE_CHECKING:
    from lbry.conf import Config

LOGGLY_TOKEN = 'BQEzZmMzLJHgAGxkBF00LGD0YGuyATVgAmqxAQEuAQZ2BQH4'


class JsonFormatter(logging.Formatter):
    """Format log records using json serialization"""

    def __init__(self, **kwargs):
        super().__init__()
        self.attributes = kwargs

    def format(self, record):
        data = {
            'loggerName': record.name,
            'asciTime': self.formatTime(record),
            'fileName': record.filename,
            'functionName': record.funcName,
            'levelNo': record.levelno,
            'lineNo': record.lineno,
            'levelName': record.levelname,
            'message': record.getMessage(),
        }
        data.update(self.attributes)
        if record.exc_info:
            data['exc_info'] = self.formatException(record.exc_info)
        return json.dumps(data)


class HTTPSLogglyHandler(logging.Handler):
    def __init__(self, loggly_token: str, config: 'Config'):
        super().__init__()
        self.cookies = {}
        self.url = "https://logs-01.loggly.com/inputs/{token}/tag/{tag}".format(
            token=utils.deobfuscate(loggly_token), tag='lbrynet-' + __version__
        )
        self._loop = asyncio.get_event_loop()
        self._session = aiohttp.ClientSession()
        self._config = config

    @property
    def enabled(self):
        return self._config.share_usage_data

    @staticmethod
    def get_full_message(record):
        if record.exc_info:
            return '\n'.join(traceback.format_exception(*record.exc_info))
        else:
            return record.getMessage()

    async def _emit(self, record, retry=True):
        data = self.format(record).encode()
        try:
            async with self._session.post(self.url, data=data,
                                          cookies=self.cookies) as response:
                self.cookies.update(response.cookies)
        except ClientError:
            if self._loop.is_running() and retry and self.enabled:
                await self._session.close()
                self._session = aiohttp.ClientSession()
                return await self._emit(record, retry=False)

    def emit(self, record):
        if not self.enabled:
            return
        try:
            asyncio.ensure_future(self._emit(record), loop=self._loop)
        except RuntimeError:  # TODO: use a second loop
            print(f"\nfailed to send traceback to loggly, please file an issue with the following traceback:\n"
                  f"{self.format(record)}")

    def close(self):
        super().close()
        try:
            loop = asyncio.get_event_loop()
            loop.run_until_complete(self._session.close())
        except RuntimeError:
            pass


def get_loggly_handler(config):
    handler = HTTPSLogglyHandler(LOGGLY_TOKEN, config=config)
    handler.setFormatter(JsonFormatter())
    return handler

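A possible way to wire this handler into the `lbry` logger, assuming a loaded `Config` (the `enabled` property above gates sending on `share_usage_data`); this is a sketch, not code taken from the diff, and the level choice is illustrative.

# Sketch: attach the Loggly handler defined above to the lbry logger.
import logging
from lbry.conf import Config

config = Config()                        # assumed to carry share_usage_data
handler = get_loggly_handler(config)     # builds HTTPSLogglyHandler + JsonFormatter
handler.setLevel(logging.ERROR)          # illustrative level choice
logging.getLogger('lbry').addHandler(handler)
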
@@ -1,70 +0,0 @@
# pylint: skip-file
import os
import sys
import logging

log = logging.getLogger(__name__)


def migrate_db(conf, start, end):
    current = start
    while current < end:
        if current == 1:
            from .migrate1to2 import do_migration
        elif current == 2:
            from .migrate2to3 import do_migration
        elif current == 3:
            from .migrate3to4 import do_migration
        elif current == 4:
            from .migrate4to5 import do_migration
        elif current == 5:
            from .migrate5to6 import do_migration
        elif current == 6:
            from .migrate6to7 import do_migration
        elif current == 7:
            from .migrate7to8 import do_migration
        elif current == 8:
            from .migrate8to9 import do_migration
        elif current == 9:
            from .migrate9to10 import do_migration
        elif current == 10:
            from .migrate10to11 import do_migration
        elif current == 11:
            from .migrate11to12 import do_migration
        elif current == 12:
            from .migrate12to13 import do_migration
        elif current == 13:
            from .migrate13to14 import do_migration
        else:
            raise Exception(f"DB migration of version {current} to {current+1} is not available")
        try:
            do_migration(conf)
        except Exception:
            log.exception("failed to migrate database")
            if os.path.exists(os.path.join(conf.data_dir, "lbrynet.sqlite")):
                backup_name = f"rev_{current}_unmigrated_database"
                count = 0
                while os.path.exists(os.path.join(conf.data_dir, backup_name + ".sqlite")):
                    count += 1
                    backup_name = f"rev_{current}_unmigrated_database_{count}"
                backup_path = os.path.join(conf.data_dir, backup_name + ".sqlite")
                os.rename(os.path.join(conf.data_dir, "lbrynet.sqlite"), backup_path)
                log.info("made a backup of the unmigrated database: %s", backup_path)
            if os.path.isfile(os.path.join(conf.data_dir, "db_revision")):
                os.remove(os.path.join(conf.data_dir, "db_revision"))
            return None
        current += 1
        log.info("successfully migrated the database from revision %i to %i", current - 1, current)
    return None


def run_migration_script():
    log_format = "(%(asctime)s)[%(filename)s:%(lineno)s] %(funcName)s(): %(message)s"
    logging.basicConfig(level=logging.DEBUG, format=log_format, filename="migrator.log")
    sys.stdout = open("migrator.out.log", 'w')
    sys.stderr = open("migrator.err.log", 'w')
    migrate_db(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))


if __name__ == "__main__":
    run_migration_script()

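`migrate_db` walks the revision chain one step at a time, importing `migrateNtoN+1.do_migration` for each step and backing up the database if a step fails. A hedged example of driving it directly follows; the `SimpleNamespace` stand-in is illustrative only (real callers pass a Config object), and the path is a placeholder -- the individual migrations only require a `data_dir` attribute.

# Sketch: run the migration chain from revision 9 to 14.
from types import SimpleNamespace

conf = SimpleNamespace(data_dir="/path/to/lbrynet")  # placeholder path, stand-in for Config
migrate_db(conf, 9, 14)  # applies migrate9to10 ... migrate13to14 in order
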
@@ -1,54 +0,0 @@
import sqlite3
import os
import binascii


def do_migration(conf):
    db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
    connection = sqlite3.connect(db_path)
    cursor = connection.cursor()

    current_columns = []
    for col_info in cursor.execute("pragma table_info('file');").fetchall():
        current_columns.append(col_info[1])
    if 'content_fee' in current_columns or 'saved_file' in current_columns:
        connection.close()
        print("already migrated")
        return

    cursor.execute(
        "pragma foreign_keys=off;"
    )

    cursor.execute("""
        create table if not exists new_file (
            stream_hash text primary key not null references stream,
            file_name text,
            download_directory text,
            blob_data_rate real not null,
            status text not null,
            saved_file integer not null,
            content_fee text
        );
    """)
    for (stream_hash, file_name, download_dir, data_rate, status) in cursor.execute("select * from file").fetchall():
        saved_file = 0
        if download_dir != '{stream}' and file_name != '{stream}':
            try:
                if os.path.isfile(os.path.join(binascii.unhexlify(download_dir).decode(),
                                               binascii.unhexlify(file_name).decode())):
                    saved_file = 1
                else:
                    download_dir, file_name = None, None
            except Exception:
                download_dir, file_name = None, None
        else:
            download_dir, file_name = None, None
        cursor.execute(
            "insert into new_file values (?, ?, ?, ?, ?, ?, NULL)",
            (stream_hash, file_name, download_dir, data_rate, status, saved_file)
        )
    cursor.execute("drop table file")
    cursor.execute("alter table new_file rename to file")
    connection.commit()
    connection.close()

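Several of the migrations that follow use the same SQLite table-rebuild pattern seen here: disable foreign keys, create a `new_*` table with the extra columns, copy rows across, drop the old table, and rename the new one. A condensed sketch of that pattern, with purely illustrative table and column names:

# Generic sketch of the rebuild pattern used by these migrations; names are illustrative.
import sqlite3

def add_column_by_rebuild(db_path):
    conn = sqlite3.connect(db_path)
    cur = conn.cursor()
    cur.execute("pragma foreign_keys=off;")
    cur.execute("create table if not exists new_example (id text primary key, extra text)")
    cur.execute("insert into new_example (id, extra) select id, NULL from example")
    cur.execute("drop table example")
    cur.execute("alter table new_example rename to example")
    conn.commit()
    conn.close()
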
|
@ -1,69 +0,0 @@
|
||||||
import sqlite3
|
|
||||||
import os
|
|
||||||
import time
|
|
||||||
|
|
||||||
|
|
||||||
def do_migration(conf):
|
|
||||||
db_path = os.path.join(conf.data_dir, 'lbrynet.sqlite')
|
|
||||||
connection = sqlite3.connect(db_path)
|
|
||||||
connection.row_factory = sqlite3.Row
|
|
||||||
cursor = connection.cursor()
|
|
||||||
|
|
||||||
current_columns = []
|
|
||||||
for col_info in cursor.execute("pragma table_info('file');").fetchall():
|
|
||||||
current_columns.append(col_info[1])
|
|
||||||
|
|
||||||
if 'added_on' in current_columns:
|
|
||||||
connection.close()
|
|
||||||
print('already migrated')
|
|
||||||
return
|
|
||||||
|
|
||||||
# follow 12 step schema change procedure
|
|
||||||
cursor.execute("pragma foreign_keys=off")
|
|
||||||
|
|
||||||
# we don't have any indexes, views or triggers, so step 3 is skipped.
|
|
||||||
cursor.execute("drop table if exists new_file")
|
|
||||||
cursor.execute("""
|
|
||||||
create table if not exists new_file (
|
|
||||||
stream_hash text not null primary key references stream,
|
|
||||||
file_name text,
|
|
||||||
download_directory text,
|
|
||||||
blob_data_rate text not null,
|
|
||||||
status text not null,
|
|
||||||
saved_file integer not null,
|
|
||||||
content_fee text,
|
|
||||||
added_on integer not null
|
|
||||||
);
|
|
||||||
|
|
||||||
|
|
||||||
""")
|
|
||||||
|
|
||||||
# step 5: transfer content from old to new
|
|
||||||
select = "select * from file"
|
|
||||||
for (stream_hash, file_name, download_dir, blob_rate, status, saved_file, fee) \
|
|
||||||
in cursor.execute(select).fetchall():
|
|
||||||
added_on = int(time.time())
|
|
||||||
cursor.execute(
|
|
||||||
"insert into new_file values (?, ?, ?, ?, ?, ?, ?, ?)",
|
|
||||||
(stream_hash, file_name, download_dir, blob_rate, status, saved_file, fee, added_on)
|
|
||||||
)
|
|
||||||
|
|
||||||
# step 6: drop old table
|
|
||||||
cursor.execute("drop table file")
|
|
||||||
|
|
||||||
# step 7: rename new table to old table
|
|
||||||
cursor.execute("alter table new_file rename to file")
|
|
||||||
|
|
||||||
# step 8: we aren't using indexes, views or triggers so skip
|
|
||||||
# step 9: no views so skip
|
|
||||||
# step 10: foreign key check
|
|
||||||
cursor.execute("pragma foreign_key_check;")
|
|
||||||
|
|
||||||
# step 11: commit transaction
|
|
||||||
connection.commit()
|
|
||||||
|
|
||||||
# step 12: re-enable foreign keys
|
|
||||||
connection.execute("pragma foreign_keys=on;")
|
|
||||||
|
|
||||||
# done :)
|
|
||||||
connection.close()
|
|
|
@ -1,80 +0,0 @@
|
||||||
import os
|
|
||||||
import sqlite3
|
|
||||||
|
|
||||||
|
|
||||||
def do_migration(conf):
|
|
||||||
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
|
||||||
connection = sqlite3.connect(db_path)
|
|
||||||
cursor = connection.cursor()
|
|
||||||
|
|
||||||
current_columns = []
|
|
||||||
for col_info in cursor.execute("pragma table_info('file');").fetchall():
|
|
||||||
current_columns.append(col_info[1])
|
|
||||||
if 'bt_infohash' in current_columns:
|
|
||||||
connection.close()
|
|
||||||
print("already migrated")
|
|
||||||
return
|
|
||||||
|
|
||||||
cursor.executescript("""
|
|
||||||
pragma foreign_keys=off;
|
|
||||||
|
|
||||||
create table if not exists torrent (
|
|
||||||
bt_infohash char(20) not null primary key,
|
|
||||||
tracker text,
|
|
||||||
length integer not null,
|
|
||||||
name text not null
|
|
||||||
);
|
|
||||||
|
|
||||||
create table if not exists torrent_node ( -- BEP-0005
|
|
||||||
bt_infohash char(20) not null references torrent,
|
|
||||||
host text not null,
|
|
||||||
port integer not null
|
|
||||||
);
|
|
||||||
|
|
||||||
create table if not exists torrent_tracker ( -- BEP-0012
|
|
||||||
bt_infohash char(20) not null references torrent,
|
|
||||||
tracker text not null
|
|
||||||
);
|
|
||||||
|
|
||||||
create table if not exists torrent_http_seed ( -- BEP-0017
|
|
||||||
bt_infohash char(20) not null references torrent,
|
|
||||||
http_seed text not null
|
|
||||||
);
|
|
||||||
|
|
||||||
create table if not exists new_file (
|
|
||||||
stream_hash char(96) references stream,
|
|
||||||
bt_infohash char(20) references torrent,
|
|
||||||
file_name text,
|
|
||||||
download_directory text,
|
|
||||||
blob_data_rate real not null,
|
|
||||||
status text not null,
|
|
||||||
saved_file integer not null,
|
|
||||||
content_fee text,
|
|
||||||
added_on integer not null
|
|
||||||
);
|
|
||||||
|
|
||||||
create table if not exists new_content_claim (
|
|
||||||
stream_hash char(96) references stream,
|
|
||||||
bt_infohash char(20) references torrent,
|
|
||||||
claim_outpoint text unique not null references claim
|
|
||||||
);
|
|
||||||
|
|
||||||
insert into new_file (stream_hash, bt_infohash, file_name, download_directory, blob_data_rate, status,
|
|
||||||
saved_file, content_fee, added_on) select
|
|
||||||
stream_hash, NULL, file_name, download_directory, blob_data_rate, status, saved_file, content_fee,
|
|
||||||
added_on
|
|
||||||
from file;
|
|
||||||
|
|
||||||
insert or ignore into new_content_claim (stream_hash, bt_infohash, claim_outpoint)
|
|
||||||
select stream_hash, NULL, claim_outpoint from content_claim;
|
|
||||||
|
|
||||||
drop table file;
|
|
||||||
drop table content_claim;
|
|
||||||
alter table new_file rename to file;
|
|
||||||
alter table new_content_claim rename to content_claim;
|
|
||||||
|
|
||||||
pragma foreign_keys=on;
|
|
||||||
""")
|
|
||||||
|
|
||||||
connection.commit()
|
|
||||||
connection.close()
|
|
|
@ -1,21 +0,0 @@
|
||||||
import os
|
|
||||||
import sqlite3
|
|
||||||
|
|
||||||
|
|
||||||
def do_migration(conf):
|
|
||||||
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
|
||||||
connection = sqlite3.connect(db_path)
|
|
||||||
cursor = connection.cursor()
|
|
||||||
|
|
||||||
cursor.executescript("""
|
|
||||||
create table if not exists peer (
|
|
||||||
node_id char(96) not null primary key,
|
|
||||||
address text not null,
|
|
||||||
udp_port integer not null,
|
|
||||||
tcp_port integer,
|
|
||||||
unique (address, udp_port)
|
|
||||||
);
|
|
||||||
""")
|
|
||||||
|
|
||||||
connection.commit()
|
|
||||||
connection.close()
|
|
|
@ -1,77 +0,0 @@
|
||||||
import sqlite3
|
|
||||||
import os
|
|
||||||
import logging
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
UNSET_NOUT = -1
|
|
||||||
|
|
||||||
def do_migration(conf):
|
|
||||||
log.info("Doing the migration")
|
|
||||||
migrate_blockchainname_db(conf.data_dir)
|
|
||||||
log.info("Migration succeeded")
|
|
||||||
|
|
||||||
|
|
||||||
def migrate_blockchainname_db(db_dir):
|
|
||||||
blockchainname_db = os.path.join(db_dir, "blockchainname.db")
|
|
||||||
# skip migration on fresh installs
|
|
||||||
if not os.path.isfile(blockchainname_db):
|
|
||||||
return
|
|
||||||
temp_db = sqlite3.connect(":memory:")
|
|
||||||
db_file = sqlite3.connect(blockchainname_db)
|
|
||||||
file_cursor = db_file.cursor()
|
|
||||||
mem_cursor = temp_db.cursor()
|
|
||||||
|
|
||||||
mem_cursor.execute("create table if not exists name_metadata ("
|
|
||||||
" name text, "
|
|
||||||
" txid text, "
|
|
||||||
" n integer, "
|
|
||||||
" sd_hash text)")
|
|
||||||
mem_cursor.execute("create table if not exists claim_ids ("
|
|
||||||
" claimId text, "
|
|
||||||
" name text, "
|
|
||||||
" txid text, "
|
|
||||||
" n integer)")
|
|
||||||
temp_db.commit()
|
|
||||||
|
|
||||||
name_metadata = file_cursor.execute("select * from name_metadata").fetchall()
|
|
||||||
claim_metadata = file_cursor.execute("select * from claim_ids").fetchall()
|
|
||||||
|
|
||||||
# fill n as V1_UNSET_NOUT, Wallet.py will be responsible for filling in correct n
|
|
||||||
for name, txid, sd_hash in name_metadata:
|
|
||||||
mem_cursor.execute(
|
|
||||||
"insert into name_metadata values (?, ?, ?, ?) ",
|
|
||||||
(name, txid, UNSET_NOUT, sd_hash))
|
|
||||||
|
|
||||||
for claim_id, name, txid in claim_metadata:
|
|
||||||
mem_cursor.execute(
|
|
||||||
"insert into claim_ids values (?, ?, ?, ?)",
|
|
||||||
(claim_id, name, txid, UNSET_NOUT))
|
|
||||||
temp_db.commit()
|
|
||||||
|
|
||||||
new_name_metadata = mem_cursor.execute("select * from name_metadata").fetchall()
|
|
||||||
new_claim_metadata = mem_cursor.execute("select * from claim_ids").fetchall()
|
|
||||||
|
|
||||||
file_cursor.execute("drop table name_metadata")
|
|
||||||
file_cursor.execute("create table name_metadata ("
|
|
||||||
" name text, "
|
|
||||||
" txid text, "
|
|
||||||
" n integer, "
|
|
||||||
" sd_hash text)")
|
|
||||||
|
|
||||||
for name, txid, n, sd_hash in new_name_metadata:
|
|
||||||
file_cursor.execute(
|
|
||||||
"insert into name_metadata values (?, ?, ?, ?) ", (name, txid, n, sd_hash))
|
|
||||||
|
|
||||||
file_cursor.execute("drop table claim_ids")
|
|
||||||
file_cursor.execute("create table claim_ids ("
|
|
||||||
" claimId text, "
|
|
||||||
" name text, "
|
|
||||||
" txid text, "
|
|
||||||
" n integer)")
|
|
||||||
|
|
||||||
for claim_id, name, txid, n in new_claim_metadata:
|
|
||||||
file_cursor.execute("insert into claim_ids values (?, ?, ?, ?)", (claim_id, name, txid, n))
|
|
||||||
|
|
||||||
db_file.commit()
|
|
||||||
db_file.close()
|
|
||||||
temp_db.close()
|
|
|
@ -1,42 +0,0 @@
|
||||||
import sqlite3
|
|
||||||
import os
|
|
||||||
import logging
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def do_migration(conf):
|
|
||||||
log.info("Doing the migration")
|
|
||||||
migrate_blockchainname_db(conf.data_dir)
|
|
||||||
log.info("Migration succeeded")
|
|
||||||
|
|
||||||
|
|
||||||
def migrate_blockchainname_db(db_dir):
|
|
||||||
blockchainname_db = os.path.join(db_dir, "blockchainname.db")
|
|
||||||
# skip migration on fresh installs
|
|
||||||
if not os.path.isfile(blockchainname_db):
|
|
||||||
return
|
|
||||||
|
|
||||||
db_file = sqlite3.connect(blockchainname_db)
|
|
||||||
file_cursor = db_file.cursor()
|
|
||||||
|
|
||||||
tables = file_cursor.execute("SELECT tbl_name FROM sqlite_master "
|
|
||||||
"WHERE type='table'").fetchall()
|
|
||||||
|
|
||||||
if 'tmp_name_metadata_table' in tables and 'name_metadata' not in tables:
|
|
||||||
file_cursor.execute("ALTER TABLE tmp_name_metadata_table RENAME TO name_metadata")
|
|
||||||
else:
|
|
||||||
file_cursor.executescript(
|
|
||||||
"CREATE TABLE IF NOT EXISTS tmp_name_metadata_table "
|
|
||||||
" (name TEXT UNIQUE NOT NULL, "
|
|
||||||
" txid TEXT NOT NULL, "
|
|
||||||
" n INTEGER NOT NULL, "
|
|
||||||
" sd_hash TEXT NOT NULL); "
|
|
||||||
"INSERT OR IGNORE INTO tmp_name_metadata_table "
|
|
||||||
" (name, txid, n, sd_hash) "
|
|
||||||
" SELECT name, txid, n, sd_hash FROM name_metadata; "
|
|
||||||
"DROP TABLE name_metadata; "
|
|
||||||
"ALTER TABLE tmp_name_metadata_table RENAME TO name_metadata;"
|
|
||||||
)
|
|
||||||
db_file.commit()
|
|
||||||
db_file.close()
|
|
|
@ -1,85 +0,0 @@
|
||||||
import sqlite3
|
|
||||||
import os
|
|
||||||
import logging
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def do_migration(conf):
|
|
||||||
log.info("Doing the migration")
|
|
||||||
migrate_blobs_db(conf.data_dir)
|
|
||||||
log.info("Migration succeeded")
|
|
||||||
|
|
||||||
|
|
||||||
def migrate_blobs_db(db_dir):
|
|
||||||
"""
|
|
||||||
We migrate the blobs.db used in BlobManager to have a "should_announce" column,
|
|
||||||
and set this to True for blobs that are sd_hash's or head blobs (first blob in stream)
|
|
||||||
"""
|
|
||||||
|
|
||||||
blobs_db = os.path.join(db_dir, "blobs.db")
|
|
||||||
lbryfile_info_db = os.path.join(db_dir, 'lbryfile_info.db')
|
|
||||||
|
|
||||||
# skip migration on fresh installs
|
|
||||||
if not os.path.isfile(blobs_db) and not os.path.isfile(lbryfile_info_db):
|
|
||||||
return
|
|
||||||
|
|
||||||
# if blobs.db doesn't exist, skip migration
|
|
||||||
if not os.path.isfile(blobs_db):
|
|
||||||
log.info("blobs.db was not found but lbryfile_info.db was found, skipping migration")
|
|
||||||
return
|
|
||||||
|
|
||||||
blobs_db_file = sqlite3.connect(blobs_db)
|
|
||||||
blobs_db_cursor = blobs_db_file.cursor()
|
|
||||||
|
|
||||||
# check if new columns exist (it shouldn't) and create it
|
|
||||||
try:
|
|
||||||
blobs_db_cursor.execute("SELECT should_announce FROM blobs")
|
|
||||||
except sqlite3.OperationalError:
|
|
||||||
blobs_db_cursor.execute(
|
|
||||||
"ALTER TABLE blobs ADD COLUMN should_announce integer NOT NULL DEFAULT 0")
|
|
||||||
else:
|
|
||||||
log.warning("should_announce already exists somehow, proceeding anyways")
|
|
||||||
|
|
||||||
# if lbryfile_info.db doesn't exist, skip marking blobs as should_announce = True
|
|
||||||
if not os.path.isfile(lbryfile_info_db):
|
|
||||||
log.error("lbryfile_info.db was not found, skipping check for should_announce")
|
|
||||||
return
|
|
||||||
|
|
||||||
lbryfile_info_file = sqlite3.connect(lbryfile_info_db)
|
|
||||||
lbryfile_info_cursor = lbryfile_info_file.cursor()
|
|
||||||
|
|
||||||
# find blobs that are stream descriptors
|
|
||||||
lbryfile_info_cursor.execute('SELECT * FROM lbry_file_descriptors')
|
|
||||||
descriptors = lbryfile_info_cursor.fetchall()
|
|
||||||
should_announce_blob_hashes = []
|
|
||||||
for d in descriptors:
|
|
||||||
sd_blob_hash = (d[0],)
|
|
||||||
should_announce_blob_hashes.append(sd_blob_hash)
|
|
||||||
|
|
||||||
# find blobs that are the first blob in a stream
|
|
||||||
lbryfile_info_cursor.execute('SELECT * FROM lbry_file_blobs WHERE position = 0')
|
|
||||||
blobs = lbryfile_info_cursor.fetchall()
|
|
||||||
head_blob_hashes = []
|
|
||||||
for b in blobs:
|
|
||||||
blob_hash = (b[0],)
|
|
||||||
should_announce_blob_hashes.append(blob_hash)
|
|
||||||
|
|
||||||
# now mark them as should_announce = True
|
|
||||||
blobs_db_cursor.executemany('UPDATE blobs SET should_announce=1 WHERE blob_hash=?',
|
|
||||||
should_announce_blob_hashes)
|
|
||||||
|
|
||||||
# Now run some final checks here to make sure migration succeeded
|
|
||||||
try:
|
|
||||||
blobs_db_cursor.execute("SELECT should_announce FROM blobs")
|
|
||||||
except sqlite3.OperationalError:
|
|
||||||
raise Exception('Migration failed, cannot find should_announce')
|
|
||||||
|
|
||||||
blobs_db_cursor.execute("SELECT * FROM blobs WHERE should_announce=1")
|
|
||||||
blobs = blobs_db_cursor.fetchall()
|
|
||||||
if len(blobs) != len(should_announce_blob_hashes):
|
|
||||||
log.error("Some how not all blobs were marked as announceable")
|
|
||||||
|
|
||||||
blobs_db_file.commit()
|
|
||||||
blobs_db_file.close()
|
|
||||||
lbryfile_info_file.close()
|
|
|
@ -1,62 +0,0 @@
|
||||||
import sqlite3
|
|
||||||
import os
|
|
||||||
import logging
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def do_migration(conf):
|
|
||||||
log.info("Doing the migration")
|
|
||||||
add_lbry_file_metadata(conf.data_dir)
|
|
||||||
log.info("Migration succeeded")
|
|
||||||
|
|
||||||
|
|
||||||
def add_lbry_file_metadata(db_dir):
|
|
||||||
"""
|
|
||||||
We migrate the blobs.db used in BlobManager to have a "should_announce" column,
|
|
||||||
and set this to True for blobs that are sd_hash's or head blobs (first blob in stream)
|
|
||||||
"""
|
|
||||||
|
|
||||||
name_metadata = os.path.join(db_dir, "blockchainname.db")
|
|
||||||
lbryfile_info_db = os.path.join(db_dir, 'lbryfile_info.db')
|
|
||||||
|
|
||||||
if not os.path.isfile(name_metadata) and not os.path.isfile(lbryfile_info_db):
|
|
||||||
return
|
|
||||||
|
|
||||||
if not os.path.isfile(lbryfile_info_db):
|
|
||||||
log.info("blockchainname.db was not found but lbryfile_info.db was found, skipping migration")
|
|
||||||
return
|
|
||||||
|
|
||||||
name_metadata_db = sqlite3.connect(name_metadata)
|
|
||||||
lbryfile_db = sqlite3.connect(lbryfile_info_db)
|
|
||||||
name_metadata_cursor = name_metadata_db.cursor()
|
|
||||||
lbryfile_cursor = lbryfile_db.cursor()
|
|
||||||
|
|
||||||
lbryfile_db.executescript(
|
|
||||||
"create table if not exists lbry_file_metadata (" +
|
|
||||||
" lbry_file integer primary key, " +
|
|
||||||
" txid text, " +
|
|
||||||
" n integer, " +
|
|
||||||
" foreign key(lbry_file) references lbry_files(rowid)"
|
|
||||||
")")
|
|
||||||
|
|
||||||
_files = lbryfile_cursor.execute("select rowid, stream_hash from lbry_files").fetchall()
|
|
||||||
|
|
||||||
lbry_files = {x[1]: x[0] for x in _files}
|
|
||||||
for (sd_hash, stream_hash) in lbryfile_cursor.execute("select * "
|
|
||||||
"from lbry_file_descriptors").fetchall():
|
|
||||||
lbry_file_id = lbry_files[stream_hash]
|
|
||||||
outpoint = name_metadata_cursor.execute("select txid, n from name_metadata "
|
|
||||||
"where sd_hash=?",
|
|
||||||
(sd_hash,)).fetchall()
|
|
||||||
if outpoint:
|
|
||||||
txid, nout = outpoint[0]
|
|
||||||
lbryfile_cursor.execute("insert into lbry_file_metadata values (?, ?, ?)",
|
|
||||||
(lbry_file_id, txid, nout))
|
|
||||||
else:
|
|
||||||
lbryfile_cursor.execute("insert into lbry_file_metadata values (?, ?, ?)",
|
|
||||||
(lbry_file_id, None, None))
|
|
||||||
lbryfile_db.commit()
|
|
||||||
|
|
||||||
lbryfile_db.close()
|
|
||||||
name_metadata_db.close()
|
|
|
@ -1,326 +0,0 @@
|
||||||
import sqlite3
|
|
||||||
import os
|
|
||||||
import json
|
|
||||||
import logging
|
|
||||||
from binascii import hexlify
|
|
||||||
from lbry.schema.claim import Claim
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
CREATE_TABLES_QUERY = """
|
|
||||||
pragma foreign_keys=on;
|
|
||||||
pragma journal_mode=WAL;
|
|
||||||
|
|
||||||
create table if not exists blob (
|
|
||||||
blob_hash char(96) primary key not null,
|
|
||||||
blob_length integer not null,
|
|
||||||
next_announce_time integer not null,
|
|
||||||
should_announce integer not null default 0,
|
|
||||||
status text not null
|
|
||||||
);
|
|
||||||
|
|
||||||
create table if not exists stream (
|
|
||||||
stream_hash char(96) not null primary key,
|
|
||||||
sd_hash char(96) not null references blob,
|
|
||||||
stream_key text not null,
|
|
||||||
stream_name text not null,
|
|
||||||
suggested_filename text not null
|
|
||||||
);
|
|
||||||
|
|
||||||
create table if not exists stream_blob (
|
|
||||||
stream_hash char(96) not null references stream,
|
|
||||||
blob_hash char(96) references blob,
|
|
||||||
position integer not null,
|
|
||||||
iv char(32) not null,
|
|
||||||
primary key (stream_hash, blob_hash)
|
|
||||||
);
|
|
||||||
|
|
||||||
create table if not exists claim (
|
|
||||||
claim_outpoint text not null primary key,
|
|
||||||
claim_id char(40) not null,
|
|
||||||
claim_name text not null,
|
|
||||||
amount integer not null,
|
|
||||||
height integer not null,
|
|
||||||
serialized_metadata blob not null,
|
|
||||||
channel_claim_id text,
|
|
||||||
address text not null,
|
|
||||||
claim_sequence integer not null
|
|
||||||
);
|
|
||||||
|
|
||||||
create table if not exists file (
|
|
||||||
stream_hash text primary key not null references stream,
|
|
||||||
file_name text not null,
|
|
||||||
download_directory text not null,
|
|
||||||
blob_data_rate real not null,
|
|
||||||
status text not null
|
|
||||||
);
|
|
||||||
|
|
||||||
create table if not exists content_claim (
|
|
||||||
stream_hash text unique not null references file,
|
|
||||||
claim_outpoint text not null references claim,
|
|
||||||
primary key (stream_hash, claim_outpoint)
|
|
||||||
);
|
|
||||||
|
|
||||||
create table if not exists support (
|
|
||||||
support_outpoint text not null primary key,
|
|
||||||
claim_id text not null,
|
|
||||||
amount integer not null,
|
|
||||||
address text not null
|
|
||||||
);
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
def run_operation(db):
|
|
||||||
def _decorate(fn):
|
|
||||||
def _wrapper(*args):
|
|
||||||
cursor = db.cursor()
|
|
||||||
try:
|
|
||||||
result = fn(cursor, *args)
|
|
||||||
db.commit()
|
|
||||||
return result
|
|
||||||
except sqlite3.IntegrityError:
|
|
||||||
db.rollback()
|
|
||||||
raise
|
|
||||||
return _wrapper
|
|
||||||
return _decorate
|
|
||||||
|
|
||||||
|
|
||||||
def verify_sd_blob(sd_hash, blob_dir):
|
|
||||||
with open(os.path.join(blob_dir, sd_hash), "r") as sd_file:
|
|
||||||
data = sd_file.read()
|
|
||||||
sd_length = len(data)
|
|
||||||
decoded = json.loads(data)
|
|
||||||
assert set(decoded.keys()) == {
|
|
||||||
'stream_name', 'blobs', 'stream_type', 'key', 'suggested_file_name', 'stream_hash'
|
|
||||||
}, "invalid sd blob"
|
|
||||||
for blob in sorted(decoded['blobs'], key=lambda x: int(x['blob_num']), reverse=True):
|
|
||||||
if blob['blob_num'] == len(decoded['blobs']) - 1:
|
|
||||||
assert {'length', 'blob_num', 'iv'} == set(blob.keys()), 'invalid stream terminator'
|
|
||||||
assert blob['length'] == 0, 'non zero length stream terminator'
|
|
||||||
else:
|
|
||||||
assert {'blob_hash', 'length', 'blob_num', 'iv'} == set(blob.keys()), 'invalid stream blob'
|
|
||||||
assert blob['length'] > 0, 'zero length stream blob'
|
|
||||||
return decoded, sd_length
|
|
||||||
|
|
||||||
|
|
||||||
def do_migration(conf):
|
|
||||||
new_db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
|
||||||
connection = sqlite3.connect(new_db_path)
|
|
||||||
|
|
||||||
metadata_db = sqlite3.connect(os.path.join(conf.data_dir, "blockchainname.db"))
|
|
||||||
lbryfile_db = sqlite3.connect(os.path.join(conf.data_dir, 'lbryfile_info.db'))
|
|
||||||
blobs_db = sqlite3.connect(os.path.join(conf.data_dir, 'blobs.db'))
|
|
||||||
|
|
||||||
name_metadata_cursor = metadata_db.cursor()
|
|
||||||
lbryfile_cursor = lbryfile_db.cursor()
|
|
||||||
blobs_db_cursor = blobs_db.cursor()
|
|
||||||
|
|
||||||
old_rowid_to_outpoint = {
|
|
||||||
rowid: (txid, nout) for (rowid, txid, nout) in
|
|
||||||
lbryfile_cursor.execute("select * from lbry_file_metadata").fetchall()
|
|
||||||
}
|
|
||||||
|
|
||||||
old_sd_hash_to_outpoint = {
|
|
||||||
sd_hash: (txid, nout) for (txid, nout, sd_hash) in
|
|
||||||
name_metadata_cursor.execute("select txid, n, sd_hash from name_metadata").fetchall()
|
|
||||||
}
|
|
||||||
|
|
||||||
sd_hash_to_stream_hash = dict(
|
|
||||||
lbryfile_cursor.execute("select sd_blob_hash, stream_hash from lbry_file_descriptors").fetchall()
|
|
||||||
)
|
|
||||||
|
|
||||||
stream_hash_to_stream_blobs = {}
|
|
||||||
|
|
||||||
for (blob_hash, stream_hash, position, iv, length) in lbryfile_db.execute(
|
|
||||||
"select * from lbry_file_blobs").fetchall():
|
|
||||||
stream_blobs = stream_hash_to_stream_blobs.get(stream_hash, [])
|
|
||||||
stream_blobs.append((blob_hash, length, position, iv))
|
|
||||||
stream_hash_to_stream_blobs[stream_hash] = stream_blobs
|
|
||||||
|
|
||||||
claim_outpoint_queries = {}
|
|
||||||
|
|
||||||
for claim_query in metadata_db.execute(
|
|
||||||
"select distinct c.txid, c.n, c.claimId, c.name, claim_cache.claim_sequence, claim_cache.claim_address, "
|
|
||||||
"claim_cache.height, claim_cache.amount, claim_cache.claim_pb "
|
|
||||||
"from claim_cache inner join claim_ids c on claim_cache.claim_id=c.claimId"):
|
|
||||||
txid, nout = claim_query[0], claim_query[1]
|
|
||||||
if (txid, nout) in claim_outpoint_queries:
|
|
||||||
continue
|
|
||||||
claim_outpoint_queries[(txid, nout)] = claim_query
|
|
||||||
|
|
||||||
@run_operation(connection)
|
|
||||||
def _populate_blobs(transaction, blob_infos):
|
|
||||||
transaction.executemany(
|
|
||||||
"insert into blob values (?, ?, ?, ?, ?)",
|
|
||||||
[(blob_hash, blob_length, int(next_announce_time), should_announce, "finished")
|
|
||||||
for (blob_hash, blob_length, _, next_announce_time, should_announce) in blob_infos]
|
|
||||||
)
|
|
||||||
|
|
||||||
@run_operation(connection)
|
|
||||||
def _import_file(transaction, sd_hash, stream_hash, key, stream_name, suggested_file_name, data_rate,
|
|
||||||
status, stream_blobs):
|
|
||||||
try:
|
|
||||||
transaction.execute(
|
|
||||||
"insert or ignore into stream values (?, ?, ?, ?, ?)",
|
|
||||||
(stream_hash, sd_hash, key, stream_name, suggested_file_name)
|
|
||||||
)
|
|
||||||
except sqlite3.IntegrityError:
|
|
||||||
# failed because the sd isn't a known blob, we'll try to read the blob file and recover it
|
|
||||||
return sd_hash
|
|
||||||
|
|
||||||
# insert any stream blobs that were missing from the blobs table
|
|
||||||
transaction.executemany(
|
|
||||||
"insert or ignore into blob values (?, ?, ?, ?, ?)",
|
|
||||||
[
|
|
||||||
(blob_hash, length, 0, 0, "pending")
|
|
||||||
for (blob_hash, length, position, iv) in stream_blobs
|
|
||||||
]
|
|
||||||
)
|
|
||||||
|
|
||||||
# insert the stream blobs
|
|
||||||
for blob_hash, length, position, iv in stream_blobs:
|
|
||||||
transaction.execute(
|
|
||||||
"insert or ignore into stream_blob values (?, ?, ?, ?)",
|
|
||||||
(stream_hash, blob_hash, position, iv)
|
|
||||||
)
|
|
||||||
|
|
||||||
download_dir = conf.download_dir
|
|
||||||
if not isinstance(download_dir, bytes):
|
|
||||||
download_dir = download_dir.encode()
|
|
||||||
|
|
||||||
# insert the file
|
|
||||||
transaction.execute(
|
|
||||||
"insert or ignore into file values (?, ?, ?, ?, ?)",
|
|
||||||
(stream_hash, stream_name, hexlify(download_dir),
|
|
||||||
data_rate, status)
|
|
||||||
)
|
|
||||||
|
|
||||||
@run_operation(connection)
|
|
||||||
def _add_recovered_blobs(transaction, blob_infos, sd_hash, sd_length):
|
|
||||||
transaction.execute(
|
|
||||||
"insert or replace into blob values (?, ?, ?, ?, ?)", (sd_hash, sd_length, 0, 1, "finished")
|
|
||||||
)
|
|
||||||
for blob in sorted(blob_infos, key=lambda x: x['blob_num'], reverse=True):
|
|
||||||
if blob['blob_num'] < len(blob_infos) - 1:
|
|
||||||
transaction.execute(
|
|
||||||
"insert or ignore into blob values (?, ?, ?, ?, ?)",
|
|
||||||
(blob['blob_hash'], blob['length'], 0, 0, "pending")
|
|
||||||
)
|
|
||||||
|
|
||||||
@run_operation(connection)
|
|
||||||
def _make_db(new_db):
|
|
||||||
# create the new tables
|
|
||||||
new_db.executescript(CREATE_TABLES_QUERY)
|
|
||||||
|
|
||||||
# first migrate the blobs
|
|
||||||
blobs = blobs_db_cursor.execute("select * from blobs").fetchall()
|
|
||||||
_populate_blobs(blobs) # pylint: disable=no-value-for-parameter
|
|
||||||
log.info("migrated %i blobs", new_db.execute("select count(*) from blob").fetchone()[0])
|
|
||||||
|
|
||||||
# used to store the query arguments if we need to try re-importing the lbry file later
|
|
||||||
file_args = {} # <sd_hash>: args tuple
|
|
||||||
|
|
||||||
file_outpoints = {} # <outpoint tuple>: sd_hash
|
|
||||||
|
|
||||||
# get the file and stream queries ready
|
|
||||||
for (rowid, sd_hash, stream_hash, key, stream_name, suggested_file_name, data_rate, status) in \
|
|
||||||
lbryfile_db.execute(
|
|
||||||
"select distinct lbry_files.rowid, d.sd_blob_hash, lbry_files.*, o.blob_data_rate, o.status "
|
|
||||||
"from lbry_files "
|
|
||||||
"inner join lbry_file_descriptors d on lbry_files.stream_hash=d.stream_hash "
|
|
||||||
"inner join lbry_file_options o on lbry_files.stream_hash=o.stream_hash"):
|
|
||||||
|
|
||||||
# this is try to link the file to a content claim after we've imported all the files
|
|
||||||
if rowid in old_rowid_to_outpoint:
|
|
||||||
file_outpoints[old_rowid_to_outpoint[rowid]] = sd_hash
|
|
||||||
elif sd_hash in old_sd_hash_to_outpoint:
|
|
||||||
file_outpoints[old_sd_hash_to_outpoint[sd_hash]] = sd_hash
|
|
||||||
|
|
||||||
sd_hash_to_stream_hash[sd_hash] = stream_hash
|
|
||||||
if stream_hash in stream_hash_to_stream_blobs:
|
|
||||||
file_args[sd_hash] = (
|
|
||||||
sd_hash, stream_hash, key, stream_name,
|
|
||||||
suggested_file_name, data_rate or 0.0,
|
|
||||||
status, stream_hash_to_stream_blobs.pop(stream_hash)
|
|
||||||
)
|
|
||||||
|
|
||||||
# used to store the query arguments if we need to try re-importing the claim
|
|
||||||
claim_queries = {} # <sd_hash>: claim query tuple
|
|
||||||
|
|
||||||
# get the claim queries ready, only keep those with associated files
|
|
||||||
for outpoint, sd_hash in file_outpoints.items():
|
|
||||||
if outpoint in claim_outpoint_queries:
|
|
||||||
claim_queries[sd_hash] = claim_outpoint_queries[outpoint]
|
|
||||||
|
|
||||||
# insert the claims
|
|
||||||
new_db.executemany(
|
|
||||||
"insert or ignore into claim values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
|
|
||||||
[
|
|
||||||
(
|
|
||||||
"%s:%i" % (claim_arg_tup[0], claim_arg_tup[1]), claim_arg_tup[2], claim_arg_tup[3],
|
|
||||||
claim_arg_tup[7], claim_arg_tup[6], claim_arg_tup[8],
|
|
||||||
Claim.from_bytes(claim_arg_tup[8]).signing_channel_id, claim_arg_tup[5], claim_arg_tup[4]
|
|
||||||
)
|
|
||||||
for sd_hash, claim_arg_tup in claim_queries.items() if claim_arg_tup
|
|
||||||
] # sd_hash, (txid, nout, claim_id, name, sequence, address, height, amount, serialized)
|
|
||||||
)
|
|
||||||
|
|
||||||
log.info("migrated %i claims", new_db.execute("select count(*) from claim").fetchone()[0])
|
|
||||||
|
|
||||||
damaged_stream_sds = []
|
|
||||||
# import the files and get sd hashes of streams to attempt recovering
|
|
||||||
for sd_hash, file_query in file_args.items():
|
|
||||||
failed_sd = _import_file(*file_query)
|
|
||||||
if failed_sd:
|
|
||||||
damaged_stream_sds.append(failed_sd)
|
|
||||||
|
|
||||||
# recover damaged streams
|
|
||||||
if damaged_stream_sds:
|
|
||||||
blob_dir = os.path.join(conf.data_dir, "blobfiles")
|
|
||||||
damaged_sds_on_disk = [] if not os.path.isdir(blob_dir) else list({p for p in os.listdir(blob_dir)
|
|
||||||
if p in damaged_stream_sds})
|
|
||||||
for damaged_sd in damaged_sds_on_disk:
|
|
||||||
try:
|
|
||||||
decoded, sd_length = verify_sd_blob(damaged_sd, blob_dir)
|
|
||||||
blobs = decoded['blobs']
|
|
||||||
_add_recovered_blobs(blobs, damaged_sd, sd_length) # pylint: disable=no-value-for-parameter
|
|
||||||
_import_file(*file_args[damaged_sd])
|
|
||||||
damaged_stream_sds.remove(damaged_sd)
|
|
||||||
except (OSError, ValueError, TypeError, AssertionError, sqlite3.IntegrityError):
|
|
||||||
continue
|
|
||||||
|
|
||||||
log.info("migrated %i files", new_db.execute("select count(*) from file").fetchone()[0])
|
|
||||||
|
|
||||||
# associate the content claims to their respective files
|
|
||||||
for claim_arg_tup in claim_queries.values():
|
|
||||||
if claim_arg_tup and (claim_arg_tup[0], claim_arg_tup[1]) in file_outpoints \
|
|
||||||
and file_outpoints[(claim_arg_tup[0], claim_arg_tup[1])] in sd_hash_to_stream_hash:
|
|
||||||
try:
|
|
||||||
new_db.execute(
|
|
||||||
"insert or ignore into content_claim values (?, ?)",
|
|
||||||
(
|
|
||||||
sd_hash_to_stream_hash.get(file_outpoints.get((claim_arg_tup[0], claim_arg_tup[1]))),
|
|
||||||
"%s:%i" % (claim_arg_tup[0], claim_arg_tup[1])
|
|
||||||
)
|
|
||||||
)
|
|
||||||
except sqlite3.IntegrityError:
|
|
||||||
continue
|
|
||||||
|
|
||||||
log.info("migrated %i content claims", new_db.execute("select count(*) from content_claim").fetchone()[0])
|
|
||||||
try:
|
|
||||||
_make_db() # pylint: disable=no-value-for-parameter
|
|
||||||
except sqlite3.OperationalError as err:
|
|
||||||
if err.message == "table blob has 7 columns but 5 values were supplied":
|
|
||||||
log.warning("detected a failed previous migration to revision 6, repairing it")
|
|
||||||
connection.close()
|
|
||||||
os.remove(new_db_path)
|
|
||||||
return do_migration(conf)
|
|
||||||
raise err
|
|
||||||
|
|
||||||
connection.close()
|
|
||||||
blobs_db.close()
|
|
||||||
lbryfile_db.close()
|
|
||||||
metadata_db.close()
|
|
||||||
# os.remove(os.path.join(db_dir, "blockchainname.db"))
|
|
||||||
# os.remove(os.path.join(db_dir, 'lbryfile_info.db'))
|
|
||||||
# os.remove(os.path.join(db_dir, 'blobs.db'))
|
|
|
@ -1,13 +0,0 @@
|
||||||
import sqlite3
|
|
||||||
import os
|
|
||||||
|
|
||||||
|
|
||||||
def do_migration(conf):
|
|
||||||
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
|
||||||
connection = sqlite3.connect(db_path)
|
|
||||||
cursor = connection.cursor()
|
|
||||||
cursor.executescript("alter table blob add last_announced_time integer;")
|
|
||||||
cursor.executescript("alter table blob add single_announce integer;")
|
|
||||||
cursor.execute("update blob set next_announce_time=0")
|
|
||||||
connection.commit()
|
|
||||||
connection.close()
|
|
|
@ -1,21 +0,0 @@
|
||||||
import sqlite3
|
|
||||||
import os
|
|
||||||
|
|
||||||
|
|
||||||
def do_migration(conf):
|
|
||||||
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
|
||||||
connection = sqlite3.connect(db_path)
|
|
||||||
cursor = connection.cursor()
|
|
||||||
|
|
||||||
cursor.executescript(
|
|
||||||
"""
|
|
||||||
create table reflected_stream (
|
|
||||||
sd_hash text not null,
|
|
||||||
reflector_address text not null,
|
|
||||||
timestamp integer,
|
|
||||||
primary key (sd_hash, reflector_address)
|
|
||||||
);
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
connection.commit()
|
|
||||||
connection.close()
|
|
|
@ -1,47 +0,0 @@
|
||||||
import sqlite3
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
from lbry.blob.blob_info import BlobInfo
|
|
||||||
from lbry.stream.descriptor import StreamDescriptor
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def do_migration(conf):
|
|
||||||
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
|
||||||
blob_dir = os.path.join(conf.data_dir, "blobfiles")
|
|
||||||
connection = sqlite3.connect(db_path)
|
|
||||||
cursor = connection.cursor()
|
|
||||||
|
|
||||||
query = "select stream_name, stream_key, suggested_filename, sd_hash, stream_hash from stream"
|
|
||||||
streams = cursor.execute(query).fetchall()
|
|
||||||
|
|
||||||
blobs = cursor.execute("select s.stream_hash, s.position, s.iv, b.blob_hash, b.blob_length from stream_blob s "
|
|
||||||
"left outer join blob b ON b.blob_hash=s.blob_hash order by s.position").fetchall()
|
|
||||||
blobs_by_stream = {}
|
|
||||||
for stream_hash, position, iv, blob_hash, blob_length in blobs:
|
|
||||||
blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, blob_hash))
|
|
||||||
|
|
||||||
for stream_name, stream_key, suggested_filename, sd_hash, stream_hash in streams:
|
|
||||||
sd = StreamDescriptor(None, blob_dir, stream_name, stream_key, suggested_filename,
|
|
||||||
blobs_by_stream[stream_hash], stream_hash, sd_hash)
|
|
||||||
if sd_hash != sd.calculate_sd_hash():
|
|
||||||
log.info("Stream for descriptor %s is invalid, cleaning it up", sd_hash)
|
|
||||||
blob_hashes = [blob.blob_hash for blob in blobs_by_stream[stream_hash]]
|
|
||||||
delete_stream(cursor, stream_hash, sd_hash, blob_hashes, blob_dir)
|
|
||||||
|
|
||||||
connection.commit()
|
|
||||||
connection.close()
|
|
||||||
|
|
||||||
|
|
||||||
def delete_stream(transaction, stream_hash, sd_hash, blob_hashes, blob_dir):
|
|
||||||
transaction.execute("delete from content_claim where stream_hash=? ", (stream_hash,))
|
|
||||||
transaction.execute("delete from file where stream_hash=? ", (stream_hash, ))
|
|
||||||
transaction.execute("delete from stream_blob where stream_hash=?", (stream_hash, ))
|
|
||||||
transaction.execute("delete from stream where stream_hash=? ", (stream_hash, ))
|
|
||||||
transaction.execute("delete from blob where blob_hash=?", (sd_hash, ))
|
|
||||||
for blob_hash in blob_hashes:
|
|
||||||
transaction.execute("delete from blob where blob_hash=?", (blob_hash, ))
|
|
||||||
file_path = os.path.join(blob_dir, blob_hash)
|
|
||||||
if os.path.isfile(file_path):
|
|
||||||
os.unlink(file_path)
|
|
|
@ -1,20 +0,0 @@
|
||||||
import sqlite3
|
|
||||||
import os
|
|
||||||
|
|
||||||
|
|
||||||
def do_migration(conf):
|
|
||||||
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
|
|
||||||
connection = sqlite3.connect(db_path)
|
|
||||||
cursor = connection.cursor()
|
|
||||||
|
|
||||||
query = "select stream_hash, sd_hash from main.stream"
|
|
||||||
for stream_hash, sd_hash in cursor.execute(query).fetchall():
|
|
||||||
head_blob_hash = cursor.execute(
|
|
||||||
"select blob_hash from stream_blob where position = 0 and stream_hash = ?",
|
|
||||||
(stream_hash,)
|
|
||||||
).fetchone()
|
|
||||||
if not head_blob_hash:
|
|
||||||
continue
|
|
||||||
cursor.execute("update blob set should_announce=1 where blob_hash in (?, ?)", (sd_hash, head_blob_hash[0],))
|
|
||||||
connection.commit()
|
|
||||||
connection.close()
|
|
|
@ -1,835 +0,0 @@
|
||||||
import os
|
|
||||||
import logging
|
|
||||||
import sqlite3
|
|
||||||
import typing
|
|
||||||
import asyncio
|
|
||||||
import binascii
|
|
||||||
import time
|
|
||||||
from typing import Optional
|
|
||||||
from lbry.wallet import SQLiteMixin
|
|
||||||
from lbry.conf import Config
|
|
||||||
from lbry.wallet.dewies import dewies_to_lbc, lbc_to_dewies
|
|
||||||
from lbry.wallet.transaction import Transaction
|
|
||||||
from lbry.schema.claim import Claim
|
|
||||||
from lbry.dht.constants import DATA_EXPIRATION
|
|
||||||
from lbry.blob.blob_info import BlobInfo
|
|
||||||
|
|
||||||
if typing.TYPE_CHECKING:
|
|
||||||
from lbry.blob.blob_file import BlobFile
|
|
||||||
from lbry.stream.descriptor import StreamDescriptor
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def calculate_effective_amount(amount: str, supports: typing.Optional[typing.List[typing.Dict]] = None) -> str:
|
|
||||||
return dewies_to_lbc(
|
|
||||||
lbc_to_dewies(amount) + sum([lbc_to_dewies(support['amount']) for support in supports])
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class StoredContentClaim:
|
|
||||||
def __init__(self, outpoint: Optional[str] = None, claim_id: Optional[str] = None, name: Optional[str] = None,
|
|
||||||
amount: Optional[int] = None, height: Optional[int] = None, serialized: Optional[str] = None,
|
|
||||||
channel_claim_id: Optional[str] = None, address: Optional[str] = None,
|
|
||||||
claim_sequence: Optional[int] = None, channel_name: Optional[str] = None):
|
|
||||||
self.claim_id = claim_id
|
|
||||||
self.outpoint = outpoint
|
|
||||||
self.claim_name = name
|
|
||||||
self.amount = amount
|
|
||||||
self.height = height
|
|
||||||
self.claim: typing.Optional[Claim] = None if not serialized else Claim.from_bytes(
|
|
||||||
binascii.unhexlify(serialized)
|
|
||||||
)
|
|
||||||
self.claim_address = address
|
|
||||||
self.claim_sequence = claim_sequence
|
|
||||||
self.channel_claim_id = channel_claim_id
|
|
||||||
self.channel_name = channel_name
|
|
||||||
|
|
||||||
@property
|
|
||||||
def txid(self) -> typing.Optional[str]:
|
|
||||||
return None if not self.outpoint else self.outpoint.split(":")[0]
|
|
||||||
|
|
||||||
@property
|
|
||||||
def nout(self) -> typing.Optional[int]:
|
|
||||||
return None if not self.outpoint else int(self.outpoint.split(":")[1])
|
|
||||||
|
|
||||||
def as_dict(self) -> typing.Dict:
|
|
||||||
return {
|
|
||||||
"name": self.claim_name,
|
|
||||||
"claim_id": self.claim_id,
|
|
||||||
"address": self.claim_address,
|
|
||||||
"claim_sequence": self.claim_sequence,
|
|
||||||
"value": self.claim,
|
|
||||||
"height": self.height,
|
|
||||||
"amount": dewies_to_lbc(self.amount),
|
|
||||||
"nout": self.nout,
|
|
||||||
"txid": self.txid,
|
|
||||||
"channel_claim_id": self.channel_claim_id,
|
|
||||||
"channel_name": self.channel_name
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def _get_content_claims(transaction: sqlite3.Connection, query: str,
|
|
||||||
source_hashes: typing.List[str]) -> typing.Dict[str, StoredContentClaim]:
|
|
||||||
claims = {}
|
|
||||||
for claim_info in _batched_select(transaction, query, source_hashes):
|
|
||||||
claims[claim_info[0]] = StoredContentClaim(*claim_info[1:])
|
|
||||||
return claims
|
|
||||||
|
|
||||||
|
|
||||||
def get_claims_from_stream_hashes(transaction: sqlite3.Connection,
|
|
||||||
stream_hashes: typing.List[str]) -> typing.Dict[str, StoredContentClaim]:
|
|
||||||
query = (
|
|
||||||
"select content_claim.stream_hash, c.*, case when c.channel_claim_id is not null then "
|
|
||||||
" (select claim_name from claim where claim_id==c.channel_claim_id) "
|
|
||||||
" else null end as channel_name "
|
|
||||||
" from content_claim "
|
|
||||||
" inner join claim c on c.claim_outpoint=content_claim.claim_outpoint and content_claim.stream_hash in {}"
|
|
||||||
" order by c.rowid desc"
|
|
||||||
)
|
|
||||||
return _get_content_claims(transaction, query, stream_hashes)
|
|
||||||
|
|
||||||
|
|
||||||
def get_claims_from_torrent_info_hashes(transaction: sqlite3.Connection,
|
|
||||||
info_hashes: typing.List[str]) -> typing.Dict[str, StoredContentClaim]:
|
|
||||||
query = (
|
|
||||||
"select content_claim.bt_infohash, c.*, case when c.channel_claim_id is not null then "
|
|
||||||
" (select claim_name from claim where claim_id==c.channel_claim_id) "
|
|
||||||
" else null end as channel_name "
|
|
||||||
" from content_claim "
|
|
||||||
" inner join claim c on c.claim_outpoint=content_claim.claim_outpoint and content_claim.bt_infohash in {}"
|
|
||||||
" order by c.rowid desc"
|
|
||||||
)
|
|
||||||
return _get_content_claims(transaction, query, info_hashes)
|
|
||||||
|
|
||||||
|
|
||||||
def _batched_select(transaction, query, parameters, batch_size=900):
|
|
||||||
for start_index in range(0, len(parameters), batch_size):
|
|
||||||
current_batch = parameters[start_index:start_index+batch_size]
|
|
||||||
bind = "({})".format(','.join(['?'] * len(current_batch)))
|
|
||||||
yield from transaction.execute(query.format(bind), current_batch)
def _get_lbry_file_stream_dict(rowid, added_on, stream_hash, file_name, download_dir, data_rate, status,
                               sd_hash, stream_key, stream_name, suggested_file_name, claim, saved_file,
                               raw_content_fee, fully_reflected):
    return {
        "rowid": rowid,
        "added_on": added_on,
        "stream_hash": stream_hash,
        "file_name": file_name,  # hex
        "download_directory": download_dir,  # hex
        "blob_data_rate": data_rate,
        "status": status,
        "sd_hash": sd_hash,
        "key": stream_key,
        "stream_name": stream_name,  # hex
        "suggested_file_name": suggested_file_name,  # hex
        "claim": claim,
        "saved_file": bool(saved_file),
        "content_fee": None if not raw_content_fee else Transaction(
            binascii.unhexlify(raw_content_fee)
        ),
        "fully_reflected": fully_reflected
    }


def get_all_lbry_files(transaction: sqlite3.Connection) -> typing.List[typing.Dict]:
    files = []
    signed_claims = {}
    for (rowid, stream_hash, _, file_name, download_dir, data_rate, status, saved_file, raw_content_fee,
         added_on, _, sd_hash, stream_key, stream_name, suggested_file_name, *claim_args) in transaction.execute(
            "select file.rowid, file.*, stream.*, c.*, "
            " case when (SELECT 1 FROM reflected_stream r WHERE r.sd_hash=stream.sd_hash) "
            " is null then 0 else 1 end as fully_reflected "
            "from file inner join stream on file.stream_hash=stream.stream_hash "
            "inner join content_claim cc on file.stream_hash=cc.stream_hash "
            "inner join claim c on cc.claim_outpoint=c.claim_outpoint "
            "order by c.rowid desc").fetchall():
        claim_args, fully_reflected = tuple(claim_args[:-1]), claim_args[-1]
        claim = StoredContentClaim(*claim_args)
        if claim.channel_claim_id:
            if claim.channel_claim_id not in signed_claims:
                signed_claims[claim.channel_claim_id] = []
            signed_claims[claim.channel_claim_id].append(claim)
        files.append(
            _get_lbry_file_stream_dict(
                rowid, added_on, stream_hash, file_name, download_dir, data_rate, status,
                sd_hash, stream_key, stream_name, suggested_file_name, claim, saved_file,
                raw_content_fee, fully_reflected
            )
        )
    for claim_name, claim_id in _batched_select(
            transaction, "select c.claim_name, c.claim_id from claim c where c.claim_id in {}",
            tuple(signed_claims.keys())):
        for claim in signed_claims[claim_id]:
            claim.channel_name = claim_name
    return files
def store_stream(transaction: sqlite3.Connection, sd_blob: 'BlobFile', descriptor: 'StreamDescriptor'):
    # add all blobs, except the last one, which is empty
    transaction.executemany(
        "insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)",
        ((blob.blob_hash, blob.length, 0, 0, "pending", 0, 0)
         for blob in (descriptor.blobs[:-1] if len(descriptor.blobs) > 1 else descriptor.blobs) + [sd_blob])
    ).fetchall()
    # add the stream
    transaction.execute("insert or ignore into stream values (?, ?, ?, ?, ?)",
                        (descriptor.stream_hash, sd_blob.blob_hash, descriptor.key,
                         binascii.hexlify(descriptor.stream_name.encode()).decode(),
                         binascii.hexlify(descriptor.suggested_file_name.encode()).decode())).fetchall()
    # associate the blobs to the stream
    transaction.executemany(
        "insert or ignore into stream_blob values (?, ?, ?, ?)",
        ((descriptor.stream_hash, blob.blob_hash, blob.blob_num, blob.iv)
         for blob in descriptor.blobs)
    ).fetchall()
    # ensure should_announce is set regardless of whether the insert was ignored
    transaction.execute(
        "update blob set should_announce=1 where blob_hash in (?, ?)",
        (sd_blob.blob_hash, descriptor.blobs[0].blob_hash,)
    ).fetchall()


def delete_stream(transaction: sqlite3.Connection, descriptor: 'StreamDescriptor'):
    blob_hashes = [(blob.blob_hash, ) for blob in descriptor.blobs[:-1]]
    blob_hashes.append((descriptor.sd_hash, ))
    transaction.execute("delete from content_claim where stream_hash=? ", (descriptor.stream_hash,)).fetchall()
    transaction.execute("delete from file where stream_hash=? ", (descriptor.stream_hash,)).fetchall()
    transaction.execute("delete from stream_blob where stream_hash=?", (descriptor.stream_hash,)).fetchall()
    transaction.execute("delete from stream where stream_hash=? ", (descriptor.stream_hash,)).fetchall()
    transaction.executemany("delete from blob where blob_hash=?", blob_hashes).fetchall()


def delete_torrent(transaction: sqlite3.Connection, bt_infohash: str):
    transaction.execute("delete from content_claim where bt_infohash=?", (bt_infohash, )).fetchall()
    transaction.execute("delete from torrent_tracker where bt_infohash=?", (bt_infohash,)).fetchall()
    transaction.execute("delete from torrent_node where bt_infohash=?", (bt_infohash,)).fetchall()
    transaction.execute("delete from torrent_http_seed where bt_infohash=?", (bt_infohash,)).fetchall()
    transaction.execute("delete from file where bt_infohash=?", (bt_infohash,)).fetchall()
    transaction.execute("delete from torrent where bt_infohash=?", (bt_infohash,)).fetchall()


def store_file(transaction: sqlite3.Connection, stream_hash: str, file_name: typing.Optional[str],
               download_directory: typing.Optional[str], data_payment_rate: float, status: str,
               content_fee: typing.Optional[Transaction], added_on: typing.Optional[int] = None) -> int:
    if not file_name and not download_directory:
        encoded_file_name, encoded_download_dir = None, None
    else:
        encoded_file_name = binascii.hexlify(file_name.encode()).decode()
        encoded_download_dir = binascii.hexlify(download_directory.encode()).decode()
    time_added = added_on or int(time.time())
    transaction.execute(
        "insert or replace into file values (?, NULL, ?, ?, ?, ?, ?, ?, ?)",
        (stream_hash, encoded_file_name, encoded_download_dir, data_payment_rate, status,
         1 if (file_name and download_directory and os.path.isfile(os.path.join(download_directory, file_name))) else 0,
         None if not content_fee else binascii.hexlify(content_fee.raw).decode(), time_added)
    ).fetchall()

    return transaction.execute("select rowid from file where stream_hash=?", (stream_hash, )).fetchone()[0]
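store_file (and get_all_lbry_files above) keeps file_name and download_directory hex-encoded in the database. The round trip is plain binascii; a quick check with a made-up file name:

import binascii

encoded = binascii.hexlify("video.mp4".encode()).decode()    # what gets stored: '766964656f2e6d7034'
assert binascii.unhexlify(encoded).decode() == "video.mp4"   # what readers decode back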
class SQLiteStorage(SQLiteMixin):
    CREATE_TABLES_QUERY = """
        pragma foreign_keys=on;
        pragma journal_mode=WAL;

        create table if not exists blob (
            blob_hash char(96) primary key not null,
            blob_length integer not null,
            next_announce_time integer not null,
            should_announce integer not null default 0,
            status text not null,
            last_announced_time integer,
            single_announce integer
        );

        create table if not exists stream (
            stream_hash char(96) not null primary key,
            sd_hash char(96) not null references blob,
            stream_key text not null,
            stream_name text not null,
            suggested_filename text not null
        );

        create table if not exists stream_blob (
            stream_hash char(96) not null references stream,
            blob_hash char(96) references blob,
            position integer not null,
            iv char(32) not null,
            primary key (stream_hash, blob_hash)
        );

        create table if not exists claim (
            claim_outpoint text not null primary key,
            claim_id char(40) not null,
            claim_name text not null,
            amount integer not null,
            height integer not null,
            serialized_metadata blob not null,
            channel_claim_id text,
            address text not null,
            claim_sequence integer not null
        );

        create table if not exists torrent (
            bt_infohash char(20) not null primary key,
            tracker text,
            length integer not null,
            name text not null
        );

        create table if not exists torrent_node ( -- BEP-0005
            bt_infohash char(20) not null references torrent,
            host text not null,
            port integer not null
        );

        create table if not exists torrent_tracker ( -- BEP-0012
            bt_infohash char(20) not null references torrent,
            tracker text not null
        );

        create table if not exists torrent_http_seed ( -- BEP-0017
            bt_infohash char(20) not null references torrent,
            http_seed text not null
        );

        create table if not exists file (
            stream_hash char(96) references stream,
            bt_infohash char(20) references torrent,
            file_name text,
            download_directory text,
            blob_data_rate real not null,
            status text not null,
            saved_file integer not null,
            content_fee text,
            added_on integer not null
        );

        create table if not exists content_claim (
            stream_hash char(96) references stream,
            bt_infohash char(20) references torrent,
            claim_outpoint text unique not null references claim
        );

        create table if not exists support (
            support_outpoint text not null primary key,
            claim_id text not null,
            amount integer not null,
            address text not null
        );

        create table if not exists reflected_stream (
            sd_hash text not null,
            reflector_address text not null,
            timestamp integer,
            primary key (sd_hash, reflector_address)
        );

        create table if not exists peer (
            node_id char(96) not null primary key,
            address text not null,
            udp_port integer not null,
            tcp_port integer,
            unique (address, udp_port)
        );
    """

    def __init__(self, conf: Config, path, loop=None, time_getter: typing.Optional[typing.Callable[[], float]] = None):
        super().__init__(path)
        self.conf = conf
        self.content_claim_callbacks = {}
        self.loop = loop or asyncio.get_event_loop()
        self.time_getter = time_getter or time.time
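The CREATE_TABLES_QUERY above is plain SQLite and can be exercised on its own. A minimal sketch, not part of the module, that builds the schema in memory and inserts one blob row in the same column order used by store_stream; the hash and length values are illustrative:

import sqlite3

db = sqlite3.connect(":memory:")
db.executescript(SQLiteStorage.CREATE_TABLES_QUERY)
db.execute("insert into blob values (?, ?, ?, ?, ?, ?, ?)", ("ab" * 48, 2097152, 0, 0, "pending", 0, 0))
print(db.execute("select status, blob_length from blob").fetchone())   # ('pending', 2097152)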
    async def run_and_return_one_or_none(self, query, *args):
        for row in await self.db.execute_fetchall(query, args):
            if len(row) == 1:
                return row[0]
            return row

    async def run_and_return_list(self, query, *args):
        rows = list(await self.db.execute_fetchall(query, args))
        return [col[0] for col in rows] if rows else []

    # # # # # # # # # blob functions # # # # # # # # #

    async def add_blobs(self, *blob_hashes_and_lengths: typing.Tuple[str, int], finished=False):
        def _add_blobs(transaction: sqlite3.Connection):
            transaction.executemany(
                "insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)",
                (
                    (blob_hash, length, 0, 0, "pending" if not finished else "finished", 0, 0)
                    for blob_hash, length in blob_hashes_and_lengths
                )
            ).fetchall()
            if finished:
                transaction.executemany(
                    "update blob set status='finished' where blob.blob_hash=?", (
                        (blob_hash, ) for blob_hash, _ in blob_hashes_and_lengths
                    )
                ).fetchall()
        return await self.db.run(_add_blobs)

    def get_blob_status(self, blob_hash: str):
        return self.run_and_return_one_or_none(
            "select status from blob where blob_hash=?", blob_hash
        )

    def update_last_announced_blobs(self, blob_hashes: typing.List[str]):
        def _update_last_announced_blobs(transaction: sqlite3.Connection):
            last_announced = self.time_getter()
            return transaction.executemany(
                "update blob set next_announce_time=?, last_announced_time=?, single_announce=0 "
                "where blob_hash=?",
                ((int(last_announced + (DATA_EXPIRATION / 2)), int(last_announced), blob_hash)
                 for blob_hash in blob_hashes)
            ).fetchall()
        return self.db.run(_update_last_announced_blobs)
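update_last_announced_blobs schedules the next announcement halfway into the data expiration window. A hedged arithmetic sketch; the real DATA_EXPIRATION constant is imported elsewhere and is assumed here to be 86400 seconds:

DATA_EXPIRATION = 86400                                    # assumed value, for illustration only
last_announced = 1_600_000_000
next_announce_time = int(last_announced + (DATA_EXPIRATION / 2))
assert next_announce_time == 1_600_043_200                 # re-announce 12 hours later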
    def should_single_announce_blobs(self, blob_hashes, immediate=False):
        def set_single_announce(transaction):
            now = int(self.time_getter())
            for blob_hash in blob_hashes:
                if immediate:
                    transaction.execute(
                        "update blob set single_announce=1, next_announce_time=? "
                        "where blob_hash=? and status='finished'", (int(now), blob_hash)
                    ).fetchall()
                else:
                    transaction.execute(
                        "update blob set single_announce=1 where blob_hash=? and status='finished'", (blob_hash,)
                    ).fetchall()
        return self.db.run(set_single_announce)

    def get_blobs_to_announce(self):
        def get_and_update(transaction):
            timestamp = int(self.time_getter())
            if self.conf.announce_head_and_sd_only:
                r = transaction.execute(
                    "select blob_hash from blob "
                    "where blob_hash is not null and "
                    "(should_announce=1 or single_announce=1) and next_announce_time<? and status='finished' "
                    "order by next_announce_time asc limit ?",
                    (timestamp, int(self.conf.concurrent_blob_announcers * 10))
                ).fetchall()
            else:
                r = transaction.execute(
                    "select blob_hash from blob where blob_hash is not null "
                    "and next_announce_time<? and status='finished' "
                    "order by next_announce_time asc limit ?",
                    (timestamp, int(self.conf.concurrent_blob_announcers * 10))
                ).fetchall()
            return [b[0] for b in r]
        return self.db.run(get_and_update)

    def delete_blobs_from_db(self, blob_hashes):
        def delete_blobs(transaction):
            transaction.executemany(
                "delete from blob where blob_hash=?;", ((blob_hash,) for blob_hash in blob_hashes)
            ).fetchall()
        return self.db.run_with_foreign_keys_disabled(delete_blobs)

    def get_all_blob_hashes(self):
        return self.run_and_return_list("select blob_hash from blob")

    def sync_missing_blobs(self, blob_files: typing.Set[str]) -> typing.Awaitable[typing.Set[str]]:
        def _sync_blobs(transaction: sqlite3.Connection) -> typing.Set[str]:
            finished_blob_hashes = tuple(
                blob_hash for (blob_hash, ) in transaction.execute(
                    "select blob_hash from blob where status='finished'"
                ).fetchall()
            )
            finished_blobs_set = set(finished_blob_hashes)
            to_update_set = finished_blobs_set.difference(blob_files)
            transaction.executemany(
                "update blob set status='pending' where blob_hash=?",
                ((blob_hash, ) for blob_hash in to_update_set)
            ).fetchall()
            return blob_files.intersection(finished_blobs_set)
        return self.db.run(_sync_blobs)
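sync_missing_blobs reconciles the database against the blob files actually present on disk: rows marked 'finished' with no backing file are flipped back to 'pending', and only hashes present in both places are reported as verified. The set arithmetic, sketched with made-up hashes:

in_db_finished = {"blob_a", "blob_b", "blob_c"}
on_disk = {"blob_b", "blob_c", "blob_d"}
flip_to_pending = in_db_finished - on_disk     # {'blob_a'}: finished in the db, missing on disk
still_verified = on_disk & in_db_finished      # {'blob_b', 'blob_c'}: returned to the caller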
    # # # # # # # # # stream functions # # # # # # # # #

    async def stream_exists(self, sd_hash: str) -> bool:
        streams = await self.run_and_return_one_or_none("select stream_hash from stream where sd_hash=?", sd_hash)
        return streams is not None

    async def file_exists(self, sd_hash: str) -> bool:
        streams = await self.run_and_return_one_or_none("select f.stream_hash from file f "
                                                        "inner join stream s on "
                                                        "s.stream_hash=f.stream_hash and s.sd_hash=?", sd_hash)
        return streams is not None

    def store_stream(self, sd_blob: 'BlobFile', descriptor: 'StreamDescriptor'):
        return self.db.run(store_stream, sd_blob, descriptor)

    def get_blobs_for_stream(self, stream_hash, only_completed=False) -> typing.Awaitable[typing.List[BlobInfo]]:
        def _get_blobs_for_stream(transaction):
            crypt_blob_infos = []
            stream_blobs = transaction.execute(
                "select blob_hash, position, iv from stream_blob where stream_hash=? "
                "order by position asc", (stream_hash, )
            ).fetchall()
            if only_completed:
                lengths = transaction.execute(
                    "select b.blob_hash, b.blob_length from blob b "
                    "inner join stream_blob s ON b.blob_hash=s.blob_hash and b.status='finished' and s.stream_hash=?",
                    (stream_hash, )
                ).fetchall()
            else:
                lengths = transaction.execute(
                    "select b.blob_hash, b.blob_length from blob b "
                    "inner join stream_blob s ON b.blob_hash=s.blob_hash and s.stream_hash=?",
                    (stream_hash, )
                ).fetchall()

            blob_length_dict = {}
            for blob_hash, length in lengths:
                blob_length_dict[blob_hash] = length

            for blob_hash, position, iv in stream_blobs:
                blob_length = blob_length_dict.get(blob_hash, 0)
                crypt_blob_infos.append(BlobInfo(position, blob_length, iv, blob_hash))
                if not blob_hash:
                    break
            return crypt_blob_infos
        return self.db.run(_get_blobs_for_stream)
    def get_sd_blob_hash_for_stream(self, stream_hash):
        return self.run_and_return_one_or_none(
            "select sd_hash from stream where stream_hash=?", stream_hash
        )

    def get_stream_hash_for_sd_hash(self, sd_blob_hash):
        return self.run_and_return_one_or_none(
            "select stream_hash from stream where sd_hash = ?", sd_blob_hash
        )

    def delete_stream(self, descriptor: 'StreamDescriptor'):
        return self.db.run_with_foreign_keys_disabled(delete_stream, descriptor)

    async def delete_torrent(self, bt_infohash: str):
        return await self.db.run(delete_torrent, bt_infohash)

    # # # # # # # # # file stuff # # # # # # # # #

    def save_downloaded_file(self, stream_hash: str, file_name: typing.Optional[str],
                             download_directory: typing.Optional[str], data_payment_rate: float,
                             content_fee: typing.Optional[Transaction] = None,
                             added_on: typing.Optional[int] = None) -> typing.Awaitable[int]:
        return self.save_published_file(
            stream_hash, file_name, download_directory, data_payment_rate, status="running",
            content_fee=content_fee, added_on=added_on
        )

    def save_published_file(self, stream_hash: str, file_name: typing.Optional[str],
                            download_directory: typing.Optional[str], data_payment_rate: float,
                            status: str = "finished",
                            content_fee: typing.Optional[Transaction] = None,
                            added_on: typing.Optional[int] = None) -> typing.Awaitable[int]:
        return self.db.run(store_file, stream_hash, file_name, download_directory, data_payment_rate, status,
                           content_fee, added_on)

    async def update_manually_removed_files_since_last_run(self):
        """
        Update files that have been removed from the downloads directory since the last run
        """
        def update_manually_removed_files(transaction: sqlite3.Connection):
            files = {}
            query = "select stream_hash, download_directory, file_name from file where saved_file=1 " \
                    "and stream_hash is not null"
            for (stream_hash, download_directory, file_name) in transaction.execute(query).fetchall():
                if download_directory and file_name:
                    files[stream_hash] = download_directory, file_name
            return files

        def detect_removed(files):
            return [
                stream_hash for stream_hash, (download_directory, file_name) in files.items()
                if not os.path.isfile(os.path.join(binascii.unhexlify(download_directory).decode(),
                                                   binascii.unhexlify(file_name).decode()))
            ]

        def update_db_removed(transaction: sqlite3.Connection, removed):
            query = "update file set file_name=null, download_directory=null, saved_file=0 where stream_hash in {}"
            for cur in _batched_select(transaction, query, removed):
                cur.fetchall()

        stream_and_file = await self.db.run(update_manually_removed_files)
        removed = await self.loop.run_in_executor(None, detect_removed, stream_and_file)
        if removed:
            await self.db.run(update_db_removed, removed)
    def get_all_lbry_files(self) -> typing.Awaitable[typing.List[typing.Dict]]:
        return self.db.run(get_all_lbry_files)

    def change_file_status(self, stream_hash: str, new_status: str):
        log.debug("update file status %s -> %s", stream_hash, new_status)
        return self.db.execute_fetchall("update file set status=? where stream_hash=?", (new_status, stream_hash))

    async def change_file_download_dir_and_file_name(self, stream_hash: str, download_dir: typing.Optional[str],
                                                     file_name: typing.Optional[str]):
        if not file_name or not download_dir:
            encoded_file_name, encoded_download_dir = None, None
        else:
            encoded_file_name = binascii.hexlify(file_name.encode()).decode()
            encoded_download_dir = binascii.hexlify(download_dir.encode()).decode()
        return await self.db.execute_fetchall("update file set download_directory=?, file_name=? where stream_hash=?", (
            encoded_download_dir, encoded_file_name, stream_hash,
        ))

    async def save_content_fee(self, stream_hash: str, content_fee: Transaction):
        return await self.db.execute_fetchall("update file set content_fee=? where stream_hash=?", (
            binascii.hexlify(content_fee.raw), stream_hash,
        ))

    async def set_saved_file(self, stream_hash: str):
        return await self.db.execute_fetchall("update file set saved_file=1 where stream_hash=?", (
            stream_hash,
        ))

    async def clear_saved_file(self, stream_hash: str):
        return await self.db.execute_fetchall("update file set saved_file=0 where stream_hash=?", (
            stream_hash,
        ))

    async def recover_streams(self, descriptors_and_sds: typing.List[typing.Tuple['StreamDescriptor', 'BlobFile',
                                                                                  typing.Optional[Transaction]]],
                              download_directory: str):
        def _recover(transaction: sqlite3.Connection):
            stream_hashes = [x[0].stream_hash for x in descriptors_and_sds]
            for descriptor, sd_blob, content_fee in descriptors_and_sds:
                content_claim = transaction.execute(
                    "select * from content_claim where stream_hash=?", (descriptor.stream_hash, )
                ).fetchone()
                delete_stream(transaction, descriptor)  # this will also delete the content claim
                store_stream(transaction, sd_blob, descriptor)
                store_file(transaction, descriptor.stream_hash, os.path.basename(descriptor.suggested_file_name),
                           download_directory, 0.0, 'stopped', content_fee=content_fee)
                if content_claim:
                    transaction.execute("insert or ignore into content_claim values (?, ?, ?)", content_claim)
            transaction.executemany(
                "update file set status='stopped' where stream_hash=?",
                ((stream_hash, ) for stream_hash in stream_hashes)
            ).fetchall()
            download_dir = binascii.hexlify(self.conf.download_dir.encode()).decode()
            transaction.executemany(
                "update file set download_directory=? where stream_hash=?",
                ((download_dir, stream_hash) for stream_hash in stream_hashes)
            ).fetchall()
        await self.db.run_with_foreign_keys_disabled(_recover)

    def get_all_stream_hashes(self):
        return self.run_and_return_list("select stream_hash from stream")
    # # # # # # # # # support functions # # # # # # # # #

    def save_supports(self, claim_id_to_supports: dict):
        # TODO: add 'address' to support items returned for a claim from lbrycrdd and lbryum-server
        def _save_support(transaction):
            bind = "({})".format(','.join(['?'] * len(claim_id_to_supports)))
            transaction.execute(
                f"delete from support where claim_id in {bind}", tuple(claim_id_to_supports.keys())
            ).fetchall()
            for claim_id, supports in claim_id_to_supports.items():
                for support in supports:
                    transaction.execute(
                        "insert into support values (?, ?, ?, ?)",
                        ("%s:%i" % (support['txid'], support['nout']), claim_id, lbc_to_dewies(support['amount']),
                         support.get('address', ""))
                    ).fetchall()
        return self.db.run(_save_support)

    def get_supports(self, *claim_ids):
        def _format_support(outpoint, supported_id, amount, address):
            return {
                "txid": outpoint.split(":")[0],
                "nout": int(outpoint.split(":")[1]),
                "claim_id": supported_id,
                "amount": dewies_to_lbc(amount),
                "address": address,
            }

        def _get_supports(transaction):
            return [
                _format_support(*support_info)
                for support_info in _batched_select(
                    transaction,
                    "select * from support where claim_id in {}",
                    claim_ids
                )
            ]

        return self.db.run(_get_supports)
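Supports are keyed by an outpoint string of the form "txid:nout"; save_supports builds it and get_supports splits it back apart. A quick round-trip check with placeholder values:

txid, nout = "f0" * 32, 1                     # placeholder txid and output index
outpoint = "%s:%i" % (txid, nout)             # stored as support_outpoint
assert outpoint.split(":")[0] == txid
assert int(outpoint.split(":")[1]) == nout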
    # # # # # # # # # claim functions # # # # # # # # #

    async def save_claims(self, claim_infos):
        claim_id_to_supports = {}
        update_file_callbacks = []

        def _save_claims(transaction):
            content_claims_to_update = []
            for claim_info in claim_infos:
                outpoint = "%s:%i" % (claim_info['txid'], claim_info['nout'])
                claim_id = claim_info['claim_id']
                name = claim_info['name']
                amount = lbc_to_dewies(claim_info['amount'])
                height = claim_info['height']
                address = claim_info['address']
                sequence = claim_info['claim_sequence']
                certificate_id = claim_info['value'].signing_channel_id
                try:
                    source_hash = claim_info['value'].stream.source.sd_hash
                except (AttributeError, ValueError):
                    source_hash = None
                serialized = binascii.hexlify(claim_info['value'].to_bytes())
                transaction.execute(
                    "insert or replace into claim values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
                    (outpoint, claim_id, name, amount, height, serialized, certificate_id, address, sequence)
                ).fetchall()
                # if this response doesn't have support info don't overwrite the existing
                # support info
                if 'supports' in claim_info:
                    claim_id_to_supports[claim_id] = claim_info['supports']
                if not source_hash:
                    continue
                stream_hash = transaction.execute(
                    "select file.stream_hash from stream "
                    "inner join file on file.stream_hash=stream.stream_hash where sd_hash=?", (source_hash,)
                ).fetchone()
                if not stream_hash:
                    continue
                stream_hash = stream_hash[0]
                known_outpoint = transaction.execute(
                    "select claim_outpoint from content_claim where stream_hash=?", (stream_hash,)
                ).fetchone()
                known_claim_id = transaction.execute(
                    "select claim_id from claim "
                    "inner join content_claim c3 ON claim.claim_outpoint=c3.claim_outpoint "
                    "where c3.stream_hash=?", (stream_hash,)
                ).fetchone()
                if not known_claim_id:
                    content_claims_to_update.append((stream_hash, outpoint))
                elif known_outpoint != outpoint:
                    content_claims_to_update.append((stream_hash, outpoint))
            for stream_hash, outpoint in content_claims_to_update:
                self._save_content_claim(transaction, outpoint, stream_hash)
                if stream_hash in self.content_claim_callbacks:
                    update_file_callbacks.append(self.content_claim_callbacks[stream_hash]())

        await self.db.run(_save_claims)
        if update_file_callbacks:
            await asyncio.wait(update_file_callbacks)
        if claim_id_to_supports:
            await self.save_supports(claim_id_to_supports)
    def save_claims_for_resolve(self, claim_infos):
        to_save = {}
        for info in claim_infos:
            if 'value' in info:
                if info['value']:
                    to_save[info['claim_id']] = info
            else:
                for key in ('certificate', 'claim'):
                    if info.get(key, {}).get('value'):
                        to_save[info[key]['claim_id']] = info[key]
        return self.save_claims(to_save.values())

    @staticmethod
    def _save_content_claim(transaction, claim_outpoint, stream_hash):
        # get the claim id and serialized metadata
        claim_info = transaction.execute(
            "select claim_id, serialized_metadata from claim where claim_outpoint=?", (claim_outpoint,)
        ).fetchone()
        if not claim_info:
            raise Exception("claim not found")
        new_claim_id, claim = claim_info[0], Claim.from_bytes(binascii.unhexlify(claim_info[1]))

        # certificate claims should not be in the content_claim table
        if not claim.is_stream:
            raise Exception("claim does not contain a stream")

        # get the known sd hash for this stream
        known_sd_hash = transaction.execute(
            "select sd_hash from stream where stream_hash=?", (stream_hash,)
        ).fetchone()
        if not known_sd_hash:
            raise Exception("stream not found")
        # check the claim contains the same sd hash
        if known_sd_hash[0] != claim.stream.source.sd_hash:
            raise Exception("stream mismatch")

        # if there is a current claim associated to the file, check that the new claim is an update to it
        current_associated_content = transaction.execute(
            "select claim_outpoint from content_claim where stream_hash=?", (stream_hash,)
        ).fetchone()
        if current_associated_content:
            current_associated_claim_id = transaction.execute(
                "select claim_id from claim where claim_outpoint=?", current_associated_content
            ).fetchone()[0]
            if current_associated_claim_id != new_claim_id:
                raise Exception(
                    f"mismatching claim ids when updating stream {current_associated_claim_id} vs {new_claim_id}"
                )

        # update the claim associated to the file
        transaction.execute("delete from content_claim where stream_hash=?", (stream_hash, )).fetchall()
        transaction.execute(
            "insert into content_claim values (?, NULL, ?)", (stream_hash, claim_outpoint)
        ).fetchall()

    async def save_content_claim(self, stream_hash, claim_outpoint):
        await self.db.run(self._save_content_claim, claim_outpoint, stream_hash)
        # update corresponding ManagedEncryptedFileDownloader object
        if stream_hash in self.content_claim_callbacks:
            await self.content_claim_callbacks[stream_hash]()

    async def get_content_claim(self, stream_hash: str, include_supports: typing.Optional[bool] = True) -> typing.Dict:
        claims = await self.db.run(get_claims_from_stream_hashes, [stream_hash])
        claim = None
        if claims:
            claim = claims[stream_hash].as_dict()
            if include_supports:
                supports = await self.get_supports(claim['claim_id'])
                claim['supports'] = supports
                claim['effective_amount'] = calculate_effective_amount(claim['amount'], supports)
        return claim
    # # # # # # # # # reflector functions # # # # # # # # #

    def update_reflected_stream(self, sd_hash, reflector_address, success=True):
        if success:
            return self.db.execute_fetchall(
                "insert or replace into reflected_stream values (?, ?, ?)",
                (sd_hash, reflector_address, self.time_getter())
            )
        return self.db.execute_fetchall(
            "delete from reflected_stream where sd_hash=? and reflector_address=?",
            (sd_hash, reflector_address)
        )

    def get_streams_to_re_reflect(self):
        return self.run_and_return_list(
            "select s.sd_hash from stream s "
            "left outer join reflected_stream r on s.sd_hash=r.sd_hash "
            "where r.timestamp is null or r.timestamp < ?",
            int(self.time_getter()) - 86400
        )

    # # # # # # # # # # dht functions # # # # # # # # # # #

    async def get_persisted_kademlia_peers(self) -> typing.List[typing.Tuple[bytes, str, int, int]]:
        query = 'select node_id, address, udp_port, tcp_port from peer'
        return [(binascii.unhexlify(n), a, u, t) for n, a, u, t in await self.db.execute_fetchall(query)]

    async def save_kademlia_peers(self, peers: typing.List['KademliaPeer']):
        def _save_kademlia_peers(transaction: sqlite3.Connection):
            transaction.execute('delete from peer').fetchall()
            transaction.executemany(
                'insert into peer(node_id, address, udp_port, tcp_port) values (?, ?, ?, ?)',
                tuple([(binascii.hexlify(p.node_id), p.address, p.udp_port, p.tcp_port) for p in peers])
            ).fetchall()
        return await self.db.run(_save_kademlia_peers)
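Kademlia node IDs are stored hex-encoded by save_kademlia_peers and decoded again by get_persisted_kademlia_peers. A minimal round-trip check with a made-up node id:

import binascii

node_id = bytes(range(48))                     # made-up 48-byte node id
stored = binascii.hexlify(node_id)             # value written to the peer table
assert binascii.unhexlify(stored) == node_id   # value handed back to the DHT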
@@ -1,62 +0,0 @@
# Copyright 2016-2017 Ionuț Arțăriși <ionut@artarisi.eu>

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# This came from https://github.com/mapleoin/undecorated

from inspect import isfunction, ismethod, isclass

__version__ = '0.3.0'


def undecorated(o):
    """Remove all decorators from a function, method or class"""
    # class decorator
    if isinstance(o, type):
        return o

    try:
        # python2
        closure = o.func_closure
    except AttributeError:
        pass

    try:
        # python3
        closure = o.__closure__
    except AttributeError:
        return

    if closure:
        for cell in closure:
            # avoid infinite recursion
            if cell.cell_contents is o:
                continue

            # check if the contents looks like a decorator; in that case
            # we need to go one level down into the dream, otherwise it
            # might just be a different closed-over variable, which we
            # can ignore.

            # Note: this favors supporting decorators defined without
            # @wraps to the detriment of function/method/class closures
            if looks_like_a_decorator(cell.cell_contents):
                undecd = undecorated(cell.cell_contents)
                if undecd:
                    return undecd
    return o


def looks_like_a_decorator(a):
    return isfunction(a) or ismethod(a) or isclass(a)
@@ -1,29 +0,0 @@
import platform
import os
import logging.handlers

from lbry import build_info, __version__ as lbrynet_version

log = logging.getLogger(__name__)


def get_platform() -> dict:
    os_system = platform.system()
    if os.environ and 'ANDROID_ARGUMENT' in os.environ:
        os_system = 'android'
    d = {
        "processor": platform.processor(),
        "python_version": platform.python_version(),
        "platform": platform.platform(),
        "os_release": platform.release(),
        "os_system": os_system,
        "lbrynet_version": lbrynet_version,
        "version": lbrynet_version,
        "build": build_info.BUILD,  # CI server sets this during build step
    }
    if d["os_system"] == "Linux":
        import distro  # pylint: disable=import-outside-toplevel
        d["distro"] = distro.info()
        d["desktop"] = os.environ.get('XDG_CURRENT_DESKTOP', 'Unknown')

    return d
@@ -54,11 +54,11 @@ class VideoFileAnalyzer:
    # We work around that issue here by using run_in_executor. Check it again in Python 3.8.
    async def _execute_ffmpeg(self, arguments):
        arguments = self._which_ffmpeg + " " + arguments
-       return await asyncio.get_event_loop().run_in_executor(None, self._execute, arguments, self._env_copy)
+       return await asyncio.get_running_loop().run_in_executor(None, self._execute, arguments, self._env_copy)

    async def _execute_ffprobe(self, arguments):
        arguments = self._which_ffprobe + " " + arguments
-       return await asyncio.get_event_loop().run_in_executor(None, self._execute, arguments, self._env_copy)
+       return await asyncio.get_running_loop().run_in_executor(None, self._execute, arguments, self._env_copy)

    async def _verify_executables(self):
        try:
@@ -69,8 +69,8 @@ class VideoFileAnalyzer:
            version = str(e)
        if code != 0 or not version.startswith("ffmpeg"):
            log.warning("Unable to run ffmpeg, but it was requested. Code: %d; Message: %s", code, version)
-           raise FileNotFoundError(f"Unable to locate or run ffmpeg or ffprobe. Please install FFmpeg "
-                                   f"and ensure that it is callable via PATH or conf.ffmpeg_path")
+           raise FileNotFoundError("Unable to locate or run ffmpeg or ffprobe. Please install FFmpeg "
+                                   "and ensure that it is callable via PATH or conf.ffmpeg_path")
        log.debug("Using %s at %s", version.splitlines()[0].split(" Copyright")[0], self._which_ffmpeg)
        return version
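The two hunks above swap asyncio.get_event_loop() for asyncio.get_running_loop(). Inside a coroutine both return the loop that is currently running, but get_running_loop() raises RuntimeError instead of silently creating a new loop when none is running. A minimal, self-contained check, not taken from the repository:

import asyncio

async def main():
    # inside a coroutine both calls refer to the running loop
    assert asyncio.get_running_loop() is asyncio.get_event_loop()

asyncio.run(main())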
@@ -1 +0,0 @@
from .claim import Claim
@@ -30,14 +30,10 @@ class Claim(Signable):
    COLLECTION = 'collection'
    REPOST = 'repost'

-   __slots__ = 'version',
+   __slots__ = ()

    message_class = ClaimMessage

-   def __init__(self, message=None):
-       super().__init__(message)
-       self.version = 2
-
    @property
    def claim_type(self) -> str:
        return self.message.WhichOneof('type')
@@ -139,6 +135,9 @@ class BaseClaim:
            field = getattr(self, l)
            if kwargs.pop(f'clear_{l}', False):
                del field[:]
-           items = kwargs.pop(l, None)
+           if l in ('tags', 'languages', 'locations'):
+               items = kwargs.pop(l[:-1], None)
+           else:
+               items = kwargs.pop(l, None)
            if items is not None:
                if isinstance(items, str):
@@ -151,6 +150,8 @@ class BaseClaim:
        for key, value in kwargs.items():
            setattr(self, key, value)

+       return self
+
    @property
    def title(self) -> str:
        return self.claim.message.title
@@ -267,6 +268,8 @@ class Stream(BaseClaim):

        super().update(**kwargs)

+       return self
+
    @property
    def author(self) -> str:
        return self.message.author
@@ -1,4 +1,5 @@
import json
+import warnings
from decimal import Decimal

from google.protobuf.message import DecodeError
@@ -8,6 +9,9 @@ from lbry.schema.types.v1.certificate_pb2 import KeyType
from lbry.schema.types.v1.fee_pb2 import Fee as FeeMessage


+warnings.filterwarnings("ignore", category=RuntimeWarning, module="lbry.schema.compat")
+
+
def from_old_json_schema(claim, payload: bytes):
    try:
        value = json.loads(payload)
@@ -1,7 +1,6 @@
import base64
import struct
from typing import List
-from binascii import hexlify
from itertools import chain

from lbry.error import ResolveCensoredError
@@ -16,46 +15,40 @@ BLOCKED = ErrorMessage.Code.Name(ErrorMessage.BLOCKED)
def set_reference(reference, claim_hash, rows):
    if claim_hash:
        for txo in rows:
-           if claim_hash == txo['claim_hash']:
-               reference.tx_hash = txo['txo_hash'][:32]
-               reference.nout = struct.unpack('<I', txo['txo_hash'][32:])[0]
-               reference.height = txo['height']
+           if claim_hash == txo.claim_hash:
+               reference.tx_hash = txo.tx_ref.hash
+               reference.nout = txo.position
+               reference.height = txo.spent_height
                return


class Censor:

-   __slots__ = 'streams', 'channels', 'censored', 'total'
+   SEARCH = 1
+   RESOLVE = 2

-   def __init__(self, streams: dict = None, channels: dict = None):
-       self.streams = streams or {}
-       self.channels = channels or {}
+   __slots__ = 'censor_type', 'censored'
+
+   def __init__(self, censor_type):
+       self.censor_type = censor_type
        self.censored = {}
-       self.total = 0
+
+   def apply(self, rows):
+       return [row for row in rows if not self.censor(row)]

    def censor(self, row) -> bool:
-       was_censored = False
-       for claim_hash, lookup in (
-               (row['claim_hash'], self.streams),
-               (row['claim_hash'], self.channels),
-               (row['channel_hash'], self.channels),
-               (row['reposted_claim_hash'], self.streams),
-               (row['reposted_claim_hash'], self.channels)):
-           censoring_channel_hash = lookup.get(claim_hash)
-           if censoring_channel_hash:
-               was_censored = True
-               self.censored.setdefault(censoring_channel_hash, 0)
-               self.censored[censoring_channel_hash] += 1
-               break
+       was_censored = (row['censor_type'] or 0) >= self.censor_type
        if was_censored:
-           self.total += 1
+           censoring_channel_hash = row['censoring_channel_hash']
+           self.censored.setdefault(censoring_channel_hash, set())
+           self.censored[censoring_channel_hash].add(row['tx_hash'])
        return was_censored

    def to_message(self, outputs: OutputsMessage, extra_txo_rows):
-       outputs.blocked_total = self.total
+       outputs.blocked_total = len(self.censored)
        for censoring_channel_hash, count in self.censored.items():
            blocked = outputs.blocked.add()
-           blocked.count = count
+           blocked.count = len(count)
            set_reference(blocked.channel, censoring_channel_hash, extra_txo_rows)
@@ -120,10 +113,10 @@ class Outputs:
                'expiration_height': claim.expiration_height,
                'effective_amount': claim.effective_amount,
                'support_amount': claim.support_amount,
-               'trending_group': claim.trending_group,
-               'trending_mixed': claim.trending_mixed,
-               'trending_local': claim.trending_local,
-               'trending_global': claim.trending_global,
+               'trend_group': claim.trending_group,
+               'trend_mixed': claim.trending_mixed,
+               'trend_local': claim.trending_local,
+               'trend_global': claim.trending_global,
            }
            if claim.HasField('channel'):
                txo.channel = tx_map[claim.channel.tx_hash].outputs[claim.channel.nout]
@@ -148,7 +141,7 @@ class Outputs:
        for txo_message in chain(outputs.txos, outputs.extra_txos):
            if txo_message.WhichOneof('meta') == 'error':
                continue
-           txs.add((hexlify(txo_message.tx_hash[::-1]).decode(), txo_message.height))
+           txs.add((txo_message.tx_hash, txo_message.height))
        return cls(
            outputs.txos, outputs.extra_txos, txs,
            outputs.offset, outputs.total,
@@ -185,26 +178,27 @@ class Outputs:
            txo_message.error.code = ErrorMessage.BLOCKED
            set_reference(txo_message.error.blocked.channel, txo.censor_hash, extra_txo_rows)
            return
-       txo_message.tx_hash = txo['txo_hash'][:32]
+       txo_message.tx_hash = txo.tx_ref.hash
-       txo_message.nout, = struct.unpack('<I', txo['txo_hash'][32:])
+       txo_message.nout = txo.position
-       txo_message.height = txo['height']
+       txo_message.height = txo.tx_ref.height
-       txo_message.claim.short_url = txo['short_url']
+       txo_message.claim.short_url = txo.meta['short_url']
-       txo_message.claim.reposted = txo['reposted']
+       txo_message.claim.reposted = txo.meta['reposted_count']
-       if txo['canonical_url'] is not None:
+       if txo.meta['canonical_url'] is not None:
-           txo_message.claim.canonical_url = txo['canonical_url']
+           txo_message.claim.canonical_url = txo.meta['canonical_url']
-       txo_message.claim.is_controlling = bool(txo['is_controlling'])
+       txo_message.claim.is_controlling = bool(txo.meta['takeover_height'])
-       if txo['last_take_over_height'] is not None:
+       if txo_message.claim.is_controlling:
-           txo_message.claim.take_over_height = txo['last_take_over_height']
+           txo_message.claim.take_over_height = txo.meta['takeover_height']
-       txo_message.claim.creation_height = txo['creation_height']
+       txo_message.claim.creation_height = txo.meta['creation_height']
-       txo_message.claim.activation_height = txo['activation_height']
+       txo_message.claim.activation_height = txo.meta['activation_height']
-       txo_message.claim.expiration_height = txo['expiration_height']
+       txo_message.claim.expiration_height = txo.meta['expiration_height']
-       if txo['claims_in_channel'] is not None:
+       if txo.meta['signed_claim_count'] is not None:
-           txo_message.claim.claims_in_channel = txo['claims_in_channel']
+           txo_message.claim.claims_in_channel = txo.meta['signed_claim_count']
-       txo_message.claim.effective_amount = txo['effective_amount']
+       txo_message.claim.effective_amount = txo.meta['staked_amount']
-       txo_message.claim.support_amount = txo['support_amount']
+       txo_message.claim.support_amount = txo.meta['staked_support_amount']
-       txo_message.claim.trending_group = txo['trending_group']
+       #txo_message.claim.trending_group = txo['trending_group']
-       txo_message.claim.trending_mixed = txo['trending_mixed']
+       #txo_message.claim.trending_mixed = txo['trending_mixed']
-       txo_message.claim.trending_local = txo['trending_local']
+       #txo_message.claim.trending_local = txo['trending_local']
-       txo_message.claim.trending_global = txo['trending_global']
+       #txo_message.claim.trending_global = txo['trending_global']
-       set_reference(txo_message.claim.channel, txo['channel_hash'], extra_txo_rows)
+       if txo.channel:
-       set_reference(txo_message.claim.repost, txo['reposted_claim_hash'], extra_txo_rows)
+           set_reference(txo_message.claim.channel, txo.claim.signing_channel_hash, extra_txo_rows)
+       #set_reference(txo_message.claim.repost, txo['reposted_claim_hash'], extra_txo_rows)
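The rewritten Censor above no longer carries stream and channel lookup maps; it compares each row's censor_type against a threshold (SEARCH=1, RESOLVE=2) and groups censored tx_hashes by censoring channel. A hedged usage sketch with made-up row dicts, not taken from the repository:

censor = Censor(Censor.SEARCH)
rows = [
    {'censor_type': 2, 'censoring_channel_hash': b'chan', 'tx_hash': b'tx1'},  # hypothetical rows
    {'censor_type': 0, 'censoring_channel_hash': None, 'tx_hash': b'tx2'},
]
kept = censor.apply(rows)
assert [row['tx_hash'] for row in kept] == [b'tx2']
assert censor.censored == {b'chan': {b'tx1'}}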
@@ -1,6 +1,19 @@
from lbry.schema.base import Signable
+from lbry.schema.types.v2.support_pb2 import Support as SupportMessage


class Support(Signable):
    __slots__ = ()
-   message_class = None  # TODO: add support protobufs
+   message_class = SupportMessage
+
+   def __init__(self, emoji='👍', message=None):
+       super().__init__(message)
+       self.emoji = emoji
+
+   @property
+   def emoji(self) -> str:
+       return self.message.emoji
+
+   @emoji.setter
+   def emoji(self, emoji: str):
+       self.message.emoji = emoji
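With the generated SupportMessage wired in, a Support can now carry an emoji. A hedged sketch, assuming the class above lives at lbry.schema.support and that Signable builds an empty message when none is passed:

from lbry.schema.support import Support   # module path assumed, not shown in this diff

support = Support(emoji='🚀')
assert support.emoji == '🚀'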
@@ -6,7 +6,11 @@ WEIRD_CHARS_RE = re.compile(r"[#!~]")


def normalize_tag(tag: str):
-   return MULTI_SPACE_RE.sub(' ', WEIRD_CHARS_RE.sub(' ', tag.lower().replace("'", ""))).strip()
+   return MULTI_SPACE_RE.sub(
+       ' ', WEIRD_CHARS_RE.sub(
+           ' ', tag.lower().replace("'", "").replace('\x00', '')
+       )
+   ).strip()


def clean_tags(tags: List[str]):
69  lbry/schema/types/v2/support_pb2.py  (new file)
@@ -0,0 +1,69 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: support.proto

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor.FileDescriptor(
  name='support.proto',
  package='pb',
  syntax='proto3',
  serialized_pb=_b('\n\rsupport.proto\x12\x02pb\"\x18\n\x07Support\x12\r\n\x05\x65moji\x18\x01 \x01(\tb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)


_SUPPORT = _descriptor.Descriptor(
  name='Support',
  full_name='pb.Support',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='emoji', full_name='pb.Support.emoji', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=21,
  serialized_end=45,
)

DESCRIPTOR.message_types_by_name['Support'] = _SUPPORT

Support = _reflection.GeneratedProtocolMessageType('Support', (_message.Message,), dict(
  DESCRIPTOR = _SUPPORT,
  __module__ = 'support_pb2'
  # @@protoc_insertion_point(class_scope:pb.Support)
  ))
_sym_db.RegisterMessage(Support)


# @@protoc_insertion_point(module_scope)
@@ -44,7 +44,7 @@ URL_REGEX = _create_url_regex()


def normalize_name(name):
-   return unicodedata.normalize('NFD', name).casefold()
+   return unicodedata.normalize('NFD', name).casefold().replace('\x00', '')


class PathSegment(NamedTuple):

5  lbry/service/__init__.py  (new file)
@@ -0,0 +1,5 @@
from .api import API, Client
from .base import Service
from .daemon import Daemon, jsonrpc_dumps_pretty
from .full_node import FullNode
from .light_client import LightClient

3497  lbry/service/api.py  (new file): diff suppressed because it is too large; additional changed files are not shown in this compare view.