Lex Berezhny 2020-06-19 14:28:34 -04:00
parent 18dc5fbc9f
commit d1a243247d
18 changed files with 2092 additions and 1372 deletions

View file

@ -50,7 +50,10 @@ class Block(NamedTuple):
timestamp=timestamp,
bits=bits,
nonce=nonce,
txs=[Transaction(height=height, position=i).deserialize(stream) for i in range(tx_count)]
txs=[
Transaction(height=height, position=i, timestamp=timestamp).deserialize(stream)
for i in range(tx_count)
]
)
@property

View file

@ -1,8 +1,11 @@
import os.path
import asyncio
import sqlite3
from typing import Optional
from typing import List, Optional
from concurrent.futures import ThreadPoolExecutor
from lbry.schema.url import normalize_name
from .bcd_data_stream import BCDataStream
FILES = [
@ -11,6 +14,27 @@ FILES = [
]
class FindShortestID:
__slots__ = 'short_id', 'new_id'
def __init__(self):
self.short_id = ''
self.new_id = None
def step(self, other_id, new_id):
other_id = other_id[::-1].hex()
if self.new_id is None:
self.new_id = new_id[::-1].hex()
for i in range(len(self.new_id)):
if other_id[i] != self.new_id[i]:
if i > len(self.short_id)-1:
self.short_id = self.new_id[:i+1]
break
def finalize(self):
return self.short_id
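FindShortestID is registered further down as a custom SQLite aggregate and used in the claim metadata query to compute the shortest hex prefix that tells a new claim ID apart from every other claim on the same node. A minimal sketch of how the aggregate behaves, not part of the commit (the in-memory table and IDs are made up; it assumes the FindShortestID class above is importable):
import sqlite3

conn = sqlite3.connect(":memory:")
conn.create_aggregate("find_shortest_id", 2, FindShortestID)
conn.execute("CREATE TABLE claim (claimid BLOB)")
# claim IDs are stored byte-reversed, as lbrycrd does
conn.execute("INSERT INTO claim VALUES (?)", (bytes.fromhex("abcdef")[::-1],))
new_id = bytes.fromhex("abcf01")[::-1]
prefix = conn.execute(
    "SELECT find_shortest_id(claimid, ?) FROM claim", (new_id,)
).fetchone()[0]
print(prefix)  # 'abcf' -- first prefix of the new ID that differs from 'abcdef'
That prefix is what sync_get_claim_metadata() later turns into short URLs of the form '@name#abcf'.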
class BlockchainDB:
def __init__(self, directory: str):
@ -30,6 +54,10 @@ class BlockchainDB:
self.connection.execute(
f"ATTACH DATABASE '{os.path.join(self.directory, file+'.sqlite')}' AS {file}"
)
self.connection.create_aggregate("find_shortest_id", 2, FindShortestID)
#self.connection.execute(
# "CREATE INDEX IF NOT EXISTS claim_originalheight ON claim (originalheight);"
#)
self.connection.row_factory = sqlite3.Row
async def open(self):
@ -48,88 +76,162 @@ class BlockchainDB:
self.executor.shutdown()
self.executor = None
async def commit(self):
await self.run_in_executor(self.connection.commit)
def sync_execute(self, sql: str, *args):
return self.connection.execute(sql, *args)
async def execute(self, sql, *args):
async def execute(self, sql: str, *args):
return await self.run_in_executor(self.sync_execute, sql, *args)
def sync_execute_fetchall(self, sql: str, *args):
return list(self.connection.execute(sql, *args).fetchall())
def sync_execute_fetchall(self, sql: str, *args) -> List[dict]:
return [dict(r) for r in self.connection.execute(sql, *args).fetchall()]
async def execute_fetchall(self, sql: str, *args):
async def execute_fetchall(self, sql: str, *args) -> List[dict]:
return await self.run_in_executor(self.sync_execute_fetchall, sql, *args)
def sync_get_block_files(self, file_number=None, above_height=None):
def sync_get_best_height(self) -> int:
sql = "SELECT MAX(height) FROM block_info"
return self.connection.execute(sql).fetchone()[0]
async def get_best_height(self) -> int:
return await self.run_in_executor(self.sync_get_best_height)
def sync_get_block_files(self, file_number: int = None, start_height: int = None) -> List[dict]:
sql = """
SELECT
file as file_number,
COUNT(hash) as blocks,
SUM(txcount) as txs,
MAX(height) as max_height
MAX(height) as best_height
FROM block_info
WHERE status&1 AND status&4
"""
args = ()
if file_number is not None and above_height is not None:
sql += "AND file = ? AND height > ?"
args = (file_number, above_height)
if file_number is not None and start_height is not None:
sql += "AND file = ? AND height >= ?"
args = (file_number, start_height)
return self.sync_execute_fetchall(sql + " GROUP BY file ORDER BY file ASC;", args)
async def get_block_files(self, file_number=None, above_height=None):
async def get_block_files(self, file_number: int = None, start_height: int = None) -> List[dict]:
return await self.run_in_executor(
self.sync_get_block_files, file_number, above_height
self.sync_get_block_files, file_number, start_height
)
def sync_get_blocks_in_file(self, block_file, above_height=-1):
def sync_get_blocks_in_file(self, block_file: int, start_height=0) -> List[dict]:
return self.sync_execute_fetchall(
"""
SELECT datapos as data_offset, height, hash as block_hash, txCount as txs
FROM block_info
WHERE file = ? AND height > ? AND status&1 AND status&4
WHERE file = ? AND height >= ? AND status&1 AND status&4
ORDER BY datapos ASC;
""", (block_file, above_height)
""", (block_file, start_height)
)
async def get_blocks_in_file(self, block_file, above_height=-1):
return await self.run_in_executor(self.sync_get_blocks_in_file, block_file, above_height)
async def get_blocks_in_file(self, block_file: int, start_height=0) -> List[dict]:
return await self.run_in_executor(self.sync_get_blocks_in_file, block_file, start_height)
def sync_get_claimtrie(self):
def sync_get_claim_support_txo_hashes(self, at_height: int) -> set:
return {
r['txID'] + BCDataStream.uint32.pack(r['txN'])
for r in self.connection.execute(
"""
SELECT txID, txN FROM claim WHERE updateHeight = ?
UNION
SELECT txID, txN FROM support WHERE blockHeight = ?
""", (at_height, at_height)
).fetchall()
}
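The keys built here are the raw transaction hash with the output position appended as a packed 4-byte integer; assuming BCDataStream.uint32 is a little-endian struct (as in Bitcoin-style serialization), the equivalent standalone computation is:
import struct

tx_hash = bytes.fromhex("aa" * 32)         # example transaction hash
txo_hash = tx_hash + struct.pack("<I", 1)  # output at position 1 of that tx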
def sync_get_takeover_count(self, start_height: int, end_height: int) -> int:
sql = "SELECT COUNT(*) FROM takeover WHERE height BETWEEN ? AND ?"
return self.connection.execute(sql, (start_height, end_height)).fetchone()[0]
async def get_takeover_count(self, start_height: int, end_height: int) -> int:
return await self.run_in_executor(self.sync_get_takeover_count, start_height, end_height)
def sync_get_takeovers(self, start_height: int, end_height: int) -> List[dict]:
return self.sync_execute_fetchall(
"""
SELECT
takeover.name AS normalized,
takeover.name,
takeover.claimID AS claim_hash,
takeover.height AS last_take_over_height,
originalHeight AS original_height,
updateHeight AS update_height,
validHeight AS valid_height,
activationHeight AS activation_height,
expirationHeight AS expiration_height
FROM takeover JOIN claim USING (claimID)
GROUP BY takeover.name HAVING MAX(height);
"""
takeover.height
FROM takeover
WHERE height BETWEEN ? AND ?
ORDER BY height, name
""", (start_height, end_height)
)
async def get_claimtrie(self):
return await self.run_in_executor(self.sync_get_claimtrie)
async def get_takeovers(self, start_height: int, end_height: int) -> List[dict]:
return await self.run_in_executor(self.sync_get_takeovers, start_height, end_height)
def sync_get_claims(self):
return self.sync_execute_fetchall(
def sync_get_claim_metadata_count(self, start_height: int, end_height: int) -> int:
sql = "SELECT COUNT(*) FROM claim WHERE originalHeight BETWEEN ? AND ?"
return self.connection.execute(sql, (start_height, end_height)).fetchone()[0]
async def get_claim_metadata_count(self, start_height: int, end_height: int) -> int:
return await self.run_in_executor(self.sync_get_claim_metadata_count, start_height, end_height)
def sync_get_claim_metadata(self, start_height: int, end_height: int) -> List[dict]:
return [{
"name": r["name"],
"claim_hash_": r["claimID"],
"activation_height": r["activationHeight"],
"expiration_height": r["expirationHeight"],
"takeover_height": r["takeoverHeight"],
"is_controlling": r["isControlling"],
"short_url": f'{normalize_name(r["name"].decode())}#{r["shortestID"] or r["claimID"][::-1].hex()[0]}',
"short_url_": f'{normalize_name(r["name"].decode())}#{r["shortestID"] or r["claimID"][::-1].hex()[0]}',
} for r in self.sync_execute_fetchall(
"""
SELECT
claimID AS claim_hash,
txID AS tx_hash,
txN AS position,
amount,
originalHeight AS original_height,
updateHeight AS update_height,
validHeight AS valid_height,
activationHeight AS activation_height,
expirationHeight AS expiration_height
FROM claims.claim
"""
)
name, claimID, activationHeight, expirationHeight,
(SELECT
CASE WHEN takeover.claimID = claim.claimID THEN takeover.height END
FROM takeover WHERE takeover.name = claim.name
ORDER BY height DESC LIMIT 1
) AS takeoverHeight,
(SELECT CASE WHEN takeover.claimID = claim.claimID THEN 1 ELSE 0 END
FROM takeover WHERE takeover.name = claim.name
ORDER BY height DESC LIMIT 1
) AS isControlling,
(SELECT find_shortest_id(c.claimid, claim.claimid) FROM claim AS c
WHERE
c.nodename = claim.nodename AND
c.originalheight <= claim.originalheight AND
c.claimid != claim.claimid
) AS shortestID
FROM claim
WHERE originalHeight BETWEEN ? AND ?
ORDER BY originalHeight
""", (start_height, end_height)
)]
async def get_claims(self):
return await self.run_in_executor(self.sync_get_claims)
async def get_claim_metadata(self, start_height: int, end_height: int) -> List[dict]:
return await self.run_in_executor(self.sync_get_claim_metadata, start_height, end_height)
def sync_get_support_metadata_count(self, start_height: int, end_height: int) -> int:
sql = "SELECT COUNT(*) FROM support WHERE blockHeight BETWEEN ? AND ?"
return self.connection.execute(sql, (start_height, end_height)).fetchone()[0]
async def get_support_metadata_count(self, start_height: int, end_height: int) -> int:
return await self.run_in_executor(self.sync_get_support_metadata_count, start_height, end_height)
def sync_get_support_metadata(self, start_height: int, end_height: int) -> List[dict]:
return [{
"name": r['name'],
"txo_hash_pk": r['txID'] + BCDataStream.uint32.pack(r['txN']),
"activation_height": r['activationHeight'],
"expiration_height": r['expirationHeight'],
} for r in self.sync_execute_fetchall(
"""
SELECT name, txid, txn, activationHeight, expirationHeight
FROM support WHERE blockHeight BETWEEN ? AND ?
""", (start_height, end_height)
)
]
async def get_support_metadata(self, start_height: int, end_height: int) -> List[dict]:
return await self.run_in_executor(self.sync_get_support_metadata, start_height, end_height)
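Every sync_*/async pair in this class follows the same idiom: the synchronous method does the blocking sqlite work and the async wrapper hands it to the executor thread. A stripped-down sketch of the idiom (class and names here are illustrative, not from the file):
import asyncio
from concurrent.futures import ThreadPoolExecutor

class ExampleDB:
    def __init__(self):
        self.executor = ThreadPoolExecutor(max_workers=1)

    def sync_get_value(self) -> int:
        return 42  # stand-in for a blocking sqlite query

    async def get_value(self) -> int:
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(self.executor, self.sync_get_value)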

View file

@ -443,6 +443,17 @@ class OutputScript(Script):
SUPPORT_CLAIM_OPCODES + PAY_SCRIPT_HASH.opcodes
))
SUPPORT_CLAIM_DATA_OPCODES = (
OP_SUPPORT_CLAIM, PUSH_SINGLE('claim_name'), PUSH_SINGLE('claim_id'), PUSH_SINGLE('support'),
OP_2DROP, OP_2DROP
)
SUPPORT_CLAIM_DATA_PUBKEY = Template('support_claim+data+pay_pubkey_hash', (
SUPPORT_CLAIM_DATA_OPCODES + PAY_PUBKEY_HASH.opcodes
))
SUPPORT_CLAIM_DATA_SCRIPT = Template('support_claim+data+pay_script_hash', (
SUPPORT_CLAIM_DATA_OPCODES + PAY_SCRIPT_HASH.opcodes
))
UPDATE_CLAIM_OPCODES = (
OP_UPDATE_CLAIM, PUSH_SINGLE('claim_name'), PUSH_SINGLE('claim_id'), PUSH_SINGLE('claim'),
OP_2DROP, OP_2DROP
@ -479,6 +490,8 @@ class OutputScript(Script):
CLAIM_NAME_SCRIPT,
SUPPORT_CLAIM_PUBKEY,
SUPPORT_CLAIM_SCRIPT,
SUPPORT_CLAIM_DATA_PUBKEY,
SUPPORT_CLAIM_DATA_SCRIPT,
UPDATE_CLAIM_PUBKEY,
UPDATE_CLAIM_SCRIPT,
SELL_CLAIM, SELL_SCRIPT,
@ -532,6 +545,16 @@ class OutputScript(Script):
'pubkey_hash': pubkey_hash
})
@classmethod
def pay_support_data_pubkey_hash(
cls, claim_name: bytes, claim_id: bytes, support, pubkey_hash: bytes):
return cls(template=cls.SUPPORT_CLAIM_DATA_PUBKEY, values={
'claim_name': claim_name,
'claim_id': claim_id,
'support': support,
'pubkey_hash': pubkey_hash
})
@classmethod
def sell_script(cls, price):
return cls(template=cls.SELL_SCRIPT, values={
@ -580,6 +603,10 @@ class OutputScript(Script):
def is_support_claim(self):
return self.template.name.startswith('support_claim+')
@property
def is_support_claim_data(self):
return self.template.name.startswith('support_claim+data+')
@property
def is_sell_claim(self):
return self.template.name.startswith('sell_claim+')
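The new support+data templates mirror the claim templates: OP_SUPPORT_CLAIM, pushes of the name, claim id and a serialized support payload, followed by the usual pay-to-pubkey-hash (or script-hash) suffix. A rough usage sketch of the new classmethod, not from the commit (the byte values are placeholders; the real 'support' argument is a serialized Support message):
script = OutputScript.pay_support_data_pubkey_hash(
    b'@example',                # claim_name
    bytes.fromhex('ab' * 20),   # claim_id (20 bytes)
    b'placeholder-support',     # support payload bytes
    bytes.fromhex('cd' * 20),   # pubkey_hash
)
assert script.is_support_claim and script.is_support_claim_data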

View file

@ -2,21 +2,27 @@ import os
import asyncio
import logging
from contextvars import ContextVar
from typing import Optional
from typing import Optional, Tuple, Set, NamedTuple
from sqlalchemy import func, bindparam
from sqlalchemy import func, bindparam, case, distinct, between
from sqlalchemy.future import select
from lbry.event import BroadcastSubscription
from lbry.service.base import Sync, BlockEvent
from lbry.db import Database, queries, TXO_TYPES
from lbry.db.tables import Claim, Claimtrie, TXO, TXI, Block as BlockTable
from lbry.db import Database, queries, TXO_TYPES, CLAIM_TYPE_CODES
from lbry.db.tables import Claim, Takeover, Support, TXO, TX, TXI, Block as BlockTable
from lbry.db.query_context import progress, context, Event
from lbry.db.utils import chunk
from lbry.db.queries import rows_to_txos
from lbry.db.sync import (
condition_spent_claims, condition_spent_supports,
select_missing_claims, select_stale_claims, select_missing_supports
)
from lbry.schema.url import normalize_name
from .lbrycrd import Lbrycrd
from .block import Block, create_block_filter
from .bcd_data_stream import BCDataStream
from .transaction import Output
log = logging.getLogger(__name__)
@ -33,14 +39,14 @@ def get_or_initialize_lbrycrd(ctx=None) -> Lbrycrd:
return chain
def process_block_file(block_file_number, current_height):
def process_block_file(block_file_number: int, starting_height: int, initial_sync: bool):
ctx = context()
chain = get_or_initialize_lbrycrd(ctx)
stop = ctx.stop_event
loader = ctx.get_bulk_loader()
with progress(Event.BLOCK_READ, 100) as p:
new_blocks = chain.db.sync_get_blocks_in_file(block_file_number, current_height)
new_blocks = chain.db.sync_get_blocks_in_file(block_file_number, starting_height)
if not new_blocks:
return -1
done, total, last_block_processed = 0, len(new_blocks), -1
@ -54,7 +60,9 @@ def process_block_file(block_file_number, current_height):
block_height = block_info['height']
fp.seek(block_info['data_offset'])
block = Block.from_data_stream(stream, block_height, block_file_number)
loader.add_block(block)
loader.add_block(
block, initial_sync and chain.db.sync_get_claim_support_txo_hashes(block_height)
)
last_block_processed = block_height
p.step(done)
@ -65,61 +73,351 @@ def process_block_file(block_file_number, current_height):
return last_block_processed
def process_claimtrie(heights):
def process_takeovers(starting_height: int, ending_height: int):
chain = get_or_initialize_lbrycrd()
with progress(Event.TAKEOVER_INSERT) as p:
p.start(chain.db.sync_get_takeover_count(
start_height=starting_height, end_height=ending_height
))
done, step_size = 0, 500
for offset in range(starting_height, ending_height+1, step_size):
takeovers = chain.db.sync_get_takeovers(
start_height=offset, end_height=min(offset+step_size, ending_height),
)
if takeovers:
p.ctx.execute(Takeover.insert(), takeovers)
done += len(takeovers)
p.step(done)
with progress(Event.TRIE_DELETE) as p:
def signature_validation(d: dict, row: dict, public_key) -> dict:
d['is_signature_valid'] = False
if Output.is_signature_valid(bytes(row['signature']), bytes(row['signature_digest']), public_key):
d['is_signature_valid'] = True
return d
def select_updated_channel_keys(starting_height, ending_height, *cols):
return (
select(*cols).select_from(Claim)
.where(
(Claim.c.claim_type == TXO_TYPES['channel']) &
between(Claim.c.public_key_height, starting_height, ending_height)
)
)
def get_updated_channel_key_count(ctx, starting_height, ending_height):
sql = select_updated_channel_keys(
starting_height, ending_height, func.count('*').label('total')
)
return ctx.fetchone(sql)['total']
def get_updated_channel_keys(ctx, starting_height, ending_height):
sql = select_updated_channel_keys(
starting_height, ending_height,
Claim.c.claim_hash, Claim.c.public_key, Claim.c.height
)
return ctx.fetchall(sql)
def get_signables_for_channel(ctx, table, pk, channel):
sql = (
select(pk, table.c.signature, table.c.signature_digest)
.where(table.c.channel_hash == channel['claim_hash'])
)
return ctx.fetchall(sql)
def select_unvalidated_signables(signable, starting_height: int, ending_height: int, *cols):
channel = Claim.alias('channel')
if len(cols) > 1:
cols += (channel.c.public_key,)
return (
select(*cols)
.select_from(signable.join(channel, signable.c.channel_hash == channel.c.claim_hash))
.where(
(signable.c.signature != None) &
(signable.c.is_signature_valid == False) &
between(signable.c.height, starting_height, ending_height)
)
)
def get_unvalidated_signable_count(ctx, signable, starting_height: int, ending_height: int):
sql = select_unvalidated_signables(
signable, starting_height, ending_height, func.count('*').label('total')
)
return ctx.fetchone(sql)['total']
def get_unvalidated_signables(ctx, signable, starting_height: int, ending_height: int, pk):
sql = select_unvalidated_signables(
signable, starting_height, ending_height,
pk, signable.c.signature, signable.c.signature_digest
)
return ctx.fetchall(sql)
class ClaimChanges(NamedTuple):
deleted_channels: Set[bytes]
channels_with_changed_claims: Set[bytes]
claims_with_changed_supports: Set[bytes]
def process_claims_and_supports():
with progress(Event.CLAIM_DELETE) as p:
channels_with_deleted_claims = {
r['channel_hash'] for r in p.ctx.fetchall(
select(distinct(Claim.c.channel_hash))
.where(condition_spent_claims(
list(set(CLAIM_TYPE_CODES) - {TXO_TYPES['channel']})
) & (Claim.c.channel_hash != None))
)
}
deleted_channels = {
r['claim_hash'] for r in p.ctx.fetchall(
select(distinct(Claim.c.claim_hash)).where(
(Claim.c.claim_type == TXO_TYPES['channel']) &
condition_spent_claims([TXO_TYPES['channel']])
)
)
}
p.start(1)
p.ctx.execute(Claimtrie.delete())
p.ctx.execute(Claim.delete().where(condition_spent_claims()))
with progress(Event.TRIE_UPDATE) as p, context().connection.begin():
trie = chain.db.sync_get_claimtrie()
p.start(len(trie))
done = 0
for chunk_size, chunk_rows in chunk(trie, 10000):
p.ctx.execute(
Claimtrie.insert(), [{
'normalized': r['normalized'],
'claim_hash': r['claim_hash'],
'last_take_over_height': r['last_take_over_height'],
} for r in chunk_rows]
with progress(Event.CLAIM_INSERT) as p:
channels_with_added_claims = set()
loader = p.ctx.get_bulk_loader()
for txo in rows_to_txos(p.ctx.fetchall(select_missing_claims)):
loader.add_claim(txo)
if txo.can_decode_claim and txo.claim.is_signed:
channels_with_added_claims.add(txo.claim.signing_channel_hash)
loader.save()
with progress(Event.CLAIM_UPDATE) as p:
loader = p.ctx.get_bulk_loader()
for claim in rows_to_txos(p.ctx.fetchall(select_stale_claims)):
loader.update_claim(claim)
loader.save()
with progress(Event.SUPPORT_DELETE) as p:
claims_with_deleted_supports = {
r['claim_hash'] for r in p.ctx.fetchall(
select(distinct(Support.c.claim_hash)).where(condition_spent_supports)
)
done += chunk_size
}
p.start(1)
sql = Support.delete().where(condition_spent_supports)
p.ctx.execute(sql)
with progress(Event.SUPPORT_INSERT) as p:
claims_with_added_supports = {
r['claim_hash'] for r in p.ctx.fetchall(
select(distinct(Support.c.claim_hash)).where(condition_spent_supports)
)
}
loader = p.ctx.get_bulk_loader()
for support in rows_to_txos(p.ctx.fetchall(select_missing_supports)):
loader.add_support(support)
loader.save()
return ClaimChanges(
deleted_channels=deleted_channels,
channels_with_changed_claims=(
channels_with_added_claims | channels_with_deleted_claims
),
claims_with_changed_supports=(
claims_with_added_supports | claims_with_deleted_supports
)
)
def process_metadata(starting_height: int, ending_height: int, initial_sync: bool):
# TODO:
# - claim updates to point to a different channel
# - deleting a channel should invalidate contained claim signatures
chain = get_or_initialize_lbrycrd()
channel = Claim.alias('channel')
changes = process_claims_and_supports() if not initial_sync else None
support_amount_calculator = (
select(func.coalesce(func.sum(Support.c.amount), 0) + Claim.c.amount)
.select_from(Support)
.where(Support.c.claim_hash == Claim.c.claim_hash)
.scalar_subquery()
)
supports_in_claim_calculator = (
select(func.count('*'))
.select_from(Support)
.where(Support.c.claim_hash == Claim.c.claim_hash)
.scalar_subquery()
)
stream = Claim.alias('stream')
claims_in_channel_calculator = (
select(func.count('*'))
.select_from(stream)
.where(stream.c.channel_hash == Claim.c.claim_hash)
.scalar_subquery()
)
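These are correlated scalar subqueries: each one is evaluated per claim row inside the UPDATE statements below. Paraphrased (not a literal dump of the compiled statement), the support_amount update amounts to SQL along these lines:
SUPPORT_AMOUNT_SQL = """
    UPDATE claim SET support_amount = (
        SELECT COALESCE(SUM(support.amount), 0) + claim.amount
        FROM support
        WHERE support.claim_hash = claim.claim_hash
    )
"""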
with progress(Event.CLAIM_META) as p:
p.start(chain.db.sync_get_claim_metadata_count(start_height=starting_height, end_height=ending_height))
claim_update_sql = (
Claim.update().where(Claim.c.claim_hash == bindparam('claim_hash_'))
.values(
canonical_url=case([(
((Claim.c.canonical_url == None) & (Claim.c.channel_hash != None)),
select(channel.c.short_url).select_from(channel)
.where(channel.c.claim_hash == Claim.c.channel_hash)
.scalar_subquery() + '/' + bindparam('short_url_')
)], else_=Claim.c.canonical_url),
support_amount=support_amount_calculator,
supports_in_claim_count=supports_in_claim_calculator,
claims_in_channel_count=case([(
(Claim.c.claim_type == TXO_TYPES['channel']), claims_in_channel_calculator
)], else_=0),
)
)
done, step_size = 0, 500
for offset in range(starting_height, ending_height+1, step_size):
claims = chain.db.sync_get_claim_metadata(
start_height=offset, end_height=min(offset+step_size, ending_height)
)
if claims:
p.ctx.execute(claim_update_sql, claims)
done += len(claims)
p.step(done)
with progress(Event.TRIE_UPDATE, 250) as p, context().connection.begin():
claims = chain.db.sync_get_claims()
p.start(len(claims))
done = 0
for record in claims:
p.ctx.execute(
if not initial_sync and changes.claims_with_changed_supports:
# covered by Event.CLAIM_META during initial_sync, then only run if supports change
with progress(Event.CLAIM_CALC) as p:
p.start(len(changes.claims_with_changed_supports))
sql = (
Claim.update()
.where(Claim.c.claim_hash == record['claim_hash'])
.where((Claim.c.claim_hash.in_(changes.claims_with_changed_supports)))
.values(
activation_height=record['activation_height'],
expiration_height=record['expiration_height']
support_amount=support_amount_calculator,
supports_in_claim_count=supports_in_claim_calculator,
)
)
done += 1
p.ctx.execute(sql)
if not initial_sync and changes.channels_with_changed_claims:
# covered by Event.CLAIM_META during initial_sync, then only run if claims are deleted
with progress(Event.CLAIM_CALC) as p:
p.start(len(changes.channels_with_changed_claims))
sql = (
Claim.update()
.where((Claim.c.claim_hash.in_(changes.channels_with_changed_claims)))
.values(claims_in_channel_count=claims_in_channel_calculator)
)
p.ctx.execute(sql)
if not initial_sync:
# covered by Event.CLAIM_META during initial_sync, otherwise loop over every block
with progress(Event.CLAIM_TRIE, 100) as p:
p.start(chain.db.sync_get_takeover_count(start_height=starting_height, end_height=ending_height))
for offset in range(starting_height, ending_height+1):
for takeover in chain.db.sync_get_takeovers(start_height=offset, end_height=offset):
update_claims = (
Claim.update()
.where(Claim.c.normalized == normalize_name(takeover['name'].decode()))
.values(
is_controlling=case(
[(Claim.c.claim_hash == takeover['claim_hash'], True)],
else_=False
),
takeover_height=case(
[(Claim.c.claim_hash == takeover['claim_hash'], takeover['height'])],
else_=None
),
activation_height=func.min(Claim.c.activation_height, takeover['height']),
)
)
p.ctx.execute(update_claims)
p.step(1)
# with progress(Event.SUPPORT_META) as p:
# p.start(chain.db.sync_get_support_metadata_count(start_height=starting_height, end_height=ending_height))
# done, step_size = 0, 500
# for offset in range(starting_height, ending_height+1, step_size):
# supports = chain.db.sync_get_support_metadata(
# start_height=offset, end_height=min(offset+step_size, ending_height)
# )
# if supports:
# p.ctx.execute(
# Support.update().where(Support.c.txo_hash == bindparam('txo_hash_pk')),
# supports
# )
# done += len(supports)
# p.step(done)
with progress(Event.CHANNEL_SIGN) as p:
p.start(get_updated_channel_key_count(p.ctx, starting_height, ending_height))
done, step_size = 0, 500
for offset in range(starting_height, ending_height+1, step_size):
channels = get_updated_channel_keys(p.ctx, offset, min(offset+step_size, ending_height))
for channel in channels:
claim_updates = []
for claim in get_signables_for_channel(p.ctx, Claim, Claim.c.claim_hash, channel):
claim_updates.append(
signature_validation({'pk': claim['claim_hash']}, claim, channel['public_key'])
)
if claim_updates:
p.ctx.execute(
Claim.update().where(Claim.c.claim_hash == bindparam('pk')), claim_updates
)
support_updates = []
for support in get_signables_for_channel(p.ctx, Support, Support.c.txo_hash, channel):
support_updates.append(
signature_validation({'pk': support['txo_hash']}, support, channel['public_key'])
)
if support_updates:
p.ctx.execute(
Support.update().where(Support.c.txo_hash == bindparam('pk')), support_updates
)
p.step(len(channels))
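The Claim.update()/Support.update() calls above lean on SQLAlchemy's executemany behaviour: the WHERE clause binds the 'pk' key from each dict, and the remaining keys that name real columns (here is_signature_valid, set by signature_validation) become the SET clause. A standalone sketch of that pattern with a throwaway table (names are hypothetical):
from sqlalchemy import table, column, bindparam, update

t = table("claim", column("claim_hash"), column("is_signature_valid"))
stmt = update(t).where(t.c.claim_hash == bindparam("pk"))
params = [
    {"pk": b"\x01" * 20, "is_signature_valid": True},
    {"pk": b"\x02" * 20, "is_signature_valid": False},
]
# executing stmt with params issues one UPDATE per dict:
#   UPDATE claim SET is_signature_valid=? WHERE claim_hash=?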
with progress(Event.CLAIM_SIGN) as p:
p.start(get_unvalidated_signable_count(p.ctx, Claim, starting_height, ending_height))
done, step_size = 0, 500
for offset in range(starting_height, ending_height+1, step_size):
claims = get_unvalidated_signables(
p.ctx, Claim, offset, min(offset+step_size, ending_height), Claim.c.claim_hash)
claim_updates = []
for claim in claims:
claim_updates.append(
signature_validation({'pk': claim['claim_hash']}, claim, claim['public_key'])
)
if claim_updates:
p.ctx.execute(
Claim.update().where(Claim.c.claim_hash == bindparam('pk')), claim_updates
)
p.step(done)
with context("effective amount update") as ctx:
support = TXO.alias('support')
effective_amount_update = (
Claim.update()
.where(Claim.c.activation_height <= heights[-1])
.values(
effective_amount=(
select(func.coalesce(func.sum(support.c.amount), 0) + Claim.c.amount)
.select_from(support).where(
(support.c.claim_hash == Claim.c.claim_hash) &
(support.c.txo_type == TXO_TYPES['support']) &
(support.c.txo_hash.notin_(select(TXI.c.txo_hash)))
).scalar_subquery()
with progress(Event.SUPPORT_SIGN) as p:
p.start(get_unvalidated_signable_count(p.ctx, Support, starting_height, ending_height))
done, step_size = 0, 500
for offset in range(starting_height, ending_height+1, step_size):
supports = get_unvalidated_signables(
p.ctx, Support, offset, min(offset+step_size, ending_height), Support.c.txo_hash)
support_updates = []
for support in supports:
support_updates.append(
signature_validation({'pk': support['txo_hash']}, support, support['public_key'])
)
if support_updates:
p.ctx.execute(
Support.update().where(Support.c.txo_hash == bindparam('pk')), support_updates
)
)
ctx.execute(effective_amount_update)
p.step(done)
def process_block_and_tx_filters():
@ -159,8 +457,14 @@ class BlockchainSync(Sync):
self.advance_loop_event = asyncio.Event()
async def start(self):
# initial advance as task so that it can be stop()'ed before finishing
self.advance_loop_task = asyncio.create_task(self.advance())
for _ in range(2):
# the initial sync can take a long time and new blocks may have been
# created while it was running; therefore, run a second sync after the
# first one finishes to pick up any of those new blocks.
# run advance as a task so that it can be stop()'ed if necessary.
self.advance_loop_task = asyncio.create_task(
self.advance(await self.db.needs_initial_sync())
)
await self.advance_loop_task
self.chain.subscribe()
self.advance_loop_task = asyncio.create_task(self.advance_loop())
@ -180,32 +484,33 @@ class BlockchainSync(Sync):
self.db.executor, f, *args
)
async def load_blocks(self):
async def load_blocks(self, initial_sync: bool) -> Optional[Tuple[int, int]]:
tasks = []
starting_height = None
tx_count = block_count = ending_height = 0
for file in await self.chain.db.get_block_files():
starting_height, ending_height = None, await self.chain.db.get_best_height()
tx_count = block_count = 0
for chain_file in await self.chain.db.get_block_files():
# block files may be read and saved out of order, so check each file
# individually to see if we have missing blocks
current_height = await self.db.get_best_height_for_file(file['file_number'])
if current_height == file['max_height']:
our_best_file_height = await self.db.get_best_block_height_for_file(chain_file['file_number'])
if our_best_file_height == chain_file['best_height']:
# we have all blocks in this file, skipping
continue
if -1 < current_height < file['max_height']:
if -1 < our_best_file_height < chain_file['best_height']:
# we have some blocks, need to figure out what we're missing
# call get_block_files again, limited to this file and to heights above our best block
file = (await self.chain.db.get_block_files(
file_number=file['file_number'], above_height=current_height
file_number=chain_file['file_number'], start_height=our_best_file_height+1
))[0]
tx_count += file['txs']
block_count += file['blocks']
tx_count += chain_file['txs']
block_count += chain_file['blocks']
starting_height = min(
current_height if starting_height is None else starting_height, current_height
our_best_file_height+1 if starting_height is None else starting_height, our_best_file_height+1
)
ending_height = max(ending_height, file['max_height'])
tasks.append(self.run(process_block_file, file['file_number'], current_height))
tasks.append(self.run(
process_block_file, chain_file['file_number'], our_best_file_height+1, initial_sync
))
if not tasks:
return None
return
await self._on_progress_controller.add({
"event": "blockchain.sync.start",
"data": {
@ -223,7 +528,7 @@ class BlockchainSync(Sync):
self.db.stop_event.set()
for future in pending:
future.cancel()
return None
return
best_height_processed = max(f.result() for f in done)
# putting event in queue instead of adding to progress_controller because
# we want this message to appear after all of the queued messages from workers
@ -234,16 +539,18 @@ class BlockchainSync(Sync):
))
return starting_height, best_height_processed
async def advance(self):
heights = await self.load_blocks()
if heights and heights[0] < heights[-1]:
await self.db.process_inputs(heights)
await self.db.process_claims(heights)
await self.db.process_supports(heights)
await self.run(process_claimtrie, heights)
async def process(self, starting_height: int, ending_height: int, initial_sync: bool):
await self.db.process_inputs_outputs()
await self.run(process_metadata, starting_height, ending_height, initial_sync)
if self.conf.spv_address_filters:
await self.run(process_block_and_tx_filters, heights)
await self._on_block_controller.add(BlockEvent(heights[1]))
await self.run(process_block_and_tx_filters)
async def advance(self, initial_sync=False):
heights = await self.load_blocks(initial_sync)
if heights:
starting_height, ending_height = heights
await self.process(starting_height, ending_height, initial_sync)
await self._on_block_controller.add(BlockEvent(ending_height))
async def advance_loop(self):
while True:

View file

@ -1,79 +0,0 @@
import os
import sqlite3
import asyncio
from typing import List
from .block import Block
from .lbrycrd import Lbrycrd
def sync_create_lbrycrd_databases(dir_path: str):
for file_name, ddl in DDL.items():
connection = sqlite3.connect(os.path.join(dir_path, file_name))
connection.executescript(ddl)
connection.close()
async def create_lbrycrd_databases(dir_path: str):
await asyncio.get_running_loop().run_in_executor(
None, sync_create_lbrycrd_databases, dir_path
)
async def add_block_to_lbrycrd(chain: Lbrycrd, block: Block, takeovers: List[str]):
for tx in block.txs:
for txo in tx.outputs:
if txo.is_claim:
await insert_claim(chain, block, tx, txo)
if txo.id in takeovers:
await insert_takeover(chain, block, tx, txo)
async def insert_claim(chain, block, tx, txo):
await chain.db.execute(
"""
INSERT OR REPLACE INTO claim (
claimID, name, nodeName, txID, txN, originalHeight, updateHeight, validHeight,
activationHeight, expirationHeight, amount
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 10000, ?)
""", (
txo.claim_hash, txo.claim_name, txo.claim_name, tx.hash, txo.position,
block.height, block.height, block.height, block.height, txo.amount
)
)
async def insert_takeover(chain, block, tx, txo):
await chain.db.execute(
"INSERT INTO takeover (name) VALUES (?)",
(txo.claim_name,)
)
# These are extracted by opening each of lbrycrd's latest sqlite databases and
# running the '.schema' command (see the sketch after the DDL below).
DDL = {
'claims.sqlite': """
CREATE TABLE node (name BLOB NOT NULL PRIMARY KEY, parent BLOB REFERENCES node(name) DEFERRABLE INITIALLY DEFERRED, hash BLOB);
CREATE TABLE claim (claimID BLOB NOT NULL PRIMARY KEY, name BLOB NOT NULL, nodeName BLOB NOT NULL REFERENCES node(name) DEFERRABLE INITIALLY DEFERRED, txID BLOB NOT NULL, txN INTEGER NOT NULL, originalHeight INTEGER NOT NULL, updateHeight INTEGER NOT NULL, validHeight INTEGER NOT NULL, activationHeight INTEGER NOT NULL, expirationHeight INTEGER NOT NULL, amount INTEGER NOT NULL);
CREATE TABLE support (txID BLOB NOT NULL, txN INTEGER NOT NULL, supportedClaimID BLOB NOT NULL, name BLOB NOT NULL, nodeName BLOB NOT NULL, blockHeight INTEGER NOT NULL, validHeight INTEGER NOT NULL, activationHeight INTEGER NOT NULL, expirationHeight INTEGER NOT NULL, amount INTEGER NOT NULL, PRIMARY KEY(txID, txN));
CREATE TABLE takeover (name BLOB NOT NULL, height INTEGER NOT NULL, claimID BLOB, PRIMARY KEY(name, height DESC));
CREATE INDEX node_hash_len_name ON node (hash, LENGTH(name) DESC);
CREATE INDEX node_parent ON node (parent);
CREATE INDEX takeover_height ON takeover (height);
CREATE INDEX claim_activationHeight ON claim (activationHeight);
CREATE INDEX claim_expirationHeight ON claim (expirationHeight);
CREATE INDEX claim_nodeName ON claim (nodeName);
CREATE INDEX support_supportedClaimID ON support (supportedClaimID);
CREATE INDEX support_activationHeight ON support (activationHeight);
CREATE INDEX support_expirationHeight ON support (expirationHeight);
CREATE INDEX support_nodeName ON support (nodeName);
""",
'block_index.sqlite': """
CREATE TABLE block_file (file INTEGER NOT NULL PRIMARY KEY, blocks INTEGER NOT NULL, size INTEGER NOT NULL, undoSize INTEGER NOT NULL, heightFirst INTEGER NOT NULL, heightLast INTEGER NOT NULL, timeFirst INTEGER NOT NULL, timeLast INTEGER NOT NULL );
CREATE TABLE block_info (hash BLOB NOT NULL PRIMARY KEY, prevHash BLOB NOT NULL, height INTEGER NOT NULL, file INTEGER NOT NULL, dataPos INTEGER NOT NULL, undoPos INTEGER NOT NULL, txCount INTEGER NOT NULL, status INTEGER NOT NULL, version INTEGER NOT NULL, rootTxHash BLOB NOT NULL, rootTrieHash BLOB NOT NULL, time INTEGER NOT NULL, bits INTEGER NOT NULL, nonce INTEGER NOT NULL );
CREATE TABLE tx_to_block (txID BLOB NOT NULL PRIMARY KEY, file INTEGER NOT NULL, blockPos INTEGER NOT NULL, txPos INTEGER NOT NULL);
CREATE TABLE flag (name TEXT NOT NULL PRIMARY KEY, value INTEGER NOT NULL);
CREATE INDEX block_info_height ON block_info (height);
""",
}
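The same schema dump described in the comment above the DDL can be produced programmatically, e.g. (a sketch; the file name is an example):
import sqlite3

conn = sqlite3.connect("claims.sqlite")
ddl = ";\n".join(
    row[0] for row in conn.execute(
        "SELECT sql FROM sqlite_master WHERE sql IS NOT NULL"
    )
)
print(ddl)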

View file

@ -2,8 +2,9 @@ import struct
import hashlib
import logging
import asyncio
from datetime import date
from binascii import hexlify, unhexlify
from typing import List, Iterable, Optional
from typing import List, Iterable, Optional, Union
import ecdsa
from cryptography.hazmat.backends import default_backend
@ -17,7 +18,9 @@ from lbry.crypto.hash import hash160, sha256
from lbry.crypto.base58 import Base58
from lbry.schema.url import normalize_name
from lbry.schema.claim import Claim
from lbry.schema.base import Signable
from lbry.schema.purchase import Purchase
from lbry.schema.support import Support
from .script import InputScript, OutputScript
from .bcd_data_stream import BCDataStream
@ -100,7 +103,7 @@ class InputOutput:
__slots__ = 'tx_ref', 'position'
def __init__(self, tx_ref: TXRef = None, position: int = None) -> None:
def __init__(self, tx_ref: Union[TXRef, TXRefImmutable] = None, position: int = None) -> None:
self.tx_ref = tx_ref
self.position = position
@ -196,7 +199,7 @@ class Output(InputOutput):
'amount', 'script', 'is_internal_transfer', 'is_spent', 'is_my_output', 'is_my_input',
'channel', 'private_key', 'meta', 'sent_supports', 'sent_tips', 'received_tips',
'purchase', 'purchased_claim', 'purchase_receipt',
'reposted_claim', 'claims',
'reposted_claim', 'claims', '_signable'
)
def __init__(self, amount: int, script: OutputScript,
@ -224,6 +227,7 @@ class Output(InputOutput):
self.purchase_receipt: 'Output' = None # txo representing purchase receipt for this claim
self.reposted_claim: 'Output' = None # txo representing claim being reposted
self.claims: List['Output'] = None # resolved claims for collection
self._signable: Optional[Signable] = None
self.meta = {}
def update_annotations(self, annotated: 'Output'):
@ -299,6 +303,10 @@ class Output(InputOutput):
def is_support(self) -> bool:
return self.script.is_support_claim
@property
def is_support_data(self) -> bool:
return self.script.is_support_claim_data
@property
def claim_hash(self) -> bytes:
if self.script.is_claim_name:
@ -334,9 +342,33 @@ class Output(InputOutput):
def can_decode_claim(self):
try:
return self.claim
except: # pylint: disable=bare-except
except Exception:
return False
@property
def support(self) -> Support:
if self.is_support_data:
if not isinstance(self.script.values['support'], Support):
self.script.values['support'] = Support.from_bytes(self.script.values['support'])
return self.script.values['support']
raise ValueError('Only supports with data can be represented as Supports.')
@property
def can_decode_support(self):
try:
return self.support
except Exception:
return False
@property
def signable(self) -> Signable:
if self._signable is None:
if self.is_claim:
self._signable = self.claim
elif self.is_support_data:
self._signable = self.support
return self._signable
@property
def permanent_url(self) -> str:
if self.script.is_claim_involved:
@ -348,22 +380,22 @@ class Output(InputOutput):
return self.private_key is not None
def get_signature_digest(self, ledger):
if self.claim.unsigned_payload:
if self.signable.unsigned_payload:
pieces = [
Base58.decode(self.get_address(ledger)),
self.claim.unsigned_payload,
self.claim.signing_channel_hash[::-1]
self.signable.unsigned_payload,
self.signable.signing_channel_hash[::-1]
]
else:
pieces = [
self.tx_ref.tx.inputs[0].txo_ref.hash,
self.claim.signing_channel_hash,
self.claim.to_message_bytes()
self.signable.signing_channel_hash,
self.signable.to_message_bytes()
]
return sha256(b''.join(pieces))
def get_encoded_signature(self):
signature = hexlify(self.claim.signature)
signature = hexlify(self.signable.signature)
r = int(signature[:int(len(signature)/2)], 16)
s = int(signature[int(len(signature)/2):], 16)
return ecdsa.util.sigencode_der(r, s, len(signature)*4)
@ -390,13 +422,13 @@ class Output(InputOutput):
def sign(self, channel: 'Output', first_input_id=None):
self.channel = channel
self.claim.signing_channel_hash = channel.claim_hash
self.signable.signing_channel_hash = channel.claim_hash
digest = sha256(b''.join([
first_input_id or self.tx_ref.tx.inputs[0].txo_ref.hash,
self.claim.signing_channel_hash,
self.claim.to_message_bytes()
self.signable.signing_channel_hash,
self.signable.to_message_bytes()
]))
self.claim.signature = channel.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
self.signable.signature = channel.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
self.script.generate()
def clear_signature(self):
@ -443,6 +475,14 @@ class Output(InputOutput):
)
return cls(amount, script)
@classmethod
def pay_support_data_pubkey_hash(
cls, amount: int, claim_name: str, claim_id: str, support: Support, pubkey_hash: bytes) -> 'Output':
script = OutputScript.pay_support_data_pubkey_hash(
claim_name.encode(), unhexlify(claim_id)[::-1], support, pubkey_hash
)
return cls(amount, script)
@classmethod
def add_purchase_data(cls, purchase: Purchase) -> 'Output':
script = OutputScript.return_data(purchase)
@ -501,7 +541,7 @@ class Output(InputOutput):
class Transaction:
def __init__(self, raw=None, version: int = 1, locktime: int = 0, is_verified: bool = False,
height: int = -2, position: int = -1, julian_day: int = None) -> None:
height: int = -2, position: int = -1, timestamp: int = 0) -> None:
self._raw = raw
self._raw_sans_segwit = None
self.is_segwit_flag = 0
@ -519,7 +559,8 @@ class Transaction:
# +num: confirmed in a specific block (height)
self.height = height
self.position = position
self._day = julian_day
self.timestamp = timestamp
self._day: int = 0
if raw is not None:
self.deserialize()
@ -546,9 +587,10 @@ class Transaction:
def hash(self):
return self.ref.hash
def get_ordinal_day(self, ledger):
if self._day is None and self.height > 0:
self._day = ledger.headers.estimated_date(self.height).toordinal()
@property
def day(self):
if not self._day and self.timestamp > 0:
self._day = date.fromtimestamp(self.timestamp).toordinal()
return self._day
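The day property now derives the ordinal day directly from the block timestamp instead of estimating a date from headers; in isolation the computation is simply:
from datetime import date

timestamp = 1592592000  # example UNIX time from a block header
ordinal_day = date.fromtimestamp(timestamp).toordinal()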
@property

View file

@ -109,6 +109,12 @@ class Database:
ledger = RegTestLedger(conf)
return cls(ledger)
@classmethod
def temp_sqlite(cls):
from lbry import Config, Ledger # pylint: disable=import-outside-toplevel
conf = Config.with_same_dir(tempfile.mkdtemp())
return cls(Ledger(conf))
@classmethod
def in_memory(cls):
from lbry import Config, Ledger # pylint: disable=import-outside-toplevel
@ -173,20 +179,20 @@ class Database:
async def execute_fetchall(self, sql):
return await self.run_in_executor(q.execute_fetchall, sql)
async def process_inputs(self, heights):
return await self.run_in_executor(sync.process_inputs, heights)
async def process_inputs_outputs(self):
return await self.run_in_executor(sync.process_inputs_outputs)
async def process_claims(self, heights):
return await self.run_in_executor(sync.process_claims, heights)
async def process_all_things_after_sync(self):
return await self.run_in_executor(sync.process_all_things_after_sync)
async def process_supports(self, heights):
return await self.run_in_executor(sync.process_supports, heights)
async def needs_initial_sync(self) -> bool:
return (await self.get_best_tx_height()) == -1
async def get_best_height(self) -> int:
return await self.run_in_executor(q.get_best_height)
async def get_best_tx_height(self) -> int:
return await self.run_in_executor(q.get_best_tx_height)
async def get_best_height_for_file(self, file_number) -> int:
return await self.run_in_executor(q.get_best_height_for_file, file_number)
async def get_best_block_height_for_file(self, file_number) -> int:
return await self.run_in_executor(q.get_best_block_height_for_file, file_number)
async def get_blocks_without_filters(self):
return await self.run_in_executor(q.get_blocks_without_filters)
@ -203,6 +209,9 @@ class Database:
async def get_transaction_address_filters(self, block_hash):
return await self.run_in_executor(q.get_transaction_address_filters, block_hash)
async def insert_block(self, block):
return await self.run_in_executor(q.insert_block, block)
async def insert_transaction(self, block_hash, tx):
return await self.run_in_executor(q.insert_transaction, block_hash, tx)
@ -263,7 +272,9 @@ class Database:
return await self.fetch_result(q.get_purchases, **constraints)
async def search_claims(self, **constraints) -> Result[Output]:
claims, total, censor = await self.run_in_executor(q.search, **constraints)
#assert set(constraints).issubset(SEARCH_PARAMS), \
# f"Search query contains invalid arguments: {set(constraints).difference(SEARCH_PARAMS)}"
claims, total, censor = await self.run_in_executor(q.search_claims, **constraints)
return Result(claims, total, censor)
async def get_txo_sum(self, **constraints) -> int:
@ -285,7 +296,9 @@ class Database:
return await self.get_utxos(txo_type=TXO_TYPES['support'], **constraints)
async def get_claims(self, **constraints) -> Result[Output]:
txos = await self.fetch_result(q.get_claims, **constraints)
if 'txo_type' not in constraints:
constraints['txo_type'] = CLAIM_TYPES
txos = await self.fetch_result(q.get_txos, **constraints)
if 'wallet' in constraints:
await add_channel_keys_to_txo_results(constraints['wallet'].accounts, txos)
return txos

View file

@ -21,13 +21,13 @@ from .utils import query, in_account_ids
from .query_context import context
from .constants import (
TXO_TYPES, CLAIM_TYPE_CODES, STREAM_TYPES, ATTRIBUTE_ARRAY_MAX_LENGTH,
SEARCH_PARAMS, SEARCH_INTEGER_PARAMS, SEARCH_ORDER_FIELDS
SEARCH_INTEGER_PARAMS, SEARCH_ORDER_FIELDS
)
from .tables import (
metadata,
SCHEMA_VERSION, Version,
Block, TX, TXO, TXI, txi_join_account, txo_join_account,
Claim, Claimtrie,
Claim, Support, Takeover,
PubkeyAddress, AccountAddress
)
@ -58,6 +58,10 @@ def check_version_and_create_tables():
ctx.execute(text("ALTER TABLE block DISABLE TRIGGER ALL;"))
def insert_block(block):
context().get_bulk_loader().add_block(block).save()
def insert_transaction(block_hash, tx):
context().get_bulk_loader().add_transaction(block_hash, tx).save()
@ -70,13 +74,13 @@ def execute_fetchall(sql):
return context().fetchall(text(sql))
def get_best_height():
def get_best_tx_height():
return context().fetchone(
select(func.coalesce(func.max(TX.c.height), -1).label('total')).select_from(TX)
)['total']
select(func.coalesce(func.max(TX.c.height), -1).label('height')).select_from(TX)
)['height']
def get_best_height_for_file(file_number):
def get_best_block_height_for_file(file_number):
return context().fetchone(
select(func.coalesce(func.max(Block.c.height), -1).label('height'))
.select_from(Block)
@ -154,6 +158,33 @@ def release_all_outputs(account_id):
)
def get_takeover_names(above_height, limit_height, offset, limit):
return context().fetchall(
select(
Takeover.c.normalized.label('_name'),
func.max(Takeover.c.height).label('_height'),
)
.where((Takeover.c.height < above_height) & (Takeover.c.height >= limit_height))
.group_by(Takeover.c.normalized)
.limit(limit).offset(offset)
)
def get_takeovers(above_height, limit_height, offset, limit):
return context().fetchall(
select(
Takeover.c.normalized,
Takeover.c.claim_hash,
Takeover.c.height,
)
.select_from(Takeover)
.where((Takeover.c.height < above_height) & (Takeover.c.height >= limit_height))
.group_by(Takeover.c.normalized)
.limit(limit).offset(offset)
)
def select_transactions(cols, account_ids=None, **constraints):
s: Select = select(*cols).select_from(TX)
if not {'tx_hash', 'tx_hash__in'}.intersection(constraints):
@ -259,11 +290,22 @@ def get_transaction_count(**constraints):
return count[0]['total'] or 0
BASE_SELECT_TXO_COLUMNS = [
TX.c.tx_hash, TX.c.raw, TX.c.height, TX.c.position.label('tx_position'),
TX.c.is_verified, TX.c.timestamp,
TXO.c.txo_type, TXO.c.position.label('txo_position'), TXO.c.amount, TXO.c.is_spent,
TXO.c.script_offset, TXO.c.script_length,
]
def select_txos(
cols, account_ids=None, is_my_input=None, is_my_output=True,
is_my_input_or_output=None, exclude_internal_transfers=False,
include_is_spent=False, include_is_my_input=False,
is_spent=None, spent=None, is_claim_list=False, **constraints):
cols=None, account_ids=None, is_my_input=None,
is_my_output=True, is_my_input_or_output=None, exclude_internal_transfers=False,
include_is_my_input=False, claim_id_not_in_claim_table=None,
txo_id_not_in_claim_table=None, txo_id_not_in_support_table=None,
**constraints) -> Select:
if cols is None:
cols = BASE_SELECT_TXO_COLUMNS
s: Select = select(*cols)
if account_ids:
my_addresses = select(AccountAddress.c.address).where(in_account_ids(account_ids))
@ -301,22 +343,61 @@ def select_txos(
(TXI.c.address.notin_(my_addresses))
)
joins = TXO.join(TX)
tables = [TXO, TX]
if spent is None:
spent = TXI.alias('spent')
if is_spent:
s = s.where(spent.c.txo_hash != None)
elif is_spent is False:
s = s.where((spent.c.txo_hash == None) & (TXO.c.is_reserved == False))
if include_is_spent or is_spent is not None:
joins = joins.join(spent, spent.c.txo_hash == TXO.c.txo_hash, isouter=True)
if constraints.get('is_spent', None) is False:
s = s.where((TXO.c.is_spent == False) & (TXO.c.is_reserved == False))
if include_is_my_input:
joins = joins.join(TXI, (TXI.c.position == 0) & (TXI.c.tx_hash == TXO.c.tx_hash), isouter=True)
if is_claim_list:
tables.append(Claim)
joins = joins.join(Claim)
s = s.select_from(joins)
return context().fetchall(query(tables, s, **constraints))
if claim_id_not_in_claim_table:
s = s.where(TXO.c.claim_hash.notin_(select(Claim.c.claim_hash)))
elif txo_id_not_in_claim_table:
s = s.where(TXO.c.txo_hash.notin_(select(Claim.c.txo_hash)))
elif txo_id_not_in_support_table:
s = s.where(TXO.c.txo_hash.notin_(select(Support.c.txo_hash)))
return query([TXO, TX], s.select_from(joins), **constraints)
META_ATTRS = (
'activation_height', 'takeover_height', 'support_amount', 'creation_height',
'short_url', 'canonical_url', 'claims_in_channel_count', 'supports_in_claim_count',
)
def rows_to_txos(rows: List[dict], include_tx=True) -> List[Output]:
txos = []
tx_cache = {}
for row in rows:
if include_tx:
if row['tx_hash'] not in tx_cache:
tx_cache[row['tx_hash']] = Transaction(
row['raw'], height=row['height'], position=row['tx_position'],
is_verified=bool(row['is_verified']),
)
txo = tx_cache[row['tx_hash']].outputs[row['txo_position']]
else:
source = row['raw'][row['script_offset']:row['script_offset']+row['script_length']]
txo = Output(
amount=row['amount'],
script=OutputScript(source),
tx_ref=TXRefImmutable.from_hash(row['tx_hash'], row['height']),
position=row['txo_position'],
)
txo.is_spent = bool(row['is_spent'])
if 'is_my_input' in row:
txo.is_my_input = bool(row['is_my_input'])
if 'is_my_output' in row:
txo.is_my_output = bool(row['is_my_output'])
if 'is_my_input' in row and 'is_my_output' in row:
if txo.is_my_input and txo.is_my_output and row['txo_type'] == TXO_TYPES['other']:
txo.is_internal_transfer = True
else:
txo.is_internal_transfer = False
if 'received_tips' in row:
txo.received_tips = row['received_tips']
for attr in META_ATTRS:
if attr in row:
txo.meta[attr] = row[attr]
txos.append(txo)
return txos
def get_txos(no_tx=False, include_total=False, **constraints) -> Tuple[List[Output], Optional[int]]:
@ -326,12 +407,8 @@ def get_txos(no_tx=False, include_total=False, **constraints) -> Tuple[List[Outp
include_is_my_output = constraints.pop('include_is_my_output', False)
include_received_tips = constraints.pop('include_received_tips', False)
select_columns = [
TX.c.tx_hash, TX.c.raw, TX.c.height, TX.c.position.label('tx_position'), TX.c.is_verified,
TXO.c.txo_type, TXO.c.position.label('txo_position'), TXO.c.amount,
TXO.c.script_offset, TXO.c.script_length,
select_columns = BASE_SELECT_TXO_COLUMNS + [
TXO.c.claim_name
]
my_accounts = None
@ -376,40 +453,8 @@ def get_txos(no_tx=False, include_total=False, **constraints) -> Tuple[List[Outp
elif constraints.get('order_by', None) == 'none':
del constraints['order_by']
rows = select_txos(select_columns, spent=spent, **constraints)
txs = {}
txos = []
for row in rows:
if no_tx:
source = row['raw'][row['script_offset']:row['script_offset']+row['script_length']]
txo = Output(
amount=row['amount'],
script=OutputScript(source),
tx_ref=TXRefImmutable.from_hash(row['tx_hash'], row['height']),
position=row['txo_position']
)
else:
if row['tx_hash'] not in txs:
txs[row['tx_hash']] = Transaction(
row['raw'], height=row['height'], position=row['tx_position'],
is_verified=bool(row['is_verified'])
)
txo = txs[row['tx_hash']].outputs[row['txo_position']]
if include_is_spent:
txo.is_spent = bool(row['is_spent'])
if include_is_my_input:
txo.is_my_input = bool(row['is_my_input'])
if include_is_my_output:
txo.is_my_output = bool(row['is_my_output'])
if include_is_my_input and include_is_my_output:
if txo.is_my_input and txo.is_my_output and row['txo_type'] == TXO_TYPES['other']:
txo.is_internal_transfer = True
else:
txo.is_internal_transfer = False
if include_received_tips:
txo.received_tips = row['received_tips']
txos.append(txo)
rows = context().fetchall(select_txos(select_columns, spent=spent, **constraints))
txos = rows_to_txos(rows, not no_tx)
channel_hashes = set()
for txo in txos:
@ -445,13 +490,13 @@ def _clean_txo_constraints_for_aggregation(constraints):
def get_txo_count(**constraints):
_clean_txo_constraints_for_aggregation(constraints)
count = select_txos([func.count().label('total')], **constraints)
count = context().fetchall(select_txos([func.count().label('total')], **constraints))
return count[0]['total'] or 0
def get_txo_sum(**constraints):
_clean_txo_constraints_for_aggregation(constraints)
result = select_txos([func.sum(TXO.c.amount).label('total')], **constraints)
result = context().fetchall(select_txos([func.sum(TXO.c.amount).label('total')], **constraints))
return result[0]['total'] or 0
@ -475,124 +520,33 @@ def get_txo_plot(start_day=None, days_back=0, end_day=None, days_after=None, **c
constraints['day__lte'] = date.fromisoformat(end_day).toordinal()
elif days_after is not None:
constraints['day__lte'] = constraints['day__gte'] + days_after
plot = select_txos(
plot = context().fetchall(select_txos(
[TX.c.day, func.sum(TXO.c.amount).label('total')],
group_by='day', order_by='day', **constraints
)
))
for row in plot:
row['day'] = date.fromordinal(row['day'])
return plot
def get_purchases(**constraints) -> Tuple[List[Output], Optional[int]]:
accounts = constraints.pop('accounts', None)
assert accounts, "'accounts' argument required to find purchases"
if not {'purchased_claim_hash', 'purchased_claim_hash__in'}.intersection(constraints):
constraints['purchased_claim_hash__is_not_null'] = True
constraints['tx_hash__in'] = (
select(TXI.c.tx_hash).select_from(txi_join_account).where(in_account_ids(accounts))
)
txs, count = get_transactions(**constraints)
return [tx.outputs[0] for tx in txs], count
BASE_SELECT_CLAIM_COLUMNS = BASE_SELECT_TXO_COLUMNS + [
Claim.c.activation_height,
Claim.c.takeover_height,
Claim.c.creation_height,
Claim.c.is_controlling,
Claim.c.channel_hash,
Claim.c.reposted_claim_hash,
Claim.c.short_url,
Claim.c.canonical_url,
Claim.c.claims_in_channel_count,
Claim.c.support_amount,
Claim.c.supports_in_claim_count,
]
def select_addresses(cols, **constraints):
return context().fetchall(query(
[AccountAddress, PubkeyAddress],
select(*cols).select_from(PubkeyAddress.join(AccountAddress)),
**constraints
))
def get_addresses(cols=None, include_total=False, **constraints) -> Tuple[List[dict], Optional[int]]:
def select_claims(cols: List = None, for_count=False, **constraints) -> Select:
if cols is None:
cols = (
PubkeyAddress.c.address,
PubkeyAddress.c.used_times,
AccountAddress.c.account,
AccountAddress.c.chain,
AccountAddress.c.pubkey,
AccountAddress.c.chain_code,
AccountAddress.c.n,
AccountAddress.c.depth
)
return (
select_addresses(cols, **constraints),
get_address_count(**constraints) if include_total else None
)
def get_address_count(**constraints):
count = select_addresses([func.count().label('total')], **constraints)
return count[0]['total'] or 0
def get_all_addresses(self):
return context().execute(select(PubkeyAddress.c.address))
def add_keys(account, chain, pubkeys):
c = context()
c.execute(
c.insert_or_ignore(PubkeyAddress)
.values([{'address': k.address} for k in pubkeys])
)
c.execute(
c.insert_or_ignore(AccountAddress)
.values([{
'account': account.id,
'address': k.address,
'chain': chain,
'pubkey': k.pubkey_bytes,
'chain_code': k.chain_code,
'n': k.n,
'depth': k.depth
} for k in pubkeys])
)
def get_supports_summary(self, **constraints):
return get_txos(
txo_type=TXO_TYPES['support'],
is_spent=False, is_my_output=True,
include_is_my_input=True,
no_tx=True,
**constraints
)
def search_to_bytes(constraints) -> Union[bytes, Tuple[bytes, Dict]]:
return Outputs.to_bytes(*search(**constraints))
def resolve_to_bytes(urls) -> Union[bytes, Tuple[bytes, Dict]]:
return Outputs.to_bytes(*resolve(urls))
def execute_censored(sql, row_offset: int, row_limit: int, censor: Censor) -> List:
ctx = context()
return ctx.fetchall(sql)
# c = ctx.db.cursor()
# def row_filter(cursor, row):
# nonlocal row_offset
# #row = row_factory(cursor, row)
# if len(row) > 1 and censor.censor(row):
# return
# if row_offset:
# row_offset -= 1
# return
# return row
# c.setrowtrace(row_filter)
# i, rows = 0, []
# for row in c.execute(sql):
# i += 1
# rows.append(row)
# if i >= row_limit:
# break
# return rows
def claims_query(cols, for_count=False, **constraints) -> Tuple[str, Dict]:
cols = BASE_SELECT_CLAIM_COLUMNS
if 'order_by' in constraints:
order_by_parts = constraints['order_by']
if isinstance(order_by_parts, str):
@ -624,10 +578,6 @@ def claims_query(cols, for_count=False, **constraints) -> Tuple[str, Dict]:
value = Decimal(value)*1000
constraints[f'{constraint}{postfix}'] = int(value)
if constraints.pop('is_controlling', False):
if {'sequence', 'amount_order'}.isdisjoint(constraints):
for_count = False
constraints['Claimtrie.claim_hash__is_not_null'] = ''
if 'sequence' in constraints:
constraints['order_by'] = 'activation_height ASC'
constraints['offset'] = int(constraints.pop('sequence')) - 1
@ -724,74 +674,29 @@ def claims_query(cols, for_count=False, **constraints) -> Tuple[str, Dict]:
# TODO: fix
constraints["search"] = constraints.pop("text")
return query(
[Claim, Claimtrie],
select(*cols).select_from(Claim.join(Claimtrie, isouter=True).join(TXO).join(TX)),
**constraints
)
joins = Claim.join(TXO).join(TX)
return query([Claim], select(*cols).select_from(joins), **constraints)
def select_claims(censor: Censor, cols: List, for_count=False, **constraints) -> List:
if 'channel' in constraints:
channel_url = constraints.pop('channel')
match = resolve_url(channel_url)
if isinstance(match, dict):
constraints['channel_hash'] = match['claim_hash']
else:
return [{'row_count': 0}] if cols == 'count(*) as row_count' else []
row_offset = constraints.pop('offset', 0)
row_limit = constraints.pop('limit', 20)
return execute_censored(
claims_query(cols, for_count, **constraints),
row_offset, row_limit, censor
)
def search_claims(**constraints) -> Tuple[List[Output], Optional[int], Optional[Censor]]:
total = None
if not constraints.pop('no_totals', False):
total = search_claim_count(**constraints)
constraints['offset'] = abs(constraints.get('offset', 0))
constraints['limit'] = min(abs(constraints.get('limit', 10)), 50)
ctx = context()
search_censor = ctx.get_search_censor()
rows = context().fetchall(select_claims(**constraints))
txos = rows_to_txos(rows, include_tx=False)
return txos, total, search_censor
def count_claims(**constraints) -> int:
def search_claim_count(**constraints) -> int:
constraints.pop('offset', None)
constraints.pop('limit', None)
constraints.pop('order_by', None)
count = select_claims(Censor(), [func.count().label('row_count')], for_count=True, **constraints)
return count[0]['row_count']
def search_claims(censor: Censor, **constraints) -> List:
return select_claims(
censor, [
Claimtrie.c.claim_hash.label('is_controlling'),
Claimtrie.c.last_take_over_height,
TX.c.raw,
TX.c.height,
TX.c.tx_hash,
TXO.c.script_offset,
TXO.c.script_length,
TXO.c.amount,
TXO.c.position.label('txo_position'),
Claim.c.claim_hash,
Claim.c.txo_hash,
# Claim.c.claims_in_channel,
# Claim.c.reposted,
# Claim.c.height,
# Claim.c.creation_height,
# Claim.c.activation_height,
# Claim.c.expiration_height,
# Claim.c.effective_amount,
# Claim.c.support_amount,
# Claim.c.trending_group,
# Claim.c.trending_mixed,
# Claim.c.trending_local,
# Claim.c.trending_global,
# Claim.c.short_url,
# Claim.c.canonical_url,
Claim.c.channel_hash,
Claim.c.reposted_claim_hash,
# Claim.c.signature_valid
], **constraints
)
def get_claims(**constraints) -> Tuple[List[Output], Optional[int]]:
return get_txos(no_tx=True, is_claim_list=True, **constraints)
count = context().fetchall(select_claims([func.count().label('total')], **constraints))
return count[0]['total'] or 0
def _get_referenced_rows(txo_rows: List[dict], censor_channels: List[bytes]):
@@ -815,43 +720,81 @@ def _get_referenced_rows(txo_rows: List[dict], censor_channels: List[bytes]):
return channel_txos + reposted_txos
def old_search(**constraints) -> Tuple[List, List, int, int, Censor]:
assert set(constraints).issubset(SEARCH_PARAMS), \
f"Search query contains invalid arguments: {set(constraints).difference(SEARCH_PARAMS)}"
total = None
if not constraints.pop('no_totals', False):
total = count_claims(**constraints)
constraints['offset'] = abs(constraints.get('offset', 0))
constraints['limit'] = min(abs(constraints.get('limit', 10)), 50)
ctx = context()
search_censor = ctx.get_search_censor()
txo_rows = search_claims(search_censor, **constraints)
extra_txo_rows = _get_referenced_rows(txo_rows, search_censor.censored.keys())
return txo_rows, extra_txo_rows, constraints['offset'], total, search_censor
def search(**constraints) -> Tuple[List, int, Censor]:
assert set(constraints).issubset(SEARCH_PARAMS), \
f"Search query contains invalid arguments: {set(constraints).difference(SEARCH_PARAMS)}"
total = None
if not constraints.pop('no_totals', False):
total = count_claims(**constraints)
constraints['offset'] = abs(constraints.get('offset', 0))
constraints['limit'] = min(abs(constraints.get('limit', 10)), 50)
ctx = context()
search_censor = ctx.get_search_censor()
txos = []
for row in search_claims(search_censor, **constraints):
source = row['raw'][row['script_offset']:row['script_offset']+row['script_length']]
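# the output script is sliced straight out of the raw transaction bytes, so no full tx deserialization is needed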
txo = Output(
amount=row['amount'],
script=OutputScript(source),
tx_ref=TXRefImmutable.from_hash(row['tx_hash'], row['height']),
position=row['txo_position']
def get_purchases(**constraints) -> Tuple[List[Output], Optional[int]]:
accounts = constraints.pop('accounts', None)
assert accounts, "'accounts' argument required to find purchases"
if not {'purchased_claim_hash', 'purchased_claim_hash__in'}.intersection(constraints):
constraints['purchased_claim_hash__is_not_null'] = True
constraints['tx_hash__in'] = (
select(TXI.c.tx_hash).select_from(txi_join_account).where(in_account_ids(accounts))
)
txs, count = get_transactions(**constraints)
return [tx.outputs[0] for tx in txs], count
def select_addresses(cols, **constraints):
return context().fetchall(query(
[AccountAddress, PubkeyAddress],
select(*cols).select_from(PubkeyAddress.join(AccountAddress)),
**constraints
))
def get_addresses(cols=None, include_total=False, **constraints) -> Tuple[List[dict], Optional[int]]:
if cols is None:
cols = (
PubkeyAddress.c.address,
PubkeyAddress.c.used_times,
AccountAddress.c.account,
AccountAddress.c.chain,
AccountAddress.c.pubkey,
AccountAddress.c.chain_code,
AccountAddress.c.n,
AccountAddress.c.depth
)
return (
select_addresses(cols, **constraints),
get_address_count(**constraints) if include_total else None
)
def get_address_count(**constraints):
count = select_addresses([func.count().label('total')], **constraints)
return count[0]['total'] or 0
def get_all_addresses(self):
return context().execute(select(PubkeyAddress.c.address))
def add_keys(account, chain, pubkeys):
c = context()
c.execute(
c.insert_or_ignore(PubkeyAddress)
.values([{'address': k.address} for k in pubkeys])
)
c.execute(
c.insert_or_ignore(AccountAddress)
.values([{
'account': account.id,
'address': k.address,
'chain': chain,
'pubkey': k.pubkey_bytes,
'chain_code': k.chain_code,
'n': k.n,
'depth': k.depth
} for k in pubkeys])
)
def get_supports_summary(self, **constraints):
return get_txos(
txo_type=TXO_TYPES['support'],
is_spent=False, is_my_output=True,
include_is_my_input=True,
no_tx=True,
**constraints
)
txos.append(txo)
#extra_txo_rows = _get_referenced_rows(txo_rows, search_censor.censored.keys())
return txos, total, search_censor
def resolve(urls) -> Tuple[List, List]:


@@ -3,11 +3,11 @@ import time
import multiprocessing as mp
from enum import Enum
from decimal import Decimal
from typing import Dict, List, Optional
from typing import Dict, List, Optional, Tuple
from dataclasses import dataclass
from contextvars import ContextVar
from sqlalchemy import create_engine, inspect
from sqlalchemy import create_engine, inspect, bindparam, case
from sqlalchemy.engine import Engine, Connection
from lbry.event import EventQueuePublisher
@@ -18,7 +18,7 @@ from lbry.schema.result import Censor
from lbry.schema.mime_types import guess_stream_type
from .utils import pg_insert, chunk
from .tables import Block, TX, TXO, TXI, Claim, Tag, Claimtrie, Support
from .tables import Block, TX, TXO, TXI, Claim, Tag, Takeover, Support
from .constants import TXO_TYPES, STREAM_TYPES
@@ -163,7 +163,7 @@ class ProgressUnit(Enum):
TASKS = "tasks", None
BLOCKS = "blocks", Block
TXS = "txs", TX
TRIE = "trie", Claimtrie
TAKEOVERS = "takeovers", Takeover
TXIS = "txis", TXI
CLAIMS = "claims", Claim
SUPPORTS = "supports", Support
@@ -182,17 +182,22 @@ class Event(Enum):
BLOCK_READ = "blockchain.sync.block.read", ProgressUnit.BLOCKS
BLOCK_SAVE = "blockchain.sync.block.save", ProgressUnit.TXS
BLOCK_DONE = "blockchain.sync.block.done", ProgressUnit.TASKS
TRIE_DELETE = "blockchain.sync.trie.delete", ProgressUnit.TRIE
TRIE_UPDATE = "blockchain.sync.trie.update", ProgressUnit.TRIE
TRIE_INSERT = "blockchain.sync.trie.insert", ProgressUnit.TRIE
CLAIM_META = "blockchain.sync.claim.update", ProgressUnit.CLAIMS
CLAIM_CALC = "blockchain.sync.claim.totals", ProgressUnit.CLAIMS
CLAIM_TRIE = "blockchain.sync.claim.trie", ProgressUnit.TAKEOVERS
CLAIM_SIGN = "blockchain.sync.claim.signatures", ProgressUnit.CLAIMS
SUPPORT_META = "blockchain.sync.support.update", ProgressUnit.SUPPORTS
SUPPORT_SIGN = "blockchain.sync.support.signatures", ProgressUnit.SUPPORTS
CHANNEL_SIGN = "blockchain.sync.channel.signatures", ProgressUnit.CLAIMS
TRENDING_CALC = "blockchain.sync.trending", ProgressUnit.BLOCKS
TAKEOVER_INSERT = "blockchain.sync.takeover.insert", ProgressUnit.TAKEOVERS
# full node + light client sync events
INPUT_UPDATE = "db.sync.input", ProgressUnit.TXIS
CLAIM_DELETE = "db.sync.claim.delete", ProgressUnit.CLAIMS
CLAIM_UPDATE = "db.sync.claim.update", ProgressUnit.CLAIMS
CLAIM_INSERT = "db.sync.claim.insert", ProgressUnit.CLAIMS
CLAIM_UPDATE = "db.sync.claim.update", ProgressUnit.CLAIMS
SUPPORT_DELETE = "db.sync.support.delete", ProgressUnit.SUPPORTS
SUPPORT_UPDATE = "db.sync.support.update", ProgressUnit.SUPPORTS
SUPPORT_INSERT = "db.sync.support.insert", ProgressUnit.SUPPORTS
def __new__(cls, value, unit: ProgressUnit):
@@ -222,6 +227,10 @@ class ProgressPublisher(EventQueuePublisher):
return d
class BreakProgress(Exception):
"""Break out of progress when total is 0."""
class ProgressContext:
def __init__(self, ctx: QueryContext, event: Event, step_size=1):
@@ -237,10 +246,14 @@ class ProgressContext:
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type == BreakProgress:
return True
self.ctx.message_queue.put(self.get_event_args(self.total))
return self.ctx.__exit__(exc_type, exc_val, exc_tb)
def start(self, total, extra=None):
if not total:
raise BreakProgress
self.total = total
if extra is not None:
self.extra = extra
@@ -248,6 +261,8 @@ class ProgressContext:
def step(self, done):
send_condition = (
# skip no-op steps (nothing done yet or nothing to do)
done != 0 and self.total != 0 and
# enforce step rate
(self.step_size == 1 or done % self.step_size == 0) and
# deduplicate finish event by not sending a step where done == total
@@ -280,20 +295,24 @@ class BulkLoader:
self.txs = []
self.txos = []
self.txis = []
self.supports = []
self.claims = []
self.tags = []
self.update_claims = []
self.delete_tags = []
@staticmethod
def block_to_row(block):
def block_to_row(block: Block) -> dict:
return {
'block_hash': block.block_hash,
'previous_hash': block.prev_block_hash,
'file_number': block.file_number,
'height': 0 if block.is_first_block else block.height,
'timestamp': block.timestamp,
}
@staticmethod
def tx_to_row(block_hash: bytes, tx: Transaction):
def tx_to_row(block_hash: bytes, tx: Transaction) -> dict:
row = {
'tx_hash': tx.hash,
'block_hash': block_hash,
@@ -301,8 +320,8 @@ class BulkLoader:
'height': tx.height,
'position': tx.position,
'is_verified': tx.is_verified,
# TODO: fix
# 'day': tx.get_ordinal_day(self.db.ledger),
'timestamp': tx.timestamp,
'day': tx.day,
'purchased_claim_hash': None,
}
txos = tx.outputs
@@ -312,14 +331,14 @@ class BulkLoader:
return row
@staticmethod
def txi_to_row(tx: Transaction, txi: Input):
def txi_to_row(tx: Transaction, txi: Input) -> dict:
return {
'tx_hash': tx.hash,
'txo_hash': txi.txo_ref.hash,
'position': txi.position,
}
def txo_to_row(self, tx: Transaction, txo: Output):
def txo_to_row(self, tx: Transaction, txo: Output) -> dict:
row = {
'tx_hash': tx.hash,
'txo_hash': txo.hash,
@@ -345,7 +364,6 @@ class BulkLoader:
row['channel_hash'] = claim.signing_channel_hash
else:
row['txo_type'] = TXO_TYPES['stream']
#self.add_claim(txo)
elif txo.is_support:
row['txo_type'] = TXO_TYPES['support']
elif txo.purchase is not None:
@@ -367,28 +385,13 @@ class BulkLoader:
pass
return row
def add_block(self, block):
self.blocks.append(self.block_to_row(block))
for tx in block.txs:
self.add_transaction(block.block_hash, tx)
return self
def add_transaction(self, block_hash: bytes, tx: Transaction):
self.txs.append(self.tx_to_row(block_hash, tx))
for txi in tx.inputs:
if txi.coinbase is None:
self.txis.append(self.txi_to_row(tx, txi))
for txo in tx.outputs:
self.txos.append(self.txo_to_row(tx, txo))
return self
def add_claim(self, txo):
def claim_to_rows(self, txo: Output) -> Tuple[dict, List]:
try:
assert txo.claim_name
assert txo.normalized_name
except Exception:
#self.logger.exception(f"Could not decode claim name for {tx.id}:{txo.position}.")
return
return {}, []
tx = txo.tx_ref.tx
claim_hash = txo.claim_hash
claim_record = {
@@ -400,10 +403,8 @@ class BulkLoader:
'txo_hash': txo.ref.hash,
'tx_position': tx.position,
'amount': txo.amount,
'timestamp': 0, # TODO: fix
'creation_timestamp': 0, # TODO: fix
'height': tx.height,
'creation_height': tx.height,
'timestamp': tx.timestamp,
'release_time': None,
'title': None,
'author': None,
@@ -418,16 +419,19 @@ class BulkLoader:
# reposts
'reposted_claim_hash': None,
# claims which are channels
'public_key_bytes': None,
'public_key': None,
'public_key_hash': None,
# signed claims
'channel_hash': None,
'signature': None,
'signature_digest': None,
}
self.claims.append(claim_record)
try:
claim = txo.claim
except Exception:
#self.logger.exception(f"Could not parse claim protobuf for {tx.id}:{txo.position}.")
return
return claim_record, []
if claim.is_stream:
claim_record['claim_type'] = TXO_TYPES['stream']
@@ -453,24 +457,96 @@ class BulkLoader:
claim_record['reposted_claim_hash'] = claim.repost.reference.claim_hash
elif claim.is_channel:
claim_record['claim_type'] = TXO_TYPES['channel']
claim_record['public_key_bytes'] = txo.claim.channel.public_key_bytes
claim_record['public_key'] = claim.channel.public_key_bytes
claim_record['public_key_hash'] = self.ledger.address_to_hash160(
self.ledger.public_key_to_address(txo.claim.channel.public_key_bytes)
self.ledger.public_key_to_address(claim.channel.public_key_bytes)
)
if claim.is_signed:
claim_record['channel_hash'] = claim.signing_channel_hash
claim_record['signature'] = txo.get_encoded_signature()
claim_record['signature_digest'] = txo.get_signature_digest(None)
for tag in clean_tags(claim.message.tags):
self.tags.append({'claim_hash': claim_hash, 'tag': tag})
tags = [
{'claim_hash': claim_hash, 'tag': tag} for tag in clean_tags(claim.message.tags)
]
return claim_record, tags
def add_block(self, block: Block, add_claims_supports: set = None):
self.blocks.append(self.block_to_row(block))
for tx in block.txs:
self.add_transaction(block.block_hash, tx, add_claims_supports)
return self
def add_transaction(self, block_hash: bytes, tx: Transaction, add_claims_supports: set = None):
self.txs.append(self.tx_to_row(block_hash, tx))
for txi in tx.inputs:
if txi.coinbase is None:
self.txis.append(self.txi_to_row(tx, txi))
for txo in tx.outputs:
self.txos.append(self.txo_to_row(tx, txo))
if add_claims_supports:
if txo.is_support and txo.hash in add_claims_supports:
self.add_support(txo)
elif txo.is_claim and txo.hash in add_claims_supports:
self.add_claim(txo)
return self
def add_support(self, txo: Output):
tx = txo.tx_ref.tx
claim_hash = txo.claim_hash
support_record = {
'claim_hash': claim_hash,
'address': txo.get_address(self.ledger),
'txo_hash': txo.ref.hash,
'tx_position': tx.position,
'amount': txo.amount,
'height': tx.height,
}
self.supports.append(support_record)
support = txo.can_decode_support
if support:
support_record['emoji'] = support.emoji
if support.is_signed:
support_record['channel_hash'] = support.signing_channel_hash
def add_claim(self, txo: Output):
claim, tags = self.claim_to_rows(txo)
if claim:
tx = txo.tx_ref.tx
claim['public_key_height'] = tx.height
if txo.script.is_claim_name:
claim['creation_height'] = tx.height
claim['creation_timestamp'] = tx.timestamp
self.claims.append(claim)
self.tags.extend(tags)
return self
def update_claim(self, txo: Output):
claim, tags = self.claim_to_rows(txo)
if claim:
claim['claim_hash_pk'] = claim.pop('claim_hash')
self.update_claims.append(claim)
self.delete_tags.append({'claim_hash_pk': claim['claim_hash_pk']})
self.tags.extend(tags)
return self
def save(self, batch_size=10000):
queries = (
(Block, self.blocks),
(TX, self.txs),
(TXO, self.txos),
(TXI, self.txis),
(Claim, self.claims),
(Tag, self.tags),
(Block.insert(), self.blocks),
(TX.insert(), self.txs),
(TXO.insert(), self.txos),
(TXI.insert(), self.txis),
(Claim.insert(), self.claims),
(Tag.delete().where(Tag.c.claim_hash == bindparam('claim_hash_pk')), self.delete_tags),
(Claim.update()
.where(Claim.c.claim_hash == bindparam('claim_hash_pk'))
.values(public_key_height=case(
[(Claim.c.public_key_hash != bindparam('public_key_hash'), bindparam('height'))],
else_=Claim.c.public_key_height
)), self.update_claims),
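# the CASE above advances public_key_height to the bound height only when the stored
# public_key_hash differs from the incoming value; otherwise the previous height is kept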
(Tag.insert(), self.tags),
(Support.insert(), self.supports),
)
p = self.ctx.current_progress
@@ -478,10 +554,9 @@ class BulkLoader:
if p:
unit_table = p.event.unit.table
progress_total, row_total = 0, sum(len(q[1]) for q in queries)
for table, rows in queries:
if table == unit_table:
progress_total = len(rows)
break
for sql, rows in queries:
if sql.table == unit_table:
progress_total += len(rows)
if not progress_total:
assert row_total == 0, "Rows used for progress are empty but other rows present."
return
@@ -489,10 +564,9 @@ class BulkLoader:
p.start(progress_total)
execute = self.ctx.connection.execute
for table, rows in queries:
sql = table.insert()
for chunk_size, chunk_rows in chunk(rows, batch_size):
execute(sql, list(chunk_rows))
for sql, rows in queries:
for chunk_rows in chunk(rows, batch_size):
execute(sql, chunk_rows)
if p:
done += int(chunk_size/row_scale)
done += int(len(chunk_rows)/row_scale)
p.step(done)
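Each entry in the queries tuple is now a prepared statement paired with its row dicts, and the loop above feeds them to the connection one fixed-size batch at a time. A self-contained sketch of the same batching idea using the standard sqlite3 module (the table and rows below are invented purely for illustration; the real statements come from the tuple above):

import sqlite3
from itertools import islice

def chunk(rows, step):  # mirrors the chunk() helper modified later in this diff
    it, total = iter(rows), len(rows)
    for _ in range(0, total, step):
        yield list(islice(it, step))
        total -= step

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE tx (tx_hash TEXT, height INTEGER)")
rows = [{'tx_hash': f'hash{i}', 'height': i} for i in range(25)]
sql = "INSERT INTO tx (tx_hash, height) VALUES (:tx_hash, :height)"
for batch in chunk(rows, 10):     # batches of 10, 10, 5
    conn.executemany(sql, batch)  # one round trip per batch
print(conn.execute("SELECT COUNT(*) FROM tx").fetchone()[0])  # 25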


@@ -1,48 +1,128 @@
# pylint: disable=singleton-comparison
from sqlalchemy.future import select
from .constants import CLAIM_TYPE_CODES
from .queries import get_txos
from .query_context import progress, Event
from .tables import (
TXO, TXI,
Claim
from lbry.db.constants import CLAIM_TYPE_CODES, TXO_TYPES
from lbry.db.queries import select_txos, rows_to_txos
from lbry.db.query_context import progress, Event
from lbry.db.tables import (
TXO, TXI, Claim, Support
)
def process_inputs(heights):
def process_all_things_after_sync():
process_inputs_outputs()
process_supports()
process_claims()
def process_inputs_outputs():
with progress(Event.INPUT_UPDATE) as p:
p.start(2)
if p.ctx.is_sqlite:
address_query = select(TXO.c.address).where(TXI.c.txo_hash == TXO.c.txo_hash)
sql = (
set_addresses = (
TXI.update()
.values(address=address_query.scalar_subquery())
.where(TXI.c.address == None)
)
else:
sql = (
set_addresses = (
TXI.update()
.values({TXI.c.address: TXO.c.address})
.where((TXI.c.address == None) & (TXI.c.txo_hash == TXO.c.txo_hash))
)
# 1. Update TXIs to have the address of the TXO they are spending.
p.ctx.execute(set_addresses)
p.step(1)
# 2. Update spent TXOs setting is_spent = True
set_is_spent = (
TXO.update()
.values({TXO.c.is_spent: True})
.where(
(TXO.c.is_spent == False) &
(TXO.c.txo_hash.in_(select(TXI.c.txo_hash)))
)
)
p.ctx.execute(set_is_spent)
p.step(2)
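Both steps are single set-based UPDATE statements, so the pass costs one scan per table no matter how many rows changed. A self-contained sketch of the same two statements against a throwaway in-memory SQLite schema (the miniature tables below are illustrative only; the real definitions live in lbry.db.tables):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript("""
    CREATE TABLE txo (txo_hash TEXT PRIMARY KEY, address TEXT, is_spent BOOLEAN DEFAULT 0);
    CREATE TABLE txi (txo_hash TEXT, address TEXT);
    INSERT INTO txo VALUES ('aa', 'addr1', 0), ('bb', 'addr2', 0);
    INSERT INTO txi VALUES ('aa', NULL);
""")
# 1. backfill each TXI's address from the TXO it spends (correlated subquery, as in the sqlite branch above)
conn.execute(
    "UPDATE txi SET address = (SELECT address FROM txo WHERE txo.txo_hash = txi.txo_hash) "
    "WHERE txi.address IS NULL"
)
# 2. mark every TXO referenced by a TXI as spent
conn.execute(
    "UPDATE txo SET is_spent = 1 "
    "WHERE is_spent = 0 AND txo_hash IN (SELECT txo_hash FROM txi)"
)
print(conn.execute("SELECT address FROM txi").fetchone())                               # ('addr1',)
print(conn.execute("SELECT txo_hash, is_spent FROM txo ORDER BY txo_hash").fetchall())  # [('aa', 1), ('bb', 0)]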
def condition_spent_claims(claim_type: list = None):
if claim_type is not None:
if len(claim_type) == 0:
raise ValueError("Missing 'claim_type'.")
if len(claim_type) == 1:
type_filter = TXO.c.txo_type == claim_type[0]
else:
type_filter = TXO.c.txo_type.in_(claim_type)
else:
type_filter = TXO.c.txo_type.in_(CLAIM_TYPE_CODES)
return Claim.c.claim_hash.notin_(
select(TXO.c.claim_hash).where(type_filter & (TXO.c.is_spent == False))
)
# find UTXOs that are claims and their claim_id is not in claim table,
# this means they need to be inserted
select_missing_claims = (
select_txos(txo_type__in=CLAIM_TYPE_CODES, is_spent=False, claim_id_not_in_claim_table=True)
)
# find UTXOs that are claims and their txo_id is not in claim table,
# this ONLY works if you first ran select_missing_claims and inserted the missing claims; after that,
# all claim_ids should match between the TXO and Claim tables, but txo_hashes will not match for
# claims that are not up-to-date
select_stale_claims = (
select_txos(txo_type__in=CLAIM_TYPE_CODES, is_spent=False, txo_id_not_in_claim_table=True)
)
condition_spent_supports = (
Support.c.txo_hash.notin_(
select(TXO.c.txo_hash).where(
(TXO.c.txo_type == TXO_TYPES['support']) &
(TXO.c.is_spent == False)
)
)
)
select_missing_supports = (
select_txos(txo_type=TXO_TYPES['support'], is_spent=False, txo_id_not_in_support_table=True)
)
def process_supports():
with progress(Event.SUPPORT_DELETE) as p:
p.start(1)
sql = Support.delete().where(condition_spent_supports)
p.ctx.execute(sql)
def process_claims(heights):
with progress(Event.CLAIM_DELETE) as p:
p.start(1)
p.ctx.execute(Claim.delete())
with progress(Event.CLAIM_UPDATE) as p:
with progress(Event.SUPPORT_INSERT) as p:
loader = p.ctx.get_bulk_loader()
for claim in get_txos(
txo_type__in=CLAIM_TYPE_CODES, is_spent=False,
height__gte=heights[0], height__lte=heights[1])[0]:
loader.add_claim(claim)
for support in rows_to_txos(p.ctx.fetchall(select_missing_supports)):
loader.add_support(support)
loader.save()
def process_supports(heights):
pass
def process_claims():
with progress(Event.CLAIM_DELETE) as p:
p.start(1)
sql = Claim.delete().where(condition_spent_claims())
p.ctx.execute(sql)
with progress(Event.CLAIM_INSERT) as p:
loader = p.ctx.get_bulk_loader()
for claim in rows_to_txos(p.ctx.fetchall(select_missing_claims)):
loader.add_claim(claim)
loader.save()
with progress(Event.CLAIM_UPDATE) as p:
loader = p.ctx.get_bulk_loader()
for claim in rows_to_txos(p.ctx.fetchall(select_stale_claims)):
loader.update_claim(claim)
loader.save()


@@ -43,6 +43,7 @@ Block = Table(
Column('previous_hash', LargeBinary),
Column('file_number', SmallInteger),
Column('height', Integer),
Column('timestamp', Integer),
Column('block_filter', LargeBinary, nullable=True)
)
@@ -54,9 +55,10 @@ TX = Table(
Column('raw', LargeBinary),
Column('height', Integer),
Column('position', SmallInteger),
Column('timestamp', Integer, nullable=True),
Column('day', Integer, nullable=True),
Column('is_verified', Boolean, server_default='FALSE'),
Column('purchased_claim_hash', LargeBinary, nullable=True),
Column('day', Integer, nullable=True),
Column('tx_filter', LargeBinary, nullable=True)
)
@@ -107,11 +109,13 @@ Claim = Table(
Column('height', Integer), # last updated height
Column('creation_height', Integer),
Column('activation_height', Integer, nullable=True),
Column('expiration_height', Integer),
Column('expiration_height', Integer, nullable=True),
Column('takeover_height', Integer, nullable=True),
Column('is_controlling', Boolean, server_default='0'),
Column('release_time', Integer, nullable=True),
# normalized#shortest-unique-claim_id
Column('short_url', Text),
Column('short_url', Text, nullable=True),
# channel's-short_url/normalized#shortest-unique-claim_id-within-channel
Column('canonical_url', Text, nullable=True),
@@ -120,7 +124,8 @@ Claim = Table(
Column('description', Text, nullable=True),
Column('claim_type', SmallInteger),
Column('reposted', Integer, server_default='0'),
Column('claim_reposted_count', Integer, server_default='0'),
Column('supports_in_claim_count', Integer, server_default='0'),
# streams
Column('stream_type', Text, nullable=True),
@@ -133,18 +138,17 @@ Claim = Table(
Column('reposted_claim_hash', LargeBinary, nullable=True),
# claims which are channels
Column('public_key_bytes', LargeBinary, nullable=True),
Column('public_key', LargeBinary, nullable=True),
Column('public_key_hash', LargeBinary, nullable=True),
Column('claims_in_channel', Integer, server_default='0'),
Column('public_key_height', Integer, server_default='0'), # height at which public key was last changed
Column('claims_in_channel_count', Integer, server_default='0'),
# claims which are inside channels
Column('channel_hash', LargeBinary, nullable=True),
Column('channel_join', Integer, nullable=True), # height at which claim got valid signature / joined channel
Column('signature', LargeBinary, nullable=True),
Column('signature_digest', LargeBinary, nullable=True),
Column('signature_valid', Boolean, nullable=True),
Column('is_signature_valid', Boolean, server_default='0'),
Column('effective_amount', BigInteger, server_default='0'),
Column('support_amount', BigInteger, server_default='0'),
Column('trending_group', BigInteger, server_default='0'),
Column('trending_mixed', BigInteger, server_default='0'),
@@ -162,14 +166,30 @@ Tag = Table(
Support = Table(
'support', metadata,
Column('normalized', Text, primary_key=True),
Column('claim_hash', LargeBinary, ForeignKey(Claim.columns.claim_hash)),
Column('txo_hash', LargeBinary, ForeignKey(TXO.columns.txo_hash), primary_key=True),
Column('claim_hash', LargeBinary, ForeignKey(TXO.columns.claim_hash)),
Column('address', Text),
Column('tx_position', SmallInteger),
Column('activation_height', Integer, nullable=True),
Column('expiration_height', Integer, nullable=True),
Column('amount', BigInteger),
Column('height', Integer),
# support metadata
Column('emoji', Text),
# signed supports
Column('channel_hash', LargeBinary, nullable=True),
Column('signature', LargeBinary, nullable=True),
Column('signature_digest', LargeBinary, nullable=True),
Column('is_signature_valid', Boolean, server_default='0'),
)
Claimtrie = Table(
'claimtrie', metadata,
Column('normalized', Text, primary_key=True),
Column('claim_hash', LargeBinary, ForeignKey(Claim.columns.claim_hash)),
Column('last_take_over_height', Integer),
Takeover = Table(
'takeover', metadata,
Column('normalized', Text),
Column('claim_hash', LargeBinary, ForeignKey(TXO.columns.claim_hash)),
Column('height', Integer),
)


@@ -14,7 +14,7 @@ from .tables import AccountAddress
def chunk(rows, step):
it, total = iter(rows), len(rows)
for _ in range(0, total, step):
yield min(step, total), islice(it, step)
yield list(islice(it, step))
total -= step
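A quick check of the new behaviour: the final batch simply comes out shorter instead of being paired with its size.

>>> list(chunk([1, 2, 3, 4, 5], 2))
[[1, 2], [3, 4], [5]]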


@@ -30,14 +30,10 @@ class Claim(Signable):
COLLECTION = 'collection'
REPOST = 'repost'
__slots__ = 'version',
__slots__ = ()
message_class = ClaimMessage
def __init__(self, message=None):
super().__init__(message)
self.version = 2
@property
def claim_type(self) -> str:
return self.message.WhichOneof('type')


@@ -1,6 +1,19 @@ from lbry.schema.base import Signable
from lbry.schema.base import Signable
from lbry.schema.types.v2.support_pb2 import Support as SupportMessage
class Support(Signable):
__slots__ = ()
message_class = None # TODO: add support protobufs
message_class = SupportMessage
def __init__(self, emoji='👍', message=None):
super().__init__(message)
self.emoji = emoji
@property
def emoji(self) -> str:
return self.message.emoji
@emoji.setter
def emoji(self, emoji: str):
self.message.emoji = emoji
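A minimal sketch of using the new field, assuming this class lives at lbry.schema.support (its protobuf import path suggests so):

from lbry.schema.support import Support

support = Support(emoji='🚀')
print(support.emoji)                          # 🚀
wire = support.message.SerializeToString()    # protobuf bytes carrying only the emoji field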


@@ -0,0 +1,69 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: support.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='support.proto',
package='pb',
syntax='proto3',
serialized_pb=_b('\n\rsupport.proto\x12\x02pb\"\x18\n\x07Support\x12\r\n\x05\x65moji\x18\x01 \x01(\tb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SUPPORT = _descriptor.Descriptor(
name='Support',
full_name='pb.Support',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='emoji', full_name='pb.Support.emoji', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=21,
serialized_end=45,
)
DESCRIPTOR.message_types_by_name['Support'] = _SUPPORT
Support = _reflection.GeneratedProtocolMessageType('Support', (_message.Message,), dict(
DESCRIPTOR = _SUPPORT,
__module__ = 'support_pb2'
# @@protoc_insertion_point(class_scope:pb.Support)
))
_sym_db.RegisterMessage(Support)
# @@protoc_insertion_point(module_scope)


@@ -2,6 +2,7 @@ import os
import sys
import json
import shutil
import hashlib
import logging
import tempfile
import functools
@@ -10,18 +11,25 @@ import time
from asyncio.runners import _cancel_all_tasks # type: ignore
import unittest
from unittest.case import _Outcome
from typing import Optional
from binascii import unhexlify
from typing import Optional, List, Union
from binascii import unhexlify, hexlify
import ecdsa
from lbry.db import Database
from lbry.blockchain import (
RegTestLedger, Transaction, Input, Output, dewies_to_lbc
)
from lbry.blockchain.block import Block
from lbry.blockchain.bcd_data_stream import BCDataStream
from lbry.blockchain.lbrycrd import Lbrycrd
from lbry.constants import CENT, NULL_HASH32
from lbry.blockchain.dewies import lbc_to_dewies
from lbry.constants import COIN, CENT, NULL_HASH32
from lbry.service import Daemon, FullNode, jsonrpc_dumps_pretty
from lbry.conf import Config
from lbry.console import Console
from lbry.wallet import Wallet, Account
from lbry.schema.claim import Claim
from lbry.service.exchange_rate_manager import (
ExchangeRateManager, ExchangeRate, LBRYFeed, LBRYBTCFeed
@@ -219,6 +227,173 @@ class AdvanceTimeTestCase(AsyncioTestCase):
await asyncio.sleep(0)
class UnitDBTestCase(AsyncioTestCase):
async def asyncSetUp(self):
await super().asyncSetUp()
self.db = Database.temp_sqlite()
self.addCleanup(self.db.close)
await self.db.open()
self.ledger = self.db.ledger
self.conf = self.ledger.conf
self.outputs: List[Output] = []
self.current_height = 0
async def add(self, block_or_tx: Union[Block, Transaction], block_hash: Optional[bytes] = None):
if isinstance(block_or_tx, Block):
await self.db.insert_block(block_or_tx)
for tx in block_or_tx.txs:
self.outputs.extend(tx.outputs)
return block_or_tx
elif isinstance(block_or_tx, Transaction):
await self.db.insert_transaction(block_hash, block_or_tx)
self.outputs.extend(block_or_tx.outputs)
return block_or_tx.outputs[0]
else:
raise NotImplementedError(f"Can't add {type(block_or_tx)}.")
def block(self, height: int, txs: List[Transaction]):
self.current_height = height
for tx in txs:
tx.height = height
return Block(
height=height, version=1, file_number=0,
block_hash=f'beef{height}'.encode(), prev_block_hash=f'beef{height-1}'.encode(),
merkle_root=b'beef', claim_trie_root=b'beef',
timestamp=99, bits=1, nonce=1, txs=txs
)
def coinbase(self):
return (
Transaction(height=0)
.add_inputs([Input.create_coinbase()])
.add_outputs([Output.pay_pubkey_hash(1000*COIN, (0).to_bytes(32, 'little'))])
)
def tx(self, amount='1.0', height=None, txi=None, txo=None):
counter = len(self.outputs)
self.current_height = height or (self.current_height+1)
txis = [Input.spend(self.outputs[-1])]
if txi is not None:
txis.insert(0, txi)
txo = txo or Output.pay_pubkey_hash(lbc_to_dewies(amount), counter.to_bytes(32, 'little'))
change = (sum(txi.txo_ref.txo.amount for txi in txis) - txo.amount) - CENT
assert change > 0
return (
Transaction(height=self.current_height)
.add_inputs(txis)
.add_outputs([
txo,
Output.pay_pubkey_hash(change, (counter + 1).to_bytes(32, 'little'))
])
)
def create_claim(self, claim_name='foo', claim=b'', amount='1.0', height=None):
return self.tx(
height=height,
txo=Output.pay_claim_name_pubkey_hash(
lbc_to_dewies(amount), claim_name, claim,
len(self.outputs).to_bytes(32, 'little')
)
)
def update_claim(self, txo, amount='1.0', height=None):
return self.tx(
height=height,
txo=Output.pay_update_claim_pubkey_hash(
lbc_to_dewies(amount), txo.claim_name, txo.claim_id, txo.claim,
len(self.outputs).to_bytes(32, 'little')
)
)
def support_claim(self, txo, amount='1.0', height=None):
return self.tx(
height=height,
txo=Output.pay_support_pubkey_hash(
lbc_to_dewies(amount), txo.claim_name, txo.claim_id,
len(self.outputs).to_bytes(32, 'little')
)
)
def repost_claim(self, claim_id, amount, channel):
claim = Claim()
claim.repost.reference.claim_id = claim_id
result = self.create_claim('repost', claim, amount)
if channel:
result.outputs[0].sign(channel)
result._reset()
return result
def abandon_claim(self, txo):
return self.tx(amount='0.01', txi=Input.spend(txo))
def _set_channel_key(self, channel, key):
private_key = ecdsa.SigningKey.from_string(key*32, curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
channel.private_key = private_key
channel.claim.channel.public_key_bytes = private_key.get_verifying_key().to_der()
channel.script.generate()
def create_channel(self, title, amount, name='@foo', key=b'a', **kwargs):
claim = Claim()
claim.stream.update(title=title, **kwargs)
tx = self.create_claim(name, claim, amount)
self._set_channel_key(tx.outputs[0], key)
return tx
def update_channel(self, channel, amount, key=b'a'):
self._set_channel_key(channel, key)
return self.update_claim(channel, amount)
def create_stream(self, title, amount, name='foo', channel=None, **kwargs):
claim = Claim()
claim.stream.update(title=title, **kwargs)
result = self.create_claim(name, claim, amount)
if channel:
result.outputs[0].sign(channel)
result._reset()
return result
def update_stream(self, stream, amount, channel=None):
result = self.update_claim(stream, amount)
if channel:
result.outputs[0].sign(channel)
result._reset()
return result
async def get_txis(self):
txis = []
for txi in await self.db.execute_fetchall("select txo_hash, address from txi"):
txoid = hexlify(txi["txo_hash"][:32][::-1]).decode()
position, = BCDataStream.uint32.unpack(txi['txo_hash'][32:])
txis.append((f'{txoid}:{position}', txi['address']))
return txis
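# NOTE (illustration only, not part of this commit): the decoding above assumes txo_hash is the
# 32-byte tx hash followed by the output position packed as a 4-byte little-endian unsigned int
# (Bitcoin-style serialization, which is what BCDataStream.uint32 implies), so a standalone
# round trip with struct would look like:
#     txo_hash = tx_hash + struct.pack('<I', position)
#     txoid = hexlify(txo_hash[:32][::-1]).decode()   # display order reverses the hash bytes
#     position, = struct.unpack('<I', txo_hash[32:])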
async def get_txos(self):
txos = []
sql = (
"select txo_hash, txo.position, is_spent from txo join tx using (tx_hash) "
"order by tx.height, tx.position, txo.position"
)
for txo in await self.db.execute_fetchall(sql):
txoid = hexlify(txo["txo_hash"][:32][::-1]).decode()
txos.append((
f"{txoid}:{txo['position']}",
bool(txo['is_spent'])
))
return txos
async def get_claims(self):
claims = []
sql = (
"select claim_id from claim order by height, tx_position"
)
for claim in await self.db.execute_fetchall(sql):
claims.append(claim['claim_id'])
return claims
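Taken together, these helpers let a unit test assemble a tiny chain entirely in memory. A hypothetical example of a subclass using them (not part of this commit; it only exercises the methods shown above):

class TestExample(UnitDBTestCase):
    async def test_build_small_chain(self):
        await self.add(self.block(1, [self.coinbase()]))
        stream = self.create_stream('First Claim', '1.0')
        await self.add(self.block(2, [stream]))
        # one coinbase output, plus the claim output and its change
        self.assertEqual(len(self.outputs), 3)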
class IntegrationTestCase(AsyncioTestCase):
SEED = None


@@ -1,528 +0,0 @@
import tempfile
import ecdsa
import hashlib
from unittest import skip
from binascii import hexlify
from typing import List, Tuple
from lbry.testcase import AsyncioTestCase, get_output
from lbry.conf import Config
from lbry.db.query_context import BulkLoader
from lbry.schema.claim import Claim
from lbry.schema.result import Censor
from lbry.blockchain.block import Block
from lbry.constants import COIN
from lbry.blockchain.transaction import Transaction, Input, Output
from lbry.service.full_node import FullNode
from lbry.blockchain.ledger import Ledger
from lbry.blockchain.lbrycrd import Lbrycrd
from lbry.blockchain.testing import create_lbrycrd_databases, add_block_to_lbrycrd
def get_input(fuzz=1):
return Input.spend(get_output(COIN, fuzz.to_bytes(32, 'little')))
def get_tx(fuzz=1):
return Transaction().add_inputs([get_input(fuzz)])
def search(**constraints) -> List:
return reader.search_claims(Censor(), **constraints)
def censored_search(**constraints) -> Tuple[List, Censor]:
rows, _, _, _, censor = reader.search(constraints)
return rows, censor
@skip('figure out what to do with these tests, claimtrie calcs are now done by lbrycrd')
class TestSQLDB(AsyncioTestCase):
async def asyncSetUp(self):
await super().asyncSetUp()
self.chain = Lbrycrd(Ledger(Config.with_same_dir(tempfile.mkdtemp())))
self.addCleanup(self.chain.cleanup)
await create_lbrycrd_databases(self.chain.actual_data_dir)
await self.chain.open()
self.addCleanup(self.chain.close)
self.service = FullNode(
self.chain.ledger, f'sqlite:///{self.chain.data_dir}/lbry.db', self.chain
)
self.service.conf.spv_address_filters = False
self.db = self.service.db
self.addCleanup(self.db.close)
await self.db.open()
self._txos = {}
async def advance(self, height, txs, takeovers=None):
block = Block(
height=height, version=1, file_number=0,
block_hash=f'beef{height}'.encode(), prev_block_hash=f'beef{height-1}'.encode(),
merkle_root=b'beef', claim_trie_root=b'beef',
timestamp=99, bits=1, nonce=1, txs=txs
)
await add_block_to_lbrycrd(self.chain, block, takeovers or [])
await BulkLoader(self.db).add_block(block).save()
await self.service.sync.post_process()
return [tx.outputs[0] for tx in txs]
def _make_tx(self, output, txi=None, **kwargs):
tx = get_tx(**kwargs).add_outputs([output])
if txi is not None:
tx.add_inputs([txi])
self._txos[output.ref.hash] = output
return tx
def _set_channel_key(self, channel, key):
private_key = ecdsa.SigningKey.from_string(key*32, curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
channel.private_key = private_key
channel.claim.channel.public_key_bytes = private_key.get_verifying_key().to_der()
channel.script.generate()
def get_channel(self, title, amount, name='@foo', key=b'a', **kwargs):
claim = Claim()
claim.channel.title = title
channel = Output.pay_claim_name_pubkey_hash(amount, name, claim, b'abc')
self._set_channel_key(channel, key)
return self._make_tx(channel, **kwargs)
def get_channel_update(self, channel, amount, key=b'a'):
self._set_channel_key(channel, key)
return self._make_tx(
Output.pay_update_claim_pubkey_hash(
amount, channel.claim_name, channel.claim_id, channel.claim, b'abc'
),
Input.spend(channel)
)
def get_stream(self, title, amount, name='foo', channel=None, **kwargs):
claim = Claim()
claim.stream.update(title=title, **kwargs)
result = self._make_tx(Output.pay_claim_name_pubkey_hash(amount, name, claim, b'abc'))
if channel:
result.outputs[0].sign(channel)
result._reset()
return result
def get_stream_update(self, tx, amount, channel=None):
stream = Transaction(tx[0].raw).outputs[0]
result = self._make_tx(
Output.pay_update_claim_pubkey_hash(
amount, stream.claim_name, stream.claim_id, stream.claim, b'abc'
),
Input.spend(stream)
)
if channel:
result.outputs[0].sign(channel)
result._reset()
return result
def get_repost(self, claim_id, amount, channel):
claim = Claim()
claim.repost.reference.claim_id = claim_id
result = self._make_tx(Output.pay_claim_name_pubkey_hash(amount, 'repost', claim, b'abc'))
result.outputs[0].sign(channel)
result._reset()
return result
def get_abandon(self, tx):
claim = Transaction(tx[0].raw).outputs[0]
return self._make_tx(
Output.pay_pubkey_hash(claim.amount, b'abc'),
Input.spend(claim)
)
def get_support(self, tx, amount):
claim = Transaction(tx[0].raw).outputs[0]
return self._make_tx(
Output.pay_support_pubkey_hash(
amount, claim.claim_name, claim.claim_id, b'abc'
)
)
@skip('figure out what to do with these tests, claimtrie calcs are now done by lbrycrd')
class TestClaimtrie(TestSQLDB):
def setUp(self):
super().setUp()
self._input_counter = 1
def _get_x_with_claim_id_prefix(self, getter, prefix, cached_iteration=None, **kwargs):
iterations = cached_iteration+1 if cached_iteration else 100
for i in range(cached_iteration or 1, iterations):
stream = getter(f'claim #{i}', COIN, fuzz=self._input_counter, **kwargs)
if stream.outputs[0].claim_id.startswith(prefix):
cached_iteration is None and print(f'Found "{prefix}" in {i} iterations.')
self._input_counter += 1
return stream
if cached_iteration:
raise ValueError(f'Failed to find "{prefix}" at cached iteration, run with None to find iteration.')
raise ValueError(f'Failed to find "{prefix}" in {iterations} iterations, try different values.')
def get_channel_with_claim_id_prefix(self, prefix, cached_iteration=None, **kwargs):
return self._get_x_with_claim_id_prefix(self.get_channel, prefix, cached_iteration, **kwargs)
def get_stream_with_claim_id_prefix(self, prefix, cached_iteration=None, **kwargs):
return self._get_x_with_claim_id_prefix(self.get_stream, prefix, cached_iteration, **kwargs)
async def test_canonical_url_and_channel_validation(self):
advance, search = self.advance, partial(self.service.search_claims, [])
tx_chan_a = self.get_channel_with_claim_id_prefix('a', 1, key=b'c')
tx_chan_ab = self.get_channel_with_claim_id_prefix('ab', 20, key=b'c')
txo_chan_a = tx_chan_a.outputs[0]
txo_chan_ab = tx_chan_ab.outputs[0]
await advance(1, [tx_chan_a])
await advance(2, [tx_chan_ab])
(r_ab, r_a) = search(order_by=['creation_height'], limit=2)
self.assertEqual("@foo#a", r_a['short_url'])
self.assertEqual("@foo#ab", r_ab['short_url'])
self.assertIsNone(r_a['canonical_url'])
self.assertIsNone(r_ab['canonical_url'])
self.assertEqual(0, r_a['claims_in_channel'])
self.assertEqual(0, r_ab['claims_in_channel'])
tx_a = self.get_stream_with_claim_id_prefix('a', 2)
tx_ab = self.get_stream_with_claim_id_prefix('ab', 42)
tx_abc = self.get_stream_with_claim_id_prefix('abc', 65)
await advance(3, [tx_a])
await advance(4, [tx_ab, tx_abc])
(r_abc, r_ab, r_a) = search(order_by=['creation_height', 'tx_position'], limit=3)
self.assertEqual("foo#a", r_a['short_url'])
self.assertEqual("foo#ab", r_ab['short_url'])
self.assertEqual("foo#abc", r_abc['short_url'])
self.assertIsNone(r_a['canonical_url'])
self.assertIsNone(r_ab['canonical_url'])
self.assertIsNone(r_abc['canonical_url'])
tx_a2 = self.get_stream_with_claim_id_prefix('a', 7, channel=txo_chan_a)
tx_ab2 = self.get_stream_with_claim_id_prefix('ab', 23, channel=txo_chan_a)
a2_claim = tx_a2.outputs[0]
ab2_claim = tx_ab2.outputs[0]
await advance(6, [tx_a2])
await advance(7, [tx_ab2])
(r_ab2, r_a2) = search(order_by=['creation_height'], limit=2)
self.assertEqual(f"foo#{a2_claim.claim_id[:2]}", r_a2['short_url'])
self.assertEqual(f"foo#{ab2_claim.claim_id[:4]}", r_ab2['short_url'])
self.assertEqual("@foo#a/foo#a", r_a2['canonical_url'])
self.assertEqual("@foo#a/foo#ab", r_ab2['canonical_url'])
self.assertEqual(2, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel'])
# change channel public key, invalidating stream claim signatures
await advance(8, [self.get_channel_update(txo_chan_a, COIN, key=b'a')])
(r_ab2, r_a2) = search(order_by=['creation_height'], limit=2)
self.assertEqual(f"foo#{a2_claim.claim_id[:2]}", r_a2['short_url'])
self.assertEqual(f"foo#{ab2_claim.claim_id[:4]}", r_ab2['short_url'])
self.assertIsNone(r_a2['canonical_url'])
self.assertIsNone(r_ab2['canonical_url'])
self.assertEqual(0, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel'])
# reinstate previous channel public key (previous stream claim signatures become valid again)
channel_update = self.get_channel_update(txo_chan_a, COIN, key=b'c')
await advance(9, [channel_update])
(r_ab2, r_a2) = search(order_by=['creation_height'], limit=2)
self.assertEqual(f"foo#{a2_claim.claim_id[:2]}", r_a2['short_url'])
self.assertEqual(f"foo#{ab2_claim.claim_id[:4]}", r_ab2['short_url'])
self.assertEqual("@foo#a/foo#a", r_a2['canonical_url'])
self.assertEqual("@foo#a/foo#ab", r_ab2['canonical_url'])
self.assertEqual(2, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel'])
self.assertEqual(0, search(claim_id=txo_chan_ab.claim_id, limit=1)[0]['claims_in_channel'])
# change channel of stream
self.assertEqual("@foo#a/foo#ab", search(claim_id=ab2_claim.claim_id, limit=1)[0]['canonical_url'])
tx_ab2 = self.get_stream_update(tx_ab2, COIN, txo_chan_ab)
await advance(10, [tx_ab2])
self.assertEqual("@foo#ab/foo#a", search(claim_id=ab2_claim.claim_id, limit=1)[0]['canonical_url'])
# TODO: currently there is a bug where a stream leaving a channel does not update that channel's claims count
self.assertEqual(2, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel'])
# TODO: after bug is fixed remove test above and add test below
#self.assertEqual(1, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel'])
self.assertEqual(1, search(claim_id=txo_chan_ab.claim_id, limit=1)[0]['claims_in_channel'])
# claim abandon updates claims_in_channel
await advance(11, [self.get_abandon(tx_ab2)])
self.assertEqual(0, search(claim_id=txo_chan_ab.claim_id, limit=1)[0]['claims_in_channel'])
# delete channel, invalidating stream claim signatures
await advance(12, [self.get_abandon(channel_update)])
(r_a2,) = search(order_by=['creation_height'], limit=1)
self.assertEqual(f"foo#{a2_claim.claim_id[:2]}", r_a2['short_url'])
self.assertIsNone(r_a2['canonical_url'])
def test_resolve_issue_2448(self):
advance = self.advance
tx_chan_a = self.get_channel_with_claim_id_prefix('a', 1, key=b'c')
tx_chan_ab = self.get_channel_with_claim_id_prefix('ab', 72, key=b'c')
txo_chan_a = tx_chan_a[0].outputs[0]
txo_chan_ab = tx_chan_ab[0].outputs[0]
advance(1, [tx_chan_a])
advance(2, [tx_chan_ab])
self.assertEqual(reader.resolve_url("@foo#a")['claim_hash'], txo_chan_a.claim_hash)
self.assertEqual(reader.resolve_url("@foo#ab")['claim_hash'], txo_chan_ab.claim_hash)
# update increases the height at which the channel last changed
advance(9, [self.get_channel_update(txo_chan_a, COIN, key=b'c')])
# make sure that activation_height is used instead of height (issue #2448)
self.assertEqual(reader.resolve_url("@foo#a")['claim_hash'], txo_chan_a.claim_hash)
self.assertEqual(reader.resolve_url("@foo#ab")['claim_hash'], txo_chan_ab.claim_hash)
def test_canonical_find_shortest_id(self):
new_hash = 'abcdef0123456789beef'
other0 = '1bcdef0123456789beef'
other1 = 'ab1def0123456789beef'
other2 = 'abc1ef0123456789beef'
other3 = 'abcdef0123456789bee1'
f = FindShortestID()
f.step(other0, new_hash)
self.assertEqual('#a', f.finalize())
f.step(other1, new_hash)
self.assertEqual('#abc', f.finalize())
f.step(other2, new_hash)
self.assertEqual('#abcd', f.finalize())
f.step(other3, new_hash)
self.assertEqual('#abcdef0123456789beef', f.finalize())
@skip('figure out what to do with these tests, claimtrie calcs are now done by lbrycrd')
class TestTrending(TestSQLDB):
def test_trending(self):
advance = self.advance
no_trend = self.get_stream('Claim A', COIN)
downwards = self.get_stream('Claim B', COIN)
up_small = self.get_stream('Claim C', COIN)
up_medium = self.get_stream('Claim D', COIN)
up_biggly = self.get_stream('Claim E', COIN)
claims = advance(1, [up_biggly, up_medium, up_small, no_trend, downwards])
for window in range(1, 8):
advance(zscore.TRENDING_WINDOW * window, [
self.get_support(downwards, (20-window)*COIN),
self.get_support(up_small, int(20+(window/10)*COIN)),
self.get_support(up_medium, (20+(window*(2 if window == 7 else 1)))*COIN),
self.get_support(up_biggly, (20+(window*(3 if window == 7 else 1)))*COIN),
])
results = search(order_by=['trending_local'])
self.assertEqual([c.claim_id for c in claims], [hexlify(c['claim_hash'][::-1]).decode() for c in results])
self.assertEqual([10, 6, 2, 0, -2], [int(c['trending_local']) for c in results])
self.assertEqual([53, 38, -32, 0, -6], [int(c['trending_global']) for c in results])
self.assertEqual([4, 4, 2, 0, 1], [int(c['trending_group']) for c in results])
self.assertEqual([53, 38, 2, 0, -6], [int(c['trending_mixed']) for c in results])
def test_edge(self):
problematic = self.get_stream('Problem', COIN)
self.advance(1, [problematic])
self.advance(zscore.TRENDING_WINDOW, [self.get_support(problematic, 53000000000)])
self.advance(zscore.TRENDING_WINDOW * 2, [self.get_support(problematic, 500000000)])
@skip('figure out what to do with these tests, claimtrie calcs are now done by lbrycrd')
class TestContentBlocking(TestSQLDB):
def test_blocking_and_filtering(self):
# content claims and channels
tx0 = self.get_channel('A Channel', COIN, '@channel1')
regular_channel = tx0[0].outputs[0]
tx1 = self.get_stream('Claim One', COIN, 'claim1')
tx2 = self.get_stream('Claim Two', COIN, 'claim2', regular_channel)
tx3 = self.get_stream('Claim Three', COIN, 'claim3')
self.advance(1, [tx0, tx1, tx2, tx3])
claim1, claim2, claim3 = tx1[0].outputs[0], tx2[0].outputs[0], tx3[0].outputs[0]
# block and filter channels
tx0 = self.get_channel('Blocking Channel', COIN, '@block')
tx1 = self.get_channel('Filtering Channel', COIN, '@filter')
blocking_channel = tx0[0].outputs[0]
filtering_channel = tx1[0].outputs[0]
self.sql.blocking_channel_hashes.add(blocking_channel.claim_hash)
self.sql.filtering_channel_hashes.add(filtering_channel.claim_hash)
self.advance(2, [tx0, tx1])
self.assertEqual({}, dict(self.sql.blocked_streams))
self.assertEqual({}, dict(self.sql.blocked_channels))
self.assertEqual({}, dict(self.sql.filtered_streams))
self.assertEqual({}, dict(self.sql.filtered_channels))
# nothing blocked
results, _ = reader.resolve([
claim1.claim_name, claim2.claim_name,
claim3.claim_name, regular_channel.claim_name
])
self.assertEqual(claim1.claim_hash, results[0]['claim_hash'])
self.assertEqual(claim2.claim_hash, results[1]['claim_hash'])
self.assertEqual(claim3.claim_hash, results[2]['claim_hash'])
self.assertEqual(regular_channel.claim_hash, results[3]['claim_hash'])
# nothing filtered
results, censor = censored_search()
self.assertEqual(6, len(results))
self.assertEqual(0, censor.total)
self.assertEqual({}, censor.censored)
# block claim reposted to blocking channel, also gets filtered
repost_tx1 = self.get_repost(claim1.claim_id, COIN, blocking_channel)
repost1 = repost_tx1[0].outputs[0]
self.advance(3, [repost_tx1])
self.assertEqual(
{repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
dict(self.sql.blocked_streams)
)
self.assertEqual({}, dict(self.sql.blocked_channels))
self.assertEqual(
{repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
dict(self.sql.filtered_streams)
)
self.assertEqual({}, dict(self.sql.filtered_channels))
# claim is blocked from results by direct repost
results, censor = censored_search(text='Claim')
self.assertEqual(2, len(results))
self.assertEqual(claim2.claim_hash, results[0]['claim_hash'])
self.assertEqual(claim3.claim_hash, results[1]['claim_hash'])
self.assertEqual(1, censor.total)
self.assertEqual({blocking_channel.claim_hash: 1}, censor.censored)
results, _ = reader.resolve([claim1.claim_name])
self.assertEqual(
f"Resolve of 'claim1' was censored by channel with claim id '{blocking_channel.claim_id}'.",
results[0].args[0]
)
results, _ = reader.resolve([
claim2.claim_name, regular_channel.claim_name # claim2 and channel still resolved
])
self.assertEqual(claim2.claim_hash, results[0]['claim_hash'])
self.assertEqual(regular_channel.claim_hash, results[1]['claim_hash'])
# block claim indirectly by blocking its parent channel
repost_tx2 = self.get_repost(regular_channel.claim_id, COIN, blocking_channel)
repost2 = repost_tx2[0].outputs[0]
self.advance(4, [repost_tx2])
self.assertEqual(
{repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
dict(self.sql.blocked_streams)
)
self.assertEqual(
{repost2.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
dict(self.sql.blocked_channels)
)
self.assertEqual(
{repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
dict(self.sql.filtered_streams)
)
self.assertEqual(
{repost2.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
dict(self.sql.filtered_channels)
)
# claim in blocked channel is filtered from search and can't resolve
results, censor = censored_search(text='Claim')
self.assertEqual(1, len(results))
self.assertEqual(claim3.claim_hash, results[0]['claim_hash'])
self.assertEqual(2, censor.total)
self.assertEqual({blocking_channel.claim_hash: 2}, censor.censored)
results, _ = reader.resolve([
claim2.claim_name, regular_channel.claim_name # claim2 and channel don't resolve
])
self.assertEqual(
f"Resolve of 'claim2' was censored by channel with claim id '{blocking_channel.claim_id}'.",
results[0].args[0]
)
self.assertEqual(
f"Resolve of '@channel1' was censored by channel with claim id '{blocking_channel.claim_id}'.",
results[1].args[0]
)
results, _ = reader.resolve([claim3.claim_name]) # claim3 still resolved
self.assertEqual(claim3.claim_hash, results[0]['claim_hash'])
# filtered claim is only filtered and not blocked
repost_tx3 = self.get_repost(claim3.claim_id, COIN, filtering_channel)
repost3 = repost_tx3[0].outputs[0]
self.advance(5, [repost_tx3])
self.assertEqual(
{repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
dict(self.sql.blocked_streams)
)
self.assertEqual(
{repost2.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
dict(self.sql.blocked_channels)
)
self.assertEqual(
{repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash,
repost3.claim.repost.reference.claim_hash: filtering_channel.claim_hash},
dict(self.sql.filtered_streams)
)
self.assertEqual(
{repost2.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
dict(self.sql.filtered_channels)
)
# filtered claim doesn't show up in search results but is still resolvable
results, censor = censored_search(text='Claim')
self.assertEqual(0, len(results))
self.assertEqual(3, censor.total)
self.assertEqual({blocking_channel.claim_hash: 2, filtering_channel.claim_hash: 1}, censor.censored)
results, _ = reader.resolve([claim3.claim_name]) # claim3 still resolved
self.assertEqual(claim3.claim_hash, results[0]['claim_hash'])
# abandon unblocks content
self.advance(6, [
self.get_abandon(repost_tx1),
self.get_abandon(repost_tx2),
self.get_abandon(repost_tx3)
])
self.assertEqual({}, dict(self.sql.blocked_streams))
self.assertEqual({}, dict(self.sql.blocked_channels))
self.assertEqual({}, dict(self.sql.filtered_streams))
self.assertEqual({}, dict(self.sql.filtered_channels))
results, censor = censored_search(text='Claim')
self.assertEqual(3, len(results))
self.assertEqual(0, censor.total)
results, censor = censored_search()
self.assertEqual(6, len(results))
self.assertEqual(0, censor.total)
results, _ = reader.resolve([
claim1.claim_name, claim2.claim_name,
claim3.claim_name, regular_channel.claim_name
])
self.assertEqual(claim1.claim_hash, results[0]['claim_hash'])
self.assertEqual(claim2.claim_hash, results[1]['claim_hash'])
self.assertEqual(claim3.claim_hash, results[2]['claim_hash'])
self.assertEqual(regular_channel.claim_hash, results[3]['claim_hash'])
def test_pagination(self):
one, two, three, four, five, six, seven, filter_channel = self.advance(1, [
self.get_stream('One', COIN),
self.get_stream('Two', COIN),
self.get_stream('Three', COIN),
self.get_stream('Four', COIN),
self.get_stream('Five', COIN),
self.get_stream('Six', COIN),
self.get_stream('Seven', COIN),
self.get_channel('Filtering Channel', COIN, '@filter'),
])
self.sql.filtering_channel_hashes.add(filter_channel.claim_hash)
# nothing filtered
results, censor = censored_search(order_by='^height', offset=1, limit=3)
self.assertEqual(3, len(results))
self.assertEqual(
[two.claim_hash, three.claim_hash, four.claim_hash],
[r['claim_hash'] for r in results]
)
self.assertEqual(0, censor.total)
# content filtered
repost1, repost2 = self.advance(2, [
self.get_repost(one.claim_id, COIN, filter_channel),
self.get_repost(two.claim_id, COIN, filter_channel),
])
results, censor = censored_search(order_by='^height', offset=1, limit=3)
self.assertEqual(3, len(results))
self.assertEqual(
[four.claim_hash, five.claim_hash, six.claim_hash],
[r['claim_hash'] for r in results]
)
self.assertEqual(2, censor.total)
self.assertEqual({filter_channel.claim_hash: 2}, censor.censored)