include reason from the repost description in blocking/filtering errors

This commit is contained in:
Jack Robison 2022-08-13 14:35:48 -04:00
parent 93850d72eb
commit db3294e6e0
4 changed files with 54 additions and 22 deletions

View file

@@ -693,23 +693,43 @@ class SecondaryDB:
channel_tx_hash = claim_tx_hashes[channel_txo.tx_num]
channel_tx_position = channel_txo.position
channel_height = bisect_right(self.tx_counts, channel_txo.tx_num)
if apply_blocking:
blocker_hash = self.blocked_streams.get(touched) or self.blocked_streams.get(
blocker = self.blocked_streams.get(touched) or self.blocked_streams.get(
reposted_claim_hash) or self.blocked_channels.get(touched) or self.blocked_channels.get(
reposted_channel_hash) or self.blocked_channels.get(channel_hash)
if blocker_hash:
reason_row = self._fs_get_claim_by_hash(blocker_hash)
return ResolveCensoredError(f'lbry://{canonical_url}', blocker_hash.hex(), censor_row=reason_row)
if blocker:
blocker_channel_hash, blocker_repost_hash = blocker
blocker_channel = self._fs_get_claim_by_hash(blocker_channel_hash)
blocker_claim = self._fs_get_claim_by_hash(blocker_repost_hash)
censored_url = f'lbry://{canonical_url}'
censoring_url = f'lbry://{blocker_channel.name}#{blocker_channel_hash.hex()[:10]}/{blocker_claim.name}#{blocker_repost_hash.hex()[:10]}'
reason = self.get_claim_metadata(blocker_claim.tx_hash, blocker_claim.position)
if reason:
reason = reason.repost.description
else:
reason = ''
return ResolveCensoredError(
censored_url, censoring_url, blocker_channel_hash.hex(), reason, blocker_channel
)
if apply_filtering:
filter_hash = self.filtered_streams.get(touched) or self.filtered_streams.get(
filter_info = self.filtered_streams.get(touched) or self.filtered_streams.get(
reposted_claim_hash) or self.filtered_channels.get(touched) or self.filtered_channels.get(
reposted_channel_hash) or self.filtered_channels.get(channel_hash)
if filter_hash:
reason_row = self._fs_get_claim_by_hash(filter_hash)
return ResolveCensoredError(f'lbry://{canonical_url}', filter_hash.hex(), censor_row=reason_row)
if filter_info:
filter_channel_hash, filter_repost_hash = filter_info
filter_channel = self._fs_get_claim_by_hash(filter_channel_hash)
filter_claim = self._fs_get_claim_by_hash(filter_repost_hash)
censored_url = f'lbry://{canonical_url}'
censoring_url = f'lbry://{filter_channel.name}#{filter_channel_hash.hex()[:10]}/{filter_claim.name}#{filter_repost_hash.hex()[:10]}'
reason = self.get_claim_metadata(filter_claim.tx_hash, filter_claim.position)
if reason:
reason = reason.repost.description
else:
reason = ''
return ResolveCensoredError(
censored_url, censoring_url, filter_channel_hash.hex(), reason, filter_channel
)
return ResolveResult(
claim_txo.name, normalized_name, touched, claim_txo.tx_num, claim_txo.position, tx_hash, height,
@@ -793,9 +813,9 @@ class SecondaryDB:
txo = self.get_claim_txo(repost)
if txo:
if txo.normalized_name.startswith('@'):
channels[repost] = reposter_channel_hash
channels[repost] = reposter_channel_hash, stream.claim_hash
else:
streams[repost] = reposter_channel_hash
streams[repost] = reposter_channel_hash, stream.claim_hash
return streams, channels
def get_channel_for_claim(self, claim_hash, tx_num, position) -> Optional[bytes]:

View file

@@ -112,12 +112,18 @@ class ElasticSyncDB(SecondaryDB):
claim_languages = [lang.language or 'none' for lang in meta.languages] or ['none']
tags = list(set(claim_tags).union(set(reposted_tags)))
languages = list(set(claim_languages).union(set(reposted_languages)))
blocking_channel = None
blocked_hash = self.blocked_streams.get(claim_hash) or self.blocked_streams.get(
reposted_claim_hash) or self.blocked_channels.get(claim_hash) or self.blocked_channels.get(
reposted_claim_hash) or self.blocked_channels.get(claim.channel_hash)
if blocked_hash:
blocking_channel, blocked_hash = blocked_hash
filtered_channel = None
filtered_hash = self.filtered_streams.get(claim_hash) or self.filtered_streams.get(
reposted_claim_hash) or self.filtered_channels.get(claim_hash) or self.filtered_channels.get(
reposted_claim_hash) or self.filtered_channels.get(claim.channel_hash)
if filtered_hash:
filtered_channel, filtered_hash = filtered_hash
value = {
'claim_id': claim_hash.hex(),
'claim_name': claim.name,
@@ -166,7 +172,8 @@ class ElasticSyncDB(SecondaryDB):
'tags': tags,
'languages': languages,
'censor_type': Censor.RESOLVE if blocked_hash else Censor.SEARCH if filtered_hash else Censor.NOT_CENSORED,
'censoring_channel_id': (blocked_hash or filtered_hash or b'').hex() or None,
'censoring_channel_id': (blocking_channel or filtered_channel or b'').hex() or None,
'censoring_claim_id': (blocked_hash or filtered_hash or b'').hex() or None,
'claims_in_channel': None if not metadata.is_channel else self.get_claims_in_channel_count(claim_hash),
'reposted_tx_id': None if not claim.reposted_tx_hash else claim.reposted_tx_hash[::-1].hex(),
'reposted_tx_position': claim.reposted_tx_position,

View file

@@ -154,27 +154,28 @@ class ElasticSyncService(BlockchainReaderService):
return update
async def apply_filters(self, blocked_streams, blocked_channels, filtered_streams, filtered_channels):
only_channels = lambda x: {k: chan for k, (chan, repost) in x.items()}
if filtered_streams:
await self.sync_client.update_by_query(
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_streams), slices=4)
self.index, body=self.update_filter_query(Censor.SEARCH, only_channels(filtered_streams)), slices=4)
await self.sync_client.indices.refresh(self.index)
if filtered_channels:
await self.sync_client.update_by_query(
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels), slices=4)
self.index, body=self.update_filter_query(Censor.SEARCH, only_channels(filtered_channels)), slices=4)
await self.sync_client.indices.refresh(self.index)
await self.sync_client.update_by_query(
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels, True), slices=4)
self.index, body=self.update_filter_query(Censor.SEARCH, only_channels(filtered_channels), True), slices=4)
await self.sync_client.indices.refresh(self.index)
if blocked_streams:
await self.sync_client.update_by_query(
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_streams), slices=4)
self.index, body=self.update_filter_query(Censor.RESOLVE, only_channels(blocked_streams)), slices=4)
await self.sync_client.indices.refresh(self.index)
if blocked_channels:
await self.sync_client.update_by_query(
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels), slices=4)
self.index, body=self.update_filter_query(Censor.RESOLVE, only_channels(blocked_channels)), slices=4)
await self.sync_client.indices.refresh(self.index)
await self.sync_client.update_by_query(
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels, True), slices=4)
self.index, body=self.update_filter_query(Censor.RESOLVE, only_channels(blocked_channels), True), slices=4)
await self.sync_client.indices.refresh(self.index)
@staticmethod

View file

@@ -1,4 +1,7 @@
import typing
from .base import BaseError, claim_id
if typing.TYPE_CHECKING:
from hub.db.common import ResolveResult
class UserInputError(BaseError):
@@ -262,11 +265,12 @@ class ResolveTimeoutError(WalletError):
class ResolveCensoredError(WalletError):
def __init__(self, url, censor_id, censor_row):
self.url = url
def __init__(self, censored_url: str, censoring_url: str, censor_id: str, reason: str, censor_row: 'ResolveResult'):
self.url = censored_url
self.censor_id = censor_id
self.censor_row = censor_row
super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{censor_id}'.")
super().__init__(f"Resolve of '{censored_url}' was censored by '{censoring_url}'. Reason given: {reason}")
class KeyFeeAboveMaxAllowedError(WalletError):