From ef89c2e47a1e8330e2f94d3472a13340854c3c75 Mon Sep 17 00:00:00 2001 From: Lex Berezhny Date: Wed, 8 Sep 2021 10:55:21 -0400 Subject: [PATCH 1/7] use databse to track blob disk space use and preserve own blobs --- lbry/blob/blob_file.py | 10 ++++- lbry/blob/blob_info.py | 8 +++- lbry/blob/blob_manager.py | 8 +++- lbry/blob/disk_space_manager.py | 45 +++++++------------ lbry/extras/daemon/components.py | 16 ++++--- lbry/extras/daemon/daemon.py | 2 +- lbry/extras/daemon/storage.py | 37 +++++++++++---- .../datanetwork/test_file_commands.py | 9 ++-- 8 files changed, 84 insertions(+), 51 deletions(-) diff --git a/lbry/blob/blob_file.py b/lbry/blob/blob_file.py index d702d0a06..cda667561 100644 --- a/lbry/blob/blob_file.py +++ b/lbry/blob/blob_file.py @@ -1,5 +1,6 @@ import os import re +import time import asyncio import binascii import logging @@ -70,12 +71,15 @@ class AbstractBlob: 'writers', 'verified', 'writing', - 'readers' + 'readers', + 'is_mine', + 'added_on', ] def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None, blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None, - blob_directory: typing.Optional[str] = None): + blob_directory: typing.Optional[str] = None, is_mine: bool = False, + added_on: typing.Optional[int] = None): self.loop = loop self.blob_hash = blob_hash self.length = length @@ -85,6 +89,8 @@ class AbstractBlob: self.verified: asyncio.Event = asyncio.Event(loop=self.loop) self.writing: asyncio.Event = asyncio.Event(loop=self.loop) self.readers: typing.List[typing.BinaryIO] = [] + self.is_mine = is_mine + self.added_on = added_on or time.time() if not is_valid_blobhash(blob_hash): raise InvalidBlobHashError(blob_hash) diff --git a/lbry/blob/blob_info.py b/lbry/blob/blob_info.py index febfdcb65..408b6047b 100644 --- a/lbry/blob/blob_info.py +++ b/lbry/blob/blob_info.py @@ -7,13 +7,19 @@ class BlobInfo: 'blob_num', 'length', 'iv', + 'added_on', + 'is_mine' 
] - def __init__(self, blob_num: int, length: int, iv: str, blob_hash: typing.Optional[str] = None): + def __init__( + self, blob_num: int, length: int, iv: str, + blob_hash: typing.Optional[str] = None, added_on=None, is_mine=None): self.blob_hash = blob_hash self.blob_num = blob_num self.length = length self.iv = iv + self.added_on = added_on + self.is_mine = is_mine def as_dict(self) -> typing.Dict: d = { diff --git a/lbry/blob/blob_manager.py b/lbry/blob/blob_manager.py index 4a7e54740..c99a017b9 100644 --- a/lbry/blob/blob_manager.py +++ b/lbry/blob/blob_manager.py @@ -105,9 +105,13 @@ class BlobManager: if isinstance(blob, BlobFile): if blob.blob_hash not in self.completed_blob_hashes: self.completed_blob_hashes.add(blob.blob_hash) - return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=True)) + return self.loop.create_task(self.storage.add_blobs( + (blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=True) + ) else: - return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=False)) + return self.loop.create_task(self.storage.add_blobs( + (blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=False) + ) def check_completed_blobs(self, blob_hashes: typing.List[str]) -> typing.List[str]: """Returns of the blobhashes_to_check, which are valid""" diff --git a/lbry/blob/disk_space_manager.py b/lbry/blob/disk_space_manager.py index f42a43be4..2a19180dc 100644 --- a/lbry/blob/disk_space_manager.py +++ b/lbry/blob/disk_space_manager.py @@ -7,51 +7,38 @@ log = logging.getLogger(__name__) class DiskSpaceManager: - def __init__(self, config, cleaning_interval=30 * 60): + def __init__(self, config, db, blob_manager, cleaning_interval=30 * 60): self.config = config + self.db = db + self.blob_manager = blob_manager self.cleaning_interval = cleaning_interval self.running = False self.task = None - @property - def space_used_bytes(self): - used = 0 - data_dir = 
os.path.join(self.config.data_dir, 'blobfiles') - for item in os.scandir(data_dir): - if item.is_file: - used += item.stat().st_size - return used + async def get_space_used_bytes(self): + return await self.db.get_stored_blob_disk_usage() - @property - def space_used_mb(self): - return int(self.space_used_bytes/1024.0/1024.0) + async def get_space_used_mb(self): + return int(await self.get_space_used_bytes()/1024.0/1024.0) - def clean(self): + async def clean(self): if not self.config.blob_storage_limit: return 0 - used = 0 - files = [] - data_dir = os.path.join(self.config.data_dir, 'blobfiles') - for file in os.scandir(data_dir): - if file.is_file: - file_stats = file.stat() - used += file_stats.st_size - files.append((file_stats.st_mtime, file_stats.st_size, file.path)) - files.sort() - available = (self.config.blob_storage_limit*1024*1024) - used - cleaned = 0 - for _, file_size, file in files: + delete = [] + available = (self.config.blob_storage_limit*1024*1024) - await self.db.get_stored_blob_disk_usage() + for blob_hash, file_size in await self.db.get_stored_blobs(is_mine=False): available += file_size if available > 0: break - os.remove(file) - cleaned += 1 - return cleaned + delete.append(blob_hash) + if delete: + await self.blob_manager.delete_blobs(delete, delete_from_db=True) + return len(delete) async def cleaning_loop(self): while self.running: await asyncio.sleep(self.cleaning_interval) - await asyncio.get_event_loop().run_in_executor(None, self.clean) + await self.clean() async def start(self): self.running = True diff --git a/lbry/extras/daemon/components.py b/lbry/extras/daemon/components.py index a383b9ea5..18d846375 100644 --- a/lbry/extras/daemon/components.py +++ b/lbry/extras/daemon/components.py @@ -379,22 +379,28 @@ class FileManagerComponent(Component): class DiskSpaceComponent(Component): component_name = DISK_SPACE_COMPONENT + depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT] def __init__(self, component_manager): 
super().__init__(component_manager) - self.disk_space_manager = DiskSpaceManager(self.conf) + self.disk_space_manager: typing.Optional[DiskSpaceManager] = None @property def component(self) -> typing.Optional[DiskSpaceManager]: return self.disk_space_manager async def get_status(self): - return { - 'space_used': str(self.disk_space_manager.space_used_mb), - 'running': self.disk_space_manager.running, - } + if self.disk_space_manager: + return { + 'space_used': str(await self.disk_space_manager.get_space_used_mb()), + 'running': self.disk_space_manager.running, + } + return {'space_used': '0', 'running': False} async def start(self): + db = self.component_manager.get_component(DATABASE_COMPONENT) + blob_manager = self.component_manager.get_component(BLOB_COMPONENT) + self.disk_space_manager = DiskSpaceManager(self.conf, db, blob_manager) await self.disk_space_manager.start() async def stop(self): diff --git a/lbry/extras/daemon/daemon.py b/lbry/extras/daemon/daemon.py index 15374fa23..d0220cff6 100644 --- a/lbry/extras/daemon/daemon.py +++ b/lbry/extras/daemon/daemon.py @@ -5041,7 +5041,7 @@ class Daemon(metaclass=JSONRPCServerType): Returns: (bool) true if successful """ - return self.disk_space_manager.clean() + return await self.disk_space_manager.clean() @requires(FILE_MANAGER_COMPONENT) async def jsonrpc_file_reflect(self, **kwargs): diff --git a/lbry/extras/daemon/storage.py b/lbry/extras/daemon/storage.py index 38e6caf68..83dc3b0eb 100644 --- a/lbry/extras/daemon/storage.py +++ b/lbry/extras/daemon/storage.py @@ -170,8 +170,8 @@ def get_all_lbry_files(transaction: sqlite3.Connection) -> typing.List[typing.Di def store_stream(transaction: sqlite3.Connection, sd_blob: 'BlobFile', descriptor: 'StreamDescriptor'): # add all blobs, except the last one, which is empty transaction.executemany( - "insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)", - ((blob.blob_hash, blob.length, 0, 0, "pending", 0, 0) + "insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?, 
?, ?)", + ((blob.blob_hash, blob.length, 0, 0, "pending", 0, 0, blob.added_on, blob.is_mine) for blob in (descriptor.blobs[:-1] if len(descriptor.blobs) > 1 else descriptor.blobs) + [sd_blob]) ).fetchall() # associate the blobs to the stream @@ -242,7 +242,9 @@ class SQLiteStorage(SQLiteMixin): should_announce integer not null default 0, status text not null, last_announced_time integer, - single_announce integer + single_announce integer, + added_on integer not null, + is_mine integer not null default 0 ); create table if not exists stream ( @@ -356,19 +358,19 @@ class SQLiteStorage(SQLiteMixin): # # # # # # # # # blob functions # # # # # # # # # - async def add_blobs(self, *blob_hashes_and_lengths: typing.Tuple[str, int], finished=False): + async def add_blobs(self, *blob_hashes_and_lengths: typing.Tuple[str, int, int, int], finished=False): def _add_blobs(transaction: sqlite3.Connection): transaction.executemany( - "insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?)", + "insert or ignore into blob values (?, ?, ?, ?, ?, ?, ?, ?, ?)", ( - (blob_hash, length, 0, 0, "pending" if not finished else "finished", 0, 0) - for blob_hash, length in blob_hashes_and_lengths + (blob_hash, length, 0, 0, "pending" if not finished else "finished", 0, 0, added_on, is_mine) + for blob_hash, length, added_on, is_mine in blob_hashes_and_lengths ) ).fetchall() if finished: transaction.executemany( "update blob set status='finished' where blob.blob_hash=?", ( - (blob_hash, ) for blob_hash, _ in blob_hashes_and_lengths + (blob_hash, ) for blob_hash, _, _, _ in blob_hashes_and_lengths ) ).fetchall() return await self.db.run(_add_blobs) @@ -435,6 +437,25 @@ class SQLiteStorage(SQLiteMixin): def get_all_blob_hashes(self): return self.run_and_return_list("select blob_hash from blob") + async def get_stored_blobs(self, is_mine): + return await self.db.execute_fetchall( + "select blob_hash, blob_length from blob where is_mine=? 
order by added_on", (is_mine,) + ) + + async def get_stored_blob_disk_usage(self, is_mine=None): + if is_mine is None: + sql, args = "select coalesce(sum(blob_length), 0) from blob", () + else: + sql, args = "select coalesce(sum(blob_length), 0) from blob where is_mine=?", (is_mine,) + return (await self.db.execute_fetchone(sql, args))[0] + + async def update_blob_ownership(self, stream_hash, is_mine: bool): + await self.db.execute_fetchall( + "update blob set is_mine = ? where blob_hash in (" + " select blob_hash from stream_blob where stream_hash = ?" + ")", (1 if is_mine else 0, stream_hash) + ) + def sync_missing_blobs(self, blob_files: typing.Set[str]) -> typing.Awaitable[typing.Set[str]]: def _sync_blobs(transaction: sqlite3.Connection) -> typing.Set[str]: finished_blob_hashes = tuple( diff --git a/tests/integration/datanetwork/test_file_commands.py b/tests/integration/datanetwork/test_file_commands.py index dccaca324..70f9c4b2c 100644 --- a/tests/integration/datanetwork/test_file_commands.py +++ b/tests/integration/datanetwork/test_file_commands.py @@ -522,9 +522,12 @@ class DiskSpaceManagement(CommandTestCase): self.assertEqual(True, status['disk_space']['running']) await self.stream_create('foo1', '0.01', data=('0' * 3 * 1024 * 1024).encode()) await self.stream_create('foo2', '0.01', data=('0' * 2 * 1024 * 1024).encode()) - self.assertEqual('5', (await self.status())['disk_space']['space_used']) + stream = await self.stream_create('foo3', '0.01', data=('0' * 2 * 1024 * 1024).encode()) + stream_hash = stream['outputs'][0]['value']['source']['hash'] + await self.daemon.storage.update_blob_ownership(stream_hash, False) + self.assertEqual('7', (await self.status())['disk_space']['space_used']) await self.blob_clean() - self.assertEqual('5', (await self.status())['disk_space']['space_used']) + self.assertEqual('7', (await self.status())['disk_space']['space_used']) self.daemon.conf.blob_storage_limit = 3 await self.blob_clean() - self.assertEqual('3', (await 
self.status())['disk_space']['space_used']) + self.assertEqual('2', (await self.status())['disk_space']['space_used']) From 60836d8523726aa6737f38a5fbfabf52b3a02e5b Mon Sep 17 00:00:00 2001 From: Lex Berezhny Date: Fri, 10 Sep 2021 10:53:52 -0400 Subject: [PATCH 2/7] db migration and other fixes --- lbry/blob/blob_file.py | 54 ++++++++++--------- lbry/blob/disk_space_manager.py | 8 +-- lbry/extras/daemon/components.py | 2 +- lbry/extras/daemon/migrator/dbmigrator.py | 2 + lbry/extras/daemon/migrator/migrate14to15.py | 16 ++++++ lbry/extras/daemon/storage.py | 16 +++--- lbry/stream/descriptor.py | 8 ++- lbry/testcase.py | 3 ++ .../datanetwork/test_file_commands.py | 45 ++++++++++++---- 9 files changed, 109 insertions(+), 45 deletions(-) create mode 100644 lbry/extras/daemon/migrator/migrate14to15.py diff --git a/lbry/blob/blob_file.py b/lbry/blob/blob_file.py index cda667561..42ed3a856 100644 --- a/lbry/blob/blob_file.py +++ b/lbry/blob/blob_file.py @@ -72,14 +72,15 @@ class AbstractBlob: 'verified', 'writing', 'readers', - 'is_mine', 'added_on', + 'is_mine', ] - def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None, - blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None, - blob_directory: typing.Optional[str] = None, is_mine: bool = False, - added_on: typing.Optional[int] = None): + def __init__( + self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None, + blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None, + blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False, + ): self.loop = loop self.blob_hash = blob_hash self.length = length @@ -89,8 +90,8 @@ class AbstractBlob: self.verified: asyncio.Event = asyncio.Event(loop=self.loop) self.writing: asyncio.Event = asyncio.Event(loop=self.loop) self.readers: typing.List[typing.BinaryIO] = [] - 
self.is_mine = is_mine self.added_on = added_on or time.time() + self.is_mine = is_mine if not is_valid_blobhash(blob_hash): raise InvalidBlobHashError(blob_hash) @@ -186,20 +187,21 @@ class AbstractBlob: @classmethod async def create_from_unencrypted( - cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes, - unencrypted: bytes, blob_num: int, - blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None) -> BlobInfo: + cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes, + unencrypted: bytes, blob_num: int, added_on: int, is_mine: bool, + blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None, + ) -> BlobInfo: """ Create an encrypted BlobFile from plaintext bytes """ blob_bytes, blob_hash = encrypt_blob_bytes(key, iv, unencrypted) length = len(blob_bytes) - blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir) + blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir, added_on, is_mine) writer = blob.get_blob_writer() writer.write(blob_bytes) await blob.verified.wait() - return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), blob_hash) + return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), blob_hash, added_on, is_mine) def save_verified_blob(self, verified_bytes: bytes): if self.verified.is_set(): @@ -254,11 +256,13 @@ class BlobBuffer(AbstractBlob): """ An in-memory only blob """ - def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None, - blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None, - blob_directory: typing.Optional[str] = None): + def __init__( + self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None, + blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None, + blob_directory: 
typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False + ): self._verified_bytes: typing.Optional[BytesIO] = None - super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory) + super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine) @contextlib.contextmanager def _reader_context(self) -> typing.ContextManager[typing.BinaryIO]: @@ -295,10 +299,12 @@ class BlobFile(AbstractBlob): """ A blob existing on the local file system """ - def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None, - blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None, - blob_directory: typing.Optional[str] = None): - super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory) + def __init__( + self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None, + blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None, + blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False + ): + super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine) if not blob_directory or not os.path.isdir(blob_directory): raise OSError(f"invalid blob directory '{blob_directory}'") self.file_path = os.path.join(self.blob_directory, self.blob_hash) @@ -349,12 +355,12 @@ class BlobFile(AbstractBlob): @classmethod async def create_from_unencrypted( - cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes, - unencrypted: bytes, blob_num: int, - blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], - asyncio.Task]] = None) -> BlobInfo: + cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes, + unencrypted: bytes, blob_num: int, added_on: int, is_mine: bool, + 
blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None + ) -> BlobInfo: if not blob_dir or not os.path.isdir(blob_dir): raise OSError(f"cannot create blob in directory: '{blob_dir}'") return await super().create_from_unencrypted( - loop, blob_dir, key, iv, unencrypted, blob_num, blob_completed_callback + loop, blob_dir, key, iv, unencrypted, blob_num, added_on, is_mine, blob_completed_callback ) diff --git a/lbry/blob/disk_space_manager.py b/lbry/blob/disk_space_manager.py index 2a19180dc..4ed445129 100644 --- a/lbry/blob/disk_space_manager.py +++ b/lbry/blob/disk_space_manager.py @@ -25,12 +25,14 @@ class DiskSpaceManager: if not self.config.blob_storage_limit: return 0 delete = [] - available = (self.config.blob_storage_limit*1024*1024) - await self.db.get_stored_blob_disk_usage() - for blob_hash, file_size in await self.db.get_stored_blobs(is_mine=False): + available = (self.config.blob_storage_limit*1024*1024) - await self.get_space_used_bytes() + if available > 0: + return 0 + for blob_hash, file_size, added_on in await self.db.get_stored_blobs(is_mine=False): + delete.append(blob_hash) available += file_size if available > 0: break - delete.append(blob_hash) if delete: await self.blob_manager.delete_blobs(delete, delete_from_db=True) return len(delete) diff --git a/lbry/extras/daemon/components.py b/lbry/extras/daemon/components.py index 18d846375..e38d4835f 100644 --- a/lbry/extras/daemon/components.py +++ b/lbry/extras/daemon/components.py @@ -61,7 +61,7 @@ class DatabaseComponent(Component): @staticmethod def get_current_db_revision(): - return 14 + return 15 @property def revision_filename(self): diff --git a/lbry/extras/daemon/migrator/dbmigrator.py b/lbry/extras/daemon/migrator/dbmigrator.py index 726cc1974..a2b1211e7 100644 --- a/lbry/extras/daemon/migrator/dbmigrator.py +++ b/lbry/extras/daemon/migrator/dbmigrator.py @@ -35,6 +35,8 @@ def migrate_db(conf, start, end): from .migrate12to13 import do_migration 
elif current == 13: from .migrate13to14 import do_migration + elif current == 14: + from .migrate14to15 import do_migration else: raise Exception(f"DB migration of version {current} to {current+1} is not available") try: diff --git a/lbry/extras/daemon/migrator/migrate14to15.py b/lbry/extras/daemon/migrator/migrate14to15.py new file mode 100644 index 000000000..a0b7257e3 --- /dev/null +++ b/lbry/extras/daemon/migrator/migrate14to15.py @@ -0,0 +1,16 @@ +import os +import sqlite3 + + +def do_migration(conf): + db_path = os.path.join(conf.data_dir, "lbrynet.sqlite") + connection = sqlite3.connect(db_path) + cursor = connection.cursor() + + cursor.executescript(""" + alter table blob add column added_on integer not null default 0; + alter table blob add column is_mine integer not null default 0; + """) + + connection.commit() + connection.close() diff --git a/lbry/extras/daemon/storage.py b/lbry/extras/daemon/storage.py index 83dc3b0eb..4b51074ec 100644 --- a/lbry/extras/daemon/storage.py +++ b/lbry/extras/daemon/storage.py @@ -437,23 +437,27 @@ class SQLiteStorage(SQLiteMixin): def get_all_blob_hashes(self): return self.run_and_return_list("select blob_hash from blob") - async def get_stored_blobs(self, is_mine): + async def get_stored_blobs(self, is_mine: bool): + is_mine = 1 if is_mine else 0 return await self.db.execute_fetchall( - "select blob_hash, blob_length from blob where is_mine=? order by added_on", (is_mine,) + "select blob_hash, blob_length, added_on from blob where is_mine=? 
order by added_on asc", + (is_mine,) ) - async def get_stored_blob_disk_usage(self, is_mine=None): + async def get_stored_blob_disk_usage(self, is_mine: Optional[bool] = None): if is_mine is None: sql, args = "select coalesce(sum(blob_length), 0) from blob", () else: + is_mine = 1 if is_mine else 0 sql, args = "select coalesce(sum(blob_length), 0) from blob where is_mine=?", (is_mine,) return (await self.db.execute_fetchone(sql, args))[0] - async def update_blob_ownership(self, stream_hash, is_mine: bool): + async def update_blob_ownership(self, sd_hash, is_mine: bool): + is_mine = 1 if is_mine else 0 await self.db.execute_fetchall( "update blob set is_mine = ? where blob_hash in (" - " select blob_hash from stream_blob where stream_hash = ?" - ")", (1 if is_mine else 0, stream_hash) + " select blob_hash from blob natural join stream_blob natural join stream where sd_hash = ?" + ")", (is_mine, sd_hash) ) def sync_missing_blobs(self, blob_files: typing.Set[str]) -> typing.Awaitable[typing.Set[str]]: diff --git a/lbry/stream/descriptor.py b/lbry/stream/descriptor.py index fadabf29c..bd0cd2d97 100644 --- a/lbry/stream/descriptor.py +++ b/lbry/stream/descriptor.py @@ -4,6 +4,7 @@ import binascii import logging import typing import asyncio +import time import re from collections import OrderedDict from cryptography.hazmat.primitives.ciphers.algorithms import AES @@ -252,14 +253,17 @@ class StreamDescriptor: iv_generator = iv_generator or random_iv_generator() key = key or os.urandom(AES.block_size // 8) blob_num = -1 + added_on = time.time() async for blob_bytes in file_reader(file_path): blob_num += 1 blob_info = await BlobFile.create_from_unencrypted( - loop, blob_dir, key, next(iv_generator), blob_bytes, blob_num, blob_completed_callback + loop, blob_dir, key, next(iv_generator), blob_bytes, blob_num, added_on, True, blob_completed_callback ) blobs.append(blob_info) blobs.append( - BlobInfo(len(blobs), 0, binascii.hexlify(next(iv_generator)).decode())) # add the 
stream terminator + # add the stream terminator + BlobInfo(len(blobs), 0, binascii.hexlify(next(iv_generator)).decode(), None, added_on, True) + ) file_name = os.path.basename(file_path) suggested_file_name = sanitize_file_name(file_name) descriptor = cls( diff --git a/lbry/testcase.py b/lbry/testcase.py index 8e022399b..70fe84159 100644 --- a/lbry/testcase.py +++ b/lbry/testcase.py @@ -649,6 +649,9 @@ class CommandTestCase(IntegrationTestCase): async def transaction_list(self, *args, **kwargs): return (await self.out(self.daemon.jsonrpc_transaction_list(*args, **kwargs)))['items'] + async def blob_list(self, *args, **kwargs): + return (await self.out(self.daemon.jsonrpc_blob_list(*args, **kwargs)))['items'] + @staticmethod def get_claim_id(tx): return tx['outputs'][0]['claim_id'] diff --git a/tests/integration/datanetwork/test_file_commands.py b/tests/integration/datanetwork/test_file_commands.py index 70f9c4b2c..994ad3506 100644 --- a/tests/integration/datanetwork/test_file_commands.py +++ b/tests/integration/datanetwork/test_file_commands.py @@ -515,19 +515,46 @@ class FileCommands(CommandTestCase): class DiskSpaceManagement(CommandTestCase): + async def get_referenced_blobs(self, tx): + sd_hash = tx['outputs'][0]['value']['source']['sd_hash'] + stream_hash = await self.daemon.storage.get_stream_hash_for_sd_hash(sd_hash) + return tx['outputs'][0]['value']['source']['sd_hash'], set(await self.blob_list( + stream_hash=stream_hash + )) + async def test_file_management(self): status = await self.status() self.assertIn('disk_space', status) self.assertEqual('0', status['disk_space']['space_used']) self.assertEqual(True, status['disk_space']['running']) - await self.stream_create('foo1', '0.01', data=('0' * 3 * 1024 * 1024).encode()) - await self.stream_create('foo2', '0.01', data=('0' * 2 * 1024 * 1024).encode()) - stream = await self.stream_create('foo3', '0.01', data=('0' * 2 * 1024 * 1024).encode()) - stream_hash = stream['outputs'][0]['value']['source']['hash'] - 
await self.daemon.storage.update_blob_ownership(stream_hash, False) - self.assertEqual('7', (await self.status())['disk_space']['space_used']) + sd_hash1, blobs1 = await self.get_referenced_blobs( + await self.stream_create('foo1', '0.01', data=('0' * 3 * 1024 * 1024).encode()) + ) + sd_hash2, blobs2 = await self.get_referenced_blobs( + await self.stream_create('foo2', '0.01', data=('0' * 2 * 1024 * 1024).encode()) + ) + sd_hash3, blobs3 = await self.get_referenced_blobs( + await self.stream_create('foo3', '0.01', data=('0' * 2 * 1024 * 1024).encode()) + ) + sd_hash4, blobs4 = await self.get_referenced_blobs( + await self.stream_create('foo4', '0.01', data=('0' * 2 * 1024 * 1024).encode()) + ) + + await self.daemon.storage.update_blob_ownership(sd_hash1, False) + await self.daemon.storage.update_blob_ownership(sd_hash3, False) + await self.daemon.storage.update_blob_ownership(sd_hash4, False) + + self.assertEqual('9', (await self.status())['disk_space']['space_used']) + self.assertEqual(blobs1 | blobs2 | blobs3 | blobs4, set(await self.blob_list())) + await self.blob_clean() - self.assertEqual('7', (await self.status())['disk_space']['space_used']) - self.daemon.conf.blob_storage_limit = 3 + + self.assertEqual('9', (await self.status())['disk_space']['space_used']) + self.assertEqual(blobs1 | blobs2 | blobs3 | blobs4, set(await self.blob_list())) + + self.daemon.conf.blob_storage_limit = 5 await self.blob_clean() - self.assertEqual('2', (await self.status())['disk_space']['space_used']) + + self.assertEqual('4', (await self.status())['disk_space']['space_used']) + blobs = set(await self.blob_list()) + self.assertEqual(blobs2 | blobs4, set(await self.blob_list())) From 7264b53e5fd4cff71b8b8336b6aa2740d11c2893 Mon Sep 17 00:00:00 2001 From: Lex Berezhny Date: Wed, 15 Sep 2021 10:37:08 -0400 Subject: [PATCH 3/7] during disk clean your own sd blob is now kept and file status of deleted files is set to stopped --- lbry/blob/blob_file.py | 2 +- lbry/blob/blob_manager.py 
| 12 ++++++------ lbry/blob/disk_space_manager.py | 1 + lbry/extras/daemon/storage.py | 6 +++++- lbry/stream/descriptor.py | 16 +++++++++++----- lbry/stream/stream_manager.py | 2 +- .../datanetwork/test_file_commands.py | 19 +++++++++++-------- 7 files changed, 36 insertions(+), 22 deletions(-) diff --git a/lbry/blob/blob_file.py b/lbry/blob/blob_file.py index 42ed3a856..8b7b9ee49 100644 --- a/lbry/blob/blob_file.py +++ b/lbry/blob/blob_file.py @@ -356,7 +356,7 @@ class BlobFile(AbstractBlob): @classmethod async def create_from_unencrypted( cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes, - unencrypted: bytes, blob_num: int, added_on: int, is_mine: bool, + unencrypted: bytes, blob_num: int, added_on: float, is_mine: bool, blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None ) -> BlobInfo: if not blob_dir or not os.path.isdir(blob_dir): diff --git a/lbry/blob/blob_manager.py b/lbry/blob/blob_manager.py index c99a017b9..52441ecfb 100644 --- a/lbry/blob/blob_manager.py +++ b/lbry/blob/blob_manager.py @@ -36,30 +36,30 @@ class BlobManager: self.config.blob_lru_cache_size) self.connection_manager = ConnectionManager(loop) - def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None): + def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None, is_mine: bool = False): if self.config.save_blobs or ( is_valid_blobhash(blob_hash) and os.path.isfile(os.path.join(self.blob_dir, blob_hash))): return BlobFile( - self.loop, blob_hash, length, self.blob_completed, self.blob_dir + self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine ) return BlobBuffer( - self.loop, blob_hash, length, self.blob_completed, self.blob_dir + self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine ) - def get_blob(self, blob_hash, length: typing.Optional[int] = None): + def get_blob(self, blob_hash, length: typing.Optional[int] = None, 
is_mine: bool = False): if blob_hash in self.blobs: if self.config.save_blobs and isinstance(self.blobs[blob_hash], BlobBuffer): buffer = self.blobs.pop(blob_hash) if blob_hash in self.completed_blob_hashes: self.completed_blob_hashes.remove(blob_hash) - self.blobs[blob_hash] = self._get_blob(blob_hash, length) + self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine) if buffer.is_readable(): with buffer.reader_context() as reader: self.blobs[blob_hash].write_blob(reader.read()) if length and self.blobs[blob_hash].length is None: self.blobs[blob_hash].set_length(length) else: - self.blobs[blob_hash] = self._get_blob(blob_hash, length) + self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine) return self.blobs[blob_hash] def is_blob_verified(self, blob_hash: str, length: typing.Optional[int] = None) -> bool: diff --git a/lbry/blob/disk_space_manager.py b/lbry/blob/disk_space_manager.py index 4ed445129..7d00323a6 100644 --- a/lbry/blob/disk_space_manager.py +++ b/lbry/blob/disk_space_manager.py @@ -34,6 +34,7 @@ class DiskSpaceManager: if available > 0: break if delete: + await self.db.stop_all_files() await self.blob_manager.delete_blobs(delete, delete_from_db=True) return len(delete) diff --git a/lbry/extras/daemon/storage.py b/lbry/extras/daemon/storage.py index 4b51074ec..758c25970 100644 --- a/lbry/extras/daemon/storage.py +++ b/lbry/extras/daemon/storage.py @@ -457,7 +457,7 @@ class SQLiteStorage(SQLiteMixin): await self.db.execute_fetchall( "update blob set is_mine = ? where blob_hash in (" " select blob_hash from blob natural join stream_blob natural join stream where sd_hash = ?" - ")", (is_mine, sd_hash) + ") OR blob_hash = ?", (is_mine, sd_hash, sd_hash) ) def sync_missing_blobs(self, blob_files: typing.Set[str]) -> typing.Awaitable[typing.Set[str]]: @@ -595,6 +595,10 @@ class SQLiteStorage(SQLiteMixin): log.debug("update file status %s -> %s", stream_hash, new_status) return self.db.execute_fetchall("update file set status=? 
where stream_hash=?", (new_status, stream_hash)) + def stop_all_files(self): + log.debug("stopping all files") + return self.db.execute_fetchall("update file set status=?", ("stopped",)) + async def change_file_download_dir_and_file_name(self, stream_hash: str, download_dir: typing.Optional[str], file_name: typing.Optional[str]): if not file_name or not download_dir: diff --git a/lbry/stream/descriptor.py b/lbry/stream/descriptor.py index bd0cd2d97..b68184433 100644 --- a/lbry/stream/descriptor.py +++ b/lbry/stream/descriptor.py @@ -153,15 +153,19 @@ class StreamDescriptor: h.update(self.old_sort_json()) return h.hexdigest() - async def make_sd_blob(self, blob_file_obj: typing.Optional[AbstractBlob] = None, - old_sort: typing.Optional[bool] = False, - blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None): + async def make_sd_blob( + self, blob_file_obj: typing.Optional[AbstractBlob] = None, old_sort: typing.Optional[bool] = False, + blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None, + added_on: float = None, is_mine: bool = False + ): sd_hash = self.calculate_sd_hash() if not old_sort else self.calculate_old_sort_sd_hash() if not old_sort: sd_data = self.as_json() else: sd_data = self.old_sort_json() - sd_blob = blob_file_obj or BlobFile(self.loop, sd_hash, len(sd_data), blob_completed_callback, self.blob_dir) + sd_blob = blob_file_obj or BlobFile( + self.loop, sd_hash, len(sd_data), blob_completed_callback, self.blob_dir, added_on, is_mine + ) if blob_file_obj: blob_file_obj.set_length(len(sd_data)) if not sd_blob.get_is_verified(): @@ -269,7 +273,9 @@ class StreamDescriptor: descriptor = cls( loop, blob_dir, file_name, binascii.hexlify(key).decode(), suggested_file_name, blobs ) - sd_blob = await descriptor.make_sd_blob(old_sort=old_sort, blob_completed_callback=blob_completed_callback) + sd_blob = await descriptor.make_sd_blob( + old_sort=old_sort, 
blob_completed_callback=blob_completed_callback, added_on=added_on, is_mine=True + ) descriptor.sd_hash = sd_blob.blob_hash return descriptor diff --git a/lbry/stream/stream_manager.py b/lbry/stream/stream_manager.py index a9ce211e6..cce283473 100644 --- a/lbry/stream/stream_manager.py +++ b/lbry/stream/stream_manager.py @@ -236,7 +236,7 @@ class StreamManager(SourceManager): blob_completed_callback=self.blob_manager.blob_completed ) await self.storage.store_stream( - self.blob_manager.get_blob(descriptor.sd_hash), descriptor + self.blob_manager.get_blob(descriptor.sd_hash, is_mine=True), descriptor ) row_id = await self.storage.save_published_file( descriptor.stream_hash, os.path.basename(file_path), os.path.dirname(file_path), 0 diff --git a/tests/integration/datanetwork/test_file_commands.py b/tests/integration/datanetwork/test_file_commands.py index 994ad3506..e1e92aead 100644 --- a/tests/integration/datanetwork/test_file_commands.py +++ b/tests/integration/datanetwork/test_file_commands.py @@ -528,13 +528,13 @@ class DiskSpaceManagement(CommandTestCase): self.assertEqual('0', status['disk_space']['space_used']) self.assertEqual(True, status['disk_space']['running']) sd_hash1, blobs1 = await self.get_referenced_blobs( - await self.stream_create('foo1', '0.01', data=('0' * 3 * 1024 * 1024).encode()) + await self.stream_create('foo1', '0.01', data=('0' * 2 * 1024 * 1024).encode()) ) sd_hash2, blobs2 = await self.get_referenced_blobs( - await self.stream_create('foo2', '0.01', data=('0' * 2 * 1024 * 1024).encode()) + await self.stream_create('foo2', '0.01', data=('0' * 3 * 1024 * 1024).encode()) ) sd_hash3, blobs3 = await self.get_referenced_blobs( - await self.stream_create('foo3', '0.01', data=('0' * 2 * 1024 * 1024).encode()) + await self.stream_create('foo3', '0.01', data=('0' * 3 * 1024 * 1024).encode()) ) sd_hash4, blobs4 = await self.get_referenced_blobs( await self.stream_create('foo4', '0.01', data=('0' * 2 * 1024 * 1024).encode()) @@ -544,17 +544,20 @@ 
class DiskSpaceManagement(CommandTestCase): await self.daemon.storage.update_blob_ownership(sd_hash3, False) await self.daemon.storage.update_blob_ownership(sd_hash4, False) - self.assertEqual('9', (await self.status())['disk_space']['space_used']) + self.assertEqual('10', (await self.status())['disk_space']['space_used']) self.assertEqual(blobs1 | blobs2 | blobs3 | blobs4, set(await self.blob_list())) await self.blob_clean() - self.assertEqual('9', (await self.status())['disk_space']['space_used']) + self.assertEqual('10', (await self.status())['disk_space']['space_used']) self.assertEqual(blobs1 | blobs2 | blobs3 | blobs4, set(await self.blob_list())) - self.daemon.conf.blob_storage_limit = 5 + self.daemon.conf.blob_storage_limit = 6 await self.blob_clean() - self.assertEqual('4', (await self.status())['disk_space']['space_used']) + self.assertEqual('5', (await self.status())['disk_space']['space_used']) blobs = set(await self.blob_list()) - self.assertEqual(blobs2 | blobs4, set(await self.blob_list())) + self.assertFalse(blobs1.issubset(blobs)) + self.assertTrue(blobs2.issubset(blobs)) + self.assertFalse(blobs3.issubset(blobs)) + self.assertTrue(blobs4.issubset(blobs)) From 3e30228d954fa137a2b1261cf9c294eb4942d86d Mon Sep 17 00:00:00 2001 From: Lex Berezhny Date: Wed, 15 Sep 2021 10:49:03 -0400 Subject: [PATCH 4/7] lint --- lbry/blob/disk_space_manager.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lbry/blob/disk_space_manager.py b/lbry/blob/disk_space_manager.py index 7d00323a6..3882d5d80 100644 --- a/lbry/blob/disk_space_manager.py +++ b/lbry/blob/disk_space_manager.py @@ -1,4 +1,3 @@ -import os import asyncio import logging @@ -28,7 +27,7 @@ class DiskSpaceManager: available = (self.config.blob_storage_limit*1024*1024) - await self.get_space_used_bytes() if available > 0: return 0 - for blob_hash, file_size, added_on in await self.db.get_stored_blobs(is_mine=False): + for blob_hash, file_size, _ in await 
self.db.get_stored_blobs(is_mine=False): delete.append(blob_hash) available += file_size if available > 0: From cfe6c82a31e50115c1fd27e76d6e1bd52e7dd3da Mon Sep 17 00:00:00 2001 From: Lex Berezhny Date: Sun, 19 Sep 2021 21:38:09 -0400 Subject: [PATCH 5/7] tests --- lbry/blob/blob_info.py | 2 +- tests/integration/other/test_cli.py | 4 +-- tests/unit/blob/test_disk_space_manager.py | 30 ------------------- .../unit/components/test_component_manager.py | 6 ++-- tests/unit/database/test_SQLiteStorage.py | 2 +- .../lbrynet_daemon/test_allowed_origin.py | 4 +-- 6 files changed, 9 insertions(+), 39 deletions(-) delete mode 100644 tests/unit/blob/test_disk_space_manager.py diff --git a/lbry/blob/blob_info.py b/lbry/blob/blob_info.py index 408b6047b..7d4bc71dd 100644 --- a/lbry/blob/blob_info.py +++ b/lbry/blob/blob_info.py @@ -13,7 +13,7 @@ class BlobInfo: def __init__( self, blob_num: int, length: int, iv: str, - blob_hash: typing.Optional[str] = None, added_on=None, is_mine=None): + blob_hash: typing.Optional[str] = None, added_on=0, is_mine=False): self.blob_hash = blob_hash self.blob_num = blob_num self.length = length diff --git a/tests/integration/other/test_cli.py b/tests/integration/other/test_cli.py index 1d733899f..7de635fc6 100644 --- a/tests/integration/other/test_cli.py +++ b/tests/integration/other/test_cli.py @@ -5,7 +5,7 @@ from lbry.testcase import AsyncioTestCase from lbry.conf import Config from lbry.extras import cli from lbry.extras.daemon.components import ( - DATABASE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, + DATABASE_COMPONENT, DISK_SPACE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT, FILE_MANAGER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT, UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, WALLET_SERVER_PAYMENTS_COMPONENT, LIBTORRENT_COMPONENT @@ -21,7 +21,7 @@ class CLIIntegrationTest(AsyncioTestCase): conf.share_usage_data = False conf.api = 'localhost:5299' conf.components_to_skip = ( - 
DATABASE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, + DATABASE_COMPONENT, DISK_SPACE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT, FILE_MANAGER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT, UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, WALLET_SERVER_PAYMENTS_COMPONENT, LIBTORRENT_COMPONENT diff --git a/tests/unit/blob/test_disk_space_manager.py b/tests/unit/blob/test_disk_space_manager.py deleted file mode 100644 index 54871f092..000000000 --- a/tests/unit/blob/test_disk_space_manager.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import unittest -import tempfile - -import lbry.wallet -from lbry.conf import Config -from lbry.blob.disk_space_manager import DiskSpaceManager - - -class ConfigurationTests(unittest.TestCase): - - def test_space_management(self): - with tempfile.TemporaryDirectory() as temp_dir: - os.mkdir(os.path.join(temp_dir, 'blobfiles')) - config = Config( - blob_storage_limit=5, - data_dir=temp_dir, - wallet_dir=temp_dir, - config=os.path.join(temp_dir, 'settings.yml'), - ) - dsm = DiskSpaceManager(config) - self.assertEqual(0, dsm.space_used_mb) - for file_no in range(10): - with open(os.path.join(config.data_dir, 'blobfiles', f'3mb-{file_no}'), 'w') as blob: - blob.write('0' * 1 * 1024 * 1024) - self.assertEqual(10, dsm.space_used_mb) - self.assertTrue(dsm.clean()) - self.assertEqual(5, dsm.space_used_mb) - self.assertFalse(dsm.clean()) - diff --git a/tests/unit/components/test_component_manager.py b/tests/unit/components/test_component_manager.py index f539f033e..af7f19c33 100644 --- a/tests/unit/components/test_component_manager.py +++ b/tests/unit/components/test_component_manager.py @@ -3,7 +3,7 @@ from lbry.testcase import AsyncioTestCase, AdvanceTimeTestCase from lbry.conf import Config from lbry.extras.daemon.componentmanager import ComponentManager -from lbry.extras.daemon.components import DATABASE_COMPONENT, DHT_COMPONENT +from lbry.extras.daemon.components import 
DATABASE_COMPONENT, DISK_SPACE_COMPONENT, DHT_COMPONENT from lbry.extras.daemon.components import HASH_ANNOUNCER_COMPONENT, UPNP_COMPONENT from lbry.extras.daemon.components import PEER_PROTOCOL_SERVER_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT from lbry.extras.daemon import components @@ -15,7 +15,6 @@ class TestComponentManager(AsyncioTestCase): self.default_components_sort = [ [ components.DatabaseComponent, - components.DiskSpaceComponent, components.ExchangeRateManagerComponent, components.TorrentComponent, components.UPnPComponent @@ -26,6 +25,7 @@ class TestComponentManager(AsyncioTestCase): components.WalletComponent ], [ + components.DiskSpaceComponent, components.FileManagerComponent, components.HashAnnouncerComponent, components.PeerProtocolServerComponent, @@ -153,7 +153,7 @@ class TestComponentManagerProperStart(AdvanceTimeTestCase): self.component_manager = ComponentManager( Config(), skip_components=[ - DATABASE_COMPONENT, DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT, + DATABASE_COMPONENT, DISK_SPACE_COMPONENT, DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT, UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT], wallet=FakeDelayedWallet, diff --git a/tests/unit/database/test_SQLiteStorage.py b/tests/unit/database/test_SQLiteStorage.py index 07063aa7f..8f4d42703 100644 --- a/tests/unit/database/test_SQLiteStorage.py +++ b/tests/unit/database/test_SQLiteStorage.py @@ -81,7 +81,7 @@ class StorageTest(AsyncioTestCase): await self.storage.close() async def store_fake_blob(self, blob_hash, length=100): - await self.storage.add_blobs((blob_hash, length), finished=True) + await self.storage.add_blobs((blob_hash, length, 0, 0), finished=True) async def store_fake_stream(self, stream_hash, blobs=None, file_name="fake_file", key="DEADBEEF"): blobs = blobs or [BlobInfo(1, 100, "DEADBEEF", random_lbry_hash())] diff --git a/tests/unit/lbrynet_daemon/test_allowed_origin.py b/tests/unit/lbrynet_daemon/test_allowed_origin.py index e9fc7c247..6316db0a0 
100644 --- a/tests/unit/lbrynet_daemon/test_allowed_origin.py +++ b/tests/unit/lbrynet_daemon/test_allowed_origin.py @@ -8,7 +8,7 @@ from lbry.testcase import AsyncioTestCase from lbry.conf import Config from lbry.extras.daemon.security import is_request_allowed as allowed, ensure_request_allowed as ensure from lbry.extras.daemon.components import ( - DATABASE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, + DATABASE_COMPONENT, DISK_SPACE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT, FILE_MANAGER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT, UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, WALLET_SERVER_PAYMENTS_COMPONENT, LIBTORRENT_COMPONENT @@ -69,7 +69,7 @@ class TestAccessHeaders(AsyncioTestCase): conf.share_usage_data = False conf.api = 'localhost:5299' conf.components_to_skip = ( - DATABASE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, + DATABASE_COMPONENT, DISK_SPACE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT, FILE_MANAGER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT, UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, WALLET_SERVER_PAYMENTS_COMPONENT, LIBTORRENT_COMPONENT From 0697d60a48745ddc1126d564693b1a4ec156dde0 Mon Sep 17 00:00:00 2001 From: Lex Berezhny Date: Mon, 20 Sep 2021 09:01:35 -0400 Subject: [PATCH 6/7] coveralls still down, will have to be merged with coveralls off --- .github/workflows/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 1bc557b78..5a6d3637f 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -68,7 +68,7 @@ jobs: COVERALLS_PARALLEL: true run: | pip install coveralls - coveralls --service=github + #coveralls --service=github tests-integration: name: "tests / integration" @@ -114,7 +114,7 @@ jobs: COVERALLS_PARALLEL: true run: | coverage combine tests - coveralls --service=github + #coveralls --service=github
coverage: @@ -126,7 +126,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | pip install coveralls - coveralls --service=github --finish + #coveralls --service=github --finish build: needs: ["lint", "tests-unit", "tests-integration"] From 9b9794b5e0df45920916b4da232affdf04cb1970 Mon Sep 17 00:00:00 2001 From: Lex Berezhny Date: Mon, 20 Sep 2021 09:23:42 -0400 Subject: [PATCH 7/7] default is_mine to true during migration --- lbry/extras/daemon/migrator/migrate14to15.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lbry/extras/daemon/migrator/migrate14to15.py b/lbry/extras/daemon/migrator/migrate14to15.py index a0b7257e3..72fbfcb34 100644 --- a/lbry/extras/daemon/migrator/migrate14to15.py +++ b/lbry/extras/daemon/migrator/migrate14to15.py @@ -9,7 +9,7 @@ def do_migration(conf): cursor.executescript(""" alter table blob add column added_on integer not null default 0; - alter table blob add column is_mine integer not null default 0; + alter table blob add column is_mine integer not null default 1; """) connection.commit()