lbry-sdk/tests/unit/stream/test_managed_stream.py

190 lines
8.5 KiB
Python
Raw Normal View History

2019-03-31 19:42:27 +02:00
import os
import shutil
import unittest
from unittest import mock
import asyncio
from lbry.blob.blob_file import MAX_BLOB_SIZE
from lbry.blob_exchange.serialization import BlobResponse
from lbry.blob_exchange.server import BlobServerProtocol
from lbry.dht.node import Node
2019-10-01 02:00:10 +02:00
from lbry.dht.peer import make_kademlia_peer
from lbry.stream.managed_stream import ManagedStream
from lbry.stream.descriptor import StreamDescriptor
2019-03-31 19:42:27 +02:00
from tests.unit.blob_exchange.test_transfer_blob import BlobExchangeTestBase
class TestManagedStream(BlobExchangeTestBase):
    """Unit tests for ManagedStream: transfer from a local blob server,
    file-name sanitization, status/completion tracking, retry on a bad peer,
    chunked (fragmented) server responses, and local blob decryption."""

    async def create_stream(self, blob_count: int = 10, file_name='test_file'):
        """Write `blob_count` full blobs of random data into the server dir and
        build a stream descriptor for it.

        Records the plaintext on ``self.stream_bytes`` and the descriptor hash
        on ``self.sd_hash``; returns the created StreamDescriptor.
        """
        self.stream_bytes = b''
        for _ in range(blob_count):
            # MAX_BLOB_SIZE - 1 payload bytes fill exactly one blob each
            self.stream_bytes += os.urandom(MAX_BLOB_SIZE - 1)
        # create the stream
        file_path = os.path.join(self.server_dir, file_name)
        with open(file_path, 'wb') as f:
            f.write(self.stream_bytes)
        descriptor = await StreamDescriptor.create_stream(self.loop, self.server_blob_manager.blob_dir, file_path)
        self.sd_hash = descriptor.calculate_sd_hash()
        return descriptor

    async def setup_stream(self, blob_count: int = 10):
        """Create a server-side stream and a client ManagedStream pointed at it."""
        await self.create_stream(blob_count)
        self.stream = ManagedStream(
            self.loop, self.client_config, self.client_blob_manager, self.sd_hash, self.client_dir
        )

    async def test_client_sanitizes_file_name(self):
        # characters like <, >, ?, :, | are illegal on common filesystems and
        # must be stripped from the suggested file name before writing
        illegal_name = 't<?t_f:|<'
        descriptor = await self.create_stream(file_name=illegal_name)
        descriptor.suggested_file_name = illegal_name
        self.stream = ManagedStream(
            self.loop, self.client_config, self.client_blob_manager, self.sd_hash, self.client_dir
        )
        await self._test_transfer_stream(10, skip_setup=True)
        self.assertTrue(self.stream.completed)
        self.assertEqual(self.stream.file_name, 'tt_f')
        self.assertTrue(self.stream.output_file_exists)
        self.assertTrue(os.path.isfile(self.stream.full_path))
        self.assertEqual(self.stream.full_path, os.path.join(self.client_dir, 'tt_f'))
        self.assertTrue(os.path.isfile(os.path.join(self.client_dir, 'tt_f')))

    async def test_status_file_completed(self):
        await self._test_transfer_stream(10)
        self.assertTrue(self.stream.output_file_exists)
        self.assertTrue(self.stream.completed)
        # truncating the output file flips `completed` off while the file
        # itself still exists on disk
        with open(self.stream.full_path, 'w+b') as outfile:
            outfile.truncate(1)
        self.assertTrue(self.stream.output_file_exists)
        self.assertFalse(self.stream.completed)

    async def _test_transfer_stream(self, blob_count: int, mock_accumulate_peers=None, stop_when_done=True,
                                    skip_setup=False):
        """Download the stream from the in-process server peer and verify the
        decrypted output file matches the original plaintext bytes."""
        if not skip_setup:
            await self.setup_stream(blob_count)
        mock_node = mock.Mock(spec=Node)

        def _mock_accumulate_peers(q1, q2):
            # stand-in for DHT peer discovery: immediately hand the client
            # our local server peer
            async def _task():
                pass
            q2.put_nowait([self.server_from_client])
            return q2, self.loop.create_task(_task())

        mock_node.accumulate_peers = mock_accumulate_peers or _mock_accumulate_peers
        self.stream.downloader.node = mock_node
        await self.stream.save_file()
        await self.stream.finished_write_attempt.wait()
        self.assertTrue(os.path.isfile(self.stream.full_path))
        if stop_when_done:
            await self.stream.stop()
        self.assertTrue(os.path.isfile(self.stream.full_path))
        with open(self.stream.full_path, 'rb') as f:
            self.assertEqual(f.read(), self.stream_bytes)
        # give pending callbacks a chance to run before the test tears down
        await asyncio.sleep(0.01)

    async def test_transfer_stream(self):
        await self._test_transfer_stream(10)
        self.assertEqual(self.stream.status, "finished")
        self.assertFalse(self.stream._running.is_set())

    async def test_delayed_stop(self):
        # with stop_when_done=False the stream keeps running briefly after
        # finishing, then shuts itself down
        await self._test_transfer_stream(10, stop_when_done=False)
        self.assertEqual(self.stream.status, "finished")
        self.assertTrue(self.stream._running.is_set())
        # NOTE: the `loop` kwarg to asyncio.sleep was deprecated in 3.8 and
        # removed in 3.10; the running loop is used implicitly
        await asyncio.sleep(0.5)
        self.assertTrue(self.stream._running.is_set())
        await asyncio.sleep(2)
        self.assertEqual(self.stream.status, "finished")
        self.assertFalse(self.stream._running.is_set())

    # unittest.SkipTest is an exception class, not a decorator; use the
    # supported @unittest.skip(reason) API to skip this slow test
    @unittest.skip("slow: transfers a hundred blob stream")
    async def test_transfer_hundred_blob_stream(self):
        await self._test_transfer_stream(100)

    async def test_transfer_stream_bad_first_peer_good_second(self):
        await self.setup_stream(2)
        mock_node = mock.Mock(spec=Node)
        bad_peer = make_kademlia_peer(b'2' * 48, "127.0.0.1", tcp_port=3334, allow_localhost=True)

        def _mock_accumulate_peers(q1, q2):
            # hand out the unreachable peer first, then the good peer one
            # second later — the download must recover and succeed
            async def _task():
                pass
            q2.put_nowait([bad_peer])
            self.loop.call_later(1, q2.put_nowait, [self.server_from_client])
            return q2, self.loop.create_task(_task())

        mock_node.accumulate_peers = _mock_accumulate_peers
        self.stream.downloader.node = mock_node
        await self.stream.save_file()
        await self.stream.finished_writing.wait()
        self.assertTrue(os.path.isfile(self.stream.full_path))
        with open(self.stream.full_path, 'rb') as f:
            self.assertEqual(f.read(), self.stream_bytes)
        await self.stream.stop()
        # self.assertIs(self.server_from_client.tcp_last_down, None)
        # self.assertIsNot(bad_peer.tcp_last_down, None)

    async def test_client_chunked_response(self):
        self.server.stop_server()

        class ChunkedServerProtocol(BlobServerProtocol):
            def send_response(self, responses):
                to_send = []
                while responses:
                    to_send.append(responses.pop())
                # write one byte at a time so the client must reassemble a
                # maximally fragmented response
                for byte in BlobResponse(to_send).serialize():
                    self.transport.write(bytes([byte]))

        self.server.server_protocol_class = ChunkedServerProtocol
        self.server.start_server(33333, '127.0.0.1')
        self.assertEqual(0, len(self.client_blob_manager.completed_blob_hashes))
        await asyncio.wait_for(self._test_transfer_stream(10), timeout=2)
        # 10 content blobs plus the sd blob
        self.assertEqual(11, len(self.client_blob_manager.completed_blob_hashes))

    async def test_create_and_decrypt_one_blob_stream(self, blobs=1, corrupt=False):
        """Copy blobs directly into the client blob dir (no network) and check
        the stream decrypts correctly; with corrupt=True, truncate a full blob
        and check no output file is produced."""
        descriptor = await self.create_stream(blobs)
        # copy blob files
        shutil.copy(os.path.join(self.server_blob_manager.blob_dir, self.sd_hash),
                    os.path.join(self.client_blob_manager.blob_dir, self.sd_hash))
        self.stream = ManagedStream(self.loop, self.client_config, self.client_blob_manager, self.sd_hash,
                                    self.client_dir)

        # descriptor.blobs[:-1] skips the terminating stream blob
        for blob_info in descriptor.blobs[:-1]:
            shutil.copy(os.path.join(self.server_blob_manager.blob_dir, blob_info.blob_hash),
                        os.path.join(self.client_blob_manager.blob_dir, blob_info.blob_hash))
            if corrupt and blob_info.length == MAX_BLOB_SIZE:
                with open(os.path.join(self.client_blob_manager.blob_dir, blob_info.blob_hash), "rb+") as handle:
                    handle.truncate()
                    handle.flush()
        await self.stream.save_file()
        await self.stream.finished_writing.wait()
        if corrupt:
            return self.assertFalse(os.path.isfile(os.path.join(self.client_dir, "test_file")))

        with open(os.path.join(self.client_dir, "test_file"), "rb") as f:
            decrypted = f.read()
        self.assertEqual(decrypted, self.stream_bytes)
        self.assertTrue(self.client_blob_manager.get_blob(self.sd_hash).get_is_verified())
        self.assertEqual(
            True, self.client_blob_manager.get_blob(self.stream.descriptor.blobs[0].blob_hash).get_is_verified()
        )
        #
        # # its all blobs + sd blob - last blob, which is the same size as descriptor.blobs
        # self.assertEqual(len(descriptor.blobs), len(await downloader_storage.get_all_finished_blobs()))
        # self.assertEqual(
        #     [descriptor.sd_hash, descriptor.blobs[0].blob_hash], await downloader_storage.get_blobs_to_announce()
        # )
        #
        # await downloader_storage.close()
        # await self.storage.close()

    async def test_create_and_decrypt_multi_blob_stream(self):
        await self.test_create_and_decrypt_one_blob_stream(10)

    # async def test_create_truncate_and_handle_stream(self):
    #     # The purpose of this test is just to make sure it can finish even if a blob is corrupt/truncated
    #     await asyncio.wait_for(self.test_create_and_decrypt_one_blob_stream(corrupt=True), timeout=5)