from lbrynet.blob import BlobFile
from lbrynet.p2p.Error import DownloadCanceledError, InvalidDataError

from tests.util import mk_db_and_blob_dir, rm_db_and_blob_dir, random_lbry_hash
from twisted.trial import unittest
from twisted.internet import defer


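# Tests for BlobFile: verified writes and reads, deletion, hash and length
# validation, and concurrent writers on the same blob.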
class BlobFileTest(unittest.TestCase):
    def setUp(self):
        self.db_dir, self.blob_dir = mk_db_and_blob_dir()
        self.fake_content_len = 64
        self.fake_content = b'0' * self.fake_content_len
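        # precomputed blob hash (sha384 hex digest) of the fake content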
        self.fake_content_hash = '53871b26a08e90cb62142f2a39f0b80de41792322b0ca560' \
                                 '2b6eb7b5cf067c49498a7492bb9364bbf90f40c1c5412105'

    def tearDown(self):
        rm_db_and_blob_dir(self.db_dir, self.blob_dir)

    @defer.inlineCallbacks
    def test_good_write_and_read(self):
        # test a write that should succeed
        blob_file = BlobFile(self.blob_dir, self.fake_content_hash, self.fake_content_len)
        self.assertFalse(blob_file.verified)

        writer, finished_d = blob_file.open_for_writing(peer=1)
        writer.write(self.fake_content)
        writer.close()
        out = yield finished_d
        self.assertIsInstance(out, BlobFile)
        self.assertTrue(out.verified)
        self.assertEqual(self.fake_content_len, out.get_length())

        # read from the instance used to write to, and verify content
        f = blob_file.open_for_reading()
        c = f.read()
        self.assertEqual(c, self.fake_content)
        self.assertFalse(out.is_downloading())

        # read from a newly created instance, and verify content
        del blob_file
        blob_file = BlobFile(self.blob_dir, self.fake_content_hash, self.fake_content_len)
        self.assertTrue(blob_file.verified)
        f = blob_file.open_for_reading()
        self.assertEqual(1, blob_file.readers)
        c = f.read()
        self.assertEqual(c, self.fake_content)

        # close the reader
        f.close()
        self.assertEqual(0, blob_file.readers)

    @defer.inlineCallbacks
    def test_delete(self):
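        # write a blob, delete it, then check that a fresh BlobFile for the
        # same hash starts out unverified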
        blob_file = BlobFile(self.blob_dir, self.fake_content_hash, self.fake_content_len)
        writer, finished_d = blob_file.open_for_writing(peer=1)
        writer.write(self.fake_content)
        yield finished_d
        yield blob_file.delete()

        blob_file = BlobFile(self.blob_dir, self.fake_content_hash)
        self.assertFalse(blob_file.verified)

    @defer.inlineCallbacks
    def test_delete_fail(self):
        # deletes should fail if the blob is being written to
        blob_file = BlobFile(self.blob_dir, self.fake_content_hash, self.fake_content_len)
        writer, finished_d = blob_file.open_for_writing(peer=1)
        yield self.assertFailure(blob_file.delete(), ValueError)
        writer.write(self.fake_content)
        writer.close()

        # deletes should fail if the blob is open for reading and not closed
        blob_file = BlobFile(self.blob_dir, self.fake_content_hash, self.fake_content_len)
        self.assertTrue(blob_file.verified)
        f = blob_file.open_for_reading()
        yield self.assertFailure(blob_file.delete(), ValueError)

    @defer.inlineCallbacks
    def test_too_much_write(self):
        # writing too much data should result in failure
        expected_length = 16
        content = b'0' * 32
        blob_hash = random_lbry_hash()
        blob_file = BlobFile(self.blob_dir, blob_hash, expected_length)
        writer, finished_d = blob_file.open_for_writing(peer=1)
        writer.write(content)
        yield self.assertFailure(finished_d, InvalidDataError)

    @defer.inlineCallbacks
    def test_bad_hash(self):
        # test a write that should fail because its content's hash
        # does not equal the blob_hash
        length = 64
        content = b'0' * length
        blob_hash = random_lbry_hash()
        blob_file = BlobFile(self.blob_dir, blob_hash, length)
        writer, finished_d = blob_file.open_for_writing(peer=1)
        writer.write(content)
        yield self.assertFailure(finished_d, InvalidDataError)

    @defer.inlineCallbacks
    def test_close_on_incomplete_write(self):
        # write all but 1 byte of data, then close
        blob_file = BlobFile(self.blob_dir, self.fake_content_hash, self.fake_content_len)
        writer, finished_d = blob_file.open_for_writing(peer=1)
        writer.write(self.fake_content[:self.fake_content_len - 1])
        writer.close()
        yield self.assertFailure(finished_d, DownloadCanceledError)

        # writes after close will raise an IOError
        with self.assertRaises(IOError):
            writer.write(self.fake_content)

        # another call to close will do nothing
        writer.close()

        # the file should not exist, since we did not finish the write
        blob_file_2 = BlobFile(self.blob_dir, self.fake_content_hash, self.fake_content_len)
        out = blob_file_2.open_for_reading()
        self.assertIsNone(out)

    @defer.inlineCallbacks
    def test_multiple_writers(self):
        # start the first writer and write halfway, then start the second
        # writer and write everything
        blob_hash = self.fake_content_hash
        blob_file = BlobFile(self.blob_dir, blob_hash, self.fake_content_len)
        writer_1, finished_d_1 = blob_file.open_for_writing(peer=1)
        writer_1.write(self.fake_content[:self.fake_content_len // 2])

        writer_2, finished_d_2 = blob_file.open_for_writing(peer=2)
        writer_2.write(self.fake_content)
        out_2 = yield finished_d_2
        yield self.assertFailure(finished_d_1, DownloadCanceledError)

        self.assertIsInstance(out_2, BlobFile)
        self.assertTrue(out_2.verified)
        self.assertEqual(self.fake_content_len, out_2.get_length())

        f = blob_file.open_for_reading()
        c = f.read()
        self.assertEqual(self.fake_content_len, len(c))
        self.assertEqual(c, self.fake_content)

    @defer.inlineCallbacks
    def test_multiple_writers_save_at_same_time(self):
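        # two writers race to save the same content; the first save wins and
        # the second is treated as a canceled download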
        blob_hash = self.fake_content_hash
        blob_file = BlobFile(self.blob_dir, blob_hash, self.fake_content_len)
        writer_1, finished_d_1 = blob_file.open_for_writing(peer=1)
        writer_2, finished_d_2 = blob_file.open_for_writing(peer=2)

        blob_file.save_verified_blob(writer_1)
        # second write should fail to save
        yield self.assertFailure(blob_file.save_verified_blob(writer_2), DownloadCanceledError)

        # schedule a close, just to leave the reactor clean
        finished_d_1.addBoth(lambda x: None)
        finished_d_2.addBoth(lambda x: None)
        self.addCleanup(writer_1.close)
        self.addCleanup(writer_2.close)