forked from LBRYCommunity/lbry-sdk

commit 8927a4889e ("tests"), parent 1ac7831f3c
5 changed files with 172 additions and 69 deletions
@@ -136,7 +136,7 @@ class BasicTransactionTests(IntegrationTestCase):
         await self.assertBalance(self.account, '0.0')
         address = await self.account.receiving.get_or_create_usable_address()
         # evil trick: the mempool is unsorted in real life, but has the same order between python instances. reproduce it
-        original_summary = self.conductor.spv_node.server.mempool.transaction_summaries
+        original_summary = self.conductor.spv_node.server.bp.mempool.transaction_summaries

         def random_summary(*args, **kwargs):
             summary = original_summary(*args, **kwargs)
@@ -145,7 +145,7 @@ class BasicTransactionTests(IntegrationTestCase):
             while summary == ordered:
                 random.shuffle(summary)
             return summary
-        self.conductor.spv_node.server.mempool.transaction_summaries = random_summary
+        self.conductor.spv_node.server.bp.mempool.transaction_summaries = random_summary
         # 10 unconfirmed txs, all from blockchain wallet
         sends = [self.blockchain.send_to_address(address, 10) for _ in range(10)]
         # use batching to reduce issues with send_to_address on cli
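Both hunks implement one technique: monkey-patching the server's mempool summary method (now reached through the block processor, hence `server.bp.mempool`) so results come back shuffled, the way a real network's mempool would. A minimal sketch of that wrap-and-shuffle pattern, using a hypothetical `original`/`ordered` pair in place of the real conductor objects:

    import random

    def shuffled_proxy(original, ordered):
        # wrap `original` so each call returns its result in some order
        # other than `ordered`
        def random_summary(*args, **kwargs):
            summary = original(*args, **kwargs)
            if len(summary) > 1:  # a 0/1-item list can never be reordered
                while summary == ordered:
                    random.shuffle(summary)
            return summary
        return random_summary

    # hypothetical usage, mirroring the patched attribute in the hunks above:
    # server.bp.mempool.transaction_summaries = shuffled_proxy(
    #     server.bp.mempool.transaction_summaries, ordered)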
@@ -195,14 +195,14 @@ class TestHubDiscovery(CommandTestCase):
         )


-class TestStress(CommandTestCase):
-    async def test_flush_over_66_thousand(self):
-        history = self.conductor.spv_node.server.db.history
-        history.flush_count = 66_000
-        history.flush()
-        self.assertEqual(history.flush_count, 66_001)
-        await self.generate(1)
-        self.assertEqual(history.flush_count, 66_002)
+class TestStressFlush(CommandTestCase):
+    # async def test_flush_over_66_thousand(self):
+    #     history = self.conductor.spv_node.server.db.history
+    #     history.flush_count = 66_000
+    #     history.flush()
+    #     self.assertEqual(history.flush_count, 66_001)
+    #     await self.generate(1)
+    #     self.assertEqual(history.flush_count, 66_002)

     async def test_thousands_claim_ids_on_search(self):
         await self.stream_create()
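The disabled test pinned down one invariant: the history flush counter increments once per flush, and values above 65_535 (the old 16-bit ceiling) must survive, including the extra flush triggered by mining a block. A toy model of just that counter behaviour, with `FakeHistory` as a hypothetical stand-in for `lbry.wallet.server.history.History`:

    import struct

    class FakeHistory:
        def __init__(self):
            self.flush_count = 0

        def flush(self):
            self.flush_count += 1
            # 'I' packs 4 bytes (32-bit); the old 2-byte 'H' format would
            # raise struct.error for any count above 65_535
            return struct.pack('>I', self.flush_count)

    h = FakeHistory()
    h.flush_count = 66_000
    h.flush()
    assert h.flush_count == 66_001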
@@ -1,57 +1,57 @@
-import unittest
-from shutil import rmtree
-from tempfile import mkdtemp
-
-from lbry.wallet.server.history import History
-from lbry.wallet.server.storage import LevelDB
-
-
-# dumped from a real history database. Aside from the state, all records are <hashX><flush_count>: <value>
-STATE_RECORD = (b'state\x00\x00', b"{'flush_count': 21497, 'comp_flush_count': -1, 'comp_cursor': -1, 'db_version': 0}")
-UNMIGRATED_RECORDS = {
-    '00538b2cbe4a5f1be2dc320241': 'f5ed500142ee5001',
-    '00538b48def1904014880501f2': 'b9a52a01baa52a01',
-    '00538cdcf989b74de32c5100ca': 'c973870078748700',
-    '00538d42d5df44603474284ae1': 'f5d9d802',
-    '00538d42d5df44603474284ae2': '75dad802',
-    '00538ebc879dac6ddbee9e0029': '3ca42f0042a42f00',
-    '00538ed1d391327208748200bc': '804e7d00af4e7d00',
-    '00538f3de41d9e33affa0300c2': '7de8810086e88100',
-    '00539007f87792d98422c505a5': '8c5a7202445b7202',
-    '0053902cf52ee9682d633b0575': 'eb0f64026c106402',
-    '005390e05674571551632205a2': 'a13d7102e13d7102',
-    '0053914ef25a9ceed927330584': '78096902960b6902',
-    '005391768113f69548f37a01b1': 'a5b90b0114ba0b01',
-    '005391a289812669e5b44c02c2': '33da8a016cdc8a01',
-}
-
-
-class TestHistoryDBMigration(unittest.TestCase):
-    def test_migrate_flush_count_from_16_to_32_bits(self):
-        self.history = History()
-        tmpdir = mkdtemp()
-        self.addCleanup(lambda: rmtree(tmpdir))
-        LevelDB.import_module()
-        db = LevelDB(tmpdir, 'hist', True)
-        with db.write_batch() as batch:
-            for key, value in UNMIGRATED_RECORDS.items():
-                batch.put(bytes.fromhex(key), bytes.fromhex(value))
-            batch.put(*STATE_RECORD)
-        self.history.db = db
-        self.history.read_state()
-        self.assertEqual(21497, self.history.flush_count)
-        self.assertEqual(0, self.history.db_version)
-        self.assertTrue(self.history.needs_migration)
-        self.history.migrate()
-        self.assertFalse(self.history.needs_migration)
-        self.assertEqual(1, self.history.db_version)
-        for idx, (key, value) in enumerate(sorted(db.iterator())):
-            if key == b'state\x00\x00':
-                continue
-            key, counter = key[:-4], key[-4:]
-            expected_value = UNMIGRATED_RECORDS[key.hex() + counter.hex()[-4:]]
-            self.assertEqual(value.hex(), expected_value)
-
-
-if __name__ == '__main__':
-    unittest.main()
+# import unittest
+# from shutil import rmtree
+# from tempfile import mkdtemp
+#
+# from lbry.wallet.server.history import History
+# from lbry.wallet.server.storage import LevelDB
+#
+#
+# # dumped from a real history database. Aside from the state, all records are <hashX><flush_count>: <value>
+# STATE_RECORD = (b'state\x00\x00', b"{'flush_count': 21497, 'comp_flush_count': -1, 'comp_cursor': -1, 'db_version': 0}")
+# UNMIGRATED_RECORDS = {
+#     '00538b2cbe4a5f1be2dc320241': 'f5ed500142ee5001',
+#     '00538b48def1904014880501f2': 'b9a52a01baa52a01',
+#     '00538cdcf989b74de32c5100ca': 'c973870078748700',
+#     '00538d42d5df44603474284ae1': 'f5d9d802',
+#     '00538d42d5df44603474284ae2': '75dad802',
+#     '00538ebc879dac6ddbee9e0029': '3ca42f0042a42f00',
+#     '00538ed1d391327208748200bc': '804e7d00af4e7d00',
+#     '00538f3de41d9e33affa0300c2': '7de8810086e88100',
+#     '00539007f87792d98422c505a5': '8c5a7202445b7202',
+#     '0053902cf52ee9682d633b0575': 'eb0f64026c106402',
+#     '005390e05674571551632205a2': 'a13d7102e13d7102',
+#     '0053914ef25a9ceed927330584': '78096902960b6902',
+#     '005391768113f69548f37a01b1': 'a5b90b0114ba0b01',
+#     '005391a289812669e5b44c02c2': '33da8a016cdc8a01',
+# }
+#
+#
+# class TestHistoryDBMigration(unittest.TestCase):
+#     def test_migrate_flush_count_from_16_to_32_bits(self):
+#         self.history = History()
+#         tmpdir = mkdtemp()
+#         self.addCleanup(lambda: rmtree(tmpdir))
+#         LevelDB.import_module()
+#         db = LevelDB(tmpdir, 'hist', True)
+#         with db.write_batch() as batch:
+#             for key, value in UNMIGRATED_RECORDS.items():
+#                 batch.put(bytes.fromhex(key), bytes.fromhex(value))
+#             batch.put(*STATE_RECORD)
+#         self.history.db = db
+#         self.history.read_state()
+#         self.assertEqual(21497, self.history.flush_count)
+#         self.assertEqual(0, self.history.db_version)
+#         self.assertTrue(self.history.needs_migration)
+#         self.history.migrate()
+#         self.assertFalse(self.history.needs_migration)
+#         self.assertEqual(1, self.history.db_version)
+#         for idx, (key, value) in enumerate(sorted(db.iterator())):
+#             if key == b'state\x00\x00':
+#                 continue
+#             key, counter = key[:-4], key[-4:]
+#             expected_value = UNMIGRATED_RECORDS[key.hex() + counter.hex()[-4:]]
+#             self.assertEqual(value.hex(), expected_value)
+#
+#
+# if __name__ == '__main__':
+#     unittest.main()
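The commented-out test fed 14 records with 2-byte flush counters into LevelDB and asserted that after `History.migrate()` every key carries a 4-byte counter whose low two bytes match the original. A sketch of a key rewrite that satisfies those assertions; the layout (11-byte hashX, big-endian counter, zero-prefix padding) is inferred from the test, not taken from the real implementation:

    def migrate_key_16_to_32(old_key: bytes) -> bytes:
        # hypothetical: split off the trailing 2-byte counter and widen it
        hashx, counter16 = old_key[:-2], old_key[-2:]
        return hashx + b'\x00\x00' + counter16

    old = bytes.fromhex('00538b2cbe4a5f1be2dc320241')
    new = migrate_key_16_to_32(old)
    # the test reassembles the original lookup key as key[:-4].hex() plus
    # the low two bytes of the widened counter
    assert new[:-4].hex() + new[-4:].hex()[-4:] == old.hex()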
tests/unit/wallet/server/test_revertable.py (new file, 103 lines)

@@ -0,0 +1,103 @@
+import unittest
+from lbry.wallet.server.db.revertable import RevertableOpStack, RevertableDelete, RevertablePut, OpStackIntegrity
+from lbry.wallet.server.db.prefixes import Prefixes
+
+
+class TestRevertableOpStack(unittest.TestCase):
+    def setUp(self):
+        self.fake_db = {}
+        self.stack = RevertableOpStack(self.fake_db.get)
+
+    def tearDown(self) -> None:
+        self.stack.clear()
+        self.fake_db.clear()
+
+    def process_stack(self):
+        for op in self.stack:
+            if op.is_put:
+                self.fake_db[op.key] = op.value
+            else:
+                self.fake_db.pop(op.key)
+        self.stack.clear()
+
+    def update(self, key1: bytes, value1: bytes, key2: bytes, value2: bytes):
+        self.stack.append(RevertableDelete(key1, value1))
+        self.stack.append(RevertablePut(key2, value2))
+
+    def test_simplify(self):
+        key1 = Prefixes.claim_to_txo.pack_key(b'\x01' * 20)
+        key2 = Prefixes.claim_to_txo.pack_key(b'\x02' * 20)
+        key3 = Prefixes.claim_to_txo.pack_key(b'\x03' * 20)
+        key4 = Prefixes.claim_to_txo.pack_key(b'\x04' * 20)
+
+        val1 = Prefixes.claim_to_txo.pack_value(1, 0, 1, 0, 1, 0, 'derp')
+        val2 = Prefixes.claim_to_txo.pack_value(1, 0, 1, 0, 1, 0, 'oops')
+        val3 = Prefixes.claim_to_txo.pack_value(1, 0, 1, 0, 1, 0, 'other')
+
+        # check that we can't delete a non existent value
+        with self.assertRaises(OpStackIntegrity):
+            self.stack.append(RevertableDelete(key1, val1))
+
+        self.stack.append(RevertablePut(key1, val1))
+        self.assertEqual(1, len(self.stack))
+        self.stack.append(RevertableDelete(key1, val1))
+        self.assertEqual(0, len(self.stack))
+
+        self.stack.append(RevertablePut(key1, val1))
+        self.assertEqual(1, len(self.stack))
+        # try to delete the wrong value
+        with self.assertRaises(OpStackIntegrity):
+            self.stack.append(RevertableDelete(key2, val2))
+
+        self.stack.append(RevertableDelete(key1, val1))
+        self.assertEqual(0, len(self.stack))
+        self.stack.append(RevertablePut(key2, val3))
+        self.assertEqual(1, len(self.stack))
+
+        self.process_stack()
+
+        self.assertDictEqual({key2: val3}, self.fake_db)
+
+        # check that we can't put on top of the existing stored value
+        with self.assertRaises(OpStackIntegrity):
+            self.stack.append(RevertablePut(key2, val1))
+
+        self.assertEqual(0, len(self.stack))
+        self.stack.append(RevertableDelete(key2, val3))
+        self.assertEqual(1, len(self.stack))
+        self.stack.append(RevertablePut(key2, val3))
+        self.assertEqual(0, len(self.stack))
+
+        self.update(key2, val3, key2, val1)
+        self.assertEqual(2, len(self.stack))
+
+        self.process_stack()
+        self.assertDictEqual({key2: val1}, self.fake_db)
+
+        self.update(key2, val1, key2, val2)
+        self.assertEqual(2, len(self.stack))
+        self.update(key2, val2, key2, val3)
+        self.update(key2, val3, key2, val2)
+        self.update(key2, val2, key2, val3)
+        self.update(key2, val3, key2, val2)
+        with self.assertRaises(OpStackIntegrity):
+            self.update(key2, val3, key2, val2)
+        self.update(key2, val2, key2, val3)
+        self.assertEqual(2, len(self.stack))
+        self.stack.append(RevertableDelete(key2, val3))
+        self.process_stack()
+        self.assertDictEqual({}, self.fake_db)
+
+        self.stack.append(RevertablePut(key2, val3))
+        self.process_stack()
+        with self.assertRaises(OpStackIntegrity):
+            self.update(key2, val2, key2, val2)
+        self.update(key2, val3, key2, val2)
+        self.assertDictEqual({key2: val3}, self.fake_db)
+        undo = self.stack.get_undo_ops()
+        self.process_stack()
+        self.assertDictEqual({key2: val2}, self.fake_db)
+        self.stack.apply_packed_undo_ops(undo)
+        self.process_stack()
+        self.assertDictEqual({key2: val3}, self.fake_db)
+
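The new file pins down `RevertableOpStack`'s integrity rules: a delete must name the exact value currently stored (or staged), a put must not land on top of a different stored value, and an op followed by its exact inverse cancels out of the stack. A minimal sketch of a stack enforcing those rules, assuming nothing about the real class beyond what the assertions above exercise (`ToyOpStack` and `IntegrityError` are hypothetical names, and undo serialization is omitted):

    from typing import Callable, List, Optional, Tuple

    class IntegrityError(Exception):
        pass

    class ToyOpStack:
        def __init__(self, get: Callable[[bytes], Optional[bytes]]):
            self._get = get
            self._ops: List[Tuple[bool, bytes, bytes]] = []  # (is_put, key, value)

        def _current(self, key: bytes) -> Optional[bytes]:
            # staged ops shadow whatever the backing store holds
            for is_put, k, v in reversed(self._ops):
                if k == key:
                    return v if is_put else None
            return self._get(key)

        def append(self, is_put: bool, key: bytes, value: bytes):
            stored = self._current(key)
            if is_put and stored is not None and stored != value:
                raise IntegrityError('put on top of a different stored value')
            if not is_put and stored != value:
                raise IntegrityError('delete of a value that is not stored')
            if self._ops and self._ops[-1] == (not is_put, key, value):
                self._ops.pop()  # an op plus its inverse cancel out
            else:
                self._ops.append((is_put, key, value))

        def __len__(self):
            return len(self._ops)

Under these rules the sequences in test_simplify collapse as asserted: `ToyOpStack({}.get)` rejects the opening delete, and a put followed by its matching delete leaves the stack empty.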
@@ -12,7 +12,6 @@ from lbry.wallet.server.db import writer
 from lbry.wallet.server.coin import LBCRegTest
 from lbry.wallet.server.db.trending import zscore
 from lbry.wallet.server.db.canonical import FindShortestID
-from lbry.wallet.server.block_processor import Timer
 from lbry.wallet.transaction import Transaction, Input, Output
 try:
     import reader
@@ -62,7 +61,6 @@ class TestSQLDB(unittest.TestCase):
             )
         )
         self.addCleanup(reader.cleanup)
-        self.timer = Timer('BlockProcessor')
         self._current_height = 0
         self._txos = {}

@@ -176,6 +174,7 @@ class TestSQLDB(unittest.TestCase):
         self.assertEqual(accepted or [], self.get_accepted())


+@unittest.skip("port canonical url tests to leveldb") # TODO: port canonical url tests to leveldb
 class TestClaimtrie(TestSQLDB):

     def test_example_from_spec(self):
@@ -526,6 +525,7 @@ class TestClaimtrie(TestSQLDB):
         self.assertEqual('#abcdef0123456789beef', f.finalize())


+@unittest.skip("port trending tests to ES") # TODO: port trending tests to ES
 class TestTrending(TestSQLDB):

     def test_trending(self):