import asyncio
import time
import typing
import unittest

from lbry.testcase import AsyncioTestCase
from lbry.conf import Config
from lbry.dht import constants
from lbry.dht.node import Node
from lbry.dht.peer import PeerManager, make_kademlia_peer
from lbry.extras.daemon.storage import SQLiteStorage

from tests import dht_mocks


class TestBootstrapNode(AsyncioTestCase):
    """A bootstrap node must ping back every joining peer and keep all of
    them in its routing table (``is_bootstrap_node=True`` bypasses the
    usual k-bucket capacity limits)."""

    TIMEOUT = 10.0  # do not increase. Hitting a timeout is a real failure

    async def test_bootstrap_node_adds_all_peers(self):
        # get_running_loop() is the non-deprecated way to grab the loop from
        # inside a coroutine; it is the same loop as self.loop here, so use
        # one consistent local instead of mixing `loop` and `self.loop`.
        loop = asyncio.get_running_loop()
        loop.set_debug(False)

        with dht_mocks.mock_network_loop(loop):
            advance = dht_mocks.get_time_accelerator(loop)
            self.bootstrap_node = Node(loop, PeerManager(loop), constants.generate_id(),
                                       4444, 4444, 3333, '1.2.3.4', is_bootstrap_node=True)
            self.bootstrap_node.start('1.2.3.4', [])
            # no delay before the bootstrap node pings back a new contact,
            # so the test doesn't have to advance mock time per joiner
            self.bootstrap_node.protocol.ping_queue._default_delay = 0
            self.addCleanup(self.bootstrap_node.stop)

            # start the nodes
            nodes = {}
            futs = []
            for i in range(100):
                nodes[i] = Node(loop, PeerManager(loop), constants.generate_id(i), 4444, 4444, 3333, f'1.3.3.{i}')
                nodes[i].start(f'1.3.3.{i}', [('1.2.3.4', 4444)])
                self.addCleanup(nodes[i].stop)
                futs.append(nodes[i].joined.wait())
            await asyncio.gather(*futs)
            # drain the bootstrap node's ping queue under accelerated time
            while self.bootstrap_node.protocol.ping_queue.busy:
                await advance(1)
            # every joiner must be present, not just k per bucket
            self.assertEqual(100, len(self.bootstrap_node.protocol.routing_table.get_peers()))


class TestNodePingQueueDiscover(AsyncioTestCase):
    """Exercise the ping queue: peers pinged by a node should learn about it
    via the delayed return ping, and the refresh loop should eventually
    spread knowledge of every peer through the whole (mocked) network."""

    async def test_ping_queue_discover(self):
        # non-deprecated replacement for asyncio.get_event_loop() in a coroutine
        loop = asyncio.get_running_loop()
        loop.set_debug(False)

        peer_addresses = [
            (constants.generate_id(1), '1.2.3.1'),
            (constants.generate_id(2), '1.2.3.2'),
            (constants.generate_id(3), '1.2.3.3'),
            (constants.generate_id(4), '1.2.3.4'),
            (constants.generate_id(5), '1.2.3.5'),
            (constants.generate_id(6), '1.2.3.6'),
            (constants.generate_id(7), '1.2.3.7'),
            (constants.generate_id(8), '1.2.3.8'),
            (constants.generate_id(9), '1.2.3.9'),
        ]
        with dht_mocks.mock_network_loop(loop):
            advance = dht_mocks.get_time_accelerator(loop)
            # start the nodes
            nodes: typing.Dict[int, Node] = {
                i: Node(loop, PeerManager(loop), node_id, 4444, 4444, 3333, address)
                for i, (node_id, address) in enumerate(peer_addresses)
            }
            for i, n in nodes.items():
                n.start(peer_addresses[i][1], [])
                # register cleanup up-front so nodes are stopped even when an
                # assertion below fails (the old trailing stop-loop leaked them)
                self.addCleanup(n.stop)

            await advance(1)

            node_1 = nodes[0]

            # ping 8 nodes from node_1, this will result in a delayed return ping
            futs = []
            for i in range(1, len(peer_addresses)):
                node = nodes[i]
                assert node.protocol.node_id != node_1.protocol.node_id
                peer = make_kademlia_peer(
                    node.protocol.node_id, node.protocol.external_ip, udp_port=node.protocol.udp_port
                )
                futs.append(node_1.protocol.get_rpc_peer(peer).ping())
            await advance(3)
            replies = await asyncio.gather(*tuple(futs))
            self.assertTrue(all(map(lambda reply: reply == b"pong", replies)))

            # run for long enough for the delayed pings to have been sent by node 1
            await advance(1000)

            # verify all of the previously pinged peers have node_1 in their routing tables
            for n in nodes.values():
                peers = n.protocol.routing_table.get_peers()
                if n is node_1:
                    self.assertEqual(8, len(peers))
                # TODO: figure out why this breaks
                # else:
                #     self.assertEqual(1, len(peers))
                #     self.assertEqual((peers[0].node_id, peers[0].address, peers[0].udp_port),
                #                      (node_1.protocol.node_id, node_1.protocol.external_ip, node_1.protocol.udp_port))

            # run long enough for the refresh loop to run
            await advance(3600)

            # verify all the nodes know about each other
            for n in nodes.values():
                if n is node_1:
                    continue
                peers = n.protocol.routing_table.get_peers()
                self.assertEqual(8, len(peers))
                self.assertSetEqual(
                    {n_id[0] for n_id in peer_addresses if n_id[0] != n.protocol.node_id},
                    {c.node_id for c in peers}
                )
                self.assertSetEqual(
                    {n_addr[1] for n_addr in peer_addresses if n_addr[1] != n.protocol.external_ip},
                    {c.address for c in peers}
                )


class TestTemporarilyLosingConnection(AsyncioTestCase):
    """Simulate a node dropping off the network: its peers should be purged
    from the routing table and persisted storage on refresh, then
    rediscovered after the connection comes back."""

    # NOTE: `unittest.SkipTest` is an exception class, not a decorator.
    # Decorating with it replaced the method with a non-callable exception
    # instance, so the loader silently dropped the test instead of reporting
    # a skip. `unittest.skip(reason)` skips it visibly.
    @unittest.skip("flaky test, disabled pending investigation")
    async def test_losing_connection(self):
        async def wait_for(check_ok, insist, timeout=20):
            # Poll `check_ok` (yielding to the event loop each pass) until it
            # holds or `timeout` wall-clock seconds elapse; on timeout, run
            # `insist` so the failure surfaces as a proper assertion.
            start = time.time()
            while time.time() - start < timeout:
                if check_ok():
                    break
                await asyncio.sleep(0)
            else:
                insist()

        loop = self.loop
        loop.set_debug(False)

        peer_addresses = [
            ('1.2.3.4', 40000+i) for i in range(10)
        ]
        node_ids = [constants.generate_id(i) for i in range(10)]

        nodes = [
            Node(
                loop, PeerManager(loop), node_id, udp_port, udp_port, 3333, address,
                storage=SQLiteStorage(Config(), ":memory:", self.loop, self.loop.time)
            )
            for node_id, (address, udp_port) in zip(node_ids, peer_addresses)
        ]
        dht_network = {peer_addresses[i]: node.protocol for i, node in enumerate(nodes)}
        num_seeds = 3

        with dht_mocks.mock_network_loop(loop, dht_network):
            for i, n in enumerate(nodes):
                await n._storage.open()
                self.addCleanup(n.stop)
                # the first num_seeds addresses act as the known seed peers
                n.start(peer_addresses[i][0], peer_addresses[:num_seeds])
            await asyncio.gather(*[n.joined.wait() for n in nodes])

            node = nodes[-1]
            advance = dht_mocks.get_time_accelerator(loop)
            await advance(500)

            # Join the network, assert that at least the known peers are in RT
            self.assertTrue(node.joined.is_set())
            self.assertGreaterEqual(len(node.protocol.routing_table.get_peers()), num_seeds)

            # Refresh, so that the peers are persisted
            self.assertLessEqual(len(await node._storage.get_persisted_kademlia_peers()), num_seeds)
            await advance(4000)
            self.assertGreater(len(await node._storage.get_persisted_kademlia_peers()), num_seeds)

            # We lost internet connection - all the peers stop responding
            dht_network.pop((node.protocol.external_ip, node.protocol.udp_port))

            # The peers are cleared on refresh from RT and storage
            await advance(4000)
            self.assertListEqual([], await node._storage.get_persisted_kademlia_peers())
            await wait_for(
                lambda: len(node.protocol.routing_table.get_peers()) == 0,
                lambda: self.assertListEqual(node.protocol.routing_table.get_peers(), [])
            )

            # Reconnect
            dht_network[(node.protocol.external_ip, node.protocol.udp_port)] = node.protocol

            # Check that node reconnects at least to them
            await advance(1000)
            await wait_for(
                lambda: len(node.protocol.routing_table.get_peers()) >= num_seeds,
                lambda: self.assertGreaterEqual(len(node.protocol.routing_table.get_peers()), num_seeds)
            )