import asyncio
import logging
from binascii import hexlify

from lbry.extras.daemon.storage import SQLiteStorage
from lbry.conf import Config
from lbry.dht import constants
from lbry.dht.node import Node
from lbry.dht import peer as dht_peer
from lbry.dht.peer import PeerManager, make_kademlia_peer
from torba.testcase import AsyncioTestCase


class DHTIntegrationTest(AsyncioTestCase):
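    """Integration tests that run a small in-process DHT and exercise joining, announcing and routing table maintenance."""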

    async def asyncSetUp(self):
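        """Allow localhost peers, quiet noisy loggers and reset the per-test node lists."""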
        dht_peer.ALLOW_LOCALHOST = True
        self.addCleanup(setattr, dht_peer, 'ALLOW_LOCALHOST', False)
        logging.getLogger('asyncio').setLevel(logging.ERROR)
        logging.getLogger('lbry.dht').setLevel(logging.WARN)
        self.nodes = []
        self.known_node_addresses = []

    async def create_node(self, node_id, port, external_ip='127.0.0.1'):
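        """Create a single DHT node backed by in-memory storage, with short timeouts so tests settle quickly."""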
        storage = SQLiteStorage(Config(), ":memory:", self.loop, self.loop.time)
        await storage.open()
        node = Node(self.loop, PeerManager(self.loop), node_id=node_id,
                    udp_port=port, internal_udp_port=port,
                    peer_port=3333, external_ip=external_ip,
                    storage=storage)
        self.addCleanup(node.stop)
        node.protocol.rpc_timeout = .5
        node.protocol.ping_queue._default_delay = .5
        node._peer_search_timeout = .5
        return node

    async def setup_network(self, size: int, start_port=40000, seed_nodes=1, external_ip='127.0.0.1'):
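        """Create `size` nodes on consecutive ports and start each one, seeded from the first `seed_nodes` known addresses."""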
        for i in range(size):
            node_port = start_port + i
            node_id = constants.generate_id(i)
            node = await self.create_node(node_id, node_port)
            self.nodes.append(node)
            self.known_node_addresses.append((external_ip, node_port))

        for node in self.nodes:
            node.start(external_ip, self.known_node_addresses[:seed_nodes])

    async def test_replace_bad_nodes(self):
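        """Unresponsive peers are replaced by good ones when the node refreshes its routing table."""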
        await self.setup_network(20)
        await asyncio.gather(*[node.joined.wait() for node in self.nodes])
        self.assertEqual(len(self.nodes), 20)
        node = self.nodes[0]
        bad_peers = []
        for candidate in self.nodes[1:10]:
            address, port, node_id = candidate.protocol.external_ip, candidate.protocol.udp_port, candidate.protocol.node_id
            peer = make_kademlia_peer(node_id, address, udp_port=port)
            bad_peers.append(peer)
            node.protocol.add_peer(peer)
            candidate.stop()
        await asyncio.sleep(.3)  # let pending events settle
        for bad_peer in bad_peers:
            self.assertIn(bad_peer, node.protocol.routing_table.get_peers())
        await node.refresh_node(True)
        await asyncio.sleep(.3)  # let pending events settle
        good_nodes = {good_node.protocol.node_id for good_node in self.nodes[10:]}
        for peer in node.protocol.routing_table.get_peers():
            self.assertIn(peer.node_id, good_nodes)

    async def test_re_join(self):
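        """A node whose routing table was emptied rejoins once the rest of the network comes back online."""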
        await self.setup_network(20, seed_nodes=10)
        await asyncio.gather(*[node.joined.wait() for node in self.nodes])
        node = self.nodes[-1]
        self.assertTrue(node.joined.is_set())
        self.assertTrue(node.protocol.routing_table.get_peers())
        for network_node in self.nodes[:-1]:
            network_node.stop()
        await node.refresh_node(True)
        await asyncio.sleep(.3)  # let pending events settle
        self.assertFalse(node.protocol.routing_table.get_peers())
        for network_node in self.nodes[:-1]:
            network_node.start('127.0.0.1', self.known_node_addresses)
        self.assertFalse(node.protocol.routing_table.get_peers())
        timeout = 20
        while not node.protocol.routing_table.get_peers():
            await asyncio.sleep(.1)
            timeout -= 1
            if not timeout:
                self.fail("node didn't join back after 2 seconds")

    async def test_announce_no_peers(self):
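        """Announcing a blob on a single-node network returns no peers."""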
        await self.setup_network(1)
        node = self.nodes[0]
        blob_hash = hexlify(constants.generate_id(1337)).decode()
        peers = await node.announce_blob(blob_hash)
        self.assertEqual(len(peers), 0)

    async def test_get_token_on_announce(self):
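        """Announcing still succeeds when the cached token is cleared or the remote node rotates its token."""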
        await self.setup_network(2, seed_nodes=2)
        await asyncio.gather(*[node.joined.wait() for node in self.nodes])
        node1, node2 = self.nodes
        node1.protocol.peer_manager.clear_token(node2.protocol.node_id)
        blob_hash = hexlify(constants.generate_id(1337)).decode()
        node_ids = await node1.announce_blob(blob_hash)
        self.assertIn(node2.protocol.node_id, node_ids)
        node2.protocol.node_rpc.refresh_token()
        node_ids = await node1.announce_blob(blob_hash)
        self.assertIn(node2.protocol.node_id, node_ids)
        node2.protocol.node_rpc.refresh_token()
        node_ids = await node1.announce_blob(blob_hash)
        self.assertIn(node2.protocol.node_id, node_ids)

    async def test_peer_search_removes_bad_peers(self):
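        """A peer search removes unresponsive peers from the routing table instead of waiting for the next refresh."""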
        # Edge case discovered by Tom, but an important one:
        # if the routing table holds only bad peers and the next refresh is an hour away,
        # we should not fail for that whole hour; scheduling pings to the bad peers we
        # find during a search lets us recover sooner.
        await self.setup_network(2, seed_nodes=2)
        await asyncio.gather(*[node.joined.wait() for node in self.nodes])
        node1, node2 = self.nodes
        node2.stop()
        # forcefully make it a bad peer but don't remove it from the routing table
        address, port, node_id = node2.protocol.external_ip, node2.protocol.udp_port, node2.protocol.node_id
        peer = make_kademlia_peer(node_id, address, udp_port=port)
        self.assertTrue(node1.protocol.peer_manager.peer_is_good(peer))
        node1.protocol.peer_manager.report_failure(node2.protocol.external_ip, node2.protocol.udp_port)
        node1.protocol.peer_manager.report_failure(node2.protocol.external_ip, node2.protocol.udp_port)
        self.assertFalse(node1.protocol.peer_manager.peer_is_good(peer))

        # now a search happens, which removes bad peers while contacting them
        self.assertTrue(node1.protocol.routing_table.get_peers())
        await node1.peer_search(node2.protocol.node_id)
        await asyncio.sleep(.3)  # let pending events settle
        self.assertFalse(node1.protocol.routing_table.get_peers())

    async def test_peer_persistance(self):
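        """Peers are persisted to storage on refresh and restored into the routing table when a node restarts with the same storage."""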
        num_nodes = 6
        start_port = 40000
        num_seeds = 2
        external_ip = '127.0.0.1'

        # Start a network and wait for all nodes to join
        await self.setup_network(num_nodes, start_port=start_port, seed_nodes=num_seeds)
        await asyncio.gather(*[node.joined.wait() for node in self.nodes])

        node1 = self.nodes[-1]
        peer_args = [(n.protocol.node_id, n.protocol.external_ip, n.protocol.udp_port, n.protocol.peer_port) for n in
                     self.nodes[:num_seeds]]
        peers = [make_kademlia_peer(*args) for args in peer_args]

        # node1 is bootstrapped from the fixed seeds
        self.assertCountEqual(peers, node1.protocol.routing_table.get_peers())

        # Refresh and assert that the peers were persisted
        await node1.refresh_node(True)
        self.assertEqual(len(peer_args), len(await node1._storage.get_persisted_kademlia_peers()))
        node1.stop()

        # Start a fresh node with the same node_id and storage, but no known peers
        node2 = await self.create_node(constants.generate_id(num_nodes - 1), start_port + num_nodes - 1)
        node2._storage = node1._storage
        node2.start(external_ip, [])
        await node2.joined.wait()

        # The peers are restored
        self.assertEqual(num_seeds, len(node2.protocol.routing_table.get_peers()))
        for bucket1, bucket2 in zip(node1.protocol.routing_table.buckets, node2.protocol.routing_table.buckets):
            self.assertEqual((bucket1.range_min, bucket1.range_max), (bucket2.range_min, bucket2.range_max))

    async def test_switch_to_known_seeds(self):
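        """If the peers restored from storage go bad, the node falls back to its known seed addresses."""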
        num_peers = 10
        start_port = 40000
        external_ip = '127.0.0.1'

        await self.setup_network(num_peers, seed_nodes=num_peers // 2, start_port=start_port)
        await asyncio.gather(*[node.joined.wait() for node in self.nodes])
        peer_args = [
            (n.protocol.node_id, n.protocol.external_ip, n.protocol.udp_port) for n in self.nodes
        ]
        known_peers = [make_kademlia_peer(*args) for args in peer_args[:num_peers // 2]]
        known_nodes = self.nodes[:num_peers // 2]
        persisted_peers = [make_kademlia_peer(*args) for args in peer_args[num_peers // 2:]]
        persisted_nodes = self.nodes[num_peers // 2:]

        # Create a node with the persisted peers in storage
        node = await self.create_node(constants.generate_id(num_peers), start_port + num_peers)
        await node._storage.save_kademlia_peers(persisted_peers)

        # Stop the known peers so they stop replying and won't be added
        for n in known_nodes:
            n.stop()

        node.start(external_ip, self.known_node_addresses[:num_peers // 2])
        await node.joined.wait()
        self.assertTrue(node.joined.is_set())

        # Only the persisted peers are added to the routing table
        self.assertCountEqual(persisted_peers, node.protocol.routing_table.get_peers())

        # Start the known nodes, stop the persisted ones
        for n1, n2 in zip(known_nodes, persisted_nodes):
            n1.start(external_ip)
            n2.stop()
        await asyncio.gather(*[n.joined.wait() for n in known_nodes])
        await asyncio.sleep(3)
        self.assertTrue(all(known.joined.is_set() for known in known_nodes))
        self.assertTrue(all(not persisted.joined.is_set() for persisted in persisted_nodes))

        # Remove the persisted peers from the node's routing table and mark them as bad
        for peer in persisted_peers:
            node.protocol.routing_table.remove_peer(peer)
            node.protocol.peer_manager.report_failure(peer.address, peer.udp_port)
        self.assertFalse(node.protocol.routing_table.get_peers())

        # The known peers replace the persisted ones
        await node.joined.wait()
        await asyncio.sleep(3)
        self.assertCountEqual(known_peers, node.protocol.routing_table.get_peers())